snr_data_gen.py
import os
import sys
import librosa
import yaml
import numpy as np
import subprocess
from pydub import AudioSegment  # needed by change_decibel() below

''' SNR modified data generator for Experiment 2 '''
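# Overview of the script:
#   1. For each RAW vocal file in vocalfilepath, load the first DURATION seconds.
#   2. Build a peak-normalised instrumental mix of the same song from its MedleyDB
#      stems, skipping the lead-vocal stems (make_mixfile).
#   3. Scale the vocal by each offset in dB_scale and add it to the instrumental mix.
#   4. Write every mix to savepath/<save_dir[i]>/songs/<songname>_<level>.wav.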
mixfilepath = '/media/bach3/dataset/MedleyDB/Audio/'  # path to MIX files
vocalfilepath = '/media/bach1/dataset/MedleyDB/voice_only/'  # path to RAW vocal files
list_of_vocal_files = os.listdir(vocalfilepath)
savepath = './loudness/'  # output root for the SNR-modified mixes
audio_length = 15000  # (not used below)
SR = 22050  # sample rate in Hz
START = 0  # load offset in seconds
DURATION = 60  # excerpt length in seconds
def rms(song):
    ''' Root-mean-square level of a signal. '''
    sq = np.square(song)
    meansq = np.mean(sq)
    rootmeansq = np.sqrt(meansq)
    return rootmeansq


def normalize(mix):
    ''' Peak-normalisation factor: multiplying the signal by it scales it to [-1, 1]. '''
    norm = 1 / (np.max(abs(mix)))
    return norm
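
# Quick illustration (values chosen for the example, not taken from the data):
# for x = np.array([0.0, -0.5, 0.25]), normalize(x) returns 2.0, so 2.0 * x peaks
# at exactly 1.0 in absolute value; rms(x) is about 0.323.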
def make_mixfile(songtitle, start, duration):
    '''
    Build an instrumental (vocal-free) mix from the song's MedleyDB stems.

    Args :
        songtitle : name of the MedleyDB song folder
        start : offset in seconds at which to start loading each stem
        duration : excerpt length in seconds
    '''
    yamlfilepath = os.path.join(mixfilepath, songtitle, songtitle + '_METADATA.yaml')
    inst_audios = []
    stemfolder = None
    with open(yamlfilepath, 'r') as stream:
        song_dict = yaml.safe_load(stream)
        stemfolder = song_dict['stem_dir']
        for stem in song_dict['stems']:
            # skip the lead-vocal stems; only instrumental stems go into the mix
            stem_info = song_dict['stems'][stem]
            if stem_info['component'] == 'melody' and stem_info['instrument'] in ('male singer', 'female singer'):
                continue
            stemfile = stem_info['filename']
            print(stemfile)
            inst_audios.append(stemfile)

    mixsong = np.zeros((0,))
    for track in inst_audios:
        loaded_track, sr = librosa.load(os.path.join(mixfilepath, songtitle, stemfolder, track),
                                        sr=SR, offset=start, duration=duration, mono=True)
        if mixsong.shape[0] == 0:
            mixsong = loaded_track
        else:
            mixsong += loaded_track

    # mixsong = mixsong * rms(mixsong) * (1-1/np.power(2,16))
    mixsong = normalize(mixsong) * mixsong  # peak-normalise the summed stems
    return mixsong
def change_decibel(vocalfile, level):
    ''' Return the wav file boosted/attenuated by `level` dB (pydub applies gain in dB).
        Not used in the main block below, which scales the numpy signal directly. '''
    vocsong = AudioSegment.from_wav(vocalfile)
    vocsong = vocsong + level
    return vocsong
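
# A minimal sketch (illustration only; it is never called by this script) of the
# dB-to-amplitude conversion used in the main loop below: multiplying a signal by
# 10 ** (dB / 20) shifts its RMS level by exactly that many dB. The random signal
# and seed are arbitrary stand-ins, not data from the experiment.
def _demo_db_to_amplitude():
    rng = np.random.RandomState(0)
    voc = rng.randn(SR)  # one second of a stand-in signal at the script's sample rate
    for r_db in [-12, -6, 0, 6, 12]:
        scale = np.power(10, r_db / 20.0)  # same conversion as in the main loop
        shift = 20 * np.log10(rms(voc * scale) / rms(voc))
        print('%+d dB requested -> %+.2f dB measured' % (r_db, shift))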
if __name__ == '__main__':
    dB_scale = [-12, -6, 0, 6, 12]  # vocal gain offsets in dB
    save_dir = ['voc_m12', 'voc_m6', 'voc_p0', 'voc_p6', 'voc_p12']  # one output folder per offset

    for song in list_of_vocal_files:
        print(song)
        songname = song.split('_RAW')[0]
        print(songname)
        if song[0] == '.':
            continue  # skip hidden files

        # first make instrument mix file
        vocalfile = os.path.join(vocalfilepath, song)
        voc_signal, _ = librosa.load(vocalfile, sr=SR, mono=True, offset=START, duration=DURATION)
        total_len = librosa.get_duration(y=voc_signal, sr=SR)  # in seconds
        print(total_len)
        voc_signal *= 0.7
        '''
        if total_len > 60:
            start = 30
        elif total_len > 30 :
            start = total_len - 30
        else:
        '''
        inst_mix = make_mixfile(songname, START, DURATION)
        inst_mix *= 0.8
        print(vocalfile)

        for i in range(len(dB_scale)):
            # modify the vocal level and mix it with the instrumental track
            R_dB = dB_scale[i]
            curr_savepath = os.path.join(savepath, save_dir[i], 'songs')
            if not os.path.isdir(curr_savepath):
                os.makedirs(curr_savepath)
            R = np.power(10, (R_dB / 20.0))  # dB to amplitude conversion, e.g. -12 dB -> 0.251, +12 dB -> 3.981
            voc_to_inst_ratio = R
            print("ratio", voc_to_inst_ratio)

            # mix file (note: the sum is not re-normalised, so large boosts may clip)
            final_mix = voc_signal * voc_to_inst_ratio + inst_mix

            if R_dB < 0:
                lev = 'm' + str(abs(R_dB))
            else:
                lev = 'p' + str(abs(R_dB))

            # librosa.output was removed in librosa 0.8, so this call needs an older librosa
            librosa.output.write_wav(os.path.join(curr_savepath, songname + '_' + lev + '.wav'), final_mix, sr=SR)
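
# Usage: python snr_data_gen.py
# Output tree as constructed above (one wav per song and vocal level):
#   ./loudness/voc_m12/songs/<songname>_m12.wav
#   ./loudness/voc_m6/songs/<songname>_m6.wav
#   ./loudness/voc_p0/songs/<songname>_p0.wav
#   ./loudness/voc_p6/songs/<songname>_p6.wav
#   ./loudness/voc_p12/songs/<songname>_p12.wav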