create_training_data.py

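# Preprocessing script for a TransformerTTS-style text-to-speech pipeline:
# extracts mel spectrograms and pitch contours from the dataset's wav files,
# then phonemizes the transcript metadata and writes train/validation splits.
# Typical invocation (assumed from the argparse flags below; the config path
# is only an example):
#   python create_training_data.py --config config/training_config.yaml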
import argparse
from pathlib import Path
import pickle

import numpy as np
from p_tqdm import p_uimap, p_umap

from data.audio import Audio
from data.datasets import DataReader
from data.text import TextToTokens
from data.text.symbols import _alphabet
from utils.logging_utils import SummaryManager
from utils.training_config_manager import TrainingConfigManager

np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--skip_phonemes', action='store_true')
parser.add_argument('--skip_mels', action='store_true')
args = parser.parse_args()
for arg in vars(args):
    print('{}: {}'.format(arg, getattr(args, arg)))
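# Load the training configuration and the metadata reader. create_remove_dirs()
# (re)creates the output directories used below (cm.mel_dir, cm.pitch_dir,
# cm.data_dir); the exact behavior is defined in this repo's TrainingConfigManager.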
cm = TrainingConfigManager(args.config, aligner=True)
cm.create_remove_dirs()
metadatareader = DataReader.from_config(cm, kind='original', scan_wavs=True)
summary_manager = SummaryManager(model=None, log_dir=cm.log_dir / 'data_preprocessing', config=cm.config,
                                 default_writer='data_preprocessing')
file_ids_from_wavs = list(metadatareader.wav_paths.keys())
print(f'Reading wavs from {metadatareader.wav_directory}')
print(f'Reading metadata from {metadatareader.metadata_path}')
print(f'\nFound {len(metadatareader.filenames)} metadata lines.')
print(f'\nFound {len(file_ids_from_wavs)} wav files.')
cross_file_ids = [fid for fid in file_ids_from_wavs if fid in metadatareader.filenames]
print(f'\nThere are {len(cross_file_ids)} wav file names that correspond to metadata lines.')
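# Mel/pitch extraction: for every wav that has a matching metadata line, compute
# a mel spectrogram and a frame-aligned pitch contour and save both as .npy files.
# p_uimap runs process_wav across worker processes and yields results unordered.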
if not args.skip_mels:
    def process_wav(wav_path: Path):
        file_name = wav_path.stem
        y, sr = audio.load_wav(str(wav_path))
        pitch = audio.extract_pitch(y)
        mel = audio.mel_spectrogram(y)
        assert len(mel.shape) == 2
        assert mel.shape[1] == audio.config['mel_channels']
        assert mel.shape[0] == pitch.shape[0], f'{mel.shape[0]} == {pitch.shape[0]} (wav {y.shape})'
        mel_path = (cm.mel_dir / file_name).with_suffix('.npy')
        pitch_path = (cm.pitch_dir / file_name).with_suffix('.npy')
        np.save(mel_path, mel)
        np.save(pitch_path, pitch)
        return {'fname': file_name, 'mel.len': mel.shape[0], 'pitch.path': pitch_path, 'pitch': pitch}
    print('\nMels will be stored under')
    print(f'{cm.mel_dir}')
    audio = Audio.from_config(config=cm.config)
    wav_files = [metadatareader.wav_paths[k] for k in cross_file_ids]
    len_dict = {}
    remove_files = []
    mel_lens = []
    pitches = {}
    wav_iter = p_uimap(process_wav, wav_files)
    for out_dict in wav_iter:
        len_dict.update({out_dict['fname']: out_dict['mel.len']})
        pitches.update({out_dict['pitch.path']: out_dict['pitch']})
        if out_dict['mel.len'] > cm.config['max_mel_len'] or out_dict['mel.len'] < cm.config['min_mel_len']:
            remove_files.append(out_dict['fname'])
        else:
            mel_lens.append(out_dict['mel.len'])
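    # Pitch normalization: z-score with statistics computed over voiced (nonzero)
    # frames only, so unvoiced frames do not skew the mean/std; zeros are restored
    # after scaling to preserve the unvoiced marker.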
    def normalize_pitch_vectors(pitch_vecs):
        nonzeros = np.concatenate([v[np.where(v != 0.0)[0]]
                                   for v in pitch_vecs.values()])
        mean, std = np.mean(nonzeros), np.std(nonzeros)
        return mean, std

    def process_pitches(item: tuple):
        fname, pitch = item
        zero_idxs = np.where(pitch == 0.0)[0]
        pitch -= mean
        pitch /= std
        pitch[zero_idxs] = 0.0
        np.save(fname, pitch)

    mean, std = normalize_pitch_vectors(pitches)
    pickle.dump({'pitch_mean': mean, 'pitch_std': std}, open(cm.data_dir / 'pitch_stats.pkl', 'wb'))
    pitch_iter = p_umap(process_pitches, pitches.items())
    pickle.dump(len_dict, open(cm.data_dir / 'mel_len.pkl', 'wb'))
    pickle.dump(remove_files, open(cm.data_dir / 'under-over_sized_mels.pkl', 'wb'))
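    # Dataset duration: each mel frame advances hop_length samples, so
    # seconds = n_frames * hop_length / sampling_rate and hours = seconds / 60 ** 2.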
    summary_manager.add_histogram('Mel Lengths', values=np.array(mel_lens))
    total_mel_len = np.sum(mel_lens)
    total_wav_len = total_mel_len * audio.config['hop_length']
    summary_manager.display_scalar('Total duration (hours)',
                                   scalar_value=total_wav_len / audio.config['sampling_rate'] / 60. ** 2)
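# Phonemization: convert each surviving transcript to a phoneme sequence and
# write shuffled train/validation splits. This block only depends on the mel
# step through 'under-over_sized_mels.pkl', so it can be re-run with --skip_mels
# once the features exist.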
if not args.skip_phonemes:
    remove_files = pickle.load(open(cm.data_dir / 'under-over_sized_mels.pkl', 'rb'))
    phonemized_metadata_path = cm.phonemized_metadata_path
    train_metadata_path = cm.train_metadata_path
    test_metadata_path = cm.valid_metadata_path
    print(f'\nReading metadata from {metadatareader.metadata_path}')
    print(f'\nFound {len(metadatareader.filenames)} lines.')
    filter_metadata = []
    for fname in cross_file_ids:
        item = metadatareader.text_dict[fname]
        non_p = [c for c in item if c in _alphabet]
        if len(non_p) < 1:
            filter_metadata.append(fname)
    if len(filter_metadata) > 0:
        print(f'Removing {len(filter_metadata)} suspiciously short line(s):')
        for fname in filter_metadata:
            print(f'{fname}: {metadatareader.text_dict[fname]}')
    print(f'\nRemoving {len(remove_files)} line(s) due to mel filtering.')
    remove_files += filter_metadata
    metadata_file_ids = [fname for fname in cross_file_ids if fname not in remove_files]
    metadata_len = len(metadata_file_ids)
    sample_items = np.random.choice(metadata_file_ids, 5)
    test_len = cm.config['n_test']
    train_len = metadata_len - test_len
    print(f'\nMetadata contains {metadata_len} lines.')
    print(f'\nFiles will be stored under {cm.data_dir}')
    print(f' - all: {phonemized_metadata_path}')
    print(f' - {train_len} training lines: {train_metadata_path}')
    print(f' - {test_len} validation lines: {test_metadata_path}')
    print('\nMetadata samples:')
    for i in sample_items:
        print(f'{i}:{metadatareader.text_dict[i]}')
        summary_manager.add_text(f'{i}/text', text=metadatareader.text_dict[i])
    # run cleaner on raw text
    text_proc = TextToTokens.default(cm.config['phoneme_language'], add_start_end=False,
                                     with_stress=cm.config['with_stress'], model_breathing=cm.config['model_breathing'],
                                     njobs=1)

    def process_phonemes(file_id):
        text = metadatareader.text_dict[file_id]
        try:
            phon = text_proc.phonemizer(text)
        except Exception as e:
            print(f'{e}\nFile id {file_id}')
            raise BrokenPipeError
        return (file_id, phon)
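    # Phonemize in parallel; results arrive in arbitrary order, which is fine
    # because they are collected into a dict keyed by file id.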
    print('\nPHONEMIZING')
    phonemized_data = {}
    phon_iter = p_uimap(process_phonemes, metadata_file_ids)
    for (file_id, phonemes) in phon_iter:
        phonemized_data.update({file_id: phonemes})
    print('\nPhonemized metadata samples:')
    for i in sample_items:
        print(f'{i}:{phonemized_data[i]}')
        summary_manager.add_text(f'{i}/phonemes', text=phonemized_data[i])
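    # Train/validation split: shuffle the 'file_id|phonemes' lines, take the first
    # train_len lines for training and the last n_test for validation (disjoint,
    # since train_len = metadata_len - test_len).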
    new_metadata = [f'{k}|{v}\n' for k, v in phonemized_data.items()]
    shuffled_metadata = np.random.permutation(new_metadata)
    train_metadata = shuffled_metadata[0:train_len]
    test_metadata = shuffled_metadata[-test_len:]
    with open(phonemized_metadata_path, 'w+', encoding='utf-8') as file:
        file.writelines(new_metadata)
    with open(train_metadata_path, 'w+', encoding='utf-8') as file:
        file.writelines(train_metadata)
    with open(test_metadata_path, 'w+', encoding='utf-8') as file:
        file.writelines(test_metadata)
    # some checks
    assert metadata_len == len(set(list(phonemized_data.keys()))), \
        f'Length of metadata ({metadata_len}) does not match the length of the phoneme array ({len(set(list(phonemized_data.keys())))}). Check for empty text lines in metadata.'
    assert len(train_metadata) + len(test_metadata) == metadata_len, \
        f'Train and/or validation lengths incorrect. ({len(train_metadata)} + {len(test_metadata)} != {metadata_len})'
    print('\nDone')