train.py (5.7 KB)

from meldataset import build_dataloader, build_dataloaderHF
from optimizers import build_optimizer
from utils import *  # get_data_path_list, build_criterion, plot_image come from here
from models import build_model
from trainer import Trainer

import os
import os.path as osp
import shutil

import yaml
import torch
from torch.utils.tensorboard import SummaryWriter
import click

import logging
from logging import StreamHandler

# Console logger; a file handler is attached in main() once log_dir is known
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

torch.backends.cudnn.benchmark = True  # auto-tune cuDNN kernels for fixed input sizes


@click.command()
@click.option('-p', '--config_path', default='./Configs/config.yml', type=str)
def main(config_path):
    with open(config_path) as f:
        config = yaml.safe_load(f)

    # Create the run directory and keep a copy of the config next to the logs
    log_dir = config['log_dir']
    if not osp.exists(log_dir):
        os.makedirs(log_dir, exist_ok=True)
    shutil.copy(config_path, osp.join(log_dir, osp.basename(config_path)))
    writer = SummaryWriter(log_dir + "/tensorboard")

    # Mirror log output to a file in the run directory
    file_handler = logging.FileHandler(osp.join(log_dir, 'train.log'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s: %(message)s'))
    logger.addHandler(file_handler)

    batch_size = config.get('batch_size', 10)
    device = config.get('device', 'cpu')
    epochs = config.get('epochs', 1000)
    save_freq = config.get('save_freq', 20)
    train_path = config.get('train_data', None)
    val_path = config.get('val_data', None)

    logger.debug(config.get('preprocess_params', {}))
    logger.debug(config.get('HF', {}))

    # Hugging Face dataset settings; .get() keeps the script usable when the
    # 'HF' section is omitted from the config entirely
    HF_config = config.get('HF', {})
    use_HF = HF_config.get("use", False)
    HF_name = HF_config.get("name")
    HF_train_split = HF_config.get("train_split")
    HF_val_split = HF_config.get("val_split")
    audio_column = HF_config.get("audio_column")
    phoneme_column = HF_config.get("phoneme_column")
    speaker_id_column = HF_config.get("speaker_id_column")

    if not use_HF:
        # Local train/val file lists -> dataloaders
        train_list, val_list = get_data_path_list(train_path, val_path)
        train_dataloader = build_dataloader(train_list,
                                            batch_size=batch_size,
                                            dataset_config=config.get('preprocess_params', {}),
                                            device=device)
        val_dataloader = build_dataloader(val_list,
                                          batch_size=batch_size,
                                          validation=True,
                                          device=device,
                                          dataset_config=config.get('preprocess_params', {}))
    else:
        # Hugging Face dataset -> dataloaders
        train_dataloader = build_dataloaderHF(name=HF_name,
                                              split=HF_train_split,
                                              audio_column=audio_column,
                                              text_column=phoneme_column,
                                              speaker_column=speaker_id_column,
                                              batch_size=batch_size,
                                              dataset_config=config.get('preprocess_params', {}),
                                              device=device)
        val_dataloader = build_dataloaderHF(name=HF_name,
                                            split=HF_val_split,
                                            audio_column=audio_column,
                                            text_column=phoneme_column,
                                            speaker_column=speaker_id_column,
                                            batch_size=batch_size,
                                            dataset_config=config.get('preprocess_params', {}),
                                            device=device,
                                            validation=True)

    model = build_model(model_params=config['model_params'] or {})

    # One-cycle LR schedule sized to the full training run
    scheduler_params = {
        "max_lr": float(config['optimizer_params'].get('lr', 5e-4)),
        "pct_start": float(config['optimizer_params'].get('pct_start', 0.0)),
        "epochs": epochs,
        "steps_per_epoch": len(train_dataloader),
    }

    model.to(device)
    optimizer, scheduler = build_optimizer(
        {"params": model.parameters(), "optimizer_params": {}, "scheduler_params": scheduler_params})

    # The CTC blank token is the index of the space character
    blank_index = train_dataloader.dataset.text_cleaner.word_index_dictionary[" "]
    criterion = build_criterion(critic_params={
        'ctc': {'blank': blank_index},
    })

    trainer = Trainer(model=model,
                      criterion=criterion,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      device=device,
                      train_dataloader=train_dataloader,
                      val_dataloader=val_dataloader,
                      logger=logger)

    # Optionally warm-start from a pretrained checkpoint
    if config.get('pretrained_model', '') != '':
        trainer.load_checkpoint(config['pretrained_model'],
                                load_only_params=config.get('load_only_params', True))

    for epoch in range(1, epochs + 1):
        train_results = trainer._train_epoch()
        eval_results = trainer._eval_epoch()
        results = train_results.copy()
        results.update(eval_results)
        logger.info('--- epoch %d ---' % epoch)
        for key, value in results.items():
            if isinstance(value, float):
                # Scalar metrics go to both the log and TensorBoard
                logger.info('%-15s: %.4f' % (key, value))
                writer.add_scalar(key, value, epoch)
            else:
                # Non-scalar results (e.g. attention matrices) are logged as figures
                for v in value:
                    writer.add_figure('eval_attn', plot_image(v), epoch)
        if (epoch % save_freq) == 0:
            trainer.save_checkpoint(osp.join(log_dir, 'epoch_%05d.pth' % epoch))

    return 0


if __name__ == "__main__":
    main()
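For reference, every key the script reads from its YAML config, collected into one sketch. The key names match the lookups in the code above; the values are illustrative placeholders, not taken from the repo:

    log_dir: "./Checkpoints/run1"
    batch_size: 10
    device: "cuda"
    epochs: 1000
    save_freq: 20
    train_data: "./Data/train_list.txt"
    val_data: "./Data/val_list.txt"
    preprocess_params: {}
    model_params: {}
    optimizer_params:
      lr: 5.0e-4
      pct_start: 0.0
    pretrained_model: ""        # leave empty to train from scratch
    load_only_params: true
    HF:
      use: false                # set true to load from a Hugging Face dataset
      name: ""
      train_split: "train"
      val_split: "validation"
      audio_column: "audio"
      phoneme_column: "phonemes"
      speaker_id_column: "speaker_id"

The config path is passed via the click option, so a run looks like:

    python train.py --config_path ./Configs/config.yml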