Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

train.py 16 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
  1. import torch
  2. from torch import nn
  3. import argparse
  4. import os
  5. import pathlib
  6. import importlib
  7. import ssl
  8. import time
  9. import copy
  10. import sys
  11. from datasets import utils as ds_utils
  12. from networks import utils as nt_utils
  13. from runners import utils as rn_utils
  14. from logger import Logger
class TrainingWrapper(object):
    """Top-level wrapper that configures, initializes and trains the models.

    Responsibilities:
      - declare all command-line options (``get_args``),
      - build the runner (model container) and restore checkpoints (``__init__``),
      - run the optimization loop with optional apex/DDP support (``train``).
    """

    @staticmethod
    def get_args(parser):
        """Register all command-line options on ``parser`` and return it.

        NOTE: the parser is expected to expose ``parser.add`` as an alias of
        ``add_argument`` (set up by the caller, see the ``__main__`` block).
        Also delegates to the dataset- and runner-specific ``get_args`` hooks,
        selected by ``--dataloader_name`` / ``--runner_name``.
        """
        # General options
        parser.add('--project_dir', default='.', type=str,
                   help='root directory of the code')

        parser.add('--torch_home', default='', type=str,
                   help='directory used for storage of the checkpoints')

        parser.add('--experiment_name', default='test', type=str,
                   help='name of the experiment used for logging')

        parser.add('--dataloader_name', default='voxceleb2', type=str,
                   help='name of the file in dataset directory which is used for data loading')

        parser.add('--dataset_name', default='voxceleb2_512px', type=str,
                   help='name of the dataset in the data root folder')

        parser.add('--data_root', default=".", type=str,
                   help='root directory of the data')

        parser.add('--debug', action='store_true',
                   help='turn on the debug mode: fast epoch, useful for testing')

        parser.add('--runner_name', default='default', type=str,
                   help='class that wraps the models and performs training and inference steps')

        parser.add('--no_disk_write_ops', action='store_true',
                   help='avoid doing write operations to disk')

        parser.add('--redirect_print_to_file', action='store_true',
                   help='redirect stdout and stderr to file')

        parser.add('--random_seed', default=0, type=int,
                   help='used for initialization of pytorch and numpy seeds')

        # Initialization options
        parser.add('--init_experiment_dir', default='', type=str,
                   help='directory of the experiment used for the initialization of the networks')

        parser.add('--init_networks', default='', type=str,
                   help='list of networks to intialize')

        parser.add('--init_which_epoch', default='none', type=str,
                   help='epoch to initialize from')

        parser.add('--which_epoch', default='none', type=str,
                   help='epoch to continue training from')

        # Distributed options
        parser.add('--num_gpus', default=1, type=int,
                   help='>1 enables DDP')

        # Training options
        parser.add('--num_epochs', default=1000, type=int,
                   help='number of epochs for training')

        parser.add('--checkpoint_freq', default=25, type=int,
                   help='frequency of checkpoints creation in epochs')

        parser.add('--test_freq', default=5, type=int,
                   help='frequency of testing in epochs')

        parser.add('--batch_size', default=1, type=int,
                   help='batch size across all GPUs')

        parser.add('--num_workers_per_process', default=20, type=int,
                   help='number of workers used for data loading in each process')

        parser.add('--skip_test', action='store_true',
                   help='do not perform testing')

        parser.add('--calc_stats', action='store_true',
                   help='calculate batch norm standing stats')

        parser.add('--visual_freq', default=-1, type=int,
                   help='in iterations, -1 -- output logs every epoch')

        # Mixed precision options
        parser.add('--use_half', action='store_true',
                   help='enable half precision calculation')

        parser.add('--use_closure', action='store_true',
                   help='use closure function during optimization (required by LBFGS)')

        parser.add('--use_apex', action='store_true',
                   help='enable apex')

        parser.add('--amp_opt_level', default='O0', type=str,
                   help='full/mixed/half precision, refer to apex.amp docs')

        parser.add('--amp_loss_scale', default='dynamic', type=str,
                   help='fixed or dynamic loss scale')

        # Technical options that are set automatically
        parser.add('--local_rank', default=0, type=int)
        parser.add('--rank', default=0, type=int)
        parser.add('--world_size', default=1, type=int)
        parser.add('--train_size', default=1, type=int)

        # Dataset options: a partial parse is needed first to learn which
        # dataset/runner modules to ask for their own options.
        args, _ = parser.parse_known_args()

        os.environ['TORCH_HOME'] = args.torch_home

        importlib.import_module(f'datasets.{args.dataloader_name}').DatasetWrapper.get_args(parser)

        # Runner options
        importlib.import_module(f'runners.{args.runner_name}').RunnerWrapper.get_args(parser)

        return parser
  93. def __init__(self, args, runner=None):
  94. super(TrainingWrapper, self).__init__()
  95. # Initialize and apply general options
  96. ssl._create_default_https_context = ssl._create_unverified_context
  97. torch.backends.cudnn.benchmark = True
  98. torch.manual_seed(args.random_seed)
  99. torch.cuda.manual_seed_all(args.random_seed)
  100. # Set distributed training options
  101. if args.num_gpus > 1 and args.num_gpus <= 8:
  102. args.rank = args.local_rank
  103. args.world_size = args.num_gpus
  104. torch.cuda.set_device(args.local_rank)
  105. torch.distributed.init_process_group(backend='nccl', init_method='env://')
  106. elif args.num_gpus > 8:
  107. raise # Not supported
  108. # Prepare experiment directories and save options
  109. project_dir = pathlib.Path(args.project_dir)
  110. self.checkpoints_dir = project_dir / 'runs' / args.experiment_name / 'checkpoints'
  111. # Store options
  112. if not args.no_disk_write_ops:
  113. os.makedirs(self.checkpoints_dir, exist_ok=True)
  114. self.experiment_dir = project_dir / 'runs' / args.experiment_name
  115. if not args.no_disk_write_ops:
  116. # Redirect stdout
  117. if args.redirect_print_to_file:
  118. logs_dir = self.experiment_dir / 'logs'
  119. os.makedirs(logs_dir, exist_ok=True)
  120. sys.stdout = open(os.path.join(logs_dir, f'stdout_{args.rank}.txt'), 'w')
  121. sys.stderr = open(os.path.join(logs_dir, f'stderr_{args.rank}.txt'), 'w')
  122. if args.rank == 0:
  123. print(args)
  124. with open(self.experiment_dir / 'args.txt', 'wt') as args_file:
  125. for k, v in sorted(vars(args).items()):
  126. args_file.write('%s: %s\n' % (str(k), str(v)))
  127. # Initialize model
  128. self.runner = runner
  129. if self.runner is None:
  130. self.runner = importlib.import_module(f'runners.{args.runner_name}').RunnerWrapper(args)
  131. # Load pre-trained weights (if needed)
  132. init_networks = rn_utils.parse_str_to_list(args.init_networks) if args.init_networks else {}
  133. networks_to_train = self.runner.nets_names_to_train
  134. if args.init_which_epoch != 'none' and args.init_experiment_dir:
  135. for net_name in init_networks:
  136. self.runner.nets[net_name].load_state_dict(torch.load(pathlib.Path(args.init_experiment_dir) / 'checkpoints' / f'{args.init_which_epoch}_{net_name}.pth', map_location='cpu'))
  137. if args.which_epoch != 'none':
  138. for net_name in networks_to_train:
  139. if net_name not in init_networks:
  140. self.runner.nets[net_name].load_state_dict(torch.load(self.checkpoints_dir / f'{args.which_epoch}_{net_name}.pth', map_location='cpu'))
  141. if args.num_gpus > 0:
  142. self.runner.cuda()
  143. if args.rank == 0:
  144. print(self.runner)
  145. def train(self, args):
  146. # Reset amp
  147. if args.use_apex:
  148. from apex import amp
  149. amp.init(False)
  150. # Get dataloaders
  151. train_dataloader = ds_utils.get_dataloader(args, 'train')
  152. if not args.skip_test:
  153. test_dataloader = ds_utils.get_dataloader(args, 'test')
  154. model = runner = self.runner
  155. if args.use_half:
  156. runner.half()
  157. # Initialize optimizers, schedulers and apex
  158. opts = runner.get_optimizers(args)
  159. # Load pre-trained params for optimizers and schedulers (if needed)
  160. if args.which_epoch != 'none' and not args.init_experiment_dir:
  161. for net_name, opt in opts.items():
  162. opt.load_state_dict(torch.load(self.checkpoints_dir / f'{args.which_epoch}_opt_{net_name}.pth', map_location='cpu'))
  163. if args.use_apex and args.num_gpus > 0 and args.num_gpus <= 8:
  164. # Enfornce apex mixed precision settings
  165. nets_list, opts_list = [], []
  166. for net_name in sorted(opts.keys()):
  167. nets_list.append(runner.nets[net_name])
  168. opts_list.append(opts[net_name])
  169. loss_scale = float(args.amp_loss_scale) if args.amp_loss_scale != 'dynamic' else args.amp_loss_scale
  170. nets_list, opts_list = amp.initialize(nets_list, opts_list, opt_level=args.amp_opt_level, num_losses=1, loss_scale=loss_scale)
  171. # Unpack opts_list into optimizers
  172. for net_name, net, opt in zip(sorted(opts.keys()), nets_list, opts_list):
  173. runner.nets[net_name] = net
  174. opts[net_name] = opt
  175. if args.which_epoch != 'none' and not args.init_experiment_dir and os.path.exists(self.checkpoints_dir / f'{args.which_epoch}_amp.pth'):
  176. amp.load_state_dict(torch.load(self.checkpoints_dir / f'{args.which_epoch}_amp.pth', map_location='cpu'))
  177. # Initialize apex distributed data parallel wrapper
  178. if args.num_gpus > 1 and args.num_gpus <= 8:
  179. from apex import parallel
  180. model = parallel.DistributedDataParallel(runner, delay_allreduce=True)
  181. epoch_start = 1 if args.which_epoch == 'none' else int(args.which_epoch) + 1
  182. # Initialize logging
  183. train_iter = epoch_start - 1
  184. if args.visual_freq != -1:
  185. train_iter /= args.visual_freq
  186. logger = Logger(args, self.experiment_dir)
  187. logger.set_num_iter(
  188. train_iter=train_iter,
  189. test_iter=(epoch_start - 1) // args.test_freq)
  190. if args.debug and not args.use_apex:
  191. torch.autograd.set_detect_anomaly(True)
  192. total_iters = 1
  193. for epoch in range(epoch_start, args.num_epochs + 1):
  194. if args.rank == 0:
  195. print('epoch %d' % epoch)
  196. # Train for one epoch
  197. model.train()
  198. time_start = time.time()
  199. # Shuffle the dataset before the epoch
  200. train_dataloader.dataset.shuffle()
  201. for i, data_dict in enumerate(train_dataloader, 1):
  202. # Prepare input data
  203. if args.num_gpus > 0 and args.num_gpus > 0:
  204. for key, value in data_dict.items():
  205. data_dict[key] = value.cuda()
  206. # Convert inputs to FP16
  207. if args.use_half:
  208. for key, value in data_dict.items():
  209. data_dict[key] = value.half()
  210. output_logs = i == len(train_dataloader)
  211. if args.visual_freq != -1:
  212. output_logs = not (total_iters % args.visual_freq)
  213. output_visuals = output_logs and not args.no_disk_write_ops
  214. # Accumulate list of optimizers that will perform opt step
  215. for opt in opts.values():
  216. opt.zero_grad()
  217. # Perform a forward pass
  218. if not args.use_closure:
  219. loss = model(data_dict)
  220. closure = None
  221. if args.use_apex and args.num_gpus > 0 and args.num_gpus <= 8:
  222. # Mixed precision requires a special wrapper for the loss
  223. with amp.scale_loss(loss, opts.values()) as scaled_loss:
  224. scaled_loss.backward()
  225. elif not args.use_closure:
  226. loss.backward()
  227. else:
  228. def closure():
  229. loss = model(data_dict)
  230. loss.backward()
  231. return loss
  232. # Perform steps for all optimizers
  233. for opt in opts.values():
  234. opt.step(closure)
  235. if output_logs:
  236. logger.output_logs('train', runner.output_visuals(), runner.output_losses(), time.time() - time_start)
  237. if args.debug:
  238. break
  239. if args.visual_freq != -1:
  240. total_iters += 1
  241. total_iters %= args.visual_freq
  242. # Increment the epoch counter in the training dataset
  243. train_dataloader.dataset.epoch += 1
  244. # If testing is not required -- continue
  245. if epoch % args.test_freq:
  246. continue
  247. # If skip test flag is set -- only check if a checkpoint if required
  248. if not args.skip_test:
  249. # Calculate "standing" stats for the batch normalization
  250. if args.calc_stats:
  251. runner.calculate_batchnorm_stats(train_dataloader, args.debug)
  252. # Test
  253. time_start = time.time()
  254. model.eval()
  255. for data_dict in test_dataloader:
  256. # Prepare input data
  257. if args.num_gpus > 0:
  258. for key, value in data_dict.items():
  259. data_dict[key] = value.cuda()
  260. # Forward pass
  261. with torch.no_grad():
  262. model(data_dict)
  263. if args.debug:
  264. break
  265. # Output logs
  266. logger.output_logs('test', runner.output_visuals(), runner.output_losses(), time.time() - time_start)
  267. # If creation of checkpoint is not required -- continue
  268. if epoch % args.checkpoint_freq and not args.debug:
  269. continue
  270. # Create or load a checkpoint
  271. if args.rank == 0 and not args.no_disk_write_ops:
  272. with torch.no_grad():
  273. for net_name in runner.nets_names_to_train:
  274. # Save a network
  275. torch.save(runner.nets[net_name].state_dict(), self.checkpoints_dir / f'{epoch}_{net_name}.pth')
  276. # Save an optimizer
  277. torch.save(opts[net_name].state_dict(), self.checkpoints_dir / f'{epoch}_opt_{net_name}.pth')
  278. # Save amp
  279. if args.use_apex:
  280. torch.save(amp.state_dict(), self.checkpoints_dir / f'{epoch}_amp.pth')
  281. return runner
if __name__ == "__main__":
    ## Parse options ##
    # `conflict_handler='resolve'` lets dataset/runner modules re-declare
    # options already registered by TrainingWrapper without raising.
    parser = argparse.ArgumentParser(conflict_handler='resolve')
    # Shorthand alias relied upon by all get_args implementations
    parser.add = parser.add_argument

    TrainingWrapper.get_args(parser)
    # parse_known_args: ignore options belonging to other tools/launchers
    args, _ = parser.parse_known_args()

    ## Initialize the model ##
    m = TrainingWrapper(args)

    ## Perform training ##
    nets = m.train(args)
Tip!

Press p (or the left arrow key) to see the previous file, or n (or the right arrow key) to see the next file

Comments

Loading...