callbacks.py

from enum import Enum
import math
from super_gradients.training.utils.utils import get_filename_suffix_by_framework
import torch
import numpy as np
import onnxruntime
import onnx
import os
from super_gradients.common.abstractions.abstract_logger import get_logger
import getpass
import copy

logger = get_logger(__name__)

try:
    from deci_lab_client.client import DeciPlatformClient
    from deci_lab_client.models import ModelBenchmarkState

    _imported_deci_lab_failiure = None
except (ImportError, NameError, ModuleNotFoundError) as import_err:
    logger.warn('Failed to import deci_lab_client')
    _imported_deci_lab_failiure = import_err


class Phase(Enum):
    PRE_TRAINING = "PRE_TRAINING"
    TRAIN_BATCH_END = "TRAIN_BATCH_END"
    TRAIN_BATCH_STEP = "TRAIN_BATCH_STEP"
    TRAIN_EPOCH_START = "TRAIN_EPOCH_START"
    TRAIN_EPOCH_END = "TRAIN_EPOCH_END"
    VALIDATION_BATCH_END = "VALIDATION_BATCH_END"
    VALIDATION_EPOCH_END = "VALIDATION_EPOCH_END"
    TEST_BATCH_END = "TEST_BATCH_END"
    TEST_END = "TEST_END"
    POST_TRAINING = "POST_TRAINING"


class PhaseContext:
    """
    Represents the input for phase callbacks, and is constantly updated after callback calls.
    """

    def __init__(self, epoch=None, batch_idx=None, optimizer=None, metrics_dict=None, inputs=None, preds=None,
                 target=None, metrics_compute_fn=None, loss_avg_meter=None, loss_log_items=None, criterion=None,
                 device=None, experiment_name=None, ckpt_dir=None, net=None, lr_warmup_epochs=None, sg_logger=None):
        self.epoch = epoch
        self.batch_idx = batch_idx
        self.optimizer = optimizer
        self.inputs = inputs
        self.preds = preds
        self.target = target
        self.metrics_dict = metrics_dict
        self.metrics_compute_fn = metrics_compute_fn
        self.loss_avg_meter = loss_avg_meter
        self.loss_log_items = loss_log_items
        self.criterion = criterion
        self.device = device
        self.stop_training = False
        self.experiment_name = experiment_name
        self.ckpt_dir = ckpt_dir
        self.net = net
        self.lr_warmup_epochs = lr_warmup_epochs
        self.sg_logger = sg_logger

    def update_context(self, **kwargs):
        for attr, attr_val in kwargs.items():
            setattr(self, attr, attr_val)


class PhaseCallback:
    def __init__(self, phase: Phase):
        self.phase = phase

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def __repr__(self):
        return self.__class__.__name__
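

# A minimal usage sketch (not part of the original module): subclassing PhaseCallback to
# log the running metrics at the end of every training epoch. The attributes read from the
# context (epoch, metrics_dict) exist on PhaseContext above; the class name is illustrative.
class _ExampleEpochEndLogger(PhaseCallback):
    def __init__(self):
        super().__init__(phase=Phase.TRAIN_EPOCH_END)

    def __call__(self, context: PhaseContext):
        # metrics_dict is populated by MetricsUpdateCallback further down this file.
        logger.info("epoch %s metrics: %s", context.epoch, context.metrics_dict)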


class ModelConversionCheckCallback(PhaseCallback):
    """
    Pre-training callback that verifies model conversion to onnx given specified conversion parameters.
    The model is converted, then inference is applied with onnx runtime.
    Use this callback with the same args as DeciPlatformCallback to prevent conversion fails at the end of training.
    Attributes:
        model_meta_data: (ModelMetadata) model's meta-data object.

        The following parameters may be passed as kwargs in order to control the conversion to onnx:
        :param opset_version (default=11)
        :param do_constant_folding (default=True)
        :param dynamic_axes (default={'input': {0: 'batch_size'},  # Variable length axes
                                      'output': {0: 'batch_size'}})
        :param input_names (default=["input"])
        :param output_names (default=["output"])
    """

    def __init__(self, model_meta_data, **kwargs):
        super(ModelConversionCheckCallback, self).__init__(phase=Phase.PRE_TRAINING)
        self.model_meta_data = model_meta_data
        self.opset_version = kwargs.get('opset_version') or 10
        self.do_constant_folding = kwargs.get('do_constant_folding', None) if kwargs.get('do_constant_folding',
                                                                                         None) else True
        self.input_names = kwargs.get('input_names') or ['input']
        self.output_names = kwargs.get('output_names') or ['output']
        self.dynamic_axes = kwargs.get('dynamic_axes') or {'input': {0: 'batch_size'},
                                                           'output': {0: 'batch_size'}}

    def __call__(self, context: PhaseContext):
        model = copy.deepcopy(context.net.module)
        model = model.cpu()

        x = torch.randn(self.model_meta_data.primary_batch_size, *self.model_meta_data.input_dimensions,
                        requires_grad=False)

        tmp_model_path = os.path.join(context.ckpt_dir, self.model_meta_data.name + '_tmp.onnx')

        model.eval()  # Put model into eval mode

        with torch.no_grad():
            torch_out = model(x)

        torch.onnx.export(model,               # Model being run
                          x,                   # Model input (or a tuple for multiple inputs)
                          tmp_model_path,      # Where to save the model (can be a file or file-like object)
                          export_params=True,  # Store the trained parameter weights inside the model file
                          opset_version=self.opset_version,
                          do_constant_folding=self.do_constant_folding,
                          input_names=self.input_names,
                          output_names=self.output_names,
                          dynamic_axes=self.dynamic_axes)

        onnx_model = onnx.load(tmp_model_path)
        onnx.checker.check_model(onnx_model)

        ort_session = onnxruntime.InferenceSession(tmp_model_path)

        # compute ONNX Runtime output prediction
        ort_inputs = {ort_session.get_inputs()[0].name: x.cpu().numpy()}
        ort_outs = ort_session.run(None, ort_inputs)

        # compare ONNX Runtime and PyTorch results
        np.testing.assert_allclose(torch_out.cpu().numpy(), ort_outs[0], rtol=1e-03, atol=1e-05)

        os.remove(tmp_model_path)

        logger.info("Exported model has been tested with ONNXRuntime, and the result looks good!")
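

# A hedged usage sketch (not part of the original module): __call__ above only reads
# `name`, `primary_batch_size` and `input_dimensions` from the meta-data object, so a
# simple stand-in is enough for illustration. In real use this would be a deci_lab_client
# ModelMetadata; the stand-in class below is hypothetical.
def _example_conversion_check(context: PhaseContext):
    class _FakeModelMetaData:  # hypothetical stand-in, for illustration only
        name = "my_model"
        primary_batch_size = 1
        input_dimensions = (3, 224, 224)

    callback = ModelConversionCheckCallback(model_meta_data=_FakeModelMetaData(), opset_version=11)
    callback(context)  # context.net and context.ckpt_dir must already be populated by the trainer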


class DeciLabUploadCallback(PhaseCallback):
    """
    Post-training callback for uploading and optimizing a model.
    Attributes:
        email: (str) username for Deci platform.
        model_meta_data: (ModelMetadata) model's meta-data object.
        optimization_request_form: (dict) optimization request form object.
        password: (str) default=None, should only be used for testing.
        ckpt_name: (str) default="ckpt_best.pth" refers to the filename of the checkpoint, inside the checkpoint directory.

        The following parameters may be passed as kwargs in order to control the conversion to onnx:
        :param opset_version
        :param do_constant_folding
        :param dynamic_axes
        :param input_names
        :param output_names
    """

    def __init__(self, email, model_meta_data, optimization_request_form, password=None, ckpt_name="ckpt_best.pth",
                 **kwargs):
        super().__init__(phase=Phase.POST_TRAINING)
        if _imported_deci_lab_failiure is not None:
            raise _imported_deci_lab_failiure

        self.model_meta_data = model_meta_data
        self.optimization_request_form = optimization_request_form
        self.conversion_kwargs = kwargs
        self.ckpt_name = ckpt_name

        self.platform_client = DeciPlatformClient('api.deci.ai', 443, https=True)
        password = password or getpass.getpass()
        self.platform_client.login(email, password)

    def __call__(self, context: PhaseContext):
        try:
            model = copy.deepcopy(context.net)
            model_state_dict_path = os.path.join(context.ckpt_dir, self.ckpt_name)
            model_state_dict = torch.load(model_state_dict_path)['net']
            model.load_state_dict(model_state_dict)

            self.platform_client.add_model(self.model_meta_data,
                                           local_loaded_model=model.module.cpu(),
                                           optimization_request=self.optimization_request_form,
                                           **self.conversion_kwargs)

            new_model_from_repo_name = self.model_meta_data.name + '_1_1'
            finished = False
            while not finished:
                your_model_from_repo = self.platform_client.get_model_by_name(name=new_model_from_repo_name).data
                if your_model_from_repo.benchmark_state not in [ModelBenchmarkState.IN_PROGRESS,
                                                                ModelBenchmarkState.PENDING]:
                    finished = True

            logger.info('successfully added ' + str(your_model_from_repo.name) + ' to model repository')

            filename_ext = get_filename_suffix_by_framework(self.model_meta_data.framework)
            download_path = os.path.join(context.ckpt_dir, new_model_from_repo_name + '_optimized' + filename_ext)
            self.platform_client.download_model(your_model_from_repo.model_id, download_to_path=download_path)
        except Exception as ex:
            logger.error(ex)
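

# A hedged usage sketch (not part of the original module): DeciLabUploadCallback logs in to
# the platform at construction time, so it is usually created just before training starts and
# handed to the trainer as a phase callback. `meta` and `request_form` are placeholders here.
def _example_deci_lab_upload(meta, request_form):
    upload = DeciLabUploadCallback(email="user@example.com",
                                   model_meta_data=meta,
                                   optimization_request_form=request_form)
    return upload  # would then be passed to training, e.g. via a phase_callbacks list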


class LRCallbackBase(PhaseCallback):
    """
    Base class for hard coded learning rate scheduling regimes, implemented as callbacks.
    """

    def __init__(self, phase, initial_lr, update_param_groups, train_loader_len, net, training_params, **kwargs):
        super(LRCallbackBase, self).__init__(phase)
        self.initial_lr = initial_lr
        self.lr = initial_lr
        self.update_param_groups = update_param_groups
        self.train_loader_len = train_loader_len
        self.net = net
        self.training_params = training_params

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def update_lr(self, optimizer, epoch, batch_idx=None):
        if self.update_param_groups:
            param_groups = self.net.module.update_param_groups(optimizer.param_groups, self.lr, epoch, batch_idx,
                                                               self.training_params, self.train_loader_len)
            optimizer.param_groups = param_groups
        else:
            # UPDATE THE OPTIMIZERS PARAMETER
            for param_group in optimizer.param_groups:
                param_group['lr'] = self.lr


class WarmupLRCallback(LRCallbackBase):
    """
    LR scheduling callback for lr warmup.
    At each epoch start during warmup, the learning rate is increased linearly from
    initial_lr / (lr_warmup_epochs + 1) up to initial_lr.
    """

    def __init__(self, **kwargs):
        super(WarmupLRCallback, self).__init__(Phase.TRAIN_EPOCH_START, **kwargs)

    def __call__(self, context: PhaseContext):
        if self.training_params.lr_warmup_epochs >= context.epoch:
            self.lr = self.initial_lr * (context.epoch + 1) / (self.training_params.lr_warmup_epochs + 1)
            self.update_lr(context.optimizer, context.epoch, None)
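

# A worked example of the warmup formula above (assumed values, for illustration only):
# with initial_lr = 0.1 and lr_warmup_epochs = 4, the epoch-start learning rates are
# 0.1 * (epoch + 1) / 5, i.e. 0.02, 0.04, 0.06, 0.08, 0.1 for epochs 0..4.
def _example_warmup_schedule(initial_lr=0.1, lr_warmup_epochs=4):
    return [initial_lr * (epoch + 1) / (lr_warmup_epochs + 1) for epoch in range(lr_warmup_epochs + 1)]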


class StepLRCallback(LRCallbackBase):
    """
    Hard coded step learning rate scheduling (i.e. at specific milestones).
    """

    def __init__(self, lr_updates, lr_decay_factor, **kwargs):
        super(StepLRCallback, self).__init__(Phase.TRAIN_EPOCH_END, **kwargs)
        self.lr_updates = lr_updates
        self.lr_decay_factor = lr_decay_factor

    def __call__(self, context: PhaseContext):
        if self.training_params.lr_warmup_epochs <= context.epoch:
            num_updates_passed = [x for x in self.lr_updates if x <= context.epoch]
            self.lr = self.initial_lr * self.lr_decay_factor ** len(num_updates_passed)
            self.update_lr(context.optimizer, context.epoch, None)


class PolyLRCallback(LRCallbackBase):
    """
    Hard coded polynomial decay learning rate scheduling.
    """

    def __init__(self, max_epochs, **kwargs):
        super(PolyLRCallback, self).__init__(Phase.TRAIN_BATCH_STEP, **kwargs)
        self.max_epochs = max_epochs

    def __call__(self, context: PhaseContext):
        # POLYNOMIAL LEARNING RATE
        if self.training_params.lr_warmup_epochs <= context.epoch:
            effective_epoch = context.epoch - self.training_params.lr_warmup_epochs
            effective_max_epochs = self.max_epochs - self.training_params.lr_warmup_epochs
            current_iter = self.train_loader_len * effective_epoch + context.batch_idx
            max_iter = self.train_loader_len * effective_max_epochs
            self.lr = self.initial_lr * pow((1.0 - (current_iter / max_iter)), 0.9)
            self.update_lr(context.optimizer, context.epoch, context.batch_idx)


class CosineLRCallback(LRCallbackBase):
    """
    Hard coded Cosine annealing learning rate scheduling.
    """

    def __init__(self, max_epochs, cosine_final_lr_ratio, **kwargs):
        super(CosineLRCallback, self).__init__(Phase.TRAIN_BATCH_STEP, **kwargs)
        self.max_epochs = max_epochs
        self.cosine_final_lr_ratio = cosine_final_lr_ratio

    def __call__(self, context: PhaseContext):
        # COSINE LEARNING RATE
        if self.training_params.lr_warmup_epochs <= context.epoch:
            effective_epoch = context.epoch - self.training_params.lr_warmup_epochs
            effective_max_epochs = self.max_epochs - self.training_params.lr_warmup_epochs
            current_iter = self.train_loader_len * effective_epoch + context.batch_idx
            max_iter = self.train_loader_len * effective_max_epochs
            lr = 0.5 * self.initial_lr * (1.0 + math.cos(current_iter / (max_iter + 1) * math.pi))
            # the cosine starts from initial_lr and reaches initial_lr * cosine_final_lr_ratio in last epoch
            self.lr = lr * (1 - self.cosine_final_lr_ratio) + (self.initial_lr * self.cosine_final_lr_ratio)
            self.update_lr(context.optimizer, context.epoch, context.batch_idx)
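

# A worked check of the cosine formula above (assumed values, for illustration only): with
# initial_lr = 0.1 and cosine_final_lr_ratio = 0.01, the first iteration gives
# 0.5 * 0.1 * (1 + cos(0)) = 0.1, and the last iteration approaches 0.1 * 0.01 = 0.001,
# which matches the inline comment in __call__.
def _example_cosine_lr(current_iter, max_iter, initial_lr=0.1, cosine_final_lr_ratio=0.01):
    lr = 0.5 * initial_lr * (1.0 + math.cos(current_iter / (max_iter + 1) * math.pi))
    return lr * (1 - cosine_final_lr_ratio) + initial_lr * cosine_final_lr_ratio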


class FunctionLRCallback(LRCallbackBase):
    """
    Hard coded rate scheduling for a user defined lr scheduling function.
    """

    def __init__(self, max_epochs, lr_schedule_function, **kwargs):
        super(FunctionLRCallback, self).__init__(Phase.TRAIN_BATCH_STEP, **kwargs)
        assert callable(lr_schedule_function), 'lr_schedule_function must be callable'
        self.lr_schedule_function = lr_schedule_function
        self.max_epochs = max_epochs

    def __call__(self, context: PhaseContext):
        if self.training_params.lr_warmup_epochs <= context.epoch:
            effective_epoch = context.epoch - self.training_params.lr_warmup_epochs
            effective_max_epochs = self.max_epochs - self.training_params.lr_warmup_epochs
            self.lr = self.lr_schedule_function(initial_lr=self.initial_lr,
                                                epoch=effective_epoch,
                                                iter=context.batch_idx,
                                                max_epoch=effective_max_epochs,
                                                iters_per_epoch=self.train_loader_len)
            self.update_lr(context.optimizer, context.epoch, context.batch_idx)


class IllegalLRSchedulerMetric(Exception):
    """Exception raised for an illegal combination of training parameters.
    Attributes:
        message -- explanation of the error
    """

    def __init__(self, metric_name, metrics_dict):
        self.message = "Illegal metric name: " + metric_name + ". Expected one of metrics_dict keys: " + str(
            metrics_dict.keys())
        super().__init__(self.message)


class LRSchedulerCallback(PhaseCallback):
    """
    Learning rate scheduler callback.
    Attributes:
        scheduler: torch.optim._LRScheduler, the learning rate scheduler whose step() will be called.
        metric_name: str, (default=None) the metric name for ReduceLROnPlateau learning rate scheduler.
            When __call__ is passed a metrics_dict containing a key equal to self.metric_name, the value of that
            metric will be monitored for ReduceLROnPlateau (i.e. step(metrics_dict[self.metric_name])).
    """

    def __init__(self, scheduler, phase, metric_name=None):
        super(LRSchedulerCallback, self).__init__(phase)
        self.scheduler = scheduler
        self.metric_name = metric_name

    def __call__(self, context: PhaseContext):
        if context.lr_warmup_epochs <= context.epoch:
            if self.metric_name and self.metric_name in context.metrics_dict.keys():
                self.scheduler.step(context.metrics_dict[self.metric_name])
            elif self.metric_name is None:
                self.scheduler.step()
            else:
                raise IllegalLRSchedulerMetric(self.metric_name, context.metrics_dict)

    def __repr__(self):
        return "LRSchedulerCallback: " + repr(self.scheduler)
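

# A minimal usage sketch (not part of the original module): wrapping a standard torch
# scheduler so it steps at the end of every validation epoch, monitoring a metric named
# "Accuracy". The metric name, mode and patience values are assumptions for illustration.
def _example_lr_scheduler_callback(optimizer):
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="max", patience=3)
    return LRSchedulerCallback(scheduler=scheduler, phase=Phase.VALIDATION_EPOCH_END, metric_name="Accuracy")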


class MetricsUpdateCallback(PhaseCallback):
    def __init__(self, phase: Phase):
        super(MetricsUpdateCallback, self).__init__(phase)

    def __call__(self, context: PhaseContext):
        context.metrics_compute_fn.update(**context.__dict__)
        if context.criterion is not None:
            context.loss_avg_meter.update(context.loss_log_items, len(context.inputs))


class PhaseContextTestCallback(PhaseCallback):
    """
    A callback that saves the phase context for testing.
    """

    def __init__(self, phase: Phase):
        super(PhaseContextTestCallback, self).__init__(phase)
        self.context = None

    def __call__(self, context: PhaseContext):
        self.context = context


class CallbackHandler:
    """
    Runs all callbacks whose phase attribute equals the given phase.
    Attributes:
        callbacks: List[PhaseCallback]. Callbacks to be run.
    """

    def __init__(self, callbacks):
        self.callbacks = callbacks

    def __call__(self, phase: Phase, context: PhaseContext):
        for callback in self.callbacks:
            if callback.phase == phase:
                callback(context)
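

# A minimal usage sketch (not part of the original module): how a training loop might drive
# the handler. The trainer in super_gradients does this internally; the loop below and the
# callback list are only illustrative.
def _example_run_callbacks(context: PhaseContext):
    handler = CallbackHandler(callbacks=[PhaseContextTestCallback(phase=Phase.TRAIN_EPOCH_END)])
    handler(Phase.TRAIN_EPOCH_START, context)  # no callback registered for this phase, so nothing runs
    handler(Phase.TRAIN_EPOCH_END, context)    # the test callback stores the context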


# DICT FOR LEGACY LR HARD-CODED REGIMES, WILL BE DELETED IN THE FUTURE
LR_SCHEDULERS_CLS_DICT = {"step": StepLRCallback,
                          "poly": PolyLRCallback,
                          "cosine": CosineLRCallback,
                          "function": FunctionLRCallback
                          }
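

# A minimal usage sketch (not part of the original module): resolving a scheduler callback
# from its legacy string name, the way a training configuration naming a "cosine" regime
# would be mapped. The constructor arguments are the ones LRCallbackBase expects above;
# their values are placeholders.
def _example_build_lr_callback(net, train_loader, training_params):
    lr_callback_cls = LR_SCHEDULERS_CLS_DICT["cosine"]
    return lr_callback_cls(max_epochs=100,
                           cosine_final_lr_ratio=0.01,
                           initial_lr=0.1,
                           update_param_groups=False,
                           train_loader_len=len(train_loader),
                           net=net,
                           training_params=training_params)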