Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

cifar10_external_optimizer_example.py 2.4 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
  1. """
  2. Main purpose is to demonstrate the use of initialized optimizers and lr_Schedulers in training.
  3. """
  4. import super_gradients
  5. from super_gradients import SgModel
  6. from super_gradients.training.datasets.dataset_interfaces.dataset_interface import Cifar10DatasetInterface
  7. from super_gradients.training.metrics.classification_metrics import Accuracy, Top5
  8. from super_gradients.training import MultiGPUMode
  9. from torch.optim import ASGD
  10. from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau
  11. from super_gradients.training.models import ResNet18Cifar
  12. from super_gradients.training.utils.callbacks import Phase, LRSchedulerCallback
  13. from super_gradients.training import utils
  14. super_gradients.init_trainer()
  15. lr = 2.5e-4
  16. net = ResNet18Cifar(arch_params=utils.HpmStruct(**{"num_classes": 10}))
  17. # Define optimizer and schedulers
  18. optimizer = ASGD(net.parameters(), lr=lr, weight_decay=0.0001)
  19. rop_lr_scheduler = ReduceLROnPlateau(optimizer, mode="max", patience=10, verbose=True)
  20. step_lr_scheduler = MultiStepLR(optimizer, milestones=[0, 150, 200], gamma=0.1)
  21. # Learning rate will be decreased after validation accuracy did not increase for 10 epochs, or at the specified
  22. # milestones. Notice how the callback for reduce on plateau scheduler is been called on Phase.VALIDATION_EPOCH_END
  23. # which causes it to take the accuracy value from the validation metrics.
  24. phase_callbacks = [LRSchedulerCallback(scheduler=rop_lr_scheduler, phase=Phase.VALIDATION_EPOCH_END, metric_name="Accuracy"),
  25. LRSchedulerCallback(scheduler=step_lr_scheduler, phase=Phase.TRAIN_EPOCH_END)]
  26. # Define Model
  27. model = SgModel("Cifar10_external_objects_example", multi_gpu=MultiGPUMode.OFF)
  28. # Connect Dataset
  29. dataset = Cifar10DatasetInterface(dataset_params={"batch_size": 64})
  30. model.connect_dataset_interface(dataset, data_loader_num_workers=8)
  31. # Build Model
  32. model.build_model(net, load_checkpoint=False)
  33. train_params = {"max_epochs": 300,
  34. "phase_callbacks": phase_callbacks,
  35. "initial_lr": lr,
  36. "loss": "cross_entropy",
  37. "criterion_params": {},
  38. 'optimizer': optimizer,
  39. "train_metrics_list": [Accuracy(), Top5()],
  40. "valid_metrics_list": [Accuracy(), Top5()],
  41. "loss_logging_items_names": ["Loss"], "metric_to_watch": "Accuracy",
  42. "greater_metric_to_watch_is_better": True,
  43. "lr_scheduler_step_type": "epoch"}
  44. model.train(training_params=train_params)
Tip!

Press p (or the left arrow key) to see the previous file, or n (or the right arrow key) to see the next file

Comments

Loading...