import unittest

import torch

from super_gradients import Trainer
from super_gradients.training import models
from super_gradients.training.dataloaders.dataloaders import classification_test_dataloader
from super_gradients.training.metrics import Accuracy


class CallTrainTwiceTest(unittest.TestCase):
    """
    CallTrainTwiceTest

    Purpose is to call train twice and verify that nothing crashes. Should be run with available GPUs
    (when possible) so that, when calling train again, we can verify there is no change in the model's device.
    """

    def test_call_train_twice(self):
        trainer = Trainer("external_criterion_test")
        dataloader = classification_test_dataloader(batch_size=10)
        model = models.get("resnet18", num_classes=5)
        train_params = {
            "max_epochs": 2,
            "lr_updates": [1],
            "lr_decay_factor": 0.1,
            "lr_mode": "step",
            "lr_warmup_epochs": 0,
            "initial_lr": 0.1,
            "loss": torch.nn.CrossEntropyLoss(),
            "optimizer": "SGD",
            "criterion_params": {},
            "optimizer_params": {"weight_decay": 1e-4, "momentum": 0.9},
            "train_metrics_list": [Accuracy()],
            "valid_metrics_list": [Accuracy()],
            "metric_to_watch": "Accuracy",
            "greater_metric_to_watch_is_better": True,
        }
        # Training twice with the same Trainer instance should not crash or move the model to a different device.
        trainer.train(model=model, training_params=train_params, train_loader=dataloader, valid_loader=dataloader)
        trainer.train(model=model, training_params=train_params, train_loader=dataloader, valid_loader=dataloader)


if __name__ == "__main__":
    unittest.main()