#258 fix naming of private functions

Merged
Ofri Masad merged 1 commit into Deci-AI:master from deci-ai:feature/SG-185_rename_and_cleanup
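The change itself is a small API-hygiene rename: the trainer's checkpoint-saving helper becomes private (`save_checkpoint` → `_save_checkpoint`), and the test suites below are updated to call the new name. As a rough illustration of the convention (the `DummyTrainer` class here is a hypothetical stand-in, not the real SG trainer):

```python
# Illustrative sketch only: a stand-in class showing the naming convention the
# PR adopts, not the actual SG trainer implementation.

class DummyTrainer:
    def __init__(self):
        self.checkpoints_saved = 0

    def _save_checkpoint(self):
        # The leading underscore marks this as an internal helper; callers
        # (including the updated tests) now use the private name.
        self.checkpoints_saved += 1


trainer = DummyTrainer()
trainer._save_checkpoint()   # new call site, matching the updated tests
assert trainer.checkpoints_saved == 1
```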
@@ -90,29 +90,10 @@ class TestTrainer(unittest.TestCase):
             ckpt = torch.load(ckpt_path)
             self.assertListEqual(['net', 'acc', 'epoch', 'optimizer_state_dict', 'scaler_state_dict'],
                                  list(ckpt.keys()))
-        model.save_checkpoint()
+        model._save_checkpoint()
         weights_only = torch.load(os.path.join(model.checkpoints_dir_path, 'ckpt_latest_weights_only.pth'))
         self.assertListEqual(['net'], list(weights_only.keys()))
 
-    def test_compute_model_runtime(self):
-        model = self.get_classification_trainer(self.folder_names[6])
-        model.compute_model_runtime()
-        model.compute_model_runtime(batch_sizes=1, input_dims=(3, 224, 224), verbose=False)
-        model.compute_model_runtime(batch_sizes=[1, 2, 3], verbose=True)
-        # VERIFY MODEL RETURNS TO PREVIOUS TRAINING MODE
-        model.net.train()
-        model.compute_model_runtime(batch_sizes=1, verbose=False)
-        assert model.net.training, 'MODEL WAS SET TO eval DURING compute_model_runtime, BUT DIDN\'t RETURN TO PREVIOUS'
-        model.net.eval()
-        model.compute_model_runtime(batch_sizes=1, verbose=False)
-        assert not model.net.training, 'MODEL WAS SET TO eval DURING compute_model_runtime, BUT RETURNED TO TRAINING'
-
-        # THESE SHOULD HANDLE THE EXCEPTION OF CUDA OUT OF MEMORY
-        if torch.cuda.is_available():
-            model._switch_device('cuda')
-            model.compute_model_runtime(batch_sizes=10000, verbose=False, input_dims=(3, 224, 224))
-            model.compute_model_runtime(batch_sizes=[10000, 10, 50, 100, 1000, 5000], verbose=True)
-
     def test_predict(self):
         model = self.get_classification_trainer(self.folder_names[6])
         inputs = torch.randn((5, 3, 32, 32))
@@ -58,7 +58,7 @@ class StrictLoadEnumTest(unittest.TestCase):
         cls.sg_model.sg_logger = BaseSGLogger('project_name', 'load_checkpoint_test', 'local', resumed=False,
                                               training_params=HpmStruct(max_epochs=10),
                                               checkpoints_dir_path=cls.sg_model.checkpoints_dir_path)
-        cls.sg_model.save_checkpoint()
+        cls.sg_model._save_checkpoint()
 
     @classmethod
     def tearDownClass(cls):