#739 Feature/sg 576 add regression tests

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:feature/SG-576-add-regression-tests
@@ -570,6 +570,37 @@ jobs:
           command: "rm -r << parameters.sg_new_env_name >>"
           when: on_fail

+  recipe_sanity_tests_pose_estimation:
+    machine: true
+    resource_class: deci-ai/sg-gpu-on-premise
+    parameters:
+      sg_existing_env_path:
+        type: string
+        default: "/env/persistent_env"
+      sg_new_env_name:
+        type: string
+        default: "${CIRCLE_BUILD_NUM}"
+      sg_new_env_python_version:
+        type: string
+        default: "python3.8"
+    steps:
+      - checkout
+      - run:
+          name: install requirements and run pose estimation sanity tests
+          command: |
+            << parameters.sg_new_env_python_version >> -m venv << parameters.sg_new_env_name >>
+            source << parameters.sg_new_env_name >>/bin/activate
+            python3.8 -m pip install --upgrade setuptools pip wheel
+            python3.8 -m pip install -r requirements.txt
+            python3.8 -m pip install .
+            python3.8 -m pip install torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116
+            python3.8 src/super_gradients/examples/train_from_recipe_example/train_from_recipe.py --config-name=coco2017_pose_dekr_w32 experiment_name=shortened_coco2017_pose_dekr_w32_ap_test dataset_params.train_dataloader_params.batch_size=4 dataset_params.val_dataloader_params.batch_size=8 training_hyperparams.max_epochs=1 training_hyperparams.lr_warmup_steps=0 training_hyperparams.average_best_models=False training_hyperparams.max_train_batches=1000 training_hyperparams.max_valid_batches=10
+
+      - run:
+          name: Remove new environment when failed
+          command: "rm -r << parameters.sg_new_env_name >>"
+          when: on_fail
+
 workflows:
   release:
     jobs:
@@ -602,10 +633,14 @@ workflows:
           requires:
             - recipe_sanity_tests_segmentation
           <<: *release_tag_filter
-      - recipe_accuracy_tests:
+      - recipe_sanity_tests_pose_estimation:
           requires:
             - recipe_sanity_tests_detection
           <<: *release_tag_filter
+      - recipe_accuracy_tests:
+          requires:
+            - recipe_sanity_tests_pose_estimation
+          <<: *release_tag_filter
       - release_version:
           py_version: "3.7"
           requires:
@@ -615,6 +650,7 @@ workflows:
             - recipe_sanity_tests_classification_pt2
             - recipe_sanity_tests_segmentation
             - recipe_sanity_tests_detection
+            - recipe_sanity_tests_pose_estimation
           <<: *release_tag_filter
       - deci-common/pip_upload_package_from_codeartifact_to_global_pypi:
           package_name: "super-gradients"
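The new recipe_sanity_tests_pose_estimation job installs the package into a fresh virtualenv and launches the shortened DEKR-W32 recipe with aggressive overrides (1 epoch, batch sizes of 4/8, capped train/valid batches). A minimal sketch of the equivalent local invocation, assuming the repository root as working directory and an environment with the requirements above (including the CUDA 11.6 torch wheels) already installed; the subprocess wrapper is just for illustration and is not part of the CI config:

# Sketch: run the same shortened pose-estimation recipe the CI job launches.
import subprocess

overrides = [
    "--config-name=coco2017_pose_dekr_w32",
    "experiment_name=shortened_coco2017_pose_dekr_w32_ap_test",
    "dataset_params.train_dataloader_params.batch_size=4",
    "dataset_params.val_dataloader_params.batch_size=8",
    "training_hyperparams.max_epochs=1",
    "training_hyperparams.lr_warmup_steps=0",
    "training_hyperparams.average_best_models=False",
    "training_hyperparams.max_train_batches=1000",
    "training_hyperparams.max_valid_batches=10",
]

subprocess.run(
    ["python3.8", "src/super_gradients/examples/train_from_recipe_example/train_from_recipe.py", *overrides],
    check=True,  # fail loudly, as the CI step would
)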
@@ -269,9 +269,9 @@ class PoseEstimationMetrics(Metric):

         predictions = self.predictions  # All gathered by this time
         if len(predictions) > 0:
-            preds_matched = torch.cat([x[0] for x in predictions], dim=0)
-            preds_to_ignore = torch.cat([x[1] for x in predictions], dim=0)
-            preds_scores = torch.cat([x[2] for x in predictions], dim=0)
+            preds_matched = torch.cat([x[0].cpu() for x in predictions], dim=0)
+            preds_to_ignore = torch.cat([x[1].cpu() for x in predictions], dim=0)
+            preds_scores = torch.cat([x[2].cpu() for x in predictions], dim=0)
             n_targets = sum([x[3] for x in predictions])

             cls_precision, _, cls_recall = compute_detection_metrics_per_cls(
@@ -279,9 +279,9 @@ class PoseEstimationMetrics(Metric):
                 preds_to_ignore=preds_to_ignore,
                 preds_scores=preds_scores,
                 n_targets=n_targets,
-                recall_thresholds=self.recall_thresholds.to(self.device),
+                recall_thresholds=self.recall_thresholds.cpu(),
                 score_threshold=0,
-                device=self.device,
+                device="cpu",
             )

             precision[:, 0] = cls_precision.cpu().numpy()
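The metric change moves the gathered prediction tensors (and the recall thresholds) to CPU before concatenation and AP computation, so the reduction no longer depends on which device each rank left its tensors on. A toy sketch of the failure mode the .cpu() calls guard against, using hypothetical tensors rather than the actual gathered predictions:

import torch

# Two "gathered" prediction score tensors that ended up on different devices.
per_rank_scores = [torch.rand(4), torch.rand(4)]
if torch.cuda.is_available():
    per_rank_scores[0] = per_rank_scores[0].cuda()

# torch.cat requires every tensor to live on the same device, so mixing CPU and
# CUDA tensors raises a RuntimeError; moving everything to CPU first (as the
# patched metric code now does) makes the reduction device-agnostic.
preds_scores = torch.cat([x.cpu() for x in per_rank_scores], dim=0)
print(preds_scores.device)  # cpu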
@@ -9,7 +9,12 @@ from super_gradients.common.environment.checkpoints_dir_utils import get_checkpo
 class ShortenedRecipesAccuracyTests(unittest.TestCase):
     @classmethod
     def setUp(cls):
-        cls.experiment_names = ["shortened_cifar10_resnet_accuracy_test", "shortened_coco2017_yolox_n_map_test", "shortened_cityscapes_regseg48_iou_test"]
+        cls.experiment_names = [
+            "shortened_cifar10_resnet_accuracy_test",
+            "shortened_coco2017_yolox_n_map_test",
+            "shortened_cityscapes_regseg48_iou_test",
+            "shortened_coco2017_pose_dekr_w32_ap_test",
+        ]

     def test_shortened_cifar10_resnet_accuracy(self):
         self.assertTrue(self._reached_goal_metric(experiment_name="shortened_cifar10_resnet_accuracy_test", metric_value=0.9167, delta=0.05))
@@ -24,6 +29,9 @@ class ShortenedRecipesAccuracyTests(unittest.TestCase):
     def test_shortened_cityscapes_regseg48_iou(self):
         self.assertTrue(self._reached_goal_metric(experiment_name="shortened_cityscapes_regseg48_iou_test", metric_value=0.263, delta=0.05))

+    def test_shortened_coco_dekr_32_ap_test(self):
+        self.assertTrue(self._reached_goal_metric(experiment_name="shortened_coco2017_pose_dekr_w32_ap_test", metric_value=0.000154, delta=0.0001))
+
     @classmethod
     def _reached_goal_metric(cls, experiment_name: str, metric_value: float, delta: float):
         checkpoints_dir_path = get_checkpoints_dir_path(experiment_name=experiment_name)
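The new test_shortened_coco_dekr_32_ap_test case reuses the existing _reached_goal_metric helper, checking the one-epoch DEKR run's AP against a tiny expected value (0.000154 with a 0.0001 delta), i.e. a smoke-level sanity bar rather than a real accuracy target. A hypothetical standalone runner for just this case; the import path is an assumption and should be adjusted to wherever ShortenedRecipesAccuracyTests actually lives in the repo:

import unittest

# Assumed module name -- point this at the actual location of the test file.
from shortened_recipes_accuracy_test import ShortenedRecipesAccuracyTests

suite = unittest.TestSuite()
suite.addTest(ShortenedRecipesAccuracyTests("test_shortened_coco_dekr_32_ap_test"))
unittest.TextTestRunner(verbosity=2).run(suite)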