- import unittest
-
- from super_gradients.training.datasets.dataset_interfaces.dataset_interface import PascalVOCUnifiedDetectionDatasetInterface,\
-     CoCoDetectionDatasetInterface
- from super_gradients.training.transforms.transforms import DetectionPaddedRescale, DetectionTargetsFormatTransform, DetectionMosaic, DetectionRandomAffine,\
-     DetectionHSV
- from super_gradients.training.utils.detection_utils import DetectionTargetsFormat
- from super_gradients.training.utils.detection_utils import DetectionCollateFN
- from super_gradients.training.utils import sg_trainer_utils
- from super_gradients.training import utils as core_utils
-
-
- class TestDatasetInterface(unittest.TestCase):
-     def setUp(self) -> None:
-         self.root_dir = "/home/louis.dupont/data/"
-         self.train_batch_size, self.val_batch_size = 16, 32
-         self.train_image_size, self.val_image_size = 640, 640
-         self.train_input_dim = (self.train_image_size, self.train_image_size)
-         self.val_input_dim = (self.val_image_size, self.val_image_size)
-         self.train_max_num_samples = 100
-         self.val_max_num_samples = 90
-
-     def setup_pascal_voc_interface(self):
-         """Set up PascalVOCUnifiedDetectionDatasetInterface and return its dataloaders."""
-         dataset_params = {
-             "data_dir": self.root_dir + "pascal_unified_coco_format/",
-             "cache_dir": self.root_dir + "pascal_unified_coco_format/",
-             "batch_size": self.train_batch_size,
-             "val_batch_size": self.val_batch_size,
-             "train_image_size": self.train_image_size,
-             "val_image_size": self.val_image_size,
-             "train_max_num_samples": self.train_max_num_samples,
-             "val_max_num_samples": self.val_max_num_samples,
-             "train_transforms": [
-                 DetectionMosaic(input_dim=self.train_input_dim, prob=1),
-                 DetectionRandomAffine(degrees=0.373, translate=0.245, scales=0.898, shear=0.602, target_size=self.train_input_dim),
-                 DetectionHSV(prob=1, hgain=0.0138, sgain=0.664, vgain=0.464),
-                 DetectionPaddedRescale(input_dim=self.train_input_dim, max_targets=100),
-                 DetectionTargetsFormatTransform(input_format=DetectionTargetsFormat.XYXY_LABEL,
-                                                 output_format=DetectionTargetsFormat.LABEL_CXCYWH)],
-             "val_transforms": [
-                 DetectionPaddedRescale(input_dim=self.val_input_dim),
-                 DetectionTargetsFormatTransform(input_format=DetectionTargetsFormat.XYXY_LABEL,
-                                                 output_format=DetectionTargetsFormat.LABEL_CXCYWH)],
-             "train_collate_fn": DetectionCollateFN(),
-             "val_collate_fn": DetectionCollateFN(),
-             "download": False,
-             "cache_train_images": False,
-             "cache_val_images": False,
-             "class_inclusion_list": ["person"]
-         }
-         dataset_interface = PascalVOCUnifiedDetectionDatasetInterface(dataset_params=dataset_params)
-         train_loader, valid_loader, _test_loader, _classes = dataset_interface.get_data_loaders()
-         return train_loader, valid_loader
-
-     def setup_coco_detection_interface(self):
-         """Set up CoCoDetectionDatasetInterface and return its dataloaders."""
-         dataset_params = {
-             "data_dir": "/data/coco",
-             "train_subdir": "images/train2017",  # subdirectory of data_dir containing the train data.
-             "val_subdir": "images/val2017",  # subdirectory of data_dir containing the validation data.
-             "train_json_file": "instances_train2017.json",  # COCO train json file, located at data_dir/annotations/train_json_file.
-             "val_json_file": "instances_val2017.json",  # COCO validation json file, located at data_dir/annotations/val_json_file.
-             "batch_size": self.train_batch_size,
-             "val_batch_size": self.val_batch_size,
-             "train_image_size": self.train_image_size,
-             "val_image_size": self.val_image_size,
-             "train_max_num_samples": self.train_max_num_samples,
-             "val_max_num_samples": self.val_max_num_samples,
-             "mixup_prob": 1.0,  # probability to apply per-sample mixup
-             "degrees": 10.,  # rotation degrees, randomly sampled from [-degrees, degrees]
-             "shear": 2.0,  # shear degrees, randomly sampled from [-shear, shear]
-             "flip_prob": 0.5,  # probability to apply horizontal flip
-             "hsv_prob": 1.0,  # probability to apply HSV transform
-             "hgain": 5,  # HSV transform hue gain (randomly sampled from [-hgain, hgain])
-             "sgain": 30,  # HSV transform saturation gain (randomly sampled from [-sgain, sgain])
-             "vgain": 30,  # HSV transform value gain (randomly sampled from [-vgain, vgain])
-             "mosaic_scale": [0.1, 2],  # random rescale range (keeps size by padding/cropping) after mosaic transform.
-             "mixup_scale": [0.5, 1.5],  # random rescale range for the additional sample in mixup
-             "mosaic_prob": 1.,  # probability to apply mosaic
-             "translate": 0.1,  # image translation fraction
-             "filter_box_candidates": False,  # whether to filter out transformed bboxes by edge size, area ratio, and aspect ratio.
-             "wh_thr": 2,  # edge size threshold when filter_box_candidates = True (pixels)
-             "ar_thr": 20,  # aspect ratio threshold when filter_box_candidates = True
-             "area_thr": 0.1,  # threshold for the area ratio between the original image and the transformed one, when filter_box_candidates = True
-             "tight_box_rotation": False,
-             "download": False,
-             "train_collate_fn": DetectionCollateFN(),
-             "val_collate_fn": DetectionCollateFN(),
-             "cache_train_images": False,
-             "cache_val_images": False,
-             "cache_dir": "/home/data/cache",  # Depends on the user
-             "class_inclusion_list": None
-             # "with_crowd": True
-         }
-         dataset_interface = CoCoDetectionDatasetInterface(dataset_params=dataset_params)
-         train_loader, valid_loader, _test_loader, _classes = dataset_interface.get_data_loaders()
-         return train_loader, valid_loader
-
-     def test_coco_detection(self):
-         """Check that the dataset interface is correctly instantiated, and that the batch items are of the expected size."""
-         train_loader, valid_loader = self.setup_coco_detection_interface()
-         for loader, batch_size, image_size, max_num_samples in [(train_loader, self.train_batch_size, self.train_image_size, self.train_max_num_samples),
-                                                                 (valid_loader, self.val_batch_size, self.val_image_size, self.val_max_num_samples)]:
-             # The dataset holds at most max_num_samples samples, but can be smaller if fewer samples are available
-             self.assertGreaterEqual(max_num_samples, len(loader.dataset))
-             batch_items = next(iter(loader))
-             batch_items = core_utils.tensor_container_to_device(batch_items, 'cuda', non_blocking=True)
-             inputs, targets, additional_batch_items = sg_trainer_utils.unpack_batch_items(batch_items)
-             self.assertListEqual([batch_size, 3, image_size, image_size], list(inputs.shape))
-
-     def test_pascal_voc(self):
-         """Check that the dataset interface is correctly instantiated, and that the batch items are of the expected size."""
-         train_loader, valid_loader = self.setup_pascal_voc_interface()
-         for loader, batch_size, image_size, max_num_samples in [(train_loader, self.train_batch_size, self.train_image_size, self.train_max_num_samples),
-                                                                 (valid_loader, self.val_batch_size, self.val_image_size, self.val_max_num_samples)]:
-             # The dataset holds at most max_num_samples samples, but can be smaller if fewer samples are available
-             self.assertGreaterEqual(max_num_samples, len(loader.dataset))
-             batch_items = next(iter(loader))
-             batch_items = core_utils.tensor_container_to_device(batch_items, 'cuda', non_blocking=True)
-             inputs, targets, additional_batch_items = sg_trainer_utils.unpack_batch_items(batch_items)
-             self.assertListEqual([batch_size, 3, image_size, image_size], list(inputs.shape))
-
-
- if __name__ == '__main__':
-     unittest.main()