Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

mask_loss_test.py 8.4 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
  1. import torch
  2. import unittest
  3. import torch.nn as nn
  4. from super_gradients.training.losses.mask_loss import MaskAttentionLoss
  5. from super_gradients.training.utils.segmentation_utils import to_one_hot
  6. class MaskAttentionLossTest(unittest.TestCase):
  7. def setUp(self) -> None:
  8. self.img_size = 32
  9. self.num_classes = 4
  10. self.batch = 3
  11. torch.manual_seed(65)
  12. def _get_default_predictions_tensor(self):
  13. return torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)
  14. def _get_default_target_tensor(self):
  15. return torch.randint(0, self.num_classes, size=(self.batch, self.img_size, self.img_size))
  16. def _get_default_mask_tensor(self):
  17. mask = torch.zeros(self.batch, 1, self.img_size, self.img_size)
  18. # half tensor rows as 1
  19. mask[:, :, self.img_size // 2 :] = 1
  20. return mask.float()
  21. def _assertion_torch_values(self, expected_value: torch.Tensor, found_value: torch.Tensor, rtol: float = 1e-5):
  22. self.assertTrue(torch.allclose(found_value, expected_value, rtol=rtol), msg=f"Unequal torch tensors: excepted: {expected_value}, found: {found_value}")
  23. def test_with_cross_entropy_loss(self):
  24. """
  25. Test simple case using CrossEntropyLoss,
  26. shapes: predict [BxCxHxW], target [BxHxW], mask [Bx1xHxW]
  27. """
  28. predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)
  29. target = self._get_default_target_tensor()
  30. mask = self._get_default_mask_tensor()
  31. loss_weigths = [1.0, 0.5]
  32. ce_crit = nn.CrossEntropyLoss(reduction="none")
  33. mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)
  34. # expected result
  35. ce_loss = ce_crit(predict, target)
  36. _mask = mask.view_as(ce_loss)
  37. mask_loss = ce_loss * _mask
  38. mask_loss = mask_loss[_mask == 1] # consider only mask samples for mask loss computing
  39. expected_loss = ce_loss.mean() * loss_weigths[0] + mask_loss.mean() * loss_weigths[1]
  40. # mask ce loss result
  41. loss = mask_ce_crit(predict, target, mask)
  42. self._assertion_torch_values(expected_loss, loss)
  43. def test_with_binary_cross_entropy_loss(self):
  44. """
  45. Test case using BCEWithLogitsLoss, where mask is a spatial mask applied across all channels.
  46. shapes: predict [BxCxHxW], target (one-hot) [BxCxHxW], mask [Bx1xHxW]
  47. """
  48. predict = self._get_default_predictions_tensor()
  49. target = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)
  50. mask = self._get_default_mask_tensor()
  51. loss_weigths = [1.0, 0.5]
  52. ce_crit = nn.BCEWithLogitsLoss(reduction="none")
  53. mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)
  54. # expected result
  55. ce_loss = ce_crit(predict, target)
  56. _mask = mask.expand_as(ce_loss)
  57. mask_loss = ce_loss * _mask
  58. mask_loss = mask_loss[_mask == 1] # consider only mask samples for mask loss computing
  59. expected_loss = ce_loss.mean() * loss_weigths[0] + mask_loss.mean() * loss_weigths[1]
  60. # mask ce loss result
  61. loss = mask_ce_crit(predict, target, mask)
  62. self._assertion_torch_values(expected_loss, loss)
  63. def test_reduction_none(self):
  64. """
  65. Test case mask loss with reduction="none".
  66. shapes: predict [BxCxHxW], target [BxHxW], mask [Bx1xHxW], except output to be same as target shape.
  67. """
  68. predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)
  69. target = self._get_default_target_tensor()
  70. mask = self._get_default_mask_tensor()
  71. loss_weigths = [1.0, 0.5]
  72. ce_crit = nn.CrossEntropyLoss(reduction="none")
  73. mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths, reduction="none")
  74. # expected result
  75. ce_loss = ce_crit(predict, target)
  76. _mask = mask.view_as(ce_loss)
  77. mask_loss = ce_loss * _mask
  78. expected_loss = ce_loss * loss_weigths[0] + mask_loss * loss_weigths[1]
  79. # mask ce loss result
  80. loss = mask_ce_crit(predict, target, mask)
  81. self._assertion_torch_values(expected_loss, loss)
  82. self.assertEqual(target.size(), loss.size())
  83. def test_assert_valid_arguments(self):
  84. # ce_criterion reduction must be none
  85. kwargs = {"criterion": nn.CrossEntropyLoss(reduction="mean")}
  86. self.failUnlessRaises(ValueError, MaskAttentionLoss, **kwargs)
  87. # loss_weights must have only 2 values
  88. kwargs = {"criterion": nn.CrossEntropyLoss(reduction="none"), "loss_weights": [1.0, 1.0, 1.0]}
  89. self.failUnlessRaises(ValueError, MaskAttentionLoss, **kwargs)
  90. # mask loss_weight must be a positive value
  91. kwargs = {"criterion": nn.CrossEntropyLoss(reduction="none"), "loss_weights": [1.0, 0.0]}
  92. self.failUnlessRaises(ValueError, MaskAttentionLoss, **kwargs)
  93. def test_multi_class_mask(self):
  94. """
  95. Test case using MSELoss, where there is different spatial masks per channel.
  96. shapes: predict [BxCxHxW], target [BxCxHxW], mask [BxCxHxW]
  97. """
  98. predict = self._get_default_predictions_tensor()
  99. # when using bce loss, target is usually a one hot vector and must be with the same shape as the prediction.
  100. target = self._get_default_target_tensor()
  101. target = to_one_hot(target, self.num_classes).float()
  102. mask = torch.randint(0, 2, size=(self.batch, self.num_classes, self.img_size, self.img_size)).float()
  103. loss_weigths = [1.0, 0.5]
  104. ce_crit = nn.MSELoss(reduction="none")
  105. mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)
  106. # expected result
  107. mse_loss = ce_crit(predict, target)
  108. mask_loss = mse_loss * mask
  109. mask_loss = mask_loss[mask == 1] # consider only mask samples for mask loss computing
  110. expected_loss = mse_loss.mean() * loss_weigths[0] + mask_loss.mean() * loss_weigths[1]
  111. # mask ce loss result
  112. loss = mask_ce_crit(predict, target, mask)
  113. self._assertion_torch_values(expected_loss, loss)
  114. def test_broadcast_exceptions(self):
  115. """
  116. Test assertion in mask broadcasting
  117. """
  118. predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)
  119. target = torch.randint(0, self.num_classes, size=(self.batch, self.num_classes, self.img_size, self.img_size)).float()
  120. loss_weigths = [1.0, 0.5]
  121. ce_crit = nn.BCEWithLogitsLoss(reduction="none")
  122. mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)
  123. # mask with wrong spatial size.
  124. mask = torch.zeros(self.batch, self.img_size, 1).float()
  125. self.failUnlessRaises(AssertionError, mask_ce_crit, *(predict, target, mask))
  126. # mask with wrong batch size.
  127. mask = torch.zeros(self.batch + 1, self.img_size, self.img_size).float()
  128. self.failUnlessRaises(AssertionError, mask_ce_crit, *(predict, target, mask))
  129. # mask with invalid channels num.
  130. mask = torch.zeros(self.batch, 2, self.img_size, self.img_size).float()
  131. self.failUnlessRaises(AssertionError, mask_ce_crit, *(predict, target, mask))
  132. def test_with_cross_entropy_loss_maskless(self):
  133. """
  134. Test case with mask filled with zeros, corresponding to a scenario without
  135. attention. It's expected that the mask doesn't contribute to the loss.
  136. This scenario may happen when using edge masks on an image without
  137. edges - there's only one semantic region in the whole image.
  138. Shapes: predict [BxCxHxW], target [BxHxW], mask [Bx1xHxW]
  139. """
  140. predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)
  141. target = self._get_default_target_tensor()
  142. # Create a mask filled with zeros to disable the attention component
  143. mask = self._get_default_mask_tensor() * 0.0
  144. loss_weigths = [1.0, 0.5]
  145. ce_crit = nn.CrossEntropyLoss(reduction="none")
  146. mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)
  147. # expected result - no contribution from mask
  148. ce_loss = ce_crit(predict, target)
  149. expected_loss = ce_loss.mean() * loss_weigths[0]
  150. # mask ce loss result
  151. loss = mask_ce_crit(predict, target, mask)
  152. self._assertion_torch_values(expected_loss, loss)
# Script entry point: discover and run this module's tests when executed directly.
if __name__ == "__main__":
    unittest.main()
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...