- <section id="super-gradients-training-losses-package">
- <h1>super_gradients.training.losses package<a class="headerlink" href="#super-gradients-training-losses-package" title="Permalink to this headline"></a></h1>
- <section id="submodules">
- <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline"></a></h2>
- </section>
- <section id="module-super_gradients.training.losses.all_losses">
- <span id="super-gradients-training-losses-all-losses-module"></span><h2>super_gradients.training.losses.all_losses module<a class="headerlink" href="#module-super_gradients.training.losses.all_losses" title="Permalink to this headline"></a></h2>
- </section>
- <section id="module-super_gradients.training.losses.ddrnet_loss">
- <span id="super-gradients-training-losses-ddrnet-loss-module"></span><h2>super_gradients.training.losses.ddrnet_loss module<a class="headerlink" href="#module-super_gradients.training.losses.ddrnet_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ddrnet_loss.DDRNetLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.ddrnet_loss.</span></span><span class="sig-name descname"><span class="pre">DDRNetLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">threshold</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.7</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ohem_percentage</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weights</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">list</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">[1.0,</span> <span class="pre">0.4]</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_label</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">255</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ddrnet_loss.html#DDRNetLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ddrnet_loss.DDRNetLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <a class="reference internal" href="#super_gradients.training.losses.ohem_ce_loss.OhemCELoss" title="super_gradients.training.losses.ohem_ce_loss.OhemCELoss"><code class="xref py py-class docutils literal notranslate"><span class="pre">super_gradients.training.losses.ohem_ce_loss.OhemCELoss</span></code></a></p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ddrnet_loss.DDRNetLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions_list</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">Union</span><span class="p"><span class="pre">[</span></span><span class="pre">list</span><span class="p"><span class="pre">,</span> </span><span class="pre">tuple</span><span class="p"><span class="pre">,</span> </span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">torch.Tensor</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ddrnet_loss.html#DDRNetLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ddrnet_loss.DDRNetLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ddrnet_loss.DDRNetLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.ddrnet_loss.DDRNetLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
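A minimal usage sketch for the signature above; the 19-class,
same-resolution tensor shapes are illustrative assumptions, not taken
from this page:

   import torch
   from super_gradients.training.losses.ddrnet_loss import DDRNetLoss

   # OHEM-based segmentation loss over a main and an auxiliary head,
   # weighted 1.0 and 0.4 respectively (the constructor defaults above).
   criterion = DDRNetLoss(threshold=0.7, ohem_percentage=0.1,
                          weights=[1.0, 0.4], ignore_label=255)

   main_logits = torch.randn(2, 19, 64, 64)     # [batch, classes, H, W]
   aux_logits = torch.randn(2, 19, 64, 64)      # auxiliary head (assumed same size)
   targets = torch.randint(0, 19, (2, 64, 64))  # per-pixel class indices

   loss = criterion([main_logits, aux_logits], targets)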
- <section id="module-super_gradients.training.losses.focal_loss">
- <span id="super-gradients-training-losses-focal-loss-module"></span><h2>super_gradients.training.losses.focal_loss module<a class="headerlink" href="#module-super_gradients.training.losses.focal_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.focal_loss.FocalLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.focal_loss.</span></span><span class="sig-name descname"><span class="pre">FocalLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">loss_fcn</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">torch.nn.modules.loss.BCEWithLogitsLoss</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gamma</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1.5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.25</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/focal_loss.html#FocalLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.focal_loss.FocalLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss._Loss</span></code></p>
- <p>Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)</p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.focal_loss.FocalLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">pred</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">true</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/focal_loss.html#FocalLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.focal_loss.FocalLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.focal_loss.FocalLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.focal_loss.FocalLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
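Following the constructor example in the docstring, a short sketch
(the logit and target shapes are assumptions):

   import torch
   import torch.nn as nn
   from super_gradients.training.losses.focal_loss import FocalLoss

   # Focal loss down-weights easy examples so training focuses on hard ones.
   criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)

   pred = torch.randn(8, 80)                    # raw logits
   true = torch.randint(0, 2, (8, 80)).float()  # binary targets
   loss = criterion(pred, true)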
- <section id="module-super_gradients.training.losses.label_smoothing_cross_entropy_loss">
- <span id="super-gradients-training-losses-label-smoothing-cross-entropy-loss-module"></span><h2>super_gradients.training.losses.label_smoothing_cross_entropy_loss module<a class="headerlink" href="#module-super_gradients.training.losses.label_smoothing_cross_entropy_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.label_smoothing_cross_entropy_loss.LabelSmoothingCrossEntropyLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.label_smoothing_cross_entropy_loss.</span></span><span class="sig-name descname"><span class="pre">LabelSmoothingCrossEntropyLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_index</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduction</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'mean'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">smooth_eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">smooth_dist</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">from_logits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/label_smoothing_cross_entropy_loss.html#LabelSmoothingCrossEntropyLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.label_smoothing_cross_entropy_loss.LabelSmoothingCrossEntropyLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss.CrossEntropyLoss</span></code></p>
- <p>CrossEntropyLoss - with ability to recieve distrbution as targets, and optional label smoothing</p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.label_smoothing_cross_entropy_loss.LabelSmoothingCrossEntropyLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">target</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">smooth_dist</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/label_smoothing_cross_entropy_loss.html#LabelSmoothingCrossEntropyLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.label_smoothing_cross_entropy_loss.LabelSmoothingCrossEntropyLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.label_smoothing_cross_entropy_loss.LabelSmoothingCrossEntropyLoss.ignore_index">
- <span class="sig-name descname"><span class="pre">ignore_index</span></span><em class="property"><span class="pre">:</span> <span class="pre">int</span></em><a class="headerlink" href="#super_gradients.training.losses.label_smoothing_cross_entropy_loss.LabelSmoothingCrossEntropyLoss.ignore_index" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- <dl class="py function">
- <dt class="sig sig-object py" id="super_gradients.training.losses.label_smoothing_cross_entropy_loss.cross_entropy">
- <span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.label_smoothing_cross_entropy_loss.</span></span><span class="sig-name descname"><span class="pre">cross_entropy</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">inputs</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">target</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_index</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduction</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'mean'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">smooth_eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">smooth_dist</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">from_logits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/label_smoothing_cross_entropy_loss.html#cross_entropy"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.label_smoothing_cross_entropy_loss.cross_entropy" title="Permalink to this definition"></a></dt>
- <dd><p>cross entropy loss, with support for target distributions and label smoothing <a class="reference external" href="https://arxiv.org/abs/1512.00567">https://arxiv.org/abs/1512.00567</a></p>
- </dd></dl>
- <dl class="py function">
- <dt class="sig sig-object py" id="super_gradients.training.losses.label_smoothing_cross_entropy_loss.onehot">
- <span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.label_smoothing_cross_entropy_loss.</span></span><span class="sig-name descname"><span class="pre">onehot</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">indexes</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">N</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_index</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/label_smoothing_cross_entropy_loss.html#onehot"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.label_smoothing_cross_entropy_loss.onehot" title="Permalink to this definition"></a></dt>
- <dd><p>Creates a one-hot representation of indexes with N possible entries
- if N is not specified, it will suit the maximum index appearing.
- indexes is a long-tensor of indexes
- ignore_index will be zero in onehot representation</p>
- </dd></dl>
- </section>
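A short sketch of the class and the onehot() helper together
(smooth_eps=0.1 and the shapes are illustrative choices):

   import torch
   from super_gradients.training.losses.label_smoothing_cross_entropy_loss import (
       LabelSmoothingCrossEntropyLoss,
       onehot,
   )

   criterion = LabelSmoothingCrossEntropyLoss(smooth_eps=0.1)

   logits = torch.randn(4, 10)          # [batch, num_classes]
   target = torch.randint(0, 10, (4,))  # hard class indices
   loss = criterion(logits, target)

   # onehot() builds the dense form used when targets are distributions.
   dist = onehot(target, N=10)          # [4, 10]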
- <section id="module-super_gradients.training.losses.ohem_ce_loss">
- <span id="super-gradients-training-losses-ohem-ce-loss-module"></span><h2>super_gradients.training.losses.ohem_ce_loss module<a class="headerlink" href="#module-super_gradients.training.losses.ohem_ce_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ohem_ce_loss.OhemCELoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.ohem_ce_loss.</span></span><span class="sig-name descname"><span class="pre">OhemCELoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">threshold</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mining_percent</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_lb</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">int</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">-</span> <span class="pre">100</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_pixels_exclude_ignored</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">bool</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ohem_ce_loss.html#OhemCELoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ohem_ce_loss.OhemCELoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss._Loss</span></code></p>
- <p>OhemCELoss - Online Hard Example Mining Cross Entropy Loss</p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ohem_ce_loss.OhemCELoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">logits</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">labels</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ohem_ce_loss.html#OhemCELoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ohem_ce_loss.OhemCELoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ohem_ce_loss.OhemCELoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.ohem_ce_loss.OhemCELoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
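A usage sketch for the criterion above; threshold=0.7 and the
segmentation shapes are assumptions:

   import torch
   from super_gradients.training.losses.ohem_ce_loss import OhemCELoss

   # Cross entropy restricted to hard examples: mining_percent controls
   # the fraction of (non-ignored) pixels kept for the loss.
   criterion = OhemCELoss(threshold=0.7, mining_percent=0.1, ignore_lb=255)

   logits = torch.randn(2, 19, 64, 64)
   labels = torch.randint(0, 19, (2, 64, 64))
   loss = criterion(logits, labels)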
- <section id="module-super_gradients.training.losses.r_squared_loss">
- <span id="super-gradients-training-losses-r-squared-loss-module"></span><h2>super_gradients.training.losses.r_squared_loss module<a class="headerlink" href="#module-super_gradients.training.losses.r_squared_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.r_squared_loss.RSquaredLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.r_squared_loss.</span></span><span class="sig-name descname"><span class="pre">RSquaredLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">size_average</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduce</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduction</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">str</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">'mean'</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/r_squared_loss.html#RSquaredLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.r_squared_loss.RSquaredLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss._Loss</span></code></p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.r_squared_loss.RSquaredLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">output</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">target</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/r_squared_loss.html#RSquaredLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.r_squared_loss.RSquaredLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Computes the R-squared for the output and target values
- :param output: Tensor / Numpy / List</p>
- <blockquote>
- <div><p>The prediction</p>
- </div></blockquote>
- <dl class="field-list simple">
- <dt class="field-odd">Parameters</dt>
- <dd class="field-odd"><p><strong>target</strong> – Tensor / Numpy / List
- The corresponding lables</p>
- </dd>
- </dl>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.r_squared_loss.RSquaredLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.r_squared_loss.RSquaredLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
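For reference, the statistic involved, as a plain-torch sketch (this
shows the R-squared quantity itself, not necessarily the exact value or
sign convention the class returns):

   import torch

   def r_squared(output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
       # R^2 = 1 - SS_res / SS_tot: 1.0 is a perfect fit, 0.0 is no
       # better than predicting the mean of the targets everywhere.
       ss_res = torch.sum((target - output) ** 2)
       ss_tot = torch.sum((target - target.mean()) ** 2)
       return 1.0 - ss_res / ss_tot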
- <section id="module-super_gradients.training.losses.shelfnet_ohem_loss">
- <span id="super-gradients-training-losses-shelfnet-ohem-loss-module"></span><h2>super_gradients.training.losses.shelfnet_ohem_loss module<a class="headerlink" href="#module-super_gradients.training.losses.shelfnet_ohem_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.shelfnet_ohem_loss.</span></span><span class="sig-name descname"><span class="pre">ShelfNetOHEMLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">threshold</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.7</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mining_percent</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.0001</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_lb</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">int</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">255</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/shelfnet_ohem_loss.html#ShelfNetOHEMLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <a class="reference internal" href="#super_gradients.training.losses.ohem_ce_loss.OhemCELoss" title="super_gradients.training.losses.ohem_ce_loss.OhemCELoss"><code class="xref py py-class docutils literal notranslate"><span class="pre">super_gradients.training.losses.ohem_ce_loss.OhemCELoss</span></code></a></p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions_list</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">list</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/shelfnet_ohem_loss.html#ShelfNetOHEMLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss.training">
- <span class="sig-name descname"><span class="pre">training</span></span><em class="property"><span class="pre">:</span> <span class="pre">bool</span></em><a class="headerlink" href="#super_gradients.training.losses.shelfnet_ohem_loss.ShelfNetOHEMLoss.training" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
- <section id="module-super_gradients.training.losses.shelfnet_semantic_encoding_loss">
- <span id="super-gradients-training-losses-shelfnet-semantic-encoding-loss-module"></span><h2>super_gradients.training.losses.shelfnet_semantic_encoding_loss module<a class="headerlink" href="#module-super_gradients.training.losses.shelfnet_semantic_encoding_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_semantic_encoding_loss.ShelfNetSemanticEncodingLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.shelfnet_semantic_encoding_loss.</span></span><span class="sig-name descname"><span class="pre">ShelfNetSemanticEncodingLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">se_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.2</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">nclass</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">21</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">aux_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.4</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_index</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/shelfnet_semantic_encoding_loss.html#ShelfNetSemanticEncodingLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.shelfnet_semantic_encoding_loss.ShelfNetSemanticEncodingLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss.CrossEntropyLoss</span></code></p>
- <p>2D Cross Entropy Loss with Auxilary Loss</p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_semantic_encoding_loss.ShelfNetSemanticEncodingLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">logits</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">labels</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/shelfnet_semantic_encoding_loss.html#ShelfNetSemanticEncodingLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.shelfnet_semantic_encoding_loss.ShelfNetSemanticEncodingLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.shelfnet_semantic_encoding_loss.ShelfNetSemanticEncodingLoss.ignore_index">
- <span class="sig-name descname"><span class="pre">ignore_index</span></span><em class="property"><span class="pre">:</span> <span class="pre">int</span></em><a class="headerlink" href="#super_gradients.training.losses.shelfnet_semantic_encoding_loss.ShelfNetSemanticEncodingLoss.ignore_index" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
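The auxiliary-loss pattern this class is built around, as a conceptual
plain-torch sketch (an assumed structure, not the class's exact
forward; the se_weight-scaled semantic-encoding term is omitted here):

   import torch
   import torch.nn.functional as F

   def aux_weighted_ce(main_logits, aux_logits, labels,
                       aux_weight=0.4, ignore_index=-1):
       # Main-head 2D cross entropy plus a down-weighted cross entropy
       # on the auxiliary head.
       main = F.cross_entropy(main_logits, labels, ignore_index=ignore_index)
       aux = F.cross_entropy(aux_logits, labels, ignore_index=ignore_index)
       return main + aux_weight * aux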
- <section id="module-super_gradients.training.losses.ssd_loss">
- <span id="super-gradients-training-losses-ssd-loss-module"></span><h2>super_gradients.training.losses.ssd_loss module<a class="headerlink" href="#module-super_gradients.training.losses.ssd_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ssd_loss.SSDLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.ssd_loss.</span></span><span class="sig-name descname"><span class="pre">SSDLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">dboxes</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><a class="reference internal" href="super_gradients.training.utils.html#super_gradients.training.utils.ssd_utils.DefaultBoxes" title="super_gradients.training.utils.ssd_utils.DefaultBoxes"><span class="pre">super_gradients.training.utils.ssd_utils.DefaultBoxes</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ssd_loss.html#SSDLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ssd_loss.SSDLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss._Loss</span></code></p>
- <p>Implements the loss as the sum of the followings:
- 1. Confidence Loss: All labels, with hard negative mining
- 2. Localization Loss: Only on positive labels</p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ssd_loss.SSDLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ssd_loss.html#SSDLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ssd_loss.SSDLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><dl class="simple">
- <dt>Compute the loss</dt><dd><p>:param predictions - predictions tensor coming from the network. shape [N, num_classes+4, num_dboxes]
- were the first four items are (x,y,w,h) and the rest are class confidence
- :param targets - targets for the batch. [num targets, 6] (index in batch, label, x,y,w,h)</p>
- </dd>
- </dl>
- </dd></dl>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ssd_loss.SSDLoss.match_dboxes">
- <span class="sig-name descname"><span class="pre">match_dboxes</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/ssd_loss.html#SSDLoss.match_dboxes"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.ssd_loss.SSDLoss.match_dboxes" title="Permalink to this definition"></a></dt>
- <dd><p>convert ground truth boxes into a tensor with the same size as dboxes. each gt bbox is matched to every
- destination box which overlaps it over 0.5 (IoU). so some gt bboxes can be duplicated to a few destination boxes
- :param targets: a tensor containing the boxes for a single image. shape [num_boxes, 5] (x,y,w,h,label)
- :return: two tensors</p>
- <blockquote>
- <div><p>boxes - shape of dboxes [4, num_dboxes] (x,y,w,h)
- labels - sahpe [num_dboxes]</p>
- </div></blockquote>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.ssd_loss.SSDLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.ssd_loss.SSDLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
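The target layout forward() expects, per the docstring above (the box
values and class labels are made-up examples):

   import torch

   # One row per ground-truth box: [batch_index, label, x, y, w, h].
   targets = torch.tensor([
       [0., 3., 0.50, 0.50, 0.20, 0.30],  # image 0, class 3
       [0., 7., 0.25, 0.40, 0.10, 0.10],  # image 0, class 7
       [1., 3., 0.70, 0.60, 0.40, 0.50],  # image 1, class 3
   ])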
- <section id="module-super_gradients.training.losses.yolo_v3_loss">
- <span id="super-gradients-training-losses-yolo-v3-loss-module"></span><h2>super_gradients.training.losses.yolo_v3_loss module<a class="headerlink" href="#module-super_gradients.training.losses.yolo_v3_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v3_loss.YoLoV3DetectionLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.yolo_v3_loss.</span></span><span class="sig-name descname"><span class="pre">YoLoV3DetectionLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">torch.nn.modules.module.Module</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cls_pw</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">obj_pw</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">giou</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">3.54</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">obj</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">64.3</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cls</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">37.4</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/yolo_v3_loss.html#YoLoV3DetectionLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.yolo_v3_loss.YoLoV3DetectionLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss._Loss</span></code></p>
- <p>YoLoV3DetectionLoss - Loss Class for Object Detection</p>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v3_loss.YoLoV3DetectionLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model_output</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/yolo_v3_loss.html#YoLoV3DetectionLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.yolo_v3_loss.YoLoV3DetectionLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v3_loss.YoLoV3DetectionLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.yolo_v3_loss.YoLoV3DetectionLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
- <section id="module-super_gradients.training.losses.yolo_v5_loss">
- <span id="super-gradients-training-losses-yolo-v5-loss-module"></span><h2>super_gradients.training.losses.yolo_v5_loss module<a class="headerlink" href="#module-super_gradients.training.losses.yolo_v5_loss" title="Permalink to this headline"></a></h2>
- <dl class="py class">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss">
- <em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">super_gradients.training.losses.yolo_v5_loss.</span></span><span class="sig-name descname"><span class="pre">YoLoV5DetectionLoss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">anchors</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><a class="reference internal" href="super_gradients.training.utils.html#super_gradients.training.utils.detection_utils.Anchors" title="super_gradients.training.utils.detection_utils.Anchors"><span class="pre">super_gradients.training.utils.detection_utils.Anchors</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">cls_pos_weight</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">Union</span><span class="p"><span class="pre">[</span></span><span class="pre">float</span><span class="p"><span class="pre">,</span> </span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">float</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">]</span></span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">obj_pos_weight</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">obj_loss_gain</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">box_loss_gain</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cls_loss_gain</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">focal_loss_gamma</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">0.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cls_objectness_weights</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">Optional</span><span class="p"><span class="pre">[</span></span><span class="pre">Union</span><span class="p"><span class="pre">[</span></span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">float</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">,</span> </span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span><span class="p"><span 
class="pre">]</span></span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/yolo_v5_loss.html#YoLoV5DetectionLoss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss" title="Permalink to this definition"></a></dt>
- <dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.modules.loss._Loss</span></code></p>
- <p>Calculate the YOLO V5 loss:
- L = L_objectness + L_boxes + L_classification</p>
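- <p>A minimal usage sketch (an illustration only, not taken from this page: the
- <code class="docutils literal notranslate"><span class="pre">Anchors</span></code> constructor arguments and anchor values below are assumptions):</p>
- <div class="highlight-python notranslate"><div class="highlight"><pre>
- from super_gradients.training.utils.detection_utils import Anchors
- from super_gradients.training.losses.yolo_v5_loss import YoLoV5DetectionLoss
-
- # Hypothetical 3-level anchor set (values and constructor signature are
- # assumptions for illustration; consult detection_utils.Anchors for the real API)
- anchors = Anchors(anchors_list=[[10, 13, 16, 30, 33, 23],
-                                 [30, 61, 62, 45, 59, 119],
-                                 [116, 90, 156, 198, 373, 326]],
-                   strides=[8, 16, 32])
-
- criterion = YoLoV5DetectionLoss(anchors=anchors,
-                                 box_loss_gain=0.05,
-                                 cls_loss_gain=0.5,
-                                 obj_loss_gain=1.0)
- </pre></div></div>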
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.build_targets">
- <span class="sig-name descname"><span class="pre">build_targets</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">torch.Tensor</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">anchor_threshold</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">4.0</span></span></em><span class="sig-paren">)</span> → <span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">,</span> </span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">,</span> </span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">,</span> </span><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span><span class="p"><span class="pre">]</span></span><a class="reference internal" href="_modules/super_gradients/training/losses/yolo_v5_loss.html#YoLoV5DetectionLoss.build_targets"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.build_targets" title="Permalink to this definition"></a></dt>
- <dd><dl>
- <dt>Assign targets to anchors to use in the L_boxes &amp; L_classification calculation:</dt><dd><ul class="simple">
- <li><p>each target can be assigned to a few anchors: all anchors whose size is within
- [1/anchor_threshold, anchor_threshold] times the target size;</p></li>
- <li><p>each anchor can be assigned to a few targets.</p></li>
- </ul>
- </dd>
- </dl>
- <dl class="field-list simple">
- <dt class="field-odd">Parameters</dt>
- <dd class="field-odd"><ul class="simple">
- <li><p><strong>predictions</strong> – Yolo predictions</p></li>
- <li><p><strong>targets</strong> – ground truth targets</p></li>
- <li><p><strong>anchor_threshold</strong> – ratio defining the size range within which an anchor is considered suitable for a target</p></li>
- </ul>
- </dd>
- <dt class="field-even">Returns</dt>
- <dd class="field-even"><p><p>each of 4 outputs contains one element for each Yolo output,
- correspondences are raveled over the whole batch and all anchors:</p>
- <blockquote>
- <div><ul class="simple">
- <li><p>classes of the targets;</p></li>
- <li><p>boxes of the targets;</p></li>
- <li><p>image id in a batch, anchor id, grid y, grid x coordinates;</p></li>
- <li><p>anchor sizes.</p></li>
- </ul>
- </div></blockquote>
- <p>All of the above can be indexed in parallel to retrieve the selected correspondences.</p>
- </p>
- </dd>
- </dl>
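- <p>To make the size criterion concrete, here is a minimal sketch of the
- [1/anchor_threshold, anchor_threshold] ratio test in plain PyTorch
- (<code class="docutils literal notranslate"><span class="pre">anchor_matches</span></code> is a hypothetical helper, not part of the library):</p>
- <div class="highlight-python notranslate"><div class="highlight"><pre>
- import torch
-
- def anchor_matches(anchor_wh: torch.Tensor, target_wh: torch.Tensor,
-                    anchor_threshold: float = 4.0) -&gt; torch.Tensor:
-     # Per-dimension ratio of target size to anchor size
-     ratio = target_wh / anchor_wh
-     # The anchor suits the target if the worse of (ratio, 1/ratio) stays
-     # below the threshold in both width and height, i.e. the sizes are
-     # within [1/anchor_threshold, anchor_threshold] of each other
-     worst = torch.max(ratio, 1.0 / ratio).max(dim=-1).values
-     return worst.lt(anchor_threshold)  # elementwise "less than"
- </pre></div></div>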
- </dd></dl>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.compute_loss">
- <span class="sig-name descname"><span class="pre">compute_loss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">List</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">torch.Tensor</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">giou_loss_ratio</span></span><span class="p"><span class="pre">:</span></span> <span class="n"><span class="pre">float</span></span> <span class="o"><span class="pre">=</span></span> <span class="default_value"><span class="pre">1.0</span></span></em><span class="sig-paren">)</span> → <span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">,</span> </span><span class="pre">torch.Tensor</span><span class="p"><span class="pre">]</span></span><a class="reference internal" href="_modules/super_gradients/training/losses/yolo_v5_loss.html#YoLoV5DetectionLoss.compute_loss"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.compute_loss" title="Permalink to this definition"></a></dt>
- <dd><p>L = L_objectness + L_boxes + L_classification,
- where:</p>
- <blockquote>
- <div><ul class="simple">
- <li><p>L_boxes and L_classification are calculated only between anchors and targets that suit them;</p></li>
- <li><p>L_objectivness is calculated on all anchors.</p></li>
- </ul>
- </div></blockquote>
- <dl>
- <dt>L_classification:</dt><dd><p>for anchors that have suitable ground truths in their grid locations, add BCE terms
- to force maximum probability for each GT class in a multi-label way.
- Coefficient: self.cls_loss_gain</p>
- </dd>
- <dt>L_boxes:</dt><dd><p>for anchors that have suitable ground truths in their grid locations,
- add (1 - IoU) terms, where IoU is computed between the predicted box and each GT box, forcing maximum IoU.
- Coefficient: self.box_loss_gain</p>
- </dd>
- <dt>L_objectness:</dt><dd><p>for each anchor, add a BCE term forcing a prediction of
- (1 - giou_loss_ratio) + giou_loss_ratio * IoU, where IoU is computed between the predicted box and a random GT in it.
- Coefficient: self.obj_loss_gain; the loss from each YOLO grid is additionally multiplied by
- balance = [4.0, 1.0, 0.4] to balance contributions coming from different numbers of grid cells.</p>
- </dd>
- </dl>
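- <p>Schematically, the weighted combination of the three terms reads as follows (a sketch under the
- assumption that the per-term sums are already available; all names here are hypothetical, not the
- library's internals):</p>
- <div class="highlight-python notranslate"><div class="highlight"><pre>
- def combine_yolo_losses(l_box, l_obj_per_level, l_cls,
-                         box_loss_gain=0.05, obj_loss_gain=1.0, cls_loss_gain=0.5):
-     # Per-level balance weights compensate for the differing numbers of
-     # grid cells at each YOLO output level
-     balance = [4.0, 1.0, 0.4]
-     l_obj = sum(b * term for b, term in zip(balance, l_obj_per_level))
-     # L = L_objectness + L_boxes + L_classification, each scaled by its gain
-     return box_loss_gain * l_box + obj_loss_gain * l_obj + cls_loss_gain * l_cls
- </pre></div></div>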
- <dl class="field-list simple">
- <dt class="field-odd">Parameters</dt>
- <dd class="field-odd"><ul class="simple">
- <li><p><strong>predictions</strong> – output from all Yolo levels, each of shape
- [Batch x Num_Anchors x GridSizeY x GridSizeX x (4 + 1 + Num_classes)]</p></li>
- <li><p><strong>targets</strong> – [Num_targets x (4 + 2)]; the values on dim 1 are: image id in a batch, class id, and box x y w h (see the example after this list)</p></li>
- <li><p><strong>giou_loss_ratio</strong> – a coefficient in L_objectness defining what should be predicted as objectness
- in a cell with a target: a value in the [IoU, 1] range</p></li>
- </ul>
- </dd>
- <dt class="field-even">Returns</dt>
- <dd class="field-even"><p>loss, all losses separately in a detached tensor</p>
- </dd>
- </dl>
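- <p>An example of the targets layout described above (coordinates assumed normalized to [0, 1], as is
- conventional for YOLO-style targets; verify against your dataset pipeline):</p>
- <div class="highlight-python notranslate"><div class="highlight"><pre>
- import torch
-
- # [Num_targets x 6]: each row is (image id in batch, class id, x, y, w, h)
- targets = torch.tensor([
-     [0., 1., 0.50, 0.50, 0.20, 0.30],  # image 0, class 1
-     [0., 7., 0.25, 0.40, 0.10, 0.15],  # image 0, class 7
-     [2., 1., 0.70, 0.60, 0.30, 0.25],  # image 2, class 1
- ])
- </pre></div></div>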
- </dd></dl>
- <dl class="py method">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.forward">
- <span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model_output</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="_modules/super_gradients/training/losses/yolo_v5_loss.html#YoLoV5DetectionLoss.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.forward" title="Permalink to this definition"></a></dt>
- <dd><p>Defines the computation performed at every call.</p>
- <p>Should be overridden by all subclasses.</p>
- <div class="admonition note">
- <p class="admonition-title">Note</p>
- <p>Although the recipe for forward pass needs to be defined within
- this function, one should call the <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code> instance afterwards
- instead of this since the former takes care of running the
- registered hooks while the latter silently ignores them.</p>
- </div>
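- <p>In line with the note above, a hedged usage sketch: call the module instance rather than
- <code class="docutils literal notranslate"><span class="pre">forward()</span></code> directly (the return values assume
- <code class="docutils literal notranslate"><span class="pre">forward</span></code> delegates to
- <code class="docutils literal notranslate"><span class="pre">compute_loss</span></code>, as its description suggests):</p>
- <div class="highlight-python notranslate"><div class="highlight"><pre>
- # criterion, model_output and targets as constructed in the earlier sketches
- loss, loss_items = criterion(model_output, targets)  # runs hooks, then forward()
- loss.backward()
- </pre></div></div>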
- </dd></dl>
- <dl class="py attribute">
- <dt class="sig sig-object py" id="super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.reduction">
- <span class="sig-name descname"><span class="pre">reduction</span></span><em class="property"><span class="pre">:</span> <span class="pre">str</span></em><a class="headerlink" href="#super_gradients.training.losses.yolo_v5_loss.YoLoV5DetectionLoss.reduction" title="Permalink to this definition"></a></dt>
- <dd></dd></dl>
- </dd></dl>
- </section>
- <section id="module-super_gradients.training.losses">
- <span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-super_gradients.training.losses" title="Permalink to this headline"></a></h2>
- </section>
- </section>
- </div>
- </div>
- <footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
- <a href="super_gradients.training.legacy.html" class="btn btn-neutral float-left" title="super_gradients.training.legacy package" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
- <a href="super_gradients.training.metrics.html" class="btn btn-neutral float-right" title="super_gradients.training.metrics package" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
- </div>
- <hr/>
- <div role="contentinfo">
- <p>© Copyright 2021, SuperGradients team.</p>
- </div>
- Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
- <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
- provided by <a href="https://readthedocs.org">Read the Docs</a>.
-
- </footer>
- </div>
- </div>
- </section>
- </div>
- <script>
- jQuery(function () {
- SphinxRtdTheme.Navigation.enable(true);
- });
- </script>
- </body>
- </html>
|