experimental.py

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Experimental modules."""

import math

import numpy as np
import torch
import torch.nn as nn

from utils.downloads import attempt_download


class Sum(nn.Module):
    """Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070."""

    def __init__(self, n, weight=False):
        """Initializes a module to sum outputs of layers with number of inputs `n` and optional weighting, supporting 2+
        inputs.
        """
        super().__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        """Processes input through a customizable weighted sum of `n` inputs, optionally applying learned weights."""
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y
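
# Usage sketch: Sum expects an indexable collection of same-shaped tensors. x[0] is added
# unweighted; with weight=True the remaining n - 1 inputs are scaled by sigmoid(w) * 2,
# i.e. learnable factors in (0, 2), e.g.
#   s = Sum(n=3, weight=True)
#   y = s([torch.randn(1, 64, 32, 32) for _ in range(3)])  # y keeps the input shape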


class MixConv2d(nn.Module):
    """Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595."""

    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        """Initializes MixConv2d with mixed depth-wise convolutional layers, taking input and output channels (c1, c2),
        kernel sizes (k), stride (s), and channel distribution strategy (equal_ch).
        """
        super().__init__()
        n = len(k)  # number of convolutions
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, n - 1e-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList(
            [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]
        )
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()

    def forward(self, x):
        """Performs forward pass by applying SiLU activation on batch-normalized concatenated convolutional layer
        outputs.
        """
        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
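
# Usage sketch: each kernel size in `k` gets its own grouped convolution over the full c1
# input channels, producing its share of the c2 output channels; the per-kernel outputs are
# concatenated, batch-normalized, and passed through SiLU, e.g.
#   m = MixConv2d(64, 128, k=(1, 3, 5), s=1)
#   y = m(torch.randn(1, 64, 32, 32))  # shape (1, 128, 32, 32)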


class Ensemble(nn.ModuleList):
    """Ensemble of models."""

    def __init__(self):
        """Initializes an ensemble of models to be used for aggregated predictions."""
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        """Performs forward pass aggregating outputs from an ensemble of models."""
        y = [module(x, augment, profile, visualize)[0] for module in self]
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output
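
# Usage sketch: each member model's inference output (batch, boxes, outputs) is concatenated
# along the box dimension, so a single downstream NMS pass (as in YOLOv5's detect/val scripts)
# can merge and de-duplicate the candidate detections from all models in the ensemble.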


def attempt_load(weights, device=None, inplace=True, fuse=True):
    """
    Loads and fuses an ensemble or single YOLOv5 model from weights, handling device placement and model adjustments.

    Example inputs: weights=[a,b,c] or a single model weights=[a] or weights=a.
    """
    from models.yolo import Detect, Model

    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location="cpu")  # load
        ckpt = (ckpt.get("ema") or ckpt["model"]).to(device).float()  # FP32 model

        # Model compatibility updates
        if not hasattr(ckpt, "stride"):
            ckpt.stride = torch.tensor([32.0])
        if hasattr(ckpt, "names") and isinstance(ckpt.names, (list, tuple)):
            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict

        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, "fuse") else ckpt.eval())  # model in eval mode

    # Module updates
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
            m.inplace = inplace
            if t is Detect and not isinstance(m.anchor_grid, list):
                delattr(m, "anchor_grid")
                setattr(m, "anchor_grid", [torch.zeros(1)] * m.nl)
        elif t is nn.Upsample and not hasattr(m, "recompute_scale_factor"):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return model
    if len(model) == 1:
        return model[-1]

    # Return detection ensemble
    print(f"Ensemble created with {weights}\n")
    for k in "names", "nc", "yaml":
        setattr(model, k, getattr(model[0], k))
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    assert all(model[0].nc == m.nc for m in model), f"Models have different class counts: {[m.nc for m in model]}"
    return model
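

if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch, not part of the upstream module): exercises the
    # standalone layers with dummy tensors. attempt_load() additionally needs the full YOLOv5
    # repository on the path (models/yolo.py) plus a weights file such as the released
    # "yolov5s.pt", so it is only shown commented out.
    x = torch.randn(1, 64, 32, 32)
    print(Sum(n=3, weight=True)([x, x, x]).shape)  # torch.Size([1, 64, 32, 32])
    print(MixConv2d(64, 128, k=(1, 3, 5), s=1)(x).shape)  # torch.Size([1, 128, 32, 32])
    # model = attempt_load("yolov5s.pt", device=torch.device("cpu"))  # single fused model
    # ensemble = attempt_load(["yolov5s.pt", "yolov5m.pt"])  # Ensemble; outputs concatenate for NMS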