val.py
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Validate a trained YOLOv5 classification model on a classification dataset.

Usage:
    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet

Usage - formats:
    $ python classify/val.py --weights yolov5s-cls.pt             # PyTorch
                                       yolov5s-cls.torchscript    # TorchScript
                                       yolov5s-cls.onnx           # ONNX Runtime or OpenCV DNN with --dnn
                                       yolov5s-cls_openvino_model # OpenVINO
                                       yolov5s-cls.engine         # TensorRT
                                       yolov5s-cls.mlmodel        # CoreML (macOS-only)
                                       yolov5s-cls_saved_model    # TensorFlow SavedModel
                                       yolov5s-cls.pb             # TensorFlow GraphDef
                                       yolov5s-cls.tflite         # TensorFlow Lite
                                       yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
                                       yolov5s-cls_paddle_model   # PaddlePaddle
"""
import argparse
import os
import sys
from pathlib import Path

import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import create_classification_dataloader
from utils.general import (
    LOGGER,
    TQDM_BAR_FORMAT,
    Profile,
    check_img_size,
    check_requirements,
    colorstr,
    increment_path,
    print_args,
)
from utils.torch_utils import select_device, smart_inference_mode

@smart_inference_mode()
def run(
    data=ROOT / "../datasets/mnist",  # dataset dir
    weights=ROOT / "yolov5s-cls.pt",  # model.pt path(s)
    batch_size=128,  # batch size
    imgsz=224,  # inference size (pixels)
    device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
    workers=8,  # max dataloader workers (per RANK in DDP mode)
    verbose=False,  # verbose output
    project=ROOT / "runs/val-cls",  # save to project/name
    name="exp",  # save to project/name
    exist_ok=False,  # existing project/name ok, do not increment
    half=False,  # use FP16 half-precision inference
    dnn=False,  # use OpenCV DNN for ONNX inference
    model=None,
    dataloader=None,
    criterion=None,
    pbar=None,
):
    """Validates a YOLOv5 classification model on a dataset, computing metrics like top1 and top5 accuracy."""
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != "cpu"  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        save_dir.mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")

        # Dataloader
        data = Path(data)
        test_dir = data / "test" if (data / "test").exists() else data / "val"  # data/test or data/val
        dataloader = create_classification_dataloader(
            path=test_dir, imgsz=imgsz, batch_size=batch_size, augment=False, rank=-1, workers=workers
        )

    model.eval()
    pred, targets, loss, dt = [], [], 0, (Profile(device=device), Profile(device=device), Profile(device=device))
    n = len(dataloader)  # number of batches
    action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
    desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
    bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
    with torch.cuda.amp.autocast(enabled=device.type != "cpu"):
        for images, labels in bar:
            with dt[0]:
                images, labels = images.to(device, non_blocking=True), labels.to(device)

            with dt[1]:
                y = model(images)

            with dt[2]:
                pred.append(y.argsort(1, descending=True)[:, :5])
                targets.append(labels)
                if criterion:
                    loss += criterion(y, labels)

    loss /= n
    pred, targets = torch.cat(pred), torch.cat(targets)
    correct = (targets[:, None] == pred).float()
    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
    top1, top5 = acc.mean(0).tolist()

    if pbar:
        pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}"
    if verbose:  # all classes
        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
        for i, c in model.names.items():
            acc_i = acc[targets == i]
            top1i, top5i = acc_i.mean(0).tolist()
            LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}")

        # Print results
        t = tuple(x.t / len(dataloader.dataset.samples) * 1e3 for x in dt)  # speeds per image
        shape = (1, 3, imgsz, imgsz)
        LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}" % t)
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

    return top1, top5, loss

def parse_opt():
    """Parses and returns command line arguments for YOLOv5 model evaluation and inference settings."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, default=ROOT / "../datasets/mnist", help="dataset path")
    parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model.pt path(s)")
    parser.add_argument("--batch-size", type=int, default=128, help="batch size")
    parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="inference size (pixels)")
    parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
    parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
    parser.add_argument("--verbose", nargs="?", const=True, default=True, help="verbose output")
    parser.add_argument("--project", default=ROOT / "runs/val-cls", help="save to project/name")
    parser.add_argument("--name", default="exp", help="save to project/name")
    parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
    parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
    parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
    opt = parser.parse_args()
    print_args(vars(opt))
    return opt

def main(opt):
    """Runs the YOLOv5 classification validation workflow after checking package requirements."""
    check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
    run(**vars(opt))


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
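
A minimal programmatic sketch (not part of the original script): in addition to the CLI usage shown in the module docstring, run() can be called directly from Python, assuming the YOLOv5 repository root is the working directory and its requirements are installed. The dataset directory and checkpoint below are placeholder paths, not values from the original file.

    from classify.val import run  # assumes the repo root is on sys.path (val.py also appends it on import)

    # Validate a classification checkpoint on a dataset directory containing a val/ or test/ split
    top1, top5, loss = run(
        data="../datasets/imagenet",  # placeholder dataset directory
        weights="yolov5s-cls.pt",     # placeholder classification checkpoint
        imgsz=224,                    # inference size (pixels)
        batch_size=64,
        verbose=True,                 # print per-class top1/top5 accuracy
    )
    print(f"top1={top1:.3f}  top5={top5:.3f}")

Because no criterion is passed here, the returned loss is simply 0.0; train.py supplies a criterion and a dataloader when it calls run() during validation.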