ultralytics 8.3.166__py3-none-any.whl → 8.3.168__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.166"
+ __version__ = "8.3.168"

  import os

@@ -142,7 +142,7 @@ def export_formats():
  ["PaddlePaddle", "paddle", "_paddle_model", True, True, ["batch"]],
  ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
  ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
- ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction"]],
+ ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction", "nms"]],
  ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
  ]
  return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
@@ -313,8 +313,11 @@ class Exporter:
  if not self.args.int8:
  LOGGER.warning("IMX export requires int8=True, setting int8=True.")
  self.args.int8 = True
- if model.task != "detect":
- raise ValueError("IMX export only supported for detection models.")
+ if not self.args.nms:
+ LOGGER.warning("IMX export requires nms=True, setting nms=True.")
+ self.args.nms = True
+ if model.task not in {"detect", "pose"}:
+ raise ValueError("IMX export only supported for detection and pose estimation models.")
  if not hasattr(model, "names"):
  model.names = default_class_names()
  model.names = check_class_names(model.names)
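For context, a minimal usage sketch of the behavior introduced above (assuming a local `yolo11n-pose.pt` checkpoint is available); the exporter now forces `int8=True` and `nms=True` on its own, so passing them explicitly only avoids the warnings:

```python
from ultralytics import YOLO

# Pose models are now accepted by the IMX exporter alongside detection models.
model = YOLO("yolo11n-pose.pt")

# int8 and nms are enforced automatically by the exporter (see the warnings above).
model.export(format="imx", int8=True, nms=True)
```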
@@ -428,7 +431,7 @@ class Exporter:

  y = None
  for _ in range(2): # dry runs
- y = NMSModel(model, self.args)(im) if self.args.nms and not coreml else model(im)
+ y = NMSModel(model, self.args)(im) if self.args.nms and not (coreml or imx) else model(im)
  if self.args.half and onnx and self.device.type != "cpu":
  im, model = im.half(), model.half() # to FP16

@@ -1166,15 +1169,14 @@ class Exporter:
  )
  if getattr(self.model, "end2end", False):
  raise ValueError("IMX export is not supported for end2end models.")
- check_requirements(
- ("model-compression-toolkit>=2.3.0,<2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0")
- )
+ check_requirements(("model-compression-toolkit>=2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0"))
  check_requirements("imx500-converter[pt]>=3.16.1") # Separate requirements for imx500-converter
+ check_requirements("mct-quantizers>=1.6.0") # Separate for compatibility with model-compression-toolkit

  import model_compression_toolkit as mct
  import onnx
  from edgemdt_tpc import get_target_platform_capabilities
- from sony_custom_layers.pytorch import multiclass_nms
+ from sony_custom_layers.pytorch import multiclass_nms_with_indices

  LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")

@@ -1198,13 +1200,23 @@ class Exporter:

  bit_cfg = mct.core.BitWidthConfig()
  if "C2PSA" in self.model.__str__(): # YOLO11
- layer_names = ["sub", "mul_2", "add_14", "cat_21"]
- weights_memory = 2585350.2439
- n_layers = 238 # 238 layers for fused YOLO11n
+ if self.model.task == "detect":
+ layer_names = ["sub", "mul_2", "add_14", "cat_21"]
+ weights_memory = 2585350.2439
+ n_layers = 238 # 238 layers for fused YOLO11n
+ elif self.model.task == "pose":
+ layer_names = ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"]
+ weights_memory = 2437771.67
+ n_layers = 257 # 257 layers for fused YOLO11n-pose
  else: # YOLOv8
- layer_names = ["sub", "mul", "add_6", "cat_17"]
- weights_memory = 2550540.8
- n_layers = 168 # 168 layers for fused YOLOv8n
+ if self.model.task == "detect":
+ layer_names = ["sub", "mul", "add_6", "cat_17"]
+ weights_memory = 2550540.8
+ n_layers = 168 # 168 layers for fused YOLOv8n
+ elif self.model.task == "pose":
+ layer_names = ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"]
+ weights_memory = 2482451.85
+ n_layers = 187 # 187 layers for fused YOLO11n-pose

  # Check if the model has the expected number of layers
  if len(list(self.model.modules())) != n_layers:
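The nested task checks above pick three constants per architecture/task pair. Purely as an illustration (not the package's code), the same selection can be read as a lookup table keyed by architecture and task, with the values copied from the hunk above:

```python
# Illustrative only: (layer_names, weights_memory, n_layers) constants from the diff above.
IMX_QUANT_CFG = {
    ("yolo11", "detect"): (["sub", "mul_2", "add_14", "cat_21"], 2585350.2439, 238),
    ("yolo11", "pose"): (["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"], 2437771.67, 257),
    ("yolov8", "detect"): (["sub", "mul", "add_6", "cat_17"], 2550540.8, 168),
    ("yolov8", "pose"): (["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"], 2482451.85, 187),
}


def imx_quant_constants(model_str: str, task: str):
    """Return (layer_names, weights_memory, n_layers) for a fused YOLO model (illustration)."""
    arch = "yolo11" if "C2PSA" in model_str else "yolov8"  # same architecture probe as in the diff
    return IMX_QUANT_CFG[(arch, task)]
```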
@@ -1251,6 +1263,7 @@ class Exporter:
  score_threshold: float = 0.001,
  iou_threshold: float = 0.7,
  max_detections: int = 300,
+ task: str = "detect",
  ):
  """
  Initialize NMSWrapper with PyTorch Module and NMS parameters.
@@ -1260,34 +1273,40 @@ class Exporter:
  score_threshold (float): Score threshold for non-maximum suppression.
  iou_threshold (float): Intersection over union threshold for non-maximum suppression.
  max_detections (int): The number of detections to return.
+ task (str): Task type, either 'detect' or 'pose'.
  """
  super().__init__()
  self.model = model
  self.score_threshold = score_threshold
  self.iou_threshold = iou_threshold
  self.max_detections = max_detections
+ self.task = task

  def forward(self, images):
  """Forward pass with model inference and NMS post-processing."""
  # model inference
  outputs = self.model(images)

- boxes = outputs[0]
- scores = outputs[1]
- nms = multiclass_nms(
+ boxes, scores = outputs[0], outputs[1]
+ nms_outputs = multiclass_nms_with_indices(
  boxes=boxes,
  scores=scores,
  score_threshold=self.score_threshold,
  iou_threshold=self.iou_threshold,
  max_detections=self.max_detections,
  )
- return nms
+ if self.task == "pose":
+ kpts = outputs[2] # (bs, max_detections, kpts 17*3)
+ out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
+ return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
+ return nms_outputs

  quant_model = NMSWrapper(
  model=quant_model,
  score_threshold=self.args.conf or 0.001,
  iou_threshold=self.args.iou,
  max_detections=self.args.max_det,
+ task=self.model.task,
  ).to(self.device)

  f = Path(str(self.file).replace(self.file.suffix, "_imx_model"))
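The keypoint selection above relies on `torch.gather` with NMS indices broadcast over the last dimension. A small self-contained sketch of that pattern, with assumed shapes for illustration:

```python
import torch

bs, n_candidates, max_det, kpt_dim = 2, 100, 10, 51  # 17 keypoints * (x, y, visibility); assumed sizes

kpts = torch.rand(bs, n_candidates, kpt_dim)               # per-candidate keypoints from the head
indices = torch.randint(0, n_candidates, (bs, max_det))    # candidate indices kept by NMS, per image

# Expand indices to (bs, max_det, kpt_dim) so each kept detection pulls its full keypoint vector.
out_kpts = torch.gather(kpts, 1, indices.unsqueeze(-1).expand(-1, -1, kpt_dim))
print(out_kpts.shape)  # torch.Size([2, 10, 51])
```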
@@ -1,5 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from pathlib import Path
  from typing import Any, Dict, List, Tuple, Union

  import torch
@@ -186,45 +187,28 @@ class RTDETRValidator(DetectionValidator):

  return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5]} for x in outputs]

- def _prepare_batch(self, si: int, batch: Dict[str, Any]) -> Dict[str, Any]:
+ def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
  """
- Prepare a batch for validation by applying necessary transformations.
+ Serialize YOLO predictions to COCO json format.

  Args:
- si (int): Batch index.
- batch (Dict[str, Any]): Batch data containing images and annotations.
-
- Returns:
- (Dict[str, Any]): Prepared batch with transformed annotations containing cls, bboxes,
- ori_shape, imgsz, and ratio_pad.
- """
- idx = batch["batch_idx"] == si
- cls = batch["cls"][idx].squeeze(-1)
- bbox = batch["bboxes"][idx]
- ori_shape = batch["ori_shape"][si]
- imgsz = batch["img"].shape[2:]
- ratio_pad = batch["ratio_pad"][si]
- if len(cls):
- bbox = ops.xywh2xyxy(bbox) # target boxes
- bbox[..., [0, 2]] *= ori_shape[1] # native-space pred
- bbox[..., [1, 3]] *= ori_shape[0] # native-space pred
- return {"cls": cls, "bboxes": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
-
- def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
+ predn (Dict[str, torch.Tensor]): Predictions dictionary containing 'bboxes', 'conf', and 'cls' keys
+ with bounding box coordinates, confidence scores, and class predictions.
+ pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
  """
- Prepare predictions by scaling bounding boxes to original image dimensions.
-
- Args:
- pred (Dict[str, torch.Tensor]): Raw predictions containing 'cls', 'bboxes', and 'conf'.
- pbatch (Dict[str, torch.Tensor]): Prepared batch information containing 'ori_shape' and other metadata.
-
- Returns:
- (Dict[str, torch.Tensor]): Predictions scaled to original image dimensions.
- """
- cls = pred["cls"]
- if self.args.single_cls:
- cls *= 0
- bboxes = pred["bboxes"].clone()
- bboxes[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz # native-space pred
- bboxes[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz # native-space pred
- return {"bboxes": bboxes, "conf": pred["conf"], "cls": cls}
+ stem = Path(pbatch["im_file"]).stem
+ image_id = int(stem) if stem.isnumeric() else stem
+ box = predn["bboxes"].clone()
+ box[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz # native-space pred
+ box[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz # native-space pred
+ box = ops.xyxy2xywh(box) # xywh
+ box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
+ for b, s, c in zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
+ self.jdict.append(
+ {
+ "image_id": image_id,
+ "category_id": self.class_map[int(c)],
+ "bbox": [round(x, 3) for x in b],
+ "score": round(s, 5),
+ }
+ )
@@ -147,28 +147,28 @@ class DetectionValidator(BaseValidator):
  ratio_pad = batch["ratio_pad"][si]
  if len(cls):
  bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]] # target boxes
- ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad) # native-space labels
- return {"cls": cls, "bboxes": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
+ return {
+ "cls": cls,
+ "bboxes": bbox,
+ "ori_shape": ori_shape,
+ "imgsz": imgsz,
+ "ratio_pad": ratio_pad,
+ "im_file": batch["im_file"][si],
+ }

- def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
+ def _prepare_pred(self, pred: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
  """
  Prepare predictions for evaluation against ground truth.

  Args:
  pred (Dict[str, torch.Tensor]): Post-processed predictions from the model.
- pbatch (Dict[str, Any]): Prepared batch information.

  Returns:
  (Dict[str, torch.Tensor]): Prepared predictions in native space.
  """
- cls = pred["cls"]
  if self.args.single_cls:
- cls *= 0
- # predn = pred.clone()
- bboxes = ops.scale_boxes(
- pbatch["imgsz"], pred["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
- ) # native-space pred
- return {"bboxes": bboxes, "conf": pred["conf"], "cls": cls}
+ pred["cls"] *= 0
+ return pred

  def update_metrics(self, preds: List[Dict[str, torch.Tensor]], batch: Dict[str, Any]) -> None:
  """
@@ -181,7 +181,7 @@ class DetectionValidator(BaseValidator):
  for si, pred in enumerate(preds):
  self.seen += 1
  pbatch = self._prepare_batch(si, batch)
- predn = self._prepare_pred(pred, pbatch)
+ predn = self._prepare_pred(pred)

  cls = pbatch["cls"].cpu().numpy()
  no_pred = len(predn["cls"]) == 0
@@ -203,7 +203,7 @@ class DetectionValidator(BaseValidator):

  # Save
  if self.args.save_json:
- self.pred_to_json(predn, batch["im_file"][si])
+ self.pred_to_json(predn, pbatch)
  if self.args.save_txt:
  self.save_one_txt(
  predn,
@@ -360,18 +360,24 @@ class DetectionValidator(BaseValidator):
  boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
  ).save_txt(file, save_conf=save_conf)

- def pred_to_json(self, predn: Dict[str, torch.Tensor], filename: str) -> None:
+ def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
  """
  Serialize YOLO predictions to COCO json format.

  Args:
  predn (Dict[str, torch.Tensor]): Predictions dictionary containing 'bboxes', 'conf', and 'cls' keys
  with bounding box coordinates, confidence scores, and class predictions.
- filename (str): Image filename.
+ pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
  """
- stem = Path(filename).stem
+ stem = Path(pbatch["im_file"]).stem
  image_id = int(stem) if stem.isnumeric() else stem
- box = ops.xyxy2xywh(predn["bboxes"]) # xywh
+ box = ops.scale_boxes(
+ pbatch["imgsz"],
+ predn["bboxes"].clone(),
+ pbatch["ori_shape"],
+ ratio_pad=pbatch["ratio_pad"],
+ )
+ box = ops.xyxy2xywh(box) # xywh
  box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
  for b, s, c in zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
  self.jdict.append(
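Scaling to native image space now happens inside `pred_to_json` itself, followed by the usual COCO conversion from `xyxy` corners to top-left `xywh`. A minimal standalone sketch of that last conversion step, equivalent to the two-step `xyxy2xywh` plus half-size shift used above:

```python
import torch


def xyxy_to_coco_xywh(boxes: torch.Tensor) -> torch.Tensor:
    """Convert (x1, y1, x2, y2) boxes to COCO-style (x_top_left, y_top_left, width, height)."""
    x1, y1, x2, y2 = boxes.unbind(-1)
    return torch.stack((x1, y1, x2 - x1, y2 - y1), dim=-1)


print(xyxy_to_coco_xywh(torch.tensor([[10.0, 20.0, 50.0, 80.0]])))  # tensor([[10., 20., 40., 60.]])
```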
@@ -243,10 +243,6 @@ class YOLOE(Model):
  """
  super().__init__(model=model, task=task, verbose=verbose)

- # Assign default COCO class names when there are no custom names
- if not hasattr(self.model, "names"):
- self.model.names = YAML.load(ROOT / "cfg/datasets/coco8.yaml").get("names")
-
  @property
  def task_map(self) -> Dict[str, Dict[str, Any]]:
  """Map head to model, validator, and predictor classes."""
@@ -287,7 +283,7 @@ class YOLOE(Model):
  Examples:
  >>> model = YOLOE("yoloe-11s-seg.pt")
  >>> img = torch.rand(1, 3, 640, 640)
- >>> visual_features = model.model.backbone(img)
+ >>> visual_features = torch.rand(1, 1, 80, 80)
  >>> pe = model.get_visual_pe(img, visual_features)
  """
  assert isinstance(self.model, YOLOEModel)
@@ -1,7 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  from pathlib import Path
- from typing import Any, Dict, List, Tuple, Union
+ from typing import Any, Dict, List, Tuple

  import numpy as np
  import torch
@@ -132,33 +132,14 @@ class OBBValidator(DetectionValidator):
  ratio_pad = batch["ratio_pad"][si]
  if len(cls):
  bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]) # target boxes
- ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True) # native-space labels
- return {"cls": cls, "bboxes": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
-
- def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
- """
- Prepare predictions by scaling bounding boxes to original image dimensions.
-
- This method takes prediction tensors containing bounding box coordinates and scales them from the model's
- input dimensions to the original image dimensions using the provided batch information.
-
- Args:
- pred (Dict[str, torch.Tensor]): Prediction dictionary containing bounding box coordinates and other information.
- pbatch (Dict[str, Any]): Dictionary containing batch information with keys:
- - imgsz (tuple): Model input image size.
- - ori_shape (tuple): Original image shape.
- - ratio_pad (tuple): Ratio and padding information for scaling.
-
- Returns:
- (Dict[str, torch.Tensor]): Scaled prediction dictionary with bounding boxes in original image dimensions.
- """
- cls = pred["cls"]
- if self.args.single_cls:
- cls *= 0
- bboxes = ops.scale_boxes(
- pbatch["imgsz"], pred["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
- ) # native-space pred
- return {"bboxes": bboxes, "conf": pred["conf"], "cls": cls}
+ return {
+ "cls": cls,
+ "bboxes": bbox,
+ "ori_shape": ori_shape,
+ "imgsz": imgsz,
+ "ratio_pad": ratio_pad,
+ "im_file": batch["im_file"][si],
+ }

  def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int) -> None:
  """
@@ -180,23 +161,26 @@ class OBBValidator(DetectionValidator):
  p["bboxes"][:, :4] = ops.xywh2xyxy(p["bboxes"][:, :4]) # convert to xyxy format for plotting
  super().plot_predictions(batch, preds, ni) # plot bboxes

- def pred_to_json(self, predn: Dict[str, torch.Tensor], filename: Union[str, Path]) -> None:
+ def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
  """
  Convert YOLO predictions to COCO JSON format with rotated bounding box information.

  Args:
  predn (Dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', and 'cls' keys
  with bounding box coordinates, confidence scores, and class predictions.
- filename (str | Path): Path to the image file for which predictions are being processed.
+ pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.

  Notes:
  This method processes rotated bounding box predictions and converts them to both rbox format
  (x, y, w, h, angle) and polygon format (x1, y1, x2, y2, x3, y3, x4, y4) before adding them
  to the JSON dictionary.
  """
- stem = Path(filename).stem
+ stem = Path(pbatch["im_file"]).stem
  image_id = int(stem) if stem.isnumeric() else stem
  rbox = predn["bboxes"]
+ rbox = ops.scale_boxes(
+ pbatch["imgsz"], predn["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
+ ) # native-space pred
  poly = ops.xywhr2xyxyxyxy(rbox).view(-1, 8)
  for r, b, s, c in zip(rbox.tolist(), poly.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
  self.jdict.append(
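The polygon conversion noted in the docstring (`ops.xywhr2xyxyxyxy`) turns each rotated box `(cx, cy, w, h, angle)` into four corner points. A self-contained sketch of the underlying geometry, not the library implementation:

```python
import math


def rbox_to_corners(cx, cy, w, h, r):
    """Return the 4 corners (x1, y1, ..., x4, y4) of a rotated box; r is the rotation in radians."""
    cos_r, sin_r = math.cos(r), math.sin(r)
    corners = []
    for dx, dy in ((-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2), (-w / 2, h / 2)):
        corners += [cx + dx * cos_r - dy * sin_r, cy + dx * sin_r + dy * cos_r]
    return corners


print(rbox_to_corners(50, 50, 20, 10, math.pi / 2))  # a 20x10 box rotated 90 degrees about (50, 50)
```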
@@ -167,34 +167,9 @@ class PoseValidator(DetectionValidator):
  kpts = kpts.clone()
  kpts[..., 0] *= w
  kpts[..., 1] *= h
- kpts = ops.scale_coords(pbatch["imgsz"], kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
  pbatch["keypoints"] = kpts
  return pbatch

- def _prepare_pred(self, pred: Dict[str, Any], pbatch: Dict[str, Any]) -> Dict[str, Any]:
- """
- Prepare and scale keypoints in predictions for pose processing.
-
- This method extends the parent class's _prepare_pred method to handle keypoint scaling. It first calls
- the parent method to get the basic prediction boxes, then extracts and scales the keypoint coordinates
- to match the original image dimensions.
-
- Args:
- pred (Dict[str, torch.Tensor]): Post-processed predictions from the model.
- pbatch (Dict[str, Any]): Processed batch dictionary containing image information including:
- - imgsz: Image size used for inference
- - ori_shape: Original image shape
- - ratio_pad: Ratio and padding information for coordinate scaling
-
- Returns:
- (Dict[str, Any]): Processed prediction dictionary with keypoints scaled to original image dimensions.
- """
- predn = super()._prepare_pred(pred, pbatch)
- predn["keypoints"] = ops.scale_coords(
- pbatch["imgsz"], pred.get("keypoints").clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
- )
- return predn
-
  def _process_batch(self, preds: Dict[str, torch.Tensor], batch: Dict[str, Any]) -> Dict[str, np.ndarray]:
  """
  Return correct prediction matrix by computing Intersection over Union (IoU) between detections and ground truth.
@@ -249,7 +224,7 @@ class PoseValidator(DetectionValidator):
  keypoints=predn["keypoints"],
  ).save_txt(file, save_conf=save_conf)

- def pred_to_json(self, predn: Dict[str, torch.Tensor], filename: str) -> None:
+ def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
  """
  Convert YOLO predictions to COCO JSON format.

@@ -259,32 +234,22 @@ class PoseValidator(DetectionValidator):
  Args:
  predn (Dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', 'cls',
  and 'keypoints' tensors.
- filename (str): Path to the image file for which predictions are being processed.
+ pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.

  Notes:
  The method extracts the image ID from the filename stem (either as an integer if numeric, or as a string),
  converts bounding boxes from xyxy to xywh format, and adjusts coordinates from center to top-left corner
  before saving to the JSON dictionary.
  """
- stem = Path(filename).stem
- image_id = int(stem) if stem.isnumeric() else stem
- box = ops.xyxy2xywh(predn["bboxes"]) # xywh
- box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
- for b, s, c, k in zip(
- box.tolist(),
- predn["conf"].tolist(),
- predn["cls"].tolist(),
- predn["keypoints"].flatten(1, 2).tolist(),
- ):
- self.jdict.append(
- {
- "image_id": image_id,
- "category_id": self.class_map[int(c)],
- "bbox": [round(x, 3) for x in b],
- "keypoints": k,
- "score": round(s, 5),
- }
- )
+ super().pred_to_json(predn, pbatch)
+ kpts = ops.scale_coords(
+ pbatch["imgsz"],
+ predn["keypoints"].clone(),
+ pbatch["ori_shape"],
+ ratio_pad=pbatch["ratio_pad"],
+ )
+ for i, k in enumerate(kpts.flatten(1, 2).tolist()):
+ self.jdict[-len(kpts) + i]["keypoints"] = k # keypoints

  def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
  """Evaluate object detection model using COCO JSON format."""
@@ -135,29 +135,6 @@ class SegmentationValidator(DetectionValidator):
  prepared_batch["masks"] = batch["masks"][midx]
  return prepared_batch

- def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
- """
- Prepare predictions for evaluation by processing bounding boxes and masks.
-
- Args:
- pred (Dict[str, torch.Tensor]): Post-processed predictions from the model.
- pbatch (Dict[str, Any]): Prepared batch information.
-
- Returns:
- Dict[str, torch.Tensor]: Processed bounding box predictions.
- """
- predn = super()._prepare_pred(pred, pbatch)
- predn["masks"] = pred["masks"]
- if self.args.save_json and len(predn["masks"]):
- coco_masks = torch.as_tensor(pred["masks"], dtype=torch.uint8)
- coco_masks = ops.scale_image(
- coco_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
- pbatch["ori_shape"],
- ratio_pad=pbatch["ratio_pad"],
- )
- predn["coco_masks"] = coco_masks
- return predn
-
  def _process_batch(self, preds: Dict[str, torch.Tensor], batch: Dict[str, Any]) -> Dict[str, np.ndarray]:
  """
  Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
@@ -233,13 +210,13 @@ class SegmentationValidator(DetectionValidator):
  masks=torch.as_tensor(predn["masks"], dtype=torch.uint8),
  ).save_txt(file, save_conf=save_conf)

- def pred_to_json(self, predn: torch.Tensor, filename: str) -> None:
+ def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
  """
  Save one JSON result for COCO evaluation.

  Args:
  predn (Dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
- filename (str): Image filename.
+ pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.

  Examples:
  >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
@@ -252,23 +229,18 @@ class SegmentationValidator(DetectionValidator):
  rle["counts"] = rle["counts"].decode("utf-8")
  return rle

- stem = Path(filename).stem
- image_id = int(stem) if stem.isnumeric() else stem
- box = ops.xyxy2xywh(predn["bboxes"]) # xywh
- box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
- pred_masks = np.transpose(predn["coco_masks"], (2, 0, 1))
+ coco_masks = torch.as_tensor(predn["masks"], dtype=torch.uint8)
+ coco_masks = ops.scale_image(
+ coco_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
+ pbatch["ori_shape"],
+ ratio_pad=pbatch["ratio_pad"],
+ )
+ pred_masks = np.transpose(coco_masks, (2, 0, 1))
  with ThreadPool(NUM_THREADS) as pool:
  rles = pool.map(single_encode, pred_masks)
- for i, (b, s, c) in enumerate(zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist())):
- self.jdict.append(
- {
- "image_id": image_id,
- "category_id": self.class_map[int(c)],
- "bbox": [round(x, 3) for x in b],
- "score": round(s, 5),
- "segmentation": rles[i],
- }
- )
+ super().pred_to_json(predn, pbatch)
+ for i, r in enumerate(rles):
+ self.jdict[-len(rles) + i]["segmentation"] = r # segmentation

  def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
  """Return COCO-style instance segmentation evaluation metrics."""
@@ -259,11 +259,7 @@ class AutoBackend(nn.Module):
  session = onnxruntime.InferenceSession(w, providers=providers)
  else:
  check_requirements(
- [
- "model-compression-toolkit>=2.3.0,<2.4.1",
- "sony-custom-layers[torch]>=0.3.0",
- "onnxruntime-extensions",
- ]
+ ["model-compression-toolkit>=2.4.1", "sony-custom-layers[torch]>=0.3.0", "onnxruntime-extensions"]
  )
  w = next(Path(w).glob("*.onnx"))
  LOGGER.info(f"Loading {w} for ONNX IMX inference...")
@@ -273,7 +269,6 @@ class AutoBackend(nn.Module):
  session_options = mctq.get_ort_session_options()
  session_options.enable_mem_reuse = False # fix the shape mismatch from onnxruntime
  session = onnxruntime.InferenceSession(w, session_options, providers=["CPUExecutionProvider"])
- task = "detect"

  output_names = [x.name for x in session.get_outputs()]
  metadata = session.get_modelmeta().custom_metadata_map
@@ -674,8 +669,12 @@ class AutoBackend(nn.Module):
  self.session.run_with_iobinding(self.io)
  y = self.bindings
  if self.imx:
- # boxes, conf, cls
- y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
+ if self.task == "detect":
+ # boxes, conf, cls
+ y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
+ elif self.task == "pose":
+ # boxes, conf, kpts
+ y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1)

  # OpenVINO
  elif self.xml:
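A short sketch of what the concatenation above produces, with assumed shapes: boxes `(bs, max_det, 4)`, confidences and class indices `(bs, max_det)`, and flattened keypoints `(bs, max_det, 51)` for pose:

```python
import numpy as np

bs, max_det = 1, 300  # assumed batch size and detection count
boxes = np.zeros((bs, max_det, 4), dtype=np.float32)
conf = np.zeros((bs, max_det), dtype=np.float32)
cls = np.zeros((bs, max_det), dtype=np.float32)
kpts = np.zeros((bs, max_det, 51), dtype=np.float32)  # 17 keypoints * (x, y, visibility)

detect_out = np.concatenate([boxes, conf[:, :, None], cls[:, :, None]], axis=-1)
pose_out = np.concatenate([boxes, conf[:, :, None], cls[:, :, None], kpts], axis=-1)
print(detect_out.shape, pose_out.shape)  # (1, 300, 6) (1, 300, 57)
```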
@@ -178,14 +178,10 @@ class Detect(nn.Module):
  grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
  norm = self.strides / (self.stride[0] * grid_size)
  dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
- elif self.export and self.format == "imx":
- dbox = self.decode_bboxes(
- self.dfl(box) * self.strides, self.anchors.unsqueeze(0) * self.strides, xywh=False
- )
- return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
  else:
  dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
-
+ if self.export and self.format == "imx":
+ return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
  return torch.cat((dbox, cls.sigmoid()), 1)

  def bias_init(self):
@@ -384,6 +380,8 @@ class Pose(Detect):
  if self.training:
  return x, kpt
  pred_kpt = self.kpts_decode(bs, kpt)
+ if self.export and self.format == "imx":
+ return (*x, pred_kpt.permute(0, 2, 1))
  return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

  def kpts_decode(self, bs: int, kpts: torch.Tensor) -> torch.Tensor:
@@ -118,12 +118,13 @@ class RegionCounter(BaseSolution):
  x1, y1, x2, y2 = map(int, region["polygon"].bounds)
  pts = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
  annotator.draw_region(pts, region["region_color"], self.line_width * 2)
- annotator.text_label(
+ annotator.adaptive_label(
  [x1, y1, x2, y2],
  label=str(region["counts"]),
  color=region["region_color"],
  txt_color=region["text_color"],
  margin=self.line_width * 4,
+ shape="rect",
  )
  region["counts"] = 0 # Reset for next frame
  plot_im = annotator.result()
@@ -8,7 +8,6 @@ import numpy as np
  from PIL import Image

  from ultralytics.data.utils import IMG_FORMATS
- from ultralytics.nn.text_model import build_text_model
  from ultralytics.utils import LOGGER
  from ultralytics.utils.checks import check_requirements
  from ultralytics.utils.torch_utils import select_device
@@ -48,6 +47,8 @@ class VisualAISearch:

  def __init__(self, **kwargs: Any) -> None:
  """Initialize the VisualAISearch class with FAISS index and CLIP model."""
+ from ultralytics.nn.text_model import build_text_model
+
  check_requirements("faiss-cpu")

  self.faiss = __import__("faiss")
@@ -287,8 +287,7 @@ class SolutionAnnotator(Annotator):
  display_objects_labels: Annotate bounding boxes with object class labels.
  sweep_annotator: Visualize a vertical sweep line and optional label.
  visioneye: Map and connect object centroids to a visual "eye" point.
- circle_label: Draw a circular label within a bounding box.
- text_label: Draw a rectangular label within a bounding box.
+ adaptive_label: Draw a circular or rectangle background shape label in center of a bounding box.

  Examples:
  >>> annotator = SolutionAnnotator(image)
@@ -695,90 +694,58 @@ class SolutionAnnotator(Annotator):
  cv2.circle(self.im, center_bbox, self.tf * 2, color, -1)
  cv2.line(self.im, center_point, center_bbox, color, self.tf)

- def circle_label(
+ def adaptive_label(
  self,
  box: Tuple[float, float, float, float],
  label: str = "",
  color: Tuple[int, int, int] = (128, 128, 128),
  txt_color: Tuple[int, int, int] = (255, 255, 255),
- margin: int = 2,
+ shape: str = "rect",
+ margin: int = 5,
  ):
  """
- Draw a label with a background circle centered within a given bounding box.
+ Draw a label with a background rectangle or circle centered within a given bounding box.

  Args:
  box (Tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
  label (str): The text label to be displayed.
- color (Tuple[int, int, int]): The background color of the circle (B, G, R).
+ color (Tuple[int, int, int]): The background color of the rectangle (B, G, R).
  txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
- margin (int): The margin between the text and the circle border.
+ shape (str): The shape of the label i.e "circle" or "rect"
+ margin (int): The margin between the text and the rectangle border.
  """
- if len(label) > 3:
+ if shape == "circle" and len(label) > 3:
  LOGGER.warning(f"Length of label is {len(label)}, only first 3 letters will be used for circle annotation.")
  label = label[:3]

- # Calculate the center of the box
- x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
- # Get the text size
- text_size = cv2.getTextSize(str(label), cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.15, self.tf)[0]
- # Calculate the required radius to fit the text with the margin
- required_radius = int(((text_size[0] ** 2 + text_size[1] ** 2) ** 0.5) / 2) + margin
- # Draw the circle with the required radius
- cv2.circle(self.im, (x_center, y_center), required_radius, color, -1)
- # Calculate the position for the text
- text_x = x_center - text_size[0] // 2
- text_y = y_center + text_size[1] // 2
- # Draw the text
- cv2.putText(
- self.im,
- str(label),
- (text_x, text_y),
- cv2.FONT_HERSHEY_SIMPLEX,
- self.sf - 0.15,
- self.get_txt_color(color, txt_color),
- self.tf,
- lineType=cv2.LINE_AA,
- )
+ x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2) # Calculate center of the bbox
+ text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.15, self.tf)[0] # Get size of the text
+ text_x, text_y = x_center - text_size[0] // 2, y_center + text_size[1] // 2 # Calculate top-left corner of text

- def text_label(
- self,
- box: Tuple[float, float, float, float],
- label: str = "",
- color: Tuple[int, int, int] = (128, 128, 128),
- txt_color: Tuple[int, int, int] = (255, 255, 255),
- margin: int = 5,
- ):
- """
- Draw a label with a background rectangle centered within a given bounding box.
+ if shape == "circle":
+ cv2.circle(
+ self.im,
+ (x_center, y_center),
+ int(((text_size[0] ** 2 + text_size[1] ** 2) ** 0.5) / 2) + margin, # Calculate the radius
+ color,
+ -1,
+ )
+ else:
+ cv2.rectangle(
+ self.im,
+ (text_x - margin, text_y - text_size[1] - margin), # Calculate coordinates of the rectangle
+ (text_x + text_size[0] + margin, text_y + margin), # Calculate coordinates of the rectangle
+ color,
+ -1,
+ )

- Args:
- box (Tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
- label (str): The text label to be displayed.
- color (Tuple[int, int, int]): The background color of the rectangle (B, G, R).
- txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
- margin (int): The margin between the text and the rectangle border.
- """
- # Calculate the center of the bounding box
- x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
- # Get the size of the text
- text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.1, self.tf)[0]
- # Calculate the top-left corner of the text (to center it)
- text_x = x_center - text_size[0] // 2
- text_y = y_center + text_size[1] // 2
- # Calculate the coordinates of the background rectangle
- rect_x1 = text_x - margin
- rect_y1 = text_y - text_size[1] - margin
- rect_x2 = text_x + text_size[0] + margin
- rect_y2 = text_y + margin
- # Draw the background rectangle
- cv2.rectangle(self.im, (rect_x1, rect_y1), (rect_x2, rect_y2), color, -1)
  # Draw the text on top of the rectangle
  cv2.putText(
  self.im,
  label,
- (text_x, text_y),
+ (text_x, text_y), # Calculate top-left corner of the text
  cv2.FONT_HERSHEY_SIMPLEX,
- self.sf - 0.1,
+ self.sf - 0.15,
  self.get_txt_color(color, txt_color),
  self.tf,
  lineType=cv2.LINE_AA,
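The former `circle_label`/`text_label` pair is merged into one `adaptive_label` method selected by the `shape` argument. A hedged usage sketch, assuming the annotator can be constructed directly from a NumPy image as the base `Annotator` allows; the frame and colors below are placeholders:

```python
import numpy as np
from ultralytics.solutions.solutions import SolutionAnnotator

im = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
annotator = SolutionAnnotator(im)

# Rectangle-style label (the default shape), e.g. a region count.
annotator.adaptive_label([100, 100, 300, 200], label="17", color=(104, 31, 17), shape="rect")

# Circle-style label; strings longer than 3 characters are truncated with a warning.
annotator.adaptive_label([350, 100, 550, 200], label="car", shape="circle")
```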
@@ -1,6 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  import io
+ import os
  from typing import Any, List

  import cv2
@@ -64,6 +65,7 @@ class Inference:

  self.st = st # Reference to the Streamlit module
  self.source = None # Video source selection (webcam or video file)
+ self.img_file_names = [] # List of image file names
  self.enable_trk = False # Flag to toggle object tracking
  self.conf = 0.25 # Confidence threshold for detection
  self.iou = 0.45 # Intersection-over-Union (IoU) threshold for non-maximum suppression
@@ -85,13 +87,13 @@ class Inference:
  menu_style_cfg = """<style>MainMenu {visibility: hidden;}</style>""" # Hide main menu style

  # Main title of streamlit application
- main_title_cfg = """<div><h1 style="color:#FF64DA; text-align:center; font-size:40px; margin-top:-50px;
+ main_title_cfg = """<div><h1 style="color:#111F68; text-align:center; font-size:40px; margin-top:-50px;
  font-family: 'Archivo', sans-serif; margin-bottom:20px;">Ultralytics YOLO Streamlit Application</h1></div>"""

  # Subtitle of streamlit application
- sub_title_cfg = """<div><h4 style="color:#042AFF; text-align:center; font-family: 'Archivo', sans-serif;
- margin-top:-15px; margin-bottom:50px;">Experience real-time object detection on your webcam with the power
- of Ultralytics YOLO! 🚀</h4></div>"""
+ sub_title_cfg = """<div><h5 style="color:#042AFF; text-align:center; font-family: 'Archivo', sans-serif;
+ margin-top:-15px; margin-bottom:50px;">Experience real-time object detection on your webcam, videos, and images
+ with the power of Ultralytics YOLO! 🚀</h5></div>"""

  # Set html page configuration and append custom HTML
  self.st.set_page_config(page_title="Ultralytics Streamlit App", layout="wide")
@@ -107,24 +109,28 @@ class Inference:

  self.st.sidebar.title("User Configuration") # Add elements to vertical setting menu
  self.source = self.st.sidebar.selectbox(
- "Video",
- ("webcam", "video"),
+ "Source",
+ ("webcam", "video", "image"),
  ) # Add source selection dropdown
- self.enable_trk = self.st.sidebar.radio("Enable Tracking", ("Yes", "No")) == "Yes" # Enable object tracking
+ if self.source in ["webcam", "video"]:
+ self.enable_trk = self.st.sidebar.radio("Enable Tracking", ("Yes", "No")) == "Yes" # Enable object tracking
  self.conf = float(
  self.st.sidebar.slider("Confidence Threshold", 0.0, 1.0, self.conf, 0.01)
  ) # Slider for confidence
  self.iou = float(self.st.sidebar.slider("IoU Threshold", 0.0, 1.0, self.iou, 0.01)) # Slider for NMS threshold

- col1, col2 = self.st.columns(2) # Create two columns for displaying frames
- self.org_frame = col1.empty() # Container for original frame
- self.ann_frame = col2.empty() # Container for annotated frame
+ if self.source != "image": # Only create columns for video/webcam
+ col1, col2 = self.st.columns(2) # Create two columns for displaying frames
+ self.org_frame = col1.empty() # Container for original frame
+ self.ann_frame = col2.empty() # Container for annotated frame

  def source_upload(self) -> None:
  """Handle video file uploads through the Streamlit interface."""
+ from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS # scope import
+
  self.vid_file_name = ""
  if self.source == "video":
- vid_file = self.st.sidebar.file_uploader("Upload Video File", type=["mp4", "mov", "avi", "mkv"])
+ vid_file = self.st.sidebar.file_uploader("Upload Video File", type=VID_FORMATS)
  if vid_file is not None:
  g = io.BytesIO(vid_file.read()) # BytesIO Object
  with open("ultralytics.mp4", "wb") as out: # Open temporary file as bytes
@@ -132,6 +138,15 @@ class Inference:
  self.vid_file_name = "ultralytics.mp4"
  elif self.source == "webcam":
  self.vid_file_name = 0 # Use webcam index 0
+ elif self.source == "image":
+ import tempfile # scope import
+
+ imgfiles = self.st.sidebar.file_uploader("Upload Image Files", type=IMG_FORMATS, accept_multiple_files=True)
+ if imgfiles:
+ for imgfile in imgfiles: # Save each uploaded image to a temporary file
+ with tempfile.NamedTemporaryFile(delete=False, suffix=f".{imgfile.name.split('.')[-1]}") as tf:
+ tf.write(imgfile.read())
+ self.img_file_names.append({"path": tf.name, "name": imgfile.name})

  def configure(self) -> None:
  """Configure the model and load selected classes for inference."""
@@ -161,6 +176,27 @@ class Inference:
  if not isinstance(self.selected_ind, list): # Ensure selected_options is a list
  self.selected_ind = list(self.selected_ind)

+ def image_inference(self) -> None:
+ """Perform inference on uploaded images."""
+ for idx, img_info in enumerate(self.img_file_names):
+ img_path = img_info["path"]
+ image = cv2.imread(img_path) # Load and display the original image
+ if image is not None:
+ self.st.markdown(f"#### Processed: {img_info['name']}")
+ col1, col2 = self.st.columns(2)
+ with col1:
+ self.st.image(image, channels="BGR", caption="Original Image")
+ results = self.model(image, conf=self.conf, iou=self.iou, classes=self.selected_ind)
+ annotated_image = results[0].plot()
+ with col2:
+ self.st.image(annotated_image, channels="BGR", caption="Predicted Image")
+ try: # Clean up temporary file
+ os.unlink(img_path)
+ except FileNotFoundError:
+ pass # File doesn't exist, ignore
+ else:
+ self.st.error("Could not load the uploaded image.")
+

  def inference(self) -> None:
  """Perform real-time object detection inference on video or webcam feed."""
@@ -169,7 +205,14 @@ class Inference:
  self.configure() # Configure the app

  if self.st.sidebar.button("Start"):
- stop_button = self.st.button("Stop") # Button to stop the inference
+ if self.source == "image":
+ if self.img_file_names:
+ self.image_inference()
+ else:
+ self.st.info("Please upload an image file to perform inference.")
+ return
+
+ stop_button = self.st.sidebar.button("Stop") # Button to stop the inference
  cap = cv2.VideoCapture(self.vid_file_name) # Capture the video
  if not cap.isOpened():
  self.st.error("Could not open webcam or video source.")
@@ -195,8 +238,8 @@ class Inference:
  cap.release() # Release the capture
  self.st.stop() # Stop streamlit app

- self.org_frame.image(frame, channels="BGR") # Display original frame
- self.ann_frame.image(annotated_frame, channels="BGR") # Display processed frame
+ self.org_frame.image(frame, channels="BGR", caption="Original Frame") # Display original frame
+ self.ann_frame.image(annotated_frame, channels="BGR", caption="Predicted Frame") # Display processed

  cap.release() # Release the capture
  cv2.destroyAllWindows() # Destroy all OpenCV windows
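A minimal sketch of driving the updated app, assuming it is saved to a local `app.py` and launched with Streamlit; the solution is normally started through the Ultralytics Streamlit entry point, so this standalone form and the `model=` keyword are illustrative assumptions:

```python
# app.py - run with: streamlit run app.py
from ultralytics.solutions.streamlit_inference import Inference

# The sidebar now offers "webcam", "video", and "image" sources; uploaded images are
# processed by image_inference() and shown side by side with their predictions.
Inference(model="yolo11n.pt").inference()
```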
@@ -441,16 +441,15 @@ class ConfusionMatrix(DataExportMixin):
  array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)

  fig, ax = plt.subplots(1, 1, figsize=(12, 9))
+ names, n = self.names, self.nc
  if self.nc >= 100: # downsample for large class count
  k = max(2, self.nc // 60) # step size for downsampling, always > 1
  keep_idx = slice(None, None, k) # create slice instead of array
- self.names = self.names[keep_idx] # slice class names
+ names = names[keep_idx] # slice class names
  array = array[keep_idx, :][:, keep_idx] # slice matrix rows and cols
  n = (self.nc + k - 1) // k # number of retained classes
- nc = nn = n if self.task == "classify" else n + 1 # adjust for background if needed
- else:
- nc = nn = self.nc if self.task == "classify" else self.nc + 1
- ticklabels = (self.names + ["background"]) if (0 < nn < 99) and (nn == nc) else "auto"
+ nc = nn = n if self.task == "classify" else n + 1 # adjust for background if needed
+ ticklabels = (names + ["background"]) if (0 < nn < 99) and (nn == nc) else "auto"
  xy_ticks = np.arange(len(ticklabels))
  tick_fontsize = max(6, 15 - 0.1 * nc) # Minimum size is 6
  label_fontsize = max(6, 12 - 0.1 * nc)
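The plotting fix keeps `self.names` intact by slicing into a local variable instead of mutating the attribute. A tiny illustration of the same slice-based downsampling on a plain list:

```python
names = [f"class_{i}" for i in range(120)]  # imagine 120 classes
k = max(2, len(names) // 60)                # step size, always > 1 (here 2)
keep_idx = slice(None, None, k)

plot_names = names[keep_idx]  # downsampled copy used only for tick labels
print(len(names), len(plot_names))  # 120 60 -- the original list is untouched
```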
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.166
+ Version: 8.3.168
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -7,7 +7,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=JJu-69IfuUf1dLK7Ko9elyPONiQ1yu7yhapMVIAt_KI,27907
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
- ultralytics/__init__.py,sha256=yczpDVZI5DkFqH3t28doRPDuDqSjoNtwLkDWy4qLC3c,730
+ ultralytics/__init__.py,sha256=4WtcHqsFXTjYzmeOIAOMUX3wLs-ZjEt4inIaEc77h5s,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=VIpPHImhjb0XLJquGZrG_LBGZchtOtBSXR7HYTYV2GU,39602
@@ -119,7 +119,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=mb_mJ2eQ7pvCpRk9xrzGOmTvJ6dbknGWN6adcHe_7pM,73500
+ ultralytics/engine/exporter.py,sha256=m6HAaoDRDaUR4P0zue3o7bUKjnPa4QlMCjcbJtS4iCI,74926
  ultralytics/engine/model.py,sha256=FmLwiKuItVNgoyXhAvesUnD3UeHBzCVzGHDrqB8J4ms,53453
  ultralytics/engine/predictor.py,sha256=xxl1kdAzKrN8Y_5MQ5f92uFPeeRq1mYOl6hNlzpPjy8,22520
  ultralytics/engine/results.py,sha256=QcHcbPVlLBiy_APwABr-T5K65HR8Bl1rRzxawjjP76E,71873
@@ -145,7 +145,7 @@ ultralytics/models/rtdetr/__init__.py,sha256=_jEHmOjI_QP_nT3XJXLgYHQ6bXG4EL8Gnvn
  ultralytics/models/rtdetr/model.py,sha256=e2u6kQEYawRXGGO6HbFDE1uyHfsIqvKk4IpVjjYN41k,2182
  ultralytics/models/rtdetr/predict.py,sha256=_jk9ZkIW0gNLUHYyRCz_n9UgGnMTtTkFZ3Pzmkbyjgw,4197
  ultralytics/models/rtdetr/train.py,sha256=6FA3nDEcH1diFQ8Ky0xENp9cOOYATHxU6f42z9npMvs,3766
- ultralytics/models/rtdetr/val.py,sha256=MGzHWMfVDx9KPgaK09nvuHfXRQ6FagpzEyNO1R_8Xp8,9495
+ ultralytics/models/rtdetr/val.py,sha256=QT7JNKFJmD8dqUVSUBb78t9wGtE7KEw5l92CKJU50TM,8849
  ultralytics/models/sam/__init__.py,sha256=iR7B06rAEni21eptg8n4rLOP0Z_qV9y9PL-L93n4_7s,266
  ultralytics/models/sam/amg.py,sha256=IpcuIfC5KBRiF4sdrsPl1ecWEJy75axo1yG23r5BFsw,11783
  ultralytics/models/sam/build.py,sha256=J6n-_QOYLa63jldEZmhRe9D3Is_AJE8xyZLUjzfRyTY,12629
@@ -164,7 +164,7 @@ ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXp
  ultralytics/models/utils/loss.py,sha256=E-61TfLPc04IdeL6IlFDityDoPju-ov0ouWV_cNY4Kg,21254
  ultralytics/models/utils/ops.py,sha256=Pr77n8XW25SUEx4X3bBvXcVIbRdJPoaXJuG0KWWawRQ,15253
  ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
- ultralytics/models/yolo/model.py,sha256=xK-Te6D0PGY3vpWQg-HT3TwP0bzPs0XfUjd_L_tVXRs,18752
+ ultralytics/models/yolo/model.py,sha256=e66CIsSLHbEeGlkEQ1r6WwVDKAoR2nc0-UoGA94z-eM,18544
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=FqAC2YXe25bRwedMZhF3Lw0waoY-a60xMKELhxApP9I,4149
  ultralytics/models/yolo/classify/train.py,sha256=V-hevc6X7xemnpyru84OfTRA77eNnkVSMEz16_OUvo4,10244
@@ -172,19 +172,19 @@ ultralytics/models/yolo/classify/val.py,sha256=YakPxBVZCd85Kp4wFKx8KH6JJFiU7nkFS
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
  ultralytics/models/yolo/detect/train.py,sha256=HlaCoHJ6Y2TpCXXWabMRZApAYqBvjuM_YQJUV5JYCvw,9907
- ultralytics/models/yolo/detect/val.py,sha256=TrLclevqfD9NnpqPSIEvB5KakCsozyBegaD4lhd3noE,20485
+ ultralytics/models/yolo/detect/val.py,sha256=jxpaKmWH5VBAR7FuxEnnbN7c1hjFJYPfDWAanemqiS0,20388
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
  ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
- ultralytics/models/yolo/obb/val.py,sha256=nT82lKXewUw3bgX45Ms045rzcYn2A1j8g3Dxig2c-FU,14844
+ ultralytics/models/yolo/obb/val.py,sha256=GAZ1yEUYke_qzSl59kAkROXgc3Af22gDICfwUXukl1Q,13725
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
  ultralytics/models/yolo/pose/train.py,sha256=GyvNnDPJ3UFq_90HN8_FJ0dbwRkw3JJTVpkMFH0vC0o,5457
- ultralytics/models/yolo/pose/val.py,sha256=abAll3lWT6IRwoHOFNsgAZyNQtTtPBXHq0Wszpu9p5E,13994
+ ultralytics/models/yolo/pose/val.py,sha256=Sa4YAYpOhdt_mpNGWX2tvjwkDvt1RjiNjqdZ5p532hw,12327
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
  ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
  ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65rK-QH9mtNIw,3802
- ultralytics/models/yolo/segment/val.py,sha256=AnvY0O7HhD5xZ2BE2artLTAVW4SNmHbVopBJsYRcmk8,12328
+ ultralytics/models/yolo/segment/val.py,sha256=yVFJpYZCjGJ8fBgp4XEDO5ivAhkcctGqfkHI8uB-RwM,11209
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
  ultralytics/models/yolo/world/train.py,sha256=wBKnSC-TvrKWM1Taxqwo13XcwGHwwAXzNYV1tmqcOpc,7845
  ultralytics/models/yolo/world/train_world.py,sha256=lk9z_INGPSTP_W7Rjh3qrWSmjHaxOJtGngonh1cj2SM,9551
@@ -194,14 +194,14 @@ ultralytics/models/yolo/yoloe/train.py,sha256=XYpQYSnSD8vi_9VSj_S5oIsNUEqm3e66vP
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
  ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=n-2ADzX3Y2MRE8nHFeVvFCJFJP9rCbkkNbcufPZ24dE,41532
+ ultralytics/nn/autobackend.py,sha256=_65yU6AIpmz1vV24oSNNMPIBmywPTQQdWF0pwHDHxiU,41628
  ultralytics/nn/tasks.py,sha256=jRUjYn1xz_LEa_zx6Upb0UpXvy0Bca1o5HEc7FCRgwM,72653
  ultralytics/nn/text_model.py,sha256=cYwD-0el4VeToDBP4iPFOQGqyEQatJOBHrVyONL3K_s,15282
  ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
  ultralytics/nn/modules/block.py,sha256=JfOjWEgUNfwFCt-P2awhga4B7GXeDlkKVhLBp7oA-Es,70652
  ultralytics/nn/modules/conv.py,sha256=eM_t0hQwvEH4rllJucqRMNq7IoipEjbTa_ELROu4ubs,21445
- ultralytics/nn/modules/head.py,sha256=zTXFXc46ljPdP3mjgH7B3y2bPIjvbVPtgTu_rQCV8xo,53505
+ ultralytics/nn/modules/head.py,sha256=WiYJ-odEWisWZKKbOuvj1dJkUky2Z6D3yCTFqiRO-B0,53450
  ultralytics/nn/modules/transformer.py,sha256=PW5-6gzOP3_rZ_uAkmxvI42nU5bkrgbgLKCy5PC5px4,31415
  ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
  ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
@@ -216,12 +216,12 @@ ultralytics/solutions/object_counter.py,sha256=zD-EYIxu_y7qCFEkv6aqV60oMCZ4q6b_k
  ultralytics/solutions/object_cropper.py,sha256=x3gN-ihtwkJntp6EMcVWnIvVTOu1iRkP5RrX-1kwJHg,3522
  ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVSw8VD0OrpKtExPE,13613
  ultralytics/solutions/queue_management.py,sha256=gTkILx4dVcsKRZXSCXtelkEjCRiDS5iznb3FnddC61c,4390
- ultralytics/solutions/region_counter.py,sha256=nmtCoq1sFIU2Hx4gKImYNF7Yf5YpADHwujxxQGDvf1s,5916
+ ultralytics/solutions/region_counter.py,sha256=Ncd6_qIXmSQXUxCwQkgYc2-nI7KifQYhxPi3pOelZak,5950
  ultralytics/solutions/security_alarm.py,sha256=czEaMcy04q-iBkKqT_14d8H20CFB6zcKH_31nBGQnyw,6345
- ultralytics/solutions/similarity_search.py,sha256=H9MPf8F5AvVfmb9hnng0FrIOTbLU_I-CkVHGpC81CE0,9496
- ultralytics/solutions/solutions.py,sha256=KtoSUSxM4s-Ti5EAzT21pItuv70qlIOH6ymJP95Gl-E,37318
+ ultralytics/solutions/similarity_search.py,sha256=c18TK0qW5AvanXU28nAX4o_WtB1SDAJStUtyLDuEBHQ,9505
+ ultralytics/solutions/solutions.py,sha256=KuQ5M9oocygExRjKAIN0HjHNFYebENUSyw-i7ykDsO8,35903
  ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
- ultralytics/solutions/streamlit_inference.py,sha256=SqL-YxU3RCxCKscH2AYUTkmJknilV9jCCco6ufqsFk4,10501
+ ultralytics/solutions/streamlit_inference.py,sha256=JAVOCc_eNtszUHKU-rZ-iUQtA6m6d3QqCgtPfwrlcsE,12773
  ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
  ultralytics/solutions/vision_eye.py,sha256=J_nsXhWkhfWz8THNJU4Yag4wbPv78ymby6SlNKeSuk4,3005
  ultralytics/solutions/templates/similarity-search.html,sha256=nyyurpWlkvYlDeNh-74TlV4ctCpTksvkVy2Yc4ImQ1U,4261
@@ -246,7 +246,7 @@ ultralytics/utils/export.py,sha256=LK-wlTlyb_zIKtSvOmfmvR70RcUU9Ct9UBDt5wn9_rY,9
  ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
  ultralytics/utils/instance.py,sha256=dC83rHvQXciAED3rOiScFs3BOX9OI06Ey1mj9sjUKvs,19070
  ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
- ultralytics/utils/metrics.py,sha256=pazuzAjKFnfnhSVH_w6xEWB4vN7RpC8n7v3zj9LkFbs,62247
+ ultralytics/utils/metrics.py,sha256=AbaYgGPEFY-IVv1_Izb0dXulSs1NEZ2-TVkO1GcP8iI,62179
  ultralytics/utils/ops.py,sha256=8d60fbpntrexK3gPoLUS6mWAYGrtrQaQCOYyRJsCjuI,34521
  ultralytics/utils/patches.py,sha256=tBAsNo_RyoFLL9OAzVuJmuoDLUJIPuTMByBYyblGG1A,6517
  ultralytics/utils/plotting.py,sha256=LO-iR-k1UewV5vt4xXDUIirdmNEZdpfiQvLyIWqINPg,47171
@@ -265,9 +265,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
- ultralytics-8.3.166.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.166.dist-info/METADATA,sha256=4N4h2N1Vii9mOjtYcrL76k9zyCXctBsm-0k_zdReNCw,37576
- ultralytics-8.3.166.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ultralytics-8.3.166.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.166.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.166.dist-info/RECORD,,
+ ultralytics-8.3.168.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.168.dist-info/METADATA,sha256=7afOJPw9IKBqVgBS71Nk08KhkNpEZXyOsbgp9G6IHFQ,37576
+ ultralytics-8.3.168.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ultralytics-8.3.168.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.168.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.168.dist-info/RECORD,,