ultralytics 8.3.157__py3-none-any.whl → 8.3.159__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.157"
+ __version__ = "8.3.159"

  import os

ultralytics/data/augment.py CHANGED
@@ -2928,7 +2928,7 @@ class ToTensor:
  the color channels are reversed from BGR to RGB.

  Args:
- im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order.
+ im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in RGB order.

  Returns:
  (torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized
@@ -2941,7 +2941,7 @@ class ToTensor:
  >>> print(tensor_img.shape, tensor_img.dtype)
  torch.Size([3, 640, 640]) torch.float16
  """
- im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
+ im = np.ascontiguousarray(im.transpose((2, 0, 1))) # HWC to CHW -> contiguous
  im = torch.from_numpy(im) # to torch
  im = im.half() if self.half else im.float() # uint8 to fp16/32
  im /= 255.0 # 0-255 to 0.0-1.0
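
ToTensor no longer flips the channel order, so callers are now expected to hand it RGB data. A minimal sketch of the new contract, assuming an OpenCV-loaded (and therefore BGR) image and the public ultralytics.data.augment.ToTensor class:

    import cv2
    from ultralytics.data.augment import ToTensor

    im_bgr = cv2.imread("bus.jpg")                    # OpenCV decodes to BGR, HWC, uint8
    im_rgb = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2RGB)  # convert explicitly; ToTensor no longer does this
    tensor = ToTensor(half=False)(im_rgb)             # CHW float tensor scaled to [0.0, 1.0]
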
ultralytics/engine/exporter.py CHANGED
@@ -1152,7 +1152,9 @@ class Exporter:
  )
  if getattr(self.model, "end2end", False):
  raise ValueError("IMX export is not supported for end2end models.")
- check_requirements(("model-compression-toolkit>=2.3.0", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0"))
+ check_requirements(
+ ("model-compression-toolkit>=2.3.0,<2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0")
+ )
  check_requirements("imx500-converter[pt]>=3.16.1") # Separate requirements for imx500-converter

  import model_compression_toolkit as mct
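
The tightened model-compression-toolkit pin only matters when the IMX path is exercised. A hedged sketch of how that happens, assuming a local YOLO11n checkpoint; format="imx" is the exporter entry point that runs the checks above:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    model.export(format="imx")  # verifies the pinned IMX requirements before conversion starts
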
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -19,7 +19,6 @@ class ClassificationPredictor(BasePredictor):

  Attributes:
  args (dict): Configuration arguments for the predictor.
- _legacy_transform_name (str): Name of the legacy transform class for backward compatibility.

  Methods:
  preprocess: Convert input images to model-compatible format.
@@ -50,7 +49,6 @@ class ClassificationPredictor(BasePredictor):
  """
  super().__init__(cfg, overrides, _callbacks)
  self.args.task = "classify"
- self._legacy_transform_name = "ultralytics.yolo.data.augment.ToTensor"

  def setup_source(self, source):
  """Set up source and inference mode and classify transforms."""
@@ -58,22 +56,18 @@ class ClassificationPredictor(BasePredictor):
  updated = (
  self.model.model.transforms.transforms[0].size != max(self.imgsz)
  if hasattr(self.model.model, "transforms") and hasattr(self.model.model.transforms.transforms[0], "size")
- else True
+ else False
+ )
+ self.transforms = (
+ classify_transforms(self.imgsz) if updated or not self.model.pt else self.model.model.transforms
  )
- self.transforms = self.model.model.transforms if not updated else classify_transforms(self.imgsz)

  def preprocess(self, img):
  """Convert input images to model-compatible tensor format with appropriate normalization."""
  if not isinstance(img, torch.Tensor):
- is_legacy_transform = any(
- self._legacy_transform_name in str(transform) for transform in self.transforms.transforms
+ img = torch.stack(
+ [self.transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in img], dim=0
  )
- if is_legacy_transform: # Handle legacy transforms
- img = torch.stack([self.transforms(im) for im in img], dim=0)
- else:
- img = torch.stack(
- [self.transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in img], dim=0
- )
  img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
  return img.half() if self.model.fp16 else img.float() # Convert uint8 to fp16/32
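
A quick sanity check of the new preprocessing path, assuming a pretrained classification checkpoint; numpy frames are now always routed through BGR to RGB conversion plus the classify transforms:

    import cv2
    from ultralytics import YOLO

    model = YOLO("yolo11n-cls.pt")
    frame = cv2.imread("bus.jpg")    # BGR numpy array, as returned by OpenCV
    results = model.predict(frame)   # preprocess() converts to RGB PIL and applies the classify transforms
    print(results[0].probs.top1)     # index of the top-1 class
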
ultralytics/models/yolo/detect/val.py CHANGED
@@ -2,7 +2,7 @@

  import os
  from pathlib import Path
- from typing import Any, Dict, List, Optional, Tuple
+ from typing import Any, Dict, List, Optional, Tuple, Union

  import numpy as np
  import torch
@@ -219,6 +219,7 @@ class DetectionValidator(BaseValidator):
  self.confusion_matrix.plot(save_dir=self.save_dir, normalize=normalize, on_plot=self.on_plot)
  self.metrics.speed = self.speed
  self.metrics.confusion_matrix = self.confusion_matrix
+ self.metrics.save_dir = self.save_dir

  def get_stats(self) -> Dict[str, Any]:
  """
@@ -392,38 +393,73 @@ class DetectionValidator(BaseValidator):
  Returns:
  (Dict[str, Any]): Updated statistics dictionary with COCO/LVIS evaluation results.
  """
- if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
- pred_json = self.save_dir / "predictions.json" # predictions
- anno_json = (
- self.data["path"]
- / "annotations"
- / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
- ) # annotations
+ pred_json = self.save_dir / "predictions.json" # predictions
+ anno_json = (
+ self.data["path"]
+ / "annotations"
+ / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
+ ) # annotations
+ return self.coco_evaluate(stats, pred_json, anno_json)
+
+ def coco_evaluate(
+ self,
+ stats: Dict[str, Any],
+ pred_json: str,
+ anno_json: str,
+ iou_types: Union[str, List[str]] = "bbox",
+ suffix: Union[str, List[str]] = "Box",
+ ) -> Dict[str, Any]:
+ """
+ Evaluate COCO/LVIS metrics using faster-coco-eval library.
+
+ Performs evaluation using the faster-coco-eval library to compute mAP metrics
+ for object detection. Updates the provided stats dictionary with computed metrics
+ including mAP50, mAP50-95, and LVIS-specific metrics if applicable.
+
+ Args:
+ stats (Dict[str, Any]): Dictionary to store computed metrics and statistics.
+ pred_json (str | Path]): Path to JSON file containing predictions in COCO format.
+ anno_json (str | Path]): Path to JSON file containing ground truth annotations in COCO format.
+ iou_types (str | List[str]]): IoU type(s) for evaluation. Can be single string or list of strings.
+ Common values include "bbox", "segm", "keypoints". Defaults to "bbox".
+ suffix (str | List[str]]): Suffix to append to metric names in stats dictionary. Should correspond
+ to iou_types if multiple types provided. Defaults to "Box".

+ Returns:
+ (Dict[str, Any]): Updated stats dictionary containing the computed COCO/LVIS evaluation metrics.
+ """
+ if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
  LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
  try:
  for x in pred_json, anno_json:
  assert x.is_file(), f"{x} file not found"
+ iou_types = [iou_types] if isinstance(iou_types, str) else iou_types
+ suffix = [suffix] if isinstance(suffix, str) else suffix
  check_requirements("faster-coco-eval>=1.6.7")
  from faster_coco_eval import COCO, COCOeval_faster

  anno = COCO(anno_json)
  pred = anno.loadRes(pred_json)
- val = COCOeval_faster(anno, pred, iouType="bbox", lvis_style=self.is_lvis, print_function=LOGGER.info)
- val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval
- val.evaluate()
- val.accumulate()
- val.summarize()
+ for i, iou_type in enumerate(iou_types):
+ val = COCOeval_faster(
+ anno, pred, iouType=iou_type, lvis_style=self.is_lvis, print_function=LOGGER.info
+ )
+ val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval
+ val.evaluate()
+ val.accumulate()
+ val.summarize()
+
+ # update mAP50-95 and mAP50
+ stats[f"metrics/mAP50({suffix[i][0]})"] = val.stats_as_dict["AP_all"]
+ stats[f"metrics/mAP50-95({suffix[i][0]})"] = val.stats_as_dict["AP_50"]

- # update mAP50-95 and mAP50
- stats[self.metrics.keys[-1]] = val.stats_as_dict["AP_all"]
- stats[self.metrics.keys[-2]] = val.stats_as_dict["AP_50"]
+ if self.is_lvis:
+ stats[f"metrics/APr({suffix[i][0]})"] = val.stats_as_dict["APr"]
+ stats[f"metrics/APc({suffix[i][0]})"] = val.stats_as_dict["APc"]
+ stats[f"metrics/APf({suffix[i][0]})"] = val.stats_as_dict["APf"]

  if self.is_lvis:
- stats["metrics/APr(B)"] = val.stats_as_dict["APr"]
- stats["metrics/APc(B)"] = val.stats_as_dict["APc"]
- stats["metrics/APf(B)"] = val.stats_as_dict["APf"]
- stats["fitness"] = val.stats_as_dict["AP_all"]
+ stats["fitness"] = stats["metrics/mAP50-95(B)"] # always use box mAP50-95 for fitness
  except Exception as e:
  LOGGER.warning(f"faster-coco-eval unable to run: {e}")
  return stats
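
After this refactor, COCO/LVIS evaluation for detection, segmentation, and pose all funnel through the single coco_evaluate helper. A hedged sketch of how it gets exercised end to end, assuming the COCO dataset is available locally:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    metrics = model.val(data="coco.yaml", save_json=True)  # save_json=True makes eval_json() call coco_evaluate()
    print(metrics.box.map, metrics.box.map50)              # mAP50-95 and mAP50 reported by the validator
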
ultralytics/models/yolo/pose/val.py CHANGED
@@ -8,7 +8,6 @@ import torch

  from ultralytics.models.yolo.detect import DetectionValidator
  from ultralytics.utils import LOGGER, ops
- from ultralytics.utils.checks import check_requirements
  from ultralytics.utils.metrics import OKS_SIGMA, PoseMetrics, kpt_iou


@@ -289,29 +288,6 @@ class PoseValidator(DetectionValidator):

  def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
  """Evaluate object detection model using COCO JSON format."""
- if self.args.save_json and self.is_coco and len(self.jdict):
- anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json" # annotations
- pred_json = self.save_dir / "predictions.json" # predictions
- LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
- try:
- check_requirements("faster-coco-eval>=1.6.7")
- from faster_coco_eval import COCO, COCOeval_faster
-
- for x in anno_json, pred_json:
- assert x.is_file(), f"{x} file not found"
- anno = COCO(anno_json) # init annotations api
- pred = anno.loadRes(pred_json) # init predictions api (must pass string, not Path)
- kwargs = dict(cocoGt=anno, cocoDt=pred, print_function=LOGGER.info)
- for i, eval in enumerate(
- [COCOeval_faster(iouType="bbox", **kwargs), COCOeval_faster(iouType="keypoints", **kwargs)]
- ):
- eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval
- eval.evaluate()
- eval.accumulate()
- eval.summarize()
- idx = i * 4 + 2
- # update mAP50-95 and mAP50
- stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[:2]
- except Exception as e:
- LOGGER.warning(f"faster-coco-eval unable to run: {e}")
- return stats
+ anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json" # annotations
+ pred_json = self.save_dir / "predictions.json" # predictions
+ return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "keypoints"], suffix=["Box", "Pose"])
ultralytics/models/yolo/segment/val.py CHANGED
@@ -272,45 +272,10 @@ class SegmentationValidator(DetectionValidator):

  def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
  """Return COCO-style instance segmentation evaluation metrics."""
- if self.args.save_json and (self.is_lvis or self.is_coco) and len(self.jdict):
- pred_json = self.save_dir / "predictions.json" # predictions
- anno_json = (
- self.data["path"]
- / "annotations"
- / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
- ) # annotations
-
- LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
- try:
- for x in anno_json, pred_json:
- assert x.is_file(), f"{x} file not found"
- check_requirements("faster-coco-eval>=1.6.7")
- from faster_coco_eval import COCO, COCOeval_faster
-
- anno = COCO(anno_json) # init annotations api
- pred = anno.loadRes(pred_json) # init predictions api (must pass string, not Path)
- kwargs = dict(cocoGt=anno, cocoDt=pred, lvis_style=self.is_lvis, print_function=LOGGER.info)
- for i, eval in enumerate(
- [COCOeval_faster(iouType="bbox", **kwargs), COCOeval_faster(iouType="segm", **kwargs)]
- ):
- eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval
- eval.evaluate()
- eval.accumulate()
- eval.summarize()
- idx = i * 4 + 2
- # update mAP50-95 and mAP50
- stats[self.metrics.keys[idx + 1]] = eval.stats_as_dict["AP_all"]
- stats[self.metrics.keys[idx]] = eval.stats_as_dict["AP_50"]
-
- if self.is_lvis:
- tag = "B" if i == 0 else "M"
- stats[f"metrics/APr({tag})"] = eval.stats_as_dict["APr"]
- stats[f"metrics/APc({tag})"] = eval.stats_as_dict["APc"]
- stats[f"metrics/APf({tag})"] = eval.stats_as_dict["APf"]
-
- if self.is_lvis:
- stats["fitness"] = stats["metrics/mAP50-95(B)"]
-
- except Exception as e:
- LOGGER.warning(f"faster-coco-eval unable to run: {e}")
- return stats
+ pred_json = self.save_dir / "predictions.json" # predictions
+ anno_json = (
+ self.data["path"]
+ / "annotations"
+ / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
+ ) # annotations
+ return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
ultralytics/nn/autobackend.py CHANGED
@@ -259,7 +259,11 @@ class AutoBackend(nn.Module):
  session = onnxruntime.InferenceSession(w, providers=providers)
  else:
  check_requirements(
- ["model-compression-toolkit>=2.3.0", "sony-custom-layers[torch]>=0.3.0", "onnxruntime-extensions"]
+ [
+ "model-compression-toolkit>=2.3.0,<2.4.1",
+ "sony-custom-layers[torch]>=0.3.0",
+ "onnxruntime-extensions",
+ ]
  )
  w = next(Path(w).glob("*.onnx"))
  LOGGER.info(f"Loading {w} for ONNX IMX inference...")
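
This is the inference-side counterpart of the exporter pin above: loading an IMX artifact routes through this AutoBackend branch. A hedged sketch, where the output directory name is assumed to follow the exporter's usual <model>_imx_model pattern:

    from ultralytics import YOLO

    model = YOLO("yolo11n_imx_model")  # directory produced by model.export(format="imx")
    results = model("bus.jpg")         # AutoBackend globs the *.onnx inside and loads it as shown above
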
ultralytics/nn/text_model.py CHANGED
@@ -6,6 +6,7 @@ from typing import List, Union

  import torch
  import torch.nn as nn
+ from PIL import Image

  from ultralytics.utils import checks
  from ultralytics.utils.torch_utils import smart_inference_mode
@@ -68,7 +69,7 @@ class CLIP(TextModel):
  >>> print(text_features.shape)
  """

- def __init__(self, size: str, device: torch.device):
+ def __init__(self, size: str, device: torch.device) -> None:
  """
  Initialize the CLIP text encoder.

@@ -85,12 +86,12 @@ class CLIP(TextModel):
  >>> text_features = clip_model.encode_text(["a photo of a cat", "a photo of a dog"])
  """
  super().__init__()
- self.model = clip.load(size, device=device)[0]
+ self.model, self.image_preprocess = clip.load(size, device=device)
  self.to(device)
  self.device = device
  self.eval()

- def tokenize(self, texts: Union[str, List[str]]):
+ def tokenize(self, texts: Union[str, List[str]]) -> torch.Tensor:
  """
  Convert input texts to CLIP tokens.

@@ -108,7 +109,7 @@ class CLIP(TextModel):
  return clip.tokenize(texts).to(self.device)

  @smart_inference_mode()
- def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32):
+ def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
  """
  Encode tokenized texts into normalized feature vectors.

@@ -133,6 +134,38 @@ class CLIP(TextModel):
  txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
  return txt_feats

+ @smart_inference_mode()
+ def encode_image(self, image: Union[Image.Image, torch.Tensor], dtype: torch.dtype = torch.float32) -> torch.Tensor:
+ """
+ Encode preprocessed images into normalized feature vectors.
+
+ This method processes preprocessed image inputs through the CLIP model to generate feature vectors, which are then
+ normalized to unit length. These normalized vectors can be used for text-image similarity comparisons.
+
+ Args:
+ image (PIL.Image | torch.Tensor): Preprocessed image input. If a PIL Image is provided, it will be
+ converted to a tensor using the model's image preprocessing function.
+ dtype (torch.dtype, optional): Data type for output features.
+
+ Returns:
+ (torch.Tensor): Normalized image feature vectors with unit length (L2 norm = 1).
+
+ Examples:
+ >>> from ultralytics.nn.text_model import CLIP
+ >>> from PIL import Image
+ >>> clip_model = CLIP("ViT-B/32", device="cuda")
+ >>> image = Image.open("path/to/image.jpg")
+ >>> image_tensor = clip_model.image_preprocess(image).unsqueeze(0).to("cuda")
+ >>> features = clip_model.encode_image(image_tensor)
+ >>> features.shape
+ torch.Size([1, 512])
+ """
+ if isinstance(image, Image.Image):
+ image = self.image_preprocess(image).unsqueeze(0).to(self.device)
+ img_feats = self.model.encode_image(image).to(dtype)
+ img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True)
+ return img_feats
+

  class MobileCLIP(TextModel):
  """
@@ -160,7 +193,7 @@ class MobileCLIP(TextModel):

  config_size_map = {"s0": "s0", "s1": "s1", "s2": "s2", "b": "b", "blt": "b"}

- def __init__(self, size: str, device: torch.device):
+ def __init__(self, size: str, device: torch.device) -> None:
  """
  Initialize the MobileCLIP text encoder.

@@ -201,7 +234,7 @@ class MobileCLIP(TextModel):
  self.device = device
  self.eval()

- def tokenize(self, texts: List[str]):
+ def tokenize(self, texts: List[str]) -> torch.Tensor:
  """
  Convert input texts to MobileCLIP tokens.

@@ -218,7 +251,7 @@ class MobileCLIP(TextModel):
  return self.tokenizer(texts).to(self.device)

  @smart_inference_mode()
- def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32):
+ def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
  """
  Encode tokenized texts into normalized feature vectors.

@@ -286,7 +319,7 @@ class MobileCLIPTS(TextModel):
  self.tokenizer = clip.clip.tokenize
  self.device = device

- def tokenize(self, texts: List[str]):
+ def tokenize(self, texts: List[str]) -> torch.Tensor:
  """
  Convert input texts to MobileCLIP tokens.

@@ -303,7 +336,7 @@ class MobileCLIPTS(TextModel):
  return self.tokenizer(texts).to(self.device)

  @smart_inference_mode()
- def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32):
+ def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
  """
  Encode tokenized texts into normalized feature vectors.

@@ -322,10 +355,10 @@ class MobileCLIPTS(TextModel):
  torch.Size([2, 512]) # Actual dimension depends on model size
  """
  # NOTE: no need to do normalization here as it's embedded in the torchscript model
- return self.encoder(texts)
+ return self.encoder(texts).to(dtype)


- def build_text_model(variant: str, device: torch.device = None):
+ def build_text_model(variant: str, device: torch.device = None) -> TextModel:
  """
  Build a text encoding model based on the specified variant.
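
The new CLIP.encode_image pairs with the existing encode_text, and both are reachable through build_text_model (used the same way in similarity_search.py below). A minimal text-image similarity sketch, assuming the CLIP dependency is installed and using an illustrative image path:

    import torch
    from PIL import Image
    from ultralytics.nn.text_model import build_text_model

    clip_model = build_text_model("clip:ViT-B/32", device=torch.device("cpu"))
    img_feats = clip_model.encode_image(Image.open("bus.jpg"))               # PIL input is preprocessed internally
    txt_feats = clip_model.encode_text(clip_model.tokenize(["a city bus"]))
    print((img_feats @ txt_feats.T).item())                                  # cosine similarity of unit vectors
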
ultralytics/solutions/object_counter.py CHANGED
@@ -133,7 +133,7 @@ class ObjectCounter(BaseSolution):
  str.capitalize(key): f"{'IN ' + str(value['IN']) if self.show_in else ''} "
  f"{'OUT ' + str(value['OUT']) if self.show_out else ''}".strip()
  for key, value in self.classwise_counts.items()
- if value["IN"] != 0 or value["OUT"] != 0
+ if value["IN"] != 0 or value["OUT"] != 0 and (self.show_in or self.show_out)
  }
  if labels_dict:
  self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255), self.margin)
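
The new condition only affects which per-class labels get drawn; the counting itself is unchanged. A hedged usage sketch, assuming the standard solutions API and an illustrative line region:

    import cv2
    from ultralytics import solutions

    counter = solutions.ObjectCounter(model="yolo11n.pt", region=[(20, 400), (1080, 400)], show_in=True, show_out=False)
    frame = cv2.imread("bus.jpg")
    results = counter(frame)  # per-class IN/OUT labels are overlaid only for the directions enabled above
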
ultralytics/solutions/similarity_search.py CHANGED
@@ -5,10 +5,10 @@ from pathlib import Path
  from typing import Any, List

  import numpy as np
- import torch
  from PIL import Image

  from ultralytics.data.utils import IMG_FORMATS
+ from ultralytics.nn.text_model import build_text_model
  from ultralytics.solutions.solutions import BaseSolution
  from ultralytics.utils.checks import check_requirements
  from ultralytics.utils.torch_utils import select_device
@@ -29,10 +29,8 @@ class VisualAISearch(BaseSolution):
  device (str): Computation device, e.g., 'cpu' or 'cuda'.
  faiss_index (str): Path to the FAISS index file.
  data_path_npy (str): Path to the numpy file storing image paths.
- model_name (str): Name of the CLIP model to use.
  data_dir (Path): Path object for the data directory.
  model: Loaded CLIP model.
- preprocess: CLIP preprocessing function.
  index: FAISS index for similarity search.
  image_paths (List[str]): List of image file paths.

@@ -51,13 +49,11 @@ class VisualAISearch(BaseSolution):
  def __init__(self, **kwargs: Any) -> None:
  """Initialize the VisualAISearch class with FAISS index and CLIP model."""
  super().__init__(**kwargs)
- check_requirements(["git+https://github.com/ultralytics/CLIP.git", "faiss-cpu"])
+ check_requirements("faiss-cpu")

  self.faiss = __import__("faiss")
- self.clip = __import__("clip")
  self.faiss_index = "faiss.index"
  self.data_path_npy = "paths.npy"
- self.model_name = "ViT-B/32"
  self.data_dir = Path(self.CFG["data"])
  self.device = select_device(self.CFG["device"])

@@ -70,7 +66,7 @@ class VisualAISearch(BaseSolution):
  safe_download(url=f"{ASSETS_URL}/images.zip", unzip=True, retry=3)
  self.data_dir = Path("images")

- self.model, self.preprocess = self.clip.load(self.model_name, device=self.device)
+ self.model = build_text_model("clip:ViT-B/32", device=self.device)

  self.index = None
  self.image_paths = []
@@ -79,16 +75,11 @@ class VisualAISearch(BaseSolution):

  def extract_image_feature(self, path: Path) -> np.ndarray:
  """Extract CLIP image embedding from the given image path."""
- image = Image.open(path)
- tensor = self.preprocess(image).unsqueeze(0).to(self.device)
- with torch.no_grad():
- return self.model.encode_image(tensor).cpu().numpy()
+ return self.model.encode_image(Image.open(path)).cpu().numpy()

  def extract_text_feature(self, text: str) -> np.ndarray:
  """Extract CLIP text embedding from the given text query."""
- tokens = self.clip.tokenize([text]).to(self.device)
- with torch.no_grad():
- return self.model.encode_text(tokens).cpu().numpy()
+ return self.model.encode_text(self.model.tokenize([text])).cpu().numpy()

  def load_or_build_index(self) -> None:
  """
ultralytics/solutions/streamlit_inference.py CHANGED
@@ -4,12 +4,15 @@ import io
  from typing import Any, List

  import cv2
+ import torch

  from ultralytics import YOLO
  from ultralytics.utils import LOGGER
  from ultralytics.utils.checks import check_requirements
  from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS

+ torch.classes.__path__ = [] # Torch module __path__._path issue: https://github.com/datalab-to/marker/issues/442
+

  class Inference:
  """
@@ -133,7 +136,15 @@ class Inference:
  def configure(self) -> None:
  """Configure the model and load selected classes for inference."""
  # Add dropdown menu for model selection
- available_models = [x.replace("yolo", "YOLO") for x in GITHUB_ASSETS_STEMS if x.startswith("yolo11")]
+ M_ORD, T_ORD = ["yolo11n", "yolo11s", "yolo11m", "yolo11l", "yolo11x"], ["", "-seg", "-pose", "-obb", "-cls"]
+ available_models = sorted(
+ [
+ x.replace("yolo", "YOLO")
+ for x in GITHUB_ASSETS_STEMS
+ if any(x.startswith(b) for b in M_ORD) and "grayscale" not in x
+ ],
+ key=lambda x: (M_ORD.index(x[:7].lower()), T_ORD.index(x[7:].lower() or "")),
+ )
  if self.model_path: # If user provided the custom model, insert model without suffix as *.pt is added later
  available_models.insert(0, self.model_path.split(".pt", 1)[0])
  selected_model = self.st.sidebar.selectbox("Model", available_models)
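
The new sort key orders the dropdown by model size first and task suffix second. A self-contained sketch of the same key applied to a hand-picked sample (the names are illustrative):

    M_ORD = ["yolo11n", "yolo11s", "yolo11m", "yolo11l", "yolo11x"]
    T_ORD = ["", "-seg", "-pose", "-obb", "-cls"]
    sample = ["YOLO11s-seg", "YOLO11n", "YOLO11n-cls", "YOLO11s"]
    print(sorted(sample, key=lambda x: (M_ORD.index(x[:7].lower()), T_ORD.index(x[7:].lower() or ""))))
    # ['YOLO11n', 'YOLO11n-cls', 'YOLO11s', 'YOLO11s-seg']
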
ultralytics/utils/plotting.py CHANGED
@@ -694,7 +694,7 @@ def plot_images(

  Args:
  labels (Dict[str, Any]): Dictionary containing detection data with keys like 'cls', 'bboxes', 'conf', 'masks', 'keypoints', 'batch_idx', 'img'.
- images (Union[torch.Tensor, np.ndarray]): Batch of images to plot. Shape: (batch_size, channels, height, width).
+ images (torch.Tensor | np.ndarray]): Batch of images to plot. Shape: (batch_size, channels, height, width).
  paths (Optional[List[str]]): List of file paths for each image in the batch.
  fname (str): Output filename for the plotted image grid.
  names (Optional[Dict[int, str]]): Dictionary mapping class indices to class names.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.157
+ Version: 8.3.159
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -87,7 +87,7 @@ Dynamic: license-file
  <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="Ultralytics YOLO banner"></a>
  </p>

- [中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
+ [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) <br>

  <div>
  <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
@@ -7,7 +7,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=nOoaPDg-0j7ZPRz9-uGFny3uocxjUM1ze5wA3BpGxKQ,27865
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
- ultralytics/__init__.py,sha256=2mwBem7xtvNmrW5pBkCtYV3rgq4UvYlvOHu6FkTIDKs,730
+ ultralytics/__init__.py,sha256=sbeS4zCdUAcxO1GIm2GxM1Pk92RQ2Kom9Fk52c9syUs,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=ds63URbbeRj5UxkCSyl62OrNw6HQy7xeit5-0wGDEKg,39699
@@ -105,7 +105,7 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
  ultralytics/data/annotator.py,sha256=uAgd7K-yudxiwdNqHz0ubfFg5JsfNlae4cgxdvCMyuY,3030
- ultralytics/data/augment.py,sha256=yAUn0P7z9dQ37DwoIXF6Tz2PvTxxHMMj54311mOSWP8,129050
+ ultralytics/data/augment.py,sha256=Zxqp6dWKALAbUYha-R_MVrcysdlBj9glm4Nsth_JLrg,129030
  ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,19688
  ultralytics/data/build.py,sha256=13gPxCJIZRjgcNh7zbzanCgtyK6_oZM0ho9KQhHcM6c,11153
  ultralytics/data/converter.py,sha256=oKW8ODtvFOKBx9Un8n87xUUm3b5GStU4ViIBH5UDylM,27200
@@ -119,7 +119,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=Q3y4yo0h8zLuCWE_FEvGV_eUwMfKjDrdZsmN_bc24R8,73041
+ ultralytics/engine/exporter.py,sha256=n9mRjOWdX-3T9SroICwdMaRRVi9h98coAfCzDYopyW4,73070
  ultralytics/engine/model.py,sha256=DwugtVxUbCGzpY2pStFMcEloim0ai6LrT6kTbwskSJ8,53302
  ultralytics/engine/predictor.py,sha256=88zrgZP91ehwdeGl8BM_cQ_caeuwKIPDy3OzxcRBjTU,22474
  ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
@@ -166,13 +166,13 @@ ultralytics/models/utils/ops.py,sha256=Pr77n8XW25SUEx4X3bBvXcVIbRdJPoaXJuG0KWWaw
  ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
  ultralytics/models/yolo/model.py,sha256=C0wInQC6rFuFOGpdAen1s2e5LIFDmqevto8uPbpmB8c,18449
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
- ultralytics/models/yolo/classify/predict.py,sha256=_GiN6muuZOBrMS1KER85FE4ktcw_Onn1bZdGvpbsGCE,4618
+ ultralytics/models/yolo/classify/predict.py,sha256=FqAC2YXe25bRwedMZhF3Lw0waoY-a60xMKELhxApP9I,4149
  ultralytics/models/yolo/classify/train.py,sha256=V-hevc6X7xemnpyru84OfTRA77eNnkVSMEz16_OUvo4,10244
  ultralytics/models/yolo/classify/val.py,sha256=YakPxBVZCd85Kp4wFKx8KH6JJFiU7nkFS3r9_ZSwFRM,10036
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
  ultralytics/models/yolo/detect/train.py,sha256=HlaCoHJ6Y2TpCXXWabMRZApAYqBvjuM_YQJUV5JYCvw,9907
- ultralytics/models/yolo/detect/val.py,sha256=nY3NhT50fMLk0wMwQBv3AnLAVoPMI6mx37OJw9-QT5A,18541
+ ultralytics/models/yolo/detect/val.py,sha256=Yhs7SdS8O_4_61N_ZxzGaEfm4tnpEzIRV5XcMsrI-e4,20485
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
  ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
@@ -180,11 +180,11 @@ ultralytics/models/yolo/obb/val.py,sha256=nT82lKXewUw3bgX45Ms045rzcYn2A1j8g3Dxig
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
  ultralytics/models/yolo/pose/train.py,sha256=GyvNnDPJ3UFq_90HN8_FJ0dbwRkw3JJTVpkMFH0vC0o,5457
- ultralytics/models/yolo/pose/val.py,sha256=8d7AthoJYUK8BK01ptxpANdcR9_-REEMKicB1hCYgio,15330
+ ultralytics/models/yolo/pose/val.py,sha256=abAll3lWT6IRwoHOFNsgAZyNQtTtPBXHq0Wszpu9p5E,13994
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
  ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
  ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65rK-QH9mtNIw,3802
- ultralytics/models/yolo/segment/val.py,sha256=Iai-oK1XeD6y23WWq7FouiE_Az7o4C24E770OPCO2WY,14168
+ ultralytics/models/yolo/segment/val.py,sha256=AnvY0O7HhD5xZ2BE2artLTAVW4SNmHbVopBJsYRcmk8,12328
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
  ultralytics/models/yolo/world/train.py,sha256=94_hgCluzsv39JkBVDmR2gjuycYjeJC8wVrCfrjpENk,7806
  ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
@@ -194,9 +194,9 @@ ultralytics/models/yolo/yoloe/train.py,sha256=Dt6orqXcQTzyoAqMVvleP1FQbXChMvEj3Q
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
  ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=smyYoozUOtXPNKW9Rd24dZX-EY36CDvXMr7xH-uLEs0,41256
+ ultralytics/nn/autobackend.py,sha256=yk1IXPChI1D7rupJdH2TMvUqFv6PVmBU3tgfZOquQ_8,41358
  ultralytics/nn/tasks.py,sha256=aCXYmWan2LTznH3i_-2OwMagG3ZwnVL1gjKtY-3oShM,72456
- ultralytics/nn/text_model.py,sha256=m4jDB5bzOLOS8XNmFi9oQk-skzRHiIpJy4K-_SIARR0,13498
+ ultralytics/nn/text_model.py,sha256=cYwD-0el4VeToDBP4iPFOQGqyEQatJOBHrVyONL3K_s,15282
  ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
  ultralytics/nn/modules/block.py,sha256=JfOjWEgUNfwFCt-P2awhga4B7GXeDlkKVhLBp7oA-Es,70652
@@ -212,16 +212,16 @@ ultralytics/solutions/distance_calculation.py,sha256=r05_ufxb2Mpw3EIX8X32PIWlh9r
  ultralytics/solutions/heatmap.py,sha256=vEdzLSYCNIFC9CsBWYSnCLiM8xNuYLJ-1i7enjQgOQw,5516
  ultralytics/solutions/instance_segmentation.py,sha256=qsIQkvuR1Ur2bdEsCCJP2IEO1Hz2l0wfR2KUBo247xE,3795
  ultralytics/solutions/object_blurrer.py,sha256=wHbfrudh6li_JADc-dTHGGMI8GU-MvesoTvVlX6YuYc,3998
- ultralytics/solutions/object_counter.py,sha256=Zt6FNfPSPN3L69zks1u4DSPM3A6mdl7p29im4O-2QFQ,9406
+ ultralytics/solutions/object_counter.py,sha256=djg6XIgOuseoKCEY5PrLRf4Z1JjbTEBXrERRV8dOSlU,9442
  ultralytics/solutions/object_cropper.py,sha256=mS3iT_CgqfqG9ldM_AM5ptq5bfYFyTycPQY5DxxMlSA,3525
  ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVSw8VD0OrpKtExPE,13613
  ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
  ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
  ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
- ultralytics/solutions/similarity_search.py,sha256=Tx5R_IVzQjUVLrraS0oJkoJLkx8dJCyaf_Nwbu_4yyo,9982
+ ultralytics/solutions/similarity_search.py,sha256=ri8bf65tt6xyS6Xa-ikj2AgvfCsFOtaQk6IM_k7FhKg,9579
  ultralytics/solutions/solutions.py,sha256=N5t1DgZpuFBbDvLVZ7wRkafmgu8SS1VC9VNjuupglwQ,37532
  ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
- ultralytics/solutions/streamlit_inference.py,sha256=lqHh0UDCVmWIeh3yzpvoV7j9K6Ipx7pJBkOsb0ZpZes,10034
+ ultralytics/solutions/streamlit_inference.py,sha256=SqL-YxU3RCxCKscH2AYUTkmJknilV9jCCco6ufqsFk4,10501
  ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
  ultralytics/solutions/vision_eye.py,sha256=nlIdXhfM5EwJh4vqVhz3AEOoHXIELMo1OG8Cr1tMQRw,3008
  ultralytics/solutions/templates/similarity-search.html,sha256=vdz9XCH6VHbksvSW_sSg6Z2xVp82_EanaS_rY7xjZBE,4743
@@ -249,7 +249,7 @@ ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,397
  ultralytics/utils/metrics.py,sha256=1XaTT3n3tfLms6LOCiEzg_QGHQJzjZmfjFoAYsCCc24,62646
  ultralytics/utils/ops.py,sha256=Jkh80ujyi0XDQwNqCUYyomH8NQ145AH9doMUS8Vt8GE,34545
  ultralytics/utils/patches.py,sha256=P2uQy7S4RzSHBfwJEXJsjyuRUluaaUusiVU84lV3moQ,6577
- ultralytics/utils/plotting.py,sha256=OzanAqs7Z02ddAd1LiXce0Jjjo8DSjAjbKViE6S5CKg,47176
+ ultralytics/utils/plotting.py,sha256=SCpG5DHZUPlFUsu72kNH3DYGpsjgkd3eIZ9-QTllY88,47171
  ultralytics/utils/tal.py,sha256=aXawOnhn8ni65tJWIW-PYqWr_TRvltbHBjrTo7o6lDQ,20924
  ultralytics/utils/torch_utils.py,sha256=iIAjf2g4hikzBeHvKN-EQK8QFlC_QtWWRuYQuBF2zIk,39184
  ultralytics/utils/triton.py,sha256=M7qe4RztiADBJQEWQKaIQsp94ERFJ_8_DUHDR6TXEOM,5410
@@ -265,9 +265,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
- ultralytics-8.3.157.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.157.dist-info/METADATA,sha256=vZ9QsDSUEX148oGlo6qNsbooXGnT_pK-mlPBdc0k-L4,37212
- ultralytics-8.3.157.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ultralytics-8.3.157.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.157.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.157.dist-info/RECORD,,
+ ultralytics-8.3.159.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.159.dist-info/METADATA,sha256=rDjTuSzOBsjgNEKv23itvJdbVi69RoZGid-Nx5IjscA,37222
+ ultralytics-8.3.159.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ultralytics-8.3.159.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.159.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.159.dist-info/RECORD,,