ultralytics 8.3.133__py3-none-any.whl → 8.3.134__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_cli.py CHANGED
@@ -61,6 +61,7 @@ def test_rtdetr(task: str = "detect", model: str = "yolov8n-rtdetr.yaml", data:
     if TORCH_1_9:
         weights = WEIGHTS_DIR / "rtdetr-l.pt"
         run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+        run(f"yolo train {task} model={weights} epochs=1 imgsz=160 cache=disk data=coco8.yaml")
 
 
 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
@@ -126,3 +127,12 @@ def test_train_gpu(task: str, model: str, data: str) -> None:
     """Test YOLO training on GPU(s) for various tasks and models."""
     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0")  # single GPU
     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1")  # multi GPU
+
+
+@pytest.mark.parametrize(
+    "solution",
+    ["count", "blur", "workout", "heatmap", "isegment", "visioneye", "speed", "queue", "analytics", "trackzone"],
+)
+def test_solutions(solution: str) -> None:
+    """Test yolo solutions command-line modes."""
+    run(f"yolo solutions {solution} verbose=False")
tests/test_python.py CHANGED
@@ -271,10 +271,12 @@ def test_results(model):
         r = r.to(device="cpu", dtype=torch.float32)
         r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
         r.save_crop(save_dir=TMP / "runs/tests/crops/")
-        r.to_json(normalize=True)
-        r.to_df(decimals=3)
+        r.to_df(decimals=3)  # Align to_ methods: https://docs.ultralytics.com/modes/predict/#working-with-results
         r.to_csv()
         r.to_xml()
+        r.to_html()
+        r.to_json(normalize=True)
+        r.to_sql()
         r.plot(pil=True, save=True, filename=TMP / "results_plot_save.jpg")
         r.plot(conf=True, boxes=True)
         print(r, len(r), r.path)  # print after methods
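The test now walks the full family of `Results` export helpers, including the newly exercised `to_html()` and `to_sql()`. A minimal usage sketch under the documentation URL cited in the diff; the model checkpoint and image are placeholders:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # any detection checkpoint
r = model.predict("https://ultralytics.com/images/bus.jpg")[0]

df = r.to_df(decimals=3)        # pandas DataFrame of detections
csv = r.to_csv()                # CSV string
xml = r.to_xml()                # XML string
html = r.to_html()              # HTML table string
js = r.to_json(normalize=True)  # JSON with coordinates normalized to [0, 1]
r.to_sql()                      # writes detections to a local SQLite database
```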
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.133"
+__version__ = "8.3.134"
 
 import os
 
ultralytics/data/augment.py CHANGED
@@ -1170,6 +1170,8 @@ class RandomPerspective:
             img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114))
         else:  # affine
             img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114))
+        if img.ndim == 2:
+            img = img[..., None]
         return img, M, s
 
     def apply_bboxes(self, bboxes, M):
@@ -1824,6 +1826,8 @@ class CopyPaste(BaseMixTransform):
             cv2.drawContours(im_new, instances2.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)
 
         result = labels2.get("img", cv2.flip(im, 1))  # augment segments
+        if result.ndim == 2:  # cv2.flip would eliminate the last dimension for grayscale images
+            result = result[..., None]
         i = im_new.astype(bool)
         im[i] = result[i]
 
ultralytics/data/build.py CHANGED
@@ -244,9 +244,9 @@ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False, chan
     elif in_memory:
         dataset = source
     elif stream:
-        dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer)
+        dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer, channels=channels)
     elif screenshot:
-        dataset = LoadScreenshots(source)
+        dataset = LoadScreenshots(source, channels=channels)
     elif from_img:
         dataset = LoadPilAndNumpy(source, channels=channels)
     else:
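With `channels` now threaded through every branch, grayscale loading can be requested for streams and screenshots just as for files and in-memory arrays. A hedged sketch of calling the dispatcher directly, assuming the loaders' usual (paths, images, info) batches; most users reach this code indirectly via `model.predict`:

```python
from ultralytics.data.build import load_inference_source

# channels=1 selects cv2.IMREAD_GRAYSCALE-style loading in every branch above
dataset = load_inference_source("https://ultralytics.com/images/bus.jpg", channels=1)
for paths, imgs, info in dataset:
    print(imgs[0].shape)  # expected (H, W, 1) for grayscale loading
    break
```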
ultralytics/data/loaders.py CHANGED
@@ -68,6 +68,7 @@ class LoadStreams:
         shape (List[Tuple[int, int, int]]): List of shapes for each stream.
         caps (List[cv2.VideoCapture]): List of cv2.VideoCapture objects for each stream.
         bs (int): Batch size for processing.
+        cv2_flag (int): OpenCV flag for image reading (grayscale or RGB).
 
     Methods:
         update: Read stream frames in daemon thread.
@@ -89,13 +90,14 @@
         - The class implements a buffer system to manage frame storage and retrieval.
     """
 
-    def __init__(self, sources="file.streams", vid_stride=1, buffer=False):
+    def __init__(self, sources="file.streams", vid_stride=1, buffer=False, channels=3):
         """Initialize stream loader for multiple video sources, supporting various stream types."""
         torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
         self.buffer = buffer  # buffer input streams
         self.running = True  # running flag for Thread
         self.mode = "stream"
         self.vid_stride = vid_stride  # video frame-rate stride
+        self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR  # grayscale or RGB
 
         sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
         n = len(sources)
@@ -131,6 +133,7 @@
             self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback
 
             success, im = self.caps[i].read()  # guarantee first frame
+            im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im
             if not success or im is None:
                 raise ConnectionError(f"{st}Failed to read images from {s}")
             self.imgs[i].append(im)
@@ -149,6 +152,9 @@
                 cap.grab()  # .read() = .grab() followed by .retrieve()
                 if n % self.vid_stride == 0:
                     success, im = cap.retrieve()
+                    im = (
+                        cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im
+                    )
                     if not success:
                         im = np.zeros(self.shape[i], dtype=np.uint8)
                         LOGGER.warning("Video stream unresponsive, please check your IP camera connection.")
@@ -230,6 +236,7 @@ class LoadScreenshots:
         bs (int): Batch size, set to 1.
         fps (int): Frames per second, set to 30.
         monitor (Dict[str, int]): Monitor configuration details.
+        cv2_flag (int): OpenCV flag for image reading (grayscale or RGB).
 
     Methods:
         __iter__: Returns an iterator object.
@@ -241,7 +248,7 @@
         ...     print(f"Captured frame: {im.shape}")
     """
 
-    def __init__(self, source):
+    def __init__(self, source, channels=3):
         """Initialize screenshot capture with specified screen and region parameters."""
         check_requirements("mss")
         import mss  # noqa
@@ -259,6 +266,7 @@
         self.sct = mss.mss()
         self.bs = 1
         self.fps = 30
+        self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR  # grayscale or RGB
 
         # Parse monitor shape
         monitor = self.sct.monitors[self.screen]
@@ -275,6 +283,7 @@
     def __next__(self):
         """Captures and returns the next screenshot as a numpy array using the mss library."""
         im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3]  # BGRA to BGR
+        im0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im0
         s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
 
         self.frame += 1
@@ -395,6 +404,11 @@ class LoadImagesAndVideos:
 
                 if success:
                     success, im0 = self.cap.retrieve()
+                    im0 = (
+                        cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None]
+                        if self.cv2_flag == cv2.IMREAD_GRAYSCALE
+                        else im0
+                    )
                     if success:
                         self.frame += 1
                         paths.append(path)
@@ -497,6 +511,8 @@ class LoadPilAndNumpy:
             # adding new axis if it's grayscale, and converting to BGR if it's RGB
             im = im[..., None] if flag == "L" else im[..., ::-1]
             im = np.ascontiguousarray(im)  # contiguous
+        elif im.ndim == 2:  # grayscale in numpy form
+            im = im[..., None]
         return im
 
     def __len__(self):
ultralytics/data/utils.py CHANGED
@@ -424,8 +424,8 @@ def check_det_dataset(dataset, autodownload=True):
 
     # Resolve paths
     path = Path(extract_dir or data.get("path") or Path(data.get("yaml_file", "")).parent)  # dataset root
-    if not path.is_absolute():
-        path = (DATASETS_DIR / path).resolve()
+    if not path.exists() and not path.is_absolute():
+        path = (DATASETS_DIR / path).resolve()  # path relative to DATASETS_DIR
 
     # Set paths
     data["path"] = path  # download scripts
ultralytics/engine/exporter.py CHANGED
@@ -142,7 +142,7 @@ def export_formats():
         ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
         ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
         ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction"]],
-        ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name", "int8"]],
+        ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
     ]
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
 
@@ -555,7 +555,7 @@ class Exporter:
     @try_export
     def export_onnx(self, prefix=colorstr("ONNX:")):
         """YOLO ONNX export."""
-        requirements = ["onnx>=1.12.0"]
+        requirements = ["onnx>=1.12.0,<1.18.0"]
         if self.args.simplify:
             requirements += ["onnxslim>=0.1.46", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
         check_requirements(requirements)
@@ -1121,8 +1121,8 @@ class Exporter:
         rknn = RKNN(verbose=False)
         rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=self.args.name)
         rknn.load_onnx(model=f)
-        rknn.build(do_quantization=self.args.int8)
-        f = f.replace(".onnx", f"-{self.args.name}-int8.rknn" if self.args.int8 else f"-{self.args.name}-fp16.rknn")
+        rknn.build(do_quantization=False)  # TODO: Add quantization support
+        f = f.replace(".onnx", f"-{self.args.name}.rknn")
         rknn.export_rknn(f"{export_path / f}")
         YAML.save(export_path / "metadata.yaml", self.metadata)
         return export_path, None
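With quantization disabled, RKNN export now always produces a single non-quantized artifact named for the target platform. A hedged usage sketch; the checkpoint and the "rk3588" platform string are examples, and `name` is the format argument listed for RKNN in export_formats() above:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# "name" selects the Rockchip target platform; int8 is no longer accepted
# for this format after this change.
path = model.export(format="rknn", name="rk3588")
# expected to produce e.g. yolo11n-rk3588.rknn, per the new naming scheme
```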
ultralytics/engine/results.py CHANGED
@@ -1032,7 +1032,7 @@ class Results(SimpleClass):
         conn.commit()
         conn.close()
 
-        LOGGER.info(f"Detection results successfully written to SQL table '{table_name}' in database '{db_path}'.")
+        LOGGER.info(f"Detection results successfully written to SQL table '{table_name}' in database '{db_path}'.")
 
 
 class Boxes(BaseTensor):
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -51,7 +51,7 @@ class DetectionPredictor(BasePredictor):
         >>> results = predictor.predict("path/to/image.jpg")
         >>> processed_results = predictor.postprocess(preds, img, orig_imgs)
         """
-        save_feats = getattr(self, "save_feats", False)
+        save_feats = getattr(self, "_feats", None) is not None
         preds = ops.non_max_suppression(
             preds,
             self.args.conf,
ultralytics/nn/tasks.py CHANGED
@@ -284,13 +284,15 @@ class BaseModel(torch.nn.Module):
         updated_csd = intersect_dicts(csd, self.state_dict())  # intersect
         self.load_state_dict(updated_csd, strict=False)  # load
         len_updated_csd = len(updated_csd)
-        first_conv = "model.0.conv.weight"
-        if first_conv not in updated_csd:  # mostly used to boost multi-channel training
-            c1, c2, h, w = self.state_dict()[first_conv].shape
+        first_conv = "model.0.conv.weight"  # hard-coded to yolo models for now
+        # mostly used to boost multi-channel training
+        state_dict = self.state_dict()
+        if first_conv not in updated_csd and first_conv in state_dict:
+            c1, c2, h, w = state_dict[first_conv].shape
             cc1, cc2, ch, cw = csd[first_conv].shape
             if ch == h and cw == w:
                 c1, c2 = min(c1, cc1), min(c2, cc2)
-                self.state_dict()[first_conv][:c1, :c2] = csd[first_conv][:c1, :c2]
+                state_dict[first_conv][:c1, :c2] = csd[first_conv][:c1, :c2]
                 len_updated_csd += 1
         if verbose:
             LOGGER.info(f"Transferred {len_updated_csd}/{len(self.model.state_dict())} items from pretrained weights")
ultralytics/solutions/ai_gym.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from collections import defaultdict
+
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
 
 
@@ -11,10 +13,7 @@ class AIGym(BaseSolution):
     repetitions of exercises based on predefined angle thresholds for up and down positions.
 
     Attributes:
-        count (List[int]): Repetition counts for each detected person.
-        angle (List[float]): Current angle of the tracked body part for each person.
-        stage (List[str]): Current exercise stage ('up', 'down', or '-') for each person.
-        initial_stage (str | None): Initial stage of the exercise.
+        states (Dict[float, int, str]): Stores per-track angle, count, and stage for workout monitoring.
         up_angle (float): Angle threshold for considering the 'up' position of an exercise.
         down_angle (float): Angle threshold for considering the 'down' position of an exercise.
         kpts (List[int]): Indices of keypoints used for angle calculation.
@@ -41,12 +40,9 @@ class AIGym(BaseSolution):
         """
         kwargs["model"] = kwargs.get("model", "yolo11n-pose.pt")
         super().__init__(**kwargs)
-        self.count = []  # List for counts, necessary where there are multiple objects in frame
-        self.angle = []  # List for angle, necessary where there are multiple objects in frame
-        self.stage = []  # List for stage, necessary where there are multiple objects in frame
+        self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"})  # Dict for count, angle and stage
 
         # Extract details from CFG single time for usage later
-        self.initial_stage = None
         self.up_angle = float(self.CFG["up_angle"])  # Pose up predefined angle to consider up pose
         self.down_angle = float(self.CFG["down_angle"])  # Pose down predefined angle to consider down pose
         self.kpts = self.CFG["kpts"]  # User selected kpts of workouts storage for further usage
@@ -81,33 +77,30 @@ class AIGym(BaseSolution):
         tracks = self.tracks[0]
 
         if tracks.boxes.id is not None:
-            if len(tracks) > len(self.count):  # Add new entries for newly detected people
-                new_human = len(tracks) - len(self.count)
-                self.angle += [0] * new_human
-                self.count += [0] * new_human
-                self.stage += ["-"] * new_human
-
-            # Enumerate over keypoints
-            for ind, k in enumerate(reversed(tracks.keypoints.data)):
+            track_ids = tracks.boxes.id.cpu().tolist()
+            kpt_data = tracks.keypoints.data.cpu()  # Avoid repeated .cpu() calls
+
+            for i, k in enumerate(kpt_data):
+                track_id = int(track_ids[i])  # get track id
+                state = self.states[track_id]  # get state details
                 # Get keypoints and estimate the angle
-                kpts = [k[int(self.kpts[i])].cpu() for i in range(3)]
-                self.angle[ind] = annotator.estimate_pose_angle(*kpts)
+                state["angle"] = annotator.estimate_pose_angle(*[k[int(idx)] for idx in self.kpts])
                 annotator.draw_specific_kpts(k, self.kpts, radius=self.line_width * 3)
 
                 # Determine stage and count logic based on angle thresholds
-                if self.angle[ind] < self.down_angle:
-                    if self.stage[ind] == "up":
-                        self.count[ind] += 1
-                    self.stage[ind] = "down"
-                elif self.angle[ind] > self.up_angle:
-                    self.stage[ind] = "up"
+                if state["angle"] < self.down_angle:
+                    if state["stage"] == "up":
+                        state["count"] += 1
+                    state["stage"] = "down"
+                elif state["angle"] > self.up_angle:
+                    state["stage"] = "up"
 
                 # Display angle, count, and stage text
                 if self.show_labels:
                     annotator.plot_angle_and_count_and_stage(
-                        angle_text=self.angle[ind],  # angle text for display
-                        count_text=self.count[ind],  # count text for workouts
-                        stage_text=self.stage[ind],  # stage position text
+                        angle_text=state["angle"],  # angle text for display
+                        count_text=state["count"],  # count text for workouts
+                        stage_text=state["stage"],  # stage position text
                         center_kpt=k[int(self.kpts[1])],  # center keypoint for display
                     )
         plot_im = annotator.result()
@@ -116,8 +109,8 @@ class AIGym(BaseSolution):
         # Return SolutionResults
         return SolutionResults(
             plot_im=plot_im,
-            workout_count=self.count,
-            workout_stage=self.stage,
-            workout_angle=self.angle,
+            workout_count=[v["count"] for v in self.states.values()],
+            workout_stage=[v["stage"] for v in self.states.values()],
+            workout_angle=[v["angle"] for v in self.states.values()],
             total_tracks=len(self.track_ids),
         )
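Keying exercise state by tracker ID instead of list position makes the counts robust to people entering and leaving the frame. A minimal standalone sketch of the defaultdict pattern; the track IDs and angles here are made up:

```python
from collections import defaultdict

# One state dict per track ID, created lazily on first access
states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"})

UP_ANGLE, DOWN_ANGLE = 145.0, 90.0
for track_id, angle in [(7, 160.0), (7, 80.0), (7, 160.0), (3, 100.0)]:
    state = states[track_id]
    state["angle"] = angle
    if angle < DOWN_ANGLE:
        if state["stage"] == "up":
            state["count"] += 1  # a full up -> down transition counts one rep
        state["stage"] = "down"
    elif angle > UP_ANGLE:
        state["stage"] = "up"

print(states[7]["count"])  # 1 rep for track 7; track 3 keeps its own state
```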
ultralytics/solutions/heatmap.py CHANGED
@@ -99,7 +99,6 @@ class Heatmap(ObjectCounter):
             if self.region is not None:
                 self.annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
                 self.store_tracking_history(track_id, box)  # Store track history
-                self.store_classwise_counts(cls)  # Store classwise counts in dict
                 # Get previous position if available
                 prev_position = None
                 if len(self.track_history[track_id]) > 1:
@@ -123,6 +122,6 @@ class Heatmap(ObjectCounter):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=self.classwise_counts,
+            classwise_count=dict(self.classwise_counts),
             total_tracks=len(self.track_ids),
         )
ultralytics/solutions/object_counter.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from collections import defaultdict
+
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
 from ultralytics.utils.plotting import colors
 
@@ -22,7 +24,6 @@ class ObjectCounter(BaseSolution):
 
     Methods:
         count_objects: Counts objects within a polygonal or linear region.
-        store_classwise_counts: Initializes class-wise counts if not already present.
         display_counts: Displays object counts on the frame.
         process: Processes input data (frames or object tracks) and updates counts.
 
@@ -40,7 +41,7 @@ class ObjectCounter(BaseSolution):
         self.in_count = 0  # Counter for objects moving inward
         self.out_count = 0  # Counter for objects moving outward
         self.counted_ids = []  # List of IDs of objects that have been counted
-        self.classwise_counts = {}  # Dictionary for counts, categorized by object class
+        self.classwise_counts = defaultdict(lambda: {"IN": 0, "OUT": 0})  # Dictionary for counts, categorized by class
         self.region_initialized = False  # Flag indicating whether the region has been initialized
 
         self.show_in = self.CFG["show_in"]
@@ -110,22 +111,6 @@ class ObjectCounter(BaseSolution):
                     self.classwise_counts[self.names[cls]]["OUT"] += 1
                 self.counted_ids.append(track_id)
 
-    def store_classwise_counts(self, cls):
-        """
-        Initialize class-wise counts for a specific object class if not already present.
-
-        Args:
-            cls (int): Class index for classwise count updates.
-
-        Examples:
-            >>> counter = ObjectCounter()
-            >>> counter.store_classwise_counts(0)  # Initialize counts for class index 0
-            >>> print(counter.classwise_counts)
-            {'person': {'IN': 0, 'OUT': 0}}
-        """
-        if self.names[cls] not in self.classwise_counts:
-            self.classwise_counts[self.names[cls]] = {"IN": 0, "OUT": 0}
-
     def display_counts(self, plot_im):
         """
         Display object counts on the input image or frame.
@@ -189,7 +174,6 @@ class ObjectCounter(BaseSolution):
                 box, label=self.adjust_box_label(cls, conf, track_id), color=colors(cls, True), rotated=is_obb
             )
             self.store_tracking_history(track_id, box, is_obb=is_obb)  # Store track history
-            self.store_classwise_counts(cls)  # Store classwise counts in dict
 
             # Store previous position of track for object counting
             prev_position = None
@@ -206,6 +190,6 @@ class ObjectCounter(BaseSolution):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=self.classwise_counts,
+            classwise_count=dict(self.classwise_counts),
             total_tracks=len(self.track_ids),
         )
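Replacing the guarded-initialization helper with a defaultdict removes a whole method, and wrapping with `dict(...)` at the return boundary keeps lazy auto-creation from leaking to callers. A standalone sketch of the pattern:

```python
from collections import defaultdict

classwise_counts = defaultdict(lambda: {"IN": 0, "OUT": 0})

# No store_classwise_counts() needed: first access creates the entry
classwise_counts["person"]["IN"] += 1
classwise_counts["car"]["OUT"] += 1

snapshot = dict(classwise_counts)  # plain dict for results, as in the diff
print(snapshot)  # {'person': {'IN': 1, 'OUT': 0}, 'car': {'IN': 0, 'OUT': 1}}

# Reading the snapshot cannot accidentally create empty entries
print(snapshot.get("bus"))  # None, not {'IN': 0, 'OUT': 0}
```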
ultralytics/solutions/security_alarm.py CHANGED
@@ -110,7 +110,7 @@ class SecurityAlarm(BaseSolution):
         # Send the email
         try:
             self.server.send_message(message)
-            LOGGER.info("Email sent successfully!")
+            LOGGER.info("Email sent successfully!")
         except Exception as e:
             LOGGER.error(f"Failed to send email: {e}")
 
ultralytics/trackers/byte_tracker.py CHANGED
@@ -330,7 +330,11 @@ class BYTETracker:
         # Predict the current location with KF
         self.multi_predict(strack_pool)
         if hasattr(self, "gmc") and img is not None:
-            warp = self.gmc.apply(img, dets)
+            # use try-except here to bypass errors from gmc module
+            try:
+                warp = self.gmc.apply(img, dets)
+            except Exception:
+                warp = np.eye(2, 3)
             STrack.multi_gmc(strack_pool, warp)
             STrack.multi_gmc(unconfirmed, warp)
 
ultralytics/trackers/track.py CHANGED
@@ -45,7 +45,8 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
         raise AssertionError(f"Only 'bytetrack' and 'botsort' are supported for now, but got '{cfg.tracker_type}'")
 
     predictor._feats = None  # reset in case used earlier
-    predictor.save_feats = False
+    if hasattr(predictor, "_hook"):
+        predictor._hook.remove()
     if cfg.tracker_type == "botsort" and cfg.with_reid and cfg.model == "auto":
         from ultralytics.nn.modules.head import Detect
 
@@ -56,13 +57,11 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
         ):
             cfg.model = "yolo11n-cls.pt"
         else:
-            predictor.save_feats = True
-
             # Register hook to extract input of Detect layer
             def pre_hook(module, input):
-                predictor._feats = [t.clone() for t in input[0]]
+                predictor._feats = list(input[0])  # unroll to new list to avoid mutation in forward
 
-            predictor.model.model.model[-1].register_forward_pre_hook(pre_hook)
+            predictor._hook = predictor.model.model.model[-1].register_forward_pre_hook(pre_hook)
 
     trackers = []
     for _ in range(predictor.dataset.bs):
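Storing the handle returned by register_forward_pre_hook lets the tracker detach its feature-capture hook on the next setup instead of stacking duplicates. A minimal PyTorch sketch of the pattern; the tiny model is a placeholder:

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
captured = {}

def pre_hook(module, args):
    captured["feats"] = [t.detach() for t in args]  # grab inputs to the last layer

hook = model[-1].register_forward_pre_hook(pre_hook)  # keep the handle, as the diff now does
model(torch.randn(1, 4))
print(captured["feats"][0].shape)  # torch.Size([1, 4])

hook.remove()  # detach before re-registering; prevents duplicate hooks accumulating
```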
ultralytics/trackers/utils/gmc.py CHANGED
@@ -132,8 +132,8 @@ class GMC:
        [[1. 0. 0.]
         [0. 1. 0.]]
        """
-        height, width, _ = raw_frame.shape
-        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
+        height, width, c = raw_frame.shape
+        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) if c == 3 else raw_frame
         H = np.eye(2, 3, dtype=np.float32)
 
         # Downscale image
@@ -178,8 +178,8 @@ class GMC:
         >>> print(transformation_matrix.shape)
         (2, 3)
         """
-        height, width, _ = raw_frame.shape
-        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
+        height, width, c = raw_frame.shape
+        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) if c == 3 else raw_frame
         H = np.eye(2, 3)
 
         # Downscale image
@@ -320,8 +320,8 @@ class GMC:
        [[1. 0. 0.]
         [0. 1. 0.]]
        """
-        height, width, _ = raw_frame.shape
-        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
+        height, width, c = raw_frame.shape
+        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) if c == 3 else raw_frame
         H = np.eye(2, 3)
 
         # Downscale image
ultralytics-8.3.134.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.133
+Version: 8.3.134
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -59,7 +59,7 @@ Requires-Dist: mkdocstrings[python]; extra == "dev"
 Requires-Dist: mkdocs-ultralytics-plugin>=0.1.17; extra == "dev"
 Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
 Provides-Extra: export
-Requires-Dist: onnx>=1.12.0; extra == "export"
+Requires-Dist: onnx<1.18.0,>=1.12.0; extra == "export"
 Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
 Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
 Requires-Dist: openvino>=2024.0.0; extra == "export"
ultralytics-8.3.134.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
 tests/__init__.py,sha256=xnMhv3O_DF1YrW4zk__ZywQzAaoTDjPKPoiI1Ktss1w,670
 tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
-tests/test_cli.py,sha256=PtMFl5Lp_6ygBbYDJ1ndofz2k7ZYupMPEAiZw6aZVm8,5450
+tests/test_cli.py,sha256=vXUC_EK0fa87JRhHsCOZf7AJQ5_Jm1sL8u-yhmsaQh0,5851
 tests/test_cuda.py,sha256=eKwaqLxWTRRYNROnkH24Ch-HmxTRKQLSIxbMYFYq_p0,8123
 tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
 tests/test_exports.py,sha256=UeeBloqYYGZNh520R3CR80XBxA9XFrNmbK9An6V6C4w,9838
 tests/test_integrations.py,sha256=dQteeRsRVuT_p5-T88-7jqT65Zm9iAXkyKg-KQ1_TQ8,6341
-tests/test_python.py,sha256=m3tV3atrc3DvXZ5S-_C1ief_pDo4KlLgudjc7rq26l0,25492
+tests/test_python.py,sha256=KWsncKpeDdRmjRftmJpsMl7bBLI3TG_I7Lb4kuemZzQ,25618
 tests/test_solutions.py,sha256=IFlqyOUCvGbLe_YZqWmNCe_afg4as0p-SfAv3j7VURI,6205
-ultralytics/__init__.py,sha256=5KJcFLzyXLEENlwDYrbaJSUI5eiIL_K54mrNQvfpFhE,730
+ultralytics/__init__.py,sha256=MjIVksx-Ewf4xIqHmiJ0y8l0R7lDgbyrJLoEaGYahu4,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=We3ti0mvUQrGRmUPcufDGboW0YAO3nSRYuoWxGagk3M,39462
@@ -104,24 +104,24 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
 ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
 ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
-ultralytics/data/augment.py,sha256=7Md80H36S0X5RiSqCcwynSgGcRwMqnI4YbSw-rkYnlk,129139
+ultralytics/data/augment.py,sha256=5O02Um483j7VAutLUz13IGpuuEdvyD9mhTMxFCFwCas,129342
 ultralytics/data/base.py,sha256=bsASjxdkvojkFjas-JfFNSpBjo0GRAbYKDh64Y2hCH4,19015
-ultralytics/data/build.py,sha256=0nW3fjx-DceRIKJX786zP3cMAekUXHkuTGr5eVr9rSU,9769
+ultralytics/data/build.py,sha256=Ez_HSx-ZpL3Z1C4mDnyGPi107saG3TLR4PC7iv2sz_4,9807
 ultralytics/data/converter.py,sha256=znXH2XTdo0Q4NDHMny1ydVBvrxKn2kbbwI-X5bn1MlQ,26890
 ultralytics/data/dataset.py,sha256=uc5OMkaQtWQHBd_KST_WXO6FEoeF4xUhKDDJBKkQ354,34916
-ultralytics/data/loaders.py,sha256=q1dlJ9hyLnf-gorutgFZLndP8ZNJDCmCcZzJZRDDLDw,28868
+ultralytics/data/loaders.py,sha256=Wn_93-niQZg57VuX-vXF9MmcdHrGs5RlevdyO_V5J0s,29951
 ultralytics/data/split.py,sha256=6UFXcbVrzYVAPmFbl4FeZFJOkdbN3jQFepJxi_pD-I0,4748
 ultralytics/data/split_dota.py,sha256=ihG56YfNFZJDq1r7Zcgk8fKzde3gn21W0f67ub6nT68,11879
-ultralytics/data/utils.py,sha256=cF9w7cCzHN-EwL5dEMuf_gD7HoQsefQgDWpwYQsSA20,35496
+ultralytics/data/utils.py,sha256=5vD6Nea2SE14Ap9nFTHkJgzOgVKJy-P8-bcqqxa_UB0,35551
 ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
 ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=QI84hCFHFAbBX2evpPBxtcCLUjJEyEv40ASjqq64du4,70782
+ultralytics/engine/exporter.py,sha256=rWXGtgSYfjY6C1rI3ySHJrxtzw5yPUTVWOo6rxyR8c0,70748
 ultralytics/engine/model.py,sha256=fWhPNWUQzjjWfTEXzTaqSSearV4THRkEa_fl4dDvzWw,52930
 ultralytics/engine/predictor.py,sha256=AwKpOGY2G-thNNiRw4Kf_MBLamq5tbRhXLNSMRArqFo,21803
-ultralytics/engine/results.py,sha256=-JPBn_YMyZv6HhdlyhjRIZCcMf41LTyWID7JrEP64rc,79632
+ultralytics/engine/results.py,sha256=MhbyMCwgslmtV53fqii4UJUaLQ4gKTKdkXi7vvmJDAE,79628
 ultralytics/engine/trainer.py,sha256=c_iGyt6bwIf4aRUeVcVEuOKG9ZpixJsZUbI2eMqQXto,38951
 ultralytics/engine/tuner.py,sha256=zEW1UpLlZ6N4xbvS7MxICkshRlaFgLNfuADA0VfRpao,12629
 ultralytics/engine/validator.py,sha256=jfV81wuFDgrVVXEcPzgOpxAPrAZn-1LgpKwu9l_1-ts,17050
@@ -169,7 +169,7 @@ ultralytics/models/yolo/classify/predict.py,sha256=JV9szginTQ9Lpob0FozhKMiEIu1vV
 ultralytics/models/yolo/classify/train.py,sha256=rv2CJv9fzvtHf2q4l5g0RsjplWKeLpz637kKqjtrLNY,9737
 ultralytics/models/yolo/classify/val.py,sha256=xk-YwSQdl_oqyCBV0OOAOcXFL6CchebFOc36AkRSyjE,9992
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
-ultralytics/models/yolo/detect/predict.py,sha256=DOjhYCHPFPPAwZLWWmNt0d7lGka8GFeriM0OA9PTEGU,5310
+ultralytics/models/yolo/detect/predict.py,sha256=b0u4qthWKb-jxkObZM_FWUPHYKKb73yL7FYAqIrb4PE,5317
 ultralytics/models/yolo/detect/train.py,sha256=FHA2rQPbWFjceng4uVMU-k0kyOnvC5hbpv2VRnYuPSM,9543
 ultralytics/models/yolo/detect/val.py,sha256=7AB_wZi7aQ9_V1pZQSWk5qiJYS34fuO3P5aX7_3eeFE,18471
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
@@ -194,7 +194,7 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=BYFBd04k5WQaJPcFbCvVIbEf2IOQyW
 ultralytics/models/yolo/yoloe/val.py,sha256=oA8cVT3pBXF6aPZy7ITq0mDcktRuIgks8tTtqMRISyY,8431
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
 ultralytics/nn/autobackend.py,sha256=X2cxCytBu9fmniy8uJ5aZb28IukQ-uxV1INXeS1lclA,39368
-ultralytics/nn/tasks.py,sha256=BvNqt1Igk-DulR6jH9vI3LsiPBcui41t-s4xmBlTg3Y,63496
+ultralytics/nn/tasks.py,sha256=o7QZvlZyvmECxkITJjtDCPf-hAxXcZOLXP7PKtegOPQ,63594
 ultralytics/nn/text_model.py,sha256=8_7SRejKZA4Pi-ha0gjcWrQDDCDMBhtwlg8pPMWgjDE,13145
 ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
 ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
@@ -204,19 +204,19 @@ ultralytics/nn/modules/head.py,sha256=FbFB-e44Zvxgzdfy0FqeGWUn0DDahmEZvD1W_N2olc
 ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
-ultralytics/solutions/ai_gym.py,sha256=QRrZGMka83NY4B9gU3N2GxTaomo0WmTMNLxkNZTxo9U,5763
+ultralytics/solutions/ai_gym.py,sha256=QRTFwuD0g9KJgAjqdww4OeitXm-hsyXL1pJlrAhTyqA,5347
 ultralytics/solutions/analytics.py,sha256=u-khRAViGupjq9mkuAFCl9G3yE8hXfXASfKZd_SQZ-8,12111
 ultralytics/solutions/config.py,sha256=TLxQuZjqW-vhbS2OFmTT188-31ukHg1XP7l-BeOmqbU,5427
 ultralytics/solutions/distance_calculation.py,sha256=E13siGlQTqaGCk0xULk5Q86PwxiBAL4XWp83kQPb0YE,5751
-ultralytics/solutions/heatmap.py,sha256=_QzsWTL6S32J3pt8N1gcl-2DZeypNpn_iuhQzkYKxEo,5495
+ultralytics/solutions/heatmap.py,sha256=0Hw2Vhg4heglpnbNkM-RiGrQOkvgYbPRf4x8x4-zTjg,5418
 ultralytics/solutions/instance_segmentation.py,sha256=IuAxxEkKrbTPHmD0jV3VEjNWpBc78o8exg00nE0ldeQ,3558
 ultralytics/solutions/object_blurrer.py,sha256=-wXOdqqZisVhxLutZz7JvZmdgVGmsN7Ymary0JHc2qo,3946
-ultralytics/solutions/object_counter.py,sha256=aYjNTeEr5TGAwoecICp14K1cSrob7O6iPEe72l4E6CM,10224
+ultralytics/solutions/object_counter.py,sha256=cL3wqyYsClr_V4_ZjQZBefB-Y0Qswn-l7lWceNmDyN4,9525
 ultralytics/solutions/object_cropper.py,sha256=L6QZC5as_cUT42TMzeyXmkHa7vBi2UpNFf_-Jc7C1G0,3316
 ultralytics/solutions/parking_management.py,sha256=BV-2lpSfgmK7fib3DnPSZ5rtLdy11c8pBQm-72iTetc,13289
 ultralytics/solutions/queue_management.py,sha256=p1-cuI_rs4ygtlBryXjE65NYG2bnZXhp3ylggFnWcRs,4344
 ultralytics/solutions/region_counter.py,sha256=Zn35YRXNzhBk27D9MLOHBYe2L1o6H2ey3mEwCXofB_E,5418
-ultralytics/solutions/security_alarm.py,sha256=cmUWvz7U9IAxlOr-QCIU_j95lc2c8eUx9wI04t1vDFU,6251
+ultralytics/solutions/security_alarm.py,sha256=JdkQUjqJl3iCd2MLVYkh1L7askvhi3_gp0RLXG6s390,6247
 ultralytics/solutions/similarity_search.py,sha256=WTYmHNHfFrRiJ6mrZhJvGPsjt3szQUiM6VRpw2eBRjA,7332
 ultralytics/solutions/solutions.py,sha256=1iZIj3Z5bs14WbVT8MIDXABfW-pBmfvQNdBJ6l21uVY,32696
 ultralytics/solutions/speed_estimation.py,sha256=r7S5nGIx8PTV-zC4zCI36lQD2DVy5cen5cTXItfQIHo,5318
@@ -227,10 +227,10 @@ ultralytics/solutions/templates/similarity-search.html,sha256=DPoAO-1H-KXNt_T8mG
 ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
 ultralytics/trackers/basetrack.py,sha256=LYvWB5d7Woyrz_RlxaopjV07RQKH3sff_lZJfMcMxcA,4450
 ultralytics/trackers/bot_sort.py,sha256=fAMV6PJE19jXe-6u524bpcz7x3Ssauk3b3wKXUYpvoY,11462
-ultralytics/trackers/byte_tracker.py,sha256=D7JQ_6V8OUMQryxTrAr010UXMSaboQnI7T1xppzHXYg,20921
-ultralytics/trackers/track.py,sha256=hTh-qRZvCrnmo8TsfMQK8sp1F7qeUi97jgtXX-xhX3I,4880
+ultralytics/trackers/byte_tracker.py,sha256=9v0DY0l4TVD22M_KNhQdQdETu0P5J5pbWaZmaYYFIs4,21075
+ultralytics/trackers/track.py,sha256=A9Fy24PJQJNnb-hx4BuTZe27eycZpqqWAbRXaocl0KI,4929
 ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/trackers/utils/gmc.py,sha256=dz3I5LbIv7h1__Xg7rGHecQFE32VFTe54tUnxb8F0Z8,14466
+ultralytics/trackers/utils/gmc.py,sha256=843LlmqWuXdUULBNpxVCZlil-_2QG-UwvscUCFbpGjA,14541
 ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
 ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
 ultralytics/utils/__init__.py,sha256=vac0M-Hx55QXl6Vod3QPjnLBlt87Hwxu1784RXPmeQA,52879
@@ -264,9 +264,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=yYUgEgSv6L39sSev6vjwhAWU3DlPDsbSDV
 ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
 ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
 ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
-ultralytics-8.3.133.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.133.dist-info/METADATA,sha256=po51EqOXoP7a9l8ZhORSK5BE5RJ3iPeUvxPUqILhT5s,37223
-ultralytics-8.3.133.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
-ultralytics-8.3.133.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.133.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.133.dist-info/RECORD,,
+ultralytics-8.3.134.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.134.dist-info/METADATA,sha256=OSG5BnJ9SEoVF4C3GUecf9O_LcbZ2enGMnFQFJ0OuHE,37231
+ultralytics-8.3.134.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
+ultralytics-8.3.134.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.134.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.134.dist-info/RECORD,,