ultralytics 8.3.38__py3-none-any.whl → 8.3.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.3.38"
+__version__ = "8.3.40"
 
 import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -11,7 +11,6 @@ import cv2
 
 from ultralytics.utils import (
     ASSETS,
-    ASSETS_URL,
     DEFAULT_CFG,
     DEFAULT_CFG_DICT,
     DEFAULT_CFG_PATH,
@@ -42,6 +41,7 @@ SOLUTION_MAP = {
     "speed": ("SpeedEstimator", "estimate_speed"),
     "workout": ("AIGym", "monitor"),
     "analytics": ("Analytics", "process_data"),
+    "trackzone": ("TrackZone", "trackzone"),
     "help": None,
 }
 
@@ -75,13 +75,12 @@ ARGV = sys.argv or ["", ""]  # sometimes sys.argv = []
 SOLUTIONS_HELP_MSG = f"""
     Arguments received: {str(['yolo'] + ARGV[1:])}. Ultralytics 'yolo solutions' usage overview:
 
-        yolo SOLUTIONS SOLUTION ARGS
-
-        Where SOLUTIONS (required) is a keyword
-              SOLUTION (optional) is one of {list(SOLUTION_MAP.keys())}
-              ARGS (optional) are any number of custom 'arg=value' pairs like 'show_in=True' that override defaults.
-                  See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
+        yolo solutions SOLUTION ARGS
 
+        Where SOLUTION (optional) is one of {list(SOLUTION_MAP.keys())}
+              ARGS (optional) are any number of custom 'arg=value' pairs like 'show_in=True' that override defaults
+                  at https://docs.ultralytics.com/usage/cfg
+
     1. Call object counting solution
         yolo solutions count source="path/to/video/file.mp4" region=[(20, 400), (1080, 400), (1080, 360), (20, 360)]
 
@@ -96,6 +95,9 @@ SOLUTIONS_HELP_MSG = f"""
 
     5. Generate analytical graphs
         yolo solutions analytics analytics_type="pie"
+
+    6. Track Objects Within Specific Zones
+        yolo solutions trackzone source="path/to/video/file.mp4" region=[(150, 150), (1130, 150), (1130, 570), (150, 570)]
     """
 CLI_HELP_MSG = f"""
     Arguments received: {str(['yolo'] + ARGV[1:])}. Ultralytics 'yolo' commands use the following syntax:
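For orientation, the new `trackzone` entry in SOLUTION_MAP pairs a solution class with the method the CLI invokes per frame. A hedged sketch of how such an entry can be consumed (the dispatch code below is illustrative, not the Ultralytics source):

    from ultralytics import solutions

    cls_name, method_name = "TrackZone", "trackzone"  # SOLUTION_MAP["trackzone"]
    solution = getattr(solutions, cls_name)(model="yolo11n.pt")  # constructor kwargs are illustrative
    process_frame = getattr(solution, method_name)  # bound method, called once per video frame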
ultralytics/engine/model.py CHANGED
@@ -144,6 +144,9 @@ class Model(nn.Module):
         else:
             self._load(model, task=task)
 
+        # Delete super().training for accessing self.model.training
+        del self.training
+
     def __call__(
         self,
         source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
@@ -1143,3 +1146,29 @@ class Model(nn.Module):
         """
         self.model.eval()
         return self
+
+    def __getattr__(self, name):
+        """
+        Enables accessing model attributes directly through the Model class.
+
+        This method provides a way to access attributes of the underlying model directly through the Model class
+        instance. It first checks if the requested attribute is 'model', in which case it returns the model from
+        the module dictionary. Otherwise, it delegates the attribute lookup to the underlying model.
+
+        Args:
+            name (str): The name of the attribute to retrieve.
+
+        Returns:
+            (Any): The requested attribute value.
+
+        Raises:
+            AttributeError: If the requested attribute does not exist in the model.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> print(model.stride)
+            >>> print(model.task)
+        """
+        if name == "model":
+            return self._modules["model"]
+        return getattr(self.model, name)
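The `del self.training` in `__init__` (earlier hunk) and this `__getattr__` work together: `nn.Module.__init__` stores `training` in the instance `__dict__`, so deleting it makes the lookup fall through to `__getattr__`, which forwards to the wrapped model. A minimal standalone sketch of the pattern (not the Ultralytics source):

    import torch.nn as nn

    class Wrapper(nn.Module):
        def __init__(self, inner):
            super().__init__()  # sets self.training = True on the wrapper
            self.model = inner  # registered in self._modules
            del self.training  # lookups now fall through to __getattr__

        def __getattr__(self, name):
            if name == "model":
                return self._modules["model"]  # avoid recursing through __getattr__
            return getattr(self.model, name)  # delegate everything else

    inner = nn.Linear(2, 2)
    w = Wrapper(inner)
    inner.eval()
    print(w.training)  # False -- reflects the wrapped model's mode, not the wrapper's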
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -54,6 +54,6 @@ class ClassificationPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
         return [
-            Results(orig_img, path=img_path, names=self.model.names, probs=pred)
+            Results(orig_img, path=img_path, names=self.model.names, probs=pred.softmax(0))
             for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
         ]
ultralytics/models/yolo/detect/train.py CHANGED
@@ -146,5 +146,5 @@ class DetectionTrainer(BaseTrainer):
         """Get batch size by calculating memory occupation of model."""
         train_dataset = self.build_dataset(self.trainset, mode="train", batch=16)
         # 4 for mosaic augmentation
-        max_num_obj = max(len(l["cls"]) for l in train_dataset.labels) * 4
+        max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4
        return super().auto_batch(max_num_obj)
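The rename from `l` to `label` is cosmetic (an ambiguous single-letter name); the heuristic itself is unchanged. For reference, a toy illustration of the computation with made-up labels:

    labels = [{"cls": [0, 1]}, {"cls": [2]}, {"cls": [0, 0, 5]}]  # toy dataset labels
    max_num_obj = max(len(label["cls"]) for label in labels) * 4  # x4 because mosaic stitches four images
    print(max_num_obj)  # 12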
ultralytics/nn/modules/head.py CHANGED
@@ -296,7 +296,7 @@ class Classify(nn.Module):
         if isinstance(x, list):
             x = torch.cat(x, 1)
         x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
-        return x if self.training else x.softmax(1)
+        return x
 
 
 class WorldDetect(Detect):
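Taken together with the ClassificationPredictor hunk above, this moves the softmax out of the `Classify` head: the head now returns raw logits in eval mode too, and the predictor normalizes each image's scores itself. A hedged illustration with example values:

    import torch

    logits = torch.tensor([2.0, 0.5, -1.0])  # per-image class scores (example values)
    probs = logits.softmax(0)  # what the predictor now passes to Results
    print(probs.sum())  # tensor(1.) -- a valid probability distribution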
ultralytics/solutions/__init__.py CHANGED
@@ -10,6 +10,7 @@ from .queue_management import QueueManager
 from .region_counter import RegionCounter
 from .speed_estimation import SpeedEstimator
 from .streamlit_inference import inference
+from .trackzone import TrackZone
 
 __all__ = (
     "AIGym",
@@ -23,4 +24,5 @@ __all__ = (
     "Analytics",
     "inference",
     "RegionCounter",
+    "TrackZone",
 )
ultralytics/solutions/ai_gym.py CHANGED
@@ -71,7 +71,7 @@ class AIGym(BaseSolution):
         >>> processed_image = gym.monitor(image)
         """
         # Extract tracks
-        tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"])[0]
+        tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"], **self.track_add_args)[0]
 
         if tracks.boxes.id is not None:
             # Extract and check keypoints
ultralytics/solutions/solutions.py CHANGED
@@ -74,6 +74,10 @@ class BaseSolution:
         self.model = YOLO(self.CFG["model"])
         self.names = self.model.names
 
+        self.track_add_args = {  # Tracker additional arguments for advance configuration
+            k: self.CFG[k] for k in ["verbose", "iou", "conf", "device", "max_det", "half", "tracker"]
+        }
+
         if IS_CLI and self.CFG["source"] is None:
             d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
             LOGGER.warning(f"⚠️ WARNING: source not provided. using default source {ASSETS_URL}/{d_s}")
@@ -98,7 +102,7 @@ class BaseSolution:
         >>> frame = cv2.imread("path/to/image.jpg")
         >>> solution.extract_tracks(frame)
         """
-        self.tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"])
+        self.tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"], **self.track_add_args)
 
         # Extract tracks for OBB or object detection
         self.track_data = self.tracks[0].obb or self.tracks[0].boxes
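A hedged sketch of the forwarding pattern introduced here: a fixed set of config keys is collected once and splatted into every `model.track()` call, so solution arguments such as `conf`, `iou`, or a custom `tracker` now reach the tracker. The CFG values below are illustrative, not the package's defaults:

    CFG = {"verbose": True, "iou": 0.7, "conf": 0.25, "device": None,
           "max_det": 300, "half": False, "tracker": "botsort.yaml", "classes": None}

    track_add_args = {k: CFG[k] for k in ["verbose", "iou", "conf", "device", "max_det", "half", "tracker"]}

    # model.track(source=frame, persist=True, classes=CFG["classes"], **track_add_args)
    # expands to model.track(..., verbose=True, iou=0.7, conf=0.25, device=None, max_det=300, ...)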
ultralytics/solutions/trackzone.py ADDED
@@ -0,0 +1,68 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import cv2
+import numpy as np
+
+from ultralytics.solutions.solutions import BaseSolution
+from ultralytics.utils.plotting import Annotator, colors
+
+
+class TrackZone(BaseSolution):
+    """
+    A class to manage region-based object tracking in a video stream.
+
+    This class extends the BaseSolution class and provides functionality for tracking objects within a specific region
+    defined by a polygonal area. Objects outside the region are excluded from tracking. It supports dynamic initialization
+    of the region, allowing either a default region or a user-specified polygon.
+
+    Attributes:
+        region (ndarray): The polygonal region for tracking, represented as a convex hull.
+
+    Methods:
+        trackzone: Processes each frame of the video, applying region-based tracking.
+
+    Examples:
+        >>> tracker = TrackZone()
+        >>> frame = cv2.imread("frame.jpg")
+        >>> processed_frame = tracker.trackzone(frame)
+        >>> cv2.imshow("Tracked Frame", processed_frame)
+    """
+
+    def __init__(self, **kwargs):
+        """Initializes the TrackZone class for tracking objects within a defined region in video streams."""
+        super().__init__(**kwargs)
+        default_region = [(150, 150), (1130, 150), (1130, 570), (150, 570)]
+        self.region = cv2.convexHull(np.array(self.region or default_region, dtype=np.int32))
+
+    def trackzone(self, im0):
+        """
+        Processes the input frame to track objects within a defined region.
+
+        This method initializes the annotator, creates a mask for the specified region, extracts tracks
+        only from the masked area, and updates tracking information. Objects outside the region are ignored.
+
+        Args:
+            im0 (numpy.ndarray): The input image or frame to be processed.
+
+        Returns:
+            (numpy.ndarray): The processed image with tracking id and bounding boxes annotations.
+
+        Examples:
+            >>> tracker = TrackZone()
+            >>> frame = cv2.imread("path/to/image.jpg")
+            >>> tracker.trackzone(frame)
+        """
+        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
+        # Create a mask for the region and extract tracks from the masked image
+        masked_frame = cv2.bitwise_and(im0, im0, mask=cv2.fillPoly(np.zeros_like(im0[:, :, 0]), [self.region], 255))
+        self.extract_tracks(masked_frame)
+
+        cv2.polylines(im0, [self.region], isClosed=True, color=(255, 255, 255), thickness=self.line_width * 2)
+
+        # Iterate over boxes, track ids, classes indexes list and draw bounding boxes
+        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+            self.annotator.box_label(box, label=f"{self.names[cls]}:{track_id}", color=colors(track_id, True))
+
+        self.display_output(im0)  # display output with base class function
+
+        return im0  # return output image for more usage
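A hedged usage sketch for the new solution; the model name, video path, and polygon are illustrative, and the constructor kwargs assume the BaseSolution config pattern shown earlier:

    import cv2
    from ultralytics import solutions

    trackzone = solutions.TrackZone(
        model="yolo11n.pt",  # any detection model (illustrative)
        region=[(150, 150), (1130, 150), (1130, 570), (150, 570)],  # polygon to track inside
    )

    cap = cv2.VideoCapture("path/to/video/file.mp4")
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        annotated = trackzone.trackzone(frame)  # objects outside the polygon are ignored
    cap.release()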
ultralytics/utils/ops.py CHANGED
@@ -75,9 +75,8 @@ def segment2box(segment, width=640, height=640):
         (np.ndarray): the minimum and maximum x and y values of the segment.
     """
     x, y = segment.T  # segment xy
-    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
-    x = x[inside]
-    y = y[inside]
+    x = x.clip(0, width)
+    y = y.clip(0, height)
     return (
         np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype)
         if any(x)
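The behavioral change: out-of-bounds polygon points were previously discarded, which could leave too few points to form a box; they are now clamped to the image bounds so every point still contributes. A small numpy comparison (values illustrative):

    import numpy as np

    x, y = np.array([[-20.0, 700.0, 100.0], [50.0, 300.0, 200.0]])  # point coords in a 640x640 image

    # Old behavior: drop out-of-bounds points entirely
    inside = (x >= 0) & (y >= 0) & (x <= 640) & (y <= 640)
    print(x[inside], y[inside])  # [100.] [200.] -- only one point survives

    # New behavior: clamp coordinates into the image
    print(x.clip(0, 640), y.clip(0, 640))  # [  0. 640. 100.] [ 50. 300. 200.]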
ultralytics/utils/plotting.py CHANGED
@@ -238,7 +238,16 @@
         }
 
     def get_txt_color(self, color=(128, 128, 128), txt_color=(255, 255, 255)):
-        """Assign text color based on background color."""
+        """
+        Assign text color based on background color.
+
+        Args:
+            color (tuple, optional): The background color of the rectangle for text (B, G, R).
+            txt_color (tuple, optional): The color of the text (R, G, B).
+
+        Returns:
+            txt_color (tuple): Text color for label.
+        """
         if color in self.dark_colors:
             return 104, 31, 17
         elif color in self.light_colors:
@@ -544,7 +553,9 @@
             bbox (tuple): Bounding box coordinates in the format (x_min, y_min, x_max, y_max).
 
         Returns:
-            angle (degree): Degree value of angle between three points
+            width (float): Width of the bounding box.
+            height (float): Height of the bounding box.
+            area (float): Area enclosed by the bounding box.
         """
         x_min, y_min, x_max, y_max = bbox
         width = x_max - x_min
@@ -791,19 +802,52 @@
             cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2)
         text_size, _ = cv2.getTextSize(label, 0, self.sf, self.tf)
 
-        cv2.rectangle(
-            self.im,
-            (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10),
-            (int(mask[0][0]) + text_size[0] // 2 + 10, int(mask[0][1] + 10)),
-            mask_color,
-            -1,
-        )
-
         if label:
+            cv2.rectangle(
+                self.im,
+                (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10),
+                (int(mask[0][0]) + text_size[0] // 2 + 10, int(mask[0][1] + 10)),
+                mask_color,
+                -1,
+            )
             cv2.putText(
                 self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
             )
 
+    def sweep_annotator(self, line_x=0, line_y=0, label=None, color=(221, 0, 186), txt_color=(255, 255, 255)):
+        """
+        Function for drawing a sweep annotation line and an optional label.
+
+        Args:
+            line_x (int): The x-coordinate of the sweep line.
+            line_y (int): The y-coordinate limit of the sweep line.
+            label (str, optional): Text label to be drawn in center of sweep line. If None, no label is drawn.
+            color (tuple): RGB color for the line and label background.
+            txt_color (tuple): RGB color for the label text.
+        """
+        # Draw the sweep line
+        cv2.line(self.im, (line_x, 0), (line_x, line_y), color, self.tf * 2)
+
+        # Draw label, if provided
+        if label:
+            (text_width, text_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf, self.tf)
+            cv2.rectangle(
+                self.im,
+                (line_x - text_width // 2 - 10, line_y // 2 - text_height // 2 - 10),
+                (line_x + text_width // 2 + 10, line_y // 2 + text_height // 2 + 10),
+                color,
+                -1,
+            )
+            cv2.putText(
+                self.im,
+                label,
+                (line_x - text_width // 2, line_y // 2 + text_height // 2),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                self.sf,
+                txt_color,
+                self.tf,
+            )
+
     def plot_distance_and_line(
         self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
     ):
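A hedged usage sketch for the new `sweep_annotator()`; the canvas and coordinates are illustrative. The line runs from `(line_x, 0)` down to `(line_x, line_y)`, with the optional label centered on it:

    import numpy as np
    from ultralytics.utils.plotting import Annotator

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # blank canvas
    annotator = Annotator(frame)
    annotator.sweep_annotator(line_x=640, line_y=720, label="sweep")  # vertical sweep line at x=640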
ultralytics/utils/torch_utils.py CHANGED
@@ -301,28 +301,22 @@ def fuse_deconv_and_bn(deconv, bn):
 
 
 def model_info(model, detailed=False, verbose=True, imgsz=640):
-    """
-    Model information.
-
-    imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320].
-    """
+    """Print and return detailed model information layer by layer."""
     if not verbose:
         return
     n_p = get_num_params(model)  # number of parameters
     n_g = get_num_gradients(model)  # number of gradients
     n_l = len(list(model.modules()))  # number of layers
     if detailed:
-        LOGGER.info(
-            f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}"
-        )
+        LOGGER.info(f"{'layer':>5}{'name':>40}{'gradient':>10}{'parameters':>12}{'shape':>20}{'mu':>10}{'sigma':>10}")
         for i, (name, p) in enumerate(model.named_parameters()):
             name = name.replace("module_list.", "")
             LOGGER.info(
-                "%5g %40s %9s %12g %20s %10.3g %10.3g %10s"
-                % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype)
+                f"{i:>5g}{name:>40s}{p.requires_grad!r:>10}{p.numel():>12g}{str(list(p.shape)):>20s}"
+                f"{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype):>15s}"
             )
 
-    flops = get_flops(model, imgsz)
+    flops = get_flops(model, imgsz)  # imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]
     fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else ""
     fs = f", {flops:.1f} GFLOPs" if flops else ""
     yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "")
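The logging change swaps %-style formatting for f-strings with explicit field widths (the separating spaces are folded into slightly wider fields, e.g. gradient 9 to 10). A hedged before/after with toy values:

    i, name, numel = 0, "model.0.conv.weight", 464
    print("%5g %40s %12g" % (i, name, numel))  # old %-style columns
    print(f"{i:>5g}{name:>40s}{numel:>12g}")  # new f-string columns, same alignment intent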
ultralytics-8.3.38.dist-info/METADATA → ultralytics-8.3.40.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.38
+Version: 8.3.40
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -115,7 +115,9 @@ We hope that the resources here will help you get the most out of YOLO. Please b
 
 To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
 
-<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="YOLO11 performance plots"></a>
+<a href="https://docs.ultralytics.com/models/yolo11/" target="_blank">
+  <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="YOLO11 performance plots">
+</a>
 
 <div align="center">
   <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
@@ -166,7 +168,7 @@ YOLO may be used directly in the Command Line Interface (CLI) with a `yolo` comm
     yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
 ```
 
-`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLO [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
+`yolo` can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLO [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
 
 ### Python
 
@@ -203,11 +205,13 @@ See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more exam
 
 ## <div align="center">Models</div>
 
-YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.
+YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. All [Models](https://docs.ultralytics.com/models/) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
 
-<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png" alt="Ultralytics YOLO supported tasks">
-
-All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
+<a href="https://docs.ultralytics.com/tasks/" target="_blank">
+  <img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-tasks-banner.avif" alt="Ultralytics YOLO supported tasks">
+</a>
+<br>
+<br>
 
 <details open><summary>Detection (COCO)</summary>
 
@@ -298,9 +302,9 @@ See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with
 
 Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
 
-<br>
 <a href="https://www.ultralytics.com/hub" target="_blank">
-<img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations"></a>
+  <img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations">
+</a>
 <br>
 <br>
 
ultralytics-8.3.38.dist-info/RECORD → ultralytics-8.3.40.dist-info/RECORD CHANGED
@@ -7,10 +7,10 @@ tests/test_exports.py,sha256=1MvhcQ2qHdbJImHII-bFarcaIcm-kPlEK-OdFLxnj7o,8769
 tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
 tests/test_python.py,sha256=I1RRdCwLdrc3jX06huVxct8HX8ccQOmQgVpuEflRl0U,23560
 tests/test_solutions.py,sha256=HlDe-XOgBX0k1cLhRTAhhawMHk6p-5dg5xl2AIRjfdk,3790
-ultralytics/__init__.py,sha256=E7u0cCuS67ALDjZVP9cgv7qP2VVAgLMVCk0-Vhgc0ug,681
+ultralytics/__init__.py,sha256=g043TDkiEqdLx6EEsZlBx5SW4RgiiHq5CUtD78wMHIo,681
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=4O7zcTGSWzT1O4zg71f7XSh-PywdeJ4PrBiuEZiBeiM,38771
+ultralytics/cfg/__init__.py,sha256=LgTvW_Rd_phZoLzC8p5UEh8o7pIjx9xc67I91Xh5llY,38910
 ultralytics/cfg/default.yaml,sha256=FcXbvTXXvMpssk9fSwdlnVTtyqfmlYE9gAcHsf0OMf8,8347
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
@@ -101,7 +101,7 @@ ultralytics/data/split_dota.py,sha256=eFafJ7Vg52wj6KDCHFJAf1tKzyPD5YaPB8kM4VX5Ae
 ultralytics/data/utils.py,sha256=bmWEIrdogj4kssZQSJdSbIF8QsJU00lo-EY-Mgcqv4M,31073
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/engine/exporter.py,sha256=137idYe5ct3KuJBpjjjNRUAb6Gx0PeETKm21GZm43Nk,66972
-ultralytics/engine/model.py,sha256=VthPB0IK4tsT0VAmu8Jz7q-crWsggCLFH17NwwIxnOo,51962
+ultralytics/engine/model.py,sha256=SDlZw6yvbNWHzbPN5VjJYx6qM1v1iZHVKAoa-PgJ8ig,53010
 ultralytics/engine/predictor.py,sha256=nO6lzxG75GXyQsUNEimLk5MLfcMwl8AkRAaoYMPwQug,17687
 ultralytics/engine/results.py,sha256=a1XFZRPwqgKDBOEAibHuT9nP2xefLiWVsMoBJbcr4iA,75058
 ultralytics/engine/trainer.py,sha256=Cd95QLJ3C4fncoOX1YgauLA9aWVYRd1G6x0Au2xX86k,37335
@@ -147,12 +147,12 @@ ultralytics/models/utils/ops.py,sha256=aPAPwWMLJLWq-I04wS_YrqJ_Vy_xBXtqQu6Aox15Y
 ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xskBM8gEk,247
 ultralytics/models/yolo/model.py,sha256=E4TuJZZux0L_SG7sC0SDgxrmeBvuZRpxprPrCC26lvs,4233
 ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
-ultralytics/models/yolo/classify/predict.py,sha256=0CEJ4B4fXbOMUnJy79gRvG-qdszOzTSLOb1xxkgsKek,2444
+ultralytics/models/yolo/classify/predict.py,sha256=ungApAXm_KkLMMlz4MQpmL5IFzAKX69wLYHSliSR7VA,2455
 ultralytics/models/yolo/classify/train.py,sha256=3aYzLDqX_03xR1xqlTn1TxA4t58cCIGI8RCtWheTrm0,6273
 ultralytics/models/yolo/classify/val.py,sha256=Tzizhp3ebzPvwJejrE8tb-TuXw4MdkEI9mOANV74eXQ,4909
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
 ultralytics/models/yolo/detect/predict.py,sha256=-uZFLutxGYZX47RANcaxC-LFStRbv0nBv_8-ypadQoI,1471
-ultralytics/models/yolo/detect/train.py,sha256=f-QwAxaRsdtY-KY9femcWLQt1wB1tAHu-QV0a13LOGA,6702
+ultralytics/models/yolo/detect/train.py,sha256=LKCcQTAsXm3-TPK2zkE1YJhbAcS65qhY2-MSlj-kB4w,6710
 ultralytics/models/yolo/detect/val.py,sha256=Ydf0W7FPf-nSz_lOCTcIrlDqhG_kOK8Od6eYsGQDx9U,15144
 ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
 ultralytics/models/yolo/obb/predict.py,sha256=VxpKCKV5dWnOr0GyV1rJGH5SzzRouCYW_8T26xJ8MU8,2037
@@ -176,11 +176,11 @@ ultralytics/nn/modules/__init__.py,sha256=xhW2BennT9U_VaMXVpRu-bdLgp1BXt9L8mkIUB
 ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
 ultralytics/nn/modules/block.py,sha256=Rk9CT23Bpqpo3LYRuQePYML6HAvsM20p2QlFTCaYFH4,41851
 ultralytics/nn/modules/conv.py,sha256=DPLZCRno_ZOjsuajAXIq-GbJdOh2jp1WayRXfDEd8z8,12724
-ultralytics/nn/modules/head.py,sha256=KCO-qarg2K7uJqQ7L5zVJ4-viiHqmu4bzbSgAw3L_nk,27815
+ultralytics/nn/modules/head.py,sha256=Bg_WXtvO004fAKF7qExFreywWFrgQoc5Tc3fA9KVoL4,27780
 ultralytics/nn/modules/transformer.py,sha256=tGiK8NmPfswwW1rbF21r5ILUkkZQ6Nk4s8j16vFBmps,18069
 ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy90,3195
-ultralytics/solutions/__init__.py,sha256=4i8QCABfxRWPYUEdb58Pbg1xvR_Ra1u-ifB9fx21UUs,711
-ultralytics/solutions/ai_gym.py,sha256=Jb9Rbd9gOOj2ox4Q5mqalCdvg3RMXA6Cxe5kS18IFgA,5232
+ultralytics/solutions/__init__.py,sha256=lpTOauaJf7dFlymZB9lHiH_feDlS8Vlrp4TC7GuM8SU,761
+ultralytics/solutions/ai_gym.py,sha256=Jv8ERJqcSjQeFh78zCAH2XnXoTIngCK7X_7XOQ6cPzs,5255
 ultralytics/solutions/analytics.py,sha256=C57pIghXeKN8hul8QOV7W9YDMpfFfSfPTBb-lE9HeAc,11535
 ultralytics/solutions/distance_calculation.py,sha256=KN3CC-dm2dTQylj79IrifCJT8ZhE7hc2EweH3KK31mE,5461
 ultralytics/solutions/heatmap.py,sha256=-1VtMCJRmpHnLqgna0i2HOBsxNoqFernzpKQnICngUM,5449
@@ -188,9 +188,10 @@ ultralytics/solutions/object_counter.py,sha256=MuxQG4a22458WwciAB96m5AxVXwH98AIW
 ultralytics/solutions/parking_management.py,sha256=Hh28FTuP_TaO7x5RadYm-JSVJuEu1M2SSgHqgdYYtr8,11198
 ultralytics/solutions/queue_management.py,sha256=D9TqwJSVrZQFxp_M8O62WfBAxkAuDWWnXe7FFmnp7_w,4881
 ultralytics/solutions/region_counter.py,sha256=w0c0Sz9XG6rwzr5nA6nb1zFW8IVkTQuatfZNBtOik68,4947
-ultralytics/solutions/solutions.py,sha256=HC5008BgQmWTw4aY8VgTEQioUzvuZxJebIk35E5HdcA,7275
+ultralytics/solutions/solutions.py,sha256=BqkMDAq9A8kqL4TkjHLkMYXrJAdZPK-VAdNSObS1kNQ,7502
 ultralytics/solutions/speed_estimation.py,sha256=A10DmuZlGkoZUyfHhZWcDRjj1-9GXiDhEjyBbAzfaDs,4936
 ultralytics/solutions/streamlit_inference.py,sha256=w4dnvSv2FOrpji9W1Ir86phka3OXc7jd_38-OCbQdZw,5701
+ultralytics/solutions/trackzone.py,sha256=jsSuvW3ExoQl5JyUF-5ZLQMou8h4qbkCGGGP831cHSY,2952
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
 ultralytics/trackers/basetrack.py,sha256=kPOeAX2ihvANtQJk-zUsN0C7JjhlJbx0UhjaCFk_ovQ,4423
 ultralytics/trackers/bot_sort.py,sha256=766grVQExvonb087Wy-SB32TSwYYsTEM22yoWeQ_EEo,10494
@@ -211,11 +212,11 @@ ultralytics/utils/files.py,sha256=uiXQSVABJRoI5ImnM6ndEBIFbECfksmWNEldBg8GnSo,82
 ultralytics/utils/instance.py,sha256=EnLp3hCihG5-32eGSMmjzspbxZsDvbqEOs-X0kcvxwQ,16252
 ultralytics/utils/loss.py,sha256=jUCiUcxgF6jGxGdvIcupeMidLoF-gI7s1tcJoQCZbnk,34113
 ultralytics/utils/metrics.py,sha256=toJlyA0W-xtChqAtIDiHISolxc_30NP33ezxWQ1rnPc,53804
-ultralytics/utils/ops.py,sha256=L9DEpuJOdIiysZaypDy-w8r3VWg6nJChGnORBBJo4y8,33100
+ultralytics/utils/ops.py,sha256=ojw9AT7HI1_SgmYIFWrFM7QTs7zvf0QPsSLrMgAq2uI,33051
 ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
-ultralytics/utils/plotting.py,sha256=6Iwh2dn6hDhaTk4hlZ14fRYKhqVnr7f1NNUw2Oq3PWk,61115
+ultralytics/utils/plotting.py,sha256=GmBkN7e1skJK2cZ2hzKBXQCb1gayWTrA9TLHw0q07UM,62948
 ultralytics/utils/tal.py,sha256=thD_AEhVmhaZqmS5szZMvpKO-RKOeZwfX1BYAhdnA0o,18470
-ultralytics/utils/torch_utils.py,sha256=57y3iY2ke-E-v7MGMN2nPPAEwqEBsf0rjHEOfo9VPBc,32068
+ultralytics/utils/torch_utils.py,sha256=ddWR82FkxSiFQqr_uzqxQvir-RACvCxsQbqphKSFTok,32084
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=K09-z5k1E4ZriSKoWdwQrJ2PJ2fY1ez3-b2R6aKPTqM,6198
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -229,9 +230,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTg
 ultralytics/utils/callbacks/raytune.py,sha256=Ck_yFzg7UZXiDWrLHaltjQybzVWSFDfzpdrx9ZYTRfI,700
 ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
 ultralytics/utils/callbacks/wb.py,sha256=sizfTa-xI9k2pnDSP_Q9pHZEFwcl__gSFM0AcneuRpY,7058
-ultralytics-8.3.38.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.38.dist-info/METADATA,sha256=pMzt-gXnvYy-Am3XsD_H_io7DnC1HYF7nZ85sON6fRo,35201
-ultralytics-8.3.38.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-ultralytics-8.3.38.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.38.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.38.dist-info/RECORD,,
+ultralytics-8.3.40.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.40.dist-info/METADATA,sha256=765LKLYZ8BHcGLWpKO5pQFPka_hilm8fl_96W_xvp2c,35332
+ultralytics-8.3.40.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ultralytics-8.3.40.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.40.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.40.dist-info/RECORD,,