ultralytics 8.2.57__py3-none-any.whl → 8.2.58__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of ultralytics might be problematic.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.57"
+__version__ = "8.2.58"
 
 import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -512,14 +512,14 @@ def handle_yolo_settings(args: List[str]) -> None:
 
 def handle_explorer():
     """Open the Ultralytics Explorer GUI for dataset exploration and analysis."""
-    checks.check_requirements("streamlit")
+    checks.check_requirements("streamlit>=1.29.0")
     LOGGER.info("💡 Loading Explorer dashboard...")
     subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"])
 
 
 def handle_streamlit_inference():
     """Open the Ultralytics Live Inference streamlit app for real time object detection."""
-    checks.check_requirements(["streamlit", "opencv-python", "torch"])
+    checks.check_requirements("streamlit>=1.29.0")
     LOGGER.info("💡 Loading Ultralytics Live Inference app...")
     subprocess.run(["streamlit", "run", ROOT / "solutions/streamlit_inference.py", "--server.headless", "true"])
 
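Both CLI entry points now pin the same minimum Streamlit version instead of only checking that the packages are importable. A minimal sketch of invoking the pinned check directly, assuming `check_requirements` keeps its behavior of verifying (and attempting to install) the given specifier:

```python
# Hedged sketch: run the same pinned requirement check the updated handlers use.
# check_requirements is the real ultralytics helper shown in the hunk above;
# its auto-install behavior may vary by environment.
from ultralytics.utils.checks import check_requirements

check_requirements("streamlit>=1.29.0")  # fails or installs if streamlit < 1.29.0
```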
ultralytics/models/fastsam/prompt.py CHANGED
@@ -7,6 +7,7 @@ import cv2
 import numpy as np
 import torch
 from PIL import Image
+from torch import Tensor
 
 from ultralytics.utils import TQDM, checks
 
@@ -249,7 +250,7 @@ class FastSAMPrompt:
         ax.imshow(show)
 
     @torch.no_grad()
-    def retrieve(self, model, preprocess, elements, search_text: str, device) -> int:
+    def retrieve(self, model, preprocess, elements, search_text: str, device) -> Tensor:
         """Processes images and text with a model, calculates similarity, and returns softmax score."""
         preprocessed_images = [preprocess(image).to(device) for image in elements]
         tokenized_text = self.clip.tokenize([search_text]).to(device)
@@ -269,19 +270,16 @@ class FastSAMPrompt:
         mask_h, mask_w = annotations[0]["segmentation"].shape
         if ori_w != mask_w or ori_h != mask_h:
             image = image.resize((mask_w, mask_h))
-        cropped_boxes = []
         cropped_images = []
-        not_crop = []
         filter_id = []
         for _, mask in enumerate(annotations):
             if np.sum(mask["segmentation"]) <= 100:
                 filter_id.append(_)
                 continue
             bbox = self._get_bbox_from_mask(mask["segmentation"])  # bbox from mask
-            cropped_boxes.append(self._segment_image(image, bbox))  # save cropped image
-            cropped_images.append(bbox)  # save cropped image bbox
+            cropped_images.append(self._segment_image(image, bbox))  # save cropped image
 
-        return cropped_boxes, cropped_images, not_crop, filter_id, annotations
+        return cropped_images, filter_id, annotations
 
     def box_prompt(self, bbox):
         """Modifies the bounding box properties and calculates IoU between masks and bounding box."""
@@ -341,11 +339,10 @@ class FastSAMPrompt:
         """Processes a text prompt, applies it to existing results and returns the updated results."""
         if self.results[0].masks is not None:
             format_results = self._format_results(self.results[0], 0)
-            cropped_boxes, cropped_images, not_crop, filter_id, annotations = self._crop_image(format_results)
+            cropped_images, filter_id, annotations = self._crop_image(format_results)
             clip_model, preprocess = self.clip.load("ViT-B/32", device=self.device)
-            scores = self.retrieve(clip_model, preprocess, cropped_boxes, text, device=self.device)
-            max_idx = scores.argsort()
-            max_idx = max_idx[-1]
+            scores = self.retrieve(clip_model, preprocess, cropped_images, text, device=self.device)
+            max_idx = torch.argmax(scores)
             max_idx += sum(np.array(filter_id) <= int(max_idx))
             self.results[0].masks.data = torch.tensor(np.array([annotations[max_idx]["segmentation"]]))
         return self.results
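The rewritten `text_prompt` also replaces a full `argsort` with a single `argmax` when picking the best-matching mask. A small standalone check of the equivalence, assuming distinct scores (on exact ties the two patterns may pick different indices):

```python
import torch

scores = torch.softmax(torch.randn(8), dim=0)  # stand-in for CLIP similarity scores
old_idx = scores.argsort()[-1]   # old pattern: full sort, take the largest
new_idx = torch.argmax(scores)   # new pattern: single reduction, no sort
assert old_idx.item() == new_idx.item()
```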
ultralytics/models/yolo/detect/val.py CHANGED
@@ -202,13 +202,18 @@ class DetectionValidator(BaseValidator):
         Return correct prediction matrix.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class.
-            labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                Each label is of the format: class, x1, y1, x2, y2.
+            detections (torch.Tensor): Tensor of shape (N, 6) representing detections where each detection is
+                (x1, y1, x2, y2, conf, class).
+            gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground-truth bounding box coordinates. Each
+                bounding box is of the format: (x1, y1, x2, y2).
+            gt_cls (torch.Tensor): Tensor of shape (M,) representing target class indices.
 
         Returns:
-            (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+            (torch.Tensor): Correct prediction matrix of shape (N, 10) for 10 IoU levels.
+
+        Note:
+            The function does not return any value directly usable for metrics calculation. Instead, it provides an
+            intermediate representation used for evaluating predictions against ground truth.
         """
         iou = box_iou(gt_bboxes, detections[:, :4])
         return self.match_predictions(detections[:, 5], gt_cls, iou)
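The method body pairs every ground-truth box against every detection before matching. A hedged illustration of that IoU step, using the `box_iou` helper from `ultralytics.utils.metrics` that the hunk calls:

```python
import torch
from ultralytics.utils.metrics import box_iou

gt_bboxes = torch.tensor([[0.0, 0.0, 10.0, 10.0]])           # (M, 4) xyxy ground truth
detections = torch.tensor([[1.0, 1.0, 9.0, 9.0, 0.9, 0.0]])  # (N, 6) xyxy + conf + class
iou = box_iou(gt_bboxes, detections[:, :4])  # pairwise (M, N) IoU matrix
print(iou.shape)  # torch.Size([1, 1])
```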
ultralytics/models/yolo/obb/val.py CHANGED
@@ -52,17 +52,29 @@ class OBBValidator(DetectionValidator):
 
     def _process_batch(self, detections, gt_bboxes, gt_cls):
         """
-        Return correct prediction matrix.
+        Perform computation of the correct prediction matrix for a batch of detections and ground truth bounding boxes.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 7] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class, angle.
-            gt_bboxes (torch.Tensor): Tensor of shape [M, 5] representing rotated boxes.
-                Each box is of the format: x1, y1, x2, y2, angle.
-            labels (torch.Tensor): Tensor of shape [M] representing labels.
+            detections (torch.Tensor): A tensor of shape (N, 7) representing the detected bounding boxes and associated
+                data. Each detection is represented as (x1, y1, x2, y2, conf, class, angle).
+            gt_bboxes (torch.Tensor): A tensor of shape (M, 5) representing the ground truth bounding boxes. Each box is
+                represented as (x1, y1, x2, y2, angle).
+            gt_cls (torch.Tensor): A tensor of shape (M,) representing class labels for the ground truth bounding boxes.
 
         Returns:
-            (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+            (torch.Tensor): The correct prediction matrix with shape (N, 10), which includes 10 IoU (Intersection over
+                Union) levels for each detection, indicating the accuracy of predictions compared to the ground truth.
+
+        Example:
+            ```python
+            detections = torch.rand(100, 7)  # 100 sample detections
+            gt_bboxes = torch.rand(50, 5)  # 50 sample ground truth boxes
+            gt_cls = torch.randint(0, 5, (50,))  # 50 ground truth class labels
+            correct_matrix = OBBValidator._process_batch(detections, gt_bboxes, gt_cls)
+            ```
+
+        Note:
+            This method relies on `batch_probiou` to calculate IoU between detections and ground truth bounding boxes.
         """
         iou = batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
         return self.match_predictions(detections[:, 5], gt_cls, iou)
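Before computing `batch_probiou`, the body rebuilds rotated boxes from the detection rows by keeping the four coordinates plus the trailing angle and dropping conf/class. A standalone illustration of that slice-and-concat:

```python
import torch

detections = torch.rand(3, 7)  # rows: (x1, y1, x2, y2, conf, class, angle) per the docstring above
rboxes = torch.cat([detections[:, :4], detections[:, -1:]], dim=-1)  # coords + angle, conf/class dropped
print(rboxes.shape)  # torch.Size([3, 5])
```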
ultralytics/models/yolo/pose/val.py CHANGED
@@ -152,19 +152,34 @@ class PoseValidator(DetectionValidator):
 
     def _process_batch(self, detections, gt_bboxes, gt_cls, pred_kpts=None, gt_kpts=None):
         """
-        Return correct prediction matrix.
+        Return correct prediction matrix by computing Intersection over Union (IoU) between detections and ground truth.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class.
-            labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                Each label is of the format: class, x1, y1, x2, y2.
-            pred_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing predicted keypoints.
-                51 corresponds to 17 keypoints each with 3 values.
-            gt_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing ground truth keypoints.
+            detections (torch.Tensor): Tensor with shape (N, 6) representing detection boxes and scores, where each
+                detection is of the format (x1, y1, x2, y2, conf, class).
+            gt_bboxes (torch.Tensor): Tensor with shape (M, 4) representing ground truth bounding boxes, where each
+                box is of the format (x1, y1, x2, y2).
+            gt_cls (torch.Tensor): Tensor with shape (M,) representing ground truth class indices.
+            pred_kpts (torch.Tensor | None): Optional tensor with shape (N, 51) representing predicted keypoints, where
+                51 corresponds to 17 keypoints each having 3 values.
+            gt_kpts (torch.Tensor | None): Optional tensor with shape (N, 51) representing ground truth keypoints.
 
         Returns:
-            torch.Tensor: Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+            torch.Tensor: A tensor with shape (N, 10) representing the correct prediction matrix for 10 IoU levels,
+                where N is the number of detections.
+
+        Example:
+            ```python
+            detections = torch.rand(100, 6)  # 100 predictions: (x1, y1, x2, y2, conf, class)
+            gt_bboxes = torch.rand(50, 4)  # 50 ground truth boxes: (x1, y1, x2, y2)
+            gt_cls = torch.randint(0, 2, (50,))  # 50 ground truth class indices
+            pred_kpts = torch.rand(100, 51)  # 100 predicted keypoints
+            gt_kpts = torch.rand(50, 51)  # 50 ground truth keypoints
+            correct_preds = _process_batch(detections, gt_bboxes, gt_cls, pred_kpts, gt_kpts)
+            ```
+
+        Note:
+            `0.53` scale factor used in area computation is referenced from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384.
         """
         if pred_kpts is not None and gt_kpts is not None:
             # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
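The newly documented `0.53` factor scales the ground-truth box area used for the keypoint OKS-style IoU. A hedged sketch of that area term (an illustration of the arithmetic, not the method's exact code):

```python
import torch

gt_bboxes = torch.tensor([[0.0, 0.0, 10.0, 20.0]])  # (M, 4) xyxy
wh = gt_bboxes[:, 2:] - gt_bboxes[:, :2]             # per-box width and height
area = wh.prod(dim=1) * 0.53                         # scaled area fed to the keypoint IoU
print(area)  # tensor([106.])
```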
ultralytics/models/yolo/segment/val.py CHANGED
@@ -164,14 +164,34 @@ class SegmentationValidator(DetectionValidator):
 
     def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
         """
-        Return correct prediction matrix.
+        Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
 
         Args:
-            detections (array[N, 6]), x1, y1, x2, y2, conf, class
-            labels (array[M, 5]), class, x1, y1, x2, y2
+            detections (torch.Tensor): Tensor of shape (N, 6) representing detected bounding boxes and
+                associated confidence scores and class indices. Each row is of the format [x1, y1, x2, y2, conf, class].
+            gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground truth bounding box coordinates.
+                Each row is of the format [x1, y1, x2, y2].
+            gt_cls (torch.Tensor): Tensor of shape (M,) representing ground truth class indices.
+            pred_masks (torch.Tensor | None): Tensor representing predicted masks, if available. The shape should
+                match the ground truth masks.
+            gt_masks (torch.Tensor | None): Tensor of shape (M, H, W) representing ground truth masks, if available.
+            overlap (bool): Flag indicating if overlapping masks should be considered.
+            masks (bool): Flag indicating if the batch contains mask data.
 
         Returns:
-            correct (array[N, 10]), for 10 IoU levels
+            (torch.Tensor): A correct prediction matrix of shape (N, 10), where 10 represents different IoU levels.
+
+        Note:
+            - If `masks` is True, the function computes IoU between predicted and ground truth masks.
+            - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.
+
+        Example:
+            ```python
+            detections = torch.tensor([[25, 30, 200, 300, 0.8, 1], [50, 60, 180, 290, 0.75, 0]])
+            gt_bboxes = torch.tensor([[24, 29, 199, 299], [55, 65, 185, 295]])
+            gt_cls = torch.tensor([1, 0])
+            correct_preds = validator._process_batch(detections, gt_bboxes, gt_cls)
+            ```
         """
         if masks:
             if overlap:
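When `masks=True`, the correctness matrix is built from mask IoU rather than box IoU. A standalone sketch of pairwise mask IoU on flattened binary masks, assuming it mirrors the `mask_iou` helper in `ultralytics.utils.metrics`:

```python
import torch

def pairwise_mask_iou(m1: torch.Tensor, m2: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    """m1: (N, H*W), m2: (M, H*W) binary masks; returns the (N, M) IoU matrix."""
    m1, m2 = m1.float(), m2.float()
    inter = m1 @ m2.T  # intersections as dot products of {0, 1} vectors
    union = m1.sum(1)[:, None] + m2.sum(1)[None, :] - inter
    return inter / (union + eps)

a = torch.randint(0, 2, (2, 16))  # 2 predicted masks, flattened
b = torch.randint(0, 2, (3, 16))  # 3 ground truth masks, flattened
print(pairwise_mask_iou(a, b).shape)  # torch.Size([2, 3])
```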
ultralytics/solutions/parking_management.py CHANGED
@@ -1,11 +1,9 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
 import json
-from tkinter import filedialog, messagebox
 
 import cv2
 import numpy as np
-from PIL import Image, ImageTk
 
 from ultralytics.utils.checks import check_imshow, check_requirements
 from ultralytics.utils.plotting import Annotator
@@ -16,7 +14,7 @@ class ParkingPtsSelection:
         """Initializes the UI for selecting parking zone points in a tkinter window."""
         check_requirements("tkinter")
 
-        import tkinter as tk
+        import tkinter as tk  # scope for multi-environment compatibility
         self.tk = tk
         self.master = tk.Tk()
@@ -55,6 +53,10 @@ class ParkingPtsSelection:
 
     def upload_image(self):
         """Upload an image and resize it to fit canvas."""
+        from tkinter import filedialog
+
+        from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package
+
         self.image_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")])
         if not self.image_path:
             return
@@ -115,6 +117,8 @@ class ParkingPtsSelection:
 
     def remove_last_bounding_box(self):
         """Remove the last drawn bounding box from canvas."""
+        from tkinter import messagebox  # scope for multi-environment compatibility
+
         if self.bounding_boxes:
             self.bounding_boxes.pop()  # Remove the last bounding box
             self.canvas.delete("all")  # Clear the canvas
@@ -130,6 +134,8 @@ class ParkingPtsSelection:
 
     def save_to_json(self):
         """Saves rescaled bounding boxes to 'bounding_boxes.json' based on image-to-canvas size ratio."""
+        from tkinter import messagebox  # scope for multi-environment compatibility
+
         canvas_width, canvas_height = self.canvas.winfo_width(), self.canvas.winfo_height()
         width_scaling_factor = self.img_width / canvas_width
         height_scaling_factor = self.img_height / canvas_height
@@ -141,8 +147,8 @@ class ParkingPtsSelection:
                 rescaled_y = int(y * height_scaling_factor)
                 rescaled_box.append((rescaled_x, rescaled_y))
             bounding_boxes_data.append({"points": rescaled_box})
-        with open("bounding_boxes.json", "w") as json_file:
-            json.dump(bounding_boxes_data, json_file, indent=4)
+        with open("bounding_boxes.json", "w") as f:
+            json.dump(bounding_boxes_data, f, indent=4)
 
         messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
 
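Across this file the GUI dependencies now load inside the methods that use them, so `import ultralytics` keeps working on headless systems without tkinter or `PIL.ImageTk`. A minimal sketch of the pattern with a hypothetical helper:

```python
import json

def save_and_notify(data, path="bounding_boxes.json"):
    """Hypothetical helper demonstrating the deferred-import pattern above."""
    from tkinter import messagebox  # deferred: only required when a GUI session calls this

    with open(path, "w") as f:
        json.dump(data, f, indent=4)
    messagebox.showinfo("Success", f"Bounding boxes saved to {path}")
```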
@@ -187,11 +193,10 @@ class ParkingManagement:
         self.env_check = check_imshow(warn=True)
 
     def load_model(self):
-        """Load the Ultralytics YOLOv8 model for inference and analytics."""
+        """Load the Ultralytics YOLO model for inference and analytics."""
         from ultralytics import YOLO
 
-        self.model = YOLO(self.model_path)
-        return self.model
+        return YOLO(self.model_path)
 
     @staticmethod
     def parking_regions_extraction(json_file):
@@ -201,8 +206,8 @@ class ParkingManagement:
         Args:
             json_file (str): file that have all parking slot points
         """
-        with open(json_file, "r") as json_file:
-            return json.load(json_file)
+        with open(json_file, "r") as f:
+            return json.load(f)
 
     def process_data(self, json_data, im0, boxes, clss):
         """
@@ -219,12 +224,9 @@ class ParkingManagement:
             empty_slots (int): total slots that are available in parking lot
         """
         annotator = Annotator(im0)
-        total_slots, filled_slots = len(json_data), 0
-        empty_slots = total_slots
-
+        empty_slots, filled_slots = len(json_data), 0
         for region in json_data:
-            points = region["points"]
-            points_array = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
+            points_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
             region_occupied = False
 
             for box, cls in zip(boxes, clss):
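After the refactor, `empty_slots` starts at the total region count and is decremented as regions test occupied, so the separate `total_slots` bookkeeping goes away. A hedged sketch (an assumption about the loop body, which the hunk truncates) of the point-in-polygon occupancy test each region plausibly performs on detection centers:

```python
import cv2
import numpy as np

# Hypothetical region polygon and detection center, matching the shapes built above.
points_array = np.array([[10, 10], [110, 10], [110, 60], [10, 60]], dtype=np.int32).reshape((-1, 1, 2))
center = (60.0, 35.0)  # hypothetical detection box center
region_occupied = cv2.pointPolygonTest(points_array, center, False) >= 0  # >= 0: inside or on edge
print(region_occupied)  # True
```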
ultralytics/solutions/streamlit_inference.py CHANGED
@@ -6,13 +6,13 @@ import time
 import cv2
 import torch
 
+from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS
 
 
 def inference():
     """Runs real-time object detection on video input using Ultralytics YOLOv8 in a Streamlit application."""
-
-    # Scope imports for faster ultralytics package load speeds
+    check_requirements("streamlit>=1.29.0")  # scope imports for faster ultralytics package load speeds
    import streamlit as st
 
     from ultralytics import YOLO
ultralytics-8.2.57.dist-info/METADATA → ultralytics-8.2.58.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.57
+Version: 8.2.58
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
ultralytics-8.2.57.dist-info/RECORD → ultralytics-8.2.58.dist-info/RECORD
@@ -8,10 +8,10 @@ tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
 tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
 tests/test_python.py,sha256=qhtSQ7NDfBChsVUxeSwfUIkoKq0S1Z-Rd9_MP023Y5k,21794
 tests/test_solutions.py,sha256=EACnPXbeJe2aVTOKfqMk5jclKKCWCVgFEzjpR6y7Sh8,3304
-ultralytics/__init__.py,sha256=xxc9nMqmuQxnsmyw2hU4F0LM5UJkCIXsBBqzyv-czNE,694
+ultralytics/__init__.py,sha256=Cfjin2MEmuwjjw4wyXtKmTKRpM_6SD6i4baqR34duUs,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=MqUsV-Mdk80dO64yY7JmplEO0Awb-25Lfx4YC9QYxhc,26210
+ultralytics/cfg/__init__.py,sha256=-3FW9UuCjhvWw0OFWbiXHWMqujOvBX428-NgSMFG0sQ,26198
 ultralytics/cfg/default.yaml,sha256=xRKVF-Z9E3imXTU9OCK94kj3jGgYoo67VJQwuYlHiUU,8228
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
@@ -113,7 +113,7 @@ ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqq
 ultralytics/models/fastsam/__init__.py,sha256=0dt65jZ_5b7Q-mdXN8MSEkgnFRA0FIwlel_LS2RaOlU,254
 ultralytics/models/fastsam/model.py,sha256=c7GGwaa9AXssJFwrcuytFHpPOlgSrS3n0utyf4JSL2o,1055
 ultralytics/models/fastsam/predict.py,sha256=0WHUFrqHUNy1cTNpLKsN0FKqLKCvr7fHU6pp91_QVg0,4121
-ultralytics/models/fastsam/prompt.py,sha256=_SZumoIYjZA8jML9K2bNY8UX6T5_8MTjw9Hhm_Ozdyo,15967
+ultralytics/models/fastsam/prompt.py,sha256=JJP8Ow-F5iBRWmCPCQk3Z5MiX8aTiX1jGlbejC8LpOI,15801
 ultralytics/models/fastsam/utils.py,sha256=r-b362Wb7P2ZAlOwWckPJM6HLvg-eFDDz4wkA0ymLd0,2157
 ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
 ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
@@ -148,19 +148,19 @@ ultralytics/models/yolo/classify/val.py,sha256=MXdtWrBYVpfFuPfFPOTLKa_wBdTIA4dBZ
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
 ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
 ultralytics/models/yolo/detect/train.py,sha256=8Ulq1SPNLrkOqXj0Yt5zNR1c_Xl_QnOjllCdqBHUMds,6353
-ultralytics/models/yolo/detect/val.py,sha256=NVUHkea2iSY58P7a2Tg1yeNp_ItwKomuLqkgB7mGUk4,14637
+ultralytics/models/yolo/detect/val.py,sha256=BJpA37JA-RBCa5RsUtQlB7N69HVrFAmPGA9jKpboAf8,14981
 ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
 ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
 ultralytics/models/yolo/obb/train.py,sha256=tWpFtcasMwWq1A_9VdbEg5pIVHwuWwmeLOyj-S4_1sY,1473
-ultralytics/models/yolo/obb/val.py,sha256=tHoUDh-Pv95GEnQ73yzCAAxnTMNayv4yZg33hmGuNww,8511
+ultralytics/models/yolo/obb/val.py,sha256=YMFZ79aaW45LdPBrQwRACrxbOI9cH9M_C_ibwi9PeIs,9346
 ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
 ultralytics/models/yolo/pose/predict.py,sha256=illk4qyZvybc_XMo9TKT54FIkizx91MYviE5c5OwBTQ,2404
 ultralytics/models/yolo/pose/train.py,sha256=ki8bkT8WfIFjTKf1ofeRDqeIqmk6A8a7AFog7nM-otM,2926
-ultralytics/models/yolo/pose/val.py,sha256=beoPPTWckvO7c1kWf2DbFjIN6IHcTV2hcB1rKvk0pwE,10668
+ultralytics/models/yolo/pose/val.py,sha256=VEYKClcZSt_RcAArAHn_nohuh7fW5rxulra675RFgGM,11721
 ultralytics/models/yolo/segment/__init__.py,sha256=mSbKOE8BnHL7PL2nCOVG7dRM7CI6hJezFPPwZFjEmy8,247
 ultralytics/models/yolo/segment/predict.py,sha256=xtA0ZZyuh9WVpX7zZFdAeCkWnxhQ30ADEzSud_H6N7E,2491
 ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBINBkI9K8-3sQ,2298
-ultralytics/models/yolo/segment/val.py,sha256=DxEpR0FaQePlOXb19-FO4G0Nl9rWf9smtAh9eH__2g0,11806
+ultralytics/models/yolo/segment/val.py,sha256=wH5H0NMjFzZeRcuzkspOqohhTsqBI00kmLkyGhpJA7o,13327
 ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
 ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WViDstn7npfI,3686
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
@@ -179,10 +179,10 @@ ultralytics/solutions/analytics.py,sha256=UI8HoegfIJGgvQPOt4-e9A0ss2_ofM7zzxcbKl
 ultralytics/solutions/distance_calculation.py,sha256=dmHxKfC6CNwgS5otN5AF0LkygdZMGbn9UZ06Zrs-hlk,6485
 ultralytics/solutions/heatmap.py,sha256=lPvC9XEbRodOfZSUdF5BlGVMAT9TVpjIyp3Ed_1ssb0,10376
 ultralytics/solutions/object_counter.py,sha256=C80ET_-tIKv7pfshO8DFwimCieBHV4Ns7WruaY0ScgQ,10762
-ultralytics/solutions/parking_management.py,sha256=Bd7FU3WZ8mRBWq81Z5c8jH5WloF4jPKo8TycqU_AcEI,9786
+ultralytics/solutions/parking_management.py,sha256=E55v0c-AfKbDNfEMng2UJapktDnYJHcRKC6uAImg7kM,9928
 ultralytics/solutions/queue_management.py,sha256=CxFvHwSHq8OZ5aW7x2F10jcjkGAQ3LSJ5z69zusRVbs,6781
 ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
-ultralytics/solutions/streamlit_inference.py,sha256=_IB4f9qHQPB39NrHUbNNj8vhx1HF7fiecRi0wfdXzPU,5412
+ultralytics/solutions/streamlit_inference.py,sha256=wmte67QJAtTlHoEqlJxncWIHEiENpNLv9qOMNVGEUXo,5508
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
 ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
 ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
@@ -221,9 +221,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.57.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.57.dist-info/METADATA,sha256=fIBO5P9-jTY-g2D8Ko5BoagltvtoYeUgekd0SapcYW8,41217
-ultralytics-8.2.57.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
-ultralytics-8.2.57.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.57.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.57.dist-info/RECORD,,
+ultralytics-8.2.58.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.58.dist-info/METADATA,sha256=-4-9mqwsiCumLVi2LWv_F6QwJ9lZFJUdvyXbFyTSd08,41217
+ultralytics-8.2.58.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
+ultralytics-8.2.58.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.58.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.58.dist-info/RECORD,,