ultralytics 8.2.56__py3-none-any.whl → 8.2.58__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics has been flagged by the registry.

tests/test_solutions.py ADDED
@@ -0,0 +1,90 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import cv2
+ import pytest
+
+ from ultralytics import YOLO, solutions
+ from ultralytics.utils.downloads import safe_download
+
+ MAJOR_SOLUTIONS_DEMO = "https://github.com/ultralytics/assets/releases/download/v0.0.0/solutions_ci_demo.mp4"
+ WORKOUTS_SOLUTION_DEMO = "https://github.com/ultralytics/assets/releases/download/v0.0.0/solution_ci_pose_demo.mp4"
+
+
+ @pytest.mark.slow
+ def test_major_solutions():
+     """Test the object counting, heatmap, speed estimation and queue management solution."""
+
+     safe_download(url=MAJOR_SOLUTIONS_DEMO)
+     model = YOLO("yolov8n.pt")
+     names = model.names
+     cap = cv2.VideoCapture("solutions_ci_demo.mp4")
+     assert cap.isOpened(), "Error reading video file"
+     region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+     counter = solutions.ObjectCounter(reg_pts=region_points, names=names, view_img=False)
+     heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, names=names, view_img=False)
+     speed = solutions.SpeedEstimator(reg_pts=region_points, names=names, view_img=False)
+     queue = solutions.QueueManager(names=names, reg_pts=region_points, view_img=False)
+     while cap.isOpened():
+         success, im0 = cap.read()
+         if not success:
+             break
+         original_im0 = im0.copy()
+         tracks = model.track(im0, persist=True, show=False)
+         _ = counter.start_counting(original_im0.copy(), tracks)
+         _ = heatmap.generate_heatmap(original_im0.copy(), tracks)
+         _ = speed.estimate_speed(original_im0.copy(), tracks)
+         _ = queue.process_queue(original_im0.copy(), tracks)
+     cap.release()
+     cv2.destroyAllWindows()
+
+
+ @pytest.mark.slow
+ def test_aigym():
+     """Test the workouts monitoring solution."""
+
+     safe_download(url=WORKOUTS_SOLUTION_DEMO)
+     model = YOLO("yolov8n-pose.pt")
+     cap = cv2.VideoCapture("solution_ci_pose_demo.mp4")
+     assert cap.isOpened(), "Error reading video file"
+     gym_object = solutions.AIGym(line_thickness=2, pose_type="squat", kpts_to_check=[5, 11, 13])
+     while cap.isOpened():
+         success, im0 = cap.read()
+         if not success:
+             break
+         results = model.track(im0, verbose=False)
+         _ = gym_object.start_counting(im0, results)
+     cap.release()
+     cv2.destroyAllWindows()
+
+
+ @pytest.mark.slow
+ def test_instance_segmentation():
+     """Test the instance segmentation solution."""
+
+     from ultralytics.utils.plotting import Annotator, colors
+
+     model = YOLO("yolov8n-seg.pt")
+     names = model.names
+     cap = cv2.VideoCapture("solutions_ci_demo.mp4")
+     assert cap.isOpened(), "Error reading video file"
+     while cap.isOpened():
+         success, im0 = cap.read()
+         if not success:
+             break
+         results = model.predict(im0)
+         annotator = Annotator(im0, line_width=2)
+         if results[0].masks is not None:
+             clss = results[0].boxes.cls.cpu().tolist()
+             masks = results[0].masks.xy
+             for mask, cls in zip(masks, clss):
+                 color = colors(int(cls), True)
+                 annotator.seg_bbox(mask=mask, mask_color=color, label=names[int(cls)])
+     cap.release()
+     cv2.destroyAllWindows()
+
+
+ @pytest.mark.slow
+ def test_streamlit_predict():
+     """Test streamlit predict live inference solution."""
+
+     solutions.inference()
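The new tests are all gated behind pytest's `slow` marker. A minimal invocation sketch (assuming the marker is registered in the repository's pytest configuration):

```python
# Hedged sketch: run only the slow-marked solution tests from the repo root,
# equivalent to `pytest -m slow tests/test_solutions.py` on the command line.
import pytest

pytest.main(["-m", "slow", "tests/test_solutions.py"])
```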
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
 
- __version__ = "8.2.56"
+ __version__ = "8.2.58"
 
  import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -512,14 +512,14 @@ def handle_yolo_settings(args: List[str]) -> None:
 
  def handle_explorer():
      """Open the Ultralytics Explorer GUI for dataset exploration and analysis."""
-     checks.check_requirements("streamlit")
+     checks.check_requirements("streamlit>=1.29.0")
      LOGGER.info("💡 Loading Explorer dashboard...")
      subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"])
 
 
  def handle_streamlit_inference():
      """Open the Ultralytics Live Inference streamlit app for real time object detection."""
-     checks.check_requirements(["streamlit", "opencv-python", "torch"])
+     checks.check_requirements("streamlit>=1.29.0")
      LOGGER.info("💡 Loading Ultralytics Live Inference app...")
      subprocess.run(["streamlit", "run", ROOT / "solutions/streamlit_inference.py", "--server.headless", "true"])
 
ultralytics/data/dataset.py CHANGED
@@ -15,7 +15,7 @@ from torch.utils.data import ConcatDataset
 
  from ultralytics.utils import LOCAL_RANK, NUM_THREADS, TQDM, colorstr
  from ultralytics.utils.ops import resample_segments
- from ultralytics.utils.torch_utils import TORCH_1_13
+ from ultralytics.utils.torch_utils import TORCHVISION_0_18
 
  from .augment import (
      Compose,
@@ -417,7 +417,7 @@ class ClassificationDataset:
          import torchvision  # scope for faster 'import ultralytics'
 
          # Base class assigned as attribute rather than used as base class to allow for scoping slow torchvision import
-         if TORCH_1_13:  # 'allow_empty' argument first introduced in torch 1.13
+         if TORCHVISION_0_18:  # 'allow_empty' argument first introduced in torchvision 0.18
              self.base = torchvision.datasets.ImageFolder(root=root, allow_empty=True)
          else:
              self.base = torchvision.datasets.ImageFolder(root=root)
ultralytics/engine/model.py CHANGED
@@ -311,11 +311,13 @@ class Model(nn.Module):
              AssertionError: If the model is not a PyTorch model.
          """
          self._check_is_pytorch_model()
+         from copy import deepcopy
          from datetime import datetime
 
          from ultralytics import __version__
 
          updates = {
+             "model": deepcopy(self.model).half() if isinstance(self.model, nn.Module) else self.model,
              "date": datetime.now().isoformat(),
              "version": __version__,
              "license": "AGPL-3.0 License (https://ultralytics.com/license)",
ultralytics/models/fastsam/prompt.py CHANGED
@@ -7,6 +7,7 @@ import cv2
  import numpy as np
  import torch
  from PIL import Image
+ from torch import Tensor
 
  from ultralytics.utils import TQDM, checks
 
@@ -249,7 +250,7 @@ class FastSAMPrompt:
          ax.imshow(show)
 
      @torch.no_grad()
-     def retrieve(self, model, preprocess, elements, search_text: str, device) -> int:
+     def retrieve(self, model, preprocess, elements, search_text: str, device) -> Tensor:
          """Processes images and text with a model, calculates similarity, and returns softmax score."""
          preprocessed_images = [preprocess(image).to(device) for image in elements]
          tokenized_text = self.clip.tokenize([search_text]).to(device)
@@ -269,19 +270,16 @@ class FastSAMPrompt:
          mask_h, mask_w = annotations[0]["segmentation"].shape
          if ori_w != mask_w or ori_h != mask_h:
              image = image.resize((mask_w, mask_h))
-         cropped_boxes = []
          cropped_images = []
-         not_crop = []
          filter_id = []
          for _, mask in enumerate(annotations):
              if np.sum(mask["segmentation"]) <= 100:
                  filter_id.append(_)
                  continue
              bbox = self._get_bbox_from_mask(mask["segmentation"])  # bbox from mask
-             cropped_boxes.append(self._segment_image(image, bbox))  # save cropped image
-             cropped_images.append(bbox)  # save cropped image bbox
+             cropped_images.append(self._segment_image(image, bbox))  # save cropped image
 
-         return cropped_boxes, cropped_images, not_crop, filter_id, annotations
+         return cropped_images, filter_id, annotations
 
      def box_prompt(self, bbox):
          """Modifies the bounding box properties and calculates IoU between masks and bounding box."""
@@ -341,11 +339,10 @@ class FastSAMPrompt:
          """Processes a text prompt, applies it to existing results and returns the updated results."""
          if self.results[0].masks is not None:
              format_results = self._format_results(self.results[0], 0)
-             cropped_boxes, cropped_images, not_crop, filter_id, annotations = self._crop_image(format_results)
+             cropped_images, filter_id, annotations = self._crop_image(format_results)
              clip_model, preprocess = self.clip.load("ViT-B/32", device=self.device)
-             scores = self.retrieve(clip_model, preprocess, cropped_boxes, text, device=self.device)
-             max_idx = scores.argsort()
-             max_idx = max_idx[-1]
+             scores = self.retrieve(clip_model, preprocess, cropped_images, text, device=self.device)
+             max_idx = torch.argmax(scores)
              max_idx += sum(np.array(filter_id) <= int(max_idx))
              self.results[0].masks.data = torch.tensor(np.array([annotations[max_idx]["segmentation"]]))
          return self.results
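Swapping `scores.argsort()[-1]` for `torch.argmax(scores)` picks the same index without sorting the whole score vector. A quick equivalence check (example values are illustrative):

```python
import torch

scores = torch.tensor([0.12, 0.81, 0.07])  # stand-in CLIP similarity scores
assert int(scores.argsort()[-1]) == int(torch.argmax(scores))  # both select index 1
```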
ultralytics/models/yolo/detect/val.py CHANGED
@@ -202,13 +202,18 @@ class DetectionValidator(BaseValidator):
          Return correct prediction matrix.
 
          Args:
-             detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                 Each detection is of the format: x1, y1, x2, y2, conf, class.
-             labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                 Each label is of the format: class, x1, y1, x2, y2.
+             detections (torch.Tensor): Tensor of shape (N, 6) representing detections where each detection is
+                 (x1, y1, x2, y2, conf, class).
+             gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground-truth bounding box coordinates. Each
+                 bounding box is of the format: (x1, y1, x2, y2).
+             gt_cls (torch.Tensor): Tensor of shape (M,) representing target class indices.
 
          Returns:
-             (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+             (torch.Tensor): Correct prediction matrix of shape (N, 10) for 10 IoU levels.
+
+         Note:
+             The function does not return any value directly usable for metrics calculation. Instead, it provides an
+             intermediate representation used for evaluating predictions against ground truth.
          """
          iou = box_iou(gt_bboxes, detections[:, :4])
          return self.match_predictions(detections[:, 5], gt_cls, iou)
ultralytics/models/yolo/obb/val.py CHANGED
@@ -52,17 +52,29 @@ class OBBValidator(DetectionValidator):
 
      def _process_batch(self, detections, gt_bboxes, gt_cls):
          """
-         Return correct prediction matrix.
+         Perform computation of the correct prediction matrix for a batch of detections and ground truth bounding boxes.
 
          Args:
-             detections (torch.Tensor): Tensor of shape [N, 7] representing detections.
-                 Each detection is of the format: x1, y1, x2, y2, conf, class, angle.
-             gt_bboxes (torch.Tensor): Tensor of shape [M, 5] representing rotated boxes.
-                 Each box is of the format: x1, y1, x2, y2, angle.
-             labels (torch.Tensor): Tensor of shape [M] representing labels.
+             detections (torch.Tensor): A tensor of shape (N, 7) representing the detected bounding boxes and associated
+                 data. Each detection is represented as (x1, y1, x2, y2, conf, class, angle).
+             gt_bboxes (torch.Tensor): A tensor of shape (M, 5) representing the ground truth bounding boxes. Each box is
+                 represented as (x1, y1, x2, y2, angle).
+             gt_cls (torch.Tensor): A tensor of shape (M,) representing class labels for the ground truth bounding boxes.
 
          Returns:
-             (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+             (torch.Tensor): The correct prediction matrix with shape (N, 10), which includes 10 IoU (Intersection over
+                 Union) levels for each detection, indicating the accuracy of predictions compared to the ground truth.
+
+         Example:
+             ```python
+             detections = torch.rand(100, 7)  # 100 sample detections
+             gt_bboxes = torch.rand(50, 5)  # 50 sample ground truth boxes
+             gt_cls = torch.randint(0, 5, (50,))  # 50 ground truth class labels
+             correct_matrix = OBBValidator._process_batch(detections, gt_bboxes, gt_cls)
+             ```
+
+         Note:
+             This method relies on `batch_probiou` to calculate IoU between detections and ground truth bounding boxes.
          """
          iou = batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
          return self.match_predictions(detections[:, 5], gt_cls, iou)
ultralytics/models/yolo/pose/val.py CHANGED
@@ -152,19 +152,34 @@ class PoseValidator(DetectionValidator):
 
      def _process_batch(self, detections, gt_bboxes, gt_cls, pred_kpts=None, gt_kpts=None):
          """
-         Return correct prediction matrix.
+         Return correct prediction matrix by computing Intersection over Union (IoU) between detections and ground truth.
 
          Args:
-             detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                 Each detection is of the format: x1, y1, x2, y2, conf, class.
-             labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                 Each label is of the format: class, x1, y1, x2, y2.
-             pred_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing predicted keypoints.
-                 51 corresponds to 17 keypoints each with 3 values.
-             gt_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing ground truth keypoints.
+             detections (torch.Tensor): Tensor with shape (N, 6) representing detection boxes and scores, where each
+                 detection is of the format (x1, y1, x2, y2, conf, class).
+             gt_bboxes (torch.Tensor): Tensor with shape (M, 4) representing ground truth bounding boxes, where each
+                 box is of the format (x1, y1, x2, y2).
+             gt_cls (torch.Tensor): Tensor with shape (M,) representing ground truth class indices.
+             pred_kpts (torch.Tensor | None): Optional tensor with shape (N, 51) representing predicted keypoints, where
+                 51 corresponds to 17 keypoints each having 3 values.
+             gt_kpts (torch.Tensor | None): Optional tensor with shape (N, 51) representing ground truth keypoints.
 
          Returns:
-             torch.Tensor: Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+             torch.Tensor: A tensor with shape (N, 10) representing the correct prediction matrix for 10 IoU levels,
+                 where N is the number of detections.
+
+         Example:
+             ```python
+             detections = torch.rand(100, 6)  # 100 predictions: (x1, y1, x2, y2, conf, class)
+             gt_bboxes = torch.rand(50, 4)  # 50 ground truth boxes: (x1, y1, x2, y2)
+             gt_cls = torch.randint(0, 2, (50,))  # 50 ground truth class indices
+             pred_kpts = torch.rand(100, 51)  # 100 predicted keypoints
+             gt_kpts = torch.rand(50, 51)  # 50 ground truth keypoints
+             correct_preds = _process_batch(detections, gt_bboxes, gt_cls, pred_kpts, gt_kpts)
+             ```
+
+         Note:
+             `0.53` scale factor used in area computation is referenced from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384.
          """
          if pred_kpts is not None and gt_kpts is not None:
              # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
ultralytics/models/yolo/segment/val.py CHANGED
@@ -164,14 +164,34 @@ class SegmentationValidator(DetectionValidator):
 
      def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
          """
-         Return correct prediction matrix.
+         Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
 
          Args:
-             detections (array[N, 6]), x1, y1, x2, y2, conf, class
-             labels (array[M, 5]), class, x1, y1, x2, y2
+             detections (torch.Tensor): Tensor of shape (N, 6) representing detected bounding boxes and
+                 associated confidence scores and class indices. Each row is of the format [x1, y1, x2, y2, conf, class].
+             gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground truth bounding box coordinates.
+                 Each row is of the format [x1, y1, x2, y2].
+             gt_cls (torch.Tensor): Tensor of shape (M,) representing ground truth class indices.
+             pred_masks (torch.Tensor | None): Tensor representing predicted masks, if available. The shape should
+                 match the ground truth masks.
+             gt_masks (torch.Tensor | None): Tensor of shape (M, H, W) representing ground truth masks, if available.
+             overlap (bool): Flag indicating if overlapping masks should be considered.
+             masks (bool): Flag indicating if the batch contains mask data.
 
          Returns:
-             correct (array[N, 10]), for 10 IoU levels
+             (torch.Tensor): A correct prediction matrix of shape (N, 10), where 10 represents different IoU levels.
+
+         Note:
+             - If `masks` is True, the function computes IoU between predicted and ground truth masks.
+             - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.
+
+         Example:
+             ```python
+             detections = torch.tensor([[25, 30, 200, 300, 0.8, 1], [50, 60, 180, 290, 0.75, 0]])
+             gt_bboxes = torch.tensor([[24, 29, 199, 299], [55, 65, 185, 295]])
+             gt_cls = torch.tensor([1, 0])
+             correct_preds = validator._process_batch(detections, gt_bboxes, gt_cls)
+             ```
          """
          if masks:
              if overlap:
ultralytics/solutions/distance_calculation.py CHANGED
@@ -24,7 +24,7 @@ class DistanceCalculation:
          Initializes the DistanceCalculation class with the given parameters.
 
          Args:
-             names (dict): Dictionary mapping class indices to class names.
+             names (dict): Dictionary of classes names.
              pixels_per_meter (int, optional): Conversion factor from pixels to meters. Defaults to 10.
              view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
              line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
ultralytics/solutions/heatmap.py CHANGED
@@ -18,7 +18,7 @@ class Heatmap:
 
      def __init__(
          self,
-         classes_names,
+         names,
          imw=0,
          imh=0,
          colormap=cv2.COLORMAP_JET,
@@ -44,7 +44,7 @@ class Heatmap:
          self.shape = shape
 
          self.initialized = False
-         self.names = classes_names  # Classes names
+         self.names = names  # Classes names
 
          # Image information
          self.imw = imw
ultralytics/solutions/object_counter.py CHANGED
@@ -17,7 +17,7 @@ class ObjectCounter:
 
      def __init__(
          self,
-         classes_names,
+         names,
          reg_pts=None,
          count_reg_color=(255, 0, 255),
          count_txt_color=(0, 0, 0),
@@ -37,7 +37,7 @@ class ObjectCounter:
          Initializes the ObjectCounter with various tracking and counting parameters.
 
          Args:
-             classes_names (dict): Dictionary of class names.
+             names (dict): Dictionary of class names.
              reg_pts (list): List of points defining the counting region.
              count_reg_color (tuple): RGB color of the counting region.
              count_txt_color (tuple): RGB color of the count text.
@@ -72,7 +72,7 @@ class ObjectCounter:
          self.view_in_counts = view_in_counts
          self.view_out_counts = view_out_counts
 
-         self.names = classes_names  # Classes names
+         self.names = names  # Classes names
          self.annotator = None  # Annotator
          self.window_name = "Ultralytics YOLOv8 Object Counter"
 
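The `classes_names` → `names` rename in Heatmap, ObjectCounter, and QueueManager breaks callers that pass the argument by keyword. A hedged before/after sketch of the updated constructor call (region points are illustrative):

```python
from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")
region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]  # illustrative counting region

# 8.2.56: counter = solutions.ObjectCounter(classes_names=model.names, reg_pts=region_points)
# 8.2.58:
counter = solutions.ObjectCounter(names=model.names, reg_pts=region_points, view_img=False)
```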
ultralytics/solutions/parking_management.py CHANGED
@@ -1,11 +1,9 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
 
  import json
- from tkinter import filedialog, messagebox
 
  import cv2
  import numpy as np
- from PIL import Image, ImageTk
 
  from ultralytics.utils.checks import check_imshow, check_requirements
  from ultralytics.utils.plotting import Annotator
@@ -16,7 +14,7 @@ class ParkingPtsSelection:
          """Initializes the UI for selecting parking zone points in a tkinter window."""
          check_requirements("tkinter")
 
-         import tkinter as tk
+         import tkinter as tk  # scope for multi-environment compatibility
 
          self.tk = tk
          self.master = tk.Tk()
@@ -55,6 +53,10 @@ class ParkingPtsSelection:
 
      def upload_image(self):
          """Upload an image and resize it to fit canvas."""
+         from tkinter import filedialog
+
+         from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package
+
          self.image_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")])
          if not self.image_path:
              return
@@ -115,6 +117,8 @@ class ParkingPtsSelection:
 
      def remove_last_bounding_box(self):
          """Remove the last drawn bounding box from canvas."""
+         from tkinter import messagebox  # scope for multi-environment compatibility
+
          if self.bounding_boxes:
              self.bounding_boxes.pop()  # Remove the last bounding box
              self.canvas.delete("all")  # Clear the canvas
@@ -130,6 +134,8 @@ class ParkingPtsSelection:
 
      def save_to_json(self):
          """Saves rescaled bounding boxes to 'bounding_boxes.json' based on image-to-canvas size ratio."""
+         from tkinter import messagebox  # scope for multi-environment compatibility
+
          canvas_width, canvas_height = self.canvas.winfo_width(), self.canvas.winfo_height()
          width_scaling_factor = self.img_width / canvas_width
          height_scaling_factor = self.img_height / canvas_height
@@ -141,8 +147,8 @@ class ParkingPtsSelection:
                  rescaled_y = int(y * height_scaling_factor)
                  rescaled_box.append((rescaled_x, rescaled_y))
              bounding_boxes_data.append({"points": rescaled_box})
-         with open("bounding_boxes.json", "w") as json_file:
-             json.dump(bounding_boxes_data, json_file, indent=4)
+         with open("bounding_boxes.json", "w") as f:
+             json.dump(bounding_boxes_data, f, indent=4)
 
          messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
 
@@ -187,11 +193,10 @@ class ParkingManagement:
          self.env_check = check_imshow(warn=True)
 
      def load_model(self):
-         """Load the Ultralytics YOLOv8 model for inference and analytics."""
+         """Load the Ultralytics YOLO model for inference and analytics."""
          from ultralytics import YOLO
 
-         self.model = YOLO(self.model_path)
-         return self.model
+         return YOLO(self.model_path)
 
      @staticmethod
      def parking_regions_extraction(json_file):
@@ -201,8 +206,8 @@ class ParkingManagement:
          Args:
              json_file (str): file that have all parking slot points
          """
-         with open(json_file, "r") as json_file:
-             return json.load(json_file)
+         with open(json_file, "r") as f:
+             return json.load(f)
 
      def process_data(self, json_data, im0, boxes, clss):
          """
@@ -219,12 +224,9 @@ class ParkingManagement:
              empty_slots (int): total slots that are available in parking lot
          """
          annotator = Annotator(im0)
-         total_slots, filled_slots = len(json_data), 0
-         empty_slots = total_slots
-
+         empty_slots, filled_slots = len(json_data), 0
          for region in json_data:
-             points = region["points"]
-             points_array = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
+             points_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
              region_occupied = False
 
              for box, cls in zip(boxes, clss):
ultralytics/solutions/queue_management.py CHANGED
@@ -17,7 +17,7 @@ class QueueManager:
 
      def __init__(
          self,
-         classes_names,
+         names,
          reg_pts=None,
          line_thickness=2,
          track_thickness=2,
@@ -34,7 +34,7 @@ class QueueManager:
          Initializes the QueueManager with specified parameters for tracking and counting objects.
 
          Args:
-             classes_names (dict): A dictionary mapping class IDs to class names.
+             names (dict): A dictionary mapping class IDs to class names.
              reg_pts (list of tuples, optional): Points defining the counting region polygon. Defaults to a predefined
                  rectangle.
              line_thickness (int, optional): Thickness of the annotation lines. Defaults to 2.
@@ -69,7 +69,7 @@ class QueueManager:
          self.view_queue_counts = view_queue_counts
          self.fontsize = fontsize
 
-         self.names = classes_names  # Class names
+         self.names = names  # Class names
          self.annotator = None  # Annotator
          self.window_name = "Ultralytics YOLOv8 Queue Manager"
 
@@ -139,7 +139,7 @@ class QueueManager:
 
      def display_frames(self):
          """Displays the current frame with annotations."""
-         if self.env_check:
+         if self.env_check and self.view_img:
              self.annotator.draw_region(reg_pts=self.reg_pts, thickness=self.region_thickness, color=self.region_color)
              cv2.namedWindow(self.window_name)
              cv2.imshow(self.window_name, self.im0)
ultralytics/solutions/streamlit_inference.py CHANGED
@@ -6,13 +6,13 @@ import time
  import cv2
  import torch
 
+ from ultralytics.utils.checks import check_requirements
  from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS
 
 
  def inference():
      """Runs real-time object detection on video input using Ultralytics YOLOv8 in a Streamlit application."""
-
-     # Scope imports for faster ultralytics package load speeds
+     check_requirements("streamlit>=1.29.0")  # scope imports for faster ultralytics package load speeds
      import streamlit as st
 
      from ultralytics import YOLO
ultralytics/utils/plotting.py CHANGED
@@ -726,20 +726,18 @@ class Annotator:
          )
          cv2.putText(self.im, stage_text, stage_text_position, 0, self.sf, txt_color, self.tf)
 
-     def seg_bbox(self, mask, mask_color=(255, 0, 255), det_label=None, track_label=None):
+     def seg_bbox(self, mask, mask_color=(255, 0, 255), label=None, txt_color=(255, 255, 255)):
          """
          Function for drawing segmented object in bounding box shape.
 
          Args:
              mask (list): masks data list for instance segmentation area plotting
-             mask_color (tuple): mask foreground color
-             det_label (str): Detection label text
-             track_label (str): Tracking label text
+             mask_color (RGB): mask foreground color
+             label (str): Detection label text
+             txt_color (RGB): text color
          """
 
          cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2)
-
-         label = f"Track ID: {track_label}" if track_label else det_label
          text_size, _ = cv2.getTextSize(label, 0, self.sf, self.tf)
 
          cv2.rectangle(
@@ -750,9 +748,10 @@ class Annotator:
              -1,
          )
 
-         cv2.putText(
-             self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, (255, 255, 255), self.tf
-         )
+         if label:
+             cv2.putText(
+                 self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
+             )
 
      def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color, centroid_color):
          """
ultralytics/utils/torch_utils.py CHANGED
@@ -40,6 +40,7 @@ TORCH_2_0 = check_version(torch.__version__, "2.0.0")
  TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
  TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
  TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
+ TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
 
 
  @contextmanager
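The new flag exists because `allow_empty` was added in torchvision 0.18, not torch 1.13 as the old gate in dataset.py assumed. A minimal sketch of using the flag the same way the dataset.py hunk above does (the dataset root is hypothetical):

```python
import torchvision

from ultralytics.utils.torch_utils import TORCHVISION_0_18

root = "datasets/classify"  # hypothetical dataset root
if TORCHVISION_0_18:  # 'allow_empty' first appeared in torchvision 0.18
    base = torchvision.datasets.ImageFolder(root=root, allow_empty=True)
else:
    base = torchvision.datasets.ImageFolder(root=root)
```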
ultralytics-8.2.56.dist-info/METADATA → ultralytics-8.2.58.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.56
+ Version: 8.2.58
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
ultralytics-8.2.56.dist-info/RECORD → ultralytics-8.2.58.dist-info/RECORD CHANGED
@@ -7,10 +7,11 @@ tests/test_explorer.py,sha256=NcxSJeB6FxwkN09hQl7nnQL--HjfHB_WcZk0mEmBNHI,2215
  tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
  tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
  tests/test_python.py,sha256=qhtSQ7NDfBChsVUxeSwfUIkoKq0S1Z-Rd9_MP023Y5k,21794
- ultralytics/__init__.py,sha256=vHGCfANVx6-O0kMZW2f7Cd3G8rscqy57ltGGnQ739zE,694
+ tests/test_solutions.py,sha256=EACnPXbeJe2aVTOKfqMk5jclKKCWCVgFEzjpR6y7Sh8,3304
+ ultralytics/__init__.py,sha256=Cfjin2MEmuwjjw4wyXtKmTKRpM_6SD6i4baqR34duUs,694
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=MqUsV-Mdk80dO64yY7JmplEO0Awb-25Lfx4YC9QYxhc,26210
+ ultralytics/cfg/__init__.py,sha256=-3FW9UuCjhvWw0OFWbiXHWMqujOvBX428-NgSMFG0sQ,26198
  ultralytics/cfg/default.yaml,sha256=xRKVF-Z9E3imXTU9OCK94kj3jGgYoo67VJQwuYlHiUU,8228
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
@@ -87,7 +88,7 @@ ultralytics/data/augment.py,sha256=V0iyu_9q_mx-G_61sPA1FWt_6ErJY4SnY_W62uxKOqI,5
  ultralytics/data/base.py,sha256=C3teLnw97ZTbpJHT9P7yYWosAKocMzgJjRe1rxgfpls,13524
  ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
  ultralytics/data/converter.py,sha256=7640xKuf7LPeoTwoCvgbIXM5xbzyq72Hu2Rf2lrgjRY,17554
- ultralytics/data/dataset.py,sha256=q8g8cUAabdhqahL0a0cbulOey29UFXY51VDEFJN_x0c,22444
+ ultralytics/data/dataset.py,sha256=XrHMe79IQODD51cPGFaeX2MPXZUJzcPI-ywNJe6jMN4,22462
  ultralytics/data/loaders.py,sha256=XnwJsrejnigaG0wwivKccFUxq002czYa4cgVfGzsFms,24078
  ultralytics/data/split_dota.py,sha256=fWezt1Bo3jiZ6AyUWdBtTUuvLamPv1t7JD-DirM9gQ8,10142
  ultralytics/data/utils.py,sha256=GHmqx6e5yRfcUD2Qkwk-tQfhXCwtUMFD3Uf6d699nGo,31046
@@ -98,7 +99,7 @@ ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2
  ultralytics/data/explorer/gui/dash.py,sha256=CPlFIIhf53j_YVAqealsC3AbcztdPqZxfniQcBnlKK4,10042
  ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/engine/exporter.py,sha256=yV5DKjz5DZ6BrW8mOC5Nb5eDcuCc93Ft-RQwJ21xVZs,58729
- ultralytics/engine/model.py,sha256=8qD5irabp8BF7bBZGwztCu8yAVQQp1kksYSea9EhdEo,39078
+ ultralytics/engine/model.py,sha256=OvQsoANg5oyN3k3K-ppa4KrIqPi96hvfGcjqd-TU5l0,39215
  ultralytics/engine/predictor.py,sha256=W58kDCFH2AfoFzpGbos3k8zUEVsLunBuM8sc2B64rPY,17449
  ultralytics/engine/results.py,sha256=5MevvBz0E-cpDf55FqweInlKdcQPb7sz0EgZSROJqw4,35817
  ultralytics/engine/trainer.py,sha256=vFdWN6I-DoAHZYmxjRDeYcc44B9i8tBtK8u6oMgyj9o,35476
@@ -112,7 +113,7 @@ ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqq
  ultralytics/models/fastsam/__init__.py,sha256=0dt65jZ_5b7Q-mdXN8MSEkgnFRA0FIwlel_LS2RaOlU,254
  ultralytics/models/fastsam/model.py,sha256=c7GGwaa9AXssJFwrcuytFHpPOlgSrS3n0utyf4JSL2o,1055
  ultralytics/models/fastsam/predict.py,sha256=0WHUFrqHUNy1cTNpLKsN0FKqLKCvr7fHU6pp91_QVg0,4121
- ultralytics/models/fastsam/prompt.py,sha256=_SZumoIYjZA8jML9K2bNY8UX6T5_8MTjw9Hhm_Ozdyo,15967
+ ultralytics/models/fastsam/prompt.py,sha256=JJP8Ow-F5iBRWmCPCQk3Z5MiX8aTiX1jGlbejC8LpOI,15801
  ultralytics/models/fastsam/utils.py,sha256=r-b362Wb7P2ZAlOwWckPJM6HLvg-eFDDz4wkA0ymLd0,2157
  ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
  ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
@@ -147,19 +148,19 @@ ultralytics/models/yolo/classify/val.py,sha256=MXdtWrBYVpfFuPfFPOTLKa_wBdTIA4dBZ
  ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
  ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
  ultralytics/models/yolo/detect/train.py,sha256=8Ulq1SPNLrkOqXj0Yt5zNR1c_Xl_QnOjllCdqBHUMds,6353
- ultralytics/models/yolo/detect/val.py,sha256=NVUHkea2iSY58P7a2Tg1yeNp_ItwKomuLqkgB7mGUk4,14637
+ ultralytics/models/yolo/detect/val.py,sha256=BJpA37JA-RBCa5RsUtQlB7N69HVrFAmPGA9jKpboAf8,14981
  ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
  ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
  ultralytics/models/yolo/obb/train.py,sha256=tWpFtcasMwWq1A_9VdbEg5pIVHwuWwmeLOyj-S4_1sY,1473
- ultralytics/models/yolo/obb/val.py,sha256=tHoUDh-Pv95GEnQ73yzCAAxnTMNayv4yZg33hmGuNww,8511
+ ultralytics/models/yolo/obb/val.py,sha256=YMFZ79aaW45LdPBrQwRACrxbOI9cH9M_C_ibwi9PeIs,9346
  ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
  ultralytics/models/yolo/pose/predict.py,sha256=illk4qyZvybc_XMo9TKT54FIkizx91MYviE5c5OwBTQ,2404
  ultralytics/models/yolo/pose/train.py,sha256=ki8bkT8WfIFjTKf1ofeRDqeIqmk6A8a7AFog7nM-otM,2926
- ultralytics/models/yolo/pose/val.py,sha256=beoPPTWckvO7c1kWf2DbFjIN6IHcTV2hcB1rKvk0pwE,10668
+ ultralytics/models/yolo/pose/val.py,sha256=VEYKClcZSt_RcAArAHn_nohuh7fW5rxulra675RFgGM,11721
  ultralytics/models/yolo/segment/__init__.py,sha256=mSbKOE8BnHL7PL2nCOVG7dRM7CI6hJezFPPwZFjEmy8,247
  ultralytics/models/yolo/segment/predict.py,sha256=xtA0ZZyuh9WVpX7zZFdAeCkWnxhQ30ADEzSud_H6N7E,2491
  ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBINBkI9K8-3sQ,2298
- ultralytics/models/yolo/segment/val.py,sha256=DxEpR0FaQePlOXb19-FO4G0Nl9rWf9smtAh9eH__2g0,11806
+ ultralytics/models/yolo/segment/val.py,sha256=wH5H0NMjFzZeRcuzkspOqohhTsqBI00kmLkyGhpJA7o,13327
  ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
  ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WViDstn7npfI,3686
  ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
@@ -175,13 +176,13 @@ ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQj
  ultralytics/solutions/__init__.py,sha256=O_G9jh34NnFsHKSA8zcJH0CHtg1Q01JEiRWGwX3vGJY,631
  ultralytics/solutions/ai_gym.py,sha256=KQdx0RP9t9y1MqYMVlYUSn09SVJSUwKvgxPri_DhczM,4721
  ultralytics/solutions/analytics.py,sha256=UI8HoegfIJGgvQPOt4-e9A0ss2_ofM7zzxcbKlhe66k,11572
- ultralytics/solutions/distance_calculation.py,sha256=pSIkyytHGRAaNzIrkkNkiOnSVWU1PYvURlCIV_jRORA,6505
- ultralytics/solutions/heatmap.py,sha256=Fl01uzt5B0hBtcK0xG0QtUe1KyyEhorpV2C6Nbb3d_o,10392
- ultralytics/solutions/object_counter.py,sha256=IR2kvgjlaHuzfq55gtwBiGFJ7dS5-5OCFOck54ol3PU,10786
- ultralytics/solutions/parking_management.py,sha256=Bd7FU3WZ8mRBWq81Z5c8jH5WloF4jPKo8TycqU_AcEI,9786
- ultralytics/solutions/queue_management.py,sha256=ECm6gLZplmE9Cm-zdOazHBBDcW-vvr8nx2M28fcPbts,6787
+ ultralytics/solutions/distance_calculation.py,sha256=dmHxKfC6CNwgS5otN5AF0LkygdZMGbn9UZ06Zrs-hlk,6485
+ ultralytics/solutions/heatmap.py,sha256=lPvC9XEbRodOfZSUdF5BlGVMAT9TVpjIyp3Ed_1ssb0,10376
+ ultralytics/solutions/object_counter.py,sha256=C80ET_-tIKv7pfshO8DFwimCieBHV4Ns7WruaY0ScgQ,10762
+ ultralytics/solutions/parking_management.py,sha256=E55v0c-AfKbDNfEMng2UJapktDnYJHcRKC6uAImg7kM,9928
+ ultralytics/solutions/queue_management.py,sha256=CxFvHwSHq8OZ5aW7x2F10jcjkGAQ3LSJ5z69zusRVbs,6781
  ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
- ultralytics/solutions/streamlit_inference.py,sha256=_IB4f9qHQPB39NrHUbNNj8vhx1HF7fiecRi0wfdXzPU,5412
+ ultralytics/solutions/streamlit_inference.py,sha256=wmte67QJAtTlHoEqlJxncWIHEiENpNLv9qOMNVGEUXo,5508
  ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
  ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
  ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
@@ -204,9 +205,9 @@ ultralytics/utils/loss.py,sha256=tAAi_l0SAtbtqT8AQSBSCvEyv342-r04H2KcSF1Yk_w,337
  ultralytics/utils/metrics.py,sha256=C7qFuZjwGqbsG4sggm_qfm8gVuBUwHg_Fhxj08b6NfU,53671
  ultralytics/utils/ops.py,sha256=Jlb0YBkN_SMVT2AjKPEjxgOtgnj7i7HTBh9FEwpoprU,33509
  ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
- ultralytics/utils/plotting.py,sha256=icSUqsmJLpeXyVAIt8vxpbrxTe40mwiF5ay4el3IXl0,55584
+ ultralytics/utils/plotting.py,sha256=5HRfiG2dklWZJheTxGTy0gFRk39utHcZbMJl7j2hnMI,55522
  ultralytics/utils/tal.py,sha256=xuIyryUjaaYHkHPG9GvBwh1xxN2Hq4y3hXOtuERehwY,16017
- ultralytics/utils/torch_utils.py,sha256=EqBLg_G4x31InrTEvUvvMyxWaFaZ7UNts0tUUQsQmLY,27828
+ ultralytics/utils/torch_utils.py,sha256=8B-NJKGysxUKbstHJfrpnT9Kgp3Imb4jIYWyFYKkrwM,27892
  ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
  ultralytics/utils/tuner.py,sha256=49KAadKZsUeCpwIm5Sn0grb0RPcMNI8vHGLwroDEJNI,6171
  ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -220,9 +221,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
  ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
  ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
  ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
- ultralytics-8.2.56.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.2.56.dist-info/METADATA,sha256=Zx1owi5MQKMjgragH3MIvG0YK1-81WKa1vh6DuMRGo8,41217
- ultralytics-8.2.56.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
- ultralytics-8.2.56.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.2.56.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.2.56.dist-info/RECORD,,
+ ultralytics-8.2.58.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.2.58.dist-info/METADATA,sha256=-4-9mqwsiCumLVi2LWv_F6QwJ9lZFJUdvyXbFyTSd08,41217
+ ultralytics-8.2.58.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
+ ultralytics-8.2.58.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.2.58.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.2.58.dist-info/RECORD,,