ultralytics 8.2.42__py3-none-any.whl → 8.2.44__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ultralytics has been flagged as possibly problematic.
- tests/test_python.py +1 -0
- ultralytics/__init__.py +1 -1
- ultralytics/data/loaders.py +5 -4
- ultralytics/nn/tasks.py +8 -2
- ultralytics/utils/ops.py +16 -10
- ultralytics/utils/plotting.py +69 -39
- {ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/METADATA +1 -1
- {ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/RECORD +12 -12
- {ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/WHEEL +1 -1
- {ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/LICENSE +0 -0
- {ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/top_level.txt +0 -0
tests/test_python.py
CHANGED
@@ -585,4 +585,5 @@ def test_yolov10():
     # train/val/predict
     model.train(data="coco8.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
     model.val(data="coco8.yaml", imgsz=32)
+    model.predict(imgsz=32, save_txt=True, save_crop=True, augment=True)
     model(SOURCE)
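The new line extends `test_yolov10` to cover `augment=True` prediction on YOLOv10's end-to-end, NMS-free head, which is exactly the path guarded in the `ultralytics/nn/tasks.py` change below. A minimal standalone sketch of what the test exercises (assuming the `yolov10n.pt` weights the test suite loads):

from ultralytics import YOLO

model = YOLO("yolov10n.pt")  # end-to-end (NMS-free) detection model
# Augmented prediction on an end-to-end model should warn and fall back
# to single-scale inference rather than crash.
results = model.predict(imgsz=32, save_txt=True, save_crop=True, augment=True)
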
ultralytics/__init__.py
CHANGED
ultralytics/data/loaders.py
CHANGED
@@ -362,10 +362,11 @@ class LoadImagesAndVideos:
             self.mode = "image"
             im0 = cv2.imread(path)  # BGR
             if im0 is None:
-                raise FileNotFoundError(f"Image Not Found {path}")
-            paths.append(path)
-            imgs.append(im0)
-            info.append(f"image {self.count + 1}/{self.nf} {path}: ")
+                LOGGER.warning(f"WARNING ⚠️ Image Read Error {path}")
+            else:
+                paths.append(path)
+                imgs.append(im0)
+                info.append(f"image {self.count + 1}/{self.nf} {path}: ")
             self.count += 1  # move to the next file
             if self.count >= self.ni:  # end of image list
                 break
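The rewrite turns a hard failure into warn-and-skip: in 8.2.42 an unreadable file raised and aborted the whole iteration, while 8.2.44 logs a warning and moves on to the next file. A minimal sketch of the same pattern outside the library (the `read_images` helper is hypothetical, not the ultralytics API):

import logging

import cv2

LOGGER = logging.getLogger(__name__)


def read_images(paths):
    """Read images with OpenCV, warning about and skipping unreadable files."""
    kept, imgs = [], []
    for path in paths:
        im = cv2.imread(str(path))  # cv2.imread returns None on failure instead of raising
        if im is None:
            LOGGER.warning("Image Read Error %s", path)
        else:
            kept.append(path)
            imgs.append(im)
    return kept, imgs
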
ultralytics/nn/tasks.py
CHANGED
@@ -151,8 +151,8 @@ class BaseModel(nn.Module):
     def _predict_augment(self, x):
         """Perform augmentations on input image x and return augmented inference."""
         LOGGER.warning(
-            f"WARNING ⚠️ {self.__class__.__name__} does not support augmented inference yet. "
-            f"Reverting to single-scale inference instead."
+            f"WARNING ⚠️ {self.__class__.__name__} does not support 'augment=True' prediction. "
+            f"Reverting to single-scale prediction."
         )
         return self._predict_once(x)

@@ -337,6 +337,12 @@ class DetectionModel(BaseModel):

     def _predict_augment(self, x):
         """Perform augmentations on input image x and return augmented inference and train outputs."""
+        if self.end2end:
+            LOGGER.warning(
+                "WARNING ⚠️ End2End model does not support 'augment=True' prediction. "
+                "Reverting to single-scale prediction."
+            )
+            return self._predict_once(x)
         img_size = x.shape[-2:]  # height, width
         s = [1, 0.83, 0.67]  # scales
         f = [None, 3, None]  # flips (2-ud, 3-lr)
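Multi-scale test-time augmentation runs the input at scales [1, 0.83, 0.67] with flips and merges the raw head outputs, which only makes sense when the head returns mergeable predictions. End-to-end (NMS-free) heads emit final detections directly, so the new guard warns and falls back to a single forward pass. From the caller's side the fallback is transparent (a sketch, assuming a YOLOv10-style end-to-end checkpoint):

from ultralytics import YOLO

model = YOLO("yolov10n.pt")  # model whose detection head sets end2end
# Logs "End2End model does not support 'augment=True' prediction." and
# runs ordinary single-scale inference instead of multi-scale TTA.
results = model.predict("https://ultralytics.com/images/bus.jpg", augment=True)
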
ultralytics/utils/ops.py
CHANGED
@@ -199,6 +199,7 @@ def non_max_suppression(
         max_nms (int): The maximum number of boxes into torchvision.ops.nms().
         max_wh (int): The maximum box width and height in pixels.
         in_place (bool): If True, the input prediction tensor will be modified in place.
+        rotated (bool): If Oriented Bounding Boxes (OBB) are being passed for NMS.

     Returns:
         (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of

@@ -212,11 +213,16 @@ def non_max_suppression(
     assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
     if isinstance(prediction, (list, tuple)):  # YOLOv8 model in validation model, output = (inference_out, loss_out)
         prediction = prediction[0]  # select only inference output
+    if classes is not None:
+        classes = torch.tensor(classes, device=prediction.device)

-    if prediction.shape[-1] == 6:  # end-to-end model
-        return [pred[pred[:, 4] > conf_thres] for pred in prediction]
+    if prediction.shape[-1] == 6:  # end-to-end model (BNC, i.e. 1,300,6)
+        output = [pred[pred[:, 4] > conf_thres] for pred in prediction]
+        if classes is not None:
+            output = [pred[(pred[:, 5:6] == classes).any(1)] for pred in output]
+        return output

-    bs = prediction.shape[0]  # batch size
+    bs = prediction.shape[0]  # batch size (BCN, i.e. 1,84,6300)
     nc = nc or (prediction.shape[1] - 4)  # number of classes
     nm = prediction.shape[1] - nc - 4  # number of masks
     mi = 4 + nc  # mask start index

@@ -265,7 +271,7 @@ def non_max_suppression(

         # Filter by class
         if classes is not None:
-            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
+            x = x[(x[:, 5:6] == classes).any(1)]

         # Check shape
         n = x.shape[0]  # number of boxes
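Both the new end-to-end branch and the rewritten per-image filter use the same broadcasting trick: comparing an (N, 1) column of predicted class ids against a (K,) tensor of requested classes yields an (N, K) boolean matrix, and `.any(1)` keeps boxes matching any requested class. Hoisting `classes = torch.tensor(...)` out of the per-image loop also means the host-to-device transfer happens once per call instead of once per image. A self-contained illustration of the filter:

import torch

# Toy detections in (x1, y1, x2, y2, conf, cls) layout
pred = torch.tensor(
    [
        [10.0, 10.0, 50.0, 50.0, 0.9, 0.0],
        [20.0, 20.0, 60.0, 60.0, 0.8, 2.0],
        [30.0, 30.0, 70.0, 70.0, 0.7, 5.0],
    ]
)
classes = torch.tensor([0, 5], device=pred.device)

keep = (pred[:, 5:6] == classes).any(1)  # (3, 1) vs (2,) broadcasts to (3, 2)
print(pred[keep])  # only the class-0 and class-5 rows survive
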
@@ -661,10 +667,10 @@ def process_mask_upsample(protos, masks_in, bboxes, shape):
         (torch.Tensor): The upsampled masks.
     """
     c, mh, mw = protos.shape  # CHW
-    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
+    masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
     masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
     masks = crop_mask(masks, bboxes)  # CHW
-    return masks.gt_(0.5)
+    return masks.gt_(0.0)


 def process_mask(protos, masks_in, bboxes, shape, upsample=False):

@@ -685,7 +691,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False):

     c, mh, mw = protos.shape  # CHW
     ih, iw = shape
-    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW
+    masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)  # CHW
     width_ratio = mw / iw
     height_ratio = mh / ih

@@ -698,7 +704,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False):
     masks = crop_mask(masks, downsampled_bboxes)  # CHW
     if upsample:
         masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
-    return masks.gt_(0.5)
+    return masks.gt_(0.0)


 def process_mask_native(protos, masks_in, bboxes, shape):

@@ -715,10 +721,10 @@ def process_mask_native(protos, masks_in, bboxes, shape):
         masks (torch.Tensor): The returned masks with dimensions [h, w, n]
     """
     c, mh, mw = protos.shape  # CHW
-    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
+    masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
     masks = scale_masks(masks[None], shape)[0]  # CHW
     masks = crop_mask(masks, bboxes)  # CHW
-    return masks.gt_(0.5)
+    return masks.gt_(0.0)


 def scale_masks(masks, shape, padding=True):
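Removing `.sigmoid()` and lowering the binarization threshold from 0.5 to 0.0 are two halves of one optimization: sigmoid is strictly monotonic with sigmoid(0) = 0.5, so thresholding raw logits at 0 selects the same pixels as thresholding probabilities at 0.5 while skipping an elementwise op over the full mask tensor. (The one subtlety is that `F.interpolate` now resamples logits rather than probabilities, which can flip borderline boundary pixels.) The equivalence on raw scores is easy to check:

import torch

x = torch.randn(1000)  # raw mask logits
assert torch.equal(x.sigmoid() > 0.5, x > 0)  # monotonic, sigmoid(0) == 0.5
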
ultralytics/utils/plotting.py
CHANGED
@@ -4,6 +4,7 @@ import contextlib
 import math
 import warnings
 from pathlib import Path
+from typing import Callable, Dict, List, Optional, Union

 import cv2
 import matplotlib.pyplot as plt

@@ -290,14 +291,15 @@ class Annotator:
         if self.pil or not is_ascii(label):
             if rotated:
                 p1 = box[0]
-                # NOTE: PIL-version polygon needs tuple type.
-                self.draw.polygon([tuple(b) for b in box], width=self.lw, outline=color)
+                self.draw.polygon([tuple(b) for b in box], width=self.lw, outline=color)  # PIL requires tuple box
             else:
                 p1 = (box[0], box[1])
                 self.draw.rectangle(box, width=self.lw, outline=color)  # box
             if label:
                 w, h = self.font.getsize(label)  # text width, height
-                outside = p1[1] - h >= 0  # label fits outside box
+                outside = p1[1] >= h  # label fits outside box
+                if p1[0] > self.im.size[1] - w:  # check if label extend beyond right side of image
+                    p1 = self.im.size[1] - w, p1[1]
                 self.draw.rectangle(
                     (p1[0], p1[1] - h if outside else p1[1], p1[0] + w + 1, p1[1] + 1 if outside else p1[1] + h + 1),
                     fill=color,
@@ -307,20 +309,22 @@ class Annotator:
         else:  # cv2
             if rotated:
                 p1 = [int(b) for b in box[0]]
-                # NOTE: cv2-version polylines needs np.asarray type.
-                cv2.polylines(self.im, [np.asarray(box, dtype=int)], True, color, self.lw)
+                cv2.polylines(self.im, [np.asarray(box, dtype=int)], True, color, self.lw)  # cv2 requires nparray box
             else:
                 p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
                 cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
             if label:
                 w, h = cv2.getTextSize(label, 0, fontScale=self.sf, thickness=self.tf)[0]  # text width, height
-                outside = p1[1] - h >= 3
-                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
+                h += 3  # add pixels to pad text
+                outside = p1[1] >= h  # label fits outside box
+                if p1[0] > self.im.shape[1] - w:  # check if label extend beyond right side of image
+                    p1 = self.im.shape[1] - w, p1[1]
+                p2 = p1[0] + w, p1[1] - h if outside else p1[1] + h
                 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                 cv2.putText(
                     self.im,
                     label,
-                    (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
+                    (p1[0], p1[1] - 2 if outside else p1[1] + h - 1),
                     0,
                     self.sf,
                     txt_color,

@@ -441,8 +445,9 @@ class Annotator:
         else:
             if box_style:
                 w, h = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0]  # text width, height
-                outside = xy[1] - h >= 3
-                p2 = xy[0] + w, xy[1] - h - 3 if outside else xy[1] + h + 3
+                h += 3  # add pixels to pad text
+                outside = xy[1] >= h  # label fits outside box
+                p2 = xy[0] + w, xy[1] - h if outside else xy[1] + h
                 cv2.rectangle(self.im, xy, p2, txt_color, -1, cv2.LINE_AA)  # filled
                 # Using `txt_color` for background and draw fg with white color
                 txt_color = (255, 255, 255)
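Both drawing branches get the same two fixes: text height is padded before the background box is computed, and the label origin is clamped so long class names no longer spill past the image's right edge. The cv2 geometry in isolation (a sketch of the diff's logic with a hypothetical helper; `im.shape[1]` is the image width under OpenCV's (h, w, c) convention):

import cv2
import numpy as np


def label_rect(im: np.ndarray, p1: tuple, label: str, sf: float = 0.5, tf: int = 1):
    """Return clamped (p1, p2) corners for a label background box."""
    w, h = cv2.getTextSize(label, 0, fontScale=sf, thickness=tf)[0]
    h += 3  # pad text height
    outside = p1[1] >= h  # is there room above the box?
    if p1[0] > im.shape[1] - w:  # clamp so the label stays inside the right edge
        p1 = (im.shape[1] - w, p1[1])
    p2 = (p1[0] + w, p1[1] - h if outside else p1[1] + h)
    return p1, p2
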
@@ -575,7 +580,8 @@ class Annotator:

     def display_analytics(self, im0, text, txt_color, bg_color, margin):
         """
-        Display the overall statistics for parking lots
+        Display the overall statistics for parking lots.
+
         Args:
             im0 (ndarray): inference image
             text (dict): labels dictionary

@@ -657,7 +663,7 @@ class Annotator:
             angle_text (str): angle value for workout monitoring
             count_text (str): counts value for workout monitoring
             stage_text (str): stage decision for workout monitoring
-            center_kpt (int): centroid pose index for workout monitoring
+            center_kpt (list): centroid pose index for workout monitoring
             color (tuple): text background color for workout monitoring
             txt_color (tuple): text foreground color for workout monitoring
         """
@@ -913,22 +919,49 @@ def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False,

 @threaded
 def plot_images(
-    images,
-    batch_idx,
-    cls,
-    bboxes=np.zeros(0, dtype=np.float32),
-    confs=None,
-    masks=np.zeros(0, dtype=np.uint8),
-    kpts=np.zeros((0, 51), dtype=np.float32),
-    paths=None,
-    fname="images.jpg",
-    names=None,
-    on_plot=None,
-    max_subplots=16,
-    save=True,
-    conf_thres=0.25,
-):
-    """Plot image grid with labels."""
+    images: Union[torch.Tensor, np.ndarray],
+    batch_idx: Union[torch.Tensor, np.ndarray],
+    cls: Union[torch.Tensor, np.ndarray],
+    bboxes: Union[torch.Tensor, np.ndarray] = np.zeros(0, dtype=np.float32),
+    confs: Optional[Union[torch.Tensor, np.ndarray]] = None,
+    masks: Union[torch.Tensor, np.ndarray] = np.zeros(0, dtype=np.uint8),
+    kpts: Union[torch.Tensor, np.ndarray] = np.zeros((0, 51), dtype=np.float32),
+    paths: Optional[List[str]] = None,
+    fname: str = "images.jpg",
+    names: Optional[Dict[int, str]] = None,
+    on_plot: Optional[Callable] = None,
+    max_size: int = 1920,
+    max_subplots: int = 16,
+    save: bool = True,
+    conf_thres: float = 0.25,
+) -> Optional[np.ndarray]:
+    """
+    Plot image grid with labels, bounding boxes, masks, and keypoints.
+
+    Args:
+        images: Batch of images to plot. Shape: (batch_size, channels, height, width).
+        batch_idx: Batch indices for each detection. Shape: (num_detections,).
+        cls: Class labels for each detection. Shape: (num_detections,).
+        bboxes: Bounding boxes for each detection. Shape: (num_detections, 4) or (num_detections, 5) for rotated boxes.
+        confs: Confidence scores for each detection. Shape: (num_detections,).
+        masks: Instance segmentation masks. Shape: (num_detections, height, width) or (1, height, width).
+        kpts: Keypoints for each detection. Shape: (num_detections, 51).
+        paths: List of file paths for each image in the batch.
+        fname: Output filename for the plotted image grid.
+        names: Dictionary mapping class indices to class names.
+        on_plot: Optional callback function to be called after saving the plot.
+        max_size: Maximum size of the output image grid.
+        max_subplots: Maximum number of subplots in the image grid.
+        save: Whether to save the plotted image grid to a file.
+        conf_thres: Confidence threshold for displaying detections.
+
+    Returns:
+        np.ndarray: Plotted image grid as a numpy array if save is False, None otherwise.
+
+    Note:
+        This function supports both tensor and numpy array inputs. It will automatically
+        convert tensor inputs to numpy arrays for processing.
+    """
     if isinstance(images, torch.Tensor):
         images = images.cpu().float().numpy()
     if isinstance(cls, torch.Tensor):

@@ -942,7 +975,6 @@ def plot_images(
     if isinstance(batch_idx, torch.Tensor):
         batch_idx = batch_idx.cpu().numpy()

-    max_size = 1920  # max image size
     bs, _, h, w = images.shape  # batch size, _, height, width
     bs = min(bs, max_subplots)  # limit plot images
     ns = np.ceil(bs**0.5)  # number of subplots (square)
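Promoting `max_size` from a hardcoded local to a keyword argument (alongside the new type hints and full docstring) makes the mosaic size tunable per call. The sizing arithmetic it feeds is roughly: cap the batch at `max_subplots`, tile on a square ns × ns grid with ns = ceil(sqrt(bs)), then shrink if the grid exceeds `max_size`:

import numpy as np

bs, h, w = 11, 480, 640            # batch size and per-image height/width
max_size, max_subplots = 1920, 16  # the new keyword defaults

bs = min(bs, max_subplots)                     # limit plot images
ns = int(np.ceil(bs**0.5))                     # subplots per side: ceil(sqrt(11)) = 4
scale = min(1.0, max_size / (ns * max(h, w)))  # shrink factor if mosaic is too large
print(ns, round(scale, 3))                     # 4 0.75 -> 4x4 grid scaled to fit 1920 px
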
@@ -1162,6 +1194,12 @@ def plot_tune_results(csv_file="tune_results.csv"):
     import pandas as pd  # scope for faster 'import ultralytics'
     from scipy.ndimage import gaussian_filter1d

+    def _save_one_file(file):
+        """Save one matplotlib plot to 'file'."""
+        plt.savefig(file, dpi=200)
+        plt.close()
+        LOGGER.info(f"Saved {file}")
+
     # Scatter plots for each hyperparameter
     csv_file = Path(csv_file)
     data = pd.read_csv(csv_file)

@@ -1182,11 +1220,7 @@ def plot_tune_results(csv_file="tune_results.csv"):
         plt.tick_params(axis="both", labelsize=8)  # Set axis label size to 8
         if i % n != 0:
             plt.yticks([])
-
-    file = csv_file.with_name("tune_scatter_plots.png")  # filename
-    plt.savefig(file, dpi=200)
-    plt.close()
-    LOGGER.info(f"Saved {file}")
+    _save_one_file(csv_file.with_name("tune_scatter_plots.png"))

     # Fitness vs iteration
     x = range(1, len(fitness) + 1)

@@ -1198,11 +1232,7 @@ def plot_tune_results(csv_file="tune_results.csv"):
     plt.ylabel("Fitness")
     plt.grid(True)
     plt.legend()
-
-    file = csv_file.with_name("tune_fitness.png")  # filename
-    plt.savefig(file, dpi=200)
-    plt.close()
-    LOGGER.info(f"Saved {file}")
+    _save_one_file(csv_file.with_name("tune_fitness.png"))


 def output_to_target(output, max_det=300):
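Factoring the repeated save/close/log triplet into a nested `_save_one_file` leaves one call site per figure. The `plt.close()` inside it matters beyond tidiness: `plot_tune_results` opens two figures in sequence, and matplotlib keeps every unclosed figure alive, so closing after each `savefig` bounds memory when the function is called repeatedly during tuning.
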
{ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.42
+Version: 8.2.44
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
{ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/RECORD
CHANGED

@@ -6,8 +6,8 @@ tests/test_engine.py,sha256=fFzcbqZuMkzZHjA5FMddWcqVE703iq8HB_a0Q2lcBKM,4705
 tests/test_explorer.py,sha256=r1pWer2y290Y0DqsM-La7egfEY0497YCdC4rwq3URV4,2178
 tests/test_exports.py,sha256=qc4YOgsGixqYLO6IRNY16-v6z14R0dp5fdni1v222xw,8034
 tests/test_integrations.py,sha256=8Ru7GyKV8j44EEc8X9_E7q7aR4CTOIMPuSagXjSGUxw,5847
-tests/test_python.py,sha256=
-ultralytics/__init__.py,sha256=
+tests/test_python.py,sha256=V8DqHIoflITyxW0Q6-5CIgjg32Yl9VapemC1jcOmlLM,20609
+ultralytics/__init__.py,sha256=fzGf74204Ftaq7UuLb7Eqi_i1m9mKux5uB_wstW-bHA,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=JblkT6Ze9MZ8hSs8gkV8JPcEKNMm-YqRqM4x501Dn9g,21507

@@ -88,7 +88,7 @@ ultralytics/data/base.py,sha256=C3teLnw97ZTbpJHT9P7yYWosAKocMzgJjRe1rxgfpls,1352
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
 ultralytics/data/converter.py,sha256=7640xKuf7LPeoTwoCvgbIXM5xbzyq72Hu2Rf2lrgjRY,17554
 ultralytics/data/dataset.py,sha256=NFaXyHRn64TyTEbtSkr7SkqWXK8bEJl6lZ6M1JwO3MY,22201
-ultralytics/data/loaders.py,sha256=
+ultralytics/data/loaders.py,sha256=UUL7yOmuseAG5RBVI-kLrLr42Vm4kL05Qqnc5jAmNW0,23972
 ultralytics/data/split_dota.py,sha256=fWezt1Bo3jiZ6AyUWdBtTUuvLamPv1t7JD-DirM9gQ8,10142
 ultralytics/data/utils.py,sha256=zqFg4xaWU--fastZmwvZ3DxGyJQ3i4tVNLuYnqS1xxs,31044
 ultralytics/data/explorer/__init__.py,sha256=-Y3m1ZedepOQUv_KW82zaGxvU_PSHcuwUTFqG9BhAr4,113

@@ -165,7 +165,7 @@ ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WVi
 ultralytics/models/yolo/world/train_world.py,sha256=n0XTAHYxufHU5OZ_QjpkHieKik-24z0LrYKzWYbCLvA,4798
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
 ultralytics/nn/autobackend.py,sha256=stqN66L8iloqKxBBYaAespsj2ZoSossouFiFf_Txi0s,31163
-ultralytics/nn/tasks.py,sha256=
+ultralytics/nn/tasks.py,sha256=o0S0hgcscCthLmKekoVWYJo5iCDCRD043QN2cYcTRjE,45798
 ultralytics/nn/modules/__init__.py,sha256=mARjWk83WPYF5phXhXfPbAu2ZohtdbHdi5zzoxyMubo,2553
 ultralytics/nn/modules/block.py,sha256=JiPwcbLzb7O_O5T1KkW0dIGJSfBwPaS-NNYuVkLBDwg,34384
 ultralytics/nn/modules/conv.py,sha256=Ywe87IhuaS22mR2JJ9xjnW8Sb-m7WTjxuqIxV_Dv8lI,12722

@@ -201,9 +201,9 @@ ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,67
 ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
 ultralytics/utils/loss.py,sha256=RwFYL71P-4y6zgOxWIxiK1uj7-h3NBESv-g1DDdykdE,33547
 ultralytics/utils/metrics.py,sha256=3nuFZK_7rnhf6KjhflnRfHVN2i_ZB-LbGvIdbc177N8,53587
-ultralytics/utils/ops.py,sha256=
+ultralytics/utils/ops.py,sha256=Jlb0YBkN_SMVT2AjKPEjxgOtgnj7i7HTBh9FEwpoprU,33509
 ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
-ultralytics/utils/plotting.py,sha256=
+ultralytics/utils/plotting.py,sha256=Aiu_J5mYGugvZ0WxHMbXftlR9lQh53iGPemHb2RT87k,55533
 ultralytics/utils/tal.py,sha256=xuIyryUjaaYHkHPG9GvBwh1xxN2Hq4y3hXOtuERehwY,16017
 ultralytics/utils/torch_utils.py,sha256=LwicOi4hI801LilElKmArs0z8T_e4wPCsyTcd2Y70Pk,27028
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936

@@ -219,9 +219,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.42.dist-info/LICENSE,sha256=
-ultralytics-8.2.42.dist-info/METADATA,sha256=
-ultralytics-8.2.42.dist-info/WHEEL,sha256=
-ultralytics-8.2.42.dist-info/entry_points.txt,sha256=
-ultralytics-8.2.42.dist-info/top_level.txt,sha256=
-ultralytics-8.2.42.dist-info/RECORD,,
+ultralytics-8.2.44.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.44.dist-info/METADATA,sha256=iBZgQIFgqzMcPWXU1xlMHBJS8HSeCjY5LQJHHhHQMnY,41210
+ultralytics-8.2.44.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
+ultralytics-8.2.44.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.44.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.44.dist-info/RECORD,,
{ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/LICENSE
File without changes

{ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/entry_points.txt
File without changes

{ultralytics-8.2.42.dist-info → ultralytics-8.2.44.dist-info}/top_level.txt
File without changes