ultralytics 8.3.55__py3-none-any.whl → 8.3.57__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_solutions.py +14 -7
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +1 -1
- ultralytics/data/augment.py +7 -7
- ultralytics/data/converter.py +2 -2
- ultralytics/data/dataset.py +2 -1
- ultralytics/data/split_dota.py +1 -1
- ultralytics/data/utils.py +49 -0
- ultralytics/engine/exporter.py +7 -4
- ultralytics/models/sam/amg.py +1 -1
- ultralytics/models/sam/modules/blocks.py +11 -11
- ultralytics/models/sam/modules/sam.py +2 -2
- ultralytics/models/sam/predict.py +36 -37
- ultralytics/nn/autobackend.py +1 -1
- ultralytics/trackers/utils/gmc.py +12 -12
- ultralytics/utils/__init__.py +5 -9
- ultralytics/utils/metrics.py +1 -1
- ultralytics/utils/plotting.py +1 -1
- {ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/METADATA +1 -2
- {ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/RECORD +24 -24
- {ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/top_level.txt +0 -0
tests/test_solutions.py
CHANGED
@@ -14,46 +14,53 @@ POSE_VIDEO = "solution_ci_pose_demo.mp4"
@pytest.mark.slow
def test_major_solutions():
- """Test the object counting, heatmap, speed estimation and queue management solution."""
+ """Test the object counting, heatmap, speed estimation, trackzone and queue management solution."""
safe_download(url=f"{ASSETS_URL}/{DEMO_VIDEO}", dir=TMP)
cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))
assert cap.isOpened(), "Error reading video file"
region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]
counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False) # Test object counter
heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False) # Test heatmaps
+ heatmap_count = solutions.Heatmap(
+     colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False, region=region_points
+ ) # Test heatmaps with object counting
speed = solutions.SpeedEstimator(region=region_points, model="yolo11n.pt", show=False) # Test queue manager
queue = solutions.QueueManager(region=region_points, model="yolo11n.pt", show=False) # Test speed estimation
line_analytics = solutions.Analytics(analytics_type="line", model="yolo11n.pt", show=False) # line analytics
pie_analytics = solutions.Analytics(analytics_type="pie", model="yolo11n.pt", show=False) # line analytics
bar_analytics = solutions.Analytics(analytics_type="bar", model="yolo11n.pt", show=False) # line analytics
area_analytics = solutions.Analytics(analytics_type="area", model="yolo11n.pt", show=False) # line analytics
+ trackzone = solutions.TrackZone(region=region_points, model="yolo11n.pt", show=False) # Test trackzone
frame_count = 0 # Required for analytics
while cap.isOpened():
success, im0 = cap.read()
if not success:
break
+ frame_count += 1
original_im0 = im0.copy()
_ = counter.count(original_im0.copy())
_ = heatmap.generate_heatmap(original_im0.copy())
+ _ = heatmap_count.generate_heatmap(original_im0.copy())
_ = speed.estimate_speed(original_im0.copy())
_ = queue.process_queue(original_im0.copy())
_ = line_analytics.process_data(original_im0.copy(), frame_count)
_ = pie_analytics.process_data(original_im0.copy(), frame_count)
_ = bar_analytics.process_data(original_im0.copy(), frame_count)
_ = area_analytics.process_data(original_im0.copy(), frame_count)
+ _ = trackzone.trackzone(original_im0.copy())
cap.release()

# Test workouts monitoring
safe_download(url=f"{ASSETS_URL}/{POSE_VIDEO}", dir=TMP)
-
- assert
- gym = solutions.AIGym(
- while
- success, im0 =
+ cap = cv2.VideoCapture(str(TMP / POSE_VIDEO))
+ assert cap.isOpened(), "Error reading video file"
+ gym = solutions.AIGym(kpts=[5, 11, 13], show=False)
+ while cap.isOpened():
+ success, im0 = cap.read()
if not success:
break
_ = gym.monitor(im0)
-
+ cap.release()


@pytest.mark.slow
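The new coverage above exercises `solutions.TrackZone` end to end. A minimal standalone sketch of the same flow (the video path is a placeholder; the region polygon and model follow the test):

```python
import cv2
from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder video, any readable file works
region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]
trackzone = solutions.TrackZone(region=region_points, model="yolo11n.pt", show=False)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    im0 = trackzone.trackzone(im0)  # tracks objects only inside the region polygon
cap.release()
```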
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -303,7 +303,7 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove
if k in cfg and isinstance(cfg[k], (int, float)):
cfg[k] = str(cfg[k])
if cfg.get("name") == "model": # assign model to 'name' arg
- cfg["name"] = cfg.get("model", "").split(".")[0]
+ cfg["name"] = str(cfg.get("model", "")).split(".")[0]
LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.")

# Type and Value checks
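The added `str()` wrapper matters when `model` arrives as a non-string override such as a `pathlib.Path`, which has no `.split` method. A small illustration (values are hypothetical):

```python
from pathlib import Path

model = Path("yolo11n.pt")       # overrides may carry a Path rather than a str
# model.split(".")               # would raise AttributeError: 'PosixPath' object has no attribute 'split'
name = str(model).split(".")[0]  # "yolo11n", matching the patched get_cfg() behaviour
print(name)
```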
ultralytics/data/augment.py
CHANGED
@@ -642,7 +642,7 @@ class Mosaic(BaseMixTransform):
c = s - w, s + h0 - h, s, s + h0

padw, padh = c[:2]
- x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coordinates

img3[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img3[ymin:ymax, xmin:xmax]
# hp, wp = h, w # height, width previous for next iteration

@@ -771,7 +771,7 @@ class Mosaic(BaseMixTransform):
c = s - w, s + h0 - hp - h, s, s + h0 - hp

padw, padh = c[:2]
- x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coordinates

# Image
img9[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img9[ymin:ymax, xmin:xmax]

@@ -1283,7 +1283,7 @@ class RandomPerspective:
eps (float): Small epsilon value to prevent division by zero.

Returns:
- (numpy.ndarray): Boolean array of shape (n
+ (numpy.ndarray): Boolean array of shape (n) indicating which boxes are candidates.
True values correspond to boxes that meet all criteria.

Examples:

@@ -1320,7 +1320,7 @@ class RandomHSV:
>>> augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
>>> image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
>>> labels = {"img": image}
- >>>
+ >>> augmenter(labels)
>>> augmented_image = augmented_labels["img"]
"""

@@ -1337,7 +1337,7 @@ class RandomHSV:

Examples:
>>> hsv_aug = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
- >>>
+ >>> hsv_aug(image)
"""
self.hgain = hgain
self.sgain = sgain

@@ -1419,7 +1419,7 @@ class RandomFlip:

Examples:
>>> flip = RandomFlip(p=0.5, direction="horizontal")
- >>>
+ >>> flip_with_idx = RandomFlip(p=0.7, direction="vertical", flip_idx=[1, 0, 3, 2, 5, 4])
"""
assert direction in {"horizontal", "vertical"}, f"Support direction `horizontal` or `vertical`, got {direction}"
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."

@@ -2022,7 +2022,7 @@ class Format:
Returns:
(Dict): A dictionary with formatted data, including:
    - 'img': Formatted image tensor.
-     - 'cls': Class
+     - 'cls': Class label's tensor.
    - 'bboxes': Bounding boxes tensor in the specified format.
    - 'masks': Instance masks tensor (if return_mask is True).
    - 'keypoints': Keypoints tensor (if return_keypoint is True).
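The restored docstring examples above describe how these augmenters are driven. A hedged sketch combining them (the label dict layout follows the documented {"img": ...} convention; the keypoint flip indices are illustrative):

```python
import numpy as np
from ultralytics.data.augment import RandomFlip, RandomHSV

image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
labels = {"img": image}

hsv_aug = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
hsv_aug(labels)  # adjusts labels["img"] in place, as in the docstring example

flip = RandomFlip(p=0.5, direction="horizontal")
flip_with_idx = RandomFlip(p=0.7, direction="vertical", flip_idx=[1, 0, 3, 2, 5, 4])  # paired keypoint swap
```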
ultralytics/data/converter.py
CHANGED
@@ -241,7 +241,7 @@ def convert_coco(
```python
from ultralytics.data.converter import convert_coco

- convert_coco("../datasets/coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=
+ convert_coco("../datasets/coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)
convert_coco("../datasets/lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
```

@@ -266,7 +266,7 @@ def convert_coco(
# since LVIS val set contains images from COCO 2017 train in addition to the COCO 2017 val split.
(fn / "train2017").mkdir(parents=True, exist_ok=True)
(fn / "val2017").mkdir(parents=True, exist_ok=True)
- with open(json_file) as f:
+ with open(json_file, encoding="utf-8") as f:
data = json.load(f)

# Create image dict
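Besides the completed docstring example, the second hunk pins the annotation JSON to UTF-8, so files containing non-ASCII captions or category names load the same way regardless of the platform's default codec. A minimal sketch of the same pattern (the file name is a placeholder):

```python
import json

with open("instances_val2017.json", encoding="utf-8") as f:  # placeholder COCO/LVIS annotation file
    data = json.load(f)
print(len(data["images"]))
```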
ultralytics/data/dataset.py
CHANGED
@@ -323,7 +323,8 @@ class GroundingDataset(YOLODataset):
if box[2] <= 0 or box[3] <= 0:
continue

-
+ caption = img["caption"]
+ cat_name = " ".join([caption[t[0] : t[1]] for t in ann["tokens_positive"]])
if cat_name not in cat2id:
cat2id[cat_name] = len(cat2id)
texts.append([cat_name])
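The added lines slice the image caption with each annotation's `tokens_positive` character spans to recover the phrase used as the category name. A toy illustration with made-up values:

```python
img = {"caption": "a red car next to a tall tree"}
ann = {"tokens_positive": [[2, 9]]}  # character span covering "red car"

caption = img["caption"]
cat_name = " ".join([caption[t[0] : t[1]] for t in ann["tokens_positive"]])
print(cat_name)  # -> "red car"
```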
ultralytics/data/split_dota.py
CHANGED
@@ -67,7 +67,7 @@ def load_yolo_dota(data_root, split="train"):

Args:
data_root (str): Data root.
- split (str): The split data set, could be train or val
+ split (str): The split data set, could be `train` or `val`.

Notes:
The directory structure assumed for the DOTA dataset:
ultralytics/data/utils.py
CHANGED
@@ -167,6 +167,55 @@ def verify_image_label(args):
return [None, None, None, None, None, nm, nf, ne, nc, msg]


+ def visualize_image_annotations(image_path, txt_path, label_map):
+     """
+     Visualizes YOLO annotations (bounding boxes and class labels) on an image.
+
+     This function reads an image and its corresponding annotation file in YOLO format, then
+     draws bounding boxes around detected objects and labels them with their respective class names.
+     The bounding box colors are assigned based on the class ID, and the text color is dynamically
+     adjusted for readability, depending on the background color's luminance.
+
+     Args:
+         image_path (str): The path to the image file to annotate, and it can be in formats supported by PIL (e.g., .jpg, .png).
+         txt_path (str): The path to the annotation file in YOLO format, that should contain one line per object with:
+             - class_id (int): The class index.
+             - x_center (float): The X center of the bounding box (relative to image width).
+             - y_center (float): The Y center of the bounding box (relative to image height).
+             - width (float): The width of the bounding box (relative to image width).
+             - height (float): The height of the bounding box (relative to image height).
+         label_map (dict): A dictionary that maps class IDs (integers) to class labels (strings).
+
+     Example:
+         >>> label_map = {0: "cat", 1: "dog", 2: "bird"} # It should include all annotated classes details
+         >>> visualize_image_annotations("path/to/image.jpg", "path/to/annotations.txt", label_map)
+     """
+     import matplotlib.pyplot as plt
+
+     from ultralytics.utils.plotting import colors
+
+     img = np.array(Image.open(image_path))
+     img_height, img_width = img.shape[:2]
+     annotations = []
+     with open(txt_path) as file:
+         for line in file:
+             class_id, x_center, y_center, width, height = map(float, line.split())
+             x = (x_center - width / 2) * img_width
+             y = (y_center - height / 2) * img_height
+             w = width * img_width
+             h = height * img_height
+             annotations.append((x, y, w, h, int(class_id)))
+     fig, ax = plt.subplots(1) # Plot the image and annotations
+     for x, y, w, h, label in annotations:
+         color = tuple(c / 255 for c in colors(label, True)) # Get and normalize the RGB color
+         rect = plt.Rectangle((x, y), w, h, linewidth=2, edgecolor=color, facecolor="none") # Create a rectangle
+         ax.add_patch(rect)
+         luminance = 0.2126 * color[0] + 0.7152 * color[1] + 0.0722 * color[2] # Formula for luminance
+         ax.text(x, y - 5, label_map[label], color="white" if luminance < 0.5 else "black", backgroundcolor=color)
+     ax.imshow(img)
+     plt.show()
+
+
def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
"""
Convert a list of polygons to a binary mask of the specified image size.
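Quick usage sketch for the new `visualize_image_annotations` helper; the paths and label map are placeholders:

```python
from ultralytics.data.utils import visualize_image_annotations

label_map = {0: "person", 1: "bicycle"}  # must cover every class id present in the label file
visualize_image_annotations(
    "path/to/image.jpg",  # any image PIL can open
    "path/to/image.txt",  # YOLO labels: class x_center y_center width height (normalized)
    label_map,
)
```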
ultralytics/engine/exporter.py
CHANGED
@@ -250,7 +250,8 @@ class Exporter:
self.device = select_device("cpu" if self.args.device is None else self.args.device)

# Argument compatibility checks
-
+ fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
+ validate_args(fmt, self.args, fmt_keys)
if imx and not self.args.int8:
LOGGER.warning("WARNING ⚠️ IMX only supports int8 export, setting int8=True.")
self.args.int8 = True

@@ -285,6 +286,7 @@ class Exporter:
"(torchscript, onnx, openvino, engine, coreml) formats. "
"See https://docs.ultralytics.com/models/yolo-world for details."
)
+ model.clip_model = None # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
if self.args.int8 and not self.args.data:
self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")] # assign default data
LOGGER.warning(

@@ -368,6 +370,7 @@ class Exporter:
"batch": self.args.batch,
"imgsz": self.imgsz,
"names": model.names,
+ "args": {k: v for k, v in self.args if k in fmt_keys},
} # model metadata
if model.task == "pose":
self.metadata["kpt_shape"] = model.model[-1].kpt_shape

@@ -602,7 +605,7 @@ class Exporter:
@try_export
def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
"""YOLO Paddle export."""
- check_requirements(("paddlepaddle", "x2paddle"))
+ check_requirements(("paddlepaddle-gpu" if torch.cuda.is_available() else "paddlepaddle", "x2paddle"))
import x2paddle # noqa
from x2paddle.convert import pytorch2paddle # noqa

@@ -949,7 +952,7 @@ class Exporter:
"sng4onnx>=1.0.1", # required by 'onnx2tf' package
"onnx_graphsurgeon>=0.3.26", # required by 'onnx2tf' package
"onnx>=1.12.0",
- "onnx2tf>1.17.5,<=1.
+ "onnx2tf>1.17.5,<=1.26.3",
"onnxslim>=0.1.31",
"tflite_support<=0.4.3" if IS_JETSON else "tflite_support", # fix ImportError 'GLIBCXX_3.4.29'
"flatbuffers>=23.5.26,<100", # update old 'flatbuffers' included inside tensorflow package

@@ -1136,7 +1139,7 @@ class Exporter:
if getattr(self.model, "end2end", False):
raise ValueError("IMX export is not supported for end2end models.")
if "C2f" not in self.model.__str__():
- raise ValueError("IMX export is only supported for
+ raise ValueError("IMX export is only supported for YOLOv8n detection models")
check_requirements(("model-compression-toolkit==2.1.1", "sony-custom-layers==0.2.0", "tensorflow==2.12.0"))
check_requirements("imx500-converter[pt]==3.14.3") # Separate requirements for imx500-converter
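A hedged sketch of an export run touched by the changes above: per-format arguments are validated up front and recorded under an "args" key in the model metadata, and the Paddle path now installs `paddlepaddle-gpu` when CUDA is available. The argument values here are illustrative, not prescriptive:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.export(format="onnx", imgsz=640, dynamic=True)  # only ONNX-relevant args end up in metadata["args"]
# model.export(format="paddle")  # would pull paddlepaddle-gpu instead of paddlepaddle on a CUDA machine
```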
ultralytics/models/sam/amg.py
CHANGED
@@ -76,7 +76,7 @@ def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer:
def generate_crop_boxes(
im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
) -> Tuple[List[List[int]], List[int]]:
- """Generates crop boxes of varying sizes for
+ """Generates crop boxes of varying sizes for multiscale image processing, with layered overlapping regions."""
crop_boxes, layer_idxs = [], []
im_h, im_w = im_size
short_side = min(im_h, im_w)
ultralytics/models/sam/modules/blocks.py
CHANGED
@@ -502,11 +502,11 @@ def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.T

class MultiScaleAttention(nn.Module):
"""
- Implements
+ Implements multiscale self-attention with optional query pooling for efficient feature extraction.

- This class provides a flexible implementation of
+ This class provides a flexible implementation of multiscale attention, allowing for optional
downsampling of query features through pooling. It's designed to enhance the model's ability to
- capture
+ capture multiscale information in visual tasks.

Attributes:
dim (int): Input dimension of the feature map.

@@ -518,7 +518,7 @@ class MultiScaleAttention(nn.Module):
proj (nn.Linear): Output projection.

Methods:
- forward: Applies
+ forward: Applies multiscale attention to the input tensor.

Examples:
>>> import torch

@@ -537,7 +537,7 @@ class MultiScaleAttention(nn.Module):
num_heads: int,
q_pool: nn.Module = None,
):
- """Initializes
+ """Initializes multiscale attention with optional query pooling for efficient feature extraction."""
super().__init__()

self.dim = dim

@@ -552,7 +552,7 @@ class MultiScaleAttention(nn.Module):
self.proj = nn.Linear(dim_out, dim_out)

def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Applies
+ """Applies multiscale attention with optional query pooling to extract multiscale features."""
B, H, W, _ = x.shape
# qkv with shape (B, H * W, 3, nHead, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)

@@ -582,9 +582,9 @@ class MultiScaleAttention(nn.Module):

class MultiScaleBlock(nn.Module):
"""
- A
+ A multiscale attention block with window partitioning and query pooling for efficient vision transformers.

- This class implements a
+ This class implements a multiscale attention mechanism with optional window partitioning and downsampling,
designed for use in vision transformer architectures.

Attributes:

@@ -601,7 +601,7 @@ class MultiScaleBlock(nn.Module):
proj (nn.Linear | None): Projection layer for dimension mismatch.

Methods:
- forward: Processes input tensor through the
+ forward: Processes input tensor through the multiscale block.

Examples:
>>> block = MultiScaleBlock(dim=256, dim_out=512, num_heads=8, window_size=7)

@@ -623,7 +623,7 @@ class MultiScaleBlock(nn.Module):
act_layer: nn.Module = nn.GELU,
window_size: int = 0,
):
- """Initializes a
+ """Initializes a multiscale attention block with window partitioning and optional query pooling."""
super().__init__()

if isinstance(norm_layer, str):

@@ -660,7 +660,7 @@ class MultiScaleBlock(nn.Module):
self.proj = nn.Linear(dim, dim_out)

def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Processes input through
+ """Processes input through multiscale attention and MLP, with optional windowing and downsampling."""
shortcut = x # B, H, W, C
x = self.norm1(x)
ultralytics/models/sam/modules/sam.py
CHANGED
@@ -425,7 +425,7 @@ class SAM2Model(torch.nn.Module):
low_res_masks: Tensor of shape (B, 1, H*4, W*4) with the best low-resolution mask.
high_res_masks: Tensor of shape (B, 1, H*16, W*16) with the best high-resolution mask.
obj_ptr: Tensor of shape (B, C) with object pointer vector for the output mask.
- object_score_logits: Tensor of shape (B
+ object_score_logits: Tensor of shape (B) with object score logits.

Where M is 3 if multimask_output=True, and 1 if multimask_output=False.

@@ -643,7 +643,7 @@ class SAM2Model(torch.nn.Module):
if not is_init_cond_frame:
# Retrieve the memories encoded with the maskmem backbone
to_cat_memory, to_cat_memory_pos_embed = [], []
- # Add conditioning
+ # Add conditioning frame's output first (all cond frames have t_pos=0 for
# when getting temporal positional embedding below)
assert len(output_dict["cond_frame_outputs"]) > 0
# Select a maximum number of temporally closest cond frames for cross attention
ultralytics/models/sam/predict.py
CHANGED
@@ -91,9 +91,9 @@ class Predictor(BasePredictor):
_callbacks (Dict | None): Dictionary of callback functions to customize behavior.

Examples:
- >>>
- >>>
- >>>
+ >>> predictor_example = Predictor(cfg=DEFAULT_CFG)
+ >>> predictor_example_with_imgsz = Predictor(overrides={"imgsz": 640})
+ >>> predictor_example_with_callback = Predictor(_callbacks={"on_predict_start": custom_callback})
"""
if overrides is None:
overrides = {}

@@ -215,7 +215,7 @@ class Predictor(BasePredictor):
im (torch.Tensor): Preprocessed input image tensor with shape (N, C, H, W).
bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
- labels (np.ndarray | List | None): Point prompt labels with shape (N
+ labels (np.ndarray | List | None): Point prompt labels with shape (N) or (N, num_points). 1 for foreground, 0 for background.
masks (np.ndarray | None): Low-res masks from previous predictions with shape (N, H, W). For SAM, H=W=256.
multimask_output (bool): Flag to return multiple masks for ambiguous prompts.

@@ -260,7 +260,7 @@ class Predictor(BasePredictor):
dst_shape (tuple): The target shape (height, width) for the prompts.
bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
- labels (np.ndarray | List | None): Point prompt labels with shape (N
+ labels (np.ndarray | List | None): Point prompt labels with shape (N) or (N, num_points). 1 for foreground, 0 for background.
masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.

Raises:

@@ -853,8 +853,8 @@ class SAM2VideoPredictor(SAM2Predictor):

Examples:
>>> predictor = SAM2VideoPredictor(cfg=DEFAULT_CFG)
- >>>
- >>>
+ >>> predictor_example_with_imgsz = SAM2VideoPredictor(overrides={"imgsz": 640})
+ >>> predictor_example_with_callback = SAM2VideoPredictor(_callbacks={"on_predict_start": custom_callback})
"""
super().__init__(cfg, overrides, _callbacks)
self.inference_state = {}

@@ -1096,7 +1096,7 @@ class SAM2VideoPredictor(SAM2Predictor):
# to `propagate_in_video_preflight`).
consolidated_frame_inds = self.inference_state["consolidated_frame_inds"]
for is_cond in {False, True}:
- # Separately consolidate conditioning and non-conditioning temp
+ # Separately consolidate conditioning and non-conditioning temp outputs
storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
# Find all the frames that contain temporary outputs for any objects
# (these should be the frames that have just received clicks for mask inputs

@@ -1161,36 +1161,35 @@ class SAM2VideoPredictor(SAM2Predictor):
assert predictor.dataset is not None
assert predictor.dataset.mode == "video"

- inference_state = {
- … (original dictionary entries not rendered in the source diff view)
+ inference_state = {
+     "num_frames": predictor.dataset.frames,
+     "point_inputs_per_obj": {}, # inputs points on each frame
+     "mask_inputs_per_obj": {}, # inputs mask on each frame
+     "constants": {}, # values that don't change across frames (so we only need to hold one copy of them)
+     # mapping between client-side object id and model-side object index
+     "obj_id_to_idx": OrderedDict(),
+     "obj_idx_to_id": OrderedDict(),
+     "obj_ids": [],
+     # A storage to hold the model's tracking results and states on each frame
+     "output_dict": {
+         "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
+         "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
+     },
+     # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
+     "output_dict_per_obj": {},
+     # A temporary storage to hold new outputs when user interact with a frame
+     # to add clicks or mask (it's merged into "output_dict" before propagation starts)
+     "temp_output_dict_per_obj": {},
+     # Frames that already holds consolidated outputs from click or mask inputs
+     # (we directly use their consolidated outputs during tracking)
+     "consolidated_frame_inds": {
+         "cond_frame_outputs": set(), # set containing frame indices
+         "non_cond_frame_outputs": set(), # set containing frame indices
+     },
+     # metadata for each tracking frame (e.g. which direction it's tracked)
+     "tracking_has_started": False,
+     "frames_already_tracked": [],
}
- # metadata for each tracking frame (e.g. which direction it's tracked)
- inference_state["tracking_has_started"] = False
- inference_state["frames_already_tracked"] = []
predictor.inference_state = inference_state

def get_im_features(self, im, batch=1):
ultralytics/nn/autobackend.py
CHANGED
@@ -133,7 +133,7 @@ class AutoBackend(nn.Module):

# Set device
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
- if cuda and not any([nn_module, pt, jit, engine, onnx]): # GPU dataloader formats
+ if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]): # GPU dataloader formats
device = torch.device("cpu")
cuda = False
ultralytics/trackers/utils/gmc.py
CHANGED
@@ -26,9 +26,9 @@ class GMC:
Methods:
__init__: Initializes a GMC object with the specified method and downscale factor.
apply: Applies the chosen method to a raw frame and optionally uses provided detections.
-
-
-
+ apply_ecc: Applies the ECC algorithm to a raw frame.
+ apply_features: Applies feature-based methods like ORB or SIFT to a raw frame.
+ apply_sparseoptflow: Applies the Sparse Optical Flow method to a raw frame.
reset_params: Resets the internal parameters of the GMC object.

Examples:

@@ -108,15 +108,15 @@ class GMC:
(480, 640, 3)
"""
if self.method in {"orb", "sift"}:
- return self.
+ return self.apply_features(raw_frame, detections)
elif self.method == "ecc":
- return self.
+ return self.apply_ecc(raw_frame)
elif self.method == "sparseOptFlow":
- return self.
+ return self.apply_sparseoptflow(raw_frame)
else:
return np.eye(2, 3)

- def
+ def apply_ecc(self, raw_frame: np.array) -> np.array:
"""
Apply the ECC (Enhanced Correlation Coefficient) algorithm to a raw frame for motion compensation.

@@ -128,7 +128,7 @@ class GMC:

Examples:
>>> gmc = GMC(method="ecc")
- >>> processed_frame = gmc.
+ >>> processed_frame = gmc.apply_ecc(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
>>> print(processed_frame)
[[1. 0. 0.]
[0. 1. 0.]]

@@ -161,7 +161,7 @@ class GMC:

return H

- def
+ def apply_features(self, raw_frame: np.array, detections: list = None) -> np.array:
"""
Apply feature-based methods like ORB or SIFT to a raw frame.

@@ -175,7 +175,7 @@ class GMC:
Examples:
>>> gmc = GMC(method="orb")
>>> raw_frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
- >>> processed_frame = gmc.
+ >>> processed_frame = gmc.apply_features(raw_frame)
>>> print(processed_frame.shape)
(2, 3)
"""

@@ -304,7 +304,7 @@ class GMC:

return H

- def
+ def apply_sparseoptflow(self, raw_frame: np.array) -> np.array:
"""
Apply Sparse Optical Flow method to a raw frame.

@@ -316,7 +316,7 @@ class GMC:

Examples:
>>> gmc = GMC()
- >>> result = gmc.
+ >>> result = gmc.apply_sparseoptflow(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
>>> print(result)
[[1. 0. 0.]
[0. 1. 0.]]
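The renames above make the per-method entry points public. A short sketch exercising them (the frame is random noise; real use feeds consecutive video frames):

```python
import numpy as np
from ultralytics.trackers.utils.gmc import GMC

frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)

gmc = GMC(method="orb")
H = gmc.apply(frame)           # dispatches to apply_features() for "orb"/"sift"
H = gmc.apply_features(frame)  # 2x3 transform estimated from matched keypoints

H_ecc = GMC(method="ecc").apply_ecc(frame)                       # ECC-based alignment
H_flow = GMC(method="sparseOptFlow").apply_sparseoptflow(frame)  # sparse optical flow
print(H.shape)  # (2, 3)
```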
ultralytics/utils/__init__.py
CHANGED
@@ -524,13 +524,9 @@ def read_device_model() -> str:
is_raspberrypi().

Returns:
- (str):
+ (str): Kernel release information.
"""
-
- with open("/proc/device-tree/model") as f:
- return f.read()
- except Exception:
- return ""
+ return platform.release().lower()


def is_ubuntu() -> bool:

@@ -602,7 +598,7 @@ def is_raspberrypi() -> bool:
Returns:
(bool): True if running on a Raspberry Pi, False otherwise.
"""
- return "
+ return "rpi" in DEVICE_MODEL


def is_jetson() -> bool:

@@ -612,7 +608,7 @@ def is_jetson() -> bool:
Returns:
(bool): True if running on an NVIDIA Jetson device, False otherwise.
"""
- return
+ return "tegra" in DEVICE_MODEL


def is_online() -> bool:

@@ -802,7 +798,7 @@ def get_user_config_dir(sub_dir="Ultralytics"):


# Define constants (required below)
-
+ DEVICE_MODEL = read_device_model() # is_jetson() and is_raspberrypi() depend on this constant
ONLINE = is_online()
IS_COLAB = is_colab()
IS_KAGGLE = is_kaggle()
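The rewritten helpers above derive the device model from the kernel release string instead of reading `/proc/device-tree/model`. A self-contained sketch of the same logic (the release strings in the comment are examples, not guaranteed values):

```python
import platform

DEVICE_MODEL = platform.release().lower()  # e.g. "6.6.31+rpt-rpi-v8" on Raspberry Pi OS, "5.10.104-tegra" on Jetson

def is_raspberrypi() -> bool:
    return "rpi" in DEVICE_MODEL

def is_jetson() -> bool:
    return "tegra" in DEVICE_MODEL

print(is_raspberrypi(), is_jetson())
```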
ultralytics/utils/metrics.py
CHANGED
ultralytics/utils/plotting.py
CHANGED
@@ -1269,7 +1269,7 @@ def plt_color_scatter(v, f, bins=20, cmap="viridis", alpha=0.8, edgecolors="none

def plot_tune_results(csv_file="tune_results.csv"):
"""
- Plot the evolution results stored in
+ Plot the evolution results stored in a 'tune_results.csv' file. The function generates a scatter plot for each key
in the CSV, color-coded based on fitness scores. The best-performing configurations are highlighted on the plots.

Args:
{ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: ultralytics
- Version: 8.3.
+ Version: 8.3.57
Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
Maintainer-email: Ultralytics <hello@ultralytics.com>

@@ -57,7 +57,6 @@ Requires-Dist: coverage[toml]; extra == "dev"
Requires-Dist: mkdocs>=1.6.0; extra == "dev"
Requires-Dist: mkdocs-material>=9.5.9; extra == "dev"
Requires-Dist: mkdocstrings[python]; extra == "dev"
- Requires-Dist: mkdocs-jupyter; extra == "dev"
Requires-Dist: mkdocs-redirects; extra == "dev"
Requires-Dist: mkdocs-ultralytics-plugin>=0.1.8; extra == "dev"
Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
{ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/RECORD
CHANGED
@@ -6,11 +6,11 @@ tests/test_engine.py,sha256=dcEcJsMQh61rDSNv7l4TIAgybLpzjVwerv9JZC_KCM8,4934
tests/test_exports.py,sha256=1MvhcQ2qHdbJImHII-bFarcaIcm-kPlEK-OdFLxnj7o,8769
tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
tests/test_python.py,sha256=S399TdcZcymRJIYrKlXPiROWg_izHL3TGhHgW15kcrA,23210
- tests/test_solutions.py,sha256=
- ultralytics/__init__.py,sha256=
+ tests/test_solutions.py,sha256=O-GM6qBdew8BQmkpt8XLbyQJTcTdElz1yTBL1WOJsWw,4177
+ ultralytics/__init__.py,sha256=PJ8JWUuq5nR3GTR4c-nHYo75Zux3ml7WHndDUSF6Ob8,681
ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=
+ ultralytics/cfg/__init__.py,sha256=MJ52wv8-rQHvD8ZBJ4RA31npqgCtUtFYEG4sQ2kciFc,39031
ultralytics/cfg/default.yaml,sha256=FcXbvTXXvMpssk9fSwdlnVTtyqfmlYE9gAcHsf0OMf8,8347
ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195

@@ -92,16 +92,16 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=FDIrZ3hAhRtMfDl654pt1HIexmPqlFQK-3l
ultralytics/cfg/trackers/bytetrack.yaml,sha256=rBWY4RjjX6PTO2o6TUJFYHVgXNZHCN5TuBuzwuPYVjA,723
ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
ultralytics/data/annotator.py,sha256=JNmS6uELlEABrU5ViVJiPnjt44v-Us7j39Bwoug_73Y,3117
- ultralytics/data/augment.py,sha256=
+ ultralytics/data/augment.py,sha256=xE3fyPSCsgzz1vo1758HQA3YQhO4QZ2TqM0So0tE434,120479
ultralytics/data/base.py,sha256=ZCIhAyFfxXVp5fVnYD8mwbksNALJTayBKIR5FKGV7ZM,15168
ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
- ultralytics/data/converter.py,sha256=
- ultralytics/data/dataset.py,sha256=
+ ultralytics/data/converter.py,sha256=rWTg5cLF7uTB9vaUOmNSUxRRkZldCsAZ21wOFAGMqzQ,24407
+ ultralytics/data/dataset.py,sha256=6_6sHSjJYX7lVUzqBqVW_q_REXbjeoh6dHqAqH9krfA,23216
ultralytics/data/loaders.py,sha256=k1Vq7Rxv6tpsRsYuMdZeI3_f2BciAaZwhDQU8iHhVJM,28506
- ultralytics/data/split_dota.py,sha256=
- ultralytics/data/utils.py,sha256=
+ ultralytics/data/split_dota.py,sha256=FxsuBhClSZN4XHu8ETTiA2oP6yrjp49T5lbZaiI0fw4,10692
+ ultralytics/data/utils.py,sha256=xCobqNksarqQsGqQZ-g5N9nSwaQwiaMAVB92s903RQA,33807
ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/engine/exporter.py,sha256=
+ ultralytics/engine/exporter.py,sha256=cBJ0pYNlL4BS913Pkq_M6jccPys1qeHleQgkrufWwQI,68866
ultralytics/engine/model.py,sha256=3csd_Ml9M6CKxUKU7vRZadanNnJw96sNIx71qHVGdGQ,53082
ultralytics/engine/predictor.py,sha256=o1RYMFH3_uVOMCIXXakpRYpNzoD-6Bdsxryt5fuBni0,17712
ultralytics/engine/results.py,sha256=a1XFZRPwqgKDBOEAibHuT9nP2xefLiWVsMoBJbcr4iA,75058

@@ -129,16 +129,16 @@ ultralytics/models/rtdetr/predict.py,sha256=cxULdJAzL9RM11Y24tIguKcNJZXwynNsrWRC
ultralytics/models/rtdetr/train.py,sha256=m8S9Z94kNaH0HN9TR51iQpToIDV8AUoXpkI5qMdLB7Q,3847
ultralytics/models/rtdetr/val.py,sha256=xVjZShZ1AvES97wVekl2q_1g20Pq-IIHhkJdWtxMncs,5566
ultralytics/models/sam/__init__.py,sha256=E4IHie-T0HYCklKW6-kqlW84GJJdD6rujf7W_SgRlrs,218
- ultralytics/models/sam/amg.py,sha256=
+ ultralytics/models/sam/amg.py,sha256=X7qYm0TzzlLxPNNiXAGsb-Vir3XnK-ZZXGsMlCZSnsA,8708
ultralytics/models/sam/build.py,sha256=ac7Pop5f51TVzGgfV6bbXSFDA9fBVxERUc_6WDQ-9Ys,12487
ultralytics/models/sam/model.py,sha256=CE4ruw1Iwrp7-9aHGspQihQaTVsqagYrQLWmpXYodLw,7382
- ultralytics/models/sam/predict.py,sha256
+ ultralytics/models/sam/predict.py,sha256=VYxmaTuCCTSWCxToTdvi6XLoNLuTEbVJlOW9qwgGano,82541
ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/models/sam/modules/blocks.py,sha256=
+ ultralytics/models/sam/modules/blocks.py,sha256=sv8YmqrAEAeC3naq8zUfTiY1xVJOwTw5IPraeT1mLCE,45907
ultralytics/models/sam/modules/decoders.py,sha256=mODsqnTN_CjE3H0Sh9cd8PfTnHANPjGB1bjqHxfezSg,25830
ultralytics/models/sam/modules/encoders.py,sha256=Ay3sYeUonCf6URXBdB0dDwyngovevW8hUDgULRnNIoA,34824
ultralytics/models/sam/modules/memory_attention.py,sha256=XilWBnRfH8wZxIoL2-yEk-dRypCsS0Jf_9t8WJxXKg0,9722
- ultralytics/models/sam/modules/sam.py,sha256=
+ ultralytics/models/sam/modules/sam.py,sha256=uJALGzx9OpbFGeqLGSOluLJA8IoN0jF_rGoomy2-tsM,52723
ultralytics/models/sam/modules/tiny_encoder.py,sha256=0Gai3BzQPU5Jz5P696_U2_3rkLg_QQTm_Wm4hZmR3gk,41344
ultralytics/models/sam/modules/transformer.py,sha256=nuhF_14LGrr5uYCAP9XCXps-zlVcT4OWO0evXWDxPwI,16081
ultralytics/models/sam/modules/utils.py,sha256=Y36V6BVy6GeaAvKE8gHmoDIa-f5LjJpmSVwywNkv2yk,12315

@@ -171,7 +171,7 @@ ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2
ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk2EDYrHdRg,3686
ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
- ultralytics/nn/autobackend.py,sha256=
+ ultralytics/nn/autobackend.py,sha256=7WyyipeaAqKCFUAA7_y2jIOz2e90GxHrD7c7ARe4ZJI,35556
ultralytics/nn/tasks.py,sha256=pqRe1F1HOH8AjLZpFaZCGb5gSYsXH0eVnHITKDTFFhI,48527
ultralytics/nn/modules/__init__.py,sha256=xhW2BennT9U_VaMXVpRu-bdLgp1BXt9L8mkIUBE3idU,2625
ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895

@@ -200,10 +200,10 @@ ultralytics/trackers/bot_sort.py,sha256=766grVQExvonb087Wy-SB32TSwYYsTEM22yoWeQ_
ultralytics/trackers/byte_tracker.py,sha256=jl3egXlItfqPfbmxsLebvA7eKZWa1Ghj2Qc9wNTtebQ,20818
ultralytics/trackers/track.py,sha256=BfkdmdgTvoI8Raz6yuDQMrbCrWOGm9Lfu3aBTXYv2j8,3874
ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/trackers/utils/gmc.py,sha256=
+ ultralytics/trackers/utils/gmc.py,sha256=NltwdGQMCzew7aCnAxNrgbrqmjdnSJfuDRn_xegVhIw,14569
ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hHKx_Sx1j7t3oYGs,21349
ultralytics/trackers/utils/matching.py,sha256=Y94cMwo9TLd-IWFqHKp8dHSDyguS1qtOeebBMalWnJQ,7078
- ultralytics/utils/__init__.py,sha256
+ ultralytics/utils/__init__.py,sha256=eCoQewuEZ045peO3XtymyNp9oUH22_AqcTUEtgekuN8,49302
ultralytics/utils/autobatch.py,sha256=yBkojvLhZofwwKnaA8BnEIFXp3UWt7rVmyuh-dl1Ymk,5020
ultralytics/utils/benchmarks.py,sha256=EqvP8AOks7D_QqUy-DmI4WI5MA0KNYqVINjm4XF5GqM,25640
ultralytics/utils/checks.py,sha256=1Cu8k2qg_pFaoHvkiE07Ab5ZGLyZHZxFAg1IMM63CBQ,30145

@@ -213,10 +213,10 @@ ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,8
ultralytics/utils/files.py,sha256=uiXQSVABJRoI5ImnM6ndEBIFbECfksmWNEldBg8GnSo,8224
ultralytics/utils/instance.py,sha256=FXL1Ihlbn2fNZG_IaJpXul9Sd4QDLwotCo2U84moSlA,16853
ultralytics/utils/loss.py,sha256=_d2L4lIemaeAHrGHqf9q-KI7yTgHKCbIcYAF7Y-farI,34185
- ultralytics/utils/metrics.py,sha256=
+ ultralytics/utils/metrics.py,sha256=acnxUxseiyYfb1uAq2gcMOTslYEJOMOrpbbpPU_JhSA,53778
ultralytics/utils/ops.py,sha256=d5sLAvgqP36Pq_dMQE1DZFYhmIGUMrlrxh1czcuUfC4,33546
ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
- ultralytics/utils/plotting.py,sha256=
+ ultralytics/utils/plotting.py,sha256=SudFfq9KOfprtpXsurfWEOeQqVsU0K3aVvcOGFcNB4A,62959
ultralytics/utils/tal.py,sha256=thD_AEhVmhaZqmS5szZMvpKO-RKOeZwfX1BYAhdnA0o,18470
ultralytics/utils/torch_utils.py,sha256=7qP0YhF5d8qCUD2XiOwXjCTOw8pje6HvX42J8oL3Ldw,33263
ultralytics/utils/triton.py,sha256=HL_gjIwMoi-WD8gJLTmemBehIto8eRz3HdK8fcROLk0,4043

@@ -232,9 +232,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTg
ultralytics/utils/callbacks/raytune.py,sha256=Ck_yFzg7UZXiDWrLHaltjQybzVWSFDfzpdrx9ZYTRfI,700
ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
ultralytics/utils/callbacks/wb.py,sha256=sizfTa-xI9k2pnDSP_Q9pHZEFwcl__gSFM0AcneuRpY,7058
- ultralytics-8.3.
- ultralytics-8.3.
- ultralytics-8.3.
- ultralytics-8.3.
- ultralytics-8.3.
- ultralytics-8.3.
+ ultralytics-8.3.57.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.57.dist-info/METADATA,sha256=VUcX-u-yE8tCayKn0Qk3CA_QIXebOaxrs6ixuc4NALU,35286
+ ultralytics-8.3.57.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ ultralytics-8.3.57.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.57.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.57.dist-info/RECORD,,
{ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/LICENSE
File without changes
{ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/WHEEL
File without changes
{ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/entry_points.txt
File without changes
{ultralytics-8.3.55.dist-info → ultralytics-8.3.57.dist-info}/top_level.txt
File without changes