ultralytics-opencv-headless 8.4.1__py3-none-any.whl → 8.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_exports.py +0 -2
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +1 -3
- ultralytics/data/converter.py +49 -30
- ultralytics/engine/results.py +19 -10
- ultralytics/engine/trainer.py +8 -10
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/METADATA +1 -1
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/RECORD +12 -12
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/WHEEL +0 -0
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/entry_points.txt +0 -0
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/licenses/LICENSE +0 -0
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/top_level.txt +0 -0
tests/test_exports.py
CHANGED
@@ -240,7 +240,6 @@ def test_export_mnn_matrix(task, int8, half, batch):
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(ARM64, reason="NCNN not supported on ARM64")  # https://github.com/Tencent/ncnn/issues/6509
 @pytest.mark.skipif(not TORCH_2_0, reason="NCNN inference causes segfault on PyTorch<2.0")
 def test_export_ncnn():
     """Test YOLO export to NCNN format."""
@@ -249,7 +248,6 @@ def test_export_ncnn():
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(ARM64, reason="NCNN not supported on ARM64")  # https://github.com/Tencent/ncnn/issues/6509
 @pytest.mark.skipif(not TORCH_2_0, reason="NCNN inference causes segfault on PyTorch<2.0")
 @pytest.mark.parametrize("task, half, batch", list(product(TASKS, [True, False], [1])))
 def test_export_ncnn_matrix(task, half, batch):
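Both removed skip markers are the ARM64 guard, so the NCNN export tests now also run on ARM64 runners. A minimal sketch of the export path these tests exercise, using the public YOLO API; the weights and test image below are illustrative choices, not values taken from the test file:

    from ultralytics import YOLO

    # Export a small detection model to NCNN (FP16 requested via half=True).
    model = YOLO("yolo11n.pt")  # illustrative weights
    ncnn_model = model.export(format="ncnn", half=True)

    # The exported model can be reloaded and run like any other format.
    YOLO(ncnn_model)("https://ultralytics.com/images/bus.jpg")

The matrix test repeats the same flow per task over the half/batch combinations listed in pytest.mark.parametrize.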
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -305,8 +305,6 @@ def get_cfg(
     # Merge overrides
     if overrides:
         overrides = cfg2dict(overrides)
-        if "save_dir" not in cfg:
-            overrides.pop("save_dir", None)  # special override keys to ignore
         check_dict_alignment(cfg, overrides)
         cfg = {**cfg, **overrides}  # merge cfg and overrides dicts (prefer overrides)
 
@@ -494,7 +492,7 @@ def check_dict_alignment(
     base_keys, custom_keys = (frozenset(x.keys()) for x in (base, custom))
     # Allow 'augmentations' as a valid custom parameter for custom Albumentations transforms
     if allowed_custom_keys is None:
-        allowed_custom_keys = {"augmentations"}
+        allowed_custom_keys = {"augmentations", "save_dir"}
     if mismatched := [k for k in custom_keys if k not in base_keys and k not in allowed_custom_keys]:
         from difflib import get_close_matches
 
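The net effect: a save_dir entry in overrides is no longer silently popped before validation; it is whitelisted in check_dict_alignment the same way augmentations already was. A minimal sketch, assuming get_cfg and check_dict_alignment keep the signatures shown above (the directory name is illustrative):

    from ultralytics.cfg import check_dict_alignment, get_cfg

    base = vars(get_cfg())  # default configuration as a plain dict

    # Previously "save_dir" was stripped from overrides before this check ran;
    # now it passes as an allowed custom key, so no mismatch is reported here.
    check_dict_alignment(base, {"save_dir": "runs/custom_dir"})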
ultralytics/data/converter.py
CHANGED
@@ -749,12 +749,13 @@ def convert_to_multispectral(path: str | Path, n_channels: int = 10, replace: bo
 async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Path | None = None) -> Path:
     """Convert NDJSON dataset format to Ultralytics YOLO11 dataset structure.
 
-    This function converts datasets stored in NDJSON (Newline Delimited JSON) format to the standard YOLO format
-    separate directories for images and labels.
-
+    This function converts datasets stored in NDJSON (Newline Delimited JSON) format to the standard YOLO format. For
+    detection/segmentation/pose/obb tasks, it creates separate directories for images and labels. For classification
+    tasks, it creates the ImageNet-style {split}/{class_name}/ folder structure. It supports parallel processing for
+    efficient conversion of large datasets and can download images from URLs.
 
     The NDJSON format consists of:
-    - First line: Dataset metadata with class names and configuration
+    - First line: Dataset metadata with class names, task type, and configuration
     - Subsequent lines: Individual image records with annotations and optional URLs
 
     Args:
@@ -763,7 +764,7 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
             None, uses the parent directory of the NDJSON file. Defaults to None.
 
     Returns:
-        (Path): Path to the generated data.yaml file
+        (Path): Path to the generated data.yaml file (detection) or dataset directory (classification).
 
     Examples:
         Convert a local NDJSON file:
@@ -790,36 +791,51 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
     dataset_dir = output_path / ndjson_path.stem
     splits = {record["split"] for record in image_records}
 
-    #
-
-
-    data_yaml["names"] = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
-    data_yaml.pop("class_names")
+    # Check if this is a classification dataset
+    is_classification = dataset_record.get("task") == "classify"
+    class_names = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
 
-
-
-
-
+    # Create base directories
+    dataset_dir.mkdir(parents=True, exist_ok=True)
+    data_yaml = None
+
+    if not is_classification:
+        # Detection/segmentation/pose/obb: prepare YAML and create base structure
+        data_yaml = dict(dataset_record)
+        data_yaml["names"] = class_names
+        data_yaml.pop("class_names", None)
+        data_yaml.pop("type", None)  # Remove NDJSON-specific fields
+        for split in sorted(splits):
+            (dataset_dir / "images" / split).mkdir(parents=True, exist_ok=True)
+            (dataset_dir / "labels" / split).mkdir(parents=True, exist_ok=True)
+            data_yaml[split] = f"images/{split}"
 
     async def process_record(session, semaphore, record):
         """Process single image record with async session."""
         async with semaphore:
             split, original_name = record["split"], record["file"]
-            label_path = dataset_dir / "labels" / split / f"{Path(original_name).stem}.txt"
-            image_path = dataset_dir / "images" / split / original_name
-
             annotations = record.get("annotations", {})
-            lines_to_write = []
-            for key in annotations.keys():
-                lines_to_write = [" ".join(map(str, item)) for item in annotations[key]]
-                break
-            if "classification" in annotations:
-                lines_to_write = [str(cls) for cls in annotations["classification"]]
-
-            label_path.write_text("\n".join(lines_to_write) + "\n" if lines_to_write else "")
 
+            if is_classification:
+                # Classification: place image in {split}/{class_name}/ folder
+                class_ids = annotations.get("classification", [])
+                class_id = class_ids[0] if class_ids else 0
+                class_name = class_names.get(class_id, str(class_id))
+                image_path = dataset_dir / split / class_name / original_name
+            else:
+                # Detection: write label file and place image in images/{split}/
+                image_path = dataset_dir / "images" / split / original_name
+                label_path = dataset_dir / "labels" / split / f"{Path(original_name).stem}.txt"
+                lines_to_write = []
+                for key in annotations.keys():
+                    lines_to_write = [" ".join(map(str, item)) for item in annotations[key]]
+                    break
+                label_path.write_text("\n".join(lines_to_write) + "\n" if lines_to_write else "")
+
+            # Download image if URL provided and file doesn't exist
             if http_url := record.get("url"):
                 if not image_path.exists():
+                    image_path.parent.mkdir(parents=True, exist_ok=True)  # Ensure parent dir exists
                     try:
                         async with session.get(http_url, timeout=aiohttp.ClientTimeout(total=30)) as response:
                             response.raise_for_status()
@@ -848,8 +864,11 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
         await asyncio.gather(*[tracked_process(record) for record in image_records])
         pbar.close()
 
-
-
-
-
-
+    if is_classification:
+        # Classification: return dataset directory (check_cls_dataset expects a directory path)
+        return dataset_dir
+    else:
+        # Detection: write data.yaml and return its path
+        yaml_path = dataset_dir / "data.yaml"
+        YAML.save(yaml_path, data_yaml)
+        return yaml_path
ultralytics/engine/results.py
CHANGED
@@ -750,8 +750,8 @@ class Results(SimpleClass, DataExportMixin):
         """Convert inference results to a summarized dictionary with optional normalization for box coordinates.
 
         This method creates a list of detection dictionaries, each containing information about a single detection or
-        classification result. For classification tasks, it returns the top
-
+        classification result. For classification tasks, it returns the top 5 classes and their
+        confidences. For detection tasks, it includes class information, bounding box coordinates, and
         optionally mask segments and keypoints.
 
         Args:
@@ -772,14 +772,23 @@
         # Create list of detection dictionaries
         results = []
         if self.probs is not None:
-
-
-
-
-
-
-
-
+            # Return top 5 classification results
+            for class_id, conf in zip(self.probs.top5, self.probs.top5conf.tolist()):
+                class_id = int(class_id)
+                results.append(
+                    {
+                        "name": self.names[class_id],
+                        "class": class_id,
+                        "confidence": round(conf, decimals),
+                    }
+                )
+                results.append(
+                    {
+                        "name": self.names[class_id],
+                        "class": class_id,
+                        "confidence": round(conf, decimals),
+                    }
+                )
             return results
 
         is_obb = self.obb is not None
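With this change, Results.summary() for a classification result reports the top-5 classes rather than a single top-1 entry. A brief usage sketch; the weights and image are illustrative:

    from ultralytics import YOLO

    results = YOLO("yolo11n-cls.pt")("https://ultralytics.com/images/bus.jpg")
    for entry in results[0].summary(decimals=3):
        # Each entry carries "name", "class", and "confidence" for one top-5 class.
        print(entry["class"], entry["name"], entry["confidence"])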
ultralytics/engine/trainer.py
CHANGED
@@ -632,21 +632,19 @@ class BaseTrainer:
             (dict): A dictionary containing the training/validation/test dataset and category names.
         """
         try:
-
-
-
-                str(self.args.data).startswith("ul://") and "/datasets/" in str(self.args.data)
-            ):
-                # Convert NDJSON to YOLO format (including ul:// platform dataset URIs)
+            # Convert ul:// platform URIs and NDJSON files to local dataset format first
+            data_str = str(self.args.data)
+            if data_str.endswith(".ndjson") or (data_str.startswith("ul://") and "/datasets/" in data_str):
                 import asyncio
 
                 from ultralytics.data.converter import convert_ndjson_to_yolo
                 from ultralytics.utils.checks import check_file
 
-
-
-
-
+                self.args.data = str(asyncio.run(convert_ndjson_to_yolo(check_file(self.args.data))))
+
+            # Task-specific dataset checking
+            if self.args.task == "classify":
+                data = check_cls_dataset(self.args.data)
             elif str(self.args.data).rsplit(".", 1)[-1] in {"yaml", "yml"} or self.args.task in {
                 "detect",
                 "segment",
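In short, get_dataset now funnels both local .ndjson files and ul://.../datasets/... platform URIs through convert_ndjson_to_yolo before the task-specific dataset checks run. A hedged sketch of the user-facing call; the URI and file name are placeholders that only mirror the pattern matched above:

    from ultralytics import YOLO

    model = YOLO("yolo11n-cls.pt")  # illustrative weights
    # Either form triggers the new conversion branch in BaseTrainer.get_dataset:
    model.train(data="ul://my-team/datasets/pets", epochs=1)  # platform dataset URI (placeholder)
    # model.train(data="pets.ndjson", epochs=1)               # local NDJSON export (placeholder)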
{ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics-opencv-headless
-Version: 8.4.1
+Version: 8.4.2
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/RECORD
RENAMED
@@ -3,15 +3,15 @@ tests/conftest.py,sha256=rlKyDuOC_3ptXrWS8Q19bNEGOupUmYXHj3nB6o1GBGY,2318
 tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
 tests/test_cuda.py,sha256=2TBe-ZkecMOGPWLdHcbsAjH3m9c5SQJ2KeyICgS0aeo,8426
 tests/test_engine.py,sha256=ufSn3X4kL_Lpn2O25jKAfw_9QwHTMRjP9shDdpgBqnY,5740
-tests/test_exports.py,sha256=
+tests/test_exports.py,sha256=Toy4u-4bsoyAbzNhc9kbMuKqvMKywZxNj5jlFNTzFWs,14670
 tests/test_integrations.py,sha256=FjvTGjXm3bvYHK3_obgObhC5SzHCTzw4aOJV9Hh08jQ,6220
 tests/test_python.py,sha256=np6on3Sa0NNi5pquvilekjKxxedAJMpLOQEthGaIalQ,29284
 tests/test_solutions.py,sha256=1tRlM72YciE42Nk9v83gsXOD5RSx9GSWVsKGhH7-HxE,14122
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=qMJcbftLCQIaiGTgGRVuCAdhTXrvitn9PbCmwc3dXyY,1300
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=
+ultralytics/cfg/__init__.py,sha256=VHOu4sXRDcBddHGJ2UkdQN2yLXXFdn_rytHPWqke9nE,40300
 ultralytics/cfg/default.yaml,sha256=E__q2msvK9XCQngf0YFLpueCer_1tRcMJM0p3ahBdbA,9015
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=QGpdh3Hj5dFrvbsaE_8rAVj9BO4XpKTB7uhXaTTnE-o,3364
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=KE7VC-ZMDSei1pLPm-pdk_ZAMRU_gLwGgtIQNbwp6dA,1212
@@ -121,7 +121,7 @@ ultralytics/data/annotator.py,sha256=kbfSPBesKEVK6ys3dilTdMh7rCKyp0xV7tGQeEDbpWI
 ultralytics/data/augment.py,sha256=4xtggkuysYcbK5pYwNuAaoCzshb5wwD9KN6_pP4uSFU,128003
 ultralytics/data/base.py,sha256=pMs8yJOmAFPXdgfLCDtUemSvkPNDzxReP-fWzkNtonc,19723
 ultralytics/data/build.py,sha256=s-tkSZPf3OfQyfXPXB9XxdW_gIcU6Xy_u21ekSgTnRo,17205
-ultralytics/data/converter.py,sha256=
+ultralytics/data/converter.py,sha256=59-70gOXixYYTYjIKzdjhwaQ2HCJsy01kBuy1v7_hZ0,33158
 ultralytics/data/dataset.py,sha256=r_BZy4FwMZ-dYkaJiz1E3jr2pI6dn7V3hZwf2RM9_RQ,36536
 ultralytics/data/loaders.py,sha256=BQbhgjiLCGcRBPkGVG9Hr1jeNfG1nuZD3jstiWb7zS8,31889
 ultralytics/data/split.py,sha256=HpR0ltf5oN1DpZstavFbBFC1YdpGPaATXxDOcAMwOqc,5101
@@ -135,8 +135,8 @@ ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QU
 ultralytics/engine/exporter.py,sha256=SpA0Oj4w8yjYUde1okc4XfyCK376t1zZPr-bx1-p_WE,73429
 ultralytics/engine/model.py,sha256=bKoiy8ImddK-e87NmVbO5nlktqgebRM7D65epD4Cvjk,53211
 ultralytics/engine/predictor.py,sha256=neYmNDX27Vv3ggk9xqaKlH6XzB2vlFIghU5o7ZC0zFo,22838
-ultralytics/engine/results.py,sha256=
-ultralytics/engine/trainer.py,sha256=
+ultralytics/engine/results.py,sha256=BuYsuBRYMCoTuZyXo13Z9RW02zfzwzT8EZK8nNr7QmI,68433
+ultralytics/engine/trainer.py,sha256=bzIm_eQiaMJSusCPu_T9AiLrLWBfpGpI6z7y_PXTfwE,46816
 ultralytics/engine/tuner.py,sha256=mD4bjddz7CE7ExKgEaIoSQw22Lg9V0NBXqR9Vey2gIs,21840
 ultralytics/engine/validator.py,sha256=2rqdVt4hB9ruMJq-L7PbaCNFwuERS7ZHdVSg91RM3wk,17761
 ultralytics/hub/__init__.py,sha256=Z0K_E00jzQh90b18q3IDChwVmTvyIYp6C00sCV-n2F8,6709
@@ -302,9 +302,9 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
 ultralytics/utils/export/engine.py,sha256=QoXPqnmQn6W5TOUAygOtCG63R9ExDG4-Df6X6W-_Mzo,10470
 ultralytics/utils/export/imx.py,sha256=U9CFQJGRSNa5gyrVxW9fEvnhCd6Ut9_mFZZgzhrGhuI,13783
 ultralytics/utils/export/tensorflow.py,sha256=xHEcEM3_VeYctyqkJCpgkqcNie1M8xLqcFKr6uANEEQ,9951
-ultralytics_opencv_headless-8.4.
-ultralytics_opencv_headless-8.4.
-ultralytics_opencv_headless-8.4.
-ultralytics_opencv_headless-8.4.
-ultralytics_opencv_headless-8.4.
-ultralytics_opencv_headless-8.4.
+ultralytics_opencv_headless-8.4.2.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics_opencv_headless-8.4.2.dist-info/METADATA,sha256=toUtt-YSbIApvVMiHfgb6vNovbq1f6MTv5YB1tBfMWs,36937
+ultralytics_opencv_headless-8.4.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics_opencv_headless-8.4.2.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics_opencv_headless-8.4.2.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics_opencv_headless-8.4.2.dist-info/RECORD,,
{ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/WHEEL
RENAMED
File without changes

{ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/entry_points.txt
RENAMED
File without changes

{ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/licenses/LICENSE
RENAMED
File without changes

{ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.2.dist-info}/top_level.txt
RENAMED
File without changes