ultralytics 8.3.86__tar.gz → 8.3.87__tar.gz
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- {ultralytics-8.3.86/ultralytics.egg-info → ultralytics-8.3.87}/PKG-INFO +8 -8
- {ultralytics-8.3.86 → ultralytics-8.3.87}/README.md +6 -6
- {ultralytics-8.3.86 → ultralytics-8.3.87}/pyproject.toml +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_solutions.py +21 -2
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/__init__.py +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/__init__.py +18 -22
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/11/yolo11-cls.yaml +6 -6
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/loaders.py +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/exporter.py +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/results.py +76 -41
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/trainer.py +11 -5
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/tuner.py +3 -2
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/autobackend.py +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/tasks.py +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/parking_management.py +19 -4
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/__init__.py +2 -3
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/comet.py +37 -5
- {ultralytics-8.3.86 → ultralytics-8.3.87/ultralytics.egg-info}/PKG-INFO +8 -8
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics.egg-info/requires.txt +1 -1
- {ultralytics-8.3.86 → ultralytics-8.3.87}/LICENSE +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/setup.cfg +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/conftest.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_cli.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_cuda.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_engine.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_exports.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_integrations.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/tests/test_python.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/dog-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/hand-keypoints.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/lvis.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/medical-pills.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/signature.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/default.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/11/yolo11-obb.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/11/yolo11-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/11/yolo11-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/11/yolo11.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/12/yolo12-cls.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/12/yolo12-obb.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/12/yolo12-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/12/yolo12-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/12/yolo12.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/solutions/default.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/augment.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/base.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/build.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/converter.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/dataset.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/split_dota.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/data/utils.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/model.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/engine/validator.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/hub/google/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/hub/utils.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/fastsam/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/fastsam/model.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/fastsam/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/nas/model.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/model.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/blocks.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/memory_attention.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/modules/utils.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/sam/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/classify/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/detect/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/detect/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/model.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/obb/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/pose/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/segment/val.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/world/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/world/train.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/models/yolo/world/train_world.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/activation.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/conv.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/head.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/ai_gym.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/analytics.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/distance_calculation.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/heatmap.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/object_counter.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/queue_management.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/region_counter.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/security_alarm.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/solutions.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/speed_estimation.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/streamlit_inference.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/solutions/trackzone.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/byte_tracker.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/track.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/utils/gmc.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/benchmarks.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/mlflow.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/tensorboard.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/checks.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/downloads.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/instance.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/metrics.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/ops.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/plotting.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/torch_utils.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics.egg-info/SOURCES.txt +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.3.86 → ultralytics-8.3.87}/ultralytics.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ultralytics
-Version: 8.3.86
+Version: 8.3.87
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -63,7 +63,7 @@ Provides-Extra: export
 Requires-Dist: onnx>=1.12.0; extra == "export"
 Requires-Dist: coremltools>=7.0; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
 Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
-Requires-Dist: openvino>=2024.0.0; extra == "export"
+Requires-Dist: openvino!=2025.0.0,>=2024.0.0; extra == "export"
 Requires-Dist: tensorflow>=2.0.0; extra == "export"
 Requires-Dist: tensorflowjs>=3.9.0; extra == "export"
 Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
@@ -248,13 +248,13 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
 
 See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes.
 
-| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at
+| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 224 |
 | -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
-| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 |
-| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 |
-| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 |
-| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 |
-| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 |
+| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 0.5 |
+| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 1.6 |
+| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 5.0 |
+| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 6.2 |
+| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 13.7 |
 
 - **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce by `yolo val classify data=path/to/ImageNet device=0`
 - **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
@@ -163,13 +163,13 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
 
 See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes.
 
-| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at
+| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 224 |
 | -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
-| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 |
-| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 |
-| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 |
-| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 |
-| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 |
+| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 0.5 |
+| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 1.6 |
+| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 5.0 |
+| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 6.2 |
+| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 13.7 |
 
 - **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce by `yolo val classify data=path/to/ImageNet device=0`
 - **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
@@ -97,7 +97,7 @@ export = [
     "onnx>=1.12.0", # ONNX export
     "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'", # CoreML supported on macOS and Linux
     "scikit-learn>=1.3.2; platform_system != 'Windows' and python_version <= '3.11'", # CoreML k-means quantization
-    "openvino>=2024.0.0",
+    "openvino>=2024.0.0,!=2025.0.0", # OpenVINO export
     "tensorflow>=2.0.0", # TF bug https://github.com/ultralytics/ultralytics/issues/5161
     "tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow
     "tensorstore>=0.1.63; platform_machine == 'aarch64' and python_version >= '3.9'", # for TF Raspberry Pi exports
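Note: the same version exclusion is enforced at runtime further down in this diff via `check_requirements("openvino>=2024.0.0,!=2025.0.0")`. As a rough, non-authoritative sketch (assuming the third-party `packaging` library is available), an installed OpenVINO build can be checked against the new specifier like this:

```python
from importlib.metadata import version  # stdlib, Python 3.8+

from packaging.specifiers import SpecifierSet  # assumption: packaging is installed alongside pip/setuptools

installed = version("openvino")  # e.g. "2024.6.0"; raises PackageNotFoundError if OpenVINO is absent
print(installed in SpecifierSet(">=2024.0.0,!=2025.0.0"))  # True when the pinned range is satisfied
```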
@@ -8,8 +8,11 @@ from ultralytics import YOLO, solutions
 from ultralytics.utils import ASSETS_URL, WEIGHTS_DIR
 from ultralytics.utils.downloads import safe_download
 
-DEMO_VIDEO = "solutions_ci_demo.mp4"
-POSE_VIDEO = "solution_ci_pose_demo.mp4"
+DEMO_VIDEO = "solutions_ci_demo.mp4" # for all the solutions, except workout and parking
+POSE_VIDEO = "solution_ci_pose_demo.mp4" # only for workouts monitoring solution
+PARKING_VIDEO = "solution_ci_parking_demo.mp4" # only for parking management solution
+PARKING_AREAS_JSON = "solution_ci_parking_areas.json" # only for parking management solution
+PARKING_MODEL = "solutions_ci_parking_model.pt" # only for parking management solution
 
 
 @pytest.mark.slow
@@ -62,6 +65,22 @@ def test_major_solutions():
         _ = gym.monitor(im0)
     cap.release()
 
+    # Test parking management
+    safe_download(url=f"{ASSETS_URL}/{PARKING_VIDEO}", dir=TMP)
+    safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=TMP)
+    safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / PARKING_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
+    parkingmanager = solutions.ParkingManagement(
+        json_file=str(TMP / PARKING_AREAS_JSON), model=str(TMP / PARKING_MODEL), show=False
+    )
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        _ = parkingmanager.process_data(im0)
+    cap.release()
+
 
 @pytest.mark.slow
 def test_instance_segmentation():
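Outside of CI, the same `ParkingManagement` API exercised by this new test can be driven the same way; a minimal sketch, with every file path illustrative rather than taken from the package:

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("parking_lot.mp4")  # illustrative local video
parkingmanager = solutions.ParkingManagement(
    json_file="parking_areas.json",  # illustrative region-points file, e.g. created with ParkingPtsSelection
    model="yolo11n.pt",  # illustrative detection model
    show=False,
)
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    _ = parkingmanager.process_data(im0)  # same per-frame call as in the test above
cap.release()
```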
@@ -656,13 +656,10 @@ def handle_yolo_solutions(args: List[str]) -> None:
         - For 'analytics' solution, frame numbers are tracked for generating analytical graphs
         - Video processing can be interrupted by pressing 'q'
         - Processes video frames sequentially and saves output in .avi format
-        - If no source is specified, downloads and uses a default sample video
+        - If no source is specified, downloads and uses a default sample video
         - The inference solution will be launched using the 'streamlit run' command.
         - The Streamlit app file is located in the Ultralytics package directory.
     """
-    from ultralytics import solutions
-    from ultralytics.utils.files import increment_path
-
     full_args_dict = {**DEFAULT_SOL_DICT, **DEFAULT_CFG_DICT} # arguments dictionary
     overrides = {}
 
@@ -680,21 +677,19 @@ def handle_yolo_solutions(args: List[str]) -> None:
     check_dict_alignment(full_args_dict, overrides) # dict alignment
 
     # Get solution name
-    if args
-
-
-
-
+    if args[0] == "help":
+        LOGGER.info(SOLUTIONS_HELP_MSG)
+        return # Early return for 'help' case
+    elif args[0] in SOLUTION_MAP:
+        solution_name = args.pop(0) # Extract the solution name directly
     else:
         LOGGER.warning(
-            f"
+            f"❌ '{args[0]}' is not a valid solution. 💡 Defaulting to 'count'.\n"
+            f"🚀 Available solutions: {', '.join(list(SOLUTION_MAP.keys())[:-1])}\n"
         )
-
-
-    if args and args[0] == "help": # Add check for return if user call `yolo solutions help`
-        return
+        solution_name = "count" # Default for invalid solution
 
-    if
+    if solution_name == "inference":
         checks.check_requirements("streamlit>=1.29.0")
         LOGGER.info("💡 Loading Ultralytics live inference app...")
         subprocess.run(
@@ -708,7 +703,9 @@ def handle_yolo_solutions(args: List[str]) -> None:
             ]
         )
     else:
-
+        from ultralytics import solutions
+
+        cls, method = SOLUTION_MAP[solution_name] # solution class name, method name and default source
         solution = getattr(solutions, cls)(IS_CLI=True, **overrides) # get solution class i.e ObjectCounter
         process = getattr(
             solution, method
@@ -717,13 +714,12 @@ def handle_yolo_solutions(args: List[str]) -> None:
         cap = cv2.VideoCapture(solution.CFG["source"]) # read the video file
 
         # extract width, height and fps of the video file, create save directory and initialize video writer
-
         w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
-        if
+        if solution_name == "analytics": # analytical graphs follow fixed shape for output i.e w=1920, h=1080
             w, h = 1920, 1080
-        save_dir =
-        save_dir.mkdir(parents=True
-        vw = cv2.VideoWriter(str(save_dir / "
+        save_dir = get_save_dir(SimpleNamespace(project="runs/solutions", name="exp", exist_ok=False))
+        save_dir.mkdir(parents=True) # create the output directory i.e. runs/solutions/exp
+        vw = cv2.VideoWriter(str(save_dir / f"{solution_name}.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
 
         try: # Process video frames
             f_n = 0 # frame number, required for analytical graphs
@@ -731,7 +727,7 @@ def handle_yolo_solutions(args: List[str]) -> None:
                 success, frame = cap.read()
                 if not success:
                     break
-                frame = process(frame, f_n := f_n + 1) if
+                frame = process(frame, f_n := f_n + 1) if solution_name == "analytics" else process(frame)
                 vw.write(frame)
                 if cv2.waitKey(1) & 0xFF == ord("q"):
                     break
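The `f_n := f_n + 1` above is a Python 3.8+ assignment expression: it increments the analytics frame counter and passes the new value within the same call. A tiny standalone illustration (not from the package):

```python
f_n = 0  # frame counter, as in the hunk above
for frame in ["frame-a", "frame-b", "frame-c"]:
    # The walrus operator updates f_n and yields its new value inside the call.
    print(f_n := f_n + 1, frame)  # prints 1 frame-a, 2 frame-b, 3 frame-c
```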
@@ -5,14 +5,14 @@
 # Task docs: https://docs.ultralytics.com/tasks/classify
 
 # Parameters
-nc:
+nc: 1000 # number of classes
 scales: # model compound scaling constants, i.e. 'model=yolo11n-cls.yaml' will call yolo11-cls.yaml with scale 'n'
   # [depth, width, max_channels]
-  n: [0.50, 0.25, 1024] # summary: 151 layers, 1633584 parameters, 1633584 gradients,
-  s: [0.50, 0.50, 1024] # summary: 151 layers, 5545488 parameters, 5545488 gradients,
-  m: [0.50, 1.00, 512] # summary: 187 layers, 10455696 parameters, 10455696 gradients,
-  l: [1.00, 1.00, 512] # summary: 309 layers, 12937104 parameters, 12937104 gradients,
-  x: [1.00, 1.50, 512] # summary: 309 layers, 28458544 parameters, 28458544 gradients,
+  n: [0.50, 0.25, 1024] # summary: 151 layers, 1633584 parameters, 1633584 gradients, 0.5 GFLOPs
+  s: [0.50, 0.50, 1024] # summary: 151 layers, 5545488 parameters, 5545488 gradients, 1.6 GFLOPs
+  m: [0.50, 1.00, 512] # summary: 187 layers, 10455696 parameters, 10455696 gradients, 5.0 GFLOPs
+  l: [1.00, 1.00, 512] # summary: 309 layers, 12937104 parameters, 12937104 gradients, 6.2 GFLOPs
+  x: [1.00, 1.50, 512] # summary: 309 layers, 28458544 parameters, 28458544 gradients, 13.7 GFLOPs
 
 # YOLO11n backbone
 backbone:
@@ -106,7 +106,7 @@ class LoadStreams:
         self.caps = [None] * n # video capture objects
         self.imgs = [[] for _ in range(n)] # images
         self.shape = [[] for _ in range(n)] # image shapes
-        self.sources = [ops.clean_str(x) for x in sources] # clean source names for later
+        self.sources = [ops.clean_str(x).replace(os.sep, "_") for x in sources] # clean source names for later
         for i, s in enumerate(sources): # index, source
             # Start thread to read frames from video stream
             st = f"{i + 1}/{n}: {s}... "
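The added `.replace(os.sep, "_")` strips directory separators out of the cleaned stream-source names, presumably so that a file-path source cannot inject path components into names derived from it later. A small illustration of just that string operation (independent of `ops.clean_str`):

```python
import os

source = os.path.join("videos", "cam1", "stream.mp4")
print(source)  # e.g. videos/cam1/stream.mp4 on POSIX systems
print(source.replace(os.sep, "_"))  # videos_cam1_stream.mp4 — separators become underscores
```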
@@ -590,7 +590,7 @@ class Exporter:
     @try_export
     def export_openvino(self, prefix=colorstr("OpenVINO:")):
         """YOLO OpenVINO export."""
-        check_requirements("openvino>=2024.0.0")
+        check_requirements("openvino>=2024.0.0,!=2025.0.0")
         import openvino as ov
 
         LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
@@ -188,43 +188,50 @@ class Results(SimpleClass):
     """
     A class for storing and manipulating inference results.
 
-    This class
-
+    This class provides methods for accessing, manipulating, and visualizing inference results from various
+    Ultralytics models, including detection, segmentation, classification, and pose estimation.
 
     Attributes:
-        orig_img (numpy.ndarray):
+        orig_img (numpy.ndarray): The original image as a numpy array.
         orig_shape (Tuple[int, int]): Original image shape in (height, width) format.
-        boxes (Boxes | None):
-        masks (Masks | None):
-        probs (Probs | None):
-        keypoints (Keypoints | None):
-        obb (OBB | None):
-        speed (Dict
-        names (Dict
-        path (str): Path to the image file.
-
+        boxes (Boxes | None): Detected bounding boxes.
+        masks (Masks | None): Segmentation masks.
+        probs (Probs | None): Classification probabilities.
+        keypoints (Keypoints | None): Detected keypoints.
+        obb (OBB | None): Oriented bounding boxes.
+        speed (Dict): Dictionary containing inference speed information.
+        names (Dict): Dictionary mapping class indices to class names.
+        path (str): Path to the input image file.
+        save_dir (str | None): Directory to save results.
 
     Methods:
-        update: Updates object
-        cpu: Returns a copy of the Results object with all tensors
-        numpy:
-        cuda:
-        to:
-        new:
-        plot: Plots detection results on an input
-        show:
-        save: Saves annotated results to file.
-        verbose: Returns a log string for each task
+        update: Updates the Results object with new detection data.
+        cpu: Returns a copy of the Results object with all tensors moved to CPU memory.
+        numpy: Converts all tensors in the Results object to numpy arrays.
+        cuda: Moves all tensors in the Results object to GPU memory.
+        to: Moves all tensors to the specified device and dtype.
+        new: Creates a new Results object with the same image, path, names, and speed attributes.
+        plot: Plots detection results on an input RGB image.
+        show: Displays the image with annotated inference results.
+        save: Saves annotated inference results image to file.
+        verbose: Returns a log string for each task in the results.
         save_txt: Saves detection results to a text file.
-        save_crop: Saves cropped detection images.
-
+        save_crop: Saves cropped detection images to specified directory.
+        summary: Converts inference results to a summarized dictionary.
+        to_df: Converts detection results to a Pandas Dataframe.
+        to_json: Converts detection results to JSON format.
+        to_csv: Converts detection results to a CSV format.
+        to_xml: Converts detection results to XML format.
+        to_html: Converts detection results to HTML format.
+        to_sql: Converts detection results to an SQL-compatible format.
 
     Examples:
         >>> results = model("path/to/image.jpg")
+        >>> result = results[0] # Get the first result
+        >>> boxes = result.boxes # Get the boxes for the first result
+        >>> masks = result.masks # Get the masks for the first result
        >>> for result in results:
-
-        ...     result.show() # Display the annotated image
-        ...     result.save(filename="result.jpg") # Save annotated image
+        >>>     result.plot() # Plot detection results
     """
 
     def __init__(
@@ -766,8 +773,8 @@ class Results(SimpleClass):
         optionally mask segments and keypoints.
 
         Args:
-            normalize (bool): Whether to normalize bounding box coordinates by image dimensions.
-            decimals (int): Number of decimal places to round the output values to.
+            normalize (bool): Whether to normalize bounding box coordinates by image dimensions.
+            decimals (int): Number of decimal places to round the output values to.
 
         Returns:
             (List[Dict]): A list of dictionaries, each containing summarized information for a single
@@ -832,8 +839,8 @@ class Results(SimpleClass):
 
         Args:
             normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
-                If True, coordinates will be returned as float values between 0 and 1.
-            decimals (int): Number of decimal places to round the output values to.
+                If True, coordinates will be returned as float values between 0 and 1.
+            decimals (int): Number of decimal places to round the output values to.
 
         Returns:
             (DataFrame): A Pandas Dataframe containing all the information in results in an organized way.
@@ -858,8 +865,8 @@ class Results(SimpleClass):
 
         Args:
             normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
-                If True, coordinates will be returned as float values between 0 and 1.
-            decimals (int): Number of decimal places to round the output values to.
+                If True, coordinates will be returned as float values between 0 and 1.
+            decimals (int): Number of decimal places to round the output values to.
             *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_csv().
             **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_csv().
 
@@ -885,8 +892,8 @@ class Results(SimpleClass):
 
         Args:
             normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
-                If True, coordinates will be returned as float values between 0 and 1.
-            decimals (int): Number of decimal places to round the output values to.
+                If True, coordinates will be returned as float values between 0 and 1.
+            decimals (int): Number of decimal places to round the output values to.
             *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_xml().
             **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_xml().
 
@@ -903,6 +910,34 @@ class Results(SimpleClass):
         df = self.to_df(normalize=normalize, decimals=decimals)
         return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml(*args, **kwargs)
 
+    def to_html(self, normalize=False, decimals=5, index=False, *args, **kwargs):
+        """
+        Converts detection results to HTML format.
+
+        This method serializes the detection results into an HTML format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1.
+            decimals (int): Number of decimal places to round the output values to.
+            index (bool): Whether to include the DataFrame index in the HTML output.
+            *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_html().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_html().
+
+        Returns:
+            (str): An HTML string containing all the information in results in an organized way.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            >>>     html_result = result.to_html()
+            >>>     print(html_result)
+        """
+        df = self.to_df(normalize=normalize, decimals=decimals)
+        return "<table></table>" if df.empty else df.to_html(index=index, *args, **kwargs)
+
     def tojson(self, normalize=False, decimals=5):
         """Deprecated version of to_json()."""
         LOGGER.warning("WARNING ⚠️ 'result.tojson()' is deprecated, replace with 'result.to_json()'.")
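A minimal usage sketch of the newly added `to_html()`, mirroring its docstring example; the weight file and image URL are illustrative choices, not part of this diff:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # illustrative model
results = model("https://ultralytics.com/images/bus.jpg")
html = results[0].to_html(normalize=True)  # "<table></table>" when the image has no detections
print(html[:200])
```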
@@ -918,8 +953,8 @@ class Results(SimpleClass):
 
         Args:
             normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
-                If True, coordinates will be returned as float values between 0 and 1.
-            decimals (int): Number of decimal places to round the output values to.
+                If True, coordinates will be returned as float values between 0 and 1.
+            decimals (int): Number of decimal places to round the output values to.
 
         Returns:
             (str): A JSON string containing the serialized detection results.
@@ -951,11 +986,11 @@ class Results(SimpleClass):
         and optionally segmentation masks, keypoints or oriented bounding boxes.
 
         Args:
-            table_name (str): Name of the SQL table where the data will be inserted.
+            table_name (str): Name of the SQL table where the data will be inserted.
             normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
-                If True, coordinates will be returned as float values between 0 and 1.
-            decimals (int): Number of decimal places to round the bounding boxes values to.
-            db_path (str): Path to the SQLite database file.
+                If True, coordinates will be returned as float values between 0 and 1.
+            decimals (int): Number of decimal places to round the bounding boxes values to.
+            db_path (str): Path to the SQLite database file.
 
         Examples:
             >>> results = model("path/to/image.jpg")
@@ -452,7 +452,8 @@ class BaseTrainer:
             self.scheduler.last_epoch = self.epoch # do not move
             self.stop |= epoch >= self.epochs # stop if exceeded epochs
             self.run_callbacks("on_fit_epoch_end")
-            self._clear_memory()
+            if self._get_memory(fraction=True) > 0.9:
+                self._clear_memory() # clear if memory utilization > 90%
 
             # Early Stopping
             if RANK != -1: # if DDP training
@@ -485,15 +486,20 @@ class BaseTrainer:
                 max_num_obj=max_num_obj,
             ) # returns batch size
 
-    def _get_memory(self):
-        """Get accelerator memory utilization in GB."""
+    def _get_memory(self, fraction=False):
+        """Get accelerator memory utilization in GB or fraction."""
+        memory, total = 0, 0
         if self.device.type == "mps":
             memory = torch.mps.driver_allocated_memory()
+            if fraction:
+                total = torch.mps.get_mem_info()[0]
         elif self.device.type == "cpu":
-
+            pass
         else:
             memory = torch.cuda.memory_reserved()
-
+            if fraction:
+                total = torch.cuda.get_device_properties(self.device).total_memory
+        return ((memory / total) if total > 0 else 0) if fraction else (memory / 2**30)
 
     def _clear_memory(self):
         """Clear accelerator memory on different platforms."""
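The check that now gates `_clear_memory()` reduces to reserved memory divided by total device memory. A standalone sketch of the CUDA branch under that assumption (returns 0.0 on machines without CUDA):

```python
import torch

def cuda_memory_fraction(device: str = "cuda:0") -> float:
    """Reserved CUDA memory as a fraction of total device memory, 0.0 when CUDA is unavailable."""
    if not torch.cuda.is_available():
        return 0.0
    reserved = torch.cuda.memory_reserved(device)  # bytes currently reserved by the caching allocator
    total = torch.cuda.get_device_properties(device).total_memory  # bytes of device memory
    return reserved / total if total > 0 else 0.0

# The trainer hunk above clears accelerator memory only when this fraction exceeds 0.9.
```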
@@ -191,8 +191,9 @@ class Tuner:
             weights_dir = save_dir / "weights"
             try:
                 # Train YOLO model with mutated hyperparameters (run in subprocess to avoid dataloader hang)
-
-
+                launch = [__import__("sys").executable, "-m", "ultralytics.cfg.__init__"] # workaround yolo not found
+                cmd = [*launch, "train", *(f"{k}={v}" for k, v in train_args.items())]
+                return_code = subprocess.run(cmd, check=True).returncode
                 ckpt_file = weights_dir / ("best.pt" if (weights_dir / "best.pt").exists() else "last.pt")
                 metrics = torch.load(ckpt_file)["train_metrics"]
                 assert return_code == 0, "training failed"
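Per its inline comment, the `__import__("sys").executable ... -m ultralytics.cfg.__init__` launch works around the `yolo` entry point not being found in the subprocess. An equivalent, more explicit spelling (argument values illustrative):

```python
import subprocess
import sys

train_args = {"data": "coco8.yaml", "epochs": 1, "imgsz": 640}  # illustrative mutated hyperparameters
cmd = [sys.executable, "-m", "ultralytics.cfg.__init__", "train", *(f"{k}={v}" for k, v in train_args.items())]
print(cmd)
# subprocess.run(cmd, check=True)  # uncommenting this launches an actual training run
```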
@@ -244,7 +244,7 @@ class AutoBackend(nn.Module):
         # OpenVINO
         elif xml:
             LOGGER.info(f"Loading {w} for OpenVINO inference...")
-            check_requirements("openvino>=2024.0.0")
+            check_requirements("openvino>=2024.0.0,!=2025.0.0")
             import openvino as ov
 
             core = ov.Core()
|
|
1119
1119
|
(str): The size character of the model's scale, which can be n, s, m, l, or x.
|
1120
1120
|
"""
|
1121
1121
|
try:
|
1122
|
-
return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) #
|
1122
|
+
return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) # returns n, s, m, l, or x
|
1123
1123
|
except AttributeError:
|
1124
1124
|
return ""
|
1125
1125
|
|
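The regex in `guess_model_scale` extracts the scale letter that follows the version digits in a model file stem; a quick behavioral check (file names illustrative):

```python
import re
from pathlib import Path

pattern = r"yolo[v]?\d+([nslmx])"
for name in ("yolo11n.pt", "yolov8x.yaml", "yolo11-cls.yaml"):
    m = re.search(pattern, Path(name).stem)
    print(name, "->", m.group(1) if m else "")  # n, x, and "" when no scale letter follows the digits
```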
@@ -7,7 +7,7 @@ import numpy as np
 
 from ultralytics.solutions.solutions import BaseSolution
 from ultralytics.utils import LOGGER
-from ultralytics.utils.checks import
+from ultralytics.utils.checks import check_imshow
 from ultralytics.utils.plotting import Annotator
 
 
@@ -49,9 +49,24 @@ class ParkingPtsSelection:
 
     def __init__(self):
         """Initializes the ParkingPtsSelection class, setting up UI and properties for parking zone point selection."""
-
-
-
+        try: # check if tkinter installed
+            import tkinter as tk
+            from tkinter import filedialog, messagebox
+        except ImportError: # Display error with recommendations
+            import platform
+
+            install_cmd = {
+                "Linux": "sudo apt install python3-tk (Debian/Ubuntu) | sudo dnf install python3-tkinter (Fedora) | "
+                "sudo pacman -S tk (Arch)",
+                "Windows": "reinstall Python and enable the checkbox `tcl/tk and IDLE` on **Optional Features** during installation",
+                "Darwin": "reinstall Python from https://www.python.org/downloads/mac-osx/ or `brew install python-tk`",
+            }.get(platform.system(), "Unknown OS. Check your Python installation.")
+
+            LOGGER.warning(f"WARNING ⚠️ Tkinter is not configured or supported. Potential fix: {install_cmd}")
+            return
+
+        if not check_imshow(warn=True):
+            return
 
         self.tk, self.filedialog, self.messagebox = tk, filedialog, messagebox
         self.master = self.tk.Tk() # Reference to the main application window or parent widget
@@ -28,6 +28,7 @@ import tqdm
 import yaml
 
 from ultralytics import __version__
+from ultralytics.utils.patches import imread, imshow, imwrite, torch_load, torch_save # for patches
 
 # PyTorch Multi-GPU DDP Constants
 RANK = int(os.getenv("RANK", -1))
@@ -125,7 +126,7 @@ HELP_MSG = """
 
 # Settings and Environment Variables
 torch.set_printoptions(linewidth=320, precision=4, profile="default")
-np.set_printoptions(linewidth=320, formatter=
+np.set_printoptions(linewidth=320, formatter=dict(float_kind="{:11.5g}".format)) # format short g, %precision=5
 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
 os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS) # NumExpr max threads
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # suppress verbose TF compiler warnings in Colab
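The updated `formatter` argument renders every float with the `"{:11.5g}"` format when NumPy arrays are printed (the truncated old right-hand side is not reproduced here). A quick demonstration of the effect:

```python
import numpy as np

np.set_printoptions(linewidth=320, formatter=dict(float_kind="{:11.5g}".format))
print(np.array([3.14159265, 0.00012345, 12345.678]))  # each float printed via the 11.5g format
```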
@@ -1340,8 +1341,6 @@ TESTS_RUNNING = is_pytest_running() or is_github_action_running()
 set_sentry()
 
 # Apply monkey patches
-from ultralytics.utils.patches import imread, imshow, imwrite, torch_load, torch_save
-
 torch.load = torch_load
 torch.save = torch_save
 if WINDOWS: