ultralytics 8.3.12__tar.gz → 8.3.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ultralytics may be problematic.
- {ultralytics-8.3.12/ultralytics.egg-info → ultralytics-8.3.13}/PKG-INFO +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/__init__.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/exporter.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/predict.py +68 -46
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/train.py +1 -2
- ultralytics-8.3.13/ultralytics/solutions/distance_calculation.py +82 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/object_counter.py +2 -2
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/metrics.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/plotting.py +14 -15
- {ultralytics-8.3.12 → ultralytics-8.3.13/ultralytics.egg-info}/PKG-INFO +1 -1
- ultralytics-8.3.12/ultralytics/solutions/distance_calculation.py +0 -139
- {ultralytics-8.3.12 → ultralytics-8.3.13}/LICENSE +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/README.md +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/pyproject.toml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/setup.cfg +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/conftest.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_cli.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_cuda.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_engine.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_exports.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_integrations.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_python.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/tests/test_solutions.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/hand-keypoints.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/lvis.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/signature.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/default.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-cls.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-obb.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/solutions/default.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/augment.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/base.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/build.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/converter.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/dataset.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/loaders.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/split_dota.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/data/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/results.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/trainer.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/tuner.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/engine/validator.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/hub/google/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/hub/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/fastsam/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/fastsam/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/fastsam/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/nas/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/blocks.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/memory_attention.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/modules/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/world/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/world/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/world/train_world.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/autobackend.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/activation.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/conv.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/head.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/nn/tasks.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/ai_gym.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/analytics.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/heatmap.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/parking_management.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/queue_management.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/solutions.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/speed_estimation.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/streamlit_inference.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/byte_tracker.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/track.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/utils/gmc.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/benchmarks.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/comet.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/mlflow.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/tensorboard.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/checks.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/downloads.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/instance.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/ops.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/torch_utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics.egg-info/SOURCES.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics.egg-info/requires.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics.egg-info/top_level.txt +0 -0

{ultralytics-8.3.12/ultralytics.egg-info → ultralytics-8.3.13}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.12
+Version: 8.3.13
 Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/sam/predict.py

@@ -235,7 +235,42 @@ class Predictor(BasePredictor):
         """
         features = self.get_im_features(im) if self.features is None else self.features

-
+        bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
+        points = (points, labels) if points is not None else None
+        # Embed prompts
+        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
+
+        # Predict masks
+        pred_masks, pred_scores = self.model.mask_decoder(
+            image_embeddings=features,
+            image_pe=self.model.prompt_encoder.get_dense_pe(),
+            sparse_prompt_embeddings=sparse_embeddings,
+            dense_prompt_embeddings=dense_embeddings,
+            multimask_output=multimask_output,
+        )
+
+        # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
+        # `d` could be 1 or 3 depends on `multimask_output`.
+        return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+
+    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+        """
+        Prepares and transforms the input prompts for processing based on the destination shape.
+
+        Args:
+            dst_shape (tuple): The target shape (height, width) for the prompts.
+            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+        Raises:
+            AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+        Returns:
+            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+        """
+        src_shape = self.batch[1][0].shape[:2]
         r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
         # Transform input prompts
         if points is not None:

@@ -258,23 +293,7 @@ class Predictor(BasePredictor):
             bboxes *= r
         if masks is not None:
             masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
-        points = (points, labels) if points is not None else None
-        # Embed prompts
-        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
-
-        # Predict masks
-        pred_masks, pred_scores = self.model.mask_decoder(
-            image_embeddings=features,
-            image_pe=self.model.prompt_encoder.get_dense_pe(),
-            sparse_prompt_embeddings=sparse_embeddings,
-            dense_prompt_embeddings=dense_embeddings,
-            multimask_output=multimask_output,
-        )
-
-        # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
-        # `d` could be 1 or 3 depends on `multimask_output`.
-        return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+        return bboxes, points, labels, masks

     def generate(
         self,

@@ -693,34 +712,7 @@ class SAM2Predictor(Predictor):
         """
         features = self.get_im_features(im) if self.features is None else self.features

-
-        r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
-        # Transform input prompts
-        if points is not None:
-            points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
-            points = points[None] if points.ndim == 1 else points
-            # Assuming labels are all positive if users don't pass labels.
-            if labels is None:
-                labels = torch.ones(points.shape[0])
-            labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
-            points *= r
-            # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
-            points, labels = points[:, None], labels[:, None]
-        if bboxes is not None:
-            bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
-            bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
-            bboxes = bboxes.view(-1, 2, 2) * r
-            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
-            # NOTE: merge "boxes" and "points" into a single "points" input
-            # (where boxes are added at the beginning) to model.sam_prompt_encoder
-            if points is not None:
-                points = torch.cat([bboxes, points], dim=1)
-                labels = torch.cat([bbox_labels, labels], dim=1)
-            else:
-                points, labels = bboxes, bbox_labels
-        if masks is not None:
-            masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
+        bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
         points = (points, labels) if points is not None else None

         sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(

@@ -744,6 +736,36 @@ class SAM2Predictor(Predictor):
         # `d` could be 1 or 3 depends on `multimask_output`.
         return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)

+    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+        """
+        Prepares and transforms the input prompts for processing based on the destination shape.
+
+        Args:
+            dst_shape (tuple): The target shape (height, width) for the prompts.
+            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+        Raises:
+            AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+        Returns:
+            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+        """
+        bboxes, points, labels, masks = super()._prepare_prompts(dst_shape, bboxes, points, labels, masks)
+        if bboxes is not None:
+            bboxes = bboxes.view(-1, 2, 2)
+            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
+            # NOTE: merge "boxes" and "points" into a single "points" input
+            # (where boxes are added at the beginning) to model.sam_prompt_encoder
+            if points is not None:
+                points = torch.cat([bboxes, points], dim=1)
+                labels = torch.cat([bbox_labels, labels], dim=1)
+            else:
+                points, labels = bboxes, bbox_labels
+        return bboxes, points, labels, masks
+
     def set_image(self, image):
         """
         Preprocesses and sets a single image for inference using the SAM2 model.
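
For context, the refactor above factors the prompt rescaling and box/point merging out of prompt_inference() into a shared Predictor._prepare_prompts() helper, which SAM2Predictor overrides; the public prediction API is unchanged. Below is a minimal usage sketch, not part of the diff (the weight name is an example checkpoint), of the prompted inference path whose bboxes/points/labels inputs _prepare_prompts() normalizes:

    # Minimal sketch (not from the diff): prompted SAM/SAM2 inference via the public API.
    # The bboxes/points/labels passed here are what _prepare_prompts() rescales from the
    # source image size to the model input size before prompt encoding.
    from ultralytics import SAM

    model = SAM("sam2_b.pt")  # example weight name; other supported SAM/SAM2 checkpoints work too

    # Box prompt in XYXY pixel coordinates of the source image
    results = model("ultralytics/assets/bus.jpg", bboxes=[100, 100, 400, 500])

    # Point prompt(s) with labels (1 = foreground, 0 = background)
    results = model("ultralytics/assets/bus.jpg", points=[[250, 300]], labels=[1])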
{ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/train.py

@@ -8,7 +8,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
 from ultralytics.engine.trainer import BaseTrainer
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import ClassificationModel
-from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
 from ultralytics.utils.plotting import plot_images, plot_results
 from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first

@@ -141,7 +141,6 @@ class ClassificationTrainer(BaseTrainer):
         self.metrics = self.validator(model=f)
         self.metrics.pop("fitness", None)
         self.run_callbacks("on_fit_epoch_end")
-        LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")

     def plot_training_samples(self, batch, ni):
         """Plots training samples with their annotations."""
ultralytics-8.3.13/ultralytics/solutions/distance_calculation.py (new file)

@@ -0,0 +1,82 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import math
+
+import cv2
+
+from ultralytics.solutions.solutions import BaseSolution  # Import a parent class
+from ultralytics.utils.plotting import Annotator, colors
+
+
+class DistanceCalculation(BaseSolution):
+    """A class to calculate distance between two objects in a real-time video stream based on their tracks."""
+
+    def __init__(self, **kwargs):
+        """Initializes the DistanceCalculation class with the given parameters."""
+        super().__init__(**kwargs)
+
+        # Mouse event information
+        self.left_mouse_count = 0
+        self.selected_boxes = {}
+
+    def mouse_event_for_distance(self, event, x, y, flags, param):
+        """
+        Handles mouse events to select regions in a real-time video stream.
+
+        Args:
+            event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
+            x (int): X-coordinate of the mouse pointer.
+            y (int): Y-coordinate of the mouse pointer.
+            flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.).
+            param (dict): Additional parameters passed to the function.
+        """
+        if event == cv2.EVENT_LBUTTONDOWN:
+            self.left_mouse_count += 1
+            if self.left_mouse_count <= 2:
+                for box, track_id in zip(self.boxes, self.track_ids):
+                    if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
+                        self.selected_boxes[track_id] = box
+
+        elif event == cv2.EVENT_RBUTTONDOWN:
+            self.selected_boxes = {}
+            self.left_mouse_count = 0
+
+    def calculate(self, im0):
+        """
+        Processes the video frame and calculates the distance between two bounding boxes.
+
+        Args:
+            im0 (ndarray): The image frame.
+
+        Returns:
+            (ndarray): The processed image frame.
+        """
+        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
+        self.extract_tracks(im0)  # Extract tracks
+
+        # Iterate over bounding boxes, track ids and classes index
+        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+            self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
+
+            if len(self.selected_boxes) == 2:
+                for trk_id in self.selected_boxes.keys():
+                    if trk_id == track_id:
+                        self.selected_boxes[track_id] = box
+
+        if len(self.selected_boxes) == 2:
+            # Store user selected boxes in centroids list
+            self.centroids.extend(
+                [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
+            )
+            # Calculate pixels distance
+            pixels_distance = math.sqrt(
+                (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
+            )
+            self.annotator.plot_distance_and_line(pixels_distance, self.centroids)
+
+        self.centroids = []
+
+        self.display_output(im0)  # display output with base class function
+        cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
+
+        return im0  # return output image for more usage
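
The rewritten solution now subclasses BaseSolution, inheriting model loading, tracking and display from the parent, and its per-frame entry point is calculate(im0) rather than the old start_process(im0, tracks). A minimal usage sketch follows; it assumes BaseSolution accepts model/show keyword arguments like the other 8.3.x solutions, and the video path and weight name are placeholders:

    # Minimal sketch (assumptions noted above; not taken from the diff).
    import cv2
    from ultralytics import solutions

    cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder video source
    distancecalculator = solutions.DistanceCalculation(model="yolo11n.pt", show=True)

    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
        im0 = distancecalculator.calculate(im0)  # new per-frame entry point (was start_process(im0, tracks))

    cap.release()
    cv2.destroyAllWindows()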
{ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/solutions/object_counter.py

@@ -112,13 +112,13 @@ class ObjectCounter(BaseSolution):
         # Iterate over bounding boxes, track ids and classes index
         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
             # Draw bounding box and counting region
-            self.annotator.box_label(box, label=self.names[cls], color=colors(
+            self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
             self.store_tracking_history(track_id, box)  # Store track history
             self.store_classwise_counts(cls)  # store classwise counts in dict

             # Draw tracks of objects
             self.annotator.draw_centroid_and_tracks(
-                self.track_line, color=colors(int(
+                self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
             )

             # store previous position of track for object counting
{ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/metrics.py

@@ -598,7 +598,7 @@ def ap_per_class(
         # AP from recall-precision curve
         for j in range(tp.shape[1]):
             ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
-            if plot and j == 0:
+            if j == 0:
                 prec_values.append(np.interp(x, mrec, mpre))  # precision at mAP@0.5

     prec_values = np.array(prec_values)  # (nc, 1000)
{ultralytics-8.3.12 → ultralytics-8.3.13}/ultralytics/utils/plotting.py

@@ -804,31 +804,30 @@ class Annotator:
             self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
         )

-    def plot_distance_and_line(
+    def plot_distance_and_line(
+        self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
+    ):
         """
         Plot the distance and line on frame.

         Args:
             pixels_distance (float): Pixels distance between two bbox centroids.
             centroids (list): Bounding box centroids data.
-            line_color (tuple):
-            centroid_color (tuple):
+            line_color (tuple, optional): Distance line color.
+            centroid_color (tuple, optional): Bounding box centroid color.
         """
         # Get the text size
-
-
-        )
+        text = f"Pixels Distance: {pixels_distance:.2f}"
+        (text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)

         # Define corners with 10-pixel margin and draw rectangle
-
-        bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
-        cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+        cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)

         # Calculate the position for the text with a 10-pixel margin and draw text
-        text_position = (
+        text_position = (25, 25 + text_height_m + 10)
         cv2.putText(
             self.im,
-
+            text,
             text_position,
             0,
             self.sf,

@@ -1156,16 +1155,16 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
     save_dir = Path(file).parent if file else Path(dir)
     if classify:
         fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
-        index = [
+        index = [2, 5, 3, 4]
     elif segment:
         fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
-        index = [
+        index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
     elif pose:
         fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
-        index = [
+        index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
     else:
         fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
-        index = [
+        index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
     ax = ax.ravel()
     files = list(save_dir.glob("results*.csv"))
     assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
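
With this change, Annotator.plot_distance_and_line() builds the label text and its size internally and takes optional line_color/centroid_color defaults, so callers such as the new DistanceCalculation can pass only the pixel distance and the two centroids. A minimal sketch, not from the diff (frame and centroid values are placeholders):

    # Minimal sketch of calling the updated method with its new defaults.
    import numpy as np
    from ultralytics.utils.plotting import Annotator

    im0 = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
    annotator = Annotator(im0, line_width=2)
    annotator.plot_distance_and_line(157.3, [[120, 200], [260, 310]])  # distance in pixels, two centroids
    im0 = annotator.result()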
{ultralytics-8.3.12 → ultralytics-8.3.13/ultralytics.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.12
+Version: 8.3.13
 Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.12/ultralytics/solutions/distance_calculation.py (removed)

@@ -1,139 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-import math
-
-import cv2
-
-from ultralytics.utils.checks import check_imshow
-from ultralytics.utils.plotting import Annotator, colors
-
-
-class DistanceCalculation:
-    """A class to calculate distance between two objects in a real-time video stream based on their tracks."""
-
-    def __init__(
-        self,
-        names,
-        view_img=False,
-        line_thickness=2,
-        line_color=(255, 0, 255),
-        centroid_color=(104, 31, 17),
-    ):
-        """
-        Initializes the DistanceCalculation class with the given parameters.
-
-        Args:
-            names (dict): Dictionary of classes names.
-            view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
-            line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
-            line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
-            centroid_color (tuple, optional): Color of the centroids drawn (BGR format). Defaults to (255, 0, 255).
-        """
-        # Visual & image information
-        self.im0 = None
-        self.annotator = None
-        self.view_img = view_img
-        self.line_color = line_color
-        self.centroid_color = centroid_color
-
-        # Prediction & tracking information
-        self.names = names
-        self.boxes = None
-        self.line_thickness = line_thickness
-        self.trk_ids = None
-
-        # Distance calculation information
-        self.centroids = []
-
-        # Mouse event information
-        self.left_mouse_count = 0
-        self.selected_boxes = {}
-
-        # Check if environment supports imshow
-        self.env_check = check_imshow(warn=True)
-        self.window_name = "Ultralytics Solutions"
-
-    def mouse_event_for_distance(self, event, x, y, flags, param):
-        """
-        Handles mouse events to select regions in a real-time video stream.
-
-        Args:
-            event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
-            x (int): X-coordinate of the mouse pointer.
-            y (int): Y-coordinate of the mouse pointer.
-            flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.).
-            param (dict): Additional parameters passed to the function.
-        """
-        if event == cv2.EVENT_LBUTTONDOWN:
-            self.left_mouse_count += 1
-            if self.left_mouse_count <= 2:
-                for box, track_id in zip(self.boxes, self.trk_ids):
-                    if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
-                        self.selected_boxes[track_id] = box
-
-        elif event == cv2.EVENT_RBUTTONDOWN:
-            self.selected_boxes = {}
-            self.left_mouse_count = 0
-
-    def start_process(self, im0, tracks):
-        """
-        Processes the video frame and calculates the distance between two bounding boxes.
-
-        Args:
-            im0 (ndarray): The image frame.
-            tracks (list): List of tracks obtained from the object tracking process.
-
-        Returns:
-            (ndarray): The processed image frame.
-        """
-        self.im0 = im0
-        if tracks[0].boxes.id is None:
-            if self.view_img:
-                self.display_frames()
-            return im0
-
-        self.boxes = tracks[0].boxes.xyxy.cpu()
-        clss = tracks[0].boxes.cls.cpu().tolist()
-        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
-        self.annotator = Annotator(self.im0, line_width=self.line_thickness)
-
-        for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
-            self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
-
-            if len(self.selected_boxes) == 2:
-                for trk_id in self.selected_boxes.keys():
-                    if trk_id == track_id:
-                        self.selected_boxes[track_id] = box
-
-        if len(self.selected_boxes) == 2:
-            # Store user selected boxes in centroids list
-            self.centroids.extend(
-                [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
-            )
-            # Calculate pixels distance
-            pixels_distance = math.sqrt(
-                (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
-            )
-            self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
-
-        self.centroids = []
-
-        if self.view_img and self.env_check:
-            self.display_frames()
-
-        return im0
-
-    def display_frames(self):
-        """Displays the current frame with annotations."""
-        cv2.namedWindow(self.window_name)
-        cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
-        cv2.imshow(self.window_name, self.im0)
-
-        if cv2.waitKey(1) & 0xFF == ord("q"):
-            return
-
-
-if __name__ == "__main__":
-    names = {0: "person", 1: "car"}  # example class names
-    distance_calculation = DistanceCalculation(names)