ultralytics 8.3.12__tar.gz → 8.3.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ultralytics-8.3.12/ultralytics.egg-info → ultralytics-8.3.14}/PKG-INFO +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/__init__.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/__init__.py +1 -2
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/utils.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/exporter.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/predict.py +68 -46
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/classify/train.py +1 -2
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/autobackend.py +2 -2
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/tasks.py +4 -5
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/analytics.py +3 -3
- ultralytics-8.3.14/ultralytics/solutions/distance_calculation.py +82 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/heatmap.py +2 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/object_counter.py +2 -2
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/__init__.py +3 -3
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/tensorboard.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/checks.py +3 -3
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/downloads.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/metrics.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/plotting.py +15 -16
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/torch_utils.py +1 -1
- {ultralytics-8.3.12 → ultralytics-8.3.14/ultralytics.egg-info}/PKG-INFO +1 -1
- ultralytics-8.3.12/ultralytics/solutions/distance_calculation.py +0 -139
- {ultralytics-8.3.12 → ultralytics-8.3.14}/LICENSE +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/README.md +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/pyproject.toml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/setup.cfg +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/conftest.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_cli.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_cuda.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_engine.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_exports.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_integrations.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_python.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/tests/test_solutions.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/hand-keypoints.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/lvis.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/signature.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/default.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/11/yolo11-cls.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/11/yolo11-obb.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/11/yolo11-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/11/yolo11-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/11/yolo11.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/solutions/default.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/augment.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/base.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/build.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/converter.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/dataset.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/loaders.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/data/split_dota.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/results.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/trainer.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/tuner.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/engine/validator.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/hub/google/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/hub/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/fastsam/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/fastsam/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/fastsam/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/nas/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/blocks.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/memory_attention.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/sam/modules/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/detect/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/detect/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/model.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/obb/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/pose/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/segment/val.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/world/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/world/train.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/models/yolo/world/train_world.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/activation.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/conv.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/head.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/ai_gym.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/parking_management.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/queue_management.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/solutions.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/speed_estimation.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/solutions/streamlit_inference.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/byte_tracker.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/track.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/utils/gmc.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/benchmarks.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/comet.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/mlflow.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/instance.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/ops.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics.egg-info/SOURCES.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics.egg-info/requires.txt +0 -0
- {ultralytics-8.3.12 → ultralytics-8.3.14}/ultralytics.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.12
+Version: 8.3.14
 Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,6 +1,5 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-import contextlib
 import shutil
 import subprocess
 import sys
@@ -639,7 +638,7 @@ def smart_value(v):
     else:
         try:
             return eval(v)
-        except:
+        except Exception:
             return v

@@ -235,7 +235,42 @@ class Predictor(BasePredictor):
         """
         features = self.get_im_features(im) if self.features is None else self.features

-
+        bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
+        points = (points, labels) if points is not None else None
+        # Embed prompts
+        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
+
+        # Predict masks
+        pred_masks, pred_scores = self.model.mask_decoder(
+            image_embeddings=features,
+            image_pe=self.model.prompt_encoder.get_dense_pe(),
+            sparse_prompt_embeddings=sparse_embeddings,
+            dense_prompt_embeddings=dense_embeddings,
+            multimask_output=multimask_output,
+        )
+
+        # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
+        # `d` could be 1 or 3 depends on `multimask_output`.
+        return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+
+    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+        """
+        Prepares and transforms the input prompts for processing based on the destination shape.
+
+        Args:
+            dst_shape (tuple): The target shape (height, width) for the prompts.
+            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+        Raises:
+            AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+        Returns:
+            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+        """
+        src_shape = self.batch[1][0].shape[:2]
         r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
         # Transform input prompts
         if points is not None:
@@ -258,23 +293,7 @@ class Predictor(BasePredictor):
             bboxes *= r
         if masks is not None:
             masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
-        points = (points, labels) if points is not None else None
-        # Embed prompts
-        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
-
-        # Predict masks
-        pred_masks, pred_scores = self.model.mask_decoder(
-            image_embeddings=features,
-            image_pe=self.model.prompt_encoder.get_dense_pe(),
-            sparse_prompt_embeddings=sparse_embeddings,
-            dense_prompt_embeddings=dense_embeddings,
-            multimask_output=multimask_output,
-        )
-
-        # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
-        # `d` could be 1 or 3 depends on `multimask_output`.
-        return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+        return bboxes, points, labels, masks

     def generate(
         self,
@@ -693,34 +712,7 @@ class SAM2Predictor(Predictor):
         """
         features = self.get_im_features(im) if self.features is None else self.features

-
-        r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
-        # Transform input prompts
-        if points is not None:
-            points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
-            points = points[None] if points.ndim == 1 else points
-            # Assuming labels are all positive if users don't pass labels.
-            if labels is None:
-                labels = torch.ones(points.shape[0])
-            labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
-            points *= r
-            # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
-            points, labels = points[:, None], labels[:, None]
-        if bboxes is not None:
-            bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
-            bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
-            bboxes = bboxes.view(-1, 2, 2) * r
-            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
-            # NOTE: merge "boxes" and "points" into a single "points" input
-            # (where boxes are added at the beginning) to model.sam_prompt_encoder
-            if points is not None:
-                points = torch.cat([bboxes, points], dim=1)
-                labels = torch.cat([bbox_labels, labels], dim=1)
-            else:
-                points, labels = bboxes, bbox_labels
-        if masks is not None:
-            masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
+        bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
         points = (points, labels) if points is not None else None

         sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
@@ -744,6 +736,36 @@ class SAM2Predictor(Predictor):
         # `d` could be 1 or 3 depends on `multimask_output`.
         return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)

+    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+        """
+        Prepares and transforms the input prompts for processing based on the destination shape.
+
+        Args:
+            dst_shape (tuple): The target shape (height, width) for the prompts.
+            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+        Raises:
+            AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+        Returns:
+            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+        """
+        bboxes, points, labels, masks = super()._prepare_prompts(dst_shape, bboxes, points, labels, masks)
+        if bboxes is not None:
+            bboxes = bboxes.view(-1, 2, 2)
+            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
+            # NOTE: merge "boxes" and "points" into a single "points" input
+            # (where boxes are added at the beginning) to model.sam_prompt_encoder
+            if points is not None:
+                points = torch.cat([bboxes, points], dim=1)
+                labels = torch.cat([bbox_labels, labels], dim=1)
+            else:
+                points, labels = bboxes, bbox_labels
+        return bboxes, points, labels, masks
+
     def set_image(self, image):
         """
         Preprocesses and sets a single image for inference using the SAM2 model.
@@ -8,7 +8,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
 from ultralytics.engine.trainer import BaseTrainer
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import ClassificationModel
-from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
 from ultralytics.utils.plotting import plot_images, plot_results
 from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first

@@ -141,7 +141,6 @@ class ClassificationTrainer(BaseTrainer):
                     self.metrics = self.validator(model=f)
                     self.metrics.pop("fitness", None)
                     self.run_callbacks("on_fit_epoch_end")
-        LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")

     def plot_training_samples(self, batch, ni):
         """Plots training samples with their annotations."""
@@ -46,7 +46,7 @@ def default_class_names(data=None):
     if data:
         try:
             return yaml_load(check_yaml(data))["names"]
-        except:
+        except Exception:
             pass
     return {i: f"class{i}" for i in range(999)}  # return default if above errors

@@ -501,7 +501,7 @@ class AutoBackend(nn.Module):

         # TensorRT
         elif self.engine:
-            if self.dynamic
+            if self.dynamic and im.shape != self.bindings["images"].shape:
                 if self.is_trt10:
                     self.context.set_input_shape("images", im.shape)
                     self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
@@ -963,7 +963,6 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
                 args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
             except ValueError:
                 pass
-
         n = n_ = max(round(n * depth), 1) if n > 1 else n  # depth gain
         if m in {
             Classify,
@@ -1102,7 +1101,7 @@ def guess_model_scale(model_path):
         (str): The size character of the model's scale, which can be n, s, m, l, or x.
     """
     try:
-        return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1)  # n, s, m, l, or x
+        return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1)  # noqa, returns n, s, m, l, or x
     except AttributeError:
         return ""

@@ -1139,7 +1138,7 @@ def guess_model_task(model):
     if isinstance(model, dict):
         try:
             return cfg2task(model)
-        except:
+        except Exception:
             pass

     # Guess from PyTorch model
@@ -1147,12 +1146,12 @@ def guess_model_task(model):
         for x in "model.args", "model.model.args", "model.model.model.args":
             try:
                 return eval(x)["task"]
-            except:
+            except Exception:
                 pass
         for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
             try:
                 return cfg2task(eval(x))
-            except:
+            except Exception:
                 pass

         for m in model.modules():
@@ -48,7 +48,7 @@ class Analytics(BaseSolution):
         self.canvas = FigureCanvas(self.fig)  # Set common axis properties
         self.ax.set_facecolor(self.bg_color)
         self.color_mapping = {}
-        self.ax.axis("equal") if type == "pie" else None  # Ensure pie chart is circular
+        self.ax.axis("equal") if self.type == "pie" else None  # Ensure pie chart is circular

     def process_data(self, im0, frame_number):
         """
@@ -61,11 +61,11 @@ class Analytics(BaseSolution):
         self.extract_tracks(im0)  # Extract tracks

         if self.type == "line":
-            for
+            for _ in self.boxes:
                 self.total_counts += 1
             im0 = self.update_graph(frame_number=frame_number)
             self.total_counts = 0
-        elif self.type
+        elif self.type in {"pie", "bar", "area"}:
             self.clswise_count = {}
             for box, cls in zip(self.boxes, self.clss):
                 if self.names[int(cls)] in self.clswise_count:
@@ -0,0 +1,82 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import math
+
+import cv2
+
+from ultralytics.solutions.solutions import BaseSolution  # Import a parent class
+from ultralytics.utils.plotting import Annotator, colors
+
+
+class DistanceCalculation(BaseSolution):
+    """A class to calculate distance between two objects in a real-time video stream based on their tracks."""
+
+    def __init__(self, **kwargs):
+        """Initializes the DistanceCalculation class with the given parameters."""
+        super().__init__(**kwargs)
+
+        # Mouse event information
+        self.left_mouse_count = 0
+        self.selected_boxes = {}
+
+    def mouse_event_for_distance(self, event, x, y, flags, param):
+        """
+        Handles mouse events to select regions in a real-time video stream.
+
+        Args:
+            event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
+            x (int): X-coordinate of the mouse pointer.
+            y (int): Y-coordinate of the mouse pointer.
+            flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.).
+            param (dict): Additional parameters passed to the function.
+        """
+        if event == cv2.EVENT_LBUTTONDOWN:
+            self.left_mouse_count += 1
+            if self.left_mouse_count <= 2:
+                for box, track_id in zip(self.boxes, self.track_ids):
+                    if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
+                        self.selected_boxes[track_id] = box
+
+        elif event == cv2.EVENT_RBUTTONDOWN:
+            self.selected_boxes = {}
+            self.left_mouse_count = 0
+
+    def calculate(self, im0):
+        """
+        Processes the video frame and calculates the distance between two bounding boxes.
+
+        Args:
+            im0 (ndarray): The image frame.
+
+        Returns:
+            (ndarray): The processed image frame.
+        """
+        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
+        self.extract_tracks(im0)  # Extract tracks
+
+        # Iterate over bounding boxes, track ids and classes index
+        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+            self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
+
+            if len(self.selected_boxes) == 2:
+                for trk_id in self.selected_boxes.keys():
+                    if trk_id == track_id:
+                        self.selected_boxes[track_id] = box
+
+        if len(self.selected_boxes) == 2:
+            # Store user selected boxes in centroids list
+            self.centroids.extend(
+                [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
+            )
+            # Calculate pixels distance
+            pixels_distance = math.sqrt(
+                (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
+            )
+            self.annotator.plot_distance_and_line(pixels_distance, self.centroids)
+
+        self.centroids = []
+
+        self.display_output(im0)  # display output with base class function
+        cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
+
+        return im0  # return output image for more usage
@@ -52,7 +52,8 @@ class Heatmap(ObjectCounter):
         Returns:
             im0 (ndarray): Processed image for further usage
         """
-
+        if not self.initialized:
+            self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
             self.initialized = True  # Initialize heatmap only once

         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
@@ -112,13 +112,13 @@ class ObjectCounter(BaseSolution):
         # Iterate over bounding boxes, track ids and classes index
         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
             # Draw bounding box and counting region
-            self.annotator.box_label(box, label=self.names[cls], color=colors(
+            self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
             self.store_tracking_history(track_id, box)  # Store track history
             self.store_classwise_counts(cls)  # store classwise counts in dict

             # Draw tracks of objects
             self.annotator.draw_centroid_and_tracks(
-                self.track_line, color=colors(int(
+                self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
             )

             # store previous position of track for object counting
@@ -526,7 +526,7 @@ def read_device_model() -> str:
     try:
         with open("/proc/device-tree/model") as f:
             return f.read()
-    except:
+    except Exception:
         return ""

@@ -584,7 +584,7 @@ def is_docker() -> bool:
     try:
         with open("/proc/self/cgroup") as f:
             return "docker" in f.read()
-    except:
+    except Exception:
         return False

@@ -623,7 +623,7 @@ def is_online() -> bool:
         for dns in ("1.1.1.1", "8.8.8.8"):  # check Cloudflare and Google DNS
             socket.create_connection(address=(dns, 80), timeout=2.0).close()
             return True
-    except:
+    except Exception:
         return False

@@ -50,7 +50,7 @@ def _log_tensorboard_graph(trainer):
             LOGGER.info(f"{PREFIX}model graph visualization added ✅")
             return

-    except:
+    except Exception:
         # Fallback to TorchScript export steps (RTDETR)
         try:
             model = deepcopy(de_parallel(trainer.model))
@@ -277,7 +277,7 @@ def check_latest_pypi_version(package_name="ultralytics"):
         response = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=3)
         if response.status_code == 200:
             return response.json()["info"]["version"]
-    except:
+    except Exception:
         return None

@@ -299,7 +299,7 @@ def check_pip_update_available():
                     f"Update with 'pip install -U ultralytics'"
                 )
                 return True
-        except:
+        except Exception:
             pass
     return False

@@ -715,7 +715,7 @@ def git_describe(path=ROOT):  # path must be a directory
     """Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe."""
     try:
         return subprocess.check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1]
-    except:
+    except Exception:
         return ""

@@ -598,7 +598,7 @@ def ap_per_class(
         # AP from recall-precision curve
         for j in range(tp.shape[1]):
             ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
-            if
+            if j == 0:
                 prec_values.append(np.interp(x, mrec, mpre))  # precision at mAP@0.5

     prec_values = np.array(prec_values)  # (nc, 1000)
@@ -804,31 +804,30 @@ class Annotator:
             self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
         )

-    def plot_distance_and_line(
+    def plot_distance_and_line(
+        self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
+    ):
         """
         Plot the distance and line on frame.

         Args:
             pixels_distance (float): Pixels distance between two bbox centroids.
             centroids (list): Bounding box centroids data.
-            line_color (tuple):
-            centroid_color (tuple):
+            line_color (tuple, optional): Distance line color.
+            centroid_color (tuple, optional): Bounding box centroid color.
         """
         # Get the text size
-
-
-        )
+        text = f"Pixels Distance: {pixels_distance:.2f}"
+        (text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)

         # Define corners with 10-pixel margin and draw rectangle
-
-        bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
-        cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+        cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)

         # Calculate the position for the text with a 10-pixel margin and draw text
-        text_position = (
+        text_position = (25, 25 + text_height_m + 10)
         cv2.putText(
             self.im,
-
+            text,
             text_position,
             0,
             self.sf,
@@ -1118,7 +1117,7 @@ def plot_images(
                         im[y : y + h, x : x + w, :][mask] = (
                             im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6
                         )
-                    except:
+                    except Exception:
                         pass
         annotator.fromarray(im)
     if not save:
@@ -1156,16 +1155,16 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
     save_dir = Path(file).parent if file else Path(dir)
     if classify:
         fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
-        index = [
+        index = [2, 5, 3, 4]
     elif segment:
         fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
-        index = [
+        index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
     elif pose:
         fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
-        index = [
+        index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
     else:
         fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
-        index = [
+        index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
     ax = ax.ravel()
     files = list(save_dir.glob("results*.csv"))
     assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
@@ -119,7 +119,7 @@ def get_cpu_info():
             info = cpuinfo.get_cpu_info()  # info dict
             string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
             PERSISTENT_CACHE["cpu_info"] = string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")
-        except:
+        except Exception:
             pass
     return PERSISTENT_CACHE.get("cpu_info", "unknown")

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.12
+Version: 8.3.14
 Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>