ultralytics 8.3.158.tar.gz → 8.3.160.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ultralytics-8.3.158/ultralytics.egg-info → ultralytics-8.3.160}/PKG-INFO +2 -2
- {ultralytics-8.3.158 → ultralytics-8.3.160}/README.md +1 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/__init__.py +1 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/__init__.py +0 -2
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/augment.py +8 -8
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/exporter.py +4 -2
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/model.py +2 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/results.py +0 -5
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/detect/val.py +56 -20
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/model.py +25 -24
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/pose/val.py +3 -27
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/segment/val.py +7 -42
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/world/train.py +1 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/yoloe/train.py +1 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/autobackend.py +5 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/text_model.py +44 -11
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/heatmap.py +1 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/object_counter.py +10 -10
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/similarity_search.py +5 -14
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/solutions.py +2 -2
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/__init__.py +1 -4
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/instance.py +2 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/metrics.py +18 -30
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/plotting.py +1 -1
- {ultralytics-8.3.158 → ultralytics-8.3.160/ultralytics.egg-info}/PKG-INFO +2 -2
- {ultralytics-8.3.158 → ultralytics-8.3.160}/LICENSE +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/pyproject.toml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/setup.cfg +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/conftest.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_cli.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_cuda.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_engine.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_exports.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_integrations.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_python.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/tests/test_solutions.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/HomeObjects-3K.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco8-grayscale.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco8-multispectral.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/dog-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/dota8-multispectral.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/hand-keypoints.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/lvis.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/medical-pills.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/signature.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/default.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yolo11-cls.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yolo11-obb.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yolo11-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yolo11-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yolo11.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yoloe-11-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/11/yoloe-11.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/12/yolo12-cls.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/12/yolo12-obb.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/12/yolo12-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/12/yolo12-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/12/yolo12.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yoloe-v8.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/base.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/build.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/converter.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/dataset.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/loaders.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/scripts/download_weights.sh +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/scripts/get_coco.sh +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/scripts/get_coco128.sh +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/scripts/get_imagenet.sh +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/split.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/split_dota.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/data/utils.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/trainer.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/tuner.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/engine/validator.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/hub/google/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/hub/utils.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/fastsam/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/fastsam/model.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/fastsam/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/nas/model.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/model.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/blocks.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/memory_attention.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/modules/utils.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/sam/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/classify/train.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/detect/train.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/obb/val.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/world/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/world/train_world.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/yoloe/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/yoloe/predict.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/yoloe/train_seg.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/models/yolo/yoloe/val.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/activation.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/conv.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/head.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/nn/tasks.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/ai_gym.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/analytics.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/config.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/distance_calculation.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/instance_segmentation.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/object_blurrer.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/object_cropper.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/parking_management.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/queue_management.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/region_counter.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/security_alarm.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/speed_estimation.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/streamlit_inference.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/templates/similarity-search.html +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/trackzone.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/solutions/vision_eye.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/byte_tracker.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/track.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/utils/gmc.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/autodevice.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/benchmarks.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/comet.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/mlflow.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/tensorboard.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/checks.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/downloads.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/export.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/ops.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/torch_utils.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics.egg-info/SOURCES.txt +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics.egg-info/requires.txt +0 -0
- {ultralytics-8.3.158 → ultralytics-8.3.160}/ultralytics.egg-info/top_level.txt +0 -0
`PKG-INFO`

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.158
+Version: 8.3.160
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -87,7 +87,7 @@ Dynamic: license-file
 <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="Ultralytics YOLO banner"></a>
 </p>
 
-[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
+[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) <br>
 
 <div>
 <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
```
`README.md`

```diff
@@ -4,7 +4,7 @@
 <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="Ultralytics YOLO banner"></a>
 </p>
 
-[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
+[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) <br>
 
 <div>
 <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
```
`ultralytics/cfg/__init__.py`

```diff
@@ -954,8 +954,6 @@ def entrypoint(debug: str = "") -> None:
         from ultralytics import YOLO
 
         model = YOLO(model, task=task)
-        if isinstance(overrides.get("pretrained"), str):
-            model.load(overrides["pretrained"])
 
     # Task Update
     if task != model.task:
```
`ultralytics/data/augment.py`

```diff
@@ -251,8 +251,7 @@ class Compose:
         >>> multiple_transforms = compose[0:2]  # Returns a Compose object with RandomFlip and RandomPerspective
         """
         assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
-
-        return Compose([self.transforms[i] for i in index])
+        return Compose([self.transforms[i] for i in index]) if isinstance(index, list) else self.transforms[index]
 
     def __setitem__(self, index: Union[list, int], value: Union[list, int]) -> None:
         """
```
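With this change, an integer index returns the underlying transform itself rather than a single-element `Compose`, while list indexing keeps the old behavior. A minimal sketch of the new semantics (the two transforms are arbitrary choices for illustration):

```python
# Sketch: Compose indexing after the __getitem__ change (transform choices are illustrative).
from ultralytics.data.augment import Compose, RandomFlip, RandomHSV

compose = Compose([RandomHSV(), RandomFlip(direction="horizontal", p=0.5)])
flip = compose[1]         # a RandomFlip instance, no longer wrapped in a Compose
subset = compose[[0, 1]]  # list indexing still returns a Compose
```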
```diff
@@ -1560,14 +1559,15 @@ class RandomFlip:
         h = 1 if instances.normalized else h
         w = 1 if instances.normalized else w
 
-        #
+        # WARNING: two separate if and calls to random.random() intentional for reproducibility with older versions
         if self.direction == "vertical" and random.random() < self.p:
             img = np.flipud(img)
             instances.flipud(h)
+            if self.flip_idx is not None and instances.keypoints is not None:
+                instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
         if self.direction == "horizontal" and random.random() < self.p:
             img = np.fliplr(img)
             instances.fliplr(w)
-            # For keypoints
             if self.flip_idx is not None and instances.keypoints is not None:
                 instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
         labels["img"] = np.ascontiguousarray(img)
```
```diff
@@ -2533,9 +2533,9 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
     flip_idx = dataset.data.get("flip_idx", [])  # for keypoints augmentation
     if dataset.use_keypoints:
         kpt_shape = dataset.data.get("kpt_shape", None)
-        if len(flip_idx) == 0 and hyp.fliplr > 0.0:
-            hyp.fliplr = 0.0
-            LOGGER.warning("No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'.")
+        if len(flip_idx) == 0 and (hyp.fliplr > 0.0 or hyp.flipud > 0.0):
+            hyp.fliplr = hyp.flipud = 0.0  # both fliplr and flipud require flip_idx
+            LOGGER.warning("No 'flip_idx' array defined in data.yaml, disabling 'fliplr' and 'flipud' augmentations.")
         elif flip_idx and (len(flip_idx) != kpt_shape[0]):
             raise ValueError(f"data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}")
 
```
```diff
@@ -2546,7 +2546,7 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
             CutMix(dataset, pre_transform=pre_transform, p=hyp.cutmix),
             Albumentations(p=1.0),
             RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v),
-            RandomFlip(direction="vertical", p=hyp.flipud),
+            RandomFlip(direction="vertical", p=hyp.flipud, flip_idx=flip_idx),
             RandomFlip(direction="horizontal", p=hyp.fliplr, flip_idx=flip_idx),
         ]
     )  # transforms
```
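Taken together, the augment.py hunks wire `flip_idx` into the vertical `RandomFlip` as well, so swapped keypoint pairs are remapped under `flipud` exactly as they already were under `fliplr`, and both augmentations are disabled together when `flip_idx` is missing. A hedged sketch (the 5-point `flip_idx` below is invented for illustration, not from a real dataset):

```python
# Sketch: vertical flips now remap swapped keypoint pairs too.
from ultralytics.data.augment import RandomFlip

flip_idx = [0, 2, 1, 4, 3]  # hypothetical mapping: keypoint i becomes flip_idx[i] after a mirror flip
vflip = RandomFlip(direction="vertical", p=0.5, flip_idx=flip_idx)    # previously ignored flip_idx
hflip = RandomFlip(direction="horizontal", p=0.5, flip_idx=flip_idx)
# Both transforms expect the labels dict ({"img": ..., "instances": ...}) produced by the dataset pipeline.
```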
`ultralytics/engine/exporter.py`

```diff
@@ -1152,7 +1152,9 @@ class Exporter:
         )
         if getattr(self.model, "end2end", False):
             raise ValueError("IMX export is not supported for end2end models.")
-        check_requirements(("model-compression-toolkit>=2.3.0", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0"))
+        check_requirements(
+            ("model-compression-toolkit>=2.3.0,<2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0")
+        )
         check_requirements("imx500-converter[pt]>=3.16.1")  # Separate requirements for imx500-converter
 
         import model_compression_toolkit as mct
```
```diff
@@ -1493,7 +1495,7 @@ class NMSModel(torch.nn.Module):
         scores, classes = scores.max(dim=-1)
         self.args.max_det = min(pred.shape[1], self.args.max_det)  # in case num_anchors < max_det
         # (N, max_det, 4 coords + 1 class score + 1 class label + extra_shape).
-        out = torch.zeros(bs, self.args.max_det, boxes.shape[-1] + 2 + extra_shape, **kwargs)
+        out = torch.zeros(pred.shape[0], self.args.max_det, boxes.shape[-1] + 2 + extra_shape, **kwargs)
         for i in range(bs):
             box, cls, score, extra = boxes[i], classes[i], scores[i], extras[i]
             mask = score > self.args.conf
```
`ultralytics/engine/model.py`

```diff
@@ -777,6 +777,8 @@ class Model(torch.nn.Module):
 
         checks.check_pip_update_available()
 
+        if isinstance(kwargs.get("pretrained", None), (str, Path)):
+            self.load(kwargs["pretrained"])  # load pretrained weights if provided
         overrides = YAML.load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides
         custom = {
             # NOTE: handle the case when 'cfg' includes 'data'.
```
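Paired with the `cfg/__init__.py` removal above, `pretrained` handling moves from the CLI entrypoint into `Model.train()`, so the Python API now honors it too. A hedged usage sketch (the model and dataset names are the stock Ultralytics examples, not taken from this diff):

```python
# Sketch: build from a YAML config, then let train() load the weights passed via `pretrained`.
from ultralytics import YOLO

model = YOLO("yolo11n.yaml")  # architecture only, randomly initialized
model.train(data="coco8.yaml", epochs=3, pretrained="yolo11n.pt")  # weights loaded before training starts
```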
`ultralytics/engine/results.py`

```diff
@@ -16,7 +16,6 @@ import torch
 from ultralytics.data.augment import LetterBox
 from ultralytics.utils import LOGGER, DataExportMixin, SimpleClass, ops
 from ultralytics.utils.plotting import Annotator, colors, save_one_box
-from ultralytics.utils.torch_utils import smart_inference_mode
 
 
 class BaseTensor(SimpleClass):
```
```diff
@@ -1204,7 +1203,6 @@ class Keypoints(BaseTensor):
         >>> keypoints_cpu = keypoints.cpu()  # Move keypoints to CPU
         """
 
-    @smart_inference_mode()  # avoid keypoints < conf in-place error
     def __init__(self, keypoints: Union[torch.Tensor, np.ndarray], orig_shape: Tuple[int, int]) -> None:
         """
         Initialize the Keypoints object with detection keypoints and original image dimensions.
```
```diff
@@ -1225,9 +1223,6 @@ class Keypoints(BaseTensor):
         """
         if keypoints.ndim == 2:
             keypoints = keypoints[None, :]
-        if keypoints.shape[2] == 3:  # x, y, conf
-            mask = keypoints[..., 2] < 0.5  # points with conf < 0.5 (not visible)
-            keypoints[..., :2][mask] = 0
         super().__init__(keypoints, orig_shape)
         self.has_visible = self.data.shape[-1] == 3
 
```
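After this removal, `Keypoints` no longer zeroes the x, y coordinates of points whose confidence is below 0.5, so callers that depended on that in-place masking must filter themselves. A hedged sketch of reproducing the old behavior in user code:

```python
# Sketch: reapply the former conf < 0.5 masking that Keypoints.__init__ used to perform.
from ultralytics import YOLO

results = YOLO("yolo11n-pose.pt")("https://ultralytics.com/images/bus.jpg")
for r in results:
    kpts = r.keypoints.data.clone()  # (N, K, 3) ordered as x, y, conf when conf is present
    if kpts.shape[-1] == 3:
        kpts[..., :2][kpts[..., 2] < 0.5] = 0  # zero out low-confidence coordinates
```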
`ultralytics/models/yolo/detect/val.py`

```diff
@@ -2,7 +2,7 @@
 
 import os
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
```
```diff
@@ -219,6 +219,7 @@ class DetectionValidator(BaseValidator):
             self.confusion_matrix.plot(save_dir=self.save_dir, normalize=normalize, on_plot=self.on_plot)
         self.metrics.speed = self.speed
         self.metrics.confusion_matrix = self.confusion_matrix
+        self.metrics.save_dir = self.save_dir
 
     def get_stats(self) -> Dict[str, Any]:
         """
```
```diff
@@ -392,38 +393,73 @@ class DetectionValidator(BaseValidator):
         Returns:
             (Dict[str, Any]): Updated statistics dictionary with COCO/LVIS evaluation results.
         """
-        if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
-            pred_json = self.save_dir / "predictions.json"  # predictions
-            anno_json = (
-                self.data["path"]
-                / "annotations"
-                / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
-            )  # annotations
+        pred_json = self.save_dir / "predictions.json"  # predictions
+        anno_json = (
+            self.data["path"]
+            / "annotations"
+            / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
+        )  # annotations
+        return self.coco_evaluate(stats, pred_json, anno_json)
+
+    def coco_evaluate(
+        self,
+        stats: Dict[str, Any],
+        pred_json: str,
+        anno_json: str,
+        iou_types: Union[str, List[str]] = "bbox",
+        suffix: Union[str, List[str]] = "Box",
+    ) -> Dict[str, Any]:
+        """
+        Evaluate COCO/LVIS metrics using faster-coco-eval library.
+
+        Performs evaluation using the faster-coco-eval library to compute mAP metrics
+        for object detection. Updates the provided stats dictionary with computed metrics
+        including mAP50, mAP50-95, and LVIS-specific metrics if applicable.
+
+        Args:
+            stats (Dict[str, Any]): Dictionary to store computed metrics and statistics.
+            pred_json (str | Path): Path to JSON file containing predictions in COCO format.
+            anno_json (str | Path): Path to JSON file containing ground truth annotations in COCO format.
+            iou_types (str | List[str]): IoU type(s) for evaluation. Can be single string or list of strings.
+                Common values include "bbox", "segm", "keypoints". Defaults to "bbox".
+            suffix (str | List[str]): Suffix to append to metric names in stats dictionary. Should correspond
+                to iou_types if multiple types provided. Defaults to "Box".
 
+        Returns:
+            (Dict[str, Any]): Updated stats dictionary containing the computed COCO/LVIS evaluation metrics.
+        """
+        if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
             LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
             try:
                 for x in pred_json, anno_json:
                     assert x.is_file(), f"{x} file not found"
+                iou_types = [iou_types] if isinstance(iou_types, str) else iou_types
+                suffix = [suffix] if isinstance(suffix, str) else suffix
                 check_requirements("faster-coco-eval>=1.6.7")
                 from faster_coco_eval import COCO, COCOeval_faster
 
                 anno = COCO(anno_json)
                 pred = anno.loadRes(pred_json)
-                val = COCOeval_faster(anno, pred, iouType="bbox", lvis_style=self.is_lvis, print_function=LOGGER.info)
-                val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
-                val.evaluate()
-                val.accumulate()
-                val.summarize()
+                for i, iou_type in enumerate(iou_types):
+                    val = COCOeval_faster(
+                        anno, pred, iouType=iou_type, lvis_style=self.is_lvis, print_function=LOGGER.info
+                    )
+                    val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
+                    val.evaluate()
+                    val.accumulate()
+                    val.summarize()
+
+                    # update mAP50-95 and mAP50
+                    stats[f"metrics/mAP50({suffix[i][0]})"] = val.stats_as_dict["AP_50"]
+                    stats[f"metrics/mAP50-95({suffix[i][0]})"] = val.stats_as_dict["AP_all"]
 
-                # update mAP50-95 and mAP50
-                stats[self.metrics.keys[-1]] = val.stats_as_dict["AP_all"]
-                stats[self.metrics.keys[-2]] = val.stats_as_dict["AP_50"]
+                    if self.is_lvis:
+                        stats[f"metrics/APr({suffix[i][0]})"] = val.stats_as_dict["APr"]
+                        stats[f"metrics/APc({suffix[i][0]})"] = val.stats_as_dict["APc"]
+                        stats[f"metrics/APf({suffix[i][0]})"] = val.stats_as_dict["APf"]
 
                 if self.is_lvis:
-                    stats["metrics/APr(B)"] = val.stats_as_dict["APr"]
-                    stats["metrics/APc(B)"] = val.stats_as_dict["APc"]
-                    stats["metrics/APf(B)"] = val.stats_as_dict["APf"]
-                    stats["fitness"] = val.stats_as_dict["AP_all"]
+                    stats["fitness"] = stats["metrics/mAP50-95(B)"]  # always use box mAP50-95 for fitness
             except Exception as e:
                 LOGGER.warning(f"faster-coco-eval unable to run: {e}")
         return stats
```
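This refactor extracts the one-off `eval_json` body into a reusable `coco_evaluate` that can score several IoU types in a single call; the pose and segment validators below reduce to thin wrappers around it. A sketch of the reuse pattern (mirroring the segment hunk that follows):

```python
# Sketch: a task validator delegating to DetectionValidator.coco_evaluate for two IoU types.
def eval_json(self, stats):
    pred_json = self.save_dir / "predictions.json"
    anno_json = self.data["path"] / "annotations" / "instances_val2017.json"
    # One COCOeval_faster pass per IoU type; metric keys are suffixed with the initial,
    # e.g. "metrics/mAP50-95(B)" for boxes and "metrics/mAP50-95(M)" for masks.
    return self.coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
```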
`ultralytics/models/yolo/model.py`

```diff
@@ -406,18 +406,18 @@ class YOLOE(Model):
                 f"Expected equal number of bounding boxes and classes, but got {len(visual_prompts['bboxes'])} and "
                 f"{len(visual_prompts['cls'])} respectively"
             )
-        self.predictor = (predictor or yolo.yoloe.YOLOEVPDetectPredictor)(
-            overrides={
-                "task": self.model.task,
-                "mode": "predict",
-                "save": False,
-                "verbose": refer_image is None,
-                "batch": 1,
-            },
-            _callbacks=self.callbacks,
-        )
+            if not isinstance(self.predictor, yolo.yoloe.YOLOEVPDetectPredictor):
+                self.predictor = (predictor or yolo.yoloe.YOLOEVPDetectPredictor)(
+                    overrides={
+                        "task": self.model.task,
+                        "mode": "predict",
+                        "save": False,
+                        "verbose": refer_image is None,
+                        "batch": 1,
+                    },
+                    _callbacks=self.callbacks,
+                )
 
-        if len(visual_prompts):
             num_cls = (
                 max(len(set(c)) for c in visual_prompts["cls"])
                 if isinstance(source, list) and refer_image is None  # means multiple images
```
```diff
@@ -426,18 +426,19 @@ class YOLOE(Model):
             self.model.model[-1].nc = num_cls
             self.model.names = [f"object{i}" for i in range(num_cls)]
             self.predictor.set_prompts(visual_prompts.copy())
-
-        self.predictor.setup_model(model=self.model)
-
-        if refer_image is None and isinstance(source, str):
-            dataset = load_inference_source(source)
-            if dataset.mode in {"video", "stream"}:
-                # NOTE: set the first frame as refer image for videos/streams inference
-                refer_image = next(iter(dataset))[1][0]
-        if refer_image is not None and len(visual_prompts):
-            vpe = self.predictor.get_vpe(refer_image)
-            self.model.set_classes(self.model.names, vpe)
-            self.task = "segment" if isinstance(self.predictor, yolo.segment.SegmentationPredictor) else "detect"
-            self.predictor = None  # reset predictor
+            self.predictor.setup_model(model=self.model)
+
+            if refer_image is None and source is not None:
+                dataset = load_inference_source(source)
+                if dataset.mode in {"video", "stream"}:
+                    # NOTE: set the first frame as refer image for videos/streams inference
+                    refer_image = next(iter(dataset))[1][0]
+            if refer_image is not None:
+                vpe = self.predictor.get_vpe(refer_image)
+                self.model.set_classes(self.model.names, vpe)
+                self.task = "segment" if isinstance(self.predictor, yolo.segment.SegmentationPredictor) else "detect"
+                self.predictor = None  # reset predictor
+        elif isinstance(self.predictor, yolo.yoloe.YOLOEVPDetectPredictor):
+            self.predictor = None  # reset predictor if no visual prompts
 
         return super().predict(source, stream, **kwargs)
```
`ultralytics/models/yolo/pose/val.py`

```diff
@@ -8,7 +8,6 @@ import torch
 
 from ultralytics.models.yolo.detect import DetectionValidator
 from ultralytics.utils import LOGGER, ops
-from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.metrics import OKS_SIGMA, PoseMetrics, kpt_iou
 
 
```
```diff
@@ -289,29 +288,6 @@ class PoseValidator(DetectionValidator):
 
     def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
         """Evaluate object detection model using COCO JSON format."""
-        if self.args.save_json and self.is_coco and len(self.jdict):
-            anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json"  # annotations
-            pred_json = self.save_dir / "predictions.json"  # predictions
-            LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
-            try:
-                check_requirements("faster-coco-eval>=1.6.7")
-                from faster_coco_eval import COCO, COCOeval_faster
-
-                for x in anno_json, pred_json:
-                    assert x.is_file(), f"{x} file not found"
-                anno = COCO(anno_json)  # init annotations api
-                pred = anno.loadRes(pred_json)  # init predictions api (must pass string, not Path)
-                kwargs = dict(cocoGt=anno, cocoDt=pred, print_function=LOGGER.info)
-                for i, eval in enumerate(
-                    [COCOeval_faster(iouType="bbox", **kwargs), COCOeval_faster(iouType="keypoints", **kwargs)]
-                ):
-                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
-                    eval.evaluate()
-                    eval.accumulate()
-                    eval.summarize()
-                    idx = i * 4 + 2
-                    # update mAP50-95 and mAP50
-                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[:2]
-            except Exception as e:
-                LOGGER.warning(f"faster-coco-eval unable to run: {e}")
-        return stats
+        anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json"  # annotations
+        pred_json = self.save_dir / "predictions.json"  # predictions
+        return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "keypoints"], suffix=["Box", "Pose"])
```
`ultralytics/models/yolo/segment/val.py`

```diff
@@ -272,45 +272,10 @@ class SegmentationValidator(DetectionValidator):
 
     def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
         """Return COCO-style instance segmentation evaluation metrics."""
-        if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
-            pred_json = self.save_dir / "predictions.json"  # predictions
-            anno_json = (
-                self.data["path"]
-                / "annotations"
-                / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
-            )  # annotations
-
-            LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
-            try:
-                for x in anno_json, pred_json:
-                    assert x.is_file(), f"{x} file not found"
-                check_requirements("faster-coco-eval>=1.6.7")
-                from faster_coco_eval import COCO, COCOeval_faster
-
-                anno = COCO(anno_json)  # init annotations api
-                pred = anno.loadRes(pred_json)  # init predictions api (must pass string, not Path)
-                kwargs = dict(cocoGt=anno, cocoDt=pred, lvis_style=self.is_lvis, print_function=LOGGER.info)
-                for i, eval in enumerate(
-                    [COCOeval_faster(iouType="bbox", **kwargs), COCOeval_faster(iouType="segm", **kwargs)]
-                ):
-                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
-                    eval.evaluate()
-                    eval.accumulate()
-                    eval.summarize()
-                    idx = i * 4 + 2
-                    # update mAP50-95 and mAP50
-                    stats[self.metrics.keys[idx + 1]] = eval.stats_as_dict["AP_all"]
-                    stats[self.metrics.keys[idx]] = eval.stats_as_dict["AP_50"]
-
-                    if self.is_lvis:
-                        tag = "B" if i == 0 else "M"
-                        stats[f"metrics/APr({tag})"] = eval.stats_as_dict["APr"]
-                        stats[f"metrics/APc({tag})"] = eval.stats_as_dict["APc"]
-                        stats[f"metrics/APf({tag})"] = eval.stats_as_dict["APf"]
-
-                if self.is_lvis:
-                    stats["fitness"] = stats["metrics/mAP50-95(B)"]
-
-            except Exception as e:
-                LOGGER.warning(f"faster-coco-eval unable to run: {e}")
-        return stats
+        pred_json = self.save_dir / "predictions.json"  # predictions
+        anno_json = (
+            self.data["path"]
+            / "annotations"
+            / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
+        )  # annotations
+        return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
```
`ultralytics/models/yolo/world/train.py`

```diff
@@ -158,7 +158,7 @@ class WorldTrainer(DetectionTrainer):
             return txt_map
         LOGGER.info(f"Caching text embeddings to '{cache_path}'")
         assert self.model is not None
-        txt_feats = self.model.get_text_pe(texts, batch, cache_clip_model=False)
+        txt_feats = de_parallel(self.model).get_text_pe(texts, batch, cache_clip_model=False)
         txt_map = dict(zip(texts, txt_feats.squeeze(0)))
         torch.save(txt_map, cache_path)
         return txt_map
```
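This trainer (and the yoloe one below) switches to `de_parallel` because under multi-GPU DDP training `self.model` is a `DistributedDataParallel` wrapper that does not expose model-specific methods such as `get_text_pe`. A simplified sketch of what the `ultralytics.utils.torch_utils` helper does:

```python
# Simplified sketch of de_parallel: unwrap (Distributed)DataParallel containers
# so model-specific methods like get_text_pe are reachable again.
import torch.nn as nn

def de_parallel(model: nn.Module) -> nn.Module:
    """Return the underlying module when model is a (Distributed)DataParallel wrapper."""
    is_wrapped = isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel))
    return model.module if is_wrapped else model
```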
`ultralytics/models/yolo/yoloe/train.py`

```diff
@@ -222,7 +222,7 @@ class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
             return txt_map
         LOGGER.info(f"Caching text embeddings to '{cache_path}'")
         assert self.model is not None
-        txt_feats = self.model.get_text_pe(texts, batch, without_reprta=True, cache_clip_model=False)
+        txt_feats = de_parallel(self.model).get_text_pe(texts, batch, without_reprta=True, cache_clip_model=False)
         txt_map = dict(zip(texts, txt_feats.squeeze(0)))
         torch.save(txt_map, cache_path)
         return txt_map
```
`ultralytics/nn/autobackend.py`

```diff
@@ -259,7 +259,11 @@ class AutoBackend(nn.Module):
                 session = onnxruntime.InferenceSession(w, providers=providers)
             else:
                 check_requirements(
-                    ["model-compression-toolkit>=2.3.0", "sony-custom-layers[torch]>=0.3.0", "onnxruntime-extensions"]
+                    [
+                        "model-compression-toolkit>=2.3.0,<2.4.1",
+                        "sony-custom-layers[torch]>=0.3.0",
+                        "onnxruntime-extensions",
+                    ]
                 )
                 w = next(Path(w).glob("*.onnx"))
                 LOGGER.info(f"Loading {w} for ONNX IMX inference...")
```
`ultralytics/nn/text_model.py`

```diff
@@ -6,6 +6,7 @@ from typing import List, Union
 
 import torch
 import torch.nn as nn
+from PIL import Image
 
 from ultralytics.utils import checks
 from ultralytics.utils.torch_utils import smart_inference_mode
@@ -68,7 +69,7 @@ class CLIP(TextModel):
     >>> print(text_features.shape)
     """
 
-    def __init__(self, size: str, device: torch.device):
+    def __init__(self, size: str, device: torch.device) -> None:
         """
         Initialize the CLIP text encoder.
 
@@ -85,12 +86,12 @@ class CLIP(TextModel):
         >>> text_features = clip_model.encode_text(["a photo of a cat", "a photo of a dog"])
         """
         super().__init__()
-        self.model = clip.load(size, device=device)
+        self.model, self.image_preprocess = clip.load(size, device=device)
         self.to(device)
         self.device = device
         self.eval()
 
-    def tokenize(self, texts: Union[str, List[str]]):
+    def tokenize(self, texts: Union[str, List[str]]) -> torch.Tensor:
         """
         Convert input texts to CLIP tokens.
 
@@ -108,7 +109,7 @@ class CLIP(TextModel):
         return clip.tokenize(texts).to(self.device)
 
     @smart_inference_mode()
-    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32):
+    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
         """
         Encode tokenized texts into normalized feature vectors.
 
@@ -133,6 +134,38 @@ class CLIP(TextModel):
         txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
         return txt_feats
 
+    @smart_inference_mode()
+    def encode_image(self, image: Union[Image.Image, torch.Tensor], dtype: torch.dtype = torch.float32) -> torch.Tensor:
+        """
+        Encode preprocessed images into normalized feature vectors.
+
+        This method processes preprocessed image inputs through the CLIP model to generate feature vectors, which are
+        then normalized to unit length. These normalized vectors can be used for text-image similarity comparisons.
+
+        Args:
+            image (PIL.Image | torch.Tensor): Preprocessed image input. If a PIL Image is provided, it will be
+                converted to a tensor using the model's image preprocessing function.
+            dtype (torch.dtype, optional): Data type for output features.
+
+        Returns:
+            (torch.Tensor): Normalized image feature vectors with unit length (L2 norm = 1).
+
+        Examples:
+            >>> from ultralytics.nn.text_model import CLIP
+            >>> from PIL import Image
+            >>> clip_model = CLIP("ViT-B/32", device="cuda")
+            >>> image = Image.open("path/to/image.jpg")
+            >>> image_tensor = clip_model.image_preprocess(image).unsqueeze(0).to("cuda")
+            >>> features = clip_model.encode_image(image_tensor)
+            >>> features.shape
+            torch.Size([1, 512])
+        """
+        if isinstance(image, Image.Image):
+            image = self.image_preprocess(image).unsqueeze(0).to(self.device)
+        img_feats = self.model.encode_image(image).to(dtype)
+        img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True)
+        return img_feats
+
 
 class MobileCLIP(TextModel):
     """
@@ -160,7 +193,7 @@ class MobileCLIP(TextModel):
 
     config_size_map = {"s0": "s0", "s1": "s1", "s2": "s2", "b": "b", "blt": "b"}
 
-    def __init__(self, size: str, device: torch.device):
+    def __init__(self, size: str, device: torch.device) -> None:
         """
         Initialize the MobileCLIP text encoder.
 
@@ -201,7 +234,7 @@ class MobileCLIP(TextModel):
         self.device = device
         self.eval()
 
-    def tokenize(self, texts: List[str]):
+    def tokenize(self, texts: List[str]) -> torch.Tensor:
         """
         Convert input texts to MobileCLIP tokens.
 
@@ -218,7 +251,7 @@ class MobileCLIP(TextModel):
         return self.tokenizer(texts).to(self.device)
 
     @smart_inference_mode()
-    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32):
+    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
         """
         Encode tokenized texts into normalized feature vectors.
 
@@ -286,7 +319,7 @@ class MobileCLIPTS(TextModel):
         self.tokenizer = clip.clip.tokenize
         self.device = device
 
-    def tokenize(self, texts: List[str]):
+    def tokenize(self, texts: List[str]) -> torch.Tensor:
         """
         Convert input texts to MobileCLIP tokens.
 
@@ -303,7 +336,7 @@ class MobileCLIPTS(TextModel):
         return self.tokenizer(texts).to(self.device)
 
     @smart_inference_mode()
-    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32):
+    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
         """
         Encode tokenized texts into normalized feature vectors.
 
@@ -322,10 +355,10 @@ class MobileCLIPTS(TextModel):
         torch.Size([2, 512])  # Actual dimension depends on model size
         """
         # NOTE: no need to do normalization here as it's embedded in the torchscript model
-        return self.encoder(texts)
+        return self.encoder(texts).to(dtype)
 
 
-def build_text_model(variant: str, device: torch.device = None):
+def build_text_model(variant: str, device: torch.device = None) -> TextModel:
     """
     Build a text encoding model based on the specified variant.
 
```
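Since `clip.load` returns both the model and its preprocessing transform, `CLIP` now keeps the transform as `image_preprocess` and gains an `encode_image` counterpart to `encode_text`. A hedged end-to-end sketch (the image path is illustrative):

```python
# Sketch: text-image similarity with the new CLIP.encode_image (image path is illustrative).
import torch
from PIL import Image
from ultralytics.nn.text_model import build_text_model

clip_model = build_text_model("clip:ViT-B/32", device=torch.device("cpu"))
txt_feats = clip_model.encode_text(clip_model.tokenize(["a photo of a bus", "a photo of a cat"]))
img_feats = clip_model.encode_image(Image.open("bus.jpg"))  # PIL input is preprocessed internally
similarity = img_feats @ txt_feats.T  # cosine similarities, since both sides are L2-normalized
```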