ultralytics 8.2.67__tar.gz → 8.2.69__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of ultralytics might be problematic.
- {ultralytics-8.2.67/ultralytics.egg-info → ultralytics-8.2.69}/PKG-INFO +1 -1
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_cli.py +4 -16
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/__init__.py +1 -1
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/augment.py +1 -1
- ultralytics-8.2.69/ultralytics/hub/google/__init__.py +159 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/fastsam/__init__.py +1 -2
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/fastsam/model.py +18 -0
- ultralytics-8.2.69/ultralytics/models/fastsam/predict.py +145 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/ops.py +1 -1
- {ultralytics-8.2.67 → ultralytics-8.2.69/ultralytics.egg-info}/PKG-INFO +1 -1
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics.egg-info/SOURCES.txt +1 -1
- ultralytics-8.2.67/ultralytics/models/fastsam/predict.py +0 -31
- ultralytics-8.2.67/ultralytics/models/fastsam/prompt.py +0 -352
- {ultralytics-8.2.67 → ultralytics-8.2.69}/LICENSE +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/README.md +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/pyproject.toml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/setup.cfg +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/conftest.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_cuda.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_engine.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_explorer.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_exports.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_integrations.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_python.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/tests/test_solutions.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/lvis.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/signature.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/default.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/base.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/build.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/converter.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/dataset.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/explorer/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/explorer/explorer.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/explorer/gui/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/explorer/gui/dash.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/explorer/utils.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/loaders.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/split_dota.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/data/utils.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/exporter.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/model.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/results.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/trainer.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/tuner.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/engine/validator.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/hub/utils.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/nas/model.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/model.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/sam/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/classify/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/detect/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/detect/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/model.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/obb/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/pose/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/segment/val.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/world/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/world/train.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/models/yolo/world/train_world.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/autobackend.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/activation.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/conv.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/head.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/nn/tasks.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/ai_gym.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/analytics.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/distance_calculation.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/heatmap.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/object_counter.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/parking_management.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/queue_management.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/speed_estimation.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/solutions/streamlit_inference.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/byte_tracker.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/track.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/utils/gmc.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/benchmarks.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/comet.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/mlflow.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/tensorboard.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/checks.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/downloads.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/instance.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/metrics.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/plotting.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/torch_utils.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics.egg-info/requires.txt +0 -0
- {ultralytics-8.2.67 → ultralytics-8.2.69}/ultralytics.egg-info/top_level.txt +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.67
+Version: 8.2.69
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
tests/test_cli.py

@@ -68,7 +68,6 @@ def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8
     run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")
 
     from ultralytics import FastSAM
-    from ultralytics.models.fastsam import FastSAMPrompt
     from ultralytics.models.sam import Predictor
 
     # Create a FastSAM model
@@ -81,21 +80,10 @@ def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8
     # Remove small regions
     new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
 
-    # Everything prompt
-    prompt_process = FastSAMPrompt(source, everything_results, device="cpu")
-    ann = prompt_process.everything_prompt()
-
-    # Bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
-    ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])
-
-    # Text prompt
-    ann = prompt_process.text_prompt(text="a photo of a dog")
-
-    # Point prompt
-    # Points default [[0,0]] [[x1,y1],[x2,y2]]
-    # Point_label default [0] [1,0] 0:background, 1:foreground
-    ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])
-    prompt_process.plot(annotations=ann, output="./")
+    # Run inference with bboxes and points and texts prompt at the same time
+    results = sam_model(
+        source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog"
+    )
 
 
 def test_mobilesam():
ultralytics/data/augment.py

@@ -2221,7 +2221,7 @@ class RandomLoadText:
         pos_labels = np.unique(cls).tolist()
 
         if len(pos_labels) > self.max_samples:
-            pos_labels =
+            pos_labels = random.sample(pos_labels, k=self.max_samples)
 
         neg_samples = min(min(num_classes, self.max_samples) - len(pos_labels), random.randint(*self.neg_samples))
         neg_labels = [i for i in range(num_classes) if i not in pos_labels]
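For reference, a tiny standalone snippet (illustrative values, not from the package) showing the behaviour the new line relies on: random.sample draws k unique elements from the list without replacement, so the capped pos_labels stays a plain list of class indices.

import random

pos_labels = [3, 7, 11, 19, 42]
capped = random.sample(pos_labels, k=3)  # k unique elements drawn without replacement, e.g. [42, 3, 11]
print(len(capped), all(c in pos_labels for c in capped))  # 3 True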
ultralytics-8.2.69/ultralytics/hub/google/__init__.py

@@ -0,0 +1,159 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import concurrent.futures
+import statistics
+import time
+from typing import List, Optional, Tuple
+
+import requests
+
+
+class GCPRegions:
+    """
+    A class for managing and analyzing Google Cloud Platform (GCP) regions.
+
+    This class provides functionality to initialize, categorize, and analyze GCP regions based on their
+    geographical location, tier classification, and network latency.
+
+    Attributes:
+        regions (Dict[str, Tuple[int, str, str]]): A dictionary of GCP regions with their tier, city, and country.
+
+    Methods:
+        tier1: Returns a list of tier 1 GCP regions.
+        tier2: Returns a list of tier 2 GCP regions.
+        lowest_latency: Determines the GCP region(s) with the lowest network latency.
+
+    Examples:
+        >>> from ultralytics.hub.google import GCPRegions
+        >>> regions = GCPRegions()
+        >>> lowest_latency_region = regions.lowest_latency(verbose=True, attempts=3)
+        >>> print(f"Lowest latency region: {lowest_latency_region[0][0]}")
+    """
+
+    def __init__(self):
+        """Initializes the GCPRegions class with predefined Google Cloud Platform regions and their details."""
+        self.regions = {
+            "asia-east1": (1, "Taiwan", "China"),
+            "asia-east2": (2, "Hong Kong", "China"),
+            "asia-northeast1": (1, "Tokyo", "Japan"),
+            "asia-northeast2": (1, "Osaka", "Japan"),
+            "asia-northeast3": (2, "Seoul", "South Korea"),
+            "asia-south1": (2, "Mumbai", "India"),
+            "asia-south2": (2, "Delhi", "India"),
+            "asia-southeast1": (2, "Jurong West", "Singapore"),
+            "asia-southeast2": (2, "Jakarta", "Indonesia"),
+            "australia-southeast1": (2, "Sydney", "Australia"),
+            "australia-southeast2": (2, "Melbourne", "Australia"),
+            "europe-central2": (2, "Warsaw", "Poland"),
+            "europe-north1": (1, "Hamina", "Finland"),
+            "europe-southwest1": (1, "Madrid", "Spain"),
+            "europe-west1": (1, "St. Ghislain", "Belgium"),
+            "europe-west10": (2, "Berlin", "Germany"),
+            "europe-west12": (2, "Turin", "Italy"),
+            "europe-west2": (2, "London", "United Kingdom"),
+            "europe-west3": (2, "Frankfurt", "Germany"),
+            "europe-west4": (1, "Eemshaven", "Netherlands"),
+            "europe-west6": (2, "Zurich", "Switzerland"),
+            "europe-west8": (1, "Milan", "Italy"),
+            "europe-west9": (1, "Paris", "France"),
+            "me-central1": (2, "Doha", "Qatar"),
+            "me-west1": (1, "Tel Aviv", "Israel"),
+            "northamerica-northeast1": (2, "Montreal", "Canada"),
+            "northamerica-northeast2": (2, "Toronto", "Canada"),
+            "southamerica-east1": (2, "São Paulo", "Brazil"),
+            "southamerica-west1": (2, "Santiago", "Chile"),
+            "us-central1": (1, "Iowa", "United States"),
+            "us-east1": (1, "South Carolina", "United States"),
+            "us-east4": (1, "Northern Virginia", "United States"),
+            "us-east5": (1, "Columbus", "United States"),
+            "us-south1": (1, "Dallas", "United States"),
+            "us-west1": (1, "Oregon", "United States"),
+            "us-west2": (2, "Los Angeles", "United States"),
+            "us-west3": (2, "Salt Lake City", "United States"),
+            "us-west4": (2, "Las Vegas", "United States"),
+        }
+
+    def tier1(self) -> List[str]:
+        """Returns a list of GCP regions classified as tier 1 based on predefined criteria."""
+        return [region for region, info in self.regions.items() if info[0] == 1]
+
+    def tier2(self) -> List[str]:
+        """Returns a list of GCP regions classified as tier 2 based on predefined criteria."""
+        return [region for region, info in self.regions.items() if info[0] == 2]
+
+    @staticmethod
+    def _ping_region(region: str, attempts: int = 1) -> Tuple[str, float, float, float, float]:
+        """Pings a specified GCP region and returns latency statistics: mean, min, max, and standard deviation."""
+        url = f"https://{region}-docker.pkg.dev"
+        latencies = []
+        for _ in range(attempts):
+            try:
+                start_time = time.time()
+                _ = requests.head(url, timeout=5)
+                latency = (time.time() - start_time) * 1000  # convert latency to milliseconds
+                if latency != float("inf"):
+                    latencies.append(latency)
+            except requests.RequestException:
+                pass
+        if not latencies:
+            return region, float("inf"), float("inf"), float("inf"), float("inf")
+
+        std_dev = statistics.stdev(latencies) if len(latencies) > 1 else 0
+        return region, statistics.mean(latencies), std_dev, min(latencies), max(latencies)
+
+    def lowest_latency(
+        self,
+        top: int = 1,
+        verbose: bool = False,
+        tier: Optional[int] = None,
+        attempts: int = 1,
+    ) -> List[Tuple[str, float, float, float, float]]:
+        """
+        Determines the GCP regions with the lowest latency based on ping tests.
+
+        Args:
+            top (int): Number of top regions to return.
+            verbose (bool): If True, prints detailed latency information for all tested regions.
+            tier (int | None): Filter regions by tier (1 or 2). If None, all regions are tested.
+            attempts (int): Number of ping attempts per region.
+
+        Returns:
+            (List[Tuple[str, float, float, float, float]]): List of tuples containing region information and
+                latency statistics. Each tuple contains (region, mean_latency, std_dev, min_latency, max_latency).
+
+        Examples:
+            >>> regions = GCPRegions()
+            >>> results = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=2)
+            >>> print(results[0][0])  # Print the name of the lowest latency region
+        """
+        if verbose:
+            print(f"Testing GCP regions for latency (with {attempts} {'retry' if attempts == 1 else 'attempts'})...")
+
+        regions_to_test = [k for k, v in self.regions.items() if v[0] == tier] if tier else list(self.regions.keys())
+        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
+            results = list(executor.map(lambda r: self._ping_region(r, attempts), regions_to_test))
+
+        sorted_results = sorted(results, key=lambda x: x[1])
+
+        if verbose:
+            print(f"{'Region':<25} {'Location':<35} {'Tier':<5} {'Latency (ms)'}")
+            for region, mean, std, min_, max_ in sorted_results:
+                tier, city, country = self.regions[region]
+                location = f"{city}, {country}"
+                if mean == float("inf"):
+                    print(f"{region:<25} {location:<35} {tier:<5} {'Timeout'}")
+                else:
+                    print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
+            print(f"\nLowest latency region{'s' if top > 1 else ''}:")
+            for region, mean, std, min_, max_ in sorted_results[:top]:
+                tier, city, country = self.regions[region]
+                location = f"{city}, {country}"
+                print(f"{region} ({location}, {mean:.0f} ± {std:.0f} ms ({min_:.0f} - {max_:.0f}))")
+
+        return sorted_results[:top]
+
+
+# Usage example
+if __name__ == "__main__":
+    regions = GCPRegions()
+    top_3_latency_tier1 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=3)
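A minimal usage sketch for the new ultralytics/hub/google module, mirroring the Examples blocks in its docstrings (it needs network access, and latencies naturally depend on where it runs):

from ultralytics.hub.google import GCPRegions

regions = GCPRegions()
print(f"{len(regions.tier1())} tier-1 regions, {len(regions.tier2())} tier-2 regions")

# Ping each tier-1 region twice and keep the three with the lowest mean latency
top3 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=2)
for region, mean, std, min_, max_ in top3:
    print(f"{region}: {mean:.0f} ± {std:.0f} ms")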
ultralytics/models/fastsam/__init__.py

@@ -2,7 +2,6 @@
 
 from .model import FastSAM
 from .predict import FastSAMPredictor
-from .prompt import FastSAMPrompt
 from .val import FastSAMValidator
 
-__all__ = "FastSAMPredictor", "FastSAM", "FastSAMPrompt", "FastSAMValidator"
+__all__ = "FastSAMPredictor", "FastSAM", "FastSAMValidator"
ultralytics/models/fastsam/model.py

@@ -28,6 +28,24 @@ class FastSAM(Model):
         assert Path(model).suffix not in {".yaml", ".yml"}, "FastSAM models only support pre-trained models."
         super().__init__(model=model, task="segment")
 
+    def predict(self, source, stream=False, bboxes=None, points=None, labels=None, texts=None, **kwargs):
+        """
+        Performs segmentation prediction on the given image or video source.
+
+        Args:
+            source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
+            stream (bool, optional): If True, enables real-time streaming. Defaults to False.
+            bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None.
+            points (list, optional): List of points for prompted segmentation. Defaults to None.
+            labels (list, optional): List of labels for prompted segmentation. Defaults to None.
+            texts (list, optional): List of texts for prompted segmentation. Defaults to None.
+
+        Returns:
+            (list): The model predictions.
+        """
+        prompts = dict(bboxes=bboxes, points=points, labels=labels, texts=texts)
+        return super().predict(source, stream, prompts=prompts, **kwargs)
+
     @property
     def task_map(self):
         """Returns a dictionary mapping segment task to corresponding predictor and validator classes."""
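With this addition, prompts go straight through the FastSAM model call instead of the removed FastSAMPrompt helper. A minimal sketch based on the updated tests/test_cli.py above; the weight file and image path are illustrative:

from ultralytics import FastSAM

model = FastSAM("FastSAM-s.pt")

# Plain "everything" results (no prompts)
everything_results = model("ultralytics/assets/bus.jpg", imgsz=640)

# Box, point and text prompts can now be combined in a single call
prompted_results = model(
    "ultralytics/assets/bus.jpg",
    bboxes=[439, 437, 524, 709],  # xyxy box prompt
    points=[[200, 200]],          # point prompt
    labels=[1],                   # 1 = foreground, 0 = background
    texts="a photo of a dog",     # text prompt matched via CLIP
)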
ultralytics-8.2.69/ultralytics/models/fastsam/predict.py

@@ -0,0 +1,145 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+import torch
+from PIL import Image
+
+from ultralytics.models.yolo.segment import SegmentationPredictor
+from ultralytics.utils import DEFAULT_CFG, checks
+from ultralytics.utils.metrics import box_iou
+from ultralytics.utils.ops import scale_masks
+
+from .utils import adjust_bboxes_to_image_border
+
+
+class FastSAMPredictor(SegmentationPredictor):
+    """
+    FastSAMPredictor is specialized for fast SAM (Segment Anything Model) segmentation prediction tasks in Ultralytics
+    YOLO framework.
+
+    This class extends the SegmentationPredictor, customizing the prediction pipeline specifically for fast SAM. It
+    adjusts post-processing steps to incorporate mask prediction and non-max suppression while optimizing for single-
+    class segmentation.
+    """
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        super().__init__(cfg, overrides, _callbacks)
+        self.prompts = {}
+
+    def postprocess(self, preds, img, orig_imgs):
+        """Applies box postprocess for FastSAM predictions."""
+        bboxes = self.prompts.pop("bboxes", None)
+        points = self.prompts.pop("points", None)
+        labels = self.prompts.pop("labels", None)
+        texts = self.prompts.pop("texts", None)
+        results = super().postprocess(preds, img, orig_imgs)
+        for result in results:
+            full_box = torch.tensor(
+                [0, 0, result.orig_shape[1], result.orig_shape[0]], device=preds[0].device, dtype=torch.float32
+            )
+            boxes = adjust_bboxes_to_image_border(result.boxes.xyxy, result.orig_shape)
+            idx = torch.nonzero(box_iou(full_box[None], boxes) > 0.9).flatten()
+            if idx.numel() != 0:
+                result.boxes.xyxy[idx] = full_box
+
+        return self.prompt(results, bboxes=bboxes, points=points, labels=labels, texts=texts)
+
+    def prompt(self, results, bboxes=None, points=None, labels=None, texts=None):
+        """
+        Internal function for image segmentation inference based on cues like bounding boxes, points, and masks.
+        Leverages SAM's specialized architecture for prompt-based, real-time segmentation.
+
+        Args:
+            results (Results | List[Results]): The original inference results from FastSAM models without any prompts.
+            bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.
+            points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixels.
+            labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
+            texts (str | List[str], optional): Textual prompts, a list contains string objects.
+
+        Returns:
+            (List[Results]): The output results determined by prompts.
+        """
+        if bboxes is None and points is None and texts is None:
+            return results
+        prompt_results = []
+        if not isinstance(results, list):
+            results = [results]
+        for result in results:
+            masks = result.masks.data
+            if masks.shape[1:] != result.orig_shape:
+                masks = scale_masks(masks[None], result.orig_shape)[0]
+            # bboxes prompt
+            idx = torch.zeros(len(result), dtype=torch.bool, device=self.device)
+            if bboxes is not None:
+                bboxes = torch.as_tensor(bboxes, dtype=torch.int32, device=self.device)
+                bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
+                bbox_areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
+                mask_areas = torch.stack([masks[:, b[1] : b[3], b[0] : b[2]].sum(dim=(1, 2)) for b in bboxes])
+                full_mask_areas = torch.sum(masks, dim=(1, 2))
+
+                union = bbox_areas[:, None] + full_mask_areas - mask_areas
+                idx[torch.argmax(mask_areas / union, dim=1)] = True
+            if points is not None:
+                points = torch.as_tensor(points, dtype=torch.int32, device=self.device)
+                points = points[None] if points.ndim == 1 else points
+                if labels is None:
+                    labels = torch.ones(points.shape[0])
+                labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
+                assert len(labels) == len(
+                    points
+                ), f"Excepted `labels` got same size as `point`, but got {len(labels)} and {len(points)}"
+                point_idx = (
+                    torch.ones(len(result), dtype=torch.bool, device=self.device)
+                    if labels.sum() == 0  # all negative points
+                    else torch.zeros(len(result), dtype=torch.bool, device=self.device)
+                )
+                for p, l in zip(points, labels):
+                    point_idx[torch.nonzero(masks[:, p[1], p[0]], as_tuple=True)[0]] = True if l else False
+                idx |= point_idx
+            if texts is not None:
+                if isinstance(texts, str):
+                    texts = [texts]
+                crop_ims, filter_idx = [], []
+                for i, b in enumerate(result.boxes.xyxy.tolist()):
+                    x1, y1, x2, y2 = [int(x) for x in b]
+                    if masks[i].sum() <= 100:
+                        filter_idx.append(i)
+                        continue
+                    crop_ims.append(Image.fromarray(result.orig_img[y1:y2, x1:x2, ::-1]))
+                similarity = self._clip_inference(crop_ims, texts)
+                text_idx = torch.argmax(similarity, dim=-1)  # (M, )
+                if len(filter_idx):
+                    text_idx += (torch.tensor(filter_idx, device=self.device)[None] <= int(text_idx)).sum(0)
+                idx[text_idx] = True
+
+            prompt_results.append(result[idx])
+
+        return prompt_results
+
+    def _clip_inference(self, images, texts):
+        """
+        CLIP Inference process.
+
+        Args:
+            images (List[PIL.Image]): A list of source images and each of them should be PIL.Image type with RGB channel order.
+            texts (List[str]): A list of prompt texts and each of them should be string object.
+
+        Returns:
+            (torch.Tensor): The similarity between given images and texts.
+        """
+        try:
+            import clip
+        except ImportError:
+            checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
+            import clip
+        if (not hasattr(self, "clip_model")) or (not hasattr(self, "clip_preprocess")):
+            self.clip_model, self.clip_preprocess = clip.load("ViT-B/32", device=self.device)
+        images = torch.stack([self.clip_preprocess(image).to(self.device) for image in images])
+        tokenized_text = clip.tokenize(texts).to(self.device)
+        image_features = self.clip_model.encode_image(images)
+        text_features = self.clip_model.encode_text(tokenized_text)
+        image_features /= image_features.norm(dim=-1, keepdim=True)  # (N, 512)
+        text_features /= text_features.norm(dim=-1, keepdim=True)  # (M, 512)
+        return (image_features * text_features[:, None]).sum(-1)  # (M, N)
+
+    def set_prompts(self, prompts):
+        """Set prompts in advance."""
+        self.prompts = prompts
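To make the bboxes branch of prompt() above concrete, here is a small standalone sketch (toy tensors, not package code) of the same selection rule: for each prompt box, the predicted mask with the highest overlap-over-union against the box is kept.

import torch

masks = torch.zeros(3, 8, 8)           # three candidate masks on an 8x8 image
masks[0, 0:4, 0:4] = 1                  # mask 0 exactly fills the prompt box below
masks[1, 2:8, 2:8] = 1
masks[2, :, :] = 1
bboxes = torch.tensor([[0, 0, 4, 4]])   # one xyxy prompt box

bbox_areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
mask_areas = torch.stack([masks[:, b[1] : b[3], b[0] : b[2]].sum(dim=(1, 2)) for b in bboxes])
full_mask_areas = torch.sum(masks, dim=(1, 2))
union = bbox_areas[:, None] + full_mask_areas - mask_areas
print(torch.argmax(mask_areas / union, dim=1))  # tensor([0]) -> mask 0 is selected for the box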
ultralytics/utils/ops.py

@@ -363,7 +363,7 @@ def scale_image(masks, im0_shape, ratio_pad=None):
         ratio_pad (tuple): the ratio of the padding to the original image.
 
     Returns:
-        masks (
+        masks (np.ndarray): The masks that are being returned with shape [h, w, num].
     """
     # Rescale coordinates (xyxy) from im1_shape to im0_shape
     im1_shape = masks.shape
ultralytics.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.67
+Version: 8.2.69
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
ultralytics.egg-info/SOURCES.txt

@@ -126,11 +126,11 @@ ultralytics/hub/__init__.py
 ultralytics/hub/auth.py
 ultralytics/hub/session.py
 ultralytics/hub/utils.py
+ultralytics/hub/google/__init__.py
 ultralytics/models/__init__.py
 ultralytics/models/fastsam/__init__.py
 ultralytics/models/fastsam/model.py
 ultralytics/models/fastsam/predict.py
-ultralytics/models/fastsam/prompt.py
 ultralytics/models/fastsam/utils.py
 ultralytics/models/fastsam/val.py
 ultralytics/models/nas/__init__.py
ultralytics-8.2.67/ultralytics/models/fastsam/predict.py

@@ -1,31 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-import torch
-
-from ultralytics.models.yolo.segment import SegmentationPredictor
-from ultralytics.utils.metrics import box_iou
-
-from .utils import adjust_bboxes_to_image_border
-
-
-class FastSAMPredictor(SegmentationPredictor):
-    """
-    FastSAMPredictor is specialized for fast SAM (Segment Anything Model) segmentation prediction tasks in Ultralytics
-    YOLO framework.
-
-    This class extends the SegmentationPredictor, customizing the prediction pipeline specifically for fast SAM. It
-    adjusts post-processing steps to incorporate mask prediction and non-max suppression while optimizing for single-
-    class segmentation.
-    """
-
-    def postprocess(self, preds, img, orig_imgs):
-        """Applies box postprocess for FastSAM predictions."""
-        results = super().postprocess(preds, img, orig_imgs)
-        for result in results:
-            full_box = torch.tensor(
-                [0, 0, result.orig_shape[1], result.orig_shape[0]], device=preds[0].device, dtype=torch.float32
-            )
-            boxes = adjust_bboxes_to_image_border(result.boxes.xyxy, result.orig_shape)
-            idx = torch.nonzero(box_iou(full_box[None], boxes) > 0.9).flatten()
-            if idx.numel() != 0:
-                result.boxes.xyxy[idx] = full_box
-        return results