ultralytics 8.1.15__tar.gz → 8.1.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ultralytics has been flagged as potentially problematic by the registry diff service.
- {ultralytics-8.1.15/ultralytics.egg-info → ultralytics-8.1.17}/PKG-INFO +2 -2
- {ultralytics-8.1.15 → ultralytics-8.1.17}/pyproject.toml +1 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17}/tests/test_explorer.py +5 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/__init__.py +1 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/default.yaml +1 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/converter.py +2 -2
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/dataset.py +22 -15
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/utils.py +2 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/exporter.py +3 -3
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/model.py +82 -33
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/trainer.py +1 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/detect/val.py +2 -4
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/obb/val.py +5 -4
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/tasks.py +4 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/solutions/speed_estimation.py +3 -2
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/utils/gmc.py +10 -10
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/downloads.py +1 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/metrics.py +14 -8
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/plotting.py +1 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17/ultralytics.egg-info}/PKG-INFO +2 -2
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics.egg-info/requires.txt +1 -1
- {ultralytics-8.1.15 → ultralytics-8.1.17}/LICENSE +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/README.md +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/setup.cfg +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/tests/test_cli.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/tests/test_cuda.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/tests/test_engine.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/tests/test_integrations.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/tests/test_python.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-world-t2i.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/augment.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/base.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/build.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/explorer/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/explorer/explorer.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/explorer/gui/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/explorer/gui/dash.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/explorer/utils.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/loaders.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/data/split_dota.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/results.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/tuner.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/engine/validator.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/hub/utils.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/fastsam/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/fastsam/model.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/fastsam/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/fastsam/prompt.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/nas/model.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/model.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/sam/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/classify/train.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/detect/train.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/model.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/pose/val.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/models/yolo/segment/val.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/autobackend.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/modules/conv.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/modules/head.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/solutions/ai_gym.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/solutions/distance_calculation.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/solutions/heatmap.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/solutions/object_counter.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/byte_tracker.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/track.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/benchmarks.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/comet.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/mlflow.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/tensorboard.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/checks.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/instance.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/ops.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/torch_utils.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics.egg-info/SOURCES.txt +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.1.15 → ultralytics-8.1.17}/ultralytics.egg-info/top_level.txt +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.1.15
+Version: 8.1.17
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -55,7 +55,7 @@ Requires-Dist: mkdocs-material>=9.5.9; extra == "dev"
 Requires-Dist: mkdocstrings[python]; extra == "dev"
 Requires-Dist: mkdocs-jupyter; extra == "dev"
 Requires-Dist: mkdocs-redirects; extra == "dev"
-Requires-Dist: mkdocs-ultralytics-plugin>=0.0.
+Requires-Dist: mkdocs-ultralytics-plugin>=0.0.44; extra == "dev"
 Provides-Extra: export
 Requires-Dist: onnx>=1.12.0; extra == "export"
 Requires-Dist: coremltools>=7.0; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
pyproject.toml
@@ -93,7 +93,7 @@ dev = [
     "mkdocstrings[python]",
     "mkdocs-jupyter", # for notebooks
     "mkdocs-redirects", # for 301 redirects
-    "mkdocs-ultralytics-plugin>=0.0.
+    "mkdocs-ultralytics-plugin>=0.0.44", # for meta descriptions and images, dates and authors
 ]
 export = [
     "onnx>=1.12.0", # ONNX export
tests/test_explorer.py
@@ -1,11 +1,13 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

 import PIL
+import pytest

 from ultralytics import Explorer
 from ultralytics.utils import ASSETS


+@pytest.mark.slow
 def test_similarity():
     """Test similarity calculations and SQL queries for correctness and response length."""
     exp = Explorer()
@@ -22,6 +24,7 @@ def test_similarity():
     assert len(sql) > 0


+@pytest.mark.slow
 def test_det():
     """Test detection functionalities and ensure the embedding table has bounding boxes."""
     exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
@@ -34,6 +37,7 @@ def test_det():
     assert isinstance(similar, PIL.Image.Image)


+@pytest.mark.slow
 def test_seg():
     """Test segmentation functionalities and verify the embedding table includes masks."""
     exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
@@ -45,6 +49,7 @@ def test_seg():
     assert isinstance(similar, PIL.Image.Image)


+@pytest.mark.slow
 def test_pose():
     """Test pose estimation functionalities and check the embedding table for keypoints."""
     exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")
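The four Explorer tests above are now decorated with `@pytest.mark.slow`, so they only run when slow tests are explicitly selected. A minimal sketch of selecting them from Python (the marker expression is standard pytest; registering the `slow` marker in the project config is assumed):

```python
import pytest

# Run only the tests marked "slow" (e.g. the Explorer tests above).
# Use "-m 'not slow'" instead to skip them in a quick CI pass.
pytest.main(["-m", "slow", "tests/test_explorer.py"])
```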
ultralytics/cfg/default.yaml
@@ -9,7 +9,7 @@ model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
 data: # (str, optional) path to data file, i.e. coco128.yaml
 epochs: 100 # (int) number of epochs to train for
 time: # (float, optional) number of hours to train for, overrides epochs if supplied
-patience:
+patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
 batch: 16 # (int) number of images per batch (-1 for AutoBatch)
 imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
 save: True # (bool) save train checkpoints and predict results
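`patience` now ships with an explicit default of 100 epochs in `default.yaml`. It can still be overridden per run; a small illustrative sketch (values are examples, not recommendations):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# Stop early if validation fitness has not improved for 30 consecutive epochs.
model.train(data="coco8.yaml", epochs=300, patience=30, imgsz=640)
```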
ultralytics/data/converter.py
@@ -418,8 +418,8 @@ def min_index(arr1, arr2):
     Find a pair of indexes with the shortest distance between two arrays of 2D points.

     Args:
-        arr1 (np.
-        arr2 (np.
+        arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points.
+        arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points.

     Returns:
         (tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively.
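For reference, the behaviour the corrected docstring describes (closest pair of points between two 2D point arrays) can be sketched as below; this is an illustrative re-implementation, not the package's own `min_index` code:

```python
import numpy as np

def closest_pair_indexes(arr1: np.ndarray, arr2: np.ndarray) -> tuple:
    """Return (i, j) such that arr1[i] (shape (N, 2)) and arr2[j] (shape (M, 2)) are closest."""
    dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1)  # (N, M) squared distances
    return np.unravel_index(np.argmin(dis), dis.shape)
```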
ultralytics/data/dataset.py
@@ -226,35 +226,42 @@ class YOLODataset(BaseDataset):
 # Classification dataloaders -------------------------------------------------------------------------------------------
 class ClassificationDataset(torchvision.datasets.ImageFolder):
     """
-    YOLO
+    Extends torchvision ImageFolder to support YOLO classification tasks, offering functionalities like image
+    augmentation, caching, and verification. It's designed to efficiently handle large datasets for training deep
+    learning models, with optional image transformations and caching mechanisms to speed up training.

-
-
+    This class allows for augmentations using both torchvision and Albumentations libraries, and supports caching images
+    in RAM or on disk to reduce IO overhead during training. Additionally, it implements a robust verification process
+    to ensure data integrity and consistency.

     Attributes:
-        cache_ram (bool):
-        cache_disk (bool):
-        samples (list):
-
-
+        cache_ram (bool): Indicates if caching in RAM is enabled.
+        cache_disk (bool): Indicates if caching on disk is enabled.
+        samples (list): A list of tuples, each containing the path to an image, its class index, path to its .npy cache
+            file (if caching on disk), and optionally the loaded image array (if caching in RAM).
+        torch_transforms (callable): PyTorch transforms to be applied to the images.
     """

-    def __init__(self, root, args, augment=False,
+    def __init__(self, root, args, augment=False, prefix=""):
         """
         Initialize YOLO object with root, image size, augmentations, and cache settings.

         Args:
-            root (str):
-            args (Namespace):
-
-
+            root (str): Path to the dataset directory where images are stored in a class-specific folder structure.
+            args (Namespace): Configuration containing dataset-related settings such as image size, augmentation
+                parameters, and cache settings. It includes attributes like `imgsz` (image size), `fraction` (fraction
+                of data to use), `scale`, `fliplr`, `flipud`, `cache` (disk or RAM caching for faster training),
+                `auto_augment`, `hsv_h`, `hsv_s`, `hsv_v`, and `crop_fraction`.
+            augment (bool, optional): Whether to apply augmentations to the dataset. Default is False.
+            prefix (str, optional): Prefix for logging and cache filenames, aiding in dataset identification and
+                debugging. Default is an empty string.
         """
         super().__init__(root=root)
         if augment and args.fraction < 1.0:  # reduce training fraction
             self.samples = self.samples[: round(len(self.samples) * args.fraction)]
         self.prefix = colorstr(f"{prefix}: ") if prefix else ""
-        self.cache_ram = cache is True or cache == "ram"
-        self.cache_disk = cache == "disk"
+        self.cache_ram = args.cache is True or args.cache == "ram"  # cache images into RAM
+        self.cache_disk = args.cache == "disk"  # cache images on hard drive as uncompressed *.npy files
         self.samples = self.verify_images()  # filter out bad images
         self.samples = [list(x) + [Path(x[0]).with_suffix(".npy"), None] for x in self.samples]  # file, index, npy, im
         scale = (1.0 - args.scale, 1.0)  # (0.08, 1.0)
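The constructor fix matters because the removed lines read a bare `cache` name that was no longer a parameter; caching is now taken from `args.cache`. A hedged usage sketch (the dataset root is a placeholder, and `get_cfg()` is assumed to supply the attributes listed in the docstring):

```python
from ultralytics.cfg import get_cfg
from ultralytics.data.dataset import ClassificationDataset

args = get_cfg()          # default train settings: imgsz, scale, fliplr, cache, ...
args.cache = "ram"        # or "disk" to store decoded images as uncompressed .npy files
dataset = ClassificationDataset(root="path/to/dataset/train", args=args, augment=True, prefix="train")
print(len(dataset.samples))
```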
ultralytics/data/utils.py
@@ -467,7 +467,6 @@ class HUBDatasetStats:

         self.hub_dir = Path(f'{data["path"]}-hub')
         self.im_dir = self.hub_dir / "images"
-        self.im_dir.mkdir(parents=True, exist_ok=True)  # makes /images
         self.stats = {"nc": len(data["names"]), "names": list(data["names"].values())}  # statistics dictionary
         self.data = data

@@ -551,6 +550,7 @@ class HUBDatasetStats:

         # Save, print and return
         if save:
+            self.hub_dir.mkdir(parents=True, exist_ok=True)  # makes dataset-hub/
             stats_path = self.hub_dir / "stats.json"
             LOGGER.info(f"Saving {stats_path.resolve()}...")
             with open(stats_path, "w") as f:
@@ -563,6 +563,7 @@ class HUBDatasetStats:
         """Compress images for Ultralytics HUB."""
         from ultralytics.data import YOLODataset  # ClassificationDataset

+        self.im_dir.mkdir(parents=True, exist_ok=True)  # makes dataset-hub/images/
         for split in "train", "val", "test":
             if self.data.get(split) is None:
                 continue
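With the two `mkdir` moves, `dataset-hub/` and `dataset-hub/images/` are only created when stats are actually saved or images are actually compressed. A sketch of the typical call sequence, based on the class's documented interface (dataset choice is illustrative):

```python
from ultralytics.data.utils import HUBDatasetStats

stats = HUBDatasetStats("coco8.yaml", task="detect")
stats.get_json(save=True)   # writes dataset-hub/stats.json; the directory is created here now
stats.process_images()      # writes compressed copies under dataset-hub/images/
```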
ultralytics/engine/exporter.py
@@ -716,12 +716,13 @@ class Exporter:
         import tensorflow as tf  # noqa
         check_requirements(
             (
-                "onnx",
+                "onnx>=1.12.0",
                 "onnx2tf>=1.15.4,<=1.17.5",
                 "sng4onnx>=1.0.1",
                 "onnxsim>=0.4.33",
                 "onnx_graphsurgeon>=0.3.26",
                 "tflite_support",
+                "flatbuffers>=23.5.26",  # update old 'flatbuffers' included inside tensorflow package
                 "onnxruntime-gpu" if cuda else "onnxruntime",
             ),
             cmds="--extra-index-url https://pypi.ngc.nvidia.com",
@@ -860,8 +861,7 @@ class Exporter:
     @try_export
     def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
         """YOLOv8 TensorFlow.js export."""
-
-        check_requirements(["jax<=0.4.21", "jaxlib<=0.4.21", "tensorflowjs"])
+        check_requirements("tensorflowjs")
         import tensorflow as tf
         import tensorflowjs as tfjs  # noqa

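These edits touch the TensorFlow export paths: the ONNX pin is made explicit, a newer flatbuffers is pulled in, and TF.js export no longer pins jax/jaxlib. A minimal export sketch (model and formats are illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.export(format="tfjs")    # TensorFlow.js export, now only requires tensorflowjs
model.export(format="tflite")  # TFLite path installs onnx>=1.12.0, onnx2tf, flatbuffers>=23.5.26, ...
```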
ultralytics/engine/model.py
@@ -5,6 +5,9 @@ import sys
 from pathlib import Path
 from typing import Union

+import numpy as np
+import torch
+
 from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
 from ultralytics.hub.utils import HUB_WEB_ROOT
 from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, nn, yaml_model_load
@@ -78,7 +81,12 @@ class Model(nn.Module):
         NotImplementedError: If a specific model task or mode is not supported.
     """

-    def __init__(
+    def __init__(
+        self,
+        model: Union[str, Path] = "yolov8n.pt",
+        task: str = None,
+        verbose: bool = False,
+    ) -> None:
         """
         Initializes a new instance of the YOLO model class.

@@ -135,7 +143,12 @@ class Model(nn.Module):

         self.model_name = model

-    def __call__(
+    def __call__(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> list:
         """
         An alias for the predict method, enabling the model instance to be callable.

@@ -143,8 +156,9 @@ class Model(nn.Module):
         with the required arguments for prediction.

         Args:
-            source (str | int | PIL.Image | np.ndarray, optional): The source of the image for making
-                Accepts various types, including file paths, URLs, PIL images, and numpy arrays.
+            source (str | Path | int | PIL.Image | np.ndarray, optional): The source of the image for making
+                predictions. Accepts various types, including file paths, URLs, PIL images, and numpy arrays.
+                Defaults to None.
             stream (bool, optional): If True, treats the input source as a continuous stream for predictions.
                 Defaults to False.
             **kwargs (dict): Additional keyword arguments for configuring the prediction process.
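The widened annotations document that `__call__`/`predict` accept in-memory arrays and tensors as well as paths and URLs. A small sketch (the dummy frame is illustrative):

```python
import numpy as np
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
frame = np.zeros((640, 640, 3), dtype=np.uint8)  # HWC uint8 image, e.g. a cv2 frame
results = model(frame)                           # __call__ is an alias for predict()
results = model.predict("https://ultralytics.com/images/bus.jpg", stream=False)
```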
@@ -163,7 +177,7 @@ class Model(nn.Module):
         return session if session.client.authenticated else None

     @staticmethod
-    def is_triton_model(model):
+    def is_triton_model(model: str) -> bool:
         """Is model a Triton Server URL string, i.e. <scheme>://<netloc>/<endpoint>/<task_name>"""
         from urllib.parse import urlsplit

@@ -171,7 +185,7 @@ class Model(nn.Module):
         return url.netloc and url.path and url.scheme in {"http", "grpc"}

     @staticmethod
-    def is_hub_model(model):
+    def is_hub_model(model: str) -> bool:
         """Check if the provided model is a HUB model."""
         return any(
             (
@@ -181,7 +195,7 @@ class Model(nn.Module):
             )
         )

-    def _new(self, cfg: str, task=None, model=None, verbose=False):
+    def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
         """
         Initializes a new model and infers the task type from the model definitions.

@@ -202,7 +216,7 @@ class Model(nn.Module):
         self.model.args = {**DEFAULT_CFG_DICT, **self.overrides}  # combine default and model args (prefer model args)
         self.model.task = self.task

-    def _load(self, weights: str, task=None):
+    def _load(self, weights: str, task=None) -> None:
         """
         Initializes a new model and infers the task type from the model head.

@@ -224,7 +238,7 @@ class Model(nn.Module):
         self.overrides["model"] = weights
         self.overrides["task"] = self.task

-    def _check_is_pytorch_model(self):
+    def _check_is_pytorch_model(self) -> None:
         """Raises TypeError is model is not a PyTorch model."""
         pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"
         pt_module = isinstance(self.model, nn.Module)
@@ -237,7 +251,7 @@ class Model(nn.Module):
                 f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
             )

-    def reset_weights(self):
+    def reset_weights(self) -> "Model":
         """
         Resets the model parameters to randomly initialized values, effectively discarding all training information.

@@ -259,7 +273,7 @@ class Model(nn.Module):
             p.requires_grad = True
         return self

-    def load(self, weights="yolov8n.pt"):
+    def load(self, weights: Union[str, Path] = "yolov8n.pt") -> "Model":
         """
         Loads parameters from the specified weights file into the model.

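`load()` and `reset_weights()` now advertise that they return the `Model` itself, which is what makes the build-then-load pattern chainable:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.yaml").load("yolov8n.pt")  # build from YAML, then transfer matching weights
fresh = YOLO("yolov8n.pt").reset_weights()       # same architecture, randomly re-initialized
```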
@@ -281,24 +295,22 @@ class Model(nn.Module):
             self.model.load(weights)
         return self

-    def save(self, filename="
+    def save(self, filename: Union[str, Path] = "saved_model.pt") -> None:
         """
         Saves the current model state to a file.

         This method exports the model's checkpoint (ckpt) to the specified filename.

         Args:
-            filename (str): The name of the file to save the model to. Defaults to '
+            filename (str | Path): The name of the file to save the model to. Defaults to 'saved_model.pt'.

         Raises:
             AssertionError: If the model is not a PyTorch model.
         """
         self._check_is_pytorch_model()
-        import torch
-
         torch.save(self.ckpt, filename)

-    def info(self, detailed=False, verbose=True):
+    def info(self, detailed: bool = False, verbose: bool = True):
         """
         Logs or returns model information.

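`save()` drops its local `import torch` (torch is now imported at module level) and accepts `Path` objects. Usage sketch (the output filename is arbitrary):

```python
from pathlib import Path
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.info(detailed=False, verbose=True)   # log a model summary
model.save(Path("yolov8n_copy.pt"))        # str or Path both work for the checkpoint target
```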
@@ -330,7 +342,12 @@ class Model(nn.Module):
         self._check_is_pytorch_model()
         self.model.fuse()

-    def embed(
+    def embed(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> list:
         """
         Generates image embeddings based on the provided source.

@@ -353,7 +370,13 @@ class Model(nn.Module):
             kwargs["embed"] = [len(self.model.model) - 2]  # embed second-to-last layer if no indices passed
         return self.predict(source, stream, **kwargs)

-    def predict(
+    def predict(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        predictor=None,
+        **kwargs,
+    ) -> list:
         """
         Performs predictions on the given image source using the YOLO model.

@@ -405,7 +428,13 @@ class Model(nn.Module):
             self.predictor.set_prompts(prompts)
         return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)

-    def track(
+    def track(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        persist: bool = False,
+        **kwargs,
+    ) -> list:
         """
         Conducts object tracking on the specified input source using the registered trackers.

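`track()` now spells out its accepted source types and the `persist` flag in the signature; behaviour is unchanged. A sketch (video path and tracker choice are illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# persist=True keeps track IDs consistent across repeated calls on the same stream.
for r in model.track(source="path/to/video.mp4", stream=True, persist=True, tracker="bytetrack.yaml"):
    print(r.boxes.id)
```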
@@ -438,7 +467,11 @@ class Model(nn.Module):
         kwargs["mode"] = "track"
         return self.predict(source=source, stream=stream, **kwargs)

-    def val(
+    def val(
+        self,
+        validator=None,
+        **kwargs,
+    ):
         """
         Validates the model using a specified dataset and validation configuration.

@@ -471,7 +504,10 @@ class Model(nn.Module):
         self.metrics = validator.metrics
         return validator.metrics

-    def benchmark(
+    def benchmark(
+        self,
+        **kwargs,
+    ):
         """
         Benchmarks the model across various export formats to evaluate performance.

@@ -509,7 +545,10 @@ class Model(nn.Module):
             verbose=kwargs.get("verbose"),
         )

-    def export(
+    def export(
+        self,
+        **kwargs,
+    ):
         """
         Exports the model to a different format suitable for deployment.

@@ -537,7 +576,11 @@ class Model(nn.Module):
         args = {**self.overrides, **custom, **kwargs, "mode": "export"}  # highest priority args on the right
         return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)

-    def train(
+    def train(
+        self,
+        trainer=None,
+        **kwargs,
+    ):
         """
         Trains the model using the specified dataset and training configuration.

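The reflowed `val()`, `benchmark()`, `export()` and `train()` signatures are behaviour-neutral. For reference, a typical train-then-validate call (dataset and epoch count are illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3, imgsz=640)
metrics = model.val(data="coco8.yaml")
print(metrics.box.map50)   # mAP@0.5 from the returned detection metrics
```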
@@ -607,7 +650,13 @@ class Model(nn.Module):
         self.metrics = getattr(self.trainer.validator, "metrics", None)  # TODO: no metrics returned by DDP
         return self.metrics

-    def tune(
+    def tune(
+        self,
+        use_ray=False,
+        iterations=10,
+        *args,
+        **kwargs,
+    ):
         """
         Conducts hyperparameter tuning for the model, with an option to use Ray Tune.

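`tune()` keeps its `use_ray` and `iterations` arguments, just reformatted. A sketch with a deliberately small budget (all values illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# Uses the built-in Tuner; pass use_ray=True to delegate the search to Ray Tune instead.
model.tune(data="coco8.yaml", epochs=5, iterations=10, use_ray=False)
```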
@@ -640,7 +689,7 @@ class Model(nn.Module):
         args = {**self.overrides, **custom, **kwargs, "mode": "train"}  # highest priority args on the right
         return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)

-    def _apply(self, fn):
+    def _apply(self, fn) -> "Model":
         """Apply to(), cpu(), cuda(), half(), float() to model tensors that are not parameters or registered buffers."""
         self._check_is_pytorch_model()
         self = super()._apply(fn)  # noqa
@@ -649,7 +698,7 @@ class Model(nn.Module):
         return self

     @property
-    def names(self):
+    def names(self) -> list:
         """
         Retrieves the class names associated with the loaded model.

@@ -664,7 +713,7 @@ class Model(nn.Module):
         return check_class_names(self.model.names) if hasattr(self.model, "names") else None

     @property
-    def device(self):
+    def device(self) -> torch.device:
         """
         Retrieves the device on which the model's parameters are allocated.

@@ -688,7 +737,7 @@ class Model(nn.Module):
         """
         return self.model.transforms if hasattr(self.model, "transforms") else None

-    def add_callback(self, event: str, func):
+    def add_callback(self, event: str, func) -> None:
         """
         Adds a callback function for a specified event.

@@ -704,7 +753,7 @@ class Model(nn.Module):
         """
         self.callbacks[event].append(func)

-    def clear_callback(self, event: str):
+    def clear_callback(self, event: str) -> None:
         """
         Clears all callback functions registered for a specified event.

@@ -718,7 +767,7 @@ class Model(nn.Module):
         """
         self.callbacks[event] = []

-    def reset_callbacks(self):
+    def reset_callbacks(self) -> None:
         """
         Resets all callbacks to their default functions.

@@ -729,7 +778,7 @@ class Model(nn.Module):
             self.callbacks[event] = [callbacks.default_callbacks[event][0]]

     @staticmethod
-    def _reset_ckpt_args(args):
+    def _reset_ckpt_args(args: dict) -> dict:
         """Reset arguments when loading a PyTorch model."""
         include = {"imgsz", "data", "task", "single_cls"}  # only remember these arguments when loading a PyTorch model
         return {k: v for k, v in args.items() if k in include}
@@ -739,7 +788,7 @@ class Model(nn.Module):
         # name = self.__class__.__name__
         # raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")

-    def _smart_load(self, key):
+    def _smart_load(self, key: str):
         """Load model/trainer/validator/predictor."""
         try:
             return self.task_map[self.task][key]
@@ -751,7 +800,7 @@ class Model(nn.Module):
         ) from e

     @property
-    def task_map(self):
+    def task_map(self) -> dict:
         """
         Map head to model, trainer, validator, and predictor classes.

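The callback helpers gain explicit return annotations; their use is unchanged. For reference:

```python
from ultralytics import YOLO

def on_train_start(trainer):
    print(f"training started, results go to {trainer.save_dir}")

model = YOLO("yolov8n.pt")
model.add_callback("on_train_start", on_train_start)
model.clear_callback("on_train_start")  # drop every callback registered for that event
model.reset_callbacks()                 # restore the library defaults for all events
```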
ultralytics/engine/trainer.py
@@ -252,7 +252,7 @@ class BaseTrainer:
             if any(x in k for x in freeze_layer_names):
                 LOGGER.info(f"Freezing layer '{k}'")
                 v.requires_grad = False
-            elif not v.requires_grad:
+            elif not v.requires_grad and v.dtype.is_floating_point:  # only floating point Tensor can require gradients
                 LOGGER.info(
                     f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. "
                     "See ultralytics.engine.trainer for customization of frozen layers."
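The extra `v.dtype.is_floating_point` check means only floating-point tensors get `requires_grad` switched back on; integer buffers are left alone when layers are frozen. Freezing is requested through the `freeze` train argument, e.g.:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# Freeze the first 10 layers (an int, or a list of layer indices); any non-frozen
# floating-point parameter accidentally left with requires_grad=False is re-enabled.
model.train(data="coco8.yaml", epochs=3, freeze=10)
```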
ultralytics/models/yolo/detect/val.py
@@ -132,8 +132,7 @@ class DetectionValidator(BaseValidator):
             if nl:
                 for k in self.stats.keys():
                     self.stats[k].append(stat[k])
-
-                if self.args.plots and self.args.task != "obb":
+                if self.args.plots:
                     self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
             continue

@@ -147,8 +146,7 @@ class DetectionValidator(BaseValidator):
         # Evaluate
         if nl:
             stat["tp"] = self._process_batch(predn, bbox, cls)
-
-            if self.args.plots and self.args.task != "obb":
+            if self.args.plots:
                 self.confusion_matrix.process_batch(predn, bbox, cls)
         for k in self.stats.keys():
             self.stats[k].append(stat[k])
ultralytics/models/yolo/obb/val.py
@@ -55,10 +55,11 @@ class OBBValidator(DetectionValidator):
         Return correct prediction matrix.

         Args:
-            detections (torch.Tensor): Tensor of shape [N,
-                Each detection is of the format: x1, y1, x2, y2, conf, class.
-
-                Each
+            detections (torch.Tensor): Tensor of shape [N, 7] representing detections.
+                Each detection is of the format: x1, y1, x2, y2, conf, class, angle.
+            gt_bboxes (torch.Tensor): Tensor of shape [M, 5] representing rotated boxes.
+                Each box is of the format: x1, y1, x2, y2, angle.
+            labels (torch.Tensor): Tensor of shape [M] representing labels.

         Returns:
             (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
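The corrected docstring spells out the rotated-box layouts: detections carry an extra angle column ([N, 7]) and ground-truth boxes are [M, 5]. At the user-facing API the same information surfaces through the `obb` attribute of prediction results; a hedged sketch (image choice is illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n-obb.pt")
r = model("https://ultralytics.com/images/bus.jpg")[0]
print(r.obb.xywhr)            # rotated boxes: x_center, y_center, width, height, angle
print(r.obb.conf, r.obb.cls)  # confidences and class indices
```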
ultralytics/nn/tasks.py
@@ -761,6 +761,8 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
     for m in ensemble.modules():
         if hasattr(m, "inplace"):
             m.inplace = inplace
+        elif isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

     # Return model
     if len(ensemble) == 1:
@@ -794,6 +796,8 @@ def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False):
     for m in model.modules():
         if hasattr(m, "inplace"):
             m.inplace = inplace
+        elif isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

     # Return model and ckpt
     return model, ckpt
ultralytics/solutions/speed_estimation.py
@@ -154,13 +154,14 @@ class SpeedEstimator:
         self.trk_previous_times[trk_id] = time()
         self.trk_previous_points[trk_id] = track[-1]

-    def estimate_speed(self, im0, tracks):
+    def estimate_speed(self, im0, tracks, region_color=(255, 0, 0)):
         """
         Calculate object based on tracking data.

         Args:
             im0 (nd array): Image
             tracks (list): List of tracks obtained from the object tracking process.
+            region_color (tuple): Color to use when drawing regions.
         """
         self.im0 = im0
         if tracks[0].boxes.id is None:
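`estimate_speed()` now accepts a `region_color` argument instead of a hard-coded colour; the next hunk shows where it is used when drawing the region. A hedged usage sketch: the estimator setup via `set_args` follows the speed-estimation examples of this release series, and the video path is a placeholder:

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions import speed_estimation

model = YOLO("yolov8n.pt")
speed_obj = speed_estimation.SpeedEstimator()
speed_obj.set_args(reg_pts=[(0, 360), (1280, 360)], names=model.names)

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder path
while cap.isOpened():
    ok, im0 = cap.read()
    if not ok:
        break
    tracks = model.track(im0, persist=True, show=False)
    im0 = speed_obj.estimate_speed(im0, tracks, region_color=(0, 255, 0))  # draw the region in green
cap.release()
```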
@@ -170,7 +171,7 @@ class SpeedEstimator:
         self.extract_tracks(tracks)

         self.annotator = Annotator(self.im0, line_width=2)
-        self.annotator.draw_region(reg_pts=self.reg_pts, color=
+        self.annotator.draw_region(reg_pts=self.reg_pts, color=region_color, thickness=self.region_thickness)

         for box, trk_id, cls in zip(self.boxes, self.trk_ids, self.clss):
             track = self.store_track_info(trk_id, box)
|