ultralytics 8.1.39__tar.gz → 8.1.40__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ultralytics might be problematic.
- {ultralytics-8.1.39/ultralytics.egg-info → ultralytics-8.1.40}/PKG-INFO +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/tests/test_python.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/__init__.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/__init__.py +3 -3
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/augment.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/base.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/converter.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/dataset.py +4 -4
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/loaders.py +11 -8
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/split_dota.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/utils.py +8 -7
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/exporter.py +3 -3
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/model.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/results.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/trainer.py +13 -13
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/validator.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/hub/utils.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/fastsam/model.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/fastsam/prompt.py +4 -5
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/nas/model.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/model.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/modules/tiny_encoder.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/classify/train.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/detect/train.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/world/train.py +16 -15
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/autobackend.py +5 -5
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/modules/conv.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/modules/head.py +4 -4
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/tasks.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/solutions/ai_gym.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/solutions/heatmap.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/byte_tracker.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/track.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/utils/gmc.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/__init__.py +4 -4
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/benchmarks.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/comet.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/mlflow.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/checks.py +3 -3
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/downloads.py +2 -2
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/metrics.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/plotting.py +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/torch_utils.py +3 -3
- {ultralytics-8.1.39 → ultralytics-8.1.40/ultralytics.egg-info}/PKG-INFO +1 -1
- {ultralytics-8.1.39 → ultralytics-8.1.40}/LICENSE +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/README.md +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/pyproject.toml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/setup.cfg +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/tests/test_cli.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/tests/test_cuda.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/tests/test_engine.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/tests/test_explorer.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/tests/test_integrations.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/assets/bus.jpg +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/assets/zidane.jpg +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/VOC.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco128.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/coco8.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/dota8.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/lvis.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/datasets/xView.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/default.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/trackers/botsort.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/annotator.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/build.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/explorer/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/explorer/explorer.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/explorer/gui/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/explorer/gui/dash.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/data/explorer/utils.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/predictor.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/engine/tuner.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/hub/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/hub/auth.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/hub/session.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/fastsam/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/fastsam/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/fastsam/utils.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/fastsam/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/nas/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/nas/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/nas/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/rtdetr/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/rtdetr/model.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/rtdetr/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/rtdetr/train.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/rtdetr/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/amg.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/build.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/modules/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/modules/decoders.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/modules/encoders.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/modules/sam.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/modules/transformer.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/sam/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/utils/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/utils/loss.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/utils/ops.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/classify/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/classify/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/classify/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/detect/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/detect/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/detect/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/model.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/obb/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/obb/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/obb/train.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/obb/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/pose/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/pose/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/pose/train.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/pose/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/segment/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/segment/predict.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/segment/train.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/segment/val.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/world/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/models/yolo/world/train_world.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/modules/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/modules/block.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/modules/transformer.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/nn/modules/utils.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/solutions/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/solutions/distance_calculation.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/solutions/object_counter.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/solutions/speed_estimation.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/basetrack.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/bot_sort.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/utils/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/utils/kalman_filter.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/trackers/utils/matching.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/autobatch.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/__init__.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/base.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/clearml.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/dvc.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/hub.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/neptune.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/raytune.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/tensorboard.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/callbacks/wb.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/dist.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/errors.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/files.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/instance.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/loss.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/ops.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/patches.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/tal.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/triton.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics/utils/tuner.py +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics.egg-info/SOURCES.txt +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics.egg-info/dependency_links.txt +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics.egg-info/entry_points.txt +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics.egg-info/requires.txt +0 -0
- {ultralytics-8.1.39 → ultralytics-8.1.40}/ultralytics.egg-info/top_level.txt +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.1.39
+Version: 8.1.40
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu

tests/test_python.py
@@ -351,7 +351,7 @@ def test_labels_and_crops():
 crop_dirs = [p for p in (save_path / "crops").iterdir()]
 crop_files = [f for p in crop_dirs for f in p.glob("*")]
 # Crop directories match detections
-assert all([r.names.get(c) in
+assert all([r.names.get(c) in {d.name for d in crop_dirs} for c in cls_idxs])
 # Same number of crops as detections
 assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)

ultralytics/cfg/__init__.py
@@ -272,7 +272,7 @@ def get_save_dir(args, name=None):

 project = args.project or (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task
 name = name or args.name or f"{args.mode}"
-save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in
+save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)

 return Path(save_dir)

@@ -566,10 +566,10 @@ def entrypoint(debug=""):
 task = model.task

 # Mode
-if mode in
+if mode in {"predict", "track"} and "source" not in overrides:
 overrides["source"] = DEFAULT_CFG.source or ASSETS
 LOGGER.warning(f"WARNING ⚠️ 'source' argument is missing. Using default 'source={overrides['source']}'.")
-elif mode in
+elif mode in {"train", "val"}:
 if "data" not in overrides and "resume" not in overrides:
 overrides["data"] = DEFAULT_CFG.data or TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
 LOGGER.warning(f"WARNING ⚠️ 'data' argument is missing. Using default 'data={overrides['data']}'.")

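Most hunks in 8.1.40 share one mechanical pattern: membership tests against list or tuple literals are rewritten as set literals, which CPython compiles to constant frozensets and checks by hash instead of scanning a sequence. A minimal sketch of the pattern, with illustrative names and values rather than code taken from the diff:

    # The recurring 8.1.40 change: set literals for constant membership tests.
    # `resolve_mode` and its return values are illustrative placeholders.
    def resolve_mode(mode: str) -> str:
        if mode in {"predict", "track"}:  # compiled to a frozenset, O(1) lookup
            return "inference"
        elif mode in {"train", "val"}:
            return "training"
        raise ValueError(f"unsupported mode: {mode}")

    assert resolve_mode("track") == "inference"
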
ultralytics/data/augment.py
@@ -191,7 +191,7 @@ class Mosaic(BaseMixTransform):
 def __init__(self, dataset, imgsz=640, p=1.0, n=4):
 """Initializes the object with a dataset, image size, probability, and border."""
 assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
-assert n in
+assert n in {4, 9}, "grid must be equal to 4 or 9."
 super().__init__(dataset=dataset, p=p)
 self.dataset = dataset
 self.imgsz = imgsz

@@ -685,7 +685,7 @@ class RandomFlip:
 Default is 'horizontal'.
 flip_idx (array-like, optional): Index mapping for flipping keypoints, if any.
 """
-assert direction in
+assert direction in {"horizontal", "vertical"}, f"Support direction `horizontal` or `vertical`, got {direction}"
 assert 0 <= p <= 1.0

 self.p = p

ultralytics/data/base.py
@@ -15,7 +15,7 @@ import psutil
 from torch.utils.data import Dataset

 from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
-from .utils import HELP_URL, IMG_FORMATS
+from .utils import HELP_URL, FORMATS_HELP_MSG, IMG_FORMATS


 class BaseDataset(Dataset):

@@ -118,7 +118,7 @@ class BaseDataset(Dataset):
 raise FileNotFoundError(f"{self.prefix}{p} does not exist")
 im_files = sorted(x.replace("/", os.sep) for x in f if x.split(".")[-1].lower() in IMG_FORMATS)
 # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
-assert im_files, f"{self.prefix}No images found in {img_path}"
+assert im_files, f"{self.prefix}No images found in {img_path}. {FORMATS_HELP_MSG}"
 except Exception as e:
 raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}\n{HELP_URL}") from e
 if self.fraction < 1:

ultralytics/data/converter.py
@@ -481,7 +481,7 @@ def merge_multi_segment(segments):
 segments[i] = np.roll(segments[i], -idx[0], axis=0)
 segments[i] = np.concatenate([segments[i], segments[i][:1]])
 # Deal with the first segment and the last one
-if i in
+if i in {0, len(idx_list) - 1}:
 s.append(segments[i])
 else:
 idx = [0, idx[1] - idx[0]]

@@ -489,7 +489,7 @@ def merge_multi_segment(segments):

 else:
 for i in range(len(idx_list) - 1, -1, -1):
-if i not in
+if i not in {0, len(idx_list) - 1}:
 idx = idx_list[i]
 nidx = abs(idx[1] - idx[0])
 s.append(segments[i][nidx:])

ultralytics/data/dataset.py
@@ -77,7 +77,7 @@ class YOLODataset(BaseDataset):
 desc = f"{self.prefix}Scanning {path.parent / path.stem}..."
 total = len(self.im_files)
 nkpt, ndim = self.data.get("kpt_shape", (0, 0))
-if self.use_keypoints and (nkpt <= 0 or ndim not in
+if self.use_keypoints and (nkpt <= 0 or ndim not in {2, 3}):
 raise ValueError(
 "'kpt_shape' in data.yaml missing or incorrect. Should be a list with [number of "
 "keypoints, number of dims (2 for x,y or 3 for x,y,visible)], i.e. 'kpt_shape: [17, 3]'"

@@ -142,7 +142,7 @@ class YOLODataset(BaseDataset):

 # Display cache
 nf, nm, ne, nc, n = cache.pop("results") # found, missing, empty, corrupt, total
-if exists and LOCAL_RANK in
+if exists and LOCAL_RANK in {-1, 0}:
 d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt"
 TQDM(None, desc=self.prefix + d, total=n, initial=n) # display results
 if cache["msgs"]:

@@ -235,7 +235,7 @@ class YOLODataset(BaseDataset):
 value = values[i]
 if k == "img":
 value = torch.stack(value, 0)
-if k in
+if k in {"masks", "keypoints", "bboxes", "cls", "segments", "obb"}:
 value = torch.cat(value, 0)
 new_batch[k] = value
 new_batch["batch_idx"] = list(new_batch["batch_idx"])

@@ -334,7 +334,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
 assert cache["version"] == DATASET_CACHE_VERSION # matches current version
 assert cache["hash"] == get_hash([x[0] for x in self.samples]) # identical hash
 nf, nc, n, samples = cache.pop("results") # found, missing, empty, corrupt, total
-if LOCAL_RANK in
+if LOCAL_RANK in {-1, 0}:
 d = f"{desc} {nf} images, {nc} corrupt"
 TQDM(None, desc=d, total=n, initial=n)
 if cache["msgs"]:

ultralytics/data/loaders.py
@@ -15,7 +15,7 @@ import requests
 import torch
 from PIL import Image

-from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
+from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS, FORMATS_HELP_MSG
 from ultralytics.utils import LOGGER, is_colab, is_kaggle, ops
 from ultralytics.utils.checks import check_requirements


@@ -83,7 +83,7 @@ class LoadStreams:
 for i, s in enumerate(sources): # index, source
 # Start thread to read frames from video stream
 st = f"{i + 1}/{n}: {s}... "
-if urlparse(s).hostname in
+if urlparse(s).hostname in {"www.youtube.com", "youtube.com", "youtu.be"}: # if source is YouTube video
 # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
 s = get_best_youtube_url(s)
 s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam

@@ -291,8 +291,14 @@ class LoadImagesAndVideos:
 else:
 raise FileNotFoundError(f"{p} does not exist")

-
-videos = [
+# Define files as images or videos
+images, videos = [], []
+for f in files:
+suffix = f.split(".")[-1].lower() # Get file extension without the dot and lowercase
+if suffix in IMG_FORMATS:
+images.append(f)
+elif suffix in VID_FORMATS:
+videos.append(f)
 ni, nv = len(images), len(videos)

 self.files = images + videos

@@ -307,10 +313,7 @@ class LoadImagesAndVideos:
 else:
 self.cap = None
 if self.nf == 0:
-raise FileNotFoundError(
-f"No images or videos found in {p}. "
-f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
-)
+raise FileNotFoundError(f"No images or videos found in {p}. {FORMATS_HELP_MSG}")

 def __iter__(self):
 """Returns an iterator object for VideoStream or ImageFolder."""

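The LoadImagesAndVideos hunk above replaces the old one-shot list building with an explicit loop that sorts each input path into an image or video bucket by its lowercased suffix, silently ignoring anything it does not recognize. A standalone sketch of that logic, with abbreviated format sets (the full ones live in ultralytics/data/utils.py) and an illustrative helper name:

    # Standalone sketch of the suffix split; format sets abbreviated, helper name illustrative.
    IMG_FORMATS = {"bmp", "jpeg", "jpg", "png", "tif", "tiff", "webp"}
    VID_FORMATS = {"avi", "mkv", "mov", "mp4", "mpeg", "wmv", "webm"}

    def split_media(files):
        """Partition file paths into (images, videos) by lowercased extension."""
        images, videos = [], []
        for f in files:
            suffix = f.split(".")[-1].lower()  # extension without the dot
            if suffix in IMG_FORMATS:
                images.append(f)
            elif suffix in VID_FORMATS:
                videos.append(f)
        return images, videos

    images, videos = split_media(["bus.jpg", "demo.mp4", "notes.txt"])
    assert images == ["bus.jpg"] and videos == ["demo.mp4"]  # unknown suffixes are ignored
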
ultralytics/data/split_dota.py
@@ -71,7 +71,7 @@ def load_yolo_dota(data_root, split="train"):
 - train
 - val
 """
-assert split in
+assert split in {"train", "val"}, f"Split must be 'train' or 'val', not {split}."
 im_dir = Path(data_root) / "images" / split
 assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
 im_files = glob(str(Path(data_root) / "images" / split / "*"))

ultralytics/data/utils.py
@@ -39,6 +39,7 @@ HELP_URL = "See https://docs.ultralytics.com/datasets/detect for dataset formatt
 IMG_FORMATS = {"bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"} # image suffixes
 VID_FORMATS = {"asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm"} # video suffixes
 PIN_MEMORY = str(os.getenv("PIN_MEMORY", True)).lower() == "true" # global pin_memory for dataloaders
+FORMATS_HELP_MSG = f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"


 def img2label_paths(img_paths):

@@ -63,7 +64,7 @@ def exif_size(img: Image.Image):
 exif = img.getexif()
 if exif:
 rotation = exif.get(274, None) # the EXIF key for the orientation tag is 274
-if rotation in
+if rotation in {6, 8}: # rotation 270 or 90
 s = s[1], s[0]
 return s

@@ -79,8 +80,8 @@ def verify_image(args):
 shape = exif_size(im) # image size
 shape = (shape[1], shape[0]) # hw
 assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
-assert im.format.lower() in IMG_FORMATS, f"
-if im.format.lower() in
+assert im.format.lower() in IMG_FORMATS, f"Invalid image format {im.format}. {FORMATS_HELP_MSG}"
+if im.format.lower() in {"jpg", "jpeg"}:
 with open(im_file, "rb") as f:
 f.seek(-2, 2)
 if f.read() != b"\xff\xd9": # corrupt JPEG

@@ -105,8 +106,8 @@ def verify_image_label(args):
 shape = exif_size(im) # image size
 shape = (shape[1], shape[0]) # hw
 assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
-assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
-if im.format.lower() in
+assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}. {FORMATS_HELP_MSG}"
+if im.format.lower() in {"jpg", "jpeg"}:
 with open(im_file, "rb") as f:
 f.seek(-2, 2)
 if f.read() != b"\xff\xd9": # corrupt JPEG

@@ -336,7 +337,7 @@ def check_det_dataset(dataset, autodownload=True):
 else: # python script
 exec(s, {"yaml": data})
 dt = f"({round(time.time() - t, 1)}s)"
-s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in
+s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in {0, None} else f"failure {dt} ❌"
 LOGGER.info(f"Dataset download {s}\n")
 check_font("Arial.ttf" if is_ascii(data["names"]) else "Arial.Unicode.ttf") # download fonts

@@ -366,7 +367,7 @@ def check_cls_dataset(dataset, split=""):
 # Download (optional if dataset=https://file.zip is passed directly)
 if str(dataset).startswith(("http:/", "https:/")):
 dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
-elif Path(dataset).suffix in
+elif Path(dataset).suffix in {".zip", ".tar", ".gz"}:
 file = check_file(dataset)
 dataset = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False)

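The new FORMATS_HELP_MSG constant simply folds the two suffix sets into one reusable error string, so the dataset loaders and verifiers all raise the same hint. A small sketch of how it composes and gets used, with abbreviated sets and an illustrative helper name:

    # How FORMATS_HELP_MSG is composed and used; sets abbreviated here, the full ones
    # are defined in ultralytics/data/utils.py, and check_suffix is an illustrative helper.
    IMG_FORMATS = {"bmp", "jpeg", "jpg", "png"}
    VID_FORMATS = {"mov", "mp4", "webm"}
    FORMATS_HELP_MSG = f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"

    def check_suffix(path: str) -> None:
        suffix = path.split(".")[-1].lower()
        if suffix not in IMG_FORMATS | VID_FORMATS:
            raise ValueError(f"Invalid file format {suffix}. {FORMATS_HELP_MSG}")

    check_suffix("bus.jpg")  # passes; "notes.txt" would raise with the combined hint
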
ultralytics/engine/exporter.py
@@ -159,7 +159,7 @@ class Exporter:
 _callbacks (dict, optional): Dictionary of callback functions. Defaults to None.
 """
 self.args = get_cfg(cfg, overrides)
-if self.args.format.lower() in
+if self.args.format.lower() in {"coreml", "mlmodel"}: # fix attempt for protobuf<3.20.x errors
 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" # must run before TensorBoard callback

 self.callbacks = _callbacks or callbacks.get_default_callbacks()

@@ -171,9 +171,9 @@ class Exporter:
 self.run_callbacks("on_export_start")
 t = time.time()
 fmt = self.args.format.lower() # to lowercase
-if fmt in
+if fmt in {"tensorrt", "trt"}: # 'engine' aliases
 fmt = "engine"
-if fmt in
+if fmt in {"mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"}: # 'coreml' aliases
 fmt = "coreml"
 fmts = tuple(export_formats()["Argument"][1:]) # available export formats
 flags = [x == fmt for x in fmts]

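The second Exporter hunk normalizes user-supplied format aliases before the requested format is matched against the export table. Roughly, in isolation (the helper name is illustrative, only the alias sets mirror the hunk):

    # Alias normalization performed before the format lookup; helper name is illustrative.
    def normalize_format(fmt: str) -> str:
        fmt = fmt.lower()
        if fmt in {"tensorrt", "trt"}:  # 'engine' aliases
            fmt = "engine"
        if fmt in {"mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"}:  # 'coreml' aliases
            fmt = "coreml"
        return fmt

    assert normalize_format("TRT") == "engine"
    assert normalize_format("mlpackage") == "coreml"
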
ultralytics/engine/model.py
@@ -145,7 +145,7 @@ class Model(nn.Module):
 return

 # Load or create new YOLO model
-if Path(model).suffix in
+if Path(model).suffix in {".yaml", ".yml"}:
 self._new(model, task=task, verbose=verbose)
 else:
 self._load(model, task=task)

@@ -666,7 +666,7 @@ class Model(nn.Module):
 self.trainer.hub_session = self.session # attach optional HUB session
 self.trainer.train()
 # Update model and cfg after training
-if RANK in
+if RANK in {-1, 0}:
 ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last
 self.model, _ = attempt_load_one_weight(ckpt)
 self.overrides = self.model.args

ultralytics/engine/results.py
@@ -470,7 +470,7 @@ class Boxes(BaseTensor):
 if boxes.ndim == 1:
 boxes = boxes[None, :]
 n = boxes.shape[-1]
-assert n in
+assert n in {6, 7}, f"expected 6 or 7 values but got {n}" # xyxy, track_id, conf, cls
 super().__init__(boxes, orig_shape)
 self.is_track = n == 7
 self.orig_shape = orig_shape

@@ -687,7 +687,7 @@ class OBB(BaseTensor):
 if boxes.ndim == 1:
 boxes = boxes[None, :]
 n = boxes.shape[-1]
-assert n in
+assert n in {7, 8}, f"expected 7 or 8 values but got {n}" # xywh, rotation, track_id, conf, cls
 super().__init__(boxes, orig_shape)
 self.is_track = n == 8
 self.orig_shape = orig_shape

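The asserts in Boxes and OBB encode the expected column counts: a plain detection row carries the box coordinates, confidence and class, and a tracked row adds a track_id column. A hedged, standalone sketch of the Boxes check (not the real class):

    # Why Boxes asserts n in {6, 7}: a detection row is (x1, y1, x2, y2, conf, cls) and a
    # tracked row inserts track_id before conf. Minimal sketch, not the real Boxes class.
    import torch

    def is_tracked(boxes: torch.Tensor) -> bool:
        if boxes.ndim == 1:
            boxes = boxes[None, :]
        n = boxes.shape[-1]
        assert n in {6, 7}, f"expected 6 or 7 values but got {n}"
        return n == 7

    assert not is_tracked(torch.tensor([0.0, 0.0, 10.0, 10.0, 0.9, 2.0]))  # xyxy, conf, cls
    assert is_tracked(torch.tensor([0.0, 0.0, 10.0, 10.0, 1.0, 0.9, 2.0]))  # xyxy, track_id, conf, cls
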
ultralytics/engine/trainer.py
@@ -107,7 +107,7 @@ class BaseTrainer:
 self.save_dir = get_save_dir(self.args)
 self.args.name = self.save_dir.name # update name for loggers
 self.wdir = self.save_dir / "weights" # weights dir
-if RANK in
+if RANK in {-1, 0}:
 self.wdir.mkdir(parents=True, exist_ok=True) # make dir
 self.args.save_dir = str(self.save_dir)
 yaml_save(self.save_dir / "args.yaml", vars(self.args)) # save run args

@@ -121,7 +121,7 @@ class BaseTrainer:
 print_args(vars(self.args))

 # Device
-if self.device.type in
+if self.device.type in {"cpu", "mps"}:
 self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading

 # Model and Dataset

@@ -144,7 +144,7 @@ class BaseTrainer:

 # Callbacks
 self.callbacks = _callbacks or callbacks.get_default_callbacks()
-if RANK in
+if RANK in {-1, 0}:
 callbacks.add_integration_callbacks(self)

 def add_callback(self, event: str, callback):

@@ -210,7 +210,7 @@ class BaseTrainer:
 torch.cuda.set_device(RANK)
 self.device = torch.device("cuda", RANK)
 # LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}')
-os.environ["
+os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" # set to enforce timeout
 dist.init_process_group(
 "nccl" if dist.is_nccl_available() else "gloo",
 timeout=timedelta(seconds=10800), # 3 hours

@@ -251,7 +251,7 @@ class BaseTrainer:

 # Check AMP
 self.amp = torch.tensor(self.args.amp).to(self.device) # True or False
-if self.amp and RANK in
+if self.amp and RANK in {-1, 0}: # Single-GPU and DDP
 callbacks_backup = callbacks.default_callbacks.copy() # backup callbacks as check_amp() resets them
 self.amp = torch.tensor(check_amp(self.model), device=self.device)
 callbacks.default_callbacks = callbacks_backup # restore callbacks

@@ -274,7 +274,7 @@ class BaseTrainer:
 # Dataloaders
 batch_size = self.batch_size // max(world_size, 1)
 self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode="train")
-if RANK in
+if RANK in {-1, 0}:
 # Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
 self.test_loader = self.get_dataloader(
 self.testset, batch_size=batch_size if self.args.task == "obb" else batch_size * 2, rank=-1, mode="val"

@@ -340,7 +340,7 @@ class BaseTrainer:
 self._close_dataloader_mosaic()
 self.train_loader.reset()

-if RANK in
+if RANK in {-1, 0}:
 LOGGER.info(self.progress_string())
 pbar = TQDM(enumerate(self.train_loader), total=nb)
 self.tloss = None

@@ -392,7 +392,7 @@ class BaseTrainer:
 mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
 loss_len = self.tloss.shape[0] if len(self.tloss.shape) else 1
 losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0)
-if RANK in
+if RANK in {-1, 0}:
 pbar.set_description(
 ("%11s" * 2 + "%11.4g" * (2 + loss_len))
 % (f"{epoch + 1}/{self.epochs}", mem, *losses, batch["cls"].shape[0], batch["img"].shape[-1])

@@ -405,7 +405,7 @@ class BaseTrainer:

 self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers
 self.run_callbacks("on_train_epoch_end")
-if RANK in
+if RANK in {-1, 0}:
 final_epoch = epoch + 1 >= self.epochs
 self.ema.update_attr(self.model, include=["yaml", "nc", "args", "names", "stride", "class_weights"])

@@ -447,7 +447,7 @@ class BaseTrainer:
 break # must break all DDP ranks
 epoch += 1

-if RANK in
+if RANK in {-1, 0}:
 # Do final val with best.pt
 LOGGER.info(
 f"\n{epoch - self.start_epoch + 1} epochs completed in "

@@ -503,12 +503,12 @@ class BaseTrainer:
 try:
 if self.args.task == "classify":
 data = check_cls_dataset(self.args.data)
-elif self.args.data.split(".")[-1] in
+elif self.args.data.split(".")[-1] in {"yaml", "yml"} or self.args.task in {
 "detect",
 "segment",
 "pose",
 "obb",
-
+}:
 data = check_det_dataset(self.args.data)
 if "yaml_file" in data:
 self.args.data = data["yaml_file"] # for validating 'yolo train data=url.zip' usage

@@ -740,7 +740,7 @@ class BaseTrainer:
 else: # weight (with decay)
 g[0].append(param)

-if name in
+if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
 optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
 elif name == "RMSProp":
 optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)

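The DDP hunk switches to the TORCH_NCCL_BLOCKING_WAIT environment variable name used by recent PyTorch releases; it has to be set before dist.init_process_group so the 3-hour timeout is actually enforced. A minimal sketch of that setup step (the function name and arguments are assumptions; only the env var and timeout mirror the hunk):

    # Sketch of the DDP init step touched by the trainer hunk; only the env var name and
    # the 3-hour timeout mirror the diff, the helper name and arguments are assumptions.
    import os
    from datetime import timedelta

    import torch
    import torch.distributed as dist

    def setup_ddp(rank: int, world_size: int) -> None:
        torch.cuda.set_device(rank)
        os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"  # must be set before init_process_group
        dist.init_process_group(
            "nccl" if dist.is_nccl_available() else "gloo",
            timeout=timedelta(seconds=10800),  # 3 hours
            rank=rank,
            world_size=world_size,
        )
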
ultralytics/engine/validator.py
@@ -139,14 +139,14 @@ class BaseValidator:
 self.args.batch = 1 # export.py models default to batch-size 1
 LOGGER.info(f"Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")

-if str(self.args.data).split(".")[-1] in
+if str(self.args.data).split(".")[-1] in {"yaml", "yml"}:
 self.data = check_det_dataset(self.args.data)
 elif self.args.task == "classify":
 self.data = check_cls_dataset(self.args.data, split=self.args.split)
 else:
 raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌"))

-if self.device.type in
+if self.device.type in {"cpu", "mps"}:
 self.args.workers = 0 # faster CPU val as time dominated by inference, not dataloading
 if not pt:
 self.args.rect = False

ultralytics/hub/utils.py
@@ -198,7 +198,7 @@ class Events:
 }
 self.enabled = (
 SETTINGS["sync"]
-and RANK in
+and RANK in {-1, 0}
 and not TESTS_RUNNING
 and ONLINE
 and (is_pip_package() or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")

ultralytics/models/fastsam/model.py
@@ -24,7 +24,7 @@ class FastSAM(Model):
 """Call the __init__ method of the parent class (YOLO) with the updated default model."""
 if str(model) == "FastSAM.pt":
 model = "FastSAM-x.pt"
-assert Path(model).suffix not in
+assert Path(model).suffix not in {".yaml", ".yml"}, "FastSAM models only support pre-trained models."
 super().__init__(model=model, task="segment")

 @property

ultralytics/models/fastsam/prompt.py
@@ -9,7 +9,7 @@ import numpy as np
 import torch
 from PIL import Image

-from ultralytics.utils import TQDM
+from ultralytics.utils import TQDM, checks


 class FastSAMPrompt:

@@ -33,9 +33,7 @@ class FastSAMPrompt:
 try:
 import clip
 except ImportError:
-
-
-check_requirements("git+https://github.com/ultralytics/CLIP.git")
+checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
 import clip
 self.clip = clip

@@ -115,7 +113,8 @@ class FastSAMPrompt:
 points (list, optional): Points to be plotted. Defaults to None.
 point_label (list, optional): Labels for the points. Defaults to None.
 mask_random_color (bool, optional): Whether to use random color for masks. Defaults to True.
-better_quality (bool, optional): Whether to apply morphological transformations for better mask quality.
+better_quality (bool, optional): Whether to apply morphological transformations for better mask quality.
+Defaults to True.
 retina (bool, optional): Whether to use retina mask. Defaults to False.
 with_contours (bool, optional): Whether to plot contours. Defaults to True.
 """

ultralytics/models/nas/model.py
@@ -45,7 +45,7 @@ class NAS(Model):

 def __init__(self, model="yolo_nas_s.pt") -> None:
 """Initializes the NAS model with the provided or default 'yolo_nas_s.pt' model."""
-assert Path(model).suffix not in
+assert Path(model).suffix not in {".yaml", ".yml"}, "YOLO-NAS models only support pre-trained models."
 super().__init__(model, task="detect")

 @smart_inference_mode()

ultralytics/models/sam/model.py
@@ -41,7 +41,7 @@ class SAM(Model):
 Raises:
 NotImplementedError: If the model file extension is not .pt or .pth.
 """
-if model and Path(model).suffix not in
+if model and Path(model).suffix not in {".pt", ".pth"}:
 raise NotImplementedError("SAM prediction requires pre-trained *.pt or *.pth model.")
 super().__init__(model=model, task="segment")

ultralytics/models/sam/modules/tiny_encoder.py
@@ -112,7 +112,7 @@ class PatchMerging(nn.Module):
 self.out_dim = out_dim
 self.act = activation()
 self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
-stride_c = 1 if out_dim in
+stride_c = 1 if out_dim in {320, 448, 576} else 2
 self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
 self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)

ultralytics/models/yolo/classify/train.py
@@ -68,7 +68,7 @@ class ClassificationTrainer(BaseTrainer):
 self.model, ckpt = attempt_load_one_weight(model, device="cpu")
 for p in self.model.parameters():
 p.requires_grad = True # for training
-elif model.split(".")[-1] in
+elif model.split(".")[-1] in {"yaml", "yml"}:
 self.model = self.get_model(cfg=model)
 elif model in torchvision.models.__dict__:
 self.model = torchvision.models.__dict__[model](weights="IMAGENET1K_V1" if self.args.pretrained else None)

ultralytics/models/yolo/detect/train.py
@@ -44,7 +44,7 @@ class DetectionTrainer(BaseTrainer):

 def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
 """Construct and return dataloader."""
-assert mode in
+assert mode in {"train", "val"}, f"Mode must be 'train' or 'val', not {mode}."
 with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
 dataset = self.build_dataset(dataset_path, mode, batch_size)
 shuffle = mode == "train"

ultralytics/models/yolo/world/train.py
@@ -1,31 +1,24 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

+import itertools
+
+from ultralytics.data import build_yolo_dataset
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import WorldModel
-from ultralytics.utils import DEFAULT_CFG, RANK
-from ultralytics.data import build_yolo_dataset
+from ultralytics.utils import DEFAULT_CFG, RANK, checks
 from ultralytics.utils.torch_utils import de_parallel
-from ultralytics.utils.checks import check_requirements
-import itertools
-
-try:
-import clip
-except ImportError:
-check_requirements("git+https://github.com/ultralytics/CLIP.git")
-import clip


 def on_pretrain_routine_end(trainer):
 """Callback."""
-if RANK in
+if RANK in {-1, 0}:
 # NOTE: for evaluation
 names = [name.split("/")[0] for name in list(trainer.test_loader.dataset.data["names"].values())]
 de_parallel(trainer.ema.ema).set_classes(names, cache_clip_model=False)
 device = next(trainer.model.parameters()).device
-text_model, _ = clip.load("ViT-B/32", device=device)
-for p in text_model.parameters():
+trainer.text_model, _ = trainer.clip.load("ViT-B/32", device=device)
+for p in trainer.text_model.parameters():
 p.requires_grad_(False)
-trainer.text_model = text_model


 class WorldTrainer(yolo.detect.DetectionTrainer):

@@ -48,6 +41,14 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
 overrides = {}
 super().__init__(cfg, overrides, _callbacks)

+# Import and assign clip
+try:
+import clip
+except ImportError:
+checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
+import clip
+self.clip = clip
+
 def get_model(self, cfg=None, weights=None, verbose=True):
 """Return WorldModel initialized with specified config and weights."""
 # NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.

@@ -84,7 +85,7 @@ class WorldTrainer(yolo.detect.DetectionTrainer):

 # NOTE: add text features
 texts = list(itertools.chain(*batch["texts"]))
-text_token = clip.tokenize(texts).to(batch["img"].device)
+text_token = self.clip.tokenize(texts).to(batch["img"].device)
 txt_feats = self.text_model.encode_text(text_token).to(dtype=batch["img"].dtype) # torch.float32
 txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
 batch["txt_feats"] = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])

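The world/train.py hunks move the CLIP import out of module scope: importing the module no longer triggers an on-demand install, and the trainer keeps the module on self.clip for the later tokenize/load calls. A trimmed sketch of that lazy-import pattern (the class name and body are illustrative; only the import logic mirrors the hunk):

    # Lazy, instance-scoped CLIP import as adopted by WorldTrainer (sketch; class name illustrative).
    from ultralytics.utils import checks

    class WorldTrainerSketch:
        def __init__(self):
            # Importing here keeps `import ultralytics.models.yolo.world.train` cheap and offline-safe.
            try:
                import clip
            except ImportError:
                checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
                import clip
            self.clip = clip  # later used as self.clip.tokenize(...) and self.clip.load("ViT-B/32", ...)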