ultralytics 8.3.222__tar.gz → 8.3.223__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ultralytics-8.3.222 → ultralytics-8.3.223}/PKG-INFO +2 -2
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/pyproject.toml +2 -2
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/__init__.py +1 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/ImageNet.yaml +1 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/lvis.yaml +5 -5
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/exporter.py +40 -101
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/rtdetr/val.py +1 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/autobackend.py +1 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/head.py +5 -30
 - ultralytics-8.3.223/ultralytics/utils/export/__init__.py +7 -0
 - ultralytics-8.3.222/ultralytics/utils/export/__init__.py → ultralytics-8.3.223/ultralytics/utils/export/engine.py +0 -2
 - ultralytics-8.3.223/ultralytics/utils/export/tensorflow.py +221 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/nms.py +4 -2
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics.egg-info/PKG-INFO +2 -2
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics.egg-info/SOURCES.txt +3 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics.egg-info/requires.txt +1 -1
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/LICENSE +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/README.md +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/setup.cfg +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/conftest.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_cli.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_cuda.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_engine.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_exports.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_integrations.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_python.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/tests/test_solutions.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/assets/bus.jpg +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/assets/zidane.jpg +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/HomeObjects-3K.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/VOC.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco128.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco8-grayscale.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco8-multispectral.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/coco8.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/construction-ppe.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/dog-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/dota8-multispectral.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/dota8.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/hand-keypoints.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/medical-pills.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/signature.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/xView.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/default.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yolo11-cls.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yolo11-obb.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yolo11-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yolo11-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yolo11.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yoloe-11-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/11/yoloe-11.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/12/yolo12-cls.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/12/yolo12-obb.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/12/yolo12-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/12/yolo12-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/12/yolo12.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yoloe-v8.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/trackers/botsort.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/annotator.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/augment.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/base.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/build.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/converter.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/dataset.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/loaders.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/scripts/download_weights.sh +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/scripts/get_coco.sh +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/scripts/get_coco128.sh +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/scripts/get_imagenet.sh +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/split.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/split_dota.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/data/utils.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/predictor.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/results.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/trainer.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/tuner.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/validator.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/hub/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/hub/auth.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/hub/google/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/hub/session.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/hub/utils.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/fastsam/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/fastsam/model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/fastsam/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/fastsam/utils.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/fastsam/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/nas/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/nas/model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/nas/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/nas/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/rtdetr/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/rtdetr/model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/rtdetr/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/rtdetr/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/amg.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/build.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/blocks.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/decoders.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/encoders.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/memory_attention.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/sam.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/transformer.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/modules/utils.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/sam/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/utils/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/utils/loss.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/utils/ops.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/classify/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/classify/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/classify/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/classify/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/detect/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/detect/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/detect/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/detect/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/obb/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/obb/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/obb/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/obb/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/pose/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/pose/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/pose/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/pose/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/segment/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/segment/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/segment/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/segment/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/world/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/world/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/world/train_world.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/yoloe/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/yoloe/predict.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/yoloe/train.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/yoloe/train_seg.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/models/yolo/yoloe/val.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/activation.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/block.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/conv.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/transformer.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/modules/utils.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/tasks.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/nn/text_model.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/py.typed +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/ai_gym.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/analytics.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/config.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/distance_calculation.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/heatmap.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/instance_segmentation.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/object_blurrer.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/object_counter.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/object_cropper.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/parking_management.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/queue_management.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/region_counter.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/security_alarm.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/similarity_search.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/solutions.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/speed_estimation.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/streamlit_inference.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/templates/similarity-search.html +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/trackzone.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/solutions/vision_eye.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/basetrack.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/bot_sort.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/byte_tracker.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/track.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/utils/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/utils/gmc.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/utils/kalman_filter.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/trackers/utils/matching.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/autobatch.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/autodevice.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/benchmarks.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/__init__.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/base.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/clearml.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/comet.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/dvc.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/hub.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/mlflow.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/neptune.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/platform.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/raytune.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/tensorboard.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/callbacks/wb.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/checks.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/cpu.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/dist.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/downloads.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/errors.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/events.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/export/imx.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/files.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/git.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/instance.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/logger.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/loss.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/metrics.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/ops.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/patches.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/plotting.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/tal.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/torch_utils.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/tqdm.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/triton.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/utils/tuner.py +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics.egg-info/dependency_links.txt +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics.egg-info/entry_points.txt +0 -0
 - {ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics.egg-info/top_level.txt +0 -0
 
{ultralytics-8.3.222 → ultralytics-8.3.223}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.222
+Version: 8.3.223
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>

@@ -44,7 +44,7 @@ Requires-Dist: torch!=2.4.0,>=1.8.0; sys_platform == "win32"
 Requires-Dist: torchvision>=0.9.0
 Requires-Dist: psutil
 Requires-Dist: polars
-Requires-Dist: ultralytics-thop>=2.0.…
+Requires-Dist: ultralytics-thop>=2.0.18
 Provides-Extra: dev
 Requires-Dist: ipython; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
{ultralytics-8.3.222 → ultralytics-8.3.223}/pyproject.toml

@@ -73,7 +73,7 @@ dependencies = [
     "torchvision>=0.9.0",
     "psutil", # system utilization
     "polars",
-    "ultralytics-thop>=2.0.…
+    "ultralytics-thop>=2.0.18", # FLOPs computation https://github.com/ultralytics/thop
 ]
 
 # Optional dependencies ------------------------------------------------------------------------------------------------

@@ -190,4 +190,4 @@ in-place = true
 
 [tool.codespell]
 ignore-words-list = "grey,writeable,finalY,RepResNet,Idenfy,WIT,Smoot,EHR,ROUGE,ALS,iTerm,Carmel,FPR,Hach,Calle,ore,COO,MOT,crate,nd,ned,strack,dota,ane,segway,fo,gool,winn,commend,bloc,nam,afterall,skelton,goin"
-skip = "…
+skip = "*.pt,*.pth,*.torchscript,*.onnx,*.tflite,*.pb,*.bin,*.param,*.mlmodel,*.engine,*.npy,*.data*,*.csv,*pnnx*,*venv*,*translat*,*lock*,__pycache__*,*.ico,*.jpg,*.png,*.webp,*.avif,*.mp4,*.mov,/runs,/.git,./docs/??/*.md,./docs/mkdocs_??.yml"
{ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/cfg/datasets/lvis.yaml

@@ -35,7 +35,7 @@ names:
   17: armband
   18: armchair
   19: armoire
-  20: armor…
+  20: armor
   21: artichoke
   22: trash can/garbage can/wastebin/dustbin/trash barrel/trash bin
   23: ashtray

@@ -245,7 +245,7 @@ names:
   227: CD player
   228: celery
   229: cellular telephone/cellular phone/cellphone/mobile phone/smart phone
-  230: chain mail/ring mail/chain armor/…
+  230: chain mail/ring mail/chain armor/ring armor
   231: chair
   232: chaise longue/chaise/daybed
   233: chalice

@@ -305,7 +305,7 @@ names:
   287: coin
   288: colander/cullender
   289: coleslaw/slaw
-  290: coloring material…
+  290: coloring material
   291: combination lock
   292: pacifier/teething ring
   293: comic book

@@ -401,7 +401,7 @@ names:
   383: domestic ass/donkey
   384: doorknob/doorhandle
   385: doormat/welcome mat
-  386: …
+  386: donut
   387: dove
   388: dragonfly
   389: drawer

@@ -1072,7 +1072,7 @@ names:
   1054: tag
   1055: taillight/rear light
   1056: tambourine
-  1057: army tank/armored combat vehicle…
+  1057: army tank/armored combat vehicle
   1058: tank/tank storage vessel/storage tank
   1059: tank top/tank top clothing
   1060: tape/tape sticky cloth or paper
{ultralytics-8.3.222 → ultralytics-8.3.223}/ultralytics/engine/exporter.py

@@ -107,9 +107,17 @@ from ultralytics.utils.checks import (
     is_intel,
     is_sudo_available,
 )
-from ultralytics.utils.downloads import …
-from ultralytics.utils.export import …
-…
+from ultralytics.utils.downloads import get_github_assets, safe_download
+from ultralytics.utils.export import (
+    keras2pb,
+    onnx2engine,
+    onnx2saved_model,
+    pb2tfjs,
+    tflite2edgetpu,
+    torch2imx,
+    torch2onnx,
+)
+from ultralytics.utils.files import file_size
 from ultralytics.utils.metrics import batch_probiou
 from ultralytics.utils.nms import TorchNMS
 from ultralytics.utils.ops import Profile
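The import hunk above is the visible surface of this release's main refactor: the TensorFlow-related conversion code moves out of exporter.py and into ultralytics/utils/export (including the new tensorflow.py module listed at the top of this diff). As a rough orientation only — the purposes noted below are inferred from the helper names and from the call sites later in this diff, not from package documentation:

from ultralytics.utils.export import (
    torch2onnx,        # presumably PyTorch module -> ONNX graph
    onnx2engine,       # presumably ONNX -> TensorRT engine (see new engine.py)
    onnx2saved_model,  # ONNX -> TF SavedModel + TFLite files (call site shown below)
    keras2pb,          # Keras/SavedModel -> frozen TensorFlow GraphDef *.pb (call site shown below)
    pb2tfjs,           # presumably frozen GraphDef -> TensorFlow.js web model
    tflite2edgetpu,    # TFLite -> Edge TPU-compiled TFLite (call site shown below)
    torch2imx,         # presumably PyTorch module -> Sony IMX deployment artifacts
)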
@@ -206,15 +214,6 @@ def validate_args(format, passed_args, valid_args):
             assert arg in valid_args, f"ERROR ❌️ argument '{arg}' is not supported for format='{format}'"
 
 
-def gd_outputs(gd):
-    """Return TensorFlow GraphDef model output node names."""
-    name_list, input_list = [], []
-    for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
-        name_list.append(node.name)
-        input_list.extend(node.input)
-    return sorted(f"{x}:0" for x in list(set(name_list) - set(input_list)) if not x.startswith("NoOp"))
-
-
 def try_export(inner_func):
     """YOLO export decorator, i.e. @try_export."""
     inner_args = get_default_args(inner_func)

@@ -371,7 +370,7 @@ class Exporter:
                 LOGGER.warning("IMX export requires nms=True, setting nms=True.")
                 self.args.nms = True
             if model.task not in {"detect", "pose", "classify"}:
-                raise ValueError("IMX export only supported for detection…
+                raise ValueError("IMX export only supported for detection, pose estimation, and classification models.")
         if not hasattr(model, "names"):
             model.names = default_class_names()
         model.names = check_class_names(model.names)

@@ -461,6 +460,10 @@ class Exporter:
             from ultralytics.utils.export.imx import FXModel
 
             model = FXModel(model, self.imgsz)
+        if tflite or edgetpu:
+            from ultralytics.utils.export.tensorflow import tf_wrapper
+
+            model = tf_wrapper(model)
         for m in model.modules():
             if isinstance(m, Classify):
                 m.export = True

@@ -642,7 +645,7 @@ class Exporter:
             assert TORCH_1_13, f"'nms=True' ONNX export requires torch>=1.13 (found torch=={TORCH_VERSION})"
 
         f = str(self.file.with_suffix(".onnx"))
-        output_names = ["output0", "output1"] if …
+        output_names = ["output0", "output1"] if self.model.task == "segment" else ["output0"]
         dynamic = self.args.dynamic
         if dynamic:
             dynamic = {"images": {0: "batch", 2: "height", 3: "width"}}  # shape(1,3,640,640)
@@ -1053,75 +1056,43 @@ class Exporter:
         if f.is_dir():
             shutil.rmtree(f)  # delete output folder
 
-        # …
-        …
-        if …
-            …
+        # Export to TF
+        images = None
+        if self.args.int8 and self.args.data:
+            images = [batch["img"] for batch in self.get_int8_calibration_dataloader(prefix)]
+            images = (
+                torch.nn.functional.interpolate(torch.cat(images, 0).float(), size=self.imgsz)
+                .permute(0, 2, 3, 1)
+                .numpy()
+                .astype(np.float32)
+            )
 
         # Export to ONNX
         if isinstance(self.model.model[-1], RTDETRDecoder):
             self.args.opset = self.args.opset or 19
             assert 16 <= self.args.opset <= 19, "RTDETR export requires opset>=16;<=19"
         self.args.simplify = True
-        f_onnx = self.export_onnx()
-
-
-
-
-            …
-            …
-
-                images = [batch["img"] for batch in self.get_int8_calibration_dataloader(prefix)]
-                images = torch.nn.functional.interpolate(torch.cat(images, 0).float(), size=self.imgsz).permute(
-                    0, 2, 3, 1
-                )
-                np.save(str(tmp_file), images.numpy().astype(np.float32))  # BHWC
-                np_data = [["images", tmp_file, [[[[0, 0, 0]]]], [[[[255, 255, 255]]]]]]
-
-        import onnx2tf  # scoped for after ONNX export for reduced conflict during import
-
-        LOGGER.info(f"{prefix} starting TFLite export with onnx2tf {onnx2tf.__version__}...")
-        keras_model = onnx2tf.convert(
-            input_onnx_file_path=f_onnx,
-            output_folder_path=str(f),
-            not_use_onnxsim=True,
-            verbosity="error",  # note INT8-FP16 activation bug https://github.com/ultralytics/ultralytics/issues/15873
-            output_integer_quantized_tflite=self.args.int8,
-            custom_input_op_name_np_data_path=np_data,
-            enable_batchmatmul_unfold=True and not self.args.int8,  # fix lower no. of detected objects on GPU delegate
-            output_signaturedefs=True,  # fix error with Attention block group convolution
-            disable_group_convolution=self.args.format in {"tfjs", "edgetpu"},  # fix error with group convolution
+        f_onnx = self.export_onnx()  # ensure ONNX is available
+        keras_model = onnx2saved_model(
+            f_onnx,
+            f,
+            int8=self.args.int8,
+            images=images,
+            disable_group_convolution=self.args.format in {"tfjs", "edgetpu"},
+            prefix=prefix,
         )
         YAML.save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
-
-        # Remove/rename TFLite models
-        if self.args.int8:
-            tmp_file.unlink(missing_ok=True)
-            for file in f.rglob("*_dynamic_range_quant.tflite"):
-                file.rename(file.with_name(file.stem.replace("_dynamic_range_quant", "_int8") + file.suffix))
-            for file in f.rglob("*_integer_quant_with_int16_act.tflite"):
-                file.unlink()  # delete extra fp16 activation TFLite files
-
         # Add TFLite metadata
         for file in f.rglob("*.tflite"):
-            …
+            file.unlink() if "quant_with_int16_act.tflite" in str(file) else self._add_tflite_metadata(file)
 
         return str(f), keras_model  # or keras_model = tf.saved_model.load(f, tags=None, options=None)
 
     @try_export
     def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
         """Export YOLO model to TensorFlow GraphDef *.pb format https://github.com/leimao/Frozen-Graph-TensorFlow."""
-        import tensorflow as tf
-        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-        LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
         f = self.file.with_suffix(".pb")
-
-        m = tf.function(lambda x: keras_model(x))  # full model
-        m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
-        frozen_func = convert_variables_to_constants_v2(m)
-        frozen_func.graph.as_graph_def()
-        tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+        keras2pb(keras_model, f, prefix)
         return f
 
     @try_export
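With the two hunks above, export_saved_model() and export_pb() become thin wrappers around the new helpers. A minimal sketch of driving the same two-step conversion directly, assuming an ONNX file already exists on disk; the keyword arguments mirror the call sites above, while the concrete paths and the plain-string prefixes are hypothetical:

from pathlib import Path

from ultralytics.utils.export import keras2pb, onnx2saved_model

onnx_path = "yolo11n.onnx"  # hypothetical ONNX file, produced earlier by export_onnx()
saved_model_dir = Path("yolo11n_saved_model")

# ONNX -> TF SavedModel (with TFLite files inside the directory), as in export_saved_model() above
keras_model = onnx2saved_model(
    onnx_path,
    saved_model_dir,
    int8=False,  # no INT8 quantization, so no calibration images are passed
    images=None,
    disable_group_convolution=False,
    prefix="TensorFlow SavedModel:",
)

# SavedModel/Keras model -> frozen GraphDef (*.pb), as in export_pb() above
keras2pb(keras_model, Path("yolo11n.pb"), "TensorFlow GraphDef:")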
@@ -1189,22 +1160,11 @@ class Exporter:
                 "sudo apt-get install edgetpu-compiler",
             ):
                 subprocess.run(c if is_sudo_available() else c.replace("sudo ", ""), shell=True, check=True)
-        ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().rsplit(maxsplit=1)[-1]
 
+        ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().rsplit(maxsplit=1)[-1]
         LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
+        tflite2edgetpu(tflite_file=tflite_model, output_dir=tflite_model.parent, prefix=prefix)
         f = str(tflite_model).replace(".tflite", "_edgetpu.tflite")  # Edge TPU model
-
-        cmd = (
-            "edgetpu_compiler "
-            f'--out_dir "{Path(f).parent}" '
-            "--show_operations "
-            "--search_delegate "
-            "--delegate_search_step 30 "
-            "--timeout_sec 180 "
-            f'"{tflite_model}"'
-        )
-        LOGGER.info(f"{prefix} running '{cmd}'")
-        subprocess.run(cmd, shell=True)
         self._add_tflite_metadata(f)
         return f
 
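Similarly, the inlined `edgetpu_compiler` invocation removed above now lives in the `tflite2edgetpu` helper. A usage sketch with a hypothetical .tflite path (requires Linux with `edgetpu_compiler` on the PATH):

```python
from pathlib import Path

from ultralytics.utils.export import tflite2edgetpu

tflite = Path("yolo11n_saved_model/yolo11n_full_integer_quant.tflite")  # hypothetical input model
tflite2edgetpu(tflite_file=tflite, output_dir=tflite.parent, prefix="Edge TPU:")
# Per the exporter code above, the compiled model appears next to the input as *_edgetpu.tflite
```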
@@ -1212,31 +1172,10 @@ class Exporter:
     def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
         """Export YOLO model to TensorFlow.js format."""
         check_requirements("tensorflowjs")
-        import tensorflow as tf
-        import tensorflowjs as tfjs
 
-        LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
         f = str(self.file).replace(self.file.suffix, "_web_model")  # js dir
         f_pb = str(self.file.with_suffix(".pb"))  # *.pb path
-
-        gd = tf.Graph().as_graph_def()  # TF GraphDef
-        with open(f_pb, "rb") as file:
-            gd.ParseFromString(file.read())
-        outputs = ",".join(gd_outputs(gd))
-        LOGGER.info(f"\n{prefix} output node names: {outputs}")
-
-        quantization = "--quantize_float16" if self.args.half else "--quantize_uint8" if self.args.int8 else ""
-        with spaces_in_path(f_pb) as fpb_, spaces_in_path(f) as f_:  # exporter can not handle spaces in path
-            cmd = (
-                "tensorflowjs_converter "
-                f'--input_format=tf_frozen_model {quantization} --output_node_names={outputs} "{fpb_}" "{f_}"'
-            )
-            LOGGER.info(f"{prefix} running '{cmd}'")
-            subprocess.run(cmd, shell=True)
-
-        if " " in f:
-            LOGGER.warning(f"{prefix} your model may not work correctly with spaces in path '{f}'.")
-
+        pb2tfjs(pb_file=f_pb, output_dir=f, half=self.args.half, int8=self.args.int8, prefix=prefix)
         # Add metadata
         YAML.save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f
@@ -89,7 +89,7 @@ class RTDETRDataset(YOLODataset):
             transforms = v8_transforms(self, self.imgsz, hyp, stretch=True)
         else:
             # transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), auto=False, scale_fill=True)])
-            transforms = Compose([])
+            transforms = Compose([lambda x: {**x, **{"ratio_pad": [x["ratio_pad"], [0, 0]]}}])
         transforms.append(
             Format(
                 bbox_format="xywh",
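The replacement transform no longer leaves the pipeline empty; it wraps each label dict so that `ratio_pad` becomes a (ratio, pad) pair with an explicit zero pad. A small illustration of what that lambda does (the input `ratio_pad` value is made up):

```python
# Illustrative only: apply the new wrapper lambda to a toy label dict
label = {"ratio_pad": (1.0, 1.0)}  # hypothetical resize ratio with no pad information yet
wrap = lambda x: {**x, **{"ratio_pad": [x["ratio_pad"], [0, 0]]}}
print(wrap(label)["ratio_pad"])  # [(1.0, 1.0), [0, 0]] -> ratio plus an explicit zero pad
```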
@@ -428,7 +428,7 @@ class AutoBackend(nn.Module):
             LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...")
             import tensorflow as tf
 
-            from ultralytics.engine.exporter import gd_outputs
+            from ultralytics.utils.export.tensorflow import gd_outputs
 
             def wrap_frozen_graph(gd, inputs, outputs):
                 """Wrap frozen graphs for deployment."""
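`gd_outputs` is now imported from its new home in `ultralytics.utils.export.tensorflow` (its definition appears at the end of the new module below). A short sketch of inspecting a frozen graph's output node names with it, assuming a .pb file is available at the hypothetical path shown:

```python
import tensorflow as tf

from ultralytics.utils.export.tensorflow import gd_outputs

gd = tf.Graph().as_graph_def()
with open("yolo11n.pb", "rb") as f:  # hypothetical frozen-graph path
    gd.ParseFromString(f.read())
print(gd_outputs(gd))  # output tensor names, e.g. ['Identity:0']
```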
@@ -166,22 +166,8 @@ class Detect(nn.Module):
             self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
             self.shape = shape
 
-        if self.export and self.format in {"saved_model", "pb", "tflite", "edgetpu", "tfjs"}:  # avoid TF FlexSplitV ops
-            box = x_cat[:, : self.reg_max * 4]
-            cls = x_cat[:, self.reg_max * 4 :]
-        else:
-            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
-
-        if self.export and self.format in {"tflite", "edgetpu"}:
-            # Precompute normalization factor to increase numerical stability
-            # See https://github.com/ultralytics/ultralytics/issues/7371
-            grid_h = shape[2]
-            grid_w = shape[3]
-            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
-            norm = self.strides / (self.stride[0] * grid_size)
-            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
-        else:
-            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
+        box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
+        dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
         return torch.cat((dbox, cls.sigmoid()), 1)
 
     def bias_init(self):
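The TFLite/Edge TPU normalization branch deleted here is relocated rather than dropped: the new `ultralytics.utils.export.tensorflow` module (later in this diff) defines `_tf_inference` and `tf_kpts_decode` with the same logic and attaches them through `tf_wrapper`. A hedged sketch of how a caller might patch a model before a TensorFlow-family export; the exact call site inside the exporter is not shown in this diff:

```python
import torch

from ultralytics import YOLO
from ultralytics.utils.export.tensorflow import tf_wrapper

det_model = YOLO("yolo11n.pt").model.eval()   # underlying torch.nn.Module
det_model = tf_wrapper(det_model)             # Detect/Pose heads now use the TF-safe decode paths
dummy = torch.zeros(1, 3, 640, 640)           # ONNX/TF export of det_model would follow (sketch only)
```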
@@ -391,20 +377,9 @@ class Pose(Detect):
         """Decode keypoints from predictions."""
         ndim = self.kpt_shape[1]
         if self.export:
-            if self.format in {
-                "tflite",
-                "edgetpu",
-            }:  # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
-                # Precompute normalization factor to increase numerical stability
-                y = kpts.view(bs, *self.kpt_shape, -1)
-                grid_h, grid_w = self.shape[2], self.shape[3]
-                grid_size = torch.tensor([grid_w, grid_h], device=y.device).reshape(1, 2, 1)
-                norm = self.strides / (self.stride[0] * grid_size)
-                a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * norm
-            else:
-                # NCNN fix
-                y = kpts.view(bs, *self.kpt_shape, -1)
-                a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
+            # NCNN fix
+            y = kpts.view(bs, *self.kpt_shape, -1)
+            a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
             if ndim == 3:
                 a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
             return a.view(bs, self.nk, -1)
@@ -0,0 +1,7 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from .engine import onnx2engine, torch2onnx
+from .imx import torch2imx
+from .tensorflow import keras2pb, onnx2saved_model, pb2tfjs, tflite2edgetpu
+
+__all__ = ["keras2pb", "onnx2engine", "onnx2saved_model", "pb2tfjs", "tflite2edgetpu", "torch2imx", "torch2onnx"]
@@ -0,0 +1,221 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import numpy as np
+import torch
+
+from ultralytics.nn.modules import Detect, Pose
+from ultralytics.utils import LOGGER
+from ultralytics.utils.downloads import attempt_download_asset
+from ultralytics.utils.files import spaces_in_path
+from ultralytics.utils.tal import make_anchors
+
+
+def tf_wrapper(model: torch.nn.Module) -> torch.nn.Module:
+    """A wrapper to add TensorFlow compatible inference methods to Detect and Pose layers."""
+    for m in model.modules():
+        if not isinstance(m, Detect):
+            continue
+        import types
+
+        m._inference = types.MethodType(_tf_inference, m)
+        if type(m) is Pose:
+            m.kpts_decode = types.MethodType(tf_kpts_decode, m)
+    return model
+
+
+def _tf_inference(self, x: list[torch.Tensor]) -> tuple[torch.Tensor]:
+    """Decode boxes and cls scores for tf object detection."""
+    shape = x[0].shape  # BCHW
+    x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
+    box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
+    if self.dynamic or self.shape != shape:
+        self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
+        self.shape = shape
+    grid_h, grid_w = shape[2], shape[3]
+    grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
+    norm = self.strides / (self.stride[0] * grid_size)
+    dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
+    return torch.cat((dbox, cls.sigmoid()), 1)
+
+
+def tf_kpts_decode(self, bs: int, kpts: torch.Tensor) -> torch.Tensor:
+    """Decode keypoints for tf pose estimation."""
+    ndim = self.kpt_shape[1]
+    # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
+    # Precompute normalization factor to increase numerical stability
+    y = kpts.view(bs, *self.kpt_shape, -1)
+    grid_h, grid_w = self.shape[2], self.shape[3]
+    grid_size = torch.tensor([grid_w, grid_h], device=y.device).reshape(1, 2, 1)
+    norm = self.strides / (self.stride[0] * grid_size)
+    a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * norm
+    if ndim == 3:
+        a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
+    return a.view(bs, self.nk, -1)
+
+
+def onnx2saved_model(
+    onnx_file: str,
+    output_dir: Path,
+    int8: bool = False,
+    images: np.ndarray = None,
+    disable_group_convolution: bool = False,
+    prefix="",
+):
+    """
+    Convert an ONNX model to TensorFlow SavedModel format via onnx2tf.
+
+    Args:
+        onnx_file (str): ONNX file path.
+        output_dir (Path): Output directory path for the SavedModel.
+        int8 (bool, optional): Enable INT8 quantization. Defaults to False.
+        images (np.ndarray, optional): Calibration images for INT8 quantization in BHWC format.
+        disable_group_convolution (bool, optional): Disable group convolution optimization. Defaults to False.
+        prefix (str, optional): Logging prefix. Defaults to "".
+
+    Returns:
+        (keras.Model): Converted Keras model.
+
+    Note:
+        Requires onnx2tf package. Downloads calibration data if INT8 quantization is enabled.
+        Removes temporary files and renames quantized models after conversion.
+    """
+    # Pre-download calibration file to fix https://github.com/PINTO0309/onnx2tf/issues/545
+    onnx2tf_file = Path("calibration_image_sample_data_20x128x128x3_float32.npy")
+    if not onnx2tf_file.exists():
+        attempt_download_asset(f"{onnx2tf_file}.zip", unzip=True, delete=True)
+    np_data = None
+    if int8:
+        tmp_file = output_dir / "tmp_tflite_int8_calibration_images.npy"  # int8 calibration images file
+        if images is not None:
+            output_dir.mkdir()
+            np.save(str(tmp_file), images)  # BHWC
+            np_data = [["images", tmp_file, [[[[0, 0, 0]]]], [[[[255, 255, 255]]]]]]
+
+    import onnx2tf  # scoped for after ONNX export for reduced conflict during import
+
+    LOGGER.info(f"{prefix} starting TFLite export with onnx2tf {onnx2tf.__version__}...")
+    keras_model = onnx2tf.convert(
+        input_onnx_file_path=onnx_file,
+        output_folder_path=str(output_dir),
+        not_use_onnxsim=True,
+        verbosity="error",  # note INT8-FP16 activation bug https://github.com/ultralytics/ultralytics/issues/15873
+        output_integer_quantized_tflite=int8,
+        custom_input_op_name_np_data_path=np_data,
+        enable_batchmatmul_unfold=True and not int8,  # fix lower no. of detected objects on GPU delegate
+        output_signaturedefs=True,  # fix error with Attention block group convolution
+        disable_group_convolution=disable_group_convolution,  # fix error with group convolution
+    )
+
+    # Remove/rename TFLite models
+    if int8:
+        tmp_file.unlink(missing_ok=True)
+        for file in output_dir.rglob("*_dynamic_range_quant.tflite"):
+            file.rename(file.with_name(file.stem.replace("_dynamic_range_quant", "_int8") + file.suffix))
+        for file in output_dir.rglob("*_integer_quant_with_int16_act.tflite"):
+            file.unlink()  # delete extra fp16 activation TFLite files
+    return keras_model
+
+
+def keras2pb(keras_model, file: Path, prefix=""):
+    """
+    Convert a Keras model to TensorFlow GraphDef (.pb) format.
+
+    Args:
+        keras_model (tf_keras): Keras model to convert to frozen graph format.
+        file (Path): Output file path (suffix will be changed to .pb).
+        prefix (str, optional): Logging prefix. Defaults to "".
+
+    Note:
+        Creates a frozen graph by converting variables to constants for inference optimization.
+    """
+    import tensorflow as tf
+    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+
+    LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+    m = tf.function(lambda x: keras_model(x))  # full model
+    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
+    frozen_func = convert_variables_to_constants_v2(m)
+    frozen_func.graph.as_graph_def()
+    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(file.parent), name=file.name, as_text=False)
+
+
+def tflite2edgetpu(tflite_file: str | Path, output_dir: str | Path, prefix: str = ""):
+    """
+    Convert a TensorFlow Lite model to Edge TPU format using the Edge TPU compiler.
+
+    Args:
+        tflite_file (str | Path): Path to the input TensorFlow Lite (.tflite) model file.
+        output_dir (str | Path): Output directory path for the compiled Edge TPU model.
+        prefix (str, optional): Logging prefix. Defaults to "".
+
+    Note:
+        Requires the Edge TPU compiler to be installed. The function compiles the TFLite model
+        for optimal performance on Google's Edge TPU hardware accelerator.
+    """
+    import subprocess
+
+    cmd = (
+        "edgetpu_compiler "
+        f'--out_dir "{output_dir}" '
+        "--show_operations "
+        "--search_delegate "
+        "--delegate_search_step 30 "
+        "--timeout_sec 180 "
+        f'"{tflite_file}"'
+    )
+    LOGGER.info(f"{prefix} running '{cmd}'")
+    subprocess.run(cmd, shell=True)
+
+
+def pb2tfjs(pb_file: str, output_dir: str, half: bool = False, int8: bool = False, prefix: str = ""):
+    """
+    Convert a TensorFlow GraphDef (.pb) model to TensorFlow.js format.
+
+    Args:
+        pb_file (str): Path to the input TensorFlow GraphDef (.pb) model file.
+        output_dir (str): Output directory path for the converted TensorFlow.js model.
+        half (bool, optional): Enable FP16 quantization. Defaults to False.
+        int8 (bool, optional): Enable INT8 quantization. Defaults to False.
+        prefix (str, optional): Logging prefix. Defaults to "".
+
+    Note:
+        Requires tensorflowjs package. Uses tensorflowjs_converter command-line tool for conversion.
+        Handles spaces in file paths and warns if output directory contains spaces.
+    """
+    import subprocess
+
+    import tensorflow as tf
+    import tensorflowjs as tfjs
+
+    LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
+
+    gd = tf.Graph().as_graph_def()  # TF GraphDef
+    with open(pb_file, "rb") as file:
+        gd.ParseFromString(file.read())
+    outputs = ",".join(gd_outputs(gd))
+    LOGGER.info(f"\n{prefix} output node names: {outputs}")
+
+    quantization = "--quantize_float16" if half else "--quantize_uint8" if int8 else ""
+    with spaces_in_path(pb_file) as fpb_, spaces_in_path(output_dir) as f_:  # exporter can not handle spaces in path
+        cmd = (
+            "tensorflowjs_converter "
+            f'--input_format=tf_frozen_model {quantization} --output_node_names={outputs} "{fpb_}" "{f_}"'
+        )
+        LOGGER.info(f"{prefix} running '{cmd}'")
+        subprocess.run(cmd, shell=True)
+
+    if " " in output_dir:
+        LOGGER.warning(f"{prefix} your model may not work correctly with spaces in path '{output_dir}'.")
+
+
+def gd_outputs(gd):
+    """Return TensorFlow GraphDef model output node names."""
+    name_list, input_list = [], []
+    for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
+        name_list.append(node.name)
+        input_list.extend(node.input)
+    return sorted(f"{x}:0" for x in list(set(name_list) - set(input_list)) if not x.startswith("NoOp"))
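Read together, the new module decomposes the old monolithic TensorFlow export path into composable helpers. A hedged end-to-end sketch chaining them, assuming an ONNX model already exists at the hypothetical path shown and that onnx2tf, tensorflow and tensorflowjs are installed:

```python
from pathlib import Path

from ultralytics.utils.export import keras2pb, onnx2saved_model, pb2tfjs

onnx_file = "yolo11n.onnx"                     # hypothetical, e.g. produced earlier by torch2onnx
saved_model_dir = Path("yolo11n_saved_model")  # SavedModel / TFLite output directory

keras_model = onnx2saved_model(onnx_file, saved_model_dir, int8=False, prefix="TensorFlow SavedModel:")
keras2pb(keras_model, Path("yolo11n.pb"), prefix="TensorFlow GraphDef:")
pb2tfjs(pb_file="yolo11n.pb", output_dir="yolo11n_web_model", half=False, int8=False, prefix="TensorFlow.js:")
```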
@@ -231,9 +231,11 @@ class TorchNMS:
             upper_mask = row_idx < col_idx
             ious = ious * upper_mask
             # Zeroing these scores ensures the additional indices would not affect the final results
-            scores[
+            scores_ = scores[sorted_idx]
+            scores_[~((ious >= iou_threshold).sum(0) <= 0)] = 0
+            scores[sorted_idx] = scores_  # update original tensor for NMSModel
             # NOTE: return indices with fixed length to avoid TFLite reshape error
-            pick = torch.topk(
+            pick = torch.topk(scores_, scores_.shape[0]).indices
         return sorted_idx[pick]
 
     @staticmethod
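A small, self-contained illustration of the updated suppression step above: with `ious` masked to its upper triangle, any candidate whose IoU against a higher-scoring candidate reaches the threshold gets its sorted score zeroed, and `topk` over the full length then returns fixed-length indices with suppressed boxes ranked last (toy numbers, not from the library):

```python
import torch

iou_threshold = 0.5
scores = torch.tensor([0.9, 0.8, 0.7])
sorted_idx = scores.argsort(descending=True)         # [0, 1, 2] (already sorted here)
ious = torch.tensor([[0.0, 0.6, 0.1],                 # toy pairwise IoUs, upper triangle only
                     [0.0, 0.0, 0.2],
                     [0.0, 0.0, 0.0]])

scores_ = scores[sorted_idx]
scores_[~((ious >= iou_threshold).sum(0) <= 0)] = 0   # box 1 overlaps box 0 -> score zeroed
pick = torch.topk(scores_, scores_.shape[0]).indices  # fixed-length ranking: [0, 2, 1]
print(sorted_idx[pick])                               # tensor([0, 2, 1]), suppressed box last
```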