ultralytics 8.3.11__tar.gz → 8.3.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ultralytics might be problematic. Click here for more details.

Files changed (247) hide show
  1. {ultralytics-8.3.11/ultralytics.egg-info → ultralytics-8.3.13}/PKG-INFO +4 -5
  2. {ultralytics-8.3.11 → ultralytics-8.3.13}/pyproject.toml +3 -4
  3. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_cli.py +4 -1
  4. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_cuda.py +13 -1
  5. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/__init__.py +1 -3
  6. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/__init__.py +2 -35
  7. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/solutions/default.yaml +1 -0
  8. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/exporter.py +9 -1
  9. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/predict.py +79 -50
  10. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/train.py +1 -2
  11. ultralytics-8.3.13/ultralytics/solutions/analytics.py +194 -0
  12. ultralytics-8.3.13/ultralytics/solutions/distance_calculation.py +82 -0
  13. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/object_counter.py +2 -2
  14. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/metrics.py +1 -1
  15. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/plotting.py +14 -15
  16. {ultralytics-8.3.11 → ultralytics-8.3.13/ultralytics.egg-info}/PKG-INFO +4 -5
  17. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics.egg-info/SOURCES.txt +0 -5
  18. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics.egg-info/requires.txt +4 -5
  19. ultralytics-8.3.11/ultralytics/data/explorer/__init__.py +0 -5
  20. ultralytics-8.3.11/ultralytics/data/explorer/explorer.py +0 -460
  21. ultralytics-8.3.11/ultralytics/data/explorer/gui/dash.py +0 -269
  22. ultralytics-8.3.11/ultralytics/data/explorer/utils.py +0 -167
  23. ultralytics-8.3.11/ultralytics/solutions/analytics.py +0 -307
  24. ultralytics-8.3.11/ultralytics/solutions/distance_calculation.py +0 -139
  25. ultralytics-8.3.11/ultralytics/trackers/utils/__init__.py +0 -1
  26. {ultralytics-8.3.11 → ultralytics-8.3.13}/LICENSE +0 -0
  27. {ultralytics-8.3.11 → ultralytics-8.3.13}/README.md +0 -0
  28. {ultralytics-8.3.11 → ultralytics-8.3.13}/setup.cfg +0 -0
  29. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/__init__.py +0 -0
  30. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/conftest.py +0 -0
  31. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_engine.py +0 -0
  32. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_exports.py +0 -0
  33. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_integrations.py +0 -0
  34. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_python.py +0 -0
  35. {ultralytics-8.3.11 → ultralytics-8.3.13}/tests/test_solutions.py +0 -0
  36. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/assets/bus.jpg +0 -0
  37. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/assets/zidane.jpg +0 -0
  38. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
  39. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
  40. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
  41. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
  42. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
  43. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
  44. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
  45. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/VOC.yaml +0 -0
  46. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
  47. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
  48. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
  49. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
  50. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
  51. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco.yaml +0 -0
  52. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
  53. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco128.yaml +0 -0
  54. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
  55. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
  56. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/coco8.yaml +0 -0
  57. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
  58. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/dota8.yaml +0 -0
  59. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/hand-keypoints.yaml +0 -0
  60. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/lvis.yaml +0 -0
  61. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
  62. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
  63. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/signature.yaml +0 -0
  64. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
  65. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/datasets/xView.yaml +0 -0
  66. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/default.yaml +0 -0
  67. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-cls.yaml +0 -0
  68. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-obb.yaml +0 -0
  69. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-pose.yaml +0 -0
  70. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11-seg.yaml +0 -0
  71. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/11/yolo11.yaml +0 -0
  72. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
  73. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
  74. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
  75. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
  76. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
  77. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
  78. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
  79. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
  80. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
  81. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
  82. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
  83. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
  84. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
  85. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
  86. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
  87. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
  88. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
  89. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
  90. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
  91. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
  92. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
  93. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
  94. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
  95. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
  96. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
  97. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
  98. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
  99. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
  100. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
  101. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
  102. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
  103. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
  104. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
  105. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
  106. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
  107. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
  108. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
  109. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
  110. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
  111. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
  112. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/trackers/botsort.yaml +0 -0
  113. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
  114. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/__init__.py +0 -0
  115. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/annotator.py +0 -0
  116. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/augment.py +0 -0
  117. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/base.py +0 -0
  118. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/build.py +0 -0
  119. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/converter.py +0 -0
  120. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/dataset.py +0 -0
  121. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/loaders.py +0 -0
  122. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/split_dota.py +0 -0
  123. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/data/utils.py +0 -0
  124. {ultralytics-8.3.11/ultralytics/data/explorer/gui → ultralytics-8.3.13/ultralytics/engine}/__init__.py +0 -0
  125. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/model.py +0 -0
  126. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/predictor.py +0 -0
  127. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/results.py +0 -0
  128. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/trainer.py +0 -0
  129. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/tuner.py +0 -0
  130. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/engine/validator.py +0 -0
  131. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/hub/__init__.py +0 -0
  132. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/hub/auth.py +0 -0
  133. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/hub/google/__init__.py +0 -0
  134. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/hub/session.py +0 -0
  135. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/hub/utils.py +0 -0
  136. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/__init__.py +0 -0
  137. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/fastsam/__init__.py +0 -0
  138. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/fastsam/model.py +0 -0
  139. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/fastsam/predict.py +0 -0
  140. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/fastsam/utils.py +0 -0
  141. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/fastsam/val.py +0 -0
  142. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/nas/__init__.py +0 -0
  143. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/nas/model.py +0 -0
  144. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/nas/predict.py +0 -0
  145. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/nas/val.py +0 -0
  146. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/rtdetr/__init__.py +0 -0
  147. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/rtdetr/model.py +0 -0
  148. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/rtdetr/predict.py +0 -0
  149. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/rtdetr/train.py +0 -0
  150. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/rtdetr/val.py +0 -0
  151. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/__init__.py +0 -0
  152. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/amg.py +0 -0
  153. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/build.py +0 -0
  154. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/model.py +0 -0
  155. {ultralytics-8.3.11/ultralytics/engine → ultralytics-8.3.13/ultralytics/models/sam/modules}/__init__.py +0 -0
  156. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/blocks.py +0 -0
  157. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/decoders.py +0 -0
  158. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/encoders.py +0 -0
  159. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/memory_attention.py +0 -0
  160. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/sam.py +0 -0
  161. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
  162. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/transformer.py +0 -0
  163. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/sam/modules/utils.py +0 -0
  164. {ultralytics-8.3.11/ultralytics/models/sam/modules → ultralytics-8.3.13/ultralytics/models/utils}/__init__.py +0 -0
  165. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/utils/loss.py +0 -0
  166. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/utils/ops.py +0 -0
  167. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/__init__.py +0 -0
  168. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/__init__.py +0 -0
  169. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/predict.py +0 -0
  170. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/classify/val.py +0 -0
  171. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/__init__.py +0 -0
  172. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/predict.py +0 -0
  173. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/train.py +0 -0
  174. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/detect/val.py +0 -0
  175. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/model.py +0 -0
  176. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/__init__.py +0 -0
  177. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/predict.py +0 -0
  178. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/train.py +0 -0
  179. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/obb/val.py +0 -0
  180. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/__init__.py +0 -0
  181. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/predict.py +0 -0
  182. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/train.py +0 -0
  183. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/pose/val.py +0 -0
  184. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/__init__.py +0 -0
  185. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/predict.py +0 -0
  186. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/train.py +0 -0
  187. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/segment/val.py +0 -0
  188. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/world/__init__.py +0 -0
  189. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/world/train.py +0 -0
  190. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/models/yolo/world/train_world.py +0 -0
  191. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/__init__.py +0 -0
  192. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/autobackend.py +0 -0
  193. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/__init__.py +0 -0
  194. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/activation.py +0 -0
  195. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/block.py +0 -0
  196. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/conv.py +0 -0
  197. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/head.py +0 -0
  198. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/transformer.py +0 -0
  199. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/modules/utils.py +0 -0
  200. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/nn/tasks.py +0 -0
  201. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/__init__.py +0 -0
  202. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/ai_gym.py +0 -0
  203. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/heatmap.py +0 -0
  204. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/parking_management.py +0 -0
  205. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/queue_management.py +0 -0
  206. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/solutions.py +0 -0
  207. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/speed_estimation.py +0 -0
  208. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/solutions/streamlit_inference.py +0 -0
  209. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/__init__.py +0 -0
  210. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/basetrack.py +0 -0
  211. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/bot_sort.py +0 -0
  212. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/byte_tracker.py +0 -0
  213. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/track.py +0 -0
  214. {ultralytics-8.3.11/ultralytics/models → ultralytics-8.3.13/ultralytics/trackers}/utils/__init__.py +0 -0
  215. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/utils/gmc.py +0 -0
  216. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/utils/kalman_filter.py +0 -0
  217. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/trackers/utils/matching.py +0 -0
  218. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/__init__.py +0 -0
  219. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/autobatch.py +0 -0
  220. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/benchmarks.py +0 -0
  221. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/__init__.py +0 -0
  222. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/base.py +0 -0
  223. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/clearml.py +0 -0
  224. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/comet.py +0 -0
  225. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/dvc.py +0 -0
  226. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/hub.py +0 -0
  227. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/mlflow.py +0 -0
  228. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/neptune.py +0 -0
  229. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/raytune.py +0 -0
  230. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/tensorboard.py +0 -0
  231. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/callbacks/wb.py +0 -0
  232. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/checks.py +0 -0
  233. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/dist.py +0 -0
  234. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/downloads.py +0 -0
  235. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/errors.py +0 -0
  236. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/files.py +0 -0
  237. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/instance.py +0 -0
  238. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/loss.py +0 -0
  239. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/ops.py +0 -0
  240. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/patches.py +0 -0
  241. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/tal.py +0 -0
  242. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/torch_utils.py +0 -0
  243. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/triton.py +0 -0
  244. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics/utils/tuner.py +0 -0
  245. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics.egg-info/dependency_links.txt +0 -0
  246. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics.egg-info/entry_points.txt +0 -0
  247. {ultralytics-8.3.11 → ultralytics-8.3.13}/ultralytics.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ultralytics
3
- Version: 8.3.11
3
+ Version: 8.3.13
4
4
  Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -72,10 +72,9 @@ Requires-Dist: keras; extra == "export"
72
72
  Requires-Dist: flatbuffers<100,>=23.5.26; platform_machine == "aarch64" and extra == "export"
73
73
  Requires-Dist: numpy==1.23.5; platform_machine == "aarch64" and extra == "export"
74
74
  Requires-Dist: h5py!=3.11.0; platform_machine == "aarch64" and extra == "export"
75
- Provides-Extra: explorer
76
- Requires-Dist: lancedb; extra == "explorer"
77
- Requires-Dist: duckdb<=0.9.2; extra == "explorer"
78
- Requires-Dist: streamlit; extra == "explorer"
75
+ Provides-Extra: solutions
76
+ Requires-Dist: shapely>=2.0.0; extra == "solutions"
77
+ Requires-Dist: streamlit; extra == "solutions"
79
78
  Provides-Extra: logging
80
79
  Requires-Dist: comet; extra == "logging"
81
80
  Requires-Dist: tensorboard>=2.13.0; extra == "logging"
@@ -107,10 +107,9 @@ export = [
107
107
  "numpy==1.23.5; platform_machine == 'aarch64'", # fix error: `np.bool` was a deprecated alias for the builtin `bool` when using TensorRT models on NVIDIA Jetson
108
108
  "h5py!=3.11.0; platform_machine == 'aarch64'", # fix h5py build issues due to missing aarch64 wheels in 3.11 release
109
109
  ]
110
- explorer = [
111
- "lancedb", # vector search
112
- "duckdb<=0.9.2", # SQL queries, duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181
113
- "streamlit", # visualizing with GUI
110
+ solutions = [
111
+ "shapely>=2.0.0", # shapely for point and polygon data matching
112
+ "streamlit", # for live inference on web browser i.e `yolo streamlit-predict`
114
113
  ]
115
114
  logging = [
116
115
  "comet", # https://docs.ultralytics.com/integrations/comet/
@@ -97,9 +97,12 @@ def test_mobilesam():
97
97
  # Source
98
98
  source = ASSETS / "zidane.jpg"
99
99
 
100
- # Predict a segment based on a point prompt
100
+ # Predict a segment based on a 1D point prompt and 1D labels.
101
101
  model.predict(source, points=[900, 370], labels=[1])
102
102
 
103
+ # Predict a segment based on 3D points and 2D labels (multiple points per object).
104
+ model.predict(source, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]])
105
+
103
106
  # Predict a segment based on a box prompt
104
107
  model.predict(source, bboxes=[439, 437, 524, 709], save=True)
105
108
 
@@ -127,9 +127,21 @@ def test_predict_sam():
127
127
  # Run inference with bboxes prompt
128
128
  model(SOURCE, bboxes=[439, 437, 524, 709], device=0)
129
129
 
130
- # Run inference with points prompt
130
+ # Run inference with no labels
131
+ model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
132
+
133
+ # Run inference with 1D points and 1D labels
131
134
  model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
132
135
 
136
+ # Run inference with 2D points and 1D labels
137
+ model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=0)
138
+
139
+ # Run inference with multiple 2D points and 1D labels
140
+ model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=0)
141
+
142
+ # Run inference with 3D points and 2D labels (multiple points per object)
143
+ model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=0)
144
+
133
145
  # Create SAMPredictor
134
146
  overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
135
147
  predictor = SAMPredictor(overrides=overrides)
@@ -1,6 +1,6 @@
1
1
  # Ultralytics YOLO 🚀, AGPL-3.0 license
2
2
 
3
- __version__ = "8.3.11"
3
+ __version__ = "8.3.13"
4
4
 
5
5
  import os
6
6
 
@@ -8,7 +8,6 @@ import os
8
8
  if not os.environ.get("OMP_NUM_THREADS"):
9
9
  os.environ["OMP_NUM_THREADS"] = "1" # default for reduced CPU utilization during training
10
10
 
11
- from ultralytics.data.explorer.explorer import Explorer
12
11
  from ultralytics.models import NAS, RTDETR, SAM, YOLO, FastSAM, YOLOWorld
13
12
  from ultralytics.utils import ASSETS, SETTINGS
14
13
  from ultralytics.utils.checks import check_yolo as checks
@@ -27,5 +26,4 @@ __all__ = (
27
26
  "checks",
28
27
  "download",
29
28
  "settings",
30
- "Explorer",
31
29
  )
@@ -79,14 +79,11 @@ CLI_HELP_MSG = f"""
79
79
 
80
80
  4. Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)
81
81
  yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
82
-
83
- 5. Explore your datasets using semantic search and SQL with a simple GUI powered by Ultralytics Explorer API
84
- yolo explorer data=data.yaml model=yolo11n.pt
85
82
 
86
- 6. Streamlit real-time webcam inference GUI
83
+ 5. Streamlit real-time webcam inference GUI
87
84
  yolo streamlit-predict
88
85
 
89
- 7. Run special commands:
86
+ 6. Run special commands:
90
87
  yolo help
91
88
  yolo checks
92
89
  yolo version
@@ -546,35 +543,6 @@ def handle_yolo_settings(args: List[str]) -> None:
546
543
  LOGGER.warning(f"WARNING ⚠️ settings error: '{e}'. Please see {url} for help.")
547
544
 
548
545
 
549
- def handle_explorer(args: List[str]):
550
- """
551
- Launches a graphical user interface that provides tools for interacting with and analyzing datasets using the
552
- Ultralytics Explorer API. It checks for the required 'streamlit' package and informs the user that the Explorer
553
- dashboard is loading.
554
-
555
- Args:
556
- args (List[str]): A list of optional command line arguments.
557
-
558
- Examples:
559
- ```bash
560
- yolo explorer data=data.yaml model=yolo11n.pt
561
- ```
562
-
563
- Notes:
564
- - Requires 'streamlit' package version 1.29.0 or higher.
565
- - The function does not take any arguments or return any values.
566
- - It is typically called from the command line interface using the 'yolo explorer' command.
567
- """
568
- checks.check_requirements("streamlit>=1.29.0")
569
- LOGGER.info("💡 Loading Explorer dashboard...")
570
- cmd = ["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"]
571
- new = dict(parse_key_value_pair(a) for a in args)
572
- check_dict_alignment(base={k: DEFAULT_CFG_DICT[k] for k in ["model", "data"]}, custom=new)
573
- for k, v in new.items():
574
- cmd += [k, v]
575
- subprocess.run(cmd)
576
-
577
-
578
546
  def handle_streamlit_inference():
579
547
  """
580
548
  Open the Ultralytics Live Inference Streamlit app for real-time object detection.
@@ -715,7 +683,6 @@ def entrypoint(debug=""):
715
683
  "login": lambda: handle_yolo_hub(args),
716
684
  "logout": lambda: handle_yolo_hub(args),
717
685
  "copy-cfg": copy_default_cfg,
718
- "explorer": lambda: handle_explorer(args[1:]),
719
686
  "streamlit-predict": lambda: handle_streamlit_inference(),
720
687
  }
721
688
  full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
@@ -14,3 +14,4 @@ up_angle: 145.0 # Workouts up_angle for counts, 145.0 is default value. You can
14
14
  down_angle: 90 # Workouts down_angle for counts, 90 is default value. You can change it for different workouts, based on position of keypoints.
15
15
  kpts: [6, 8, 10] # Keypoints for workouts monitoring, i.e. If you want to consider keypoints for pushups that have mostly values of [6, 8, 10].
16
16
  colormap: # Colormap for heatmap, Only OPENCV supported colormaps can be used. By default COLORMAP_PARULA will be used for visualization.
17
+ analytics_type: "line" # Analytics type i.e "line", "pie", "bar" or "area" charts. By default, "line" analytics will be used for processing.
@@ -960,7 +960,15 @@ class Exporter:
960
960
  LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
961
961
  f = str(tflite_model).replace(".tflite", "_edgetpu.tflite") # Edge TPU model
962
962
 
963
- cmd = f'edgetpu_compiler -s -d -k 10 --out_dir "{Path(f).parent}" "{tflite_model}"'
963
+ cmd = (
964
+ "edgetpu_compiler "
965
+ f'--out_dir "{Path(f).parent}" '
966
+ "--show_operations "
967
+ "--search_delegate "
968
+ "--delegate_search_step 30 "
969
+ "--timeout_sec 180 "
970
+ f'"{tflite_model}"'
971
+ )
964
972
  LOGGER.info(f"{prefix} running '{cmd}'")
965
973
  subprocess.run(cmd, shell=True)
966
974
  self._add_tflite_metadata(f)
@@ -213,11 +213,14 @@ class Predictor(BasePredictor):
213
213
  Args:
214
214
  im (torch.Tensor): Preprocessed input image tensor with shape (N, C, H, W).
215
215
  bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
216
- points (np.ndarray | List | None): Points indicating object locations with shape (N, 2), in pixels.
217
- labels (np.ndarray | List | None): Point prompt labels with shape (N,). 1 for foreground, 0 for background.
216
+ points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
217
+ labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
218
218
  masks (np.ndarray | None): Low-res masks from previous predictions with shape (N, H, W). For SAM, H=W=256.
219
219
  multimask_output (bool): Flag to return multiple masks for ambiguous prompts.
220
220
 
221
+ Raises:
222
+ AssertionError: If the number of points don't match the number of labels, in case labels were passed.
223
+
221
224
  Returns:
222
225
  (tuple): Tuple containing:
223
226
  - np.ndarray: Output masks with shape (C, H, W), where C is the number of generated masks.
@@ -232,26 +235,7 @@ class Predictor(BasePredictor):
232
235
  """
233
236
  features = self.get_im_features(im) if self.features is None else self.features
234
237
 
235
- src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
236
- r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
237
- # Transform input prompts
238
- if points is not None:
239
- points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
240
- points = points[None] if points.ndim == 1 else points
241
- # Assuming labels are all positive if users don't pass labels.
242
- if labels is None:
243
- labels = np.ones(points.shape[0])
244
- labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
245
- points *= r
246
- # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
247
- points, labels = points[:, None, :], labels[:, None]
248
- if bboxes is not None:
249
- bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
250
- bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
251
- bboxes *= r
252
- if masks is not None:
253
- masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
254
-
238
+ bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
255
239
  points = (points, labels) if points is not None else None
256
240
  # Embed prompts
257
241
  sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
@@ -269,6 +253,48 @@ class Predictor(BasePredictor):
269
253
  # `d` could be 1 or 3 depends on `multimask_output`.
270
254
  return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
271
255
 
256
    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
        """
        Prepares and transforms the input prompts for processing based on the destination shape.

        Prompts arrive in source-image pixel coordinates and are converted to float tensors on
        ``self.device``, scaled to the model-input resolution, and normalized to the shapes the
        SAM prompt encoder expects.

        Args:
            dst_shape (tuple): The target shape (height, width) for the prompts, i.e. the model input size.
            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or
                (N, num_points, 2), in pixels.
            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points).
                1 for foreground, 0 for background.
            masks (List | np.ndarray, Optional): Low-res masks for the objects, where each mask is a 2D array.

        Raises:
            AssertionError: If the number of points don't match the number of labels, in case labels were passed.

        Returns:
            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks
                (each a torch.Tensor on ``self.device``, or None if not provided).
        """
        # Original (pre-resize) image shape; ``self.batch[1][0]`` is the first source image of the batch.
        src_shape = self.batch[1][0].shape[:2]
        # Scale factor from source-image pixels to model-input pixels. When ``segment_all`` is set the
        # prompts are already in model-input space, so no scaling is applied.
        r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
        # Transform input prompts
        if points is not None:
            points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
            # Promote a single point (2,) to a batch of one (1, 2).
            points = points[None] if points.ndim == 1 else points
            # Assuming labels are all positive if users don't pass labels.
            if labels is None:
                # One label per point, matching every leading dimension of ``points``.
                labels = np.ones(points.shape[:-1])
            labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
            # Per-object point count must agree between points (dim -2) and labels (dim -1).
            assert (
                points.shape[-2] == labels.shape[-1]
            ), f"Number of points {points.shape[-2]} should match number of labels {labels.shape[-1]}."
            points *= r
            if points.ndim == 2:
                # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
                points, labels = points[:, None, :], labels[:, None]
        if bboxes is not None:
            bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
            # Promote a single box (4,) to a batch of one (1, 4).
            bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
            bboxes *= r
        if masks is not None:
            # (N, H, W) --> (N, 1, H, W) channel dimension expected by the prompt encoder.
            masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
        return bboxes, points, labels, masks
297
+
272
298
  def generate(
273
299
  self,
274
300
  im,
@@ -686,34 +712,7 @@ class SAM2Predictor(Predictor):
686
712
  """
687
713
  features = self.get_im_features(im) if self.features is None else self.features
688
714
 
689
- src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
690
- r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
691
- # Transform input prompts
692
- if points is not None:
693
- points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
694
- points = points[None] if points.ndim == 1 else points
695
- # Assuming labels are all positive if users don't pass labels.
696
- if labels is None:
697
- labels = torch.ones(points.shape[0])
698
- labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
699
- points *= r
700
- # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
701
- points, labels = points[:, None], labels[:, None]
702
- if bboxes is not None:
703
- bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
704
- bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
705
- bboxes = bboxes.view(-1, 2, 2) * r
706
- bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
707
- # NOTE: merge "boxes" and "points" into a single "points" input
708
- # (where boxes are added at the beginning) to model.sam_prompt_encoder
709
- if points is not None:
710
- points = torch.cat([bboxes, points], dim=1)
711
- labels = torch.cat([bbox_labels, labels], dim=1)
712
- else:
713
- points, labels = bboxes, bbox_labels
714
- if masks is not None:
715
- masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
716
-
715
+ bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
717
716
  points = (points, labels) if points is not None else None
718
717
 
719
718
  sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
@@ -737,6 +736,36 @@ class SAM2Predictor(Predictor):
737
736
  # `d` could be 1 or 3 depends on `multimask_output`.
738
737
  return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
739
738
 
739
    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
        """
        Prepares and transforms the input prompts for processing based on the destination shape.

        Extends the parent implementation by folding box prompts into the point prompts, since the
        SAM2 prompt encoder consumes boxes as labeled corner points rather than as a separate input.

        Args:
            dst_shape (tuple): The target shape (height, width) for the prompts.
            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or
                (N, num_points, 2), in pixels.
            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points).
                1 for foreground, 0 for background.
            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.

        Raises:
            AssertionError: If the number of points don't match the number of labels, in case labels were passed.

        Returns:
            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks. When boxes
                were given, ``points``/``labels`` already include the box corner points.
        """
        # Parent handles device transfer, coordinate scaling and shape normalization.
        bboxes, points, labels, masks = super()._prepare_prompts(dst_shape, bboxes, points, labels, masks)
        if bboxes is not None:
            # (N, 4) XYXY --> (N, 2, 2): two corner points (top-left, bottom-right) per box.
            bboxes = bboxes.view(-1, 2, 2)
            # Labels 2 and 3 mark box top-left/bottom-right corner points — SAM2 prompt-encoder convention.
            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
            # NOTE: merge "boxes" and "points" into a single "points" input
            # (where boxes are added at the beginning) to model.sam_prompt_encoder
            if points is not None:
                # Requires one box per point group (same leading N) — confirmed by the cat on dim=1.
                points = torch.cat([bboxes, points], dim=1)
                labels = torch.cat([bbox_labels, labels], dim=1)
            else:
                points, labels = bboxes, bbox_labels
        return bboxes, points, labels, masks
768
+
740
769
  def set_image(self, image):
741
770
  """
742
771
  Preprocesses and sets a single image for inference using the SAM2 model.
@@ -8,7 +8,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
8
8
  from ultralytics.engine.trainer import BaseTrainer
9
9
  from ultralytics.models import yolo
10
10
  from ultralytics.nn.tasks import ClassificationModel
11
- from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
11
+ from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
12
12
  from ultralytics.utils.plotting import plot_images, plot_results
13
13
  from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first
14
14
 
@@ -141,7 +141,6 @@ class ClassificationTrainer(BaseTrainer):
141
141
  self.metrics = self.validator(model=f)
142
142
  self.metrics.pop("fitness", None)
143
143
  self.run_callbacks("on_fit_epoch_end")
144
- LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
145
144
 
146
145
  def plot_training_samples(self, batch, ni):
147
146
  """Plots training samples with their annotations."""
@@ -0,0 +1,194 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ from itertools import cycle
4
+
5
+ import cv2
6
+ import matplotlib.pyplot as plt
7
+ import numpy as np
8
+ from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
9
+ from matplotlib.figure import Figure
10
+
11
+ from ultralytics.solutions.solutions import BaseSolution # Import a parent class
12
+
13
+
14
class Analytics(BaseSolution):
    """
    A class to create and update various types of charts (line, bar, pie, area) for visual analytics.

    The chart type is read from the solution config key ``analytics_type``. Each processed frame
    re-renders the figure and returns it as a BGR image suitable for video writing/display.
    """

    def __init__(self, **kwargs):
        """Initialize the Analytics class with the configured chart type and figure state."""
        super().__init__(**kwargs)

        self.type = self.CFG["analytics_type"]  # extract type of analytics
        self.x_label = "Classes" if self.type in {"bar", "pie"} else "Frame#"
        self.y_label = "Total Counts"

        # Predefined data
        self.bg_color = "#00F344"  # background color of frame
        self.fg_color = "#111E68"  # foreground color of frame
        self.title = "Ultralytics Solutions"  # window name
        self.max_points = 45  # maximum points to be drawn on window
        self.fontsize = 25  # text font size for display
        figsize = (19.2, 10.8)  # Set output image size 1920 * 1080
        self.color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])

        self.total_counts = 0  # count variable for storing total counts i.e. for line
        self.clswise_count = {}  # dictionary for classwise counts

        # Ensure line and area chart
        if self.type in {"line", "area"}:
            self.lines = {}
            self.fig = Figure(facecolor=self.bg_color, figsize=figsize)
            self.canvas = FigureCanvas(self.fig)  # Set common axis properties
            self.ax = self.fig.add_subplot(111, facecolor=self.bg_color)
            if self.type == "line":
                (self.line,) = self.ax.plot([], [], color="cyan", linewidth=self.line_width)
        elif self.type in {"bar", "pie"}:
            # Initialize bar or pie plot
            self.fig, self.ax = plt.subplots(figsize=figsize, facecolor=self.bg_color)
            self.canvas = FigureCanvas(self.fig)  # Set common axis properties
            self.ax.set_facecolor(self.bg_color)
            self.color_mapping = {}
            if self.type == "pie":
                # Fix: previous code compared the *builtin* ``type`` to "pie" (always False),
                # so the equal-aspect setting was never applied and pies rendered elliptical.
                self.ax.axis("equal")  # Ensure pie chart is circular

    def process_data(self, im0, frame_number):
        """
        Process the image data, run object tracking, and render the updated chart.

        Args:
            im0 (ndarray): Input image for processing.
            frame_number (int): Video frame # for plotting the data.

        Returns:
            (ndarray): The rendered chart as a BGR image.

        Raises:
            ModuleNotFoundError: If the configured ``analytics_type`` is not one of the supported charts.
        """
        self.extract_tracks(im0)  # Extract tracks

        if self.type == "line":
            # Line chart tracks the per-frame total detection count only.
            self.total_counts += len(self.boxes)
            im0 = self.update_graph(frame_number=frame_number)
            self.total_counts = 0  # reset for the next frame
        elif self.type in {"pie", "bar", "area"}:
            # Class-wise charts need a fresh per-frame histogram of detected class names.
            self.clswise_count = {}
            for cls in self.clss:
                name = self.names[int(cls)]
                self.clswise_count[name] = self.clswise_count.get(name, 0) + 1
            im0 = self.update_graph(frame_number=frame_number, count_dict=self.clswise_count, plot=self.type)
        else:
            raise ModuleNotFoundError(f"{self.type} chart is not supported ❌")
        return im0

    def update_graph(self, frame_number, count_dict=None, plot="line"):
        """
        Update the graph (line, bar, pie or area) with new data for single or multiple classes.

        Args:
            frame_number (int): The current frame number.
            count_dict (dict, optional): Dictionary with class names as keys and counts as values for
                multiple classes. If None, updates a single line graph.
            plot (str): Type of the plot i.e. line, bar, pie or area.

        Returns:
            (ndarray): The rendered chart as a BGR image.
        """
        if count_dict is None:
            # Single line update
            x_data = np.append(self.line.get_xdata(), float(frame_number))
            y_data = np.append(self.line.get_ydata(), float(self.total_counts))

            # Keep only the most recent points so the chart stays readable.
            if len(x_data) > self.max_points:
                x_data, y_data = x_data[-self.max_points :], y_data[-self.max_points :]

            self.line.set_data(x_data, y_data)
            self.line.set_label("Counts")
            self.line.set_color("#7b0068")  # Pink color
            self.line.set_marker("*")
            self.line.set_markersize(self.line_width * 5)
        else:
            labels = list(count_dict.keys())
            counts = list(count_dict.values())
            if plot == "area":
                color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])
                # Multiple lines or area update: recover previous series data from the live axes.
                x_data = self.ax.lines[0].get_xdata() if self.ax.lines else np.array([])
                y_data_dict = {key: np.array([]) for key in count_dict.keys()}
                if self.ax.lines:
                    for line, key in zip(self.ax.lines, count_dict.keys()):
                        y_data_dict[key] = line.get_ydata()

                x_data = np.append(x_data, float(frame_number))
                max_length = len(x_data)
                for key in count_dict.keys():
                    y_data_dict[key] = np.append(y_data_dict[key], float(count_dict[key]))
                    if len(y_data_dict[key]) < max_length:
                        # Zero-pad series that appeared later so every series shares the same x axis.
                        y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])), "constant")
                if len(x_data) > self.max_points:
                    # Drop the oldest sample from every series once the window is full.
                    x_data = x_data[1:]
                    for key in count_dict.keys():
                        y_data_dict[key] = y_data_dict[key][1:]

                self.ax.clear()
                for key, y_data in y_data_dict.items():
                    color = next(color_cycle)
                    self.ax.fill_between(x_data, y_data, color=color, alpha=0.7)
                    self.ax.plot(
                        x_data,
                        y_data,
                        color=color,
                        linewidth=self.line_width,
                        marker="o",
                        markersize=self.line_width * 5,
                        label=f"{key} Data Points",
                    )
            if plot == "bar":
                self.ax.clear()  # clear bar data
                for label in labels:  # Map labels to colors
                    if label not in self.color_mapping:
                        self.color_mapping[label] = next(self.color_cycle)
                colors = [self.color_mapping[label] for label in labels]
                bars = self.ax.bar(labels, counts, color=colors)
                for bar, count in zip(bars, counts):
                    # Annotate each bar with its count just above the bar top.
                    self.ax.text(
                        bar.get_x() + bar.get_width() / 2,
                        bar.get_height(),
                        str(count),
                        ha="center",
                        va="bottom",
                        color=self.fg_color,
                    )
                # Create the legend using labels from the bars
                for bar, label in zip(bars, labels):
                    bar.set_label(label)  # Assign label to each bar
                self.ax.legend(loc="upper left", fontsize=13, facecolor=self.fg_color, edgecolor=self.fg_color)
            if plot == "pie":
                total = sum(counts)
                percentages = [size / total * 100 for size in counts]
                start_angle = 90
                self.ax.clear()

                # Create pie chart and create legend labels with percentages
                wedges, autotexts = self.ax.pie(
                    counts, labels=labels, startangle=start_angle, textprops={"color": self.fg_color}, autopct=None
                )
                legend_labels = [f"{label} ({percentage:.1f}%)" for label, percentage in zip(labels, percentages)]

                # Assign the legend using the wedges and manually created labels
                self.ax.legend(wedges, legend_labels, title="Classes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
                self.fig.subplots_adjust(left=0.1, right=0.75)  # Adjust layout to fit the legend

        # Common plot settings
        self.ax.set_facecolor("#f0f0f0")  # Set to light gray or any other color you like
        self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
        self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
        self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)

        # Add and format legend
        legend = self.ax.legend(loc="upper left", fontsize=13, facecolor=self.bg_color, edgecolor=self.bg_color)
        for text in legend.get_texts():
            text.set_color(self.fg_color)

        # Redraw graph, update view, capture, and display the updated plot
        self.ax.relim()
        self.ax.autoscale_view()
        self.canvas.draw()
        im0 = np.array(self.canvas.renderer.buffer_rgba())
        im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
        self.display_output(im0)

        return im0  # Return the image
@@ -0,0 +1,82 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ import math
4
+
5
+ import cv2
6
+
7
+ from ultralytics.solutions.solutions import BaseSolution # Import a parent class
8
+ from ultralytics.utils.plotting import Annotator, colors
9
+
10
+
11
class DistanceCalculation(BaseSolution):
    """
    A class to calculate distance between two objects in a real-time video stream based on their tracks.

    The user left-clicks two objects to select them; the pixel distance between their centroids is then
    drawn on every subsequent frame. A right-click resets the selection.
    """

    def __init__(self, **kwargs):
        """Initializes the DistanceCalculation class with the given parameters."""
        super().__init__(**kwargs)

        # Mouse event information
        self.left_mouse_count = 0  # number of left clicks registered (selection capped at two objects)
        self.selected_boxes = {}  # maps track_id -> latest box for the user-selected objects
        # Fix: ``calculate()`` extends this list but it was never initialized, which raised an
        # AttributeError the first time two objects were selected.
        self.centroids = []  # centroids of the two selected boxes for the current frame

    def mouse_event_for_distance(self, event, x, y, flags, param):
        """
        Handles mouse events to select regions in a real-time video stream.

        Args:
            event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
            x (int): X-coordinate of the mouse pointer.
            y (int): Y-coordinate of the mouse pointer.
            flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.).
            param (dict): Additional parameters passed to the function.
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            self.left_mouse_count += 1
            if self.left_mouse_count <= 2:
                # Select the (not yet selected) object whose box contains the click point.
                for box, track_id in zip(self.boxes, self.track_ids):
                    if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
                        self.selected_boxes[track_id] = box

        elif event == cv2.EVENT_RBUTTONDOWN:
            # Right click clears the current selection.
            self.selected_boxes = {}
            self.left_mouse_count = 0

    def calculate(self, im0):
        """
        Processes the video frame and calculates the distance between two bounding boxes.

        Args:
            im0 (ndarray): The image frame.

        Returns:
            (ndarray): The processed image frame.
        """
        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
        self.extract_tracks(im0)  # Extract tracks

        # Iterate over bounding boxes, track ids and classes index
        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
            self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])

            # Refresh the stored boxes of the selected objects with their latest positions.
            if len(self.selected_boxes) == 2 and track_id in self.selected_boxes:
                self.selected_boxes[track_id] = box

        if len(self.selected_boxes) == 2:
            # Store user selected boxes in centroids list
            self.centroids.extend(
                [[int((b[0] + b[2]) // 2), int((b[1] + b[3]) // 2)] for b in self.selected_boxes.values()]
            )
            # Calculate pixels distance (hypot avoids overflow/precision issues of manual sqrt of squares)
            pixels_distance = math.hypot(
                self.centroids[0][0] - self.centroids[1][0], self.centroids[0][1] - self.centroids[1][1]
            )
            self.annotator.plot_distance_and_line(pixels_distance, self.centroids)

        self.centroids = []  # reset so centroids are recomputed from fresh boxes next frame

        self.display_output(im0)  # display output with base class function
        cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)

        return im0  # return output image for more usage
@@ -112,13 +112,13 @@ class ObjectCounter(BaseSolution):
112
112
  # Iterate over bounding boxes, track ids and classes index
113
113
  for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
114
114
  # Draw bounding box and counting region
115
- self.annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
115
+ self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
116
116
  self.store_tracking_history(track_id, box) # Store track history
117
117
  self.store_classwise_counts(cls) # store classwise counts in dict
118
118
 
119
119
  # Draw tracks of objects
120
120
  self.annotator.draw_centroid_and_tracks(
121
- self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
121
+ self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
122
122
  )
123
123
 
124
124
  # store previous position of track for object counting