ultralytics 8.1.4__tar.gz → 8.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics was flagged as possibly problematic.

Files changed (204)
  1. {ultralytics-8.1.4/ultralytics.egg-info → ultralytics-8.1.6}/PKG-INFO +2 -2
  2. {ultralytics-8.1.4 → ultralytics-8.1.6}/pyproject.toml +1 -1
  3. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/__init__.py +1 -1
  4. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/__init__.py +2 -1
  5. ultralytics-8.1.6/ultralytics/cfg/datasets/carparts-seg.yaml +43 -0
  6. ultralytics-8.1.6/ultralytics/cfg/datasets/crack-seg.yaml +21 -0
  7. ultralytics-8.1.6/ultralytics/cfg/datasets/package-seg.yaml +21 -0
  8. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/build.py +5 -6
  9. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/converter.py +63 -0
  10. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/explorer/gui/dash.py +23 -3
  11. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/loaders.py +4 -12
  12. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/model.py +2 -2
  13. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/predictor.py +1 -1
  14. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/results.py +3 -1
  15. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/obb/predict.py +3 -2
  16. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/byte_tracker.py +34 -24
  17. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/track.py +6 -4
  18. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/utils/matching.py +14 -6
  19. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/benchmarks.py +6 -2
  20. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/hub.py +1 -1
  21. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/metrics.py +5 -2
  22. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/ops.py +22 -4
  23. {ultralytics-8.1.4 → ultralytics-8.1.6/ultralytics.egg-info}/PKG-INFO +2 -2
  24. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics.egg-info/SOURCES.txt +3 -0
  25. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics.egg-info/requires.txt +1 -1
  26. {ultralytics-8.1.4 → ultralytics-8.1.6}/LICENSE +0 -0
  27. {ultralytics-8.1.4 → ultralytics-8.1.6}/README.md +0 -0
  28. {ultralytics-8.1.4 → ultralytics-8.1.6}/setup.cfg +0 -0
  29. {ultralytics-8.1.4 → ultralytics-8.1.6}/tests/test_cli.py +0 -0
  30. {ultralytics-8.1.4 → ultralytics-8.1.6}/tests/test_cuda.py +0 -0
  31. {ultralytics-8.1.4 → ultralytics-8.1.6}/tests/test_engine.py +0 -0
  32. {ultralytics-8.1.4 → ultralytics-8.1.6}/tests/test_explorer.py +0 -0
  33. {ultralytics-8.1.4 → ultralytics-8.1.6}/tests/test_integrations.py +0 -0
  34. {ultralytics-8.1.4 → ultralytics-8.1.6}/tests/test_python.py +0 -0
  35. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/assets/bus.jpg +0 -0
  36. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/assets/zidane.jpg +0 -0
  37. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
  38. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
  39. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
  40. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
  41. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
  42. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
  43. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
  44. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/VOC.yaml +0 -0
  45. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
  46. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
  47. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco.yaml +0 -0
  48. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
  49. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco128.yaml +0 -0
  50. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
  51. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
  52. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/coco8.yaml +0 -0
  53. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/dota8.yaml +0 -0
  54. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
  55. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
  56. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/datasets/xView.yaml +0 -0
  57. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/default.yaml +0 -0
  58. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
  59. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
  60. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
  61. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
  62. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
  63. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
  64. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
  65. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
  66. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
  67. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
  68. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
  69. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
  70. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
  71. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
  72. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
  73. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
  74. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
  75. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
  76. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
  77. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
  78. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
  79. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
  80. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
  81. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
  82. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
  83. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/trackers/botsort.yaml +0 -0
  84. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
  85. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/__init__.py +0 -0
  86. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/annotator.py +0 -0
  87. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/augment.py +0 -0
  88. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/base.py +0 -0
  89. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/dataset.py +0 -0
  90. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/explorer/__init__.py +0 -0
  91. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/explorer/explorer.py +0 -0
  92. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/explorer/gui/__init__.py +0 -0
  93. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/explorer/utils.py +0 -0
  94. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/split_dota.py +0 -0
  95. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/data/utils.py +0 -0
  96. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/__init__.py +0 -0
  97. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/exporter.py +0 -0
  98. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/trainer.py +0 -0
  99. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/tuner.py +0 -0
  100. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/engine/validator.py +0 -0
  101. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/hub/__init__.py +0 -0
  102. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/hub/auth.py +0 -0
  103. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/hub/session.py +3 -3
  104. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/hub/utils.py +0 -0
  105. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/__init__.py +0 -0
  106. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/fastsam/__init__.py +0 -0
  107. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/fastsam/model.py +0 -0
  108. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/fastsam/predict.py +0 -0
  109. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/fastsam/prompt.py +0 -0
  110. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/fastsam/utils.py +0 -0
  111. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/fastsam/val.py +0 -0
  112. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/nas/__init__.py +0 -0
  113. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/nas/model.py +0 -0
  114. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/nas/predict.py +0 -0
  115. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/nas/val.py +0 -0
  116. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/rtdetr/__init__.py +0 -0
  117. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/rtdetr/model.py +0 -0
  118. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/rtdetr/predict.py +0 -0
  119. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/rtdetr/train.py +0 -0
  120. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/rtdetr/val.py +0 -0
  121. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/__init__.py +0 -0
  122. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/amg.py +0 -0
  123. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/build.py +0 -0
  124. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/model.py +0 -0
  125. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/modules/__init__.py +0 -0
  126. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/modules/decoders.py +0 -0
  127. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/modules/encoders.py +0 -0
  128. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/modules/sam.py +0 -0
  129. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
  130. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/modules/transformer.py +0 -0
  131. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/sam/predict.py +0 -0
  132. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/utils/__init__.py +0 -0
  133. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/utils/loss.py +0 -0
  134. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/utils/ops.py +0 -0
  135. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/__init__.py +0 -0
  136. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/classify/__init__.py +0 -0
  137. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/classify/predict.py +0 -0
  138. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/classify/train.py +0 -0
  139. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/classify/val.py +0 -0
  140. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/detect/__init__.py +0 -0
  141. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/detect/predict.py +0 -0
  142. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/detect/train.py +0 -0
  143. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/detect/val.py +0 -0
  144. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/model.py +0 -0
  145. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/obb/__init__.py +0 -0
  146. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/obb/train.py +0 -0
  147. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/obb/val.py +0 -0
  148. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/pose/__init__.py +0 -0
  149. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/pose/predict.py +0 -0
  150. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/pose/train.py +0 -0
  151. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/pose/val.py +0 -0
  152. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/segment/__init__.py +0 -0
  153. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/segment/predict.py +0 -0
  154. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/segment/train.py +0 -0
  155. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/models/yolo/segment/val.py +0 -0
  156. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/__init__.py +0 -0
  157. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/autobackend.py +0 -0
  158. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/modules/__init__.py +0 -0
  159. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/modules/block.py +0 -0
  160. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/modules/conv.py +0 -0
  161. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/modules/head.py +0 -0
  162. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/modules/transformer.py +0 -0
  163. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/modules/utils.py +0 -0
  164. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/nn/tasks.py +0 -0
  165. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/solutions/__init__.py +0 -0
  166. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/solutions/ai_gym.py +0 -0
  167. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/solutions/distance_calculation.py +0 -0
  168. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/solutions/heatmap.py +0 -0
  169. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/solutions/object_counter.py +0 -0
  170. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/solutions/speed_estimation.py +0 -0
  171. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/__init__.py +0 -0
  172. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/basetrack.py +0 -0
  173. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/bot_sort.py +0 -0
  174. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/utils/__init__.py +0 -0
  175. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/utils/gmc.py +0 -0
  176. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/trackers/utils/kalman_filter.py +0 -0
  177. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/__init__.py +0 -0
  178. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/autobatch.py +0 -0
  179. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/__init__.py +0 -0
  180. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/base.py +0 -0
  181. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/clearml.py +0 -0
  182. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/comet.py +0 -0
  183. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/dvc.py +0 -0
  184. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/mlflow.py +0 -0
  185. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/neptune.py +0 -0
  186. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/raytune.py +0 -0
  187. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/tensorboard.py +0 -0
  188. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/callbacks/wb.py +0 -0
  189. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/checks.py +0 -0
  190. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/dist.py +0 -0
  191. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/downloads.py +0 -0
  192. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/errors.py +0 -0
  193. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/files.py +0 -0
  194. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/instance.py +0 -0
  195. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/loss.py +0 -0
  196. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/patches.py +0 -0
  197. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/plotting.py +0 -0
  198. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/tal.py +0 -0
  199. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/torch_utils.py +0 -0
  200. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/triton.py +0 -0
  201. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics/utils/tuner.py +0 -0
  202. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics.egg-info/dependency_links.txt +0 -0
  203. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics.egg-info/entry_points.txt +0 -0
  204. {ultralytics-8.1.4 → ultralytics-8.1.6}/ultralytics.egg-info/top_level.txt +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.1.4
+ Version: 8.1.6
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -55,7 +55,7 @@ Requires-Dist: mkdocs-material; extra == "dev"
  Requires-Dist: mkdocstrings[python]; extra == "dev"
  Requires-Dist: mkdocs-jupyter; extra == "dev"
  Requires-Dist: mkdocs-redirects; extra == "dev"
- Requires-Dist: mkdocs-ultralytics-plugin>=0.0.38; extra == "dev"
+ Requires-Dist: mkdocs-ultralytics-plugin>=0.0.40; extra == "dev"
  Provides-Extra: export
  Requires-Dist: onnx>=1.12.0; extra == "export"
  Requires-Dist: coremltools>=7.0; platform_system != "Windows" and extra == "export"

pyproject.toml
@@ -93,7 +93,7 @@ dev = [
  "mkdocstrings[python]",
  "mkdocs-jupyter", # for notebooks
  "mkdocs-redirects", # for 301 redirects
- "mkdocs-ultralytics-plugin>=0.0.38", # for meta descriptions and images, dates and authors
+ "mkdocs-ultralytics-plugin>=0.0.40", # for meta descriptions and images, dates and authors
  ]
  export = [
  "onnx>=1.12.0", # ONNX export

ultralytics/__init__.py
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = "8.1.4"
+ __version__ = "8.1.6"

  from ultralytics.data.explorer.explorer import Explorer
  from ultralytics.models import RTDETR, SAM, YOLO

ultralytics/cfg/__init__.py
@@ -51,7 +51,7 @@ TASK2METRIC = {
  "segment": "metrics/mAP50-95(M)",
  "classify": "metrics/accuracy_top1",
  "pose": "metrics/mAP50-95(P)",
- "obb": "metrics/mAP50-95(OBB)",
+ "obb": "metrics/mAP50-95(B)",
  }

  CLI_HELP_MSG = f"""
@@ -396,6 +396,7 @@ def handle_yolo_settings(args: List[str]) -> None:
  def handle_explorer():
  """Open the Ultralytics Explorer GUI."""
  checks.check_requirements("streamlit")
+ LOGGER.info(f"💡 Loading Explorer dashboard...")
  subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"])

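
For context, `handle_explorer` above simply shells out to Streamlit to launch the dashboard; the same Explorer can also be driven from Python via the `Explorer` class exported in `ultralytics/__init__.py`. A minimal sketch, assuming an illustrative dataset and model (not taken from this diff):

    from ultralytics import Explorer

    # Build an embeddings table for a dataset and query it, mirroring what the
    # Streamlit dashboard (data/explorer/gui/dash.py) does behind the scenes.
    exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
    exp.create_embeddings_table()
    similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10)
    print(similar)
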
ultralytics/cfg/datasets/carparts-seg.yaml (new file)
@@ -0,0 +1,43 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Carparts-seg dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/segment/carparts-seg/
+ # Example usage: yolo train data=carparts-seg.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ # └── carparts-seg ← downloads here (132 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/carparts-seg # dataset root dir
+ train: train/images # train images (relative to 'path') 3516 images
+ val: valid/images # val images (relative to 'path') 276 images
+ test: test/images # test images (relative to 'path') 401 images
+
+ # Classes
+ names:
+ 0: back_bumper
+ 1: back_door
+ 2: back_glass
+ 3: back_left_door
+ 4: back_left_light
+ 5: back_light
+ 6: back_right_door
+ 7: back_right_light
+ 8: front_bumper
+ 9: front_door
+ 10: front_glass
+ 11: front_left_door
+ 12: front_left_light
+ 13: front_light
+ 14: front_right_door
+ 15: front_right_light
+ 16: hood
+ 17: left_mirror
+ 18: object
+ 19: right_mirror
+ 20: tailgate
+ 21: trunk
+ 22: wheel
+
+ # Download script/URL (optional)
+ download: https://ultralytics.com/assets/carparts-seg.zip
ultralytics/cfg/datasets/crack-seg.yaml (new file)
@@ -0,0 +1,21 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Crack-seg dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/segment/crack-seg/
+ # Example usage: yolo train data=crack-seg.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ # └── crack-seg ← downloads here (91.2 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/crack-seg # dataset root dir
+ train: train/images # train images (relative to 'path') 3717 images
+ val: valid/images # val images (relative to 'path') 112 images
+ test: test/images # test images (relative to 'path') 200 images
+
+ # Classes
+ names:
+ 0: crack
+
+ # Download script/URL (optional)
+ download: https://ultralytics.com/assets/crack-seg.zip
ultralytics/cfg/datasets/package-seg.yaml (new file)
@@ -0,0 +1,21 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Package-seg dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/segment/package-seg/
+ # Example usage: yolo train data=package-seg.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ # └── package-seg ← downloads here (102 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/package-seg # dataset root dir
+ train: images/train # train images (relative to 'path') 1920 images
+ val: images/val # val images (relative to 'path') 89 images
+ test: test/images # test images (relative to 'path') 188 images
+
+ # Classes
+ names:
+ 0: package
+
+ # Download script/URL (optional)
+ download: https://ultralytics.com/assets/package-seg.zip
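
Each of the three new dataset configs carries its own "Example usage" comment; a minimal Python equivalent of that CLI call, with an illustrative model and epoch count, would be:

    from ultralytics import YOLO

    # Train a segmentation model on one of the new dataset configs; the YAML's
    # download URL is fetched automatically on first use.
    model = YOLO("yolov8n-seg.pt")
    model.train(data="crack-seg.yaml", epochs=100, imgsz=640)
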
ultralytics/data/build.py
@@ -150,13 +150,12 @@ def check_source(source):
  return source, webcam, screenshot, from_img, in_memory, tensor


- def load_inference_source(source=None, imgsz=640, vid_stride=1, buffer=False):
+ def load_inference_source(source=None, vid_stride=1, buffer=False):
  """
  Loads an inference source for object detection and applies necessary transformations.

  Args:
  source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference.
- imgsz (int, optional): The size of the image for inference. Default is 640.
  vid_stride (int, optional): The frame interval for video sources. Default is 1.
  buffer (bool, optional): Determined whether stream frames will be buffered. Default is False.

@@ -172,13 +171,13 @@ def load_inference_source(source=None, imgsz=640, vid_stride=1, buffer=False):
  elif in_memory:
  dataset = source
  elif webcam:
- dataset = LoadStreams(source, imgsz=imgsz, vid_stride=vid_stride, buffer=buffer)
+ dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer)
  elif screenshot:
- dataset = LoadScreenshots(source, imgsz=imgsz)
+ dataset = LoadScreenshots(source)
  elif from_img:
- dataset = LoadPilAndNumpy(source, imgsz=imgsz)
+ dataset = LoadPilAndNumpy(source)
  else:
- dataset = LoadImages(source, imgsz=imgsz, vid_stride=vid_stride)
+ dataset = LoadImages(source, vid_stride=vid_stride)

  # Attach source types to the dataset
  setattr(dataset, "source_type", source_type)
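
After this change the loaders no longer take an image size; callers pass only the source, frame stride and buffering flag, and resizing happens later in the predictor's preprocessing. A minimal sketch of the new call, with an illustrative path:

    from ultralytics.data.build import load_inference_source

    # imgsz is no longer accepted here; images are resized downstream.
    dataset = load_inference_source(source="path/to/images", vid_stride=1, buffer=False)
    print(dataset.source_type)
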
ultralytics/data/converter.py
@@ -474,3 +474,66 @@ def merge_multi_segment(segments):
  nidx = abs(idx[1] - idx[0])
  s.append(segments[i][nidx:])
  return s
+
+
+ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
+ """
+ Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
+ in YOLO format. Generates segmentation data using SAM auto-annotator as needed.
+
+ Args:
+ im_dir (str | Path): Path to image directory to convert.
+ save_dir (str | Path): Path to save the generated labels, labels will be saved
+ into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None.
+ sam_model (str): Segmentation model to use for intermediate segmentation data; optional.
+
+ Notes:
+ The input directory structure assumed for dataset:
+ - im_dir
+ ├─ 001.jpg
+ ├─ ..
+ ├─ NNN.jpg
+ - labels
+ ├─ 001.txt
+ ├─ ..
+ ├─ NNN.txt
+ """
+ from ultralytics.data import YOLODataset
+ from ultralytics.utils.ops import xywh2xyxy
+ from ultralytics.utils import LOGGER
+ from ultralytics import SAM
+ from tqdm import tqdm
+
+ # NOTE: add placeholder to pass class index check
+ dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))
+ if len(dataset.labels[0]["segments"]) > 0: # if it's segment data
+ LOGGER.info("Segmentation labels detected, no need to generate new ones!")
+ return
+
+ LOGGER.info("Detection labels detected, generating segment labels by SAM model!")
+ sam_model = SAM(sam_model)
+ for l in tqdm(dataset.labels, total=len(dataset.labels), desc="Generating segment labels"):
+ h, w = l["shape"]
+ boxes = l["bboxes"]
+ if len(boxes) == 0: # skip empty labels
+ continue
+ boxes[:, [0, 2]] *= w
+ boxes[:, [1, 3]] *= h
+ im = cv2.imread(l["im_file"])
+ sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
+ l["segments"] = sam_results[0].masks.xyn
+
+ save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
+ save_dir.mkdir(parents=True, exist_ok=True)
+ for l in dataset.labels:
+ texts = []
+ lb_name = Path(l["im_file"]).with_suffix(".txt").name
+ txt_file = save_dir / lb_name
+ cls = l["cls"]
+ for i, s in enumerate(l["segments"]):
+ line = (int(cls[i]), *s.reshape(-1))
+ texts.append(("%g " * len(line)).rstrip() % line)
+ if texts:
+ with open(txt_file, "a") as f:
+ f.writelines(text + "\n" for text in texts)
+ LOGGER.info(f"Generated segment labels saved in {save_dir}")
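
A minimal usage sketch for the new converter, following the directory layout from the docstring above (the path is illustrative):

    from ultralytics.data.converter import yolo_bbox2segment

    # Generate polygon labels for an existing detection dataset using SAM;
    # results land in a sibling labels-segment directory by default.
    yolo_bbox2segment(im_dir="path/to/dataset/images", sam_model="sam_b.pt")
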
ultralytics/data/explorer/gui/dash.py
@@ -9,7 +9,7 @@ from ultralytics import Explorer
  from ultralytics.utils import ROOT, SETTINGS
  from ultralytics.utils.checks import check_requirements

- check_requirements(("streamlit>=1.29.0", "streamlit-select>=0.2"))
+ check_requirements(("streamlit>=1.29.0", "streamlit-select>=0.3"))

  import streamlit as st
  from streamlit_select import image_select
@@ -94,6 +94,7 @@ def find_similar_imgs(imgs):
  similar = exp.get_similar(img=imgs, limit=st.session_state.get("limit"), return_type="arrow")
  paths = similar.to_pydict()["im_file"]
  st.session_state["imgs"] = paths
+ st.session_state["res"] = similar


  def similarity_form(selected_imgs):
@@ -137,6 +138,7 @@ def run_sql_query():
  exp = st.session_state["explorer"]
  res = exp.sql_query(query, return_type="arrow")
  st.session_state["imgs"] = res.to_pydict()["im_file"]
+ st.session_state["res"] = res


  def run_ai_query():
@@ -155,6 +157,7 @@ def run_ai_query():
  st.session_state["error"] = "No results found using AI generated query. Try another query or rerun it."
  return
  st.session_state["imgs"] = res["im_file"].to_list()
+ st.session_state["res"] = res


  def reset_explorer():
@@ -195,7 +198,11 @@ def layout():
  if st.session_state.get("error"):
  st.error(st.session_state["error"])
  else:
- imgs = st.session_state.get("imgs") or exp.table.to_lance().to_table(columns=["im_file"]).to_pydict()["im_file"]
+ if st.session_state.get("imgs"):
+ imgs = st.session_state.get("imgs")
+ else:
+ imgs = exp.table.to_lance().to_table(columns=["im_file"]).to_pydict()["im_file"]
+ st.session_state["res"] = exp.table.to_arrow()
  total_imgs, selected_imgs = len(imgs), []
  with col1:
  subcol1, subcol2, subcol3, subcol4, subcol5 = st.columns(5)
@@ -230,17 +237,30 @@ def layout():
  query_form()
  ai_query_form()
  if total_imgs:
+ labels, boxes, masks, kpts, classes = None, None, None, None, None
+ task = exp.model.task
+ if st.session_state.get("display_labels"):
+ labels = st.session_state.get("res").to_pydict()["labels"][start_idx : start_idx + num]
+ boxes = st.session_state.get("res").to_pydict()["bboxes"][start_idx : start_idx + num]
+ masks = st.session_state.get("res").to_pydict()["masks"][start_idx : start_idx + num]
+ kpts = st.session_state.get("res").to_pydict()["keypoints"][start_idx : start_idx + num]
+ classes = st.session_state.get("res").to_pydict()["cls"][start_idx : start_idx + num]
  imgs_displayed = imgs[start_idx : start_idx + num]
  selected_imgs = image_select(
  f"Total samples: {total_imgs}",
  images=imgs_displayed,
  use_container_width=False,
  # indices=[i for i in range(num)] if select_all else None,
+ labels=labels,
+ classes=classes,
+ bboxes=boxes,
+ masks=masks if task == "segment" else None,
+ kpts=kpts if task == "pose" else None,
  )

  with col2:
  similarity_form(selected_imgs)
- # display_labels = st.checkbox("Labels", value=False, key="display_labels")
+ display_labels = st.checkbox("Labels", value=False, key="display_labels")
  utralytics_explorer_docs_callback()

ultralytics/data/loaders.py
@@ -38,7 +38,6 @@ class LoadStreams:

  Attributes:
  sources (str): The source input paths or URLs for the video streams.
- imgsz (int): The image size for processing, defaults to 640.
  vid_stride (int): Video frame-rate stride, defaults to 1.
  buffer (bool): Whether to buffer input streams, defaults to False.
  running (bool): Flag to indicate if the streaming thread is running.
@@ -60,13 +59,12 @@
  __len__: Return the length of the sources object.
  """

- def __init__(self, sources="file.streams", imgsz=640, vid_stride=1, buffer=False):
+ def __init__(self, sources="file.streams", vid_stride=1, buffer=False):
  """Initialize instance variables and check for consistent input stream shapes."""
  torch.backends.cudnn.benchmark = True # faster for fixed-size inference
  self.buffer = buffer # buffer input streams
  self.running = True # running flag for Thread
  self.mode = "stream"
- self.imgsz = imgsz
  self.vid_stride = vid_stride # video frame-rate stride

  sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
@@ -193,7 +191,6 @@ class LoadScreenshots:

  Attributes:
  source (str): The source input indicating which screen to capture.
- imgsz (int): The image size for processing, defaults to 640.
  screen (int): The screen number to capture.
  left (int): The left coordinate for screen capture area.
  top (int): The top coordinate for screen capture area.
@@ -210,7 +207,7 @@
  __next__: Captures the next screenshot and returns it.
  """

- def __init__(self, source, imgsz=640):
+ def __init__(self, source):
  """Source = [screen_number left top width height] (pixels)."""
  check_requirements("mss")
  import mss # noqa
@@ -223,7 +220,6 @@
  left, top, width, height = (int(x) for x in params)
  elif len(params) == 5:
  self.screen, left, top, width, height = (int(x) for x in params)
- self.imgsz = imgsz
  self.mode = "stream"
  self.frame = 0
  self.sct = mss.mss()
@@ -258,7 +254,6 @@ class LoadImages:
  various formats, including single image files, video files, and lists of image and video paths.

  Attributes:
- imgsz (int): Image size, defaults to 640.
  files (list): List of image and video file paths.
  nf (int): Total number of files (images and videos).
  video_flag (list): Flags indicating whether a file is a video (True) or an image (False).
@@ -274,7 +269,7 @@
  _new_video(path): Create a new cv2.VideoCapture object for a given video path.
  """

- def __init__(self, path, imgsz=640, vid_stride=1):
+ def __init__(self, path, vid_stride=1):
  """Initialize the Dataloader and raise FileNotFoundError if file not found."""
  parent = None
  if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line
@@ -298,7 +293,6 @@
  videos = [x for x in files if x.split(".")[-1].lower() in VID_FORMATS]
  ni, nv = len(images), len(videos)

- self.imgsz = imgsz
  self.files = images + videos
  self.nf = ni + nv # number of files
  self.video_flag = [False] * ni + [True] * nv
@@ -377,7 +371,6 @@ class LoadPilAndNumpy:
  Attributes:
  paths (list): List of image paths or autogenerated filenames.
  im0 (list): List of images stored as Numpy arrays.
- imgsz (int): Image size, defaults to 640.
  mode (str): Type of data being processed, defaults to 'image'.
  bs (int): Batch size, equivalent to the length of `im0`.
  count (int): Counter for iteration, initialized at 0 during `__iter__()`.
@@ -386,13 +379,12 @@
  _single_check(im): Validate and format a single image to a Numpy array.
  """

- def __init__(self, im0, imgsz=640):
+ def __init__(self, im0):
  """Initialize PIL and Numpy Dataloader."""
  if not isinstance(im0, list):
  im0 = [im0]
  self.paths = [getattr(im, "filename", f"image{i}.jpg") for i, im in enumerate(im0)]
  self.im0 = [self._single_check(im) for im in im0]
- self.imgsz = imgsz
  self.mode = "image"
  # Generate fake paths
  self.bs = len(self.im0)
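
The same `imgsz` removal applies to LoadStreams, LoadScreenshots, LoadImages and LoadPilAndNumpy alike; the loaders now only enumerate raw frames and leave resizing to the predictor. A minimal sketch of the new constructor, with an illustrative path:

    from ultralytics.data.loaders import LoadImages

    # No imgsz argument anymore; frames are returned at their native size.
    dataset = LoadImages("path/to/images", vid_stride=1)
    print(dataset.nf, "files queued")  # nf = number of images + videos, per the attributes above
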
ultralytics/engine/model.py
@@ -259,8 +259,8 @@ class Model(nn.Module):
  x in sys.argv for x in ("predict", "track", "mode=predict", "mode=track")
  )

- custom = {"conf": 0.25, "save": is_cli} # method defaults
- args = {**self.overrides, **custom, **kwargs, "mode": "predict"} # highest priority args on the right
+ custom = {"conf": 0.25, "save": is_cli, "mode": "predict"} # method defaults
+ args = {**self.overrides, **custom, **kwargs} # highest priority args on the right
  prompts = args.pop("prompts", None) # for SAM-type models

  if not self.predictor:

ultralytics/engine/predictor.py
@@ -226,7 +226,7 @@ class BasePredictor:
  else None
  )
  self.dataset = load_inference_source(
- source=source, imgsz=self.imgsz, vid_stride=self.args.vid_stride, buffer=self.args.stream_buffer
+ source=source, vid_stride=self.args.vid_stride, buffer=self.args.stream_buffer
  )
  self.source_type = self.dataset.source_type
  if not getattr(self, "stream", True) and (
ultralytics/engine/results.py
@@ -115,7 +115,7 @@ class Results(SimpleClass):
  if v is not None:
  return len(v)

- def update(self, boxes=None, masks=None, probs=None):
+ def update(self, boxes=None, masks=None, probs=None, obb=None):
  """Update the boxes, masks, and probs attributes of the Results object."""
  if boxes is not None:
  self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape)
@@ -123,6 +123,8 @@
  self.masks = Masks(masks, self.orig_shape)
  if probs is not None:
  self.probs = probs
+ if obb is not None:
+ self.obb = OBB(obb, self.orig_shape)

  def _apply(self, fn, *args, **kwargs):
  """
ultralytics/models/yolo/obb/predict.py
@@ -45,8 +45,9 @@ class OBBPredictor(DetectionPredictor):

  results = []
  for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
- pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape, xywh=True)
+ rboxes = ops.regularize_rboxes(torch.cat([pred[:, :4], pred[:, -1:]], dim=-1))
+ rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True)
  # xywh, r, conf, cls
- obb = torch.cat([pred[:, :4], pred[:, -1:], pred[:, 4:6]], dim=-1)
+ obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1)
  results.append(Results(orig_img, path=img_path, names=self.model.names, obb=obb))
  return results
ultralytics/trackers/byte_tracker.py
@@ -5,6 +5,8 @@ import numpy as np
  from .basetrack import BaseTrack, TrackState
  from .utils import matching
  from .utils.kalman_filter import KalmanFilterXYAH
+ from ..utils.ops import xywh2ltwh
+ from ..utils import LOGGER


  class STrack(BaseTrack):
@@ -35,18 +37,18 @@ class STrack(BaseTrack):
  activate(kalman_filter, frame_id): Activate a new tracklet.
  re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet.
  update(new_track, frame_id): Update the state of a matched track.
- convert_coords(tlwh): Convert bounding box to x-y-angle-height format.
+ convert_coords(tlwh): Convert bounding box to x-y-aspect-height format.
  tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format.
- tlbr_to_tlwh(tlbr): Convert tlbr bounding box to tlwh format.
- tlwh_to_tlbr(tlwh): Convert tlwh bounding box to tlbr format.
  """

  shared_kalman = KalmanFilterXYAH()

- def __init__(self, tlwh, score, cls):
+ def __init__(self, xywh, score, cls):
  """Initialize new STrack instance."""
  super().__init__()
- self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32)
+ # xywh+idx or xywha+idx
+ assert len(xywh) in [5, 6], f"expected 5 or 6 values but got {len(xywh)}"
+ self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32)
  self.kalman_filter = None
  self.mean, self.covariance = None, None
  self.is_activated = False
@@ -54,7 +56,8 @@
  self.score = score
  self.tracklet_len = 0
  self.cls = cls
- self.idx = tlwh[-1]
+ self.idx = xywh[-1]
+ self.angle = xywh[4] if len(xywh) == 6 else None

  def predict(self):
  """Predicts mean and covariance using Kalman filter."""
@@ -123,6 +126,7 @@
  self.track_id = self.next_id()
  self.score = new_track.score
  self.cls = new_track.cls
+ self.angle = new_track.angle
  self.idx = new_track.idx

  def update(self, new_track, frame_id):
@@ -145,10 +149,11 @@

  self.score = new_track.score
  self.cls = new_track.cls
+ self.angle = new_track.angle
  self.idx = new_track.idx

  def convert_coords(self, tlwh):
- """Convert a bounding box's top-left-width-height format to its x-y-angle-height equivalent."""
+ """Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent."""
  return self.tlwh_to_xyah(tlwh)

  @property
@@ -162,7 +167,7 @@
  return ret

  @property
- def tlbr(self):
+ def xyxy(self):
  """Convert bounding box to format (min x, min y, max x, max y), i.e., (top left, bottom right)."""
  ret = self.tlwh.copy()
  ret[2:] += ret[:2]
@@ -178,19 +183,26 @@
  ret[2] /= ret[3]
  return ret

- @staticmethod
- def tlbr_to_tlwh(tlbr):
- """Converts top-left bottom-right format to top-left width height format."""
- ret = np.asarray(tlbr).copy()
- ret[2:] -= ret[:2]
+ @property
+ def xywh(self):
+ """Get current position in bounding box format (center x, center y, width, height)."""
+ ret = np.asarray(self.tlwh).copy()
+ ret[:2] += ret[2:] / 2
  return ret

- @staticmethod
- def tlwh_to_tlbr(tlwh):
- """Converts tlwh bounding box format to tlbr format."""
- ret = np.asarray(tlwh).copy()
- ret[2:] += ret[:2]
- return ret
+ @property
+ def xywha(self):
+ """Get current position in bounding box format (center x, center y, width, height, angle)."""
+ if self.angle is None:
+ LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.")
+ return self.xywh
+ return np.concatenate([self.xywh, self.angle[None]])
+
+ @property
+ def result(self):
+ """Get current tracking results."""
+ coords = self.xyxy if self.angle is None else self.xywha
+ return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]

  def __repr__(self):
  """Return a string representation of the BYTETracker object with start and end frames and track ID."""
@@ -247,7 +259,7 @@ class BYTETracker:
  removed_stracks = []

  scores = results.conf
- bboxes = results.xyxy
+ bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh
  # Add index
  bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
  cls = results.cls
@@ -349,10 +361,8 @@
  self.removed_stracks.extend(removed_stracks)
  if len(self.removed_stracks) > 1000:
  self.removed_stracks = self.removed_stracks[-999:] # clip remove stracks to 1000 maximum
- return np.asarray(
- [x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx] for x in self.tracked_stracks if x.is_activated],
- dtype=np.float32,
- )
+
+ return np.asarray([x.result for x in self.tracked_stracks if x.is_activated], dtype=np.float32)

  def get_kalmanfilter(self):
  """Returns a Kalman filter object for tracking bounding boxes."""
ultralytics/trackers/track.py
@@ -25,8 +25,6 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
  Raises:
  AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
  """
- if predictor.args.task == "obb":
- raise NotImplementedError("ERROR ❌ OBB task does not support track mode!")
  if hasattr(predictor, "trackers") and persist:
  return

@@ -54,11 +52,12 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
  bs = predictor.dataset.bs
  path, im0s = predictor.batch[:2]

+ is_obb = predictor.args.task == "obb"
  for i in range(bs):
  if not persist and predictor.vid_path[i] != str(predictor.save_dir / Path(path[i]).name): # new video
  predictor.trackers[i].reset()

- det = predictor.results[i].boxes.cpu().numpy()
+ det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
  if len(det) == 0:
  continue
  tracks = predictor.trackers[i].update(det, im0s[i])
@@ -66,7 +65,10 @@
  continue
  idx = tracks[:, -1].astype(int)
  predictor.results[i] = predictor.results[i][idx]
- predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1]))
+
+ update_args = dict()
+ update_args["obb" if is_obb else "boxes"] = torch.as_tensor(tracks[:, :-1])
+ predictor.results[i].update(**update_args)


  def register_tracker(model: object, persist: bool) -> None:
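
Taken together with the `on_predict_start` change above that drops the old NotImplementedError, OBB models can now be used in track mode, with tracked boxes written back through the new `obb=` argument of `Results.update()`. A minimal sketch, with an illustrative weight file and video path:

    from ultralytics import YOLO

    # Oriented-bounding-box tracking is no longer rejected in track mode.
    model = YOLO("yolov8n-obb.pt")
    results = model.track(source="path/to/video.mp4", tracker="bytetrack.yaml")
    for r in results:
        print(r.obb.xywhr if r.obb is not None else None)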