ultralytics 8.2.29__tar.gz → 8.2.30__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of ultralytics might be problematic.

Files changed (223)
  1. {ultralytics-8.2.29 → ultralytics-8.2.30}/PKG-INFO +1 -1
  2. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/__init__.py +1 -1
  3. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/augment.py +1 -4
  4. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/build.py +1 -1
  5. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/split_dota.py +5 -4
  6. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/exporter.py +6 -5
  7. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/model.py +4 -5
  8. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/predictor.py +11 -5
  9. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/hub/session.py +1 -1
  10. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/fastsam/prompt.py +1 -1
  11. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/nas/val.py +1 -1
  12. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/modules/tiny_encoder.py +21 -21
  13. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/detect/val.py +8 -8
  14. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/world/train_world.py +10 -11
  15. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/autobackend.py +1 -3
  16. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/modules/block.py +2 -4
  17. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/ai_gym.py +1 -1
  18. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/object_counter.py +1 -1
  19. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/track.py +1 -2
  20. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/utils/gmc.py +1 -1
  21. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/benchmarks.py +16 -28
  22. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/mlflow.py +16 -16
  23. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/wb.py +1 -1
  24. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/checks.py +1 -2
  25. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/plotting.py +3 -1
  26. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics.egg-info/PKG-INFO +1 -1
  27. {ultralytics-8.2.29 → ultralytics-8.2.30}/LICENSE +0 -0
  28. {ultralytics-8.2.29 → ultralytics-8.2.30}/README.md +0 -0
  29. {ultralytics-8.2.29 → ultralytics-8.2.30}/pyproject.toml +0 -0
  30. {ultralytics-8.2.29 → ultralytics-8.2.30}/setup.cfg +0 -0
  31. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/__init__.py +0 -0
  32. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/conftest.py +0 -0
  33. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_cli.py +0 -0
  34. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_cuda.py +0 -0
  35. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_engine.py +0 -0
  36. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_explorer.py +0 -0
  37. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_exports.py +0 -0
  38. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_integrations.py +0 -0
  39. {ultralytics-8.2.29 → ultralytics-8.2.30}/tests/test_python.py +0 -0
  40. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/assets/bus.jpg +0 -0
  41. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/assets/zidane.jpg +0 -0
  42. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/__init__.py +0 -0
  43. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
  44. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
  45. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
  46. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
  47. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
  48. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
  49. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
  50. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/VOC.yaml +0 -0
  51. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
  52. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
  53. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
  54. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
  55. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
  56. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco.yaml +0 -0
  57. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
  58. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco128.yaml +0 -0
  59. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
  60. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
  61. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/coco8.yaml +0 -0
  62. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
  63. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/dota8.yaml +0 -0
  64. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/lvis.yaml +0 -0
  65. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
  66. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
  67. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/signature.yaml +0 -0
  68. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
  69. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/datasets/xView.yaml +0 -0
  70. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/default.yaml +0 -0
  71. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
  72. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
  73. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
  74. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
  75. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
  76. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
  77. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
  78. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
  79. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
  80. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
  81. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
  82. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
  83. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
  84. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
  85. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
  86. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
  87. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
  88. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
  89. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
  90. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
  91. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
  92. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
  93. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
  94. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
  95. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
  96. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
  97. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
  98. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
  99. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
  100. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
  101. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
  102. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/trackers/botsort.yaml +0 -0
  103. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
  104. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/__init__.py +0 -0
  105. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/annotator.py +0 -0
  106. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/base.py +0 -0
  107. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/converter.py +0 -0
  108. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/dataset.py +0 -0
  109. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/explorer/__init__.py +0 -0
  110. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/explorer/explorer.py +0 -0
  111. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/explorer/gui/__init__.py +0 -0
  112. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/explorer/gui/dash.py +0 -0
  113. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/explorer/utils.py +0 -0
  114. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/loaders.py +0 -0
  115. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/data/utils.py +0 -0
  116. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/__init__.py +0 -0
  117. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/results.py +0 -0
  118. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/trainer.py +0 -0
  119. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/tuner.py +0 -0
  120. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/engine/validator.py +0 -0
  121. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/hub/__init__.py +0 -0
  122. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/hub/auth.py +0 -0
  123. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/hub/utils.py +0 -0
  124. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/__init__.py +0 -0
  125. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/fastsam/__init__.py +0 -0
  126. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/fastsam/model.py +0 -0
  127. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/fastsam/predict.py +0 -0
  128. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/fastsam/utils.py +0 -0
  129. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/fastsam/val.py +0 -0
  130. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/nas/__init__.py +0 -0
  131. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/nas/model.py +0 -0
  132. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/nas/predict.py +0 -0
  133. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/rtdetr/__init__.py +0 -0
  134. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/rtdetr/model.py +0 -0
  135. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/rtdetr/predict.py +0 -0
  136. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/rtdetr/train.py +0 -0
  137. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/rtdetr/val.py +0 -0
  138. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/__init__.py +0 -0
  139. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/amg.py +0 -0
  140. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/build.py +0 -0
  141. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/model.py +0 -0
  142. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/modules/__init__.py +0 -0
  143. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/modules/decoders.py +0 -0
  144. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/modules/encoders.py +0 -0
  145. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/modules/sam.py +0 -0
  146. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/modules/transformer.py +0 -0
  147. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/sam/predict.py +0 -0
  148. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/utils/__init__.py +0 -0
  149. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/utils/loss.py +0 -0
  150. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/utils/ops.py +0 -0
  151. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/__init__.py +0 -0
  152. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/classify/__init__.py +0 -0
  153. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/classify/predict.py +0 -0
  154. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/classify/train.py +0 -0
  155. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/classify/val.py +0 -0
  156. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/detect/__init__.py +0 -0
  157. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/detect/predict.py +0 -0
  158. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/detect/train.py +0 -0
  159. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/model.py +0 -0
  160. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/obb/__init__.py +0 -0
  161. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/obb/predict.py +0 -0
  162. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/obb/train.py +0 -0
  163. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/obb/val.py +0 -0
  164. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/pose/__init__.py +0 -0
  165. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/pose/predict.py +0 -0
  166. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/pose/train.py +0 -0
  167. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/pose/val.py +0 -0
  168. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/segment/__init__.py +0 -0
  169. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/segment/predict.py +0 -0
  170. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/segment/train.py +0 -0
  171. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/segment/val.py +0 -0
  172. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/world/__init__.py +0 -0
  173. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/models/yolo/world/train.py +0 -0
  174. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/__init__.py +0 -0
  175. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/modules/__init__.py +0 -0
  176. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/modules/conv.py +0 -0
  177. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/modules/head.py +0 -0
  178. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/modules/transformer.py +0 -0
  179. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/modules/utils.py +0 -0
  180. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/nn/tasks.py +0 -0
  181. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/__init__.py +0 -0
  182. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/analytics.py +0 -0
  183. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/distance_calculation.py +0 -0
  184. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/heatmap.py +0 -0
  185. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/parking_management.py +0 -0
  186. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/queue_management.py +0 -0
  187. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/solutions/speed_estimation.py +0 -0
  188. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/__init__.py +0 -0
  189. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/basetrack.py +0 -0
  190. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/bot_sort.py +0 -0
  191. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/byte_tracker.py +0 -0
  192. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/utils/__init__.py +0 -0
  193. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/utils/kalman_filter.py +0 -0
  194. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/trackers/utils/matching.py +0 -0
  195. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/__init__.py +0 -0
  196. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/autobatch.py +0 -0
  197. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/__init__.py +0 -0
  198. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/base.py +0 -0
  199. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/clearml.py +0 -0
  200. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/comet.py +0 -0
  201. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/dvc.py +0 -0
  202. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/hub.py +0 -0
  203. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/neptune.py +0 -0
  204. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/raytune.py +0 -0
  205. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/callbacks/tensorboard.py +0 -0
  206. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/dist.py +0 -0
  207. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/downloads.py +0 -0
  208. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/errors.py +0 -0
  209. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/files.py +0 -0
  210. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/instance.py +0 -0
  211. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/loss.py +0 -0
  212. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/metrics.py +0 -0
  213. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/ops.py +0 -0
  214. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/patches.py +0 -0
  215. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/tal.py +0 -0
  216. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/torch_utils.py +0 -0
  217. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/triton.py +0 -0
  218. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics/utils/tuner.py +0 -0
  219. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics.egg-info/SOURCES.txt +0 -0
  220. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics.egg-info/dependency_links.txt +0 -0
  221. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics.egg-info/entry_points.txt +0 -0
  222. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics.egg-info/requires.txt +0 -0
  223. {ultralytics-8.2.29 → ultralytics-8.2.30}/ultralytics.egg-info/top_level.txt +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.29
+ Version: 8.2.30
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu

ultralytics/__init__.py
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = "8.2.29"
+ __version__ = "8.2.30"

  import os

ultralytics/data/augment.py
@@ -1114,10 +1114,7 @@ class RandomLoadText:
  pos_labels = set(random.sample(pos_labels, k=self.max_samples))

  neg_samples = min(min(num_classes, self.max_samples) - len(pos_labels), random.randint(*self.neg_samples))
- neg_labels = []
- for i in range(num_classes):
- if i not in pos_labels:
- neg_labels.append(i)
+ neg_labels = [i for i in range(num_classes) if i not in pos_labels]
  neg_labels = random.sample(neg_labels, k=neg_samples)

  sampled_labels = pos_labels + neg_labels

ultralytics/data/build.py
@@ -21,7 +21,7 @@ from ultralytics.data.loaders import (
  autocast_list,
  )
  from ultralytics.data.utils import IMG_FORMATS, PIN_MEMORY, VID_FORMATS
- from ultralytics.utils import LINUX, RANK, colorstr
+ from ultralytics.utils import RANK, colorstr
  from ultralytics.utils.checks import check_file

ultralytics/data/split_dota.py
@@ -86,7 +86,7 @@ def load_yolo_dota(data_root, split="train"):
  return annos


- def get_windows(im_size, crop_sizes=[1024], gaps=[200], im_rate_thr=0.6, eps=0.01):
+ def get_windows(im_size, crop_sizes=(1024,), gaps=(200,), im_rate_thr=0.6, eps=0.01):
  """
  Get the coordinates of windows.

@@ -95,6 +95,7 @@ def get_windows(im_size, crop_sizes=[1024], gaps=[200], im_rate_thr=0.6, eps=0.0
  crop_sizes (List(int)): Crop size of windows.
  gaps (List(int)): Gap between crops.
  im_rate_thr (float): Threshold of windows areas divided by image ares.
+ eps (float): Epsilon value for math operations.
  """
  h, w = im_size
  windows = []
@@ -187,7 +188,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir):
  f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")


- def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=[1024], gaps=[200]):
+ def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=(1024,), gaps=(200,)):
  """
  Split both images and labels.

@@ -217,7 +218,7 @@ def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=[1024
  crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))


- def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
+ def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
  """
  Split train and val set of DOTA.

@@ -247,7 +248,7 @@ def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
  split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps)


- def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
+ def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
  """
  Split test set of DOTA, labels are not included within this set.
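
Note: the signature changes in this file (and the TinyViT defaults later in tiny_encoder.py) replace mutable list defaults such as crop_sizes=[1024] with immutable tuples. A minimal standalone sketch of the pitfall this avoids, with illustrative names rather than the ultralytics code:

def collect_bad(item, bucket=[]):  # mutable default: the same list object is reused across calls
    bucket.append(item)
    return bucket

def collect_good(item, bucket=(1024,)):  # immutable default: cannot accumulate state between calls
    return list(bucket) + [item]

print(collect_bad("a"), collect_bad("b"))    # ['a', 'b'] ['a', 'b'] -- state leaks between calls
print(collect_good("a"), collect_good("b"))  # [1024, 'a'] [1024, 'b']
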

ultralytics/engine/exporter.py
@@ -209,11 +209,12 @@ class Exporter:
  if self.args.optimize:
  assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
  assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
- if edgetpu and not LINUX:
- raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/")
- elif edgetpu and self.args.batch != 1: # see github.com/ultralytics/ultralytics/pull/13420
- LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
- self.args.batch = 1
+ if edgetpu:
+ if not LINUX:
+ raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler")
+ elif self.args.batch != 1: # see github.com/ultralytics/ultralytics/pull/13420
+ LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
+ self.args.batch = 1
  if isinstance(model, WorldModel):
  LOGGER.warning(
  "WARNING ⚠️ YOLOWorld (original version) export is not supported to any format.\n"

ultralytics/engine/model.py
@@ -742,11 +742,10 @@ class Model(nn.Module):

  if hasattr(self.model, "names"):
  return check_class_names(self.model.names)
- else:
- if not self.predictor: # export formats will not have predictor defined until predict() is called
- self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
- self.predictor.setup_model(model=self.model, verbose=False)
- return self.predictor.model.names
+ if not self.predictor: # export formats will not have predictor defined until predict() is called
+ self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
+ self.predictor.setup_model(model=self.model, verbose=False)
+ return self.predictor.model.names

  @property
  def device(self) -> torch.device:

ultralytics/engine/predictor.py
@@ -169,12 +169,18 @@ class BasePredictor:

  def predict_cli(self, source=None, model=None):
  """
- Method used for CLI prediction.
+ Method used for Command Line Interface (CLI) prediction.

- It uses always generator as outputs as not required by CLI mode.
+ This function is designed to run predictions using the CLI. It sets up the source and model, then processes
+ the inputs in a streaming manner. This method ensures that no outputs accumulate in memory by consuming the
+ generator without storing results.
+
+ Note:
+ Do not modify this function or remove the generator. The generator ensures that no outputs are
+ accumulated in memory, which is critical for preventing memory issues during long-running predictions.
  """
  gen = self.stream_inference(source, model)
- for _ in gen: # noqa, running CLI inference without accumulating any outputs (do not modify)
+ for _ in gen: # sourcery skip: remove-empty-nested-block, noqa
  pass

  def setup_source(self, source):
@@ -319,13 +325,13 @@ class BasePredictor:
  frame = self.dataset.count
  else:
  match = re.search(r"frame (\d+)/", s[i])
- frame = int(match.group(1)) if match else None # 0 if frame undetermined
+ frame = int(match[1]) if match else None # 0 if frame undetermined

  self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
  string += "%gx%g " % im.shape[2:]
  result = self.results[i]
  result.save_dir = self.save_dir.__str__() # used in other locations
- string += result.verbose() + f"{result.speed['inference']:.1f}ms"
+ string += f"{result.verbose()}{result.speed['inference']:.1f}ms"

  # Add predictions to image
  if self.args.save or self.args.show:

ultralytics/hub/session.py
@@ -368,5 +368,5 @@ class HUBTrainingSession:
  Returns:
  None
  """
- for data in response.iter_content(chunk_size=1024):
+ for _ in response.iter_content(chunk_size=1024):
  pass # Do nothing with data chunks

ultralytics/models/fastsam/prompt.py
@@ -25,7 +25,7 @@ class FastSAMPrompt:
  def __init__(self, source, results, device="cuda") -> None:
  """Initializes FastSAMPrompt with given source, results and device, and assigns clip for linear assignment."""
  if isinstance(source, (str, Path)) and os.path.isdir(source):
- raise ValueError(f"FastSAM only accepts image paths and PIL Image sources, not directories.")
+ raise ValueError("FastSAM only accepts image paths and PIL Image sources, not directories.")
  self.device = device
  self.results = results
  self.source = source

ultralytics/models/nas/val.py
@@ -17,7 +17,7 @@ class NASValidator(DetectionValidator):
  ultimately producing the final detections.

  Attributes:
- args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU thresholds.
+ args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU.
  lb (torch.Tensor): Optional tensor for multilabel NMS.

  Example:

ultralytics/models/sam/modules/tiny_encoder.py
@@ -383,44 +383,44 @@ class TinyViTBlock(nn.Module):
  """Applies attention-based transformation or padding to input 'x' before passing it through a local
  convolution.
  """
- H, W = self.input_resolution
- B, L, C = x.shape
- assert L == H * W, "input feature has wrong size"
+ h, w = self.input_resolution
+ b, l, c = x.shape
+ assert l == h * w, "input feature has wrong size"
  res_x = x
- if H == self.window_size and W == self.window_size:
+ if h == self.window_size and w == self.window_size:
  x = self.attn(x)
  else:
- x = x.view(B, H, W, C)
- pad_b = (self.window_size - H % self.window_size) % self.window_size
- pad_r = (self.window_size - W % self.window_size) % self.window_size
+ x = x.view(b, h, w, c)
+ pad_b = (self.window_size - h % self.window_size) % self.window_size
+ pad_r = (self.window_size - w % self.window_size) % self.window_size
  padding = pad_b > 0 or pad_r > 0

  if padding:
  x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))

- pH, pW = H + pad_b, W + pad_r
+ pH, pW = h + pad_b, w + pad_r
  nH = pH // self.window_size
  nW = pW // self.window_size
  # Window partition
  x = (
- x.view(B, nH, self.window_size, nW, self.window_size, C)
+ x.view(b, nH, self.window_size, nW, self.window_size, c)
  .transpose(2, 3)
- .reshape(B * nH * nW, self.window_size * self.window_size, C)
+ .reshape(b * nH * nW, self.window_size * self.window_size, c)
  )
  x = self.attn(x)
  # Window reverse
- x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
+ x = x.view(b, nH, nW, self.window_size, self.window_size, c).transpose(2, 3).reshape(b, pH, pW, c)

  if padding:
- x = x[:, :H, :W].contiguous()
+ x = x[:, :h, :w].contiguous()

- x = x.view(B, L, C)
+ x = x.view(b, l, c)

  x = res_x + self.drop_path(x)

- x = x.transpose(1, 2).reshape(B, C, H, W)
+ x = x.transpose(1, 2).reshape(b, c, h, w)
  x = self.local_conv(x)
- x = x.view(B, C, L).transpose(1, 2)
+ x = x.view(b, c, l).transpose(1, 2)

  return x + self.drop_path(self.mlp(x))

@@ -565,10 +565,10 @@ class TinyViT(nn.Module):
  img_size=224,
  in_chans=3,
  num_classes=1000,
- embed_dims=[96, 192, 384, 768],
- depths=[2, 2, 6, 2],
- num_heads=[3, 6, 12, 24],
- window_sizes=[7, 7, 14, 7],
+ embed_dims=(96, 192, 384, 768),
+ depths=(2, 2, 6, 2),
+ num_heads=(3, 6, 12, 24),
+ window_sizes=(7, 7, 14, 7),
  mlp_ratio=4.0,
  drop_rate=0.0,
  drop_path_rate=0.1,
@@ -732,8 +732,8 @@ class TinyViT(nn.Module):
  for i in range(start_i, len(self.layers)):
  layer = self.layers[i]
  x = layer(x)
- B, _, C = x.shape
- x = x.view(B, 64, 64, C)
+ batch, _, channel = x.shape
+ x = x.view(batch, 64, 64, channel)
  x = x.permute(0, 3, 1, 2)
  return self.neck(x)

ultralytics/models/yolo/detect/val.py
@@ -300,22 +300,22 @@ class DetectionValidator(BaseValidator):

  anno = COCO(str(anno_json)) # init annotations api
  pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path)
- eval = COCOeval(anno, pred, "bbox")
+ val = COCOeval(anno, pred, "bbox")
  else:
  from lvis import LVIS, LVISEval

  anno = LVIS(str(anno_json)) # init annotations api
  pred = anno._load_json(str(pred_json)) # init predictions api (must pass string, not Path)
- eval = LVISEval(anno, pred, "bbox")
- eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval
- eval.evaluate()
- eval.accumulate()
- eval.summarize()
+ val = LVISEval(anno, pred, "bbox")
+ val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval
+ val.evaluate()
+ val.accumulate()
+ val.summarize()
  if self.is_lvis:
- eval.print_results() # explicitly call print_results
+ val.print_results() # explicitly call print_results
  # update mAP50-95 and mAP50
  stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = (
- eval.stats[:2] if self.is_coco else [eval.results["AP50"], eval.results["AP"]]
+ val.stats[:2] if self.is_coco else [val.results["AP50"], val.results["AP"]]
  )
  except Exception as e:
  LOGGER.warning(f"{pkg} unable to run: {e}")

ultralytics/models/yolo/world/train_world.py
@@ -54,16 +54,15 @@ class WorldTrainerFromScratch(WorldTrainer):
  batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
  """
  gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
- if mode == "train":
- dataset = [
- build_yolo_dataset(self.args, im_path, batch, self.data, stride=gs, multi_modal=True)
- if isinstance(im_path, str)
- else build_grounding(self.args, im_path["img_path"], im_path["json_file"], batch, stride=gs)
- for im_path in img_path
- ]
- return YOLOConcatDataset(dataset) if len(dataset) > 1 else dataset[0]
- else:
+ if mode != "train":
  return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)
+ dataset = [
+ build_yolo_dataset(self.args, im_path, batch, self.data, stride=gs, multi_modal=True)
+ if isinstance(im_path, str)
+ else build_grounding(self.args, im_path["img_path"], im_path["json_file"], batch, stride=gs)
+ for im_path in img_path
+ ]
+ return YOLOConcatDataset(dataset) if len(dataset) > 1 else dataset[0]

  def get_dataset(self):
  """
@@ -71,7 +70,7 @@ class WorldTrainerFromScratch(WorldTrainer):

  Returns None if data format is not recognized.
  """
- final_data = dict()
+ final_data = {}
  data_yaml = self.args.data
  assert data_yaml.get("train", False) # object365.yaml
  assert data_yaml.get("val", False) # lvis.yaml
@@ -88,7 +87,7 @@ class WorldTrainerFromScratch(WorldTrainer):
  grounding_data = data_yaml[s].get("grounding_data")
  if grounding_data is None:
  continue
- grounding_data = [grounding_data] if not isinstance(grounding_data, list) else grounding_data
+ grounding_data = grounding_data if isinstance(grounding_data, list) else [grounding_data]
  for g in grounding_data:
  assert isinstance(g, dict), f"Grounding data should be provided in dict format, but got {type(g)}"
  final_data[s] += grounding_data

ultralytics/nn/autobackend.py
@@ -320,10 +320,8 @@ class AutoBackend(nn.Module):
  with open(w, "rb") as f:
  gd.ParseFromString(f.read())
  frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
- try: # attempt to retrieve metadata from SavedModel file potentially alongside GraphDef file
+ with contextlib.suppress(StopIteration): # find metadata in SavedModel alongside GraphDef
  metadata = next(Path(w).resolve().parent.rglob(f"{Path(w).stem}_saved_model*/metadata.yaml"))
- except StopIteration:
- pass # no metadata file found

  # TFLite or TFLite Edge TPU
  elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
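
Note: the hunk above replaces a try/except that swallows StopIteration with contextlib.suppress. A minimal standalone sketch of the equivalence, using an illustrative path rather than the ultralytics code:

import contextlib
from pathlib import Path

# Before: manually catch the StopIteration raised when the glob yields nothing.
try:
    metadata = next(Path(".").rglob("metadata.yaml"))
except StopIteration:
    pass  # no metadata file found

# After: contextlib.suppress states the same intent in one block.
with contextlib.suppress(StopIteration):
    metadata = next(Path(".").rglob("metadata.yaml"))
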
ultralytics/nn/modules/block.py
@@ -666,8 +666,7 @@ class CBLinear(nn.Module):

  def forward(self, x):
  """Forward pass through CBLinear layer."""
- outs = self.conv(x).split(self.c2s, dim=1)
- return outs
+ return self.conv(x).split(self.c2s, dim=1)


  class CBFuse(nn.Module):
@@ -682,5 +681,4 @@ class CBFuse(nn.Module):
  """Forward pass through CBFuse layer."""
  target_size = xs[-1].shape[2:]
  res = [F.interpolate(x[self.idx[i]], size=target_size, mode="nearest") for i, x in enumerate(xs[:-1])]
- out = torch.sum(torch.stack(res + xs[-1:]), dim=0)
- return out
+ return torch.sum(torch.stack(res + xs[-1:]), dim=0)

ultralytics/solutions/ai_gym.py
@@ -93,7 +93,7 @@ class AIGym:
  self.stage[ind] = "up"
  self.count[ind] += 1

- elif self.pose_type == "pushup" or self.pose_type == "squat":
+ elif self.pose_type in {"pushup", "squat"}:
  if self.angle[ind] > self.poseup_angle:
  self.stage[ind] = "up"
  if self.angle[ind] < self.posedown_angle and self.stage[ind] == "up":

ultralytics/solutions/object_counter.py
@@ -172,7 +172,7 @@ class ObjectCounter:
  if self.draw_tracks:
  self.annotator.draw_centroid_and_tracks(
  track_line,
- color=self.track_color if self.track_color else colors(int(track_id), True),
+ color=self.track_color or colors(int(track_id), True),
  track_thickness=self.track_thickness,
  )

ultralytics/trackers/track.py
@@ -73,8 +73,7 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
  idx = tracks[:, -1].astype(int)
  predictor.results[i] = predictor.results[i][idx]

- update_args = dict()
- update_args["obb" if is_obb else "boxes"] = torch.as_tensor(tracks[:, :-1])
+ update_args = {"obb" if is_obb else "boxes": torch.as_tensor(tracks[:, :-1])}
  predictor.results[i].update(**update_args)

ultralytics/trackers/utils/gmc.py
@@ -44,7 +44,7 @@ class GMC:
  super().__init__()

  self.method = method
- self.downscale = max(1, int(downscale))
+ self.downscale = max(1, downscale)

  if self.method == "orb":
  self.detector = cv2.FastFeatureDetector_create(20)

ultralytics/utils/benchmarks.py
@@ -208,9 +208,10 @@ class RF100Benchmark:

  return self.ds_names, self.ds_cfg_list

- def fix_yaml(self, path):
+ @staticmethod
+ def fix_yaml(path):
  """
- Function to fix yaml train and val path.
+ Function to fix YAML train and val path.

  Args:
  path (str): YAML file path.
@@ -245,32 +246,19 @@ class RF100Benchmark:
  entries = line.split(" ")
  entries = list(filter(lambda val: val != "", entries))
  entries = [e.strip("\n") for e in entries]
- start_class = False
- for e in entries:
- if e == "all":
- if "(AP)" not in entries:
- if "(AR)" not in entries:
- # parse all
- eval = {}
- eval["class"] = entries[0]
- eval["images"] = entries[1]
- eval["targets"] = entries[2]
- eval["precision"] = entries[3]
- eval["recall"] = entries[4]
- eval["map50"] = entries[5]
- eval["map95"] = entries[6]
- eval_lines.append(eval)
-
- if e in class_names:
- eval = {}
- eval["class"] = entries[0]
- eval["images"] = entries[1]
- eval["targets"] = entries[2]
- eval["precision"] = entries[3]
- eval["recall"] = entries[4]
- eval["map50"] = entries[5]
- eval["map95"] = entries[6]
- eval_lines.append(eval)
+ eval_lines.extend(
+ {
+ "class": entries[0],
+ "images": entries[1],
+ "targets": entries[2],
+ "precision": entries[3],
+ "recall": entries[4],
+ "map50": entries[5],
+ "map95": entries[6],
+ }
+ for e in entries
+ if e in class_names or (e == "all" and "(AP)" not in entries and "(AR)" not in entries)
+ )
  map_val = 0.0
  if len(eval_lines) > 1:
  print("There's more dicts")
ultralytics/utils/callbacks/mlflow.py
@@ -103,22 +103,22 @@ def on_fit_epoch_end(trainer):

  def on_train_end(trainer):
  """Log model artifacts at the end of the training."""
- if mlflow:
- mlflow.log_artifact(str(trainer.best.parent)) # log save_dir/weights directory with best.pt and last.pt
- for f in trainer.save_dir.glob("*"): # log all other files in save_dir
- if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
- mlflow.log_artifact(str(f))
- keep_run_active = os.environ.get("MLFLOW_KEEP_RUN_ACTIVE", "False").lower() == "true"
- if keep_run_active:
- LOGGER.info(f"{PREFIX}mlflow run still alive, remember to close it using mlflow.end_run()")
- else:
- mlflow.end_run()
- LOGGER.debug(f"{PREFIX}mlflow run ended")
-
- LOGGER.info(
- f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n"
- f"{PREFIX}disable with 'yolo settings mlflow=False'"
- )
+ if not mlflow:
+ return
+ mlflow.log_artifact(str(trainer.best.parent)) # log save_dir/weights directory with best.pt and last.pt
+ for f in trainer.save_dir.glob("*"): # log all other files in save_dir
+ if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
+ mlflow.log_artifact(str(f))
+ keep_run_active = os.environ.get("MLFLOW_KEEP_RUN_ACTIVE", "False").lower() == "true"
+ if keep_run_active:
+ LOGGER.info(f"{PREFIX}mlflow run still alive, remember to close it using mlflow.end_run()")
+ else:
+ mlflow.end_run()
+ LOGGER.debug(f"{PREFIX}mlflow run ended")
+
+ LOGGER.info(
+ f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n{PREFIX}disable with 'yolo settings mlflow=False'"
+ )


  callbacks = (

ultralytics/utils/callbacks/wb.py
@@ -19,7 +19,7 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
  """
  Create and log a custom metric visualization to wandb.plot.pr_curve.

- This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall
+ This function crafts a custom metric visualization that mimics the behavior of the default wandb precision-recall
  curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across
  different classes.

ultralytics/utils/checks.py
@@ -434,10 +434,9 @@ def check_torchvision():

  # Extract only the major and minor versions
  v_torch = ".".join(torch.__version__.split("+")[0].split(".")[:2])
- v_torchvision = ".".join(TORCHVISION_VERSION.split("+")[0].split(".")[:2])
-
  if v_torch in compatibility_table:
  compatible_versions = compatibility_table[v_torch]
+ v_torchvision = ".".join(TORCHVISION_VERSION.split("+")[0].split(".")[:2])
  if all(v_torchvision != v for v in compatible_versions):
  print(
  f"WARNING ⚠️ torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n"

ultralytics/utils/plotting.py
@@ -493,7 +493,7 @@ class Annotator:
  angle = 360 - angle
  return angle

- def draw_specific_points(self, keypoints, indices=[2, 5, 7], shape=(640, 640), radius=2, conf_thres=0.25):
+ def draw_specific_points(self, keypoints, indices=None, shape=(640, 640), radius=2, conf_thres=0.25):
  """
  Draw specific keypoints for gym steps counting.

@@ -503,6 +503,8 @@ class Annotator:
  shape (tuple): imgsz for model inference
  radius (int): Keypoint radius value
  """
+ if indices is None:
+ indices = [2, 5, 7]
  for i, k in enumerate(keypoints):
  if i in indices:
  x_coord, y_coord = k[0], k[1]
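
Note: the plotting.py change above applies the usual None-sentinel replacement for a mutable list default. A minimal standalone sketch of the idiom, using an illustrative function rather than the ultralytics code:

def draw_points(keypoints, indices=None):
    # A None default avoids sharing one list object across every call;
    # a fresh list is created whenever the caller omits indices.
    if indices is None:
        indices = [2, 5, 7]
    return [keypoints[i] for i in indices]

print(draw_points(["nose", "eye", "shoulder", "elbow", "hip", "knee", "wrist", "ankle"]))
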
ultralytics.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.29
+ Version: 8.2.30
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu