ultralytics 8.2.57__tar.gz → 8.2.59__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (234)
  1. {ultralytics-8.2.57/ultralytics.egg-info → ultralytics-8.2.59}/PKG-INFO +1 -1
  2. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/__init__.py +1 -1
  3. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/__init__.py +2 -2
  4. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/fastsam/prompt.py +7 -10
  5. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/detect/val.py +33 -21
  6. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/obb/val.py +32 -14
  7. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/pose/val.py +44 -11
  8. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/segment/val.py +53 -13
  9. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/modules/block.py +1 -1
  10. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/parking_management.py +17 -15
  11. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/streamlit_inference.py +9 -7
  12. {ultralytics-8.2.57 → ultralytics-8.2.59/ultralytics.egg-info}/PKG-INFO +1 -1
  13. {ultralytics-8.2.57 → ultralytics-8.2.59}/LICENSE +0 -0
  14. {ultralytics-8.2.57 → ultralytics-8.2.59}/README.md +0 -0
  15. {ultralytics-8.2.57 → ultralytics-8.2.59}/pyproject.toml +0 -0
  16. {ultralytics-8.2.57 → ultralytics-8.2.59}/setup.cfg +0 -0
  17. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/__init__.py +0 -0
  18. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/conftest.py +0 -0
  19. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_cli.py +0 -0
  20. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_cuda.py +0 -0
  21. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_engine.py +0 -0
  22. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_explorer.py +0 -0
  23. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_exports.py +0 -0
  24. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_integrations.py +0 -0
  25. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_python.py +0 -0
  26. {ultralytics-8.2.57 → ultralytics-8.2.59}/tests/test_solutions.py +0 -0
  27. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/assets/bus.jpg +0 -0
  28. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/assets/zidane.jpg +0 -0
  29. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
  30. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
  31. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
  32. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
  33. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
  34. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
  35. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
  36. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/VOC.yaml +0 -0
  37. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
  38. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
  39. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
  40. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
  41. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
  42. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco.yaml +0 -0
  43. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
  44. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco128.yaml +0 -0
  45. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
  46. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
  47. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/coco8.yaml +0 -0
  48. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
  49. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/dota8.yaml +0 -0
  50. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/lvis.yaml +0 -0
  51. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
  52. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
  53. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/signature.yaml +0 -0
  54. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
  55. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/datasets/xView.yaml +0 -0
  56. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/default.yaml +0 -0
  57. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
  58. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
  59. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
  60. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
  61. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
  62. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
  63. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
  64. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
  65. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
  66. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
  67. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
  68. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
  69. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
  70. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
  71. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
  72. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
  73. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
  74. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
  75. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
  76. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
  77. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
  78. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
  79. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
  80. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
  81. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
  82. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
  83. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
  84. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
  85. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
  86. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
  87. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
  88. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
  89. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
  90. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
  91. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
  92. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
  93. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
  94. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
  95. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
  96. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
  97. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/trackers/botsort.yaml +0 -0
  98. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
  99. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/__init__.py +0 -0
  100. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/annotator.py +0 -0
  101. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/augment.py +0 -0
  102. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/base.py +0 -0
  103. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/build.py +0 -0
  104. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/converter.py +0 -0
  105. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/dataset.py +0 -0
  106. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/explorer/__init__.py +0 -0
  107. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/explorer/explorer.py +0 -0
  108. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/explorer/gui/__init__.py +0 -0
  109. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/explorer/gui/dash.py +0 -0
  110. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/explorer/utils.py +0 -0
  111. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/loaders.py +0 -0
  112. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/split_dota.py +0 -0
  113. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/data/utils.py +0 -0
  114. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/__init__.py +0 -0
  115. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/exporter.py +0 -0
  116. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/model.py +0 -0
  117. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/predictor.py +0 -0
  118. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/results.py +0 -0
  119. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/trainer.py +0 -0
  120. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/tuner.py +0 -0
  121. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/engine/validator.py +0 -0
  122. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/hub/__init__.py +0 -0
  123. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/hub/auth.py +0 -0
  124. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/hub/session.py +0 -0
  125. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/hub/utils.py +0 -0
  126. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/__init__.py +0 -0
  127. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/fastsam/__init__.py +0 -0
  128. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/fastsam/model.py +0 -0
  129. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/fastsam/predict.py +0 -0
  130. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/fastsam/utils.py +0 -0
  131. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/fastsam/val.py +0 -0
  132. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/nas/__init__.py +0 -0
  133. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/nas/model.py +0 -0
  134. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/nas/predict.py +0 -0
  135. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/nas/val.py +0 -0
  136. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/rtdetr/__init__.py +0 -0
  137. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/rtdetr/model.py +0 -0
  138. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/rtdetr/predict.py +0 -0
  139. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/rtdetr/train.py +0 -0
  140. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/rtdetr/val.py +0 -0
  141. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/__init__.py +0 -0
  142. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/amg.py +0 -0
  143. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/build.py +0 -0
  144. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/model.py +0 -0
  145. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/modules/__init__.py +0 -0
  146. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/modules/decoders.py +0 -0
  147. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/modules/encoders.py +0 -0
  148. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/modules/sam.py +0 -0
  149. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/modules/tiny_encoder.py +0 -0
  150. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/modules/transformer.py +0 -0
  151. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/sam/predict.py +0 -0
  152. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/utils/__init__.py +0 -0
  153. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/utils/loss.py +0 -0
  154. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/utils/ops.py +0 -0
  155. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/__init__.py +0 -0
  156. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/classify/__init__.py +0 -0
  157. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/classify/predict.py +0 -0
  158. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/classify/train.py +0 -0
  159. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/classify/val.py +0 -0
  160. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/detect/__init__.py +0 -0
  161. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/detect/predict.py +0 -0
  162. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/detect/train.py +0 -0
  163. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/model.py +0 -0
  164. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/obb/__init__.py +0 -0
  165. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/obb/predict.py +0 -0
  166. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/obb/train.py +0 -0
  167. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/pose/__init__.py +0 -0
  168. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/pose/predict.py +0 -0
  169. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/pose/train.py +0 -0
  170. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/segment/__init__.py +0 -0
  171. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/segment/predict.py +0 -0
  172. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/segment/train.py +0 -0
  173. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/world/__init__.py +0 -0
  174. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/world/train.py +0 -0
  175. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/models/yolo/world/train_world.py +0 -0
  176. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/__init__.py +0 -0
  177. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/autobackend.py +0 -0
  178. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/modules/__init__.py +0 -0
  179. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/modules/conv.py +0 -0
  180. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/modules/head.py +0 -0
  181. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/modules/transformer.py +0 -0
  182. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/modules/utils.py +0 -0
  183. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/nn/tasks.py +0 -0
  184. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/__init__.py +0 -0
  185. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/ai_gym.py +0 -0
  186. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/analytics.py +0 -0
  187. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/distance_calculation.py +0 -0
  188. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/heatmap.py +0 -0
  189. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/object_counter.py +0 -0
  190. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/queue_management.py +0 -0
  191. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/solutions/speed_estimation.py +0 -0
  192. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/__init__.py +0 -0
  193. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/basetrack.py +0 -0
  194. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/bot_sort.py +0 -0
  195. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/byte_tracker.py +0 -0
  196. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/track.py +0 -0
  197. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/utils/__init__.py +0 -0
  198. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/utils/gmc.py +0 -0
  199. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/utils/kalman_filter.py +0 -0
  200. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/trackers/utils/matching.py +0 -0
  201. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/__init__.py +0 -0
  202. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/autobatch.py +0 -0
  203. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/benchmarks.py +0 -0
  204. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/__init__.py +0 -0
  205. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/base.py +0 -0
  206. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/clearml.py +0 -0
  207. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/comet.py +0 -0
  208. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/dvc.py +0 -0
  209. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/hub.py +0 -0
  210. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/mlflow.py +0 -0
  211. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/neptune.py +0 -0
  212. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/raytune.py +0 -0
  213. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/tensorboard.py +0 -0
  214. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/callbacks/wb.py +0 -0
  215. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/checks.py +0 -0
  216. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/dist.py +0 -0
  217. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/downloads.py +0 -0
  218. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/errors.py +0 -0
  219. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/files.py +0 -0
  220. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/instance.py +0 -0
  221. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/loss.py +0 -0
  222. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/metrics.py +0 -0
  223. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/ops.py +0 -0
  224. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/patches.py +0 -0
  225. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/plotting.py +0 -0
  226. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/tal.py +0 -0
  227. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/torch_utils.py +0 -0
  228. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/triton.py +0 -0
  229. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics/utils/tuner.py +0 -0
  230. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics.egg-info/SOURCES.txt +0 -0
  231. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics.egg-info/dependency_links.txt +0 -0
  232. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics.egg-info/entry_points.txt +0 -0
  233. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics.egg-info/requires.txt +0 -0
  234. {ultralytics-8.2.57 → ultralytics-8.2.59}/ultralytics.egg-info/top_level.txt +0 -0
--- ultralytics-8.2.57/ultralytics.egg-info/PKG-INFO
+++ ultralytics-8.2.59/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.57
+Version: 8.2.59
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.57"
+__version__ = "8.2.59"
 
 import os
 
--- a/ultralytics/cfg/__init__.py
+++ b/ultralytics/cfg/__init__.py
@@ -512,14 +512,14 @@ def handle_yolo_settings(args: List[str]) -> None:
 
 def handle_explorer():
     """Open the Ultralytics Explorer GUI for dataset exploration and analysis."""
-    checks.check_requirements("streamlit")
+    checks.check_requirements("streamlit>=1.29.0")
     LOGGER.info("💡 Loading Explorer dashboard...")
     subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"])
 
 
 def handle_streamlit_inference():
     """Open the Ultralytics Live Inference streamlit app for real time object detection."""
-    checks.check_requirements(["streamlit", "opencv-python", "torch"])
+    checks.check_requirements("streamlit>=1.29.0")
     LOGGER.info("💡 Loading Ultralytics Live Inference app...")
     subprocess.run(["streamlit", "run", ROOT / "solutions/streamlit_inference.py", "--server.headless", "true"])
 
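Both CLI entry points now pin a minimum Streamlit version instead of a bare package name (the inference entry also drops `opencv-python` and `torch`, which ship as core dependencies). A minimal sketch of the same guard; the only assumption beyond the diff is that `check_requirements` accepts PEP 440 specifier strings and attempts an install when the constraint is unmet:

```python
from ultralytics.utils import checks

# Passes silently when streamlit>=1.29.0 is installed, otherwise tries to
# install/upgrade it; a bare "streamlit" accepted any installed version.
checks.check_requirements("streamlit>=1.29.0")
```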
--- a/ultralytics/models/fastsam/prompt.py
+++ b/ultralytics/models/fastsam/prompt.py
@@ -7,6 +7,7 @@ import cv2
 import numpy as np
 import torch
 from PIL import Image
+from torch import Tensor
 
 from ultralytics.utils import TQDM, checks
 
@@ -249,7 +250,7 @@ class FastSAMPrompt:
         ax.imshow(show)
 
     @torch.no_grad()
-    def retrieve(self, model, preprocess, elements, search_text: str, device) -> int:
+    def retrieve(self, model, preprocess, elements, search_text: str, device) -> Tensor:
         """Processes images and text with a model, calculates similarity, and returns softmax score."""
         preprocessed_images = [preprocess(image).to(device) for image in elements]
         tokenized_text = self.clip.tokenize([search_text]).to(device)
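The annotation fix matches what `retrieve` actually returns: a 1-D tensor of softmax scores, one per cropped image, not an `int`. The method body is mostly outside this hunk; a hedged sketch of the standard CLIP scoring pattern the docstring describes, assuming the usual `encode_image`/`encode_text` CLIP model API:

```python
import torch

@torch.no_grad()
def clip_scores(model, images: torch.Tensor, text_tokens: torch.Tensor) -> torch.Tensor:
    """Softmax similarity between a batch of image crops and one text prompt."""
    image_features = model.encode_image(images)     # (N, D)
    text_features = model.encode_text(text_tokens)  # (1, D)
    image_features /= image_features.norm(dim=-1, keepdim=True)  # unit-normalize
    text_features /= text_features.norm(dim=-1, keepdim=True)
    logits = 100.0 * image_features @ text_features.T  # (N, 1) scaled cosine similarity
    return logits[:, 0].softmax(dim=0)                 # one score per crop
```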
@@ -269,19 +270,16 @@ class FastSAMPrompt:
         mask_h, mask_w = annotations[0]["segmentation"].shape
         if ori_w != mask_w or ori_h != mask_h:
             image = image.resize((mask_w, mask_h))
-        cropped_boxes = []
         cropped_images = []
-        not_crop = []
         filter_id = []
         for _, mask in enumerate(annotations):
             if np.sum(mask["segmentation"]) <= 100:
                 filter_id.append(_)
                 continue
             bbox = self._get_bbox_from_mask(mask["segmentation"])  # bbox from mask
-            cropped_boxes.append(self._segment_image(image, bbox))  # save cropped image
-            cropped_images.append(bbox)  # save cropped image bbox
+            cropped_images.append(self._segment_image(image, bbox))  # save cropped image
 
-        return cropped_boxes, cropped_images, not_crop, filter_id, annotations
+        return cropped_images, filter_id, annotations
 
     def box_prompt(self, bbox):
         """Modifies the bounding box properties and calculates IoU between masks and bounding box."""
@@ -341,11 +339,10 @@ class FastSAMPrompt:
         """Processes a text prompt, applies it to existing results and returns the updated results."""
         if self.results[0].masks is not None:
             format_results = self._format_results(self.results[0], 0)
-            cropped_boxes, cropped_images, not_crop, filter_id, annotations = self._crop_image(format_results)
+            cropped_images, filter_id, annotations = self._crop_image(format_results)
             clip_model, preprocess = self.clip.load("ViT-B/32", device=self.device)
-            scores = self.retrieve(clip_model, preprocess, cropped_boxes, text, device=self.device)
-            max_idx = scores.argsort()
-            max_idx = max_idx[-1]
+            scores = self.retrieve(clip_model, preprocess, cropped_images, text, device=self.device)
+            max_idx = torch.argmax(scores)
             max_idx += sum(np.array(filter_id) <= int(max_idx))
             self.results[0].masks.data = torch.tensor(np.array([annotations[max_idx]["segmentation"]]))
         return self.results
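The `argsort` → `argmax` change is behavior-preserving for 1-D scores: the last index of an ascending argsort is exactly the argmax, so the two-step dance collapses into one call. A quick check:

```python
import torch

scores = torch.tensor([0.1, 0.7, 0.2])
assert scores.argsort()[-1] == torch.argmax(scores)  # both select index 1
```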
--- a/ultralytics/models/yolo/detect/val.py
+++ b/ultralytics/models/yolo/detect/val.py
@@ -41,6 +41,11 @@ class DetectionValidator(BaseValidator):
         self.iouv = torch.linspace(0.5, 0.95, 10)  # IoU vector for mAP@0.5:0.95
         self.niou = self.iouv.numel()
         self.lb = []  # for autolabelling
+        if self.args.save_hybrid:
+            LOGGER.warning(
+                "WARNING ⚠️ 'save_hybrid=True' will append ground truth to predictions for autolabelling.\n"
+                "WARNING ⚠️ 'save_hybrid=True' will cause incorrect mAP.\n"
+            )
 
     def preprocess(self, batch):
         """Preprocesses batch of images for YOLO training."""
@@ -53,14 +58,10 @@ class DetectionValidator(BaseValidator):
             height, width = batch["img"].shape[2:]
             nb = len(batch["img"])
             bboxes = batch["bboxes"] * torch.tensor((width, height, width, height), device=self.device)
-            self.lb = (
-                [
-                    torch.cat([batch["cls"][batch["batch_idx"] == i], bboxes[batch["batch_idx"] == i]], dim=-1)
-                    for i in range(nb)
-                ]
-                if self.args.save_hybrid
-                else []
-            )  # for autolabelling
+            self.lb = [
+                torch.cat([batch["cls"][batch["batch_idx"] == i], bboxes[batch["batch_idx"] == i]], dim=-1)
+                for i in range(nb)
+            ]
 
         return batch
 
@@ -159,8 +160,12 @@ class DetectionValidator(BaseValidator):
             if self.args.save_json:
                 self.pred_to_json(predn, batch["im_file"][si])
             if self.args.save_txt:
-                file = self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt'
-                self.save_one_txt(predn, self.args.save_conf, pbatch["ori_shape"], file)
+                self.save_one_txt(
+                    predn,
+                    self.args.save_conf,
+                    pbatch["ori_shape"],
+                    self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
+                )
 
     def finalize_metrics(self, *args, **kwargs):
         """Set final values for metrics speed and confusion matrix."""
@@ -202,13 +207,18 @@ class DetectionValidator(BaseValidator):
         Return correct prediction matrix.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class.
-            labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                Each label is of the format: class, x1, y1, x2, y2.
+            detections (torch.Tensor): Tensor of shape (N, 6) representing detections where each detection is
+                (x1, y1, x2, y2, conf, class).
+            gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground-truth bounding box coordinates. Each
+                bounding box is of the format: (x1, y1, x2, y2).
+            gt_cls (torch.Tensor): Tensor of shape (M,) representing target class indices.
 
         Returns:
-            (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+            (torch.Tensor): Correct prediction matrix of shape (N, 10) for 10 IoU levels.
+
+        Note:
+            The function does not return any value directly usable for metrics calculation. Instead, it provides an
+            intermediate representation used for evaluating predictions against ground truth.
         """
         iou = box_iou(gt_bboxes, detections[:, :4])
         return self.match_predictions(detections[:, 5], gt_cls, iou)
@@ -255,12 +265,14 @@ class DetectionValidator(BaseValidator):
 
     def save_one_txt(self, predn, save_conf, shape, file):
         """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
-        gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
-        for *xyxy, conf, cls in predn.tolist():
-            xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
-            with open(file, "a") as f:
-                f.write(("%g " * len(line)).rstrip() % line + "\n")
+        from ultralytics.engine.results import Results
+
+        Results(
+            np.zeros((shape[0], shape[1]), dtype=np.uint8),
+            path=None,
+            names=self.names,
+            boxes=predn[:, :6],
+        ).save_txt(file, save_conf=save_conf)
 
     def pred_to_json(self, predn, filename):
         """Serialize YOLO predictions to COCO json format."""
--- a/ultralytics/models/yolo/obb/val.py
+++ b/ultralytics/models/yolo/obb/val.py
@@ -52,17 +52,29 @@ class OBBValidator(DetectionValidator):
 
     def _process_batch(self, detections, gt_bboxes, gt_cls):
         """
-        Return correct prediction matrix.
+        Perform computation of the correct prediction matrix for a batch of detections and ground truth bounding boxes.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 7] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class, angle.
-            gt_bboxes (torch.Tensor): Tensor of shape [M, 5] representing rotated boxes.
-                Each box is of the format: x1, y1, x2, y2, angle.
-            labels (torch.Tensor): Tensor of shape [M] representing labels.
+            detections (torch.Tensor): A tensor of shape (N, 7) representing the detected bounding boxes and associated
+                data. Each detection is represented as (x1, y1, x2, y2, conf, class, angle).
+            gt_bboxes (torch.Tensor): A tensor of shape (M, 5) representing the ground truth bounding boxes. Each box is
+                represented as (x1, y1, x2, y2, angle).
+            gt_cls (torch.Tensor): A tensor of shape (M,) representing class labels for the ground truth bounding boxes.
 
         Returns:
-            (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+            (torch.Tensor): The correct prediction matrix with shape (N, 10), which includes 10 IoU (Intersection over
+                Union) levels for each detection, indicating the accuracy of predictions compared to the ground truth.
+
+        Example:
+            ```python
+            detections = torch.rand(100, 7)  # 100 sample detections
+            gt_bboxes = torch.rand(50, 5)  # 50 sample ground truth boxes
+            gt_cls = torch.randint(0, 5, (50,))  # 50 ground truth class labels
+            correct_matrix = OBBValidator._process_batch(detections, gt_bboxes, gt_cls)
+            ```
+
+        Note:
+            This method relies on `batch_probiou` to calculate IoU between detections and ground truth bounding boxes.
         """
         iou = batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
         return self.match_predictions(detections[:, 5], gt_cls, iou)
@@ -118,13 +130,19 @@ class OBBValidator(DetectionValidator):
 
     def save_one_txt(self, predn, save_conf, shape, file):
         """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
-        gn = torch.tensor(shape)[[1, 0]]  # normalization gain whwh
-        for *xywh, conf, cls, angle in predn.tolist():
-            xywha = torch.tensor([*xywh, angle]).view(1, 5)
-            xyxyxyxy = (ops.xywhr2xyxyxyxy(xywha) / gn).view(-1).tolist()  # normalized xywh
-            line = (cls, *xyxyxyxy, conf) if save_conf else (cls, *xyxyxyxy)  # label format
-            with open(file, "a") as f:
-                f.write(("%g " * len(line)).rstrip() % line + "\n")
+        import numpy as np
+
+        from ultralytics.engine.results import Results
+
+        rboxes = torch.cat([predn[:, :4], predn[:, -1:]], dim=-1)
+        # xywh, r, conf, cls
+        obb = torch.cat([rboxes, predn[:, 4:6]], dim=-1)
+        Results(
+            np.zeros((shape[0], shape[1]), dtype=np.uint8),
+            path=None,
+            names=self.names,
+            obb=obb,
+        ).save_txt(file, save_conf=save_conf)
 
     def eval_json(self, stats):
         """Evaluates YOLO output in JSON format and returns performance statistics."""
--- a/ultralytics/models/yolo/pose/val.py
+++ b/ultralytics/models/yolo/pose/val.py
@@ -147,24 +147,45 @@ class PoseValidator(DetectionValidator):
             # Save
             if self.args.save_json:
                 self.pred_to_json(predn, batch["im_file"][si])
-            # if self.args.save_txt:
-            #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
+            if self.args.save_txt:
+                self.save_one_txt(
+                    predn,
+                    pred_kpts,
+                    self.args.save_conf,
+                    pbatch["ori_shape"],
+                    self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
+                )
 
     def _process_batch(self, detections, gt_bboxes, gt_cls, pred_kpts=None, gt_kpts=None):
         """
-        Return correct prediction matrix.
+        Return correct prediction matrix by computing Intersection over Union (IoU) between detections and ground truth.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class.
-            labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                Each label is of the format: class, x1, y1, x2, y2.
-            pred_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing predicted keypoints.
-                51 corresponds to 17 keypoints each with 3 values.
-            gt_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing ground truth keypoints.
+            detections (torch.Tensor): Tensor with shape (N, 6) representing detection boxes and scores, where each
+                detection is of the format (x1, y1, x2, y2, conf, class).
+            gt_bboxes (torch.Tensor): Tensor with shape (M, 4) representing ground truth bounding boxes, where each
+                box is of the format (x1, y1, x2, y2).
+            gt_cls (torch.Tensor): Tensor with shape (M,) representing ground truth class indices.
+            pred_kpts (torch.Tensor | None): Optional tensor with shape (N, 51) representing predicted keypoints, where
+                51 corresponds to 17 keypoints each having 3 values.
+            gt_kpts (torch.Tensor | None): Optional tensor with shape (N, 51) representing ground truth keypoints.
 
         Returns:
-            torch.Tensor: Correct prediction matrix of shape [N, 10] for 10 IoU levels.
+            torch.Tensor: A tensor with shape (N, 10) representing the correct prediction matrix for 10 IoU levels,
+                where N is the number of detections.
+
+        Example:
+            ```python
+            detections = torch.rand(100, 6)  # 100 predictions: (x1, y1, x2, y2, conf, class)
+            gt_bboxes = torch.rand(50, 4)  # 50 ground truth boxes: (x1, y1, x2, y2)
+            gt_cls = torch.randint(0, 2, (50,))  # 50 ground truth class indices
+            pred_kpts = torch.rand(100, 51)  # 100 predicted keypoints
+            gt_kpts = torch.rand(50, 51)  # 50 ground truth keypoints
+            correct_preds = _process_batch(detections, gt_bboxes, gt_cls, pred_kpts, gt_kpts)
+            ```
+
+        Note:
+            `0.53` scale factor used in area computation is referenced from
+            https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384.
         """
         if pred_kpts is not None and gt_kpts is not None:
             # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
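The `0.53` in the Note enters through the object area handed to keypoint OKS matching. A hedged sketch of how that area is typically derived from the ground-truth boxes before the keypoint IoU call; the `kpt_iou` import location and signature are assumptions based on `ultralytics.utils.metrics`:

```python
import torch
from ultralytics.utils import ops
from ultralytics.utils.metrics import kpt_iou  # assumed import location

def pose_iou(gt_bboxes, gt_kpts, pred_kpts, sigma):
    # Scale box area by 0.53 so OKS tolerances match the xtcocotools reference.
    area = ops.xyxy2xywh(gt_bboxes)[:, 2:].prod(1) * 0.53
    return kpt_iou(gt_kpts, pred_kpts, sigma=sigma, area=area)
```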
@@ -202,6 +223,18 @@ class PoseValidator(DetectionValidator):
             on_plot=self.on_plot,
         )  # pred
 
+    def save_one_txt(self, predn, pred_kpts, save_conf, shape, file):
+        """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
+        from ultralytics.engine.results import Results
+
+        Results(
+            np.zeros((shape[0], shape[1]), dtype=np.uint8),
+            path=None,
+            names=self.names,
+            boxes=predn[:, :6],
+            keypoints=pred_kpts,
+        ).save_txt(file, save_conf=save_conf)
+
     def pred_to_json(self, predn, filename):
         """Converts YOLO predictions to COCO JSON format."""
         stem = Path(filename).stem
--- a/ultralytics/models/yolo/segment/val.py
+++ b/ultralytics/models/yolo/segment/val.py
@@ -48,9 +48,8 @@ class SegmentationValidator(DetectionValidator):
         self.plot_masks = []
         if self.args.save_json:
             check_requirements("pycocotools>=2.0.6")
-            self.process = ops.process_mask_upsample  # more accurate
-        else:
-            self.process = ops.process_mask  # faster
+        # more accurate vs faster
+        self.process = ops.process_mask_upsample if self.args.save_json or self.args.save_txt else ops.process_mask
         self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
 
     def get_desc(self):
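Net effect: mask serialization paths (`save_json`, and the `save_txt` support added below) always get the more accurate upsampled masks, while plain validation keeps the faster path. Spelled out, the conditional expression is equivalent to this sketch:

```python
from ultralytics.utils import ops

def choose_mask_processor(save_json: bool, save_txt: bool):
    """Equivalent if/else expansion of the one-line selection above."""
    if save_json or save_txt:
        return ops.process_mask_upsample  # more accurate, slower
    return ops.process_mask  # faster
```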
@@ -148,14 +147,23 @@ class SegmentationValidator(DetectionValidator):
 
             # Save
             if self.args.save_json:
-                pred_masks = ops.scale_image(
-                    pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
+                self.pred_to_json(
+                    predn,
+                    batch["im_file"][si],
+                    ops.scale_image(
+                        pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
+                        pbatch["ori_shape"],
+                        ratio_pad=batch["ratio_pad"][si],
+                    ),
+                )
+            if self.args.save_txt:
+                self.save_one_txt(
+                    predn,
+                    pred_masks,
+                    self.args.save_conf,
                     pbatch["ori_shape"],
-                    ratio_pad=batch["ratio_pad"][si],
+                    self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
                 )
-                self.pred_to_json(predn, batch["im_file"][si], pred_masks)
-            # if self.args.save_txt:
-            #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
 
     def finalize_metrics(self, *args, **kwargs):
         """Sets speed and confusion matrix for evaluation metrics."""
@@ -164,14 +172,34 @@ class SegmentationValidator(DetectionValidator):
 
     def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
         """
-        Return correct prediction matrix.
+        Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
 
         Args:
-            detections (array[N, 6]), x1, y1, x2, y2, conf, class
-            labels (array[M, 5]), class, x1, y1, x2, y2
+            detections (torch.Tensor): Tensor of shape (N, 6) representing detected bounding boxes and
+                associated confidence scores and class indices. Each row is of the format [x1, y1, x2, y2, conf, class].
+            gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground truth bounding box coordinates.
+                Each row is of the format [x1, y1, x2, y2].
+            gt_cls (torch.Tensor): Tensor of shape (M,) representing ground truth class indices.
+            pred_masks (torch.Tensor | None): Tensor representing predicted masks, if available. The shape should
+                match the ground truth masks.
+            gt_masks (torch.Tensor | None): Tensor of shape (M, H, W) representing ground truth masks, if available.
+            overlap (bool): Flag indicating if overlapping masks should be considered.
+            masks (bool): Flag indicating if the batch contains mask data.
 
         Returns:
-            correct (array[N, 10]), for 10 IoU levels
+            (torch.Tensor): A correct prediction matrix of shape (N, 10), where 10 represents different IoU levels.
+
+        Note:
+            - If `masks` is True, the function computes IoU between predicted and ground truth masks.
+            - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.
+
+        Example:
+            ```python
+            detections = torch.tensor([[25, 30, 200, 300, 0.8, 1], [50, 60, 180, 290, 0.75, 0]])
+            gt_bboxes = torch.tensor([[24, 29, 199, 299], [55, 65, 185, 295]])
+            gt_cls = torch.tensor([1, 0])
+            correct_preds = validator._process_batch(detections, gt_bboxes, gt_cls)
+            ```
         """
         if masks:
             if overlap:
@@ -215,6 +243,18 @@ class SegmentationValidator(DetectionValidator):
         )  # pred
         self.plot_masks.clear()
 
+    def save_one_txt(self, predn, pred_masks, save_conf, shape, file):
+        """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
+        from ultralytics.engine.results import Results
+
+        Results(
+            np.zeros((shape[0], shape[1]), dtype=np.uint8),
+            path=None,
+            names=self.names,
+            boxes=predn[:, :6],
+            masks=pred_masks,
+        ).save_txt(file, save_conf=save_conf)
+
     def pred_to_json(self, predn, filename, pred_masks):
         """
         Save one JSON result.
--- a/ultralytics/nn/modules/block.py
+++ b/ultralytics/nn/modules/block.py
@@ -855,7 +855,7 @@ class Attention(nn.Module):
         self.head_dim = dim // num_heads
         self.key_dim = int(self.head_dim * attn_ratio)
         self.scale = self.key_dim**-0.5
-        nh_kd = nh_kd = self.key_dim * num_heads
+        nh_kd = self.key_dim * num_heads
         h = dim + nh_kd * 2
         self.qkv = Conv(dim, h, 1, act=False)
         self.proj = Conv(dim, dim, 1, act=False)
--- a/ultralytics/solutions/parking_management.py
+++ b/ultralytics/solutions/parking_management.py
@@ -1,11 +1,9 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
 import json
-from tkinter import filedialog, messagebox
 
 import cv2
 import numpy as np
-from PIL import Image, ImageTk
 
 from ultralytics.utils.checks import check_imshow, check_requirements
 from ultralytics.utils.plotting import Annotator
@@ -16,7 +14,7 @@ class ParkingPtsSelection:
         """Initializes the UI for selecting parking zone points in a tkinter window."""
         check_requirements("tkinter")
 
-        import tkinter as tk
+        import tkinter as tk  # scope for multi-environment compatibility
 
         self.tk = tk
         self.master = tk.Tk()
@@ -55,6 +53,10 @@ class ParkingPtsSelection:
 
     def upload_image(self):
         """Upload an image and resize it to fit canvas."""
+        from tkinter import filedialog
+
+        from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package
+
         self.image_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")])
         if not self.image_path:
             return
@@ -115,6 +117,8 @@ class ParkingPtsSelection:
 
     def remove_last_bounding_box(self):
         """Remove the last drawn bounding box from canvas."""
+        from tkinter import messagebox  # scope for multi-environment compatibility
+
         if self.bounding_boxes:
             self.bounding_boxes.pop()  # Remove the last bounding box
             self.canvas.delete("all")  # Clear the canvas
@@ -130,6 +134,8 @@ class ParkingPtsSelection:
 
     def save_to_json(self):
         """Saves rescaled bounding boxes to 'bounding_boxes.json' based on image-to-canvas size ratio."""
+        from tkinter import messagebox  # scope for multi-environment compatibility
+
         canvas_width, canvas_height = self.canvas.winfo_width(), self.canvas.winfo_height()
         width_scaling_factor = self.img_width / canvas_width
         height_scaling_factor = self.img_height / canvas_height
@@ -141,8 +147,8 @@ class ParkingPtsSelection:
                 rescaled_y = int(y * height_scaling_factor)
                 rescaled_box.append((rescaled_x, rescaled_y))
             bounding_boxes_data.append({"points": rescaled_box})
-        with open("bounding_boxes.json", "w") as json_file:
-            json.dump(bounding_boxes_data, json_file, indent=4)
+        with open("bounding_boxes.json", "w") as f:
+            json.dump(bounding_boxes_data, f, indent=4)
 
         messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
 
@@ -187,11 +193,10 @@ class ParkingManagement:
         self.env_check = check_imshow(warn=True)
 
     def load_model(self):
-        """Load the Ultralytics YOLOv8 model for inference and analytics."""
+        """Load the Ultralytics YOLO model for inference and analytics."""
        from ultralytics import YOLO
 
-        self.model = YOLO(self.model_path)
-        return self.model
+        return YOLO(self.model_path)
 
     @staticmethod
     def parking_regions_extraction(json_file):
@@ -201,8 +206,8 @@ class ParkingManagement:
 
         Args:
             json_file (str): file that have all parking slot points
        """
-        with open(json_file, "r") as json_file:
-            return json.load(json_file)
+        with open(json_file, "r") as f:
+            return json.load(f)
 
     def process_data(self, json_data, im0, boxes, clss):
         """
@@ -219,12 +224,9 @@ class ParkingManagement:
             empty_slots (int): total slots that are available in parking lot
        """
         annotator = Annotator(im0)
-        total_slots, filled_slots = len(json_data), 0
-        empty_slots = total_slots
-
+        empty_slots, filled_slots = len(json_data), 0
         for region in json_data:
-            points = region["points"]
-            points_array = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
+            points_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
             region_occupied = False
 
             for box, cls in zip(boxes, clss):
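The bookkeeping change drops the redundant `total_slots` counter: `empty_slots` is initialized directly to the region count, and each occupied region moves one slot from empty to filled. A runnable sketch of the counting pattern, with `any_detection_inside` as a hypothetical stand-in for the inner `cv2.pointPolygonTest` loop:

```python
def any_detection_inside(region: dict) -> bool:
    """Hypothetical stand-in for testing detections against a region polygon."""
    return bool(region.get("occupied"))

json_data = [{"occupied": True}, {"occupied": False}, {"occupied": True}]
empty_slots, filled_slots = len(json_data), 0  # every slot starts as empty
for region in json_data:
    if any_detection_inside(region):
        filled_slots += 1
        empty_slots -= 1

print(filled_slots, empty_slots)  # 2 1
```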
--- a/ultralytics/solutions/streamlit_inference.py
+++ b/ultralytics/solutions/streamlit_inference.py
@@ -6,13 +6,13 @@ import time
 
 import cv2
 import torch
 
+from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS
 
 
 def inference():
     """Runs real-time object detection on video input using Ultralytics YOLOv8 in a Streamlit application."""
-
-    # Scope imports for faster ultralytics package load speeds
+    check_requirements("streamlit>=1.29.0")  # scope imports for faster ultralytics package load speeds
     import streamlit as st
 
     from ultralytics import YOLO
@@ -99,24 +99,26 @@ def inference():
 
     stop_button = st.button("Stop")  # Button to stop the inference
 
-    prev_time = 0
     while videocapture.isOpened():
         success, frame = videocapture.read()
         if not success:
             st.warning("Failed to read frame from webcam. Please make sure the webcam is connected properly.")
             break
 
-        curr_time = time.time()
-        fps = 1 / (curr_time - prev_time)
-        prev_time = curr_time
+        prev_time = time.time()
 
         # Store model predictions
-        if enable_trk:
+        if enable_trk == "Yes":
             results = model.track(frame, conf=conf, iou=iou, classes=selected_ind, persist=True)
         else:
             results = model(frame, conf=conf, iou=iou, classes=selected_ind)
         annotated_frame = results[0].plot()  # Add annotations on frame
 
+        # Calculate model FPS
+        curr_time = time.time()
+        fps = 1 / (curr_time - prev_time)
+        prev_time = curr_time
+
         # display frame
         org_frame.image(frame, channels="BGR")
         ann_frame.image(annotated_frame, channels="BGR")
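Two behavior fixes land here. `enable_trk == "Yes"` compares against the widget's actual string value (presumably a Yes/No selector), where the old truthiness test made any non-empty string enable tracking. And the FPS timer now brackets only the model call instead of the whole loop iteration (the old `prev_time = 0` also made the first frame's reading meaningless). A minimal sketch of the timing pattern, with `run_model_on` as a hypothetical stand-in for `model(...)` / `model.track(...)`:

```python
import time

def run_model_on(frame):
    """Hypothetical stand-in for model(frame) or model.track(frame)."""
    time.sleep(0.02)  # pretend inference takes ~20 ms
    return frame

frame = "frame"
prev_time = time.time()              # start the clock immediately before inference
results = run_model_on(frame)
fps = 1 / (time.time() - prev_time)  # model-only FPS (~50 here), not end-to-end loop FPS
```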
--- ultralytics-8.2.57/PKG-INFO
+++ ultralytics-8.2.59/ultralytics.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.57
+Version: 8.2.59
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu