ultralytics 8.0.114__tar.gz → 8.0.116__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167) hide show
  1. {ultralytics-8.0.114/ultralytics.egg-info → ultralytics-8.0.116}/PKG-INFO +5 -3
  2. {ultralytics-8.0.114 → ultralytics-8.0.116}/README.md +4 -2
  3. {ultralytics-8.0.114 → ultralytics-8.0.116}/README.zh-CN.md +4 -2
  4. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/__init__.py +3 -2
  5. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/rtdetr/model.py +9 -0
  6. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/model.py +9 -0
  7. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/utils.py +1 -1
  8. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/model.py +8 -6
  9. ultralytics-8.0.116/ultralytics/yolo/nas/__init__.py +7 -0
  10. ultralytics-8.0.116/ultralytics/yolo/nas/model.py +125 -0
  11. ultralytics-8.0.116/ultralytics/yolo/nas/predict.py +35 -0
  12. ultralytics-8.0.116/ultralytics/yolo/nas/val.py +25 -0
  13. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/__init__.py +7 -6
  14. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/benchmarks.py +14 -5
  15. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/dvc.py +11 -10
  16. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/neptune.py +0 -1
  17. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/checks.py +23 -20
  18. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/instance.py +4 -3
  19. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/torch_utils.py +2 -0
  20. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/detect/val.py +1 -1
  21. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/pose/val.py +1 -1
  22. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/segment/val.py +8 -7
  23. {ultralytics-8.0.114 → ultralytics-8.0.116/ultralytics.egg-info}/PKG-INFO +5 -3
  24. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics.egg-info/SOURCES.txt +4 -0
  25. {ultralytics-8.0.114 → ultralytics-8.0.116}/CONTRIBUTING.md +0 -0
  26. {ultralytics-8.0.114 → ultralytics-8.0.116}/LICENSE +0 -0
  27. {ultralytics-8.0.114 → ultralytics-8.0.116}/MANIFEST.in +0 -0
  28. {ultralytics-8.0.114 → ultralytics-8.0.116}/requirements.txt +0 -0
  29. {ultralytics-8.0.114 → ultralytics-8.0.116}/setup.cfg +0 -0
  30. {ultralytics-8.0.114 → ultralytics-8.0.116}/setup.py +0 -0
  31. {ultralytics-8.0.114 → ultralytics-8.0.116}/tests/test_cli.py +0 -0
  32. {ultralytics-8.0.114 → ultralytics-8.0.116}/tests/test_engine.py +0 -0
  33. {ultralytics-8.0.114 → ultralytics-8.0.116}/tests/test_python.py +0 -0
  34. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/assets/bus.jpg +0 -0
  35. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/assets/zidane.jpg +0 -0
  36. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/Argoverse.yaml +0 -0
  37. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/GlobalWheat2020.yaml +0 -0
  38. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/ImageNet.yaml +0 -0
  39. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/Objects365.yaml +0 -0
  40. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/SKU-110K.yaml +0 -0
  41. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/VOC.yaml +0 -0
  42. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/VisDrone.yaml +0 -0
  43. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco-pose.yaml +0 -0
  44. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco.yaml +0 -0
  45. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco128-seg.yaml +0 -0
  46. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco128.yaml +0 -0
  47. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco8-pose.yaml +0 -0
  48. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco8-seg.yaml +0 -0
  49. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/coco8.yaml +0 -0
  50. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/datasets/xView.yaml +0 -0
  51. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/hub/__init__.py +0 -0
  52. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/hub/auth.py +0 -0
  53. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/hub/session.py +0 -0
  54. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/hub/utils.py +0 -0
  55. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/rt-detr/rt-detr-l.yaml +0 -0
  56. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/rt-detr/rt-detr-x.yaml +0 -0
  57. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v3/yolov3-spp.yaml +0 -0
  58. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v3/yolov3-tiny.yaml +0 -0
  59. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v3/yolov3.yaml +0 -0
  60. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v5/yolov5-p6.yaml +0 -0
  61. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v5/yolov5.yaml +0 -0
  62. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v6/yolov6.yaml +0 -0
  63. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8-cls.yaml +0 -0
  64. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8-p2.yaml +0 -0
  65. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8-p6.yaml +0 -0
  66. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8-pose-p6.yaml +0 -0
  67. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8-pose.yaml +0 -0
  68. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8-seg.yaml +0 -0
  69. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/models/v8/yolov8.yaml +0 -0
  70. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/__init__.py +0 -0
  71. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/autobackend.py +0 -0
  72. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/autoshape.py +0 -0
  73. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/modules/__init__.py +0 -0
  74. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/modules/block.py +0 -0
  75. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/modules/conv.py +0 -0
  76. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/modules/head.py +0 -0
  77. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/modules/transformer.py +0 -0
  78. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/modules/utils.py +0 -0
  79. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/nn/tasks.py +0 -0
  80. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/__init__.py +0 -0
  81. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/cfg/botsort.yaml +0 -0
  82. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/cfg/bytetrack.yaml +0 -0
  83. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/track.py +0 -0
  84. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/trackers/__init__.py +0 -0
  85. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/trackers/basetrack.py +0 -0
  86. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/trackers/bot_sort.py +0 -0
  87. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/trackers/byte_tracker.py +0 -0
  88. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/utils/__init__.py +0 -0
  89. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/utils/gmc.py +0 -0
  90. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/utils/kalman_filter.py +0 -0
  91. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/tracker/utils/matching.py +0 -0
  92. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/__init__.py +0 -0
  93. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/rtdetr/__init__.py +0 -0
  94. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/rtdetr/predict.py +0 -0
  95. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/rtdetr/val.py +0 -0
  96. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/__init__.py +0 -0
  97. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/amg.py +0 -0
  98. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/autosize.py +0 -0
  99. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/build.py +0 -0
  100. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/__init__.py +0 -0
  101. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/decoders.py +0 -0
  102. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/encoders.py +0 -0
  103. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/mask_generator.py +0 -0
  104. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/prompt_predictor.py +0 -0
  105. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/sam.py +0 -0
  106. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/modules/transformer.py +0 -0
  107. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/vit/sam/predict.py +0 -0
  108. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/__init__.py +0 -0
  109. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/cfg/__init__.py +0 -0
  110. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/cfg/default.yaml +0 -0
  111. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/__init__.py +0 -0
  112. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/annotator.py +0 -0
  113. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/augment.py +0 -0
  114. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/base.py +0 -0
  115. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/build.py +0 -0
  116. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/converter.py +0 -0
  117. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/dataloaders/__init__.py +0 -0
  118. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/dataloaders/stream_loaders.py +0 -0
  119. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/dataloaders/v5augmentations.py +0 -0
  120. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/dataloaders/v5loader.py +0 -0
  121. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/dataset.py +0 -0
  122. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/data/dataset_wrappers.py +0 -0
  123. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/__init__.py +0 -0
  124. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/exporter.py +0 -0
  125. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/predictor.py +0 -0
  126. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/results.py +0 -0
  127. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/trainer.py +0 -0
  128. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/engine/validator.py +0 -0
  129. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/autobatch.py +0 -0
  130. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/__init__.py +0 -0
  131. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/base.py +0 -0
  132. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/clearml.py +0 -0
  133. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/comet.py +0 -0
  134. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/hub.py +0 -0
  135. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/mlflow.py +0 -0
  136. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/raytune.py +0 -0
  137. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/tensorboard.py +0 -0
  138. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/callbacks/wb.py +0 -0
  139. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/dist.py +0 -0
  140. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/downloads.py +0 -0
  141. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/errors.py +0 -0
  142. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/files.py +0 -0
  143. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/loss.py +0 -0
  144. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/metrics.py +0 -0
  145. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/ops.py +0 -0
  146. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/patches.py +0 -0
  147. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/plotting.py +0 -0
  148. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/tal.py +0 -0
  149. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/utils/tuner.py +0 -0
  150. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/__init__.py +0 -0
  151. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/classify/__init__.py +0 -0
  152. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/classify/predict.py +0 -0
  153. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/classify/train.py +0 -0
  154. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/classify/val.py +0 -0
  155. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/detect/__init__.py +0 -0
  156. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/detect/predict.py +0 -0
  157. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/detect/train.py +0 -0
  158. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/pose/__init__.py +0 -0
  159. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/pose/predict.py +0 -0
  160. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/pose/train.py +0 -0
  161. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/segment/__init__.py +0 -0
  162. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/segment/predict.py +0 -0
  163. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics/yolo/v8/segment/train.py +0 -0
  164. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics.egg-info/dependency_links.txt +0 -0
  165. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics.egg-info/entry_points.txt +0 -0
  166. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics.egg-info/requires.txt +0 -0
  167. {ultralytics-8.0.114 → ultralytics-8.0.116}/ultralytics.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ultralytics
3
- Version: 8.0.114
3
+ Version: 8.0.116
4
4
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
5
5
  Home-page: https://github.com/ultralytics/ultralytics
6
6
  Author: Ultralytics
@@ -138,9 +138,11 @@ path = model.export(format="onnx") # export the model to ONNX format
138
138
 
139
139
  ## <div align="center">Models</div>
140
140
 
141
- All YOLOv8 pretrained models are available here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
141
+ YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://docs.ultralytics.com/tasks/segment) and [Pose](https://docs.ultralytics.com/tasks/pose) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/modes/classify) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet) dataset. [Track](https://docs.ultralytics.com/modes/track) mode is available for all Detect, Segment and Pose models.
142
142
 
143
- [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
143
+ <img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/tasks/im/banner-tasks.png">
144
+
145
+ All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
144
146
 
145
147
  <details open><summary>Detection</summary>
146
148
 
@@ -102,9 +102,11 @@ path = model.export(format="onnx") # export the model to ONNX format
102
102
 
103
103
  ## <div align="center">Models</div>
104
104
 
105
- All YOLOv8 pretrained models are available here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
105
+ YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://docs.ultralytics.com/tasks/segment) and [Pose](https://docs.ultralytics.com/tasks/pose) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/modes/classify) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet) dataset. [Track](https://docs.ultralytics.com/modes/track) mode is available for all Detect, Segment and Pose models.
106
106
 
107
- [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
107
+ <img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/tasks/im/banner-tasks.png">
108
+
109
+ All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
108
110
 
109
111
  <details open><summary>Detection</summary>
110
112
 
@@ -102,9 +102,11 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
102
102
 
103
103
  ## <div align="center">模型</div>
104
104
 
105
- 所有的 YOLOv8 预训练模型都可以在此找到。检测、分割和姿态模型在 [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) 数据集上进行预训练,而分类模型在 [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) 数据集上进行预训练。
105
+ [COCO](https://docs.ultralytics.com/datasets/detect/coco)数据集上预训练的YOLOv8 [检测](https://docs.ultralytics.com/tasks/detect),[分割](https://docs.ultralytics.com/tasks/segment)和[姿态](https://docs.ultralytics.com/tasks/pose)模型可以在这里找到,以及在[ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet)数据集上预训练的YOLOv8 [分类](https://docs.ultralytics.com/modes/classify)模型。所有的检测,分割和姿态模型都支持[追踪](https://docs.ultralytics.com/modes/track)模式。
106
106
 
107
- 在首次使用时,[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会自动从最新的 Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)中下载。
107
+ <img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/tasks/im/banner-tasks.png">
108
+
109
+ 所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models)在首次使用时会自动从最新的Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)下载。
108
110
 
109
111
  <details open><summary>检测</summary>
110
112
 
@@ -1,11 +1,12 @@
1
1
  # Ultralytics YOLO 🚀, AGPL-3.0 license
2
2
 
3
- __version__ = '8.0.114'
3
+ __version__ = '8.0.116'
4
4
 
5
5
  from ultralytics.hub import start
6
6
  from ultralytics.vit.rtdetr import RTDETR
7
7
  from ultralytics.vit.sam import SAM
8
8
  from ultralytics.yolo.engine.model import YOLO
9
+ from ultralytics.yolo.nas import NAS
9
10
  from ultralytics.yolo.utils.checks import check_yolo as checks
10
11
 
11
- __all__ = '__version__', 'YOLO', 'SAM', 'RTDETR', 'checks', 'start' # allow simpler import
12
+ __all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'RTDETR', 'checks', 'start' # allow simpler import
@@ -110,3 +110,12 @@ class RTDETR:
110
110
  if args.batch == DEFAULT_CFG.batch:
111
111
  args.batch = 1 # default to 1 if not modified
112
112
  return Exporter(overrides=args)(model=self.model)
113
+
114
+ def __call__(self, source=None, stream=False, **kwargs):
115
+ """Calls the 'predict' function with given arguments to perform object detection."""
116
+ return self.predict(source, stream, **kwargs)
117
+
118
+ def __getattr__(self, attr):
119
+ """Raises error if object has no requested attribute."""
120
+ name = self.__class__.__name__
121
+ raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
@@ -35,6 +35,15 @@ class SAM:
35
35
  """Run validation given dataset."""
36
36
  raise NotImplementedError("SAM models don't support validation")
37
37
 
38
+ def __call__(self, source=None, stream=False, **kwargs):
39
+ """Calls the 'predict' function with given arguments to perform object detection."""
40
+ return self.predict(source, stream, **kwargs)
41
+
42
+ def __getattr__(self, attr):
43
+ """Raises error if object has no requested attribute."""
44
+ name = self.__class__.__name__
45
+ raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
46
+
38
47
  def info(self, detailed=False, verbose=True):
39
48
  """
40
49
  Logs model info.
@@ -138,7 +138,7 @@ def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
138
138
  """
139
139
  Args:
140
140
  imgsz (tuple): The image size.
141
- polygons (np.ndarray): [N, M], N is the number of polygons, M is the number of points(Be divided by 2).
141
+ polygons (list[np.ndarray]): [N, M], N is the number of polygons, M is the number of points(Be divided by 2).
142
142
  color (int): color
143
143
  downsample_ratio (int): downsample ratio
144
144
  """
@@ -9,8 +9,8 @@ from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, PoseModel
9
9
  attempt_load_one_weight, guess_model_task, nn, yaml_model_load)
10
10
  from ultralytics.yolo.cfg import get_cfg
11
11
  from ultralytics.yolo.engine.exporter import Exporter
12
- from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks,
13
- is_git_dir, yaml_load)
12
+ from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, NUM_THREADS, RANK, ROOT,
13
+ callbacks, is_git_dir, yaml_load)
14
14
  from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml
15
15
  from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS
16
16
  from ultralytics.yolo.utils.torch_utils import smart_inference_mode
@@ -119,7 +119,7 @@ class YOLO:
119
119
  def is_hub_model(model):
120
120
  """Check if the provided model is a HUB model."""
121
121
  return any((
122
- model.startswith('https://hub.ultra'), # i.e. https://hub.ultralytics.com/models/MODEL_ID
122
+ model.startswith('https://hub.ultralytics.com/models/'), # i.e. https://hub.ultralytics.com/models/MODEL_ID
123
123
  [len(x) for x in model.split('_')] == [42, 20], # APIKEY_MODELID
124
124
  len(model) == 20 and not Path(model).exists() and all(x not in model for x in './\\'))) # MODELID
125
125
 
@@ -391,7 +391,7 @@ class YOLO:
391
391
  grace_period: int = 10,
392
392
  gpu_per_trial: int = None,
393
393
  max_samples: int = 10,
394
- train_args: dict = {}):
394
+ train_args: dict = None):
395
395
  """
396
396
  Runs hyperparameter tuning using Ray Tune.
397
397
 
@@ -409,6 +409,8 @@ class YOLO:
409
409
  Raises:
410
410
  ModuleNotFoundError: If Ray Tune is not installed.
411
411
  """
412
+ if train_args is None:
413
+ train_args = {}
412
414
 
413
415
  try:
414
416
  from ultralytics.yolo.utils.tuner import (ASHAScheduler, RunConfig, WandbLoggerCallback, default_space,
@@ -443,7 +445,7 @@ class YOLO:
443
445
  space['data'] = data
444
446
 
445
447
  # Define the trainable function with allocated resources
446
- trainable_with_resources = tune.with_resources(_tune, {'cpu': 8, 'gpu': gpu_per_trial if gpu_per_trial else 0})
448
+ trainable_with_resources = tune.with_resources(_tune, {'cpu': NUM_THREADS, 'gpu': gpu_per_trial or 0})
447
449
 
448
450
  # Define the ASHA scheduler for hyperparameter search
449
451
  asha_scheduler = ASHAScheduler(time_attr='epoch',
@@ -454,7 +456,7 @@ class YOLO:
454
456
  reduction_factor=3)
455
457
 
456
458
  # Define the callbacks for the hyperparameter search
457
- tuner_callbacks = [WandbLoggerCallback(project='yolov8_tune')] if wandb else []
459
+ tuner_callbacks = [WandbLoggerCallback(project='YOLOv8-tune')] if wandb else []
458
460
 
459
461
  # Create the Ray Tune hyperparameter search tuner
460
462
  tuner = tune.Tuner(trainable_with_resources,
@@ -0,0 +1,7 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ from .model import NAS
4
+ from .predict import NASPredictor
5
+ from .val import NASValidator
6
+
7
+ __all__ = 'NASPredictor', 'NASValidator', 'NAS'
@@ -0,0 +1,125 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+ """
3
+ # NAS model interface
4
+ """
5
+
6
+ from pathlib import Path
7
+
8
+ import torch
9
+
10
+ from ultralytics.yolo.cfg import get_cfg
11
+ from ultralytics.yolo.engine.exporter import Exporter
12
+ from ultralytics.yolo.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, ROOT, is_git_dir
13
+ from ultralytics.yolo.utils.checks import check_imgsz
14
+
15
+ from ...yolo.utils.torch_utils import model_info, smart_inference_mode
16
+ from .predict import NASPredictor
17
+ from .val import NASValidator
18
+
19
+
20
+ class NAS:
21
+
22
+ def __init__(self, model='yolo_nas_s.pt') -> None:
23
+ # Load or create new NAS model
24
+ import super_gradients
25
+
26
+ self.predictor = None
27
+ suffix = Path(model).suffix
28
+ if suffix == '.pt':
29
+ self._load(model)
30
+ elif suffix == '':
31
+ self.model = super_gradients.training.models.get(model, pretrained_weights='coco')
32
+ self.task = 'detect'
33
+ self.model.args = DEFAULT_CFG_DICT # attach args to model
34
+
35
+ # Standardize model
36
+ self.model.fuse = lambda verbose: self.model
37
+ self.model.stride = torch.tensor([32])
38
+ self.model.names = dict(enumerate(self.model._class_names))
39
+ self.model.is_fused = lambda: False # for info()
40
+ self.model.yaml = {} # for info()
41
+ self.info()
42
+
43
+ @smart_inference_mode()
44
+ def _load(self, weights: str):
45
+ self.model = torch.load(weights)
46
+
47
+ @smart_inference_mode()
48
+ def predict(self, source=None, stream=False, **kwargs):
49
+ """
50
+ Perform prediction using the YOLO model.
51
+
52
+ Args:
53
+ source (str | int | PIL | np.ndarray): The source of the image to make predictions on.
54
+ Accepts all source types accepted by the YOLO model.
55
+ stream (bool): Whether to stream the predictions or not. Defaults to False.
56
+ **kwargs : Additional keyword arguments passed to the predictor.
57
+ Check the 'configuration' section in the documentation for all available options.
58
+
59
+ Returns:
60
+ (List[ultralytics.yolo.engine.results.Results]): The prediction results.
61
+ """
62
+ if source is None:
63
+ source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg'
64
+ LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.")
65
+ overrides = dict(conf=0.25, task='detect', mode='predict')
66
+ overrides.update(kwargs) # prefer kwargs
67
+ if not self.predictor:
68
+ self.predictor = NASPredictor(overrides=overrides)
69
+ self.predictor.setup_model(model=self.model)
70
+ else: # only update args if predictor is already setup
71
+ self.predictor.args = get_cfg(self.predictor.args, overrides)
72
+ return self.predictor(source, stream=stream)
73
+
74
+ def train(self, **kwargs):
75
+ """Function trains models but raises an error as NAS models do not support training."""
76
+ raise NotImplementedError("NAS models don't support training")
77
+
78
+ def val(self, **kwargs):
79
+ """Run validation given dataset."""
80
+ overrides = dict(task='detect', mode='val')
81
+ overrides.update(kwargs) # prefer kwargs
82
+ args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides)
83
+ args.imgsz = check_imgsz(args.imgsz, max_dim=1)
84
+ validator = NASValidator(args=args)
85
+ validator(model=self.model)
86
+ self.metrics = validator.metrics
87
+ return validator.metrics
88
+
89
+ @smart_inference_mode()
90
+ def export(self, **kwargs):
91
+ """
92
+ Export model.
93
+
94
+ Args:
95
+ **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs
96
+ """
97
+ overrides = dict(task='detect')
98
+ overrides.update(kwargs)
99
+ overrides['mode'] = 'export'
100
+ args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides)
101
+ args.task = self.task
102
+ if args.imgsz == DEFAULT_CFG.imgsz:
103
+ args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed
104
+ if args.batch == DEFAULT_CFG.batch:
105
+ args.batch = 1 # default to 1 if not modified
106
+ return Exporter(overrides=args)(model=self.model)
107
+
108
+ def info(self, detailed=False, verbose=True):
109
+ """
110
+ Logs model info.
111
+
112
+ Args:
113
+ detailed (bool): Show detailed information about model.
114
+ verbose (bool): Controls verbosity.
115
+ """
116
+ return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640)
117
+
118
+ def __call__(self, source=None, stream=False, **kwargs):
119
+ """Calls the 'predict' function with given arguments to perform object detection."""
120
+ return self.predict(source, stream, **kwargs)
121
+
122
+ def __getattr__(self, attr):
123
+ """Raises error if object has no requested attribute."""
124
+ name = self.__class__.__name__
125
+ raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
@@ -0,0 +1,35 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ import torch
4
+
5
+ from ultralytics.yolo.engine.predictor import BasePredictor
6
+ from ultralytics.yolo.engine.results import Results
7
+ from ultralytics.yolo.utils import ops
8
+ from ultralytics.yolo.utils.ops import xyxy2xywh
9
+
10
+
11
+ class NASPredictor(BasePredictor):
12
+
13
+ def postprocess(self, preds_in, img, orig_imgs):
14
+ """Postprocesses predictions and returns a list of Results objects."""
15
+
16
+ # Cat boxes and class scores
17
+ boxes = xyxy2xywh(preds_in[0][0])
18
+ preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
19
+
20
+ preds = ops.non_max_suppression(preds,
21
+ self.args.conf,
22
+ self.args.iou,
23
+ agnostic=self.args.agnostic_nms,
24
+ max_det=self.args.max_det,
25
+ classes=self.args.classes)
26
+
27
+ results = []
28
+ for i, pred in enumerate(preds):
29
+ orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
30
+ if not isinstance(orig_imgs, torch.Tensor):
31
+ pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
32
+ path = self.batch[0]
33
+ img_path = path[i] if isinstance(path, list) else path
34
+ results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred))
35
+ return results
@@ -0,0 +1,25 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ import torch
4
+
5
+ from ultralytics.yolo.utils import ops
6
+ from ultralytics.yolo.utils.ops import xyxy2xywh
7
+ from ultralytics.yolo.v8.detect import DetectionValidator
8
+
9
+ __all__ = ['NASValidator']
10
+
11
+
12
class NASValidator(DetectionValidator):
    """Validator for YOLO-NAS models: applies NMS to raw model outputs before metric computation."""

    def postprocess(self, preds_in):
        """Apply Non-maximum suppression to prediction outputs."""
        raw_boxes, class_scores = preds_in[0][0], preds_in[0][1]
        # Convert boxes to xywh, append scores, and put the anchor dimension last
        merged = torch.cat((xyxy2xywh(raw_boxes), class_scores), -1).permute(0, 2, 1)
        return ops.non_max_suppression(merged,
                                       self.args.conf,
                                       self.args.iou,
                                       labels=self.lb,
                                       multi_label=False,
                                       agnostic=self.args.single_cls,
                                       max_det=self.args.max_det,
                                       max_time_img=0.5)
@@ -37,7 +37,7 @@ AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # glob
37
37
  VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode
38
38
  TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format
39
39
  LOGGING_NAME = 'ultralytics'
40
- MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans
40
# Environment booleans. NOTE: the iterable MUST be an ordered sequence (tuple/list), not a set:
# set iteration order for strings is arbitrary under hash randomization, so unpacking a
# generator over a set could bind the wrong boolean to MACOS/LINUX/WINDOWS.
MACOS, LINUX, WINDOWS = (platform.system() == x for x in ('Darwin', 'Linux', 'Windows'))  # environment booleans
41
41
  HELP_MSG = \
42
42
  """
43
43
  Usage examples for running YOLOv8:
@@ -224,6 +224,11 @@ def set_logging(name=LOGGING_NAME, verbose=True):
224
224
  'propagate': False}}})
225
225
 
226
226
 
227
def emojis(string=''):
    """Return an emoji-safe version of string: non-ASCII characters are stripped on Windows consoles."""
    if WINDOWS:
        return string.encode().decode('ascii', 'ignore')
    return string
230
+
231
+
227
232
  class EmojiFilter(logging.Filter):
228
233
  """
229
234
  A custom logging filter class for removing emojis in log messages.
@@ -533,6 +538,7 @@ def get_user_config_dir(sub_dir='Ultralytics'):
533
538
  # GCP and AWS lambda fix, only /tmp is writeable
534
539
  if not is_dir_writeable(str(path.parent)):
535
540
  path = Path('/tmp') / sub_dir
541
+ LOGGER.warning(f"WARNING ⚠️ user config directory is not writeable, defaulting to '{path}'.")
536
542
 
537
543
  # Create the subdirectory if it does not exist
538
544
  path.mkdir(parents=True, exist_ok=True)
@@ -544,11 +550,6 @@ USER_CONFIG_DIR = Path(os.getenv('YOLO_CONFIG_DIR', get_user_config_dir())) # U
544
550
  SETTINGS_YAML = USER_CONFIG_DIR / 'settings.yaml'
545
551
 
546
552
 
547
- def emojis(string=''):
548
- """Return platform-dependent emoji-safe version of string."""
549
- return string.encode().decode('ascii', 'ignore') if WINDOWS else string
550
-
551
-
552
553
  def colorstr(*input):
553
554
  """Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')."""
554
555
  *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
@@ -90,7 +90,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
90
90
  filename = model.ckpt_path or model.cfg
91
91
  export = model # PyTorch format
92
92
  else:
93
- filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device) # all others
93
+ filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False)
94
94
  export = YOLO(filename, task=model.task)
95
95
  assert suffix in str(filename), 'export failed'
96
96
  emoji = '❎' # indicates export succeeded
@@ -196,8 +196,17 @@ class ProfileModels:
196
196
  model.fuse() # to report correct params and GFLOPs in model.info()
197
197
  model_info = model.info()
198
198
  if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
199
- engine_file = model.export(format='engine', half=True, imgsz=self.imgsz, device=self.device)
200
- onnx_file = model.export(format='onnx', half=True, imgsz=self.imgsz, simplify=True, device=self.device)
199
+ engine_file = model.export(format='engine',
200
+ half=True,
201
+ imgsz=self.imgsz,
202
+ device=self.device,
203
+ verbose=False)
204
+ onnx_file = model.export(format='onnx',
205
+ half=True,
206
+ imgsz=self.imgsz,
207
+ simplify=True,
208
+ device=self.device,
209
+ verbose=False)
201
210
  elif file.suffix == '.onnx':
202
211
  model_info = self.get_onnx_model_info(file)
203
212
  onnx_file = file
@@ -254,7 +263,7 @@ class ProfileModels:
254
263
  for _ in range(3):
255
264
  start_time = time.time()
256
265
  for _ in range(self.num_warmup_runs):
257
- model(input_data, verbose=False)
266
+ model(input_data, imgsz=self.imgsz, verbose=False)
258
267
  elapsed = time.time() - start_time
259
268
 
260
269
  # Compute number of runs as higher of min_time or num_timed_runs
@@ -263,7 +272,7 @@ class ProfileModels:
263
272
  # Timed runs
264
273
  run_times = []
265
274
  for _ in tqdm(range(num_runs), desc=engine_file):
266
- results = model(input_data, verbose=False)
275
+ results = model(input_data, imgsz=self.imgsz, verbose=False)
267
276
  run_times.append(results[0].speed['inference']) # Convert to milliseconds
268
277
 
269
278
  run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3) # sigma clipping
@@ -1,8 +1,10 @@
1
1
  # Ultralytics YOLO 🚀, GPL-3.0 license
2
2
  import os
3
3
 
4
+ import pkg_resources as pkg
5
+
4
6
  from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
5
- from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
7
+ from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
6
8
 
7
9
  try:
8
10
  from importlib.metadata import version
@@ -10,8 +12,12 @@ try:
10
12
  import dvclive
11
13
 
12
14
  assert not TESTS_RUNNING # do not log pytest
13
- assert version('dvclive')
14
- except (ImportError, AssertionError):
15
+
16
+ ver = version('dvclive')
17
+ if pkg.parse_version(ver) < pkg.parse_version('2.11.0'):
18
+ LOGGER.debug(f'DVCLive is detected but version {ver} is incompatible (>=2.11 required).')
19
+ dvclive = None # noqa: F811
20
+ except (ImportError, AssertionError, TypeError):
15
21
  dvclive = None
16
22
 
17
23
  # DVCLive logger instance
@@ -36,7 +42,7 @@ def _log_images(image_path, prefix=''):
36
42
  def _log_plots(plots, prefix=''):
37
43
  for name, params in plots.items():
38
44
  timestamp = params['timestamp']
39
- if _processed_plots.get(name, None) != timestamp:
45
+ if _processed_plots.get(name) != timestamp:
40
46
  _log_images(name, prefix)
41
47
  _processed_plots[name] = timestamp
42
48
 
@@ -94,12 +100,7 @@ def on_fit_epoch_end(trainer):
94
100
  live.log_metric(metric, value)
95
101
 
96
102
  if trainer.epoch == 0:
97
- model_info = {
98
- 'model/parameters': get_num_params(trainer.model),
99
- 'model/GFLOPs': round(get_flops(trainer.model), 3),
100
- 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
101
-
102
- for metric, value in model_info.items():
103
+ for metric, value in model_info_for_loggers(trainer).items():
103
104
  live.log_metric(metric, value, plot=False)
104
105
 
105
106
  _log_plots(trainer.plots, 'train')
@@ -93,7 +93,6 @@ def on_train_end(trainer):
93
93
  # Log the final model
94
94
  run[f'weights/{trainer.args.name or trainer.args.task}/{str(trainer.best.name)}'].upload(File(str(
95
95
  trainer.best)))
96
- run.stop()
97
96
 
98
97
 
99
98
  callbacks = {
@@ -19,9 +19,9 @@ import requests
19
19
  import torch
20
20
  from matplotlib import font_manager
21
21
 
22
- from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, TryExcept, clean_url, colorstr,
23
- downloads, emojis, is_colab, is_docker, is_kaggle, is_online, is_pip_package,
24
- url2file)
22
+ from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, RANK, ROOT, USER_CONFIG_DIR, TryExcept, clean_url,
23
+ colorstr, downloads, emojis, is_colab, is_docker, is_kaggle, is_online,
24
+ is_pip_package, url2file)
25
25
 
26
26
 
27
27
  def is_ascii(s) -> bool:
@@ -164,23 +164,26 @@ def check_font(font='Arial.ttf'):
164
164
  Returns:
165
165
  file (Path): Resolved font file path.
166
166
  """
167
- name = Path(font).name
168
-
169
- # Check USER_CONFIG_DIR
170
- file = USER_CONFIG_DIR / name
171
- if file.exists():
172
- return file
173
-
174
- # Check system fonts
175
- matches = [s for s in font_manager.findSystemFonts() if font in s]
176
- if any(matches):
177
- return matches[0]
178
-
179
- # Download to USER_CONFIG_DIR if missing
180
- url = f'https://ultralytics.com/assets/{name}'
181
- if downloads.is_url(url):
182
- downloads.safe_download(url=url, file=file)
183
- return file
167
+ from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first
168
+
169
+ with torch_distributed_zero_first(RANK):
170
+ name = Path(font).name
171
+
172
+ # Check USER_CONFIG_DIR
173
+ file = USER_CONFIG_DIR / name
174
+ if file.exists():
175
+ return file
176
+
177
+ # Check system fonts
178
+ matches = [s for s in font_manager.findSystemFonts() if font in s]
179
+ if any(matches):
180
+ return matches[0]
181
+
182
+ # Download to USER_CONFIG_DIR if missing
183
+ url = f'https://ultralytics.com/assets/{name}'
184
+ if downloads.is_url(url):
185
+ downloads.safe_download(url=url, file=file)
186
+ return file
184
187
 
185
188
 
186
189
  def check_python(minimum: str = '3.7.0') -> bool:
@@ -209,9 +209,10 @@ class Instances:
209
209
  """Convert bounding box format."""
210
210
  self._bboxes.convert(format=format)
211
211
 
212
@property
def bbox_areas(self):
    """Per-box areas, delegated to the wrapped Bboxes container."""
    areas = self._bboxes.areas()
    return areas
215
216
 
216
217
  def scale(self, scale_w, scale_h, bbox_only=False):
217
218
  """this might be similar with denormalize func but without normalized sign."""
@@ -328,9 +329,9 @@ class Instances:
328
329
 
329
330
  def remove_zero_area_boxes(self):
330
331
  """Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height. This removes them."""
331
- good = self._bboxes.areas() > 0
332
+ good = self.bbox_areas > 0
332
333
  if not all(good):
333
- self._bboxes = Bboxes(self._bboxes.bboxes[good], format=self._bboxes.format)
334
+ self._bboxes = self._bboxes[good]
334
335
  if len(self.segments):
335
336
  self.segments = self.segments[good]
336
337
  if self.keypoints is not None:
@@ -64,6 +64,8 @@ def select_device(device='', batch=0, newline=False, verbose=True):
64
64
  if cpu or mps:
65
65
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
66
66
  elif device: # non-cpu device requested
67
+ if device == 'cuda':
68
+ device = '0'
67
69
  visible = os.environ.get('CUDA_VISIBLE_DEVICES', None)
68
70
  os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()
69
71
  if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', ''))):
@@ -223,7 +223,7 @@ class DetectionValidator(BaseValidator):
223
223
  def plot_predictions(self, batch, preds, ni):
224
224
  """Plots predicted bounding boxes on input images and saves the result."""
225
225
  plot_images(batch['img'],
226
- *output_to_target(preds, max_det=15),
226
+ *output_to_target(preds, max_det=self.args.max_det),
227
227
  paths=batch['im_file'],
228
228
  fname=self.save_dir / f'val_batch{ni}_pred.jpg',
229
229
  names=self.names,
@@ -156,7 +156,7 @@ class PoseValidator(DetectionValidator):
156
156
  """Plots predictions for YOLO model."""
157
157
  pred_kpts = torch.cat([p[:, 6:].view(-1, *self.kpt_shape)[:15] for p in preds], 0)
158
158
  plot_images(batch['img'],
159
- *output_to_target(preds, max_det=15),
159
+ *output_to_target(preds, max_det=self.args.max_det),
160
160
  kpts=pred_kpts,
161
161
  paths=batch['im_file'],
162
162
  fname=self.save_dir / f'val_batch{ni}_pred.jpg',