ultralytics 8.1.1__py3-none-any.whl → 8.1.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Note: this release of ultralytics has been flagged as potentially problematic.
Files changed (105)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +1 -1
  3. ultralytics/cfg/datasets/Argoverse.yaml +5 -7
  4. ultralytics/cfg/datasets/DOTAv1.5.yaml +4 -4
  5. ultralytics/cfg/datasets/DOTAv1.yaml +4 -4
  6. ultralytics/cfg/datasets/GlobalWheat2020.yaml +2 -4
  7. ultralytics/cfg/datasets/ImageNet.yaml +4 -6
  8. ultralytics/cfg/datasets/Objects365.yaml +3 -5
  9. ultralytics/cfg/datasets/SKU-110K.yaml +4 -6
  10. ultralytics/cfg/datasets/VOC.yaml +0 -2
  11. ultralytics/cfg/datasets/VisDrone.yaml +4 -6
  12. ultralytics/cfg/datasets/coco-pose.yaml +6 -7
  13. ultralytics/cfg/datasets/coco.yaml +5 -7
  14. ultralytics/cfg/datasets/coco128-seg.yaml +4 -6
  15. ultralytics/cfg/datasets/coco128.yaml +4 -6
  16. ultralytics/cfg/datasets/coco8-pose.yaml +5 -6
  17. ultralytics/cfg/datasets/coco8-seg.yaml +4 -6
  18. ultralytics/cfg/datasets/coco8.yaml +4 -6
  19. ultralytics/cfg/datasets/dota8.yaml +3 -3
  20. ultralytics/cfg/datasets/open-images-v7.yaml +4 -6
  21. ultralytics/cfg/datasets/tiger-pose.yaml +4 -5
  22. ultralytics/cfg/datasets/xView.yaml +3 -5
  23. ultralytics/cfg/default.yaml +103 -103
  24. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +27 -27
  25. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +23 -23
  26. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +23 -23
  27. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +27 -27
  28. ultralytics/cfg/models/v3/yolov3-spp.yaml +32 -34
  29. ultralytics/cfg/models/v3/yolov3-tiny.yaml +24 -26
  30. ultralytics/cfg/models/v3/yolov3.yaml +32 -34
  31. ultralytics/cfg/models/v5/yolov5-p6.yaml +41 -43
  32. ultralytics/cfg/models/v5/yolov5.yaml +26 -28
  33. ultralytics/cfg/models/v6/yolov6.yaml +17 -17
  34. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +25 -0
  35. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +25 -0
  36. ultralytics/cfg/models/v8/yolov8-cls.yaml +7 -7
  37. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +26 -26
  38. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +27 -27
  39. ultralytics/cfg/models/v8/yolov8-ghost.yaml +23 -23
  40. ultralytics/cfg/models/v8/yolov8-obb.yaml +23 -23
  41. ultralytics/cfg/models/v8/yolov8-p2.yaml +23 -23
  42. ultralytics/cfg/models/v8/yolov8-p6.yaml +24 -24
  43. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +25 -25
  44. ultralytics/cfg/models/v8/yolov8-pose.yaml +19 -19
  45. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +23 -23
  46. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +24 -24
  47. ultralytics/cfg/models/v8/yolov8-seg.yaml +18 -18
  48. ultralytics/cfg/models/v8/yolov8.yaml +23 -23
  49. ultralytics/cfg/trackers/botsort.yaml +7 -7
  50. ultralytics/cfg/trackers/bytetrack.yaml +6 -6
  51. ultralytics/data/annotator.py +1 -1
  52. ultralytics/data/augment.py +1 -2
  53. ultralytics/data/base.py +0 -1
  54. ultralytics/data/build.py +1 -2
  55. ultralytics/data/dataset.py +0 -1
  56. ultralytics/data/explorer/explorer.py +11 -12
  57. ultralytics/data/explorer/utils.py +3 -3
  58. ultralytics/data/split_dota.py +15 -23
  59. ultralytics/engine/model.py +12 -11
  60. ultralytics/engine/predictor.py +1 -1
  61. ultralytics/engine/trainer.py +1 -4
  62. ultralytics/hub/__init__.py +5 -3
  63. ultralytics/hub/auth.py +1 -2
  64. ultralytics/hub/session.py +14 -6
  65. ultralytics/hub/utils.py +4 -0
  66. ultralytics/models/fastsam/model.py +0 -1
  67. ultralytics/models/nas/model.py +0 -1
  68. ultralytics/models/rtdetr/train.py +0 -1
  69. ultralytics/models/rtdetr/val.py +1 -2
  70. ultralytics/models/sam/build.py +0 -1
  71. ultralytics/models/sam/model.py +0 -1
  72. ultralytics/models/sam/modules/encoders.py +1 -6
  73. ultralytics/models/sam/predict.py +0 -1
  74. ultralytics/models/utils/loss.py +0 -1
  75. ultralytics/models/yolo/detect/val.py +1 -2
  76. ultralytics/models/yolo/obb/val.py +14 -39
  77. ultralytics/nn/modules/head.py +5 -6
  78. ultralytics/nn/modules/utils.py +1 -1
  79. ultralytics/nn/tasks.py +1 -1
  80. ultralytics/solutions/ai_gym.py +9 -1
  81. ultralytics/solutions/distance_calculation.py +4 -8
  82. ultralytics/solutions/heatmap.py +16 -21
  83. ultralytics/solutions/object_counter.py +30 -29
  84. ultralytics/solutions/speed_estimation.py +19 -24
  85. ultralytics/trackers/track.py +0 -1
  86. ultralytics/trackers/utils/gmc.py +1 -1
  87. ultralytics/trackers/utils/matching.py +1 -3
  88. ultralytics/utils/benchmarks.py +2 -7
  89. ultralytics/utils/callbacks/base.py +1 -0
  90. ultralytics/utils/callbacks/comet.py +4 -22
  91. ultralytics/utils/callbacks/hub.py +1 -3
  92. ultralytics/utils/callbacks/neptune.py +1 -3
  93. ultralytics/utils/callbacks/tensorboard.py +2 -1
  94. ultralytics/utils/checks.py +2 -2
  95. ultralytics/utils/loss.py +3 -6
  96. ultralytics/utils/ops.py +8 -9
  97. ultralytics/utils/plotting.py +13 -15
  98. ultralytics/utils/tal.py +1 -2
  99. {ultralytics-8.1.1.dist-info → ultralytics-8.1.3.dist-info}/METADATA +15 -15
  100. ultralytics-8.1.3.dist-info/RECORD +190 -0
  101. ultralytics-8.1.1.dist-info/RECORD +0 -188
  102. {ultralytics-8.1.1.dist-info → ultralytics-8.1.3.dist-info}/LICENSE +0 -0
  103. {ultralytics-8.1.1.dist-info → ultralytics-8.1.3.dist-info}/WHEEL +0 -0
  104. {ultralytics-8.1.1.dist-info → ultralytics-8.1.3.dist-info}/entry_points.txt +0 -0
  105. {ultralytics-8.1.1.dist-info → ultralytics-8.1.3.dist-info}/top_level.txt +0 -0
ultralytics/models/sam/model.py CHANGED
@@ -18,7 +18,6 @@ from pathlib import Path
 
 from ultralytics.engine.model import Model
 from ultralytics.utils.torch_utils import model_info
-
 from .build import build_sam
 from .predict import Predictor
 
ultralytics/models/sam/modules/encoders.py CHANGED
@@ -198,12 +198,7 @@ class PromptEncoder(nn.Module):
         """
         return self.pe_layer(self.image_embedding_size).unsqueeze(0)
 
-    def _embed_points(
-        self,
-        points: torch.Tensor,
-        labels: torch.Tensor,
-        pad: bool,
-    ) -> torch.Tensor:
+    def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
         """Embeds point prompts."""
         points = points + 0.5  # Shift to center of pixel
         if pad:
ultralytics/models/sam/predict.py CHANGED
@@ -18,7 +18,6 @@ from ultralytics.engine.predictor import BasePredictor
 from ultralytics.engine.results import Results
 from ultralytics.utils import DEFAULT_CFG, ops
 from ultralytics.utils.torch_utils import select_device
-
 from .amg import (
     batch_iterator,
     batched_mask_to_box,
ultralytics/models/utils/loss.py CHANGED
@@ -6,7 +6,6 @@ import torch.nn.functional as F
 
 from ultralytics.utils.loss import FocalLoss, VarifocalLoss
 from ultralytics.utils.metrics import bbox_iou
-
 from .ops import HungarianMatcher
 
 
ultralytics/models/yolo/detect/val.py CHANGED
@@ -104,8 +104,7 @@ class DetectionValidator(BaseValidator):
         if len(cls):
             bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]  # target boxes
             ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad)  # native-space labels
-        prepared_batch = dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
-        return prepared_batch
+        return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
 
     def _prepare_pred(self, pred, pbatch):
         """Prepares a batch of images and annotations for validation."""
ultralytics/models/yolo/obb/val.py CHANGED
@@ -18,7 +18,7 @@ class OBBValidator(DetectionValidator):
         ```python
         from ultralytics.models.yolo.obb import OBBValidator
 
-        args = dict(model='yolov8n-obb.pt', data='coco8-seg.yaml')
+        args = dict(model='yolov8n-obb.pt', data='dota8.yaml')
         validator = OBBValidator(args=args)
         validator(model=args['model'])
         ```
@@ -77,8 +77,7 @@ class OBBValidator(DetectionValidator):
         if len(cls):
             bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]])  # target boxes
             ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True)  # native-space labels
-        prepared_batch = dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
-        return prepared_batch
+        return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
 
     def _prepare_pred(self, pred, pbatch):
         """Prepares and returns a batch for OBB validation with scaled and padded bounding boxes."""
@@ -118,11 +117,10 @@ class OBBValidator(DetectionValidator):
 
     def save_one_txt(self, predn, save_conf, shape, file):
         """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
-        gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
-        for *xyxy, conf, cls, angle in predn.tolist():
-            xywha = torch.tensor([*xyxy, angle]).view(1, 5)
-            xywha[:, :4] /= gn
-            xyxyxyxy = ops.xywhr2xyxyxyxy(xywha).view(-1).tolist()  # normalized xywh
+        gn = torch.tensor(shape)[[1, 0]]  # normalization gain whwh
+        for *xywh, conf, cls, angle in predn.tolist():
+            xywha = torch.tensor([*xywh, angle]).view(1, 5)
+            xyxyxyxy = (ops.xywhr2xyxyxyxy(xywha) / gn).view(-1).tolist()  # normalized xywh
             line = (cls, *xyxyxyxy, conf) if save_conf else (cls, *xyxyxyxy)  # label format
             with open(file, "a") as f:
                 f.write(("%g " * len(line)).rstrip() % line + "\n")
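The save_one_txt rewrite above changes two things at once: OBB predictions unpack as xywh plus a rotation angle (not xyxy), and the normalization gain shrinks from a 4-element whwh vector to a 2-element (w, h) one. A minimal sketch of why the smaller gain works, using hypothetical values, assuming (as in these versions) that ops.xywhr2xyxyxyxy returns corner points grouped in (..., 4, 2) pairs, so a (w, h) tensor broadcasts over each (x, y) pair:

```python
import torch

h, w = 640, 480  # hypothetical image shape (h, w)
gn = torch.tensor((h, w))[[1, 0]]  # -> tensor([480, 640]), i.e. (w, h)
# One rotated box already converted to its 4 corner points, shape (1, 4, 2)
corners = torch.tensor([[[100.0, 200.0], [150.0, 200.0], [150.0, 250.0], [100.0, 250.0]]])
normalized = (corners / gn).view(-1).tolist()  # 8 values: x scaled by w, y by h
print(normalized)
```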
@@ -139,32 +137,21 @@ class OBBValidator(DetectionValidator):
         pred_txt.mkdir(parents=True, exist_ok=True)
         data = json.load(open(pred_json))
         # Save split results
-        LOGGER.info(f"Saving predictions with DOTA format to {str(pred_txt)}...")
+        LOGGER.info(f"Saving predictions with DOTA format to {pred_txt}...")
         for d in data:
             image_id = d["image_id"]
             score = d["score"]
             classname = self.names[d["category_id"]].replace(" ", "-")
+            p = d["poly"]
 
-            lines = "{} {} {} {} {} {} {} {} {} {}\n".format(
-                image_id,
-                score,
-                d["poly"][0],
-                d["poly"][1],
-                d["poly"][2],
-                d["poly"][3],
-                d["poly"][4],
-                d["poly"][5],
-                d["poly"][6],
-                d["poly"][7],
-            )
-            with open(str(pred_txt / f"Task1_{classname}") + ".txt", "a") as f:
-                f.writelines(lines)
+            with open(f'{pred_txt / f"Task1_{classname}"}.txt', "a") as f:
+                f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
         # Save merged results, this could result slightly lower map than using official merging script,
         # because of the probiou calculation.
         pred_merged_txt = self.save_dir / "predictions_merged_txt"  # predictions
         pred_merged_txt.mkdir(parents=True, exist_ok=True)
         merged_results = defaultdict(list)
-        LOGGER.info(f"Saving merged predictions with DOTA format to {str(pred_merged_txt)}...")
+        LOGGER.info(f"Saving merged predictions with DOTA format to {pred_merged_txt}...")
         for d in data:
             image_id = d["image_id"].split("__")[0]
             pattern = re.compile(r"\d+___\d+")
@@ -188,22 +175,10 @@ class OBBValidator(DetectionValidator):
             b = ops.xywhr2xyxyxyxy(bbox[:, :5]).view(-1, 8)
             for x in torch.cat([b, bbox[:, 5:7]], dim=-1).tolist():
                 classname = self.names[int(x[-1])].replace(" ", "-")
-                poly = [round(i, 3) for i in x[:-2]]
+                p = [round(i, 3) for i in x[:-2]]  # poly
                 score = round(x[-2], 3)
 
-                lines = "{} {} {} {} {} {} {} {} {} {}\n".format(
-                    image_id,
-                    score,
-                    poly[0],
-                    poly[1],
-                    poly[2],
-                    poly[3],
-                    poly[4],
-                    poly[5],
-                    poly[6],
-                    poly[7],
-                )
-                with open(str(pred_merged_txt / f"Task1_{classname}") + ".txt", "a") as f:
-                    f.writelines(lines)
+                with open(f'{pred_merged_txt / f"Task1_{classname}"}.txt', "a") as f:
+                    f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
 
         return stats
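Both rewritten writers emit the same one-detection-per-line DOTA Task1 format. A self-contained sketch with made-up values showing the file naming and line layout (all names and numbers here are hypothetical):

```python
from pathlib import Path

pred_txt = Path("predictions_txt")  # hypothetical output directory
pred_txt.mkdir(parents=True, exist_ok=True)
image_id, score, classname = "P0006__1024__0___0", 0.95, "plane"  # made-up values
p = [10.0, 20.0, 110.0, 20.0, 110.0, 60.0, 10.0, 60.0]  # 8 polygon coordinates
# One "Task1_<class>.txt" file per class, one detection per line
with open(f'{pred_txt / f"Task1_{classname}"}.txt', "a") as f:
    f.write(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
```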
ultralytics/nn/modules/head.py CHANGED
@@ -8,11 +8,10 @@ import torch.nn as nn
 from torch.nn.init import constant_, xavier_uniform_
 
 from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
-
 from .block import DFL, Proto
 from .conv import Conv
 from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
-from .utils import bias_init_with_prob, linear_init_
+from .utils import bias_init_with_prob, linear_init
 
 __all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder"
 
@@ -418,18 +417,18 @@ class RTDETRDecoder(nn.Module):
         """Initializes or resets the parameters of the model's various components with predefined weights and biases."""
         # Class and bbox head init
         bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
-        # NOTE: the weight initialization in `linear_init_` would cause NaN when training with custom datasets.
-        # linear_init_(self.enc_score_head)
+        # NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
+        # linear_init(self.enc_score_head)
         constant_(self.enc_score_head.bias, bias_cls)
         constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
         constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
         for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
-            # linear_init_(cls_)
+            # linear_init(cls_)
             constant_(cls_.bias, bias_cls)
             constant_(reg_.layers[-1].weight, 0.0)
             constant_(reg_.layers[-1].bias, 0.0)
 
-        linear_init_(self.enc_output[0])
+        linear_init(self.enc_output[0])
         xavier_uniform_(self.enc_output[0].weight)
         if self.learnt_init_query:
             xavier_uniform_(self.tgt_embed.weight)
ultralytics/nn/modules/utils.py CHANGED
@@ -23,7 +23,7 @@ def bias_init_with_prob(prior_prob=0.01):
     return float(-np.log((1 - prior_prob) / prior_prob))  # return bias_init
 
 
-def linear_init_(module):
+def linear_init(module):
     """Initialize the weights and biases of a linear module."""
     bound = 1 / math.sqrt(module.weight.shape[0])
     uniform_(module.weight, -bound, bound)
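The rename from linear_init_ to linear_init is visible to any downstream code importing the helper directly. A hedged compatibility shim, assuming nothing beyond the rename shown above:

```python
try:
    from ultralytics.nn.modules.utils import linear_init  # 8.1.3 and later
except ImportError:
    # Older releases exposed the same helper with a trailing underscore
    from ultralytics.nn.modules.utils import linear_init_ as linear_init
```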
ultralytics/nn/tasks.py CHANGED
@@ -339,7 +339,7 @@ class DetectionModel(BaseModel):
 
 
 class OBBModel(DetectionModel):
-    """"YOLOv8 Oriented Bounding Box (OBB) model."""
+    """YOLOv8 Oriented Bounding Box (OBB) model."""
 
     def __init__(self, cfg="yolov8n-obb.yaml", ch=3, nc=None, verbose=True):
         """Initialize YOLOv8 OBB model with given config and parameters."""
ultralytics/solutions/ai_gym.py CHANGED
@@ -78,8 +78,16 @@ class AIGym:
         self.keypoints = results[0].keypoints.data
         self.annotator = Annotator(im0, line_width=2)
 
+        num_keypoints = len(results[0])
+
+        # Resize self.angle, self.count, and self.stage if the number of keypoints has changed
+        if len(self.angle) != num_keypoints:
+            self.angle = [0] * num_keypoints
+            self.count = [0] * num_keypoints
+            self.stage = ["-" for _ in range(num_keypoints)]
+
         for ind, k in enumerate(reversed(self.keypoints)):
-            if self.pose_type == "pushup" or self.pose_type == "pullup":
+            if self.pose_type in ["pushup", "pullup"]:
                 self.angle[ind] = self.annotator.estimate_pose_angle(
                     k[int(self.kpts_to_check[0])].cpu(),
                     k[int(self.kpts_to_check[1])].cpu(),
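The new resizing block in ai_gym.py guards against the number of tracked people changing between frames; previously the per-person angle/count/stage lists kept their initial length, so an extra detection would index past the end. A standalone sketch of the pattern with hypothetical counts:

```python
angle, count, stage = [0] * 2, [0] * 2, ["-"] * 2  # state sized for 2 tracked people
num_keypoints = 3  # a third person enters the frame (len(results[0]) upstream)
if len(angle) != num_keypoints:  # re-size state before indexing per person
    angle = [0] * num_keypoints
    count = [0] * num_keypoints
    stage = ["-" for _ in range(num_keypoints)]
assert len(angle) == num_keypoints  # angle[2] is now a safe index
```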
ultralytics/solutions/distance_calculation.py CHANGED
@@ -86,10 +86,9 @@ class DistanceCalculation:
             self.left_mouse_count += 1
             if self.left_mouse_count <= 2:
                 for box, track_id in zip(self.boxes, self.trk_ids):
-                    if box[0] < x < box[2] and box[1] < y < box[3]:
-                        if track_id not in self.selected_boxes:
-                            self.selected_boxes[track_id] = []
-                        self.selected_boxes[track_id] = box
+                    if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
+                        self.selected_boxes[track_id] = []
+                        self.selected_boxes[track_id] = box
 
         if event == cv2.EVENT_RBUTTONDOWN:
             self.selected_boxes = {}
@@ -149,10 +148,7 @@ class DistanceCalculation:
         if tracks[0].boxes.id is None:
             if self.view_img:
                 self.display_frames()
-                return
-            else:
-                return
-
+            return
         self.extract_tracks(tracks)
 
         self.annotator = Annotator(self.im0, line_width=2)
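The return/else/return collapse above is the same guard-clause cleanup applied in heatmap.py, object_counter.py, and speed_estimation.py below: both branches returned, so a single dedented return is equivalent. A minimal sketch with print stand-ins for the real calls:

```python
def process_frame(tracks, view_img=False):
    if tracks is None:  # stand-in for `tracks[0].boxes.id is None`
        if view_img:
            print("display frame")  # stand-in for self.display_frames()
        return  # one dedented return replaces return / else: return
    print("extract tracks")  # normal processing continues here


process_frame(None, view_img=True)  # shows the frame, then bails out early
```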
ultralytics/solutions/heatmap.py CHANGED
@@ -169,10 +169,7 @@ class Heatmap:
         if tracks[0].boxes.id is None:
             if self.view_img and self.env_check:
                 self.display_frames()
-                return
-            else:
-                return
-
+            return
         self.heatmap *= self.decay_factor  # decay factor
         self.extract_results(tracks)
         self.annotator = Annotator(self.im0, self.count_txt_thickness, None)
@@ -207,23 +204,21 @@ class Heatmap:
 
                 # Count objects
                 if len(self.count_reg_pts) == 4:
-                    if self.counting_region.contains(Point(track_line[-1])):
-                        if track_id not in self.counting_list:
-                            self.counting_list.append(track_id)
-                            if box[0] < self.counting_region.centroid.x:
-                                self.out_counts += 1
-                            else:
-                                self.in_counts += 1
+                    if self.counting_region.contains(Point(track_line[-1])) and track_id not in self.counting_list:
+                        self.counting_list.append(track_id)
+                        if box[0] < self.counting_region.centroid.x:
+                            self.out_counts += 1
+                        else:
+                            self.in_counts += 1
 
                 elif len(self.count_reg_pts) == 2:
                     distance = Point(track_line[-1]).distance(self.counting_region)
-                    if distance < self.line_dist_thresh:
-                        if track_id not in self.counting_list:
-                            self.counting_list.append(track_id)
-                            if box[0] < self.counting_region.centroid.x:
-                                self.out_counts += 1
-                            else:
-                                self.in_counts += 1
+                    if distance < self.line_dist_thresh and track_id not in self.counting_list:
+                        self.counting_list.append(track_id)
+                        if box[0] < self.counting_region.centroid.x:
+                            self.out_counts += 1
+                        else:
+                            self.in_counts += 1
         else:
             for box, cls in zip(self.boxes, self.clss):
                 if self.shape == "circle":
@@ -244,8 +239,8 @@ class Heatmap:
         heatmap_normalized = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX)
         heatmap_colored = cv2.applyColorMap(heatmap_normalized.astype(np.uint8), self.colormap)
 
-        incount_label = "In Count : " + f"{self.in_counts}"
-        outcount_label = "OutCount : " + f"{self.out_counts}"
+        incount_label = f"In Count : {self.in_counts}"
+        outcount_label = f"OutCount : {self.out_counts}"
 
         # Display counts based on user choice
         counts_label = None
@@ -256,7 +251,7 @@ class Heatmap:
         elif not self.view_out_counts:
             counts_label = incount_label
         else:
-            counts_label = incount_label + " " + outcount_label
+            counts_label = f"{incount_label} {outcount_label}"
 
         if self.count_reg_pts is not None and counts_label is not None:
             self.annotator.count_labels(
ultralytics/solutions/object_counter.py CHANGED
@@ -139,11 +139,14 @@ class ObjectCounter:
         # global is_drawing, selected_point
         if event == cv2.EVENT_LBUTTONDOWN:
             for i, point in enumerate(self.reg_pts):
-                if isinstance(point, (tuple, list)) and len(point) >= 2:
-                    if abs(x - point[0]) < 10 and abs(y - point[1]) < 10:
-                        self.selected_point = i
-                        self.is_drawing = True
-                        break
+                if (
+                    isinstance(point, (tuple, list))
+                    and len(point) >= 2
+                    and (abs(x - point[0]) < 10 and abs(y - point[1]) < 10)
+                ):
+                    self.selected_point = i
+                    self.is_drawing = True
+                    break
 
         elif event == cv2.EVENT_MOUSEMOVE:
             if self.is_drawing and self.selected_point is not None:
@@ -166,9 +169,8 @@ class ObjectCounter:
 
         # Extract tracks
         for box, track_id, cls in zip(boxes, track_ids, clss):
-            self.annotator.box_label(
-                box, label=str(track_id) + ":" + self.names[cls], color=colors(int(cls), True)
-            )  # Draw bounding box
+            # Draw bounding box
+            self.annotator.box_label(box, label=f"{track_id}:{self.names[cls]}", color=colors(int(cls), True))
 
             # Draw Tracks
             track_line = self.track_history[track_id]
@@ -186,28 +188,29 @@ class ObjectCounter:
 
             # Count objects
             if len(self.reg_pts) == 4:
-                if prev_position is not None:
-                    if self.counting_region.contains(Point(track_line[-1])):
-                        if track_id not in self.counting_list:
-                            self.counting_list.append(track_id)
-                            if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
-                                self.in_counts += 1
-                            else:
-                                self.out_counts += 1
+                if (
+                    prev_position is not None
+                    and self.counting_region.contains(Point(track_line[-1]))
+                    and track_id not in self.counting_list
+                ):
+                    self.counting_list.append(track_id)
+                    if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
+                        self.in_counts += 1
+                    else:
+                        self.out_counts += 1
 
             elif len(self.reg_pts) == 2:
                 if prev_position is not None:
                     distance = Point(track_line[-1]).distance(self.counting_region)
-                    if distance < self.line_dist_thresh:
-                        if track_id not in self.counting_list:
-                            self.counting_list.append(track_id)
-                            if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
-                                self.in_counts += 1
-                            else:
-                                self.out_counts += 1
+                    if distance < self.line_dist_thresh and track_id not in self.counting_list:
+                        self.counting_list.append(track_id)
+                        if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
+                            self.in_counts += 1
+                        else:
+                            self.out_counts += 1
 
-        incount_label = "In Count : " + f"{self.in_counts}"
-        outcount_label = "OutCount : " + f"{self.out_counts}"
+        incount_label = f"In Count : {self.in_counts}"
+        outcount_label = f"OutCount : {self.out_counts}"
 
         # Display counts based on user choice
         counts_label = None
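The counting condition decides in versus out from the sign of a product: the object's x-displacement multiplied by the vector from its previous position to the region centroid. A worked check with hypothetical coordinates (a positive product is what the counter treats as moving in):

```python
box_x, prev_x, centroid_x = 120.0, 100.0, 300.0  # hypothetical pixel coordinates
moving_in = (box_x - prev_x) * (centroid_x - prev_x) > 0
print(moving_in)  # True: the displacement points toward the region centroid
```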
@@ -218,7 +221,7 @@ class ObjectCounter:
         elif not self.view_out_counts:
             counts_label = incount_label
         else:
-            counts_label = incount_label + " " + outcount_label
+            counts_label = f"{incount_label} {outcount_label}"
 
         if counts_label is not None:
             self.annotator.count_labels(
@@ -254,9 +257,7 @@ class ObjectCounter:
         if tracks[0].boxes.id is None:
             if self.view_img:
                 self.display_frames()
-                return
-            else:
-                return
+            return
         self.extract_and_process_tracks(tracks)
 
         if self.view_img:
ultralytics/solutions/speed_estimation.py CHANGED
@@ -114,9 +114,7 @@ class SpeedEstimator:
             cls (str): object class name
             track (list): tracking history for tracks path drawing
         """
-        speed_label = (
-            str(int(self.dist_data[track_id])) + "km/ph" if track_id in self.dist_data else self.names[int(cls)]
-        )
+        speed_label = f"{int(self.dist_data[track_id])}km/ph" if track_id in self.dist_data else self.names[int(cls)]
         bbox_color = colors(int(track_id)) if track_id in self.dist_data else (255, 0, 255)
 
         self.annotator.box_label(box, speed_label, bbox_color)
@@ -132,28 +130,28 @@ class SpeedEstimator:
             track (list): tracking history for tracks path drawing
         """
 
-        if self.reg_pts[0][0] < track[-1][0] < self.reg_pts[1][0]:
-            if self.reg_pts[1][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[1][1] + self.spdl_dist_thresh:
-                direction = "known"
+        if not self.reg_pts[0][0] < track[-1][0] < self.reg_pts[1][0]:
+            return
+        if self.reg_pts[1][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[1][1] + self.spdl_dist_thresh:
+            direction = "known"
 
-            elif self.reg_pts[0][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[0][1] + self.spdl_dist_thresh:
-                direction = "known"
+        elif self.reg_pts[0][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[0][1] + self.spdl_dist_thresh:
+            direction = "known"
 
-            else:
-                direction = "unknown"
+        else:
+            direction = "unknown"
 
-            if self.trk_previous_times[trk_id] != 0 and direction != "unknown":
-                if trk_id not in self.trk_idslist:
-                    self.trk_idslist.append(trk_id)
+        if self.trk_previous_times[trk_id] != 0 and direction != "unknown" and trk_id not in self.trk_idslist:
+            self.trk_idslist.append(trk_id)
 
-                    time_difference = time() - self.trk_previous_times[trk_id]
-                    if time_difference > 0:
-                        dist_difference = np.abs(track[-1][1] - self.trk_previous_points[trk_id][1])
-                        speed = dist_difference / time_difference
-                        self.dist_data[trk_id] = speed
+            time_difference = time() - self.trk_previous_times[trk_id]
+            if time_difference > 0:
+                dist_difference = np.abs(track[-1][1] - self.trk_previous_points[trk_id][1])
+                speed = dist_difference / time_difference
+                self.dist_data[trk_id] = speed
 
-            self.trk_previous_times[trk_id] = time()
-            self.trk_previous_points[trk_id] = track[-1]
+        self.trk_previous_times[trk_id] = time()
+        self.trk_previous_points[trk_id] = track[-1]
 
     def estimate_speed(self, im0, tracks):
         """
@@ -166,10 +164,7 @@ class SpeedEstimator:
         if tracks[0].boxes.id is None:
             if self.view_img and self.env_check:
                 self.display_frames()
-                return
-            else:
-                return
-
+            return
         self.extract_tracks(tracks)
 
         self.annotator = Annotator(self.im0, line_width=2)
ultralytics/trackers/track.py CHANGED
@@ -7,7 +7,6 @@ import torch
 
 from ultralytics.utils import IterableSimpleNamespace, yaml_load
 from ultralytics.utils.checks import check_yaml
-
 from .bot_sort import BOTSORT
 from .byte_tracker import BYTETracker
 
ultralytics/trackers/utils/gmc.py CHANGED
@@ -67,7 +67,7 @@ class GMC:
                 maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04
             )
 
-        elif self.method in ["none", "None", None]:
+        elif self.method in {"none", "None", None}:
             self.method = None
         else:
             raise ValueError(f"Error: Unknown GMC method:{method}")
ultralytics/trackers/utils/matching.py CHANGED
@@ -70,9 +70,7 @@ def iou_distance(atracks: list, btracks: list) -> np.ndarray:
         (np.ndarray): Cost matrix computed based on IoU.
     """
 
-    if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (
-        len(btracks) > 0 and isinstance(btracks[0], np.ndarray)
-    ):
+    if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray):
         atlbrs = atracks
         btlbrs = btracks
     else:
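The simplified condition leans on two Python facts: empty lists are falsy (replacing the len(...) > 0 checks), and `and` binds tighter than `or`, so dropping the parentheses changes nothing. A quick standalone check:

```python
import numpy as np


def flagged(atracks, btracks):
    # `and` binds tighter than `or`; empty lists short-circuit to falsy
    return atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray)


print(bool(flagged([], [np.zeros(4)])))  # True: btracks holds raw arrays
print(bool(flagged([], [])))             # False: nothing to inspect
```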
ultralytics/utils/benchmarks.py CHANGED
@@ -26,7 +26,6 @@ ncnn | `ncnn` | yolov8n_ncnn_model/
 
 import glob
 import platform
-import sys
 import time
 from pathlib import Path
 
@@ -85,12 +84,8 @@ def benchmark(
         emoji, filename = "❌", None  # export defaults
         try:
             assert i != 9 or LINUX, "Edge TPU export only supported on Linux"
-            if i == 5:
-                assert MACOS or LINUX, "CoreML export only supported on macOS and Linux"
-            elif i == 10:
-                assert MACOS or LINUX, "TF.js export only supported on macOS and Linux"
-            # elif i == 11:
-            #     assert sys.version_info < (3, 11), "PaddlePaddle export only supported on Python<=3.10"
+            if i in {5, 10}:  # CoreML and TF.js
+                assert MACOS or LINUX, "export only supported on macOS and Linux"
             if "cpu" in device.type:
                 assert cpu, "inference not supported on CPU"
             if "cuda" in device.type:
ultralytics/utils/callbacks/base.py CHANGED
@@ -4,6 +4,7 @@
 from collections import defaultdict
 from copy import deepcopy
 
+
 # Trainer callbacks ----------------------------------------------------------------------------------------------------
 
 
ultralytics/utils/callbacks/comet.py CHANGED
@@ -105,12 +105,7 @@ def _fetch_trainer_metadata(trainer):
     save_interval = curr_epoch % save_period == 0
     save_assets = save and save_period > 0 and save_interval and not final_epoch
 
-    return dict(
-        curr_epoch=curr_epoch,
-        curr_step=curr_step,
-        save_assets=save_assets,
-        final_epoch=final_epoch,
-    )
+    return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch)
 
 
 def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad):
@@ -218,11 +213,7 @@ def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch):
     conf_mat = trainer.validator.confusion_matrix.matrix
     names = list(trainer.data["names"].values()) + ["background"]
     experiment.log_confusion_matrix(
-        matrix=conf_mat,
-        labels=names,
-        max_categories=len(names),
-        epoch=curr_epoch,
-        step=curr_step,
+        matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
     )
 
 
@@ -294,12 +285,7 @@ def _log_plots(experiment, trainer):
 def _log_model(experiment, trainer):
     """Log the best-trained model to Comet.ml."""
     model_name = _get_comet_model_name()
-    experiment.log_model(
-        model_name,
-        file_or_folder=str(trainer.best),
-        file_name="best.pt",
-        overwrite=True,
-    )
+    experiment.log_model(model_name, file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True)
 
 
 def on_pretrain_routine_start(trainer):
@@ -320,11 +306,7 @@ def on_train_epoch_end(trainer):
     curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]
 
-    experiment.log_metrics(
-        trainer.label_loss_items(trainer.tloss, prefix="train"),
-        step=curr_step,
-        epoch=curr_epoch,
-    )
+    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=curr_step, epoch=curr_epoch)
 
     if curr_epoch == 1:
         _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step)
ultralytics/utils/callbacks/hub.py CHANGED
@@ -3,9 +3,7 @@
 import json
 from time import time
 
-from hub_sdk.config import HUB_WEB_ROOT
-
-from ultralytics.hub.utils import PREFIX, events
+from ultralytics.hub.utils import HUB_WEB_ROOT, PREFIX, events
 from ultralytics.utils import LOGGER, SETTINGS
 
 
ultralytics/utils/callbacks/neptune.py CHANGED
@@ -96,9 +96,7 @@ def on_train_end(trainer):
     for f in files:
         _log_plot(title=f.stem, plot_path=f)
     # Log the final model
-    run[f"weights/{trainer.args.name or trainer.args.task}/{str(trainer.best.name)}"].upload(
-        File(str(trainer.best))
-    )
+    run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best)))
 
 
 callbacks = (
ultralytics/utils/callbacks/tensorboard.py CHANGED
@@ -10,8 +10,9 @@ try:
     assert SETTINGS["tensorboard"] is True  # verify integration is enabled
     WRITER = None  # TensorBoard SummaryWriter instance
 
-except (ImportError, AssertionError, TypeError):
+except (ImportError, AssertionError, TypeError, AttributeError):
     # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
+    # AttributeError: module 'tensorflow' has no attribute 'io' if 'tensorflow' not installed
     SummaryWriter = None
 