ultralytics 8.3.167__py3-none-any.whl → 8.3.168__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ultralytics/__init__.py +1 -1
- ultralytics/models/rtdetr/val.py +22 -38
- ultralytics/models/yolo/detect/val.py +23 -17
- ultralytics/models/yolo/obb/val.py +15 -31
- ultralytics/models/yolo/pose/val.py +11 -46
- ultralytics/models/yolo/segment/val.py +12 -40
- ultralytics/solutions/region_counter.py +2 -1
- ultralytics/solutions/similarity_search.py +2 -1
- ultralytics/solutions/solutions.py +30 -63
- ultralytics/solutions/streamlit_inference.py +57 -14
- {ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/METADATA +1 -1
- {ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/RECORD +16 -16
- {ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py
CHANGED
ultralytics/models/rtdetr/val.py
CHANGED
@@ -1,5 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from pathlib import Path
 from typing import Any, Dict, List, Tuple, Union
 
 import torch
@@ -186,45 +187,28 @@ class RTDETRValidator(DetectionValidator):
 
         return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5]} for x in outputs]
 
-    def
+    def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
         """
-        …
+        Serialize YOLO predictions to COCO json format.
 
         Args:
-            …
-        Returns:
-            (Dict[str, Any]): Prepared batch with transformed annotations containing cls, bboxes,
-                ori_shape, imgsz, and ratio_pad.
-        """
-        idx = batch["batch_idx"] == si
-        cls = batch["cls"][idx].squeeze(-1)
-        bbox = batch["bboxes"][idx]
-        ori_shape = batch["ori_shape"][si]
-        imgsz = batch["img"].shape[2:]
-        ratio_pad = batch["ratio_pad"][si]
-        if len(cls):
-            bbox = ops.xywh2xyxy(bbox)  # target boxes
-            bbox[..., [0, 2]] *= ori_shape[1]  # native-space pred
-            bbox[..., [1, 3]] *= ori_shape[0]  # native-space pred
-        return {"cls": cls, "bboxes": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
-
-    def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
+            predn (Dict[str, torch.Tensor]): Predictions dictionary containing 'bboxes', 'conf', and 'cls' keys
+                with bounding box coordinates, confidence scores, and class predictions.
+            pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
         """
-        …
+        stem = Path(pbatch["im_file"]).stem
+        image_id = int(stem) if stem.isnumeric() else stem
+        box = predn["bboxes"].clone()
+        box[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz  # native-space pred
+        box[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz  # native-space pred
+        box = ops.xyxy2xywh(box)  # xywh
+        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+        for b, s, c in zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
+            self.jdict.append(
+                {
+                    "image_id": image_id,
+                    "category_id": self.class_map[int(c)],
+                    "bbox": [round(x, 3) for x in b],
+                    "score": round(s, 5),
+                }
+            )
ultralytics/models/yolo/detect/val.py
CHANGED
@@ -147,28 +147,28 @@ class DetectionValidator(BaseValidator):
         ratio_pad = batch["ratio_pad"][si]
         if len(cls):
             bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]  # target boxes
-        …
+        return {
+            "cls": cls,
+            "bboxes": bbox,
+            "ori_shape": ori_shape,
+            "imgsz": imgsz,
+            "ratio_pad": ratio_pad,
+            "im_file": batch["im_file"][si],
+        }
 
-    def _prepare_pred(self, pred: Dict[str, torch.Tensor]
+    def _prepare_pred(self, pred: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
         """
         Prepare predictions for evaluation against ground truth.
 
         Args:
             pred (Dict[str, torch.Tensor]): Post-processed predictions from the model.
-            pbatch (Dict[str, Any]): Prepared batch information.
 
         Returns:
             (Dict[str, torch.Tensor]): Prepared predictions in native space.
         """
-        cls = pred["cls"]
         if self.args.single_cls:
-            cls *= 0
-        …
-        bboxes = ops.scale_boxes(
-            pbatch["imgsz"], pred["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
-        )  # native-space pred
-        return {"bboxes": bboxes, "conf": pred["conf"], "cls": cls}
+            pred["cls"] *= 0
+        return pred
 
     def update_metrics(self, preds: List[Dict[str, torch.Tensor]], batch: Dict[str, Any]) -> None:
         """
@@ -181,7 +181,7 @@ class DetectionValidator(BaseValidator):
         for si, pred in enumerate(preds):
             self.seen += 1
             pbatch = self._prepare_batch(si, batch)
-            predn = self._prepare_pred(pred
+            predn = self._prepare_pred(pred)
 
             cls = pbatch["cls"].cpu().numpy()
             no_pred = len(predn["cls"]) == 0
@@ -203,7 +203,7 @@ class DetectionValidator(BaseValidator):
 
             # Save
             if self.args.save_json:
-                self.pred_to_json(predn,
+                self.pred_to_json(predn, pbatch)
             if self.args.save_txt:
                 self.save_one_txt(
                     predn,
@@ -360,18 +360,24 @@ class DetectionValidator(BaseValidator):
             boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
         ).save_txt(file, save_conf=save_conf)
 
-    def pred_to_json(self, predn: Dict[str, torch.Tensor],
+    def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
         """
         Serialize YOLO predictions to COCO json format.
 
         Args:
             predn (Dict[str, torch.Tensor]): Predictions dictionary containing 'bboxes', 'conf', and 'cls' keys
                 with bounding box coordinates, confidence scores, and class predictions.
-            …
+            pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
         """
-        stem = Path(
+        stem = Path(pbatch["im_file"]).stem
         image_id = int(stem) if stem.isnumeric() else stem
-        box = ops.
+        box = ops.scale_boxes(
+            pbatch["imgsz"],
+            predn["bboxes"].clone(),
+            pbatch["ori_shape"],
+            ratio_pad=pbatch["ratio_pad"],
+        )
+        box = ops.xyxy2xywh(box)  # xywh
         box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
         for b, s, c in zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
             self.jdict.append(
ultralytics/models/yolo/obb/val.py
CHANGED
@@ -1,7 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 from pathlib import Path
-from typing import Any, Dict, List, Tuple
+from typing import Any, Dict, List, Tuple
 
 import numpy as np
 import torch
@@ -132,33 +132,14 @@ class OBBValidator(DetectionValidator):
         ratio_pad = batch["ratio_pad"][si]
         if len(cls):
             bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]])  # target boxes
-        …
-            input dimensions to the original image dimensions using the provided batch information.
-
-        Args:
-            pred (Dict[str, torch.Tensor]): Prediction dictionary containing bounding box coordinates and other information.
-            pbatch (Dict[str, Any]): Dictionary containing batch information with keys:
-                - imgsz (tuple): Model input image size.
-                - ori_shape (tuple): Original image shape.
-                - ratio_pad (tuple): Ratio and padding information for scaling.
-
-        Returns:
-            (Dict[str, torch.Tensor]): Scaled prediction dictionary with bounding boxes in original image dimensions.
-        """
-        cls = pred["cls"]
-        if self.args.single_cls:
-            cls *= 0
-        bboxes = ops.scale_boxes(
-            pbatch["imgsz"], pred["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
-        )  # native-space pred
-        return {"bboxes": bboxes, "conf": pred["conf"], "cls": cls}
+        return {
+            "cls": cls,
+            "bboxes": bbox,
+            "ori_shape": ori_shape,
+            "imgsz": imgsz,
+            "ratio_pad": ratio_pad,
+            "im_file": batch["im_file"][si],
+        }
 
     def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int) -> None:
         """
@@ -180,23 +161,26 @@ class OBBValidator(DetectionValidator):
             p["bboxes"][:, :4] = ops.xywh2xyxy(p["bboxes"][:, :4])  # convert to xyxy format for plotting
         super().plot_predictions(batch, preds, ni)  # plot bboxes
 
-    def pred_to_json(self, predn: Dict[str, torch.Tensor],
+    def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
         """
         Convert YOLO predictions to COCO JSON format with rotated bounding box information.
 
         Args:
             predn (Dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', and 'cls' keys
                 with bounding box coordinates, confidence scores, and class predictions.
-            …
+            pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
 
         Notes:
             This method processes rotated bounding box predictions and converts them to both rbox format
             (x, y, w, h, angle) and polygon format (x1, y1, x2, y2, x3, y3, x4, y4) before adding them
             to the JSON dictionary.
         """
-        stem = Path(
+        stem = Path(pbatch["im_file"]).stem
         image_id = int(stem) if stem.isnumeric() else stem
         rbox = predn["bboxes"]
+        rbox = ops.scale_boxes(
+            pbatch["imgsz"], predn["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
+        )  # native-space pred
         poly = ops.xywhr2xyxyxyxy(rbox).view(-1, 8)
         for r, b, s, c in zip(rbox.tolist(), poly.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
             self.jdict.append(
ultralytics/models/yolo/pose/val.py
CHANGED
@@ -167,34 +167,9 @@ class PoseValidator(DetectionValidator):
             kpts = kpts.clone()
             kpts[..., 0] *= w
             kpts[..., 1] *= h
-            kpts = ops.scale_coords(pbatch["imgsz"], kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
             pbatch["keypoints"] = kpts
         return pbatch
 
-    def _prepare_pred(self, pred: Dict[str, Any], pbatch: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Prepare and scale keypoints in predictions for pose processing.
-
-        This method extends the parent class's _prepare_pred method to handle keypoint scaling. It first calls
-        the parent method to get the basic prediction boxes, then extracts and scales the keypoint coordinates
-        to match the original image dimensions.
-
-        Args:
-            pred (Dict[str, torch.Tensor]): Post-processed predictions from the model.
-            pbatch (Dict[str, Any]): Processed batch dictionary containing image information including:
-                - imgsz: Image size used for inference
-                - ori_shape: Original image shape
-                - ratio_pad: Ratio and padding information for coordinate scaling
-
-        Returns:
-            (Dict[str, Any]): Processed prediction dictionary with keypoints scaled to original image dimensions.
-        """
-        predn = super()._prepare_pred(pred, pbatch)
-        predn["keypoints"] = ops.scale_coords(
-            pbatch["imgsz"], pred.get("keypoints").clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
-        )
-        return predn
-
     def _process_batch(self, preds: Dict[str, torch.Tensor], batch: Dict[str, Any]) -> Dict[str, np.ndarray]:
         """
         Return correct prediction matrix by computing Intersection over Union (IoU) between detections and ground truth.
@@ -249,7 +224,7 @@ class PoseValidator(DetectionValidator):
             keypoints=predn["keypoints"],
         ).save_txt(file, save_conf=save_conf)
 
-    def pred_to_json(self, predn: Dict[str, torch.Tensor],
+    def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
         """
         Convert YOLO predictions to COCO JSON format.
 
@@ -259,32 +234,22 @@ class PoseValidator(DetectionValidator):
         Args:
             predn (Dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', 'cls',
                 and 'keypoints' tensors.
-            …
+            pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
 
         Notes:
             The method extracts the image ID from the filename stem (either as an integer if numeric, or as a string),
             converts bounding boxes from xyxy to xywh format, and adjusts coordinates from center to top-left corner
             before saving to the JSON dictionary.
         """
-        …
-        ):
-            self.jdict.append(
-                {
-                    "image_id": image_id,
-                    "category_id": self.class_map[int(c)],
-                    "bbox": [round(x, 3) for x in b],
-                    "keypoints": k,
-                    "score": round(s, 5),
-                }
-            )
+        super().pred_to_json(predn, pbatch)
+        kpts = ops.scale_coords(
+            pbatch["imgsz"],
+            predn["keypoints"].clone(),
+            pbatch["ori_shape"],
+            ratio_pad=pbatch["ratio_pad"],
+        )
+        for i, k in enumerate(kpts.flatten(1, 2).tolist()):
+            self.jdict[-len(kpts) + i]["keypoints"] = k  # keypoints
 
     def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
         """Evaluate object detection model using COCO JSON format."""
ultralytics/models/yolo/segment/val.py
CHANGED
@@ -135,29 +135,6 @@ class SegmentationValidator(DetectionValidator):
             prepared_batch["masks"] = batch["masks"][midx]
         return prepared_batch
 
-    def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
-        """
-        Prepare predictions for evaluation by processing bounding boxes and masks.
-
-        Args:
-            pred (Dict[str, torch.Tensor]): Post-processed predictions from the model.
-            pbatch (Dict[str, Any]): Prepared batch information.
-
-        Returns:
-            Dict[str, torch.Tensor]: Processed bounding box predictions.
-        """
-        predn = super()._prepare_pred(pred, pbatch)
-        predn["masks"] = pred["masks"]
-        if self.args.save_json and len(predn["masks"]):
-            coco_masks = torch.as_tensor(pred["masks"], dtype=torch.uint8)
-            coco_masks = ops.scale_image(
-                coco_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
-                pbatch["ori_shape"],
-                ratio_pad=pbatch["ratio_pad"],
-            )
-            predn["coco_masks"] = coco_masks
-        return predn
-
     def _process_batch(self, preds: Dict[str, torch.Tensor], batch: Dict[str, Any]) -> Dict[str, np.ndarray]:
         """
         Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
@@ -233,13 +210,13 @@ class SegmentationValidator(DetectionValidator):
             masks=torch.as_tensor(predn["masks"], dtype=torch.uint8),
         ).save_txt(file, save_conf=save_conf)
 
-    def pred_to_json(self, predn: torch.Tensor,
+    def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
         """
         Save one JSON result for COCO evaluation.
 
         Args:
             predn (Dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
-            …
+            pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
 
         Examples:
             >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
@@ -252,23 +229,18 @@ class SegmentationValidator(DetectionValidator):
             rle["counts"] = rle["counts"].decode("utf-8")
             return rle
 
-        …
+        coco_masks = torch.as_tensor(predn["masks"], dtype=torch.uint8)
+        coco_masks = ops.scale_image(
+            coco_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
+            pbatch["ori_shape"],
+            ratio_pad=pbatch["ratio_pad"],
+        )
+        pred_masks = np.transpose(coco_masks, (2, 0, 1))
         with ThreadPool(NUM_THREADS) as pool:
             rles = pool.map(single_encode, pred_masks)
-        …
-                    "image_id": image_id,
-                    "category_id": self.class_map[int(c)],
-                    "bbox": [round(x, 3) for x in b],
-                    "score": round(s, 5),
-                    "segmentation": rles[i],
-                }
-            )
+        super().pred_to_json(predn, pbatch)
+        for i, r in enumerate(rles):
+            self.jdict[-len(rles) + i]["segmentation"] = r  # segmentation
 
     def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
         """Return COCO-style instance segmentation evaluation metrics."""
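The pose and segment validators now share the same structure: they delegate box serialization to the parent pred_to_json and then attach their extra fields to the entries the parent just appended. A small self-contained sketch of that append-then-patch idiom (hypothetical names, for illustration only):

    records = []

    def append_boxes(boxes):  # stands in for DetectionValidator.pred_to_json
        records.extend({"bbox": b} for b in boxes)

    def append_with_masks(boxes, rles):  # stands in for SegmentationValidator.pred_to_json
        append_boxes(boxes)
        for i, rle in enumerate(rles):
            records[-len(rles) + i]["segmentation"] = rle  # patch the entries just appended

    append_with_masks([[258.15, 41.29, 348.26, 243.78]], [{"size": [480, 640], "counts": "abc"}])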
ultralytics/solutions/region_counter.py
CHANGED
@@ -118,12 +118,13 @@ class RegionCounter(BaseSolution):
             x1, y1, x2, y2 = map(int, region["polygon"].bounds)
             pts = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
             annotator.draw_region(pts, region["region_color"], self.line_width * 2)
-            annotator.
+            annotator.adaptive_label(
                 [x1, y1, x2, y2],
                 label=str(region["counts"]),
                 color=region["region_color"],
                 txt_color=region["text_color"],
                 margin=self.line_width * 4,
+                shape="rect",
             )
             region["counts"] = 0  # Reset for next frame
         plot_im = annotator.result()
ultralytics/solutions/similarity_search.py
CHANGED
@@ -8,7 +8,6 @@ import numpy as np
 from PIL import Image
 
 from ultralytics.data.utils import IMG_FORMATS
-from ultralytics.nn.text_model import build_text_model
 from ultralytics.utils import LOGGER
 from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.torch_utils import select_device
@@ -48,6 +47,8 @@ class VisualAISearch:
 
     def __init__(self, **kwargs: Any) -> None:
        """Initialize the VisualAISearch class with FAISS index and CLIP model."""
+        from ultralytics.nn.text_model import build_text_model
+
         check_requirements("faiss-cpu")
 
         self.faiss = __import__("faiss")
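Moving the build_text_model import from module scope into __init__ means that merely importing similarity_search no longer pulls in the text-model stack; the dependency loads only when a VisualAISearch object is created. A generic sketch of this lazy-import pattern (stdlib stand-in module, illustrative only):

    class LazyClient:
        def __init__(self) -> None:
            import json  # deferred: stands in for a heavy optional dependency

            self._codec = json

        def dump(self, record: dict) -> str:
            return self._codec.dumps(record)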
ultralytics/solutions/solutions.py
CHANGED
@@ -287,8 +287,7 @@ class SolutionAnnotator(Annotator):
         display_objects_labels: Annotate bounding boxes with object class labels.
         sweep_annotator: Visualize a vertical sweep line and optional label.
         visioneye: Map and connect object centroids to a visual "eye" point.
-        …
-        text_label: Draw a rectangular label within a bounding box.
+        adaptive_label: Draw a circular or rectangle background shape label in center of a bounding box.
 
     Examples:
         >>> annotator = SolutionAnnotator(image)
@@ -695,90 +694,58 @@ class SolutionAnnotator(Annotator):
         cv2.circle(self.im, center_bbox, self.tf * 2, color, -1)
         cv2.line(self.im, center_point, center_bbox, color, self.tf)
 
-    def
+    def adaptive_label(
         self,
         box: Tuple[float, float, float, float],
         label: str = "",
         color: Tuple[int, int, int] = (128, 128, 128),
         txt_color: Tuple[int, int, int] = (255, 255, 255),
-        …
+        shape: str = "rect",
+        margin: int = 5,
     ):
         """
-        Draw a label with a background circle centered within a given bounding box.
+        Draw a label with a background rectangle or circle centered within a given bounding box.
 
         Args:
             box (Tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
             label (str): The text label to be displayed.
-            color (Tuple[int, int, int]): The background color of the
+            color (Tuple[int, int, int]): The background color of the rectangle (B, G, R).
             txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
-            …
+            shape (str): The shape of the label i.e "circle" or "rect"
+            margin (int): The margin between the text and the rectangle border.
         """
-        if len(label) > 3:
+        if shape == "circle" and len(label) > 3:
             LOGGER.warning(f"Length of label is {len(label)}, only first 3 letters will be used for circle annotation.")
             label = label[:3]
 
-        # Calculate
-        …
-        #
-        text_size = cv2.getTextSize(str(label), cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.15, self.tf)[0]
-        # Calculate the required radius to fit the text with the margin
-        required_radius = int(((text_size[0] ** 2 + text_size[1] ** 2) ** 0.5) / 2) + margin
-        # Draw the circle with the required radius
-        cv2.circle(self.im, (x_center, y_center), required_radius, color, -1)
-        # Calculate the position for the text
-        text_x = x_center - text_size[0] // 2
-        text_y = y_center + text_size[1] // 2
-        # Draw the text
-        cv2.putText(
-            self.im,
-            str(label),
-            (text_x, text_y),
-            cv2.FONT_HERSHEY_SIMPLEX,
-            self.sf - 0.15,
-            self.get_txt_color(color, txt_color),
-            self.tf,
-            lineType=cv2.LINE_AA,
-        )
+        x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)  # Calculate center of the bbox
+        text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.15, self.tf)[0]  # Get size of the text
+        text_x, text_y = x_center - text_size[0] // 2, y_center + text_size[1] // 2  # Calculate top-left corner of text
 
-        …
+        if shape == "circle":
+            cv2.circle(
+                self.im,
+                (x_center, y_center),
+                int(((text_size[0] ** 2 + text_size[1] ** 2) ** 0.5) / 2) + margin,  # Calculate the radius
+                color,
+                -1,
+            )
+        else:
+            cv2.rectangle(
+                self.im,
+                (text_x - margin, text_y - text_size[1] - margin),  # Calculate coordinates of the rectangle
+                (text_x + text_size[0] + margin, text_y + margin),  # Calculate coordinates of the rectangle
+                color,
+                -1,
+            )
 
-        Args:
-            box (Tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
-            label (str): The text label to be displayed.
-            color (Tuple[int, int, int]): The background color of the rectangle (B, G, R).
-            txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
-            margin (int): The margin between the text and the rectangle border.
-        """
-        # Calculate the center of the bounding box
-        x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
-        # Get the size of the text
-        text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.1, self.tf)[0]
-        # Calculate the top-left corner of the text (to center it)
-        text_x = x_center - text_size[0] // 2
-        text_y = y_center + text_size[1] // 2
-        # Calculate the coordinates of the background rectangle
-        rect_x1 = text_x - margin
-        rect_y1 = text_y - text_size[1] - margin
-        rect_x2 = text_x + text_size[0] + margin
-        rect_y2 = text_y + margin
-        # Draw the background rectangle
-        cv2.rectangle(self.im, (rect_x1, rect_y1), (rect_x2, rect_y2), color, -1)
         # Draw the text on top of the rectangle
         cv2.putText(
             self.im,
             label,
-            (text_x, text_y),
+            (text_x, text_y),  # Calculate top-left corner of the text
             cv2.FONT_HERSHEY_SIMPLEX,
-            self.sf - 0.
+            self.sf - 0.15,
             self.get_txt_color(color, txt_color),
             self.tf,
             lineType=cv2.LINE_AA,
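The new adaptive_label folds the old circle and rectangle label helpers into one method selected by shape, which is why region_counter now passes shape="rect". A minimal usage sketch (the frame and box values are made up for illustration; the method name and keyword arguments come from this diff):

    import numpy as np
    from ultralytics.solutions.solutions import SolutionAnnotator

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder image
    annotator = SolutionAnnotator(frame)
    annotator.adaptive_label([100, 100, 220, 180], label="12", color=(255, 0, 0), shape="rect", margin=8)
    annotator.adaptive_label([300, 100, 420, 180], label="7", color=(0, 128, 0), shape="circle")
    result = annotator.result()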
ultralytics/solutions/streamlit_inference.py
CHANGED
@@ -1,6 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 import io
+import os
 from typing import Any, List
 
 import cv2
@@ -64,6 +65,7 @@ class Inference:
 
         self.st = st  # Reference to the Streamlit module
         self.source = None  # Video source selection (webcam or video file)
+        self.img_file_names = []  # List of image file names
         self.enable_trk = False  # Flag to toggle object tracking
         self.conf = 0.25  # Confidence threshold for detection
         self.iou = 0.45  # Intersection-over-Union (IoU) threshold for non-maximum suppression
@@ -85,13 +87,13 @@ class Inference:
         menu_style_cfg = """<style>MainMenu {visibility: hidden;}</style>"""  # Hide main menu style
 
         # Main title of streamlit application
-        main_title_cfg = """<div><h1 style="color:#
+        main_title_cfg = """<div><h1 style="color:#111F68; text-align:center; font-size:40px; margin-top:-50px;
         font-family: 'Archivo', sans-serif; margin-bottom:20px;">Ultralytics YOLO Streamlit Application</h1></div>"""
 
         # Subtitle of streamlit application
-        sub_title_cfg = """<div><
-        margin-top:-15px; margin-bottom:50px;">Experience real-time object detection on your webcam
-        of Ultralytics YOLO! 🚀</
+        sub_title_cfg = """<div><h5 style="color:#042AFF; text-align:center; font-family: 'Archivo', sans-serif;
+        margin-top:-15px; margin-bottom:50px;">Experience real-time object detection on your webcam, videos, and images
+        with the power of Ultralytics YOLO! 🚀</h5></div>"""
 
         # Set html page configuration and append custom HTML
         self.st.set_page_config(page_title="Ultralytics Streamlit App", layout="wide")
@@ -107,24 +109,28 @@ class Inference:
 
         self.st.sidebar.title("User Configuration")  # Add elements to vertical setting menu
         self.source = self.st.sidebar.selectbox(
-            "
-            ("webcam", "video"),
+            "Source",
+            ("webcam", "video", "image"),
         )  # Add source selection dropdown
-        …
+        if self.source in ["webcam", "video"]:
+            self.enable_trk = self.st.sidebar.radio("Enable Tracking", ("Yes", "No")) == "Yes"  # Enable object tracking
         self.conf = float(
             self.st.sidebar.slider("Confidence Threshold", 0.0, 1.0, self.conf, 0.01)
         )  # Slider for confidence
         self.iou = float(self.st.sidebar.slider("IoU Threshold", 0.0, 1.0, self.iou, 0.01))  # Slider for NMS threshold
 
-        …
+        if self.source != "image":  # Only create columns for video/webcam
+            col1, col2 = self.st.columns(2)  # Create two columns for displaying frames
+            self.org_frame = col1.empty()  # Container for original frame
+            self.ann_frame = col2.empty()  # Container for annotated frame
 
     def source_upload(self) -> None:
         """Handle video file uploads through the Streamlit interface."""
+        from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS  # scope import
+
         self.vid_file_name = ""
         if self.source == "video":
-            vid_file = self.st.sidebar.file_uploader("Upload Video File", type=
+            vid_file = self.st.sidebar.file_uploader("Upload Video File", type=VID_FORMATS)
             if vid_file is not None:
                 g = io.BytesIO(vid_file.read())  # BytesIO Object
                 with open("ultralytics.mp4", "wb") as out:  # Open temporary file as bytes
@@ -132,6 +138,15 @@ class Inference:
                 self.vid_file_name = "ultralytics.mp4"
         elif self.source == "webcam":
             self.vid_file_name = 0  # Use webcam index 0
+        elif self.source == "image":
+            import tempfile  # scope import
+
+            imgfiles = self.st.sidebar.file_uploader("Upload Image Files", type=IMG_FORMATS, accept_multiple_files=True)
+            if imgfiles:
+                for imgfile in imgfiles:  # Save each uploaded image to a temporary file
+                    with tempfile.NamedTemporaryFile(delete=False, suffix=f".{imgfile.name.split('.')[-1]}") as tf:
+                        tf.write(imgfile.read())
+                        self.img_file_names.append({"path": tf.name, "name": imgfile.name})
 
     def configure(self) -> None:
         """Configure the model and load selected classes for inference."""
@@ -161,6 +176,27 @@ class Inference:
         if not isinstance(self.selected_ind, list):  # Ensure selected_options is a list
             self.selected_ind = list(self.selected_ind)
 
+    def image_inference(self) -> None:
+        """Perform inference on uploaded images."""
+        for idx, img_info in enumerate(self.img_file_names):
+            img_path = img_info["path"]
+            image = cv2.imread(img_path)  # Load and display the original image
+            if image is not None:
+                self.st.markdown(f"#### Processed: {img_info['name']}")
+                col1, col2 = self.st.columns(2)
+                with col1:
+                    self.st.image(image, channels="BGR", caption="Original Image")
+                results = self.model(image, conf=self.conf, iou=self.iou, classes=self.selected_ind)
+                annotated_image = results[0].plot()
+                with col2:
+                    self.st.image(annotated_image, channels="BGR", caption="Predicted Image")
+                try:  # Clean up temporary file
+                    os.unlink(img_path)
+                except FileNotFoundError:
+                    pass  # File doesn't exist, ignore
+            else:
+                self.st.error("Could not load the uploaded image.")
+
     def inference(self) -> None:
         """Perform real-time object detection inference on video or webcam feed."""
         self.web_ui()  # Initialize the web interface
@@ -169,7 +205,14 @@ class Inference:
         self.configure()  # Configure the app
 
         if self.st.sidebar.button("Start"):
-            …
+            if self.source == "image":
+                if self.img_file_names:
+                    self.image_inference()
+                else:
+                    self.st.info("Please upload an image file to perform inference.")
+                return
+
+            stop_button = self.st.sidebar.button("Stop")  # Button to stop the inference
             cap = cv2.VideoCapture(self.vid_file_name)  # Capture the video
             if not cap.isOpened():
                 self.st.error("Could not open webcam or video source.")
@@ -195,8 +238,8 @@ class Inference:
                     cap.release()  # Release the capture
                     self.st.stop()  # Stop streamlit app
 
-                self.org_frame.image(frame, channels="BGR")  # Display original frame
-                self.ann_frame.image(annotated_frame, channels="BGR")  # Display processed
+                self.org_frame.image(frame, channels="BGR", caption="Original Frame")  # Display original frame
+                self.ann_frame.image(annotated_frame, channels="BGR", caption="Predicted Frame")  # Display processed
 
             cap.release()  # Release the capture
             cv2.destroyAllWindows()  # Destroy all OpenCV windows
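With the new "image" source, the app handles one-off image uploads alongside webcam and video streams. One way to try it locally, assuming streamlit and ultralytics are installed (the model name here is just an example):

    # save as app.py, then run:  streamlit run app.py
    from ultralytics.solutions.streamlit_inference import Inference

    Inference(model="yolo11n.pt").inference()  # sidebar now offers webcam, video, or image sources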
{ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.167
+Version: 8.3.168
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.167.dist-info → ultralytics-8.3.168.dist-info}/RECORD
CHANGED
@@ -7,7 +7,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
 tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
 tests/test_python.py,sha256=JJu-69IfuUf1dLK7Ko9elyPONiQ1yu7yhapMVIAt_KI,27907
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=4WtcHqsFXTjYzmeOIAOMUX3wLs-ZjEt4inIaEc77h5s,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=VIpPHImhjb0XLJquGZrG_LBGZchtOtBSXR7HYTYV2GU,39602
@@ -145,7 +145,7 @@ ultralytics/models/rtdetr/__init__.py,sha256=_jEHmOjI_QP_nT3XJXLgYHQ6bXG4EL8Gnvn
 ultralytics/models/rtdetr/model.py,sha256=e2u6kQEYawRXGGO6HbFDE1uyHfsIqvKk4IpVjjYN41k,2182
 ultralytics/models/rtdetr/predict.py,sha256=_jk9ZkIW0gNLUHYyRCz_n9UgGnMTtTkFZ3Pzmkbyjgw,4197
 ultralytics/models/rtdetr/train.py,sha256=6FA3nDEcH1diFQ8Ky0xENp9cOOYATHxU6f42z9npMvs,3766
-ultralytics/models/rtdetr/val.py,sha256=
+ultralytics/models/rtdetr/val.py,sha256=QT7JNKFJmD8dqUVSUBb78t9wGtE7KEw5l92CKJU50TM,8849
 ultralytics/models/sam/__init__.py,sha256=iR7B06rAEni21eptg8n4rLOP0Z_qV9y9PL-L93n4_7s,266
 ultralytics/models/sam/amg.py,sha256=IpcuIfC5KBRiF4sdrsPl1ecWEJy75axo1yG23r5BFsw,11783
 ultralytics/models/sam/build.py,sha256=J6n-_QOYLa63jldEZmhRe9D3Is_AJE8xyZLUjzfRyTY,12629
@@ -172,19 +172,19 @@ ultralytics/models/yolo/classify/val.py,sha256=YakPxBVZCd85Kp4wFKx8KH6JJFiU7nkFS
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
 ultralytics/models/yolo/detect/train.py,sha256=HlaCoHJ6Y2TpCXXWabMRZApAYqBvjuM_YQJUV5JYCvw,9907
-ultralytics/models/yolo/detect/val.py,sha256=
+ultralytics/models/yolo/detect/val.py,sha256=jxpaKmWH5VBAR7FuxEnnbN7c1hjFJYPfDWAanemqiS0,20388
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
 ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
-ultralytics/models/yolo/obb/val.py,sha256=
+ultralytics/models/yolo/obb/val.py,sha256=GAZ1yEUYke_qzSl59kAkROXgc3Af22gDICfwUXukl1Q,13725
 ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
 ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
 ultralytics/models/yolo/pose/train.py,sha256=GyvNnDPJ3UFq_90HN8_FJ0dbwRkw3JJTVpkMFH0vC0o,5457
-ultralytics/models/yolo/pose/val.py,sha256=
+ultralytics/models/yolo/pose/val.py,sha256=Sa4YAYpOhdt_mpNGWX2tvjwkDvt1RjiNjqdZ5p532hw,12327
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
 ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
 ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65rK-QH9mtNIw,3802
-ultralytics/models/yolo/segment/val.py,sha256=
+ultralytics/models/yolo/segment/val.py,sha256=yVFJpYZCjGJ8fBgp4XEDO5ivAhkcctGqfkHI8uB-RwM,11209
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=wBKnSC-TvrKWM1Taxqwo13XcwGHwwAXzNYV1tmqcOpc,7845
 ultralytics/models/yolo/world/train_world.py,sha256=lk9z_INGPSTP_W7Rjh3qrWSmjHaxOJtGngonh1cj2SM,9551
@@ -216,12 +216,12 @@ ultralytics/solutions/object_counter.py,sha256=zD-EYIxu_y7qCFEkv6aqV60oMCZ4q6b_k
 ultralytics/solutions/object_cropper.py,sha256=x3gN-ihtwkJntp6EMcVWnIvVTOu1iRkP5RrX-1kwJHg,3522
 ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVSw8VD0OrpKtExPE,13613
 ultralytics/solutions/queue_management.py,sha256=gTkILx4dVcsKRZXSCXtelkEjCRiDS5iznb3FnddC61c,4390
-ultralytics/solutions/region_counter.py,sha256=
+ultralytics/solutions/region_counter.py,sha256=Ncd6_qIXmSQXUxCwQkgYc2-nI7KifQYhxPi3pOelZak,5950
 ultralytics/solutions/security_alarm.py,sha256=czEaMcy04q-iBkKqT_14d8H20CFB6zcKH_31nBGQnyw,6345
-ultralytics/solutions/similarity_search.py,sha256=
-ultralytics/solutions/solutions.py,sha256=
+ultralytics/solutions/similarity_search.py,sha256=c18TK0qW5AvanXU28nAX4o_WtB1SDAJStUtyLDuEBHQ,9505
+ultralytics/solutions/solutions.py,sha256=KuQ5M9oocygExRjKAIN0HjHNFYebENUSyw-i7ykDsO8,35903
 ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
-ultralytics/solutions/streamlit_inference.py,sha256=
+ultralytics/solutions/streamlit_inference.py,sha256=JAVOCc_eNtszUHKU-rZ-iUQtA6m6d3QqCgtPfwrlcsE,12773
 ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
 ultralytics/solutions/vision_eye.py,sha256=J_nsXhWkhfWz8THNJU4Yag4wbPv78ymby6SlNKeSuk4,3005
 ultralytics/solutions/templates/similarity-search.html,sha256=nyyurpWlkvYlDeNh-74TlV4ctCpTksvkVy2Yc4ImQ1U,4261
@@ -265,9 +265,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics-8.3.168.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.168.dist-info/METADATA,sha256=7afOJPw9IKBqVgBS71Nk08KhkNpEZXyOsbgp9G6IHFQ,37576
+ultralytics-8.3.168.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.168.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.168.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.168.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|
File without changes
|