ultralytics 8.3.21__py3-none-any.whl → 8.3.23__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registry.
- tests/test_cuda.py +1 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +1 -1
- ultralytics/cfg/solutions/default.yaml +8 -7
- ultralytics/data/converter.py +2 -1
- ultralytics/engine/exporter.py +3 -4
- ultralytics/models/sam/build.py +8 -0
- ultralytics/models/sam/modules/sam.py +113 -34
- ultralytics/solutions/parking_management.py +0 -1
- ultralytics/solutions/solutions.py +9 -9
- ultralytics/utils/__init__.py +10 -4
- ultralytics/utils/checks.py +1 -1
- {ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/METADATA +2 -1
- {ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/RECORD +18 -18
- {ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/top_level.txt +0 -0
tests/test_cuda.py
CHANGED
@@ -116,7 +116,7 @@ def test_predict_sam():
     from ultralytics.models.sam import Predictor as SAMPredictor

     # Load a model
-    model = SAM(WEIGHTS_DIR / "
+    model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")

     # Display model information (optional)
     model.info()
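Note: the test now loads the new SAM 2.1 base checkpoint. For reference, the same weights work through the public API; a minimal sketch (the image path is a placeholder, and the weights are assumed to auto-download on first use):

    from ultralytics import SAM

    model = SAM("sam2.1_b.pt")  # SAM 2.1 base weights, new in this release
    model.info()  # optional: print a model summary
    results = model("path/to/image.jpg")  # run segmentation on a single image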
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -787,7 +787,7 @@ def entrypoint(debug=""):
         from ultralytics import FastSAM

         model = FastSAM(model)
-    elif "sam_" in stem or "sam2_" in stem:
+    elif "sam_" in stem or "sam2_" in stem or "sam2.1_" in stem:
         from ultralytics import SAM

         model = SAM(model)
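Note: the extra clause is needed because a SAM 2.1 filename stem matches neither of the old substrings; a quick check:

    stem = "sam2.1_b"
    print("sam_" in stem, "sam2_" in stem, "sam2.1_" in stem)
    # False False True -> without the new clause, sam2.1 weights missed the SAM branch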
ultralytics/cfg/solutions/default.yaml
CHANGED
@@ -1,18 +1,19 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-
 # Configuration for Ultralytics Solutions

-
-
+# Object counting settings
 region: # Object counting, queue or speed estimation region points. Default region points are [(20, 400), (1080, 404), (1080, 360), (20, 360)]
-line_width: 2 # Width of the annotator used to draw regions on the image/video frames + bounding boxes and tracks drawing. Default value is 2.
-show: True # Flag to control whether to display output image or not, you can set this as False i.e. when deploying it on some embedded devices.
 show_in: True # Flag to display objects moving *into* the defined region
 show_out: True # Flag to display objects moving *out of* the defined region
-
+
+# Heatmaps settings
+colormap: # Colormap for heatmap, Only OPENCV supported colormaps can be used. By default COLORMAP_PARULA will be used for visualization.
+
+# Workouts monitoring settings
 up_angle: 145.0 # Workouts up_angle for counts, 145.0 is default value. You can adjust it for different workouts, based on position of keypoints.
 down_angle: 90 # Workouts down_angle for counts, 90 is default value. You can change it for different workouts, based on position of keypoints.
 kpts: [6, 8, 10] # Keypoints for workouts monitoring, i.e. If you want to consider keypoints for pushups that have mostly values of [6, 8, 10].
-
+
+# Analytics settings
 analytics_type: "line" # Analytics type i.e "line", "pie", "bar" or "area" charts. By default, "line" analytics will be used for processing.
 json_file: # parking system regions file path.
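Note: keys left empty in this YAML (region, colormap, json_file) load as None, which is what the None fallbacks added in solutions.py below have to handle; a quick illustration, assuming PyYAML:

    import yaml

    print(yaml.safe_load("colormap:\nregion:\n"))
    # {'colormap': None, 'region': None} -> empty keys arrive as None, not as strings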
ultralytics/data/converter.py
CHANGED
@@ -632,9 +632,10 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
         txt_file = save_dir / lb_name
         cls = label["cls"]
         for i, s in enumerate(label["segments"]):
+            if len(s) == 0:
+                continue
             line = (int(cls[i]), *s.reshape(-1))
             texts.append(("%g " * len(line)).rstrip() % line)
-        if texts:
         with open(txt_file, "a") as f:
             f.writelines(text + "\n" for text in texts)
     LOGGER.info(f"Generated segment labels saved in {save_dir}")
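Note: the new guard skips segments for which SAM returned no polygon points; without it, an empty array yields a label line containing only the class id. A toy illustration:

    import numpy as np

    s = np.empty((0, 2))  # an empty segment: no polygon points
    line = (0, *s.reshape(-1))
    print(line)  # (0,) -> just the class id, an invalid segment label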
ultralytics/engine/exporter.py
CHANGED
@@ -195,13 +195,12 @@ class Exporter:

         # Device
         dla = None
-        if fmt == "engine" and "dla" in self.args.device:
-            dla = self.args.device.split(":")[-1]
-            assert dla in {"0", "1"}, f"Expected self.args.device='dla:0' or 'dla:1, but got {self.args.device}."
-            self.args.device = "0"
         if fmt == "engine" and self.args.device is None:
             LOGGER.warning("WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0")
             self.args.device = "0"
+        if fmt == "engine" and "dla" in str(self.args.device):  # convert int/list to str first
+            dla = self.args.device.split(":")[-1]
+            assert dla in {"0", "1"}, f"Expected self.args.device='dla:0' or 'dla:1, but got {self.args.device}."
         self.device = select_device("cpu" if self.args.device is None else self.args.device)

         # Checks
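Note: besides reordering the checks after the None-handling, the str() coercion fixes a crash when device is None or an int; a minimal illustration:

    device = None
    # "dla" in device -> TypeError: argument of type 'NoneType' is not iterable
    print("dla" in str(device))  # False -> the membership test is now safe for any type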
ultralytics/models/sam/build.py
CHANGED
@@ -263,6 +263,7 @@ def _build_sam2(
     memory_attention = MemoryAttention(d_model=256, pos_enc_at_input=True, num_layers=4, layer=MemoryAttentionLayer())
     memory_encoder = MemoryEncoder(out_dim=64)

+    is_sam2_1 = checkpoint is not None and "sam2.1" in checkpoint
     sam2 = SAM2Model(
         image_encoder=image_encoder,
         memory_attention=memory_attention,
@@ -288,6 +289,9 @@ def _build_sam2(
         multimask_max_pt_num=1,
         use_mlp_for_obj_ptr_proj=True,
         compile_image_encoder=False,
+        no_obj_embed_spatial=is_sam2_1,
+        proj_tpos_enc_in_obj_ptrs=is_sam2_1,
+        use_signed_tpos_enc_to_obj_ptrs=is_sam2_1,
         sam_mask_decoder_extra_args=dict(
             dynamic_multimask_via_stability=True,
             dynamic_multimask_stability_delta=0.05,
@@ -313,6 +317,10 @@ sam_model_map = {
     "sam2_s.pt": build_sam2_s,
     "sam2_b.pt": build_sam2_b,
     "sam2_l.pt": build_sam2_l,
+    "sam2.1_t.pt": build_sam2_t,
+    "sam2.1_s.pt": build_sam2_s,
+    "sam2.1_b.pt": build_sam2_b,
+    "sam2.1_l.pt": build_sam2_l,
 }

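Note: SAM 2 and SAM 2.1 variants share the same builders; the checkpoint filename alone decides whether the 2.1-only options are switched on:

    for ckpt in ("sam2_b.pt", "sam2.1_b.pt"):
        print(ckpt, "->", "sam2.1" in ckpt)
    # sam2_b.pt -> False, sam2.1_b.pt -> True: only 2.1 weights enable the new modules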
ultralytics/models/sam/modules/sam.py
CHANGED
@@ -161,18 +161,19 @@ class SAM2Model(torch.nn.Module):
         use_multimask_token_for_obj_ptr: bool = False,
         iou_prediction_use_sigmoid=False,
         memory_temporal_stride_for_eval=1,
-        add_all_frames_to_correct_as_cond=False,
         non_overlap_masks_for_mem_enc=False,
         use_obj_ptrs_in_encoder=False,
         max_obj_ptrs_in_encoder=16,
         add_tpos_enc_to_obj_ptrs=True,
         proj_tpos_enc_in_obj_ptrs=False,
+        use_signed_tpos_enc_to_obj_ptrs=False,
         only_obj_ptrs_in_the_past_for_eval=False,
         pred_obj_scores: bool = False,
         pred_obj_scores_mlp: bool = False,
         fixed_no_obj_ptr: bool = False,
         soft_no_obj_ptr: bool = False,
         use_mlp_for_obj_ptr_proj: bool = False,
+        no_obj_embed_spatial: bool = False,
         sam_mask_decoder_extra_args=None,
         compile_image_encoder: bool = False,
     ):
@@ -205,8 +206,6 @@ class SAM2Model(torch.nn.Module):
             use_multimask_token_for_obj_ptr (bool): Whether to use multimask tokens for object pointers.
             iou_prediction_use_sigmoid (bool): Whether to use sigmoid to restrict IoU prediction to [0-1].
             memory_temporal_stride_for_eval (int): Memory bank's temporal stride during evaluation.
-            add_all_frames_to_correct_as_cond (bool): Whether to append frames with correction clicks to conditioning
-                frame list.
             non_overlap_masks_for_mem_enc (bool): Whether to apply non-overlapping constraints on object masks in
                 memory encoder during evaluation.
             use_obj_ptrs_in_encoder (bool): Whether to cross-attend to object pointers from other frames in the encoder.
@@ -216,6 +215,9 @@ class SAM2Model(torch.nn.Module):
                 the encoder.
             proj_tpos_enc_in_obj_ptrs (bool): Whether to add an extra linear projection layer for temporal positional
                 encoding in object pointers.
+            use_signed_tpos_enc_to_obj_ptrs (bool): whether to use signed distance (instead of unsigned absolute distance)
+                in the temporal positional encoding in the object pointers, only relevant when both `use_obj_ptrs_in_encoder=True`
+                and `add_tpos_enc_to_obj_ptrs=True`.
             only_obj_ptrs_in_the_past_for_eval (bool): Whether to only attend to object pointers in the past
                 during evaluation.
             pred_obj_scores (bool): Whether to predict if there is an object in the frame.
@@ -223,6 +225,7 @@ class SAM2Model(torch.nn.Module):
             fixed_no_obj_ptr (bool): Whether to have a fixed no-object pointer when there is no object present.
             soft_no_obj_ptr (bool): Whether to mix in no-object pointer softly for easier recovery and error mitigation.
             use_mlp_for_obj_ptr_proj (bool): Whether to use MLP for object pointer projection.
+            no_obj_embed_spatial (bool): Whether add no obj embedding to spatial frames.
             sam_mask_decoder_extra_args (Dict | None): Extra arguments for constructing the SAM mask decoder.
             compile_image_encoder (bool): Whether to compile the image encoder for faster inference.

@@ -253,6 +256,7 @@ class SAM2Model(torch.nn.Module):
         if proj_tpos_enc_in_obj_ptrs:
             assert add_tpos_enc_to_obj_ptrs  # these options need to be used together
         self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
+        self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs
         self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval

         # Part 2: memory attention to condition current frame's visual features
@@ -309,9 +313,12 @@ class SAM2Model(torch.nn.Module):
             self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
             trunc_normal_(self.no_obj_ptr, std=0.02)
         self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
+        self.no_obj_embed_spatial = None
+        if no_obj_embed_spatial:
+            self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
+            trunc_normal_(self.no_obj_embed_spatial, std=0.02)

         self._build_sam_heads()
-        self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
         self.max_cond_frames_in_attn = max_cond_frames_in_attn

         # Model compilation
@@ -533,8 +540,6 @@ class SAM2Model(torch.nn.Module):
         if self.pred_obj_scores:
             # Allow *soft* no obj ptr, unlike for masks
             if self.soft_no_obj_ptr:
-                # Only hard possible with gt
-                assert not self.teacher_force_obj_scores_for_mem
                 lambda_is_obj_appearing = object_score_logits.sigmoid()
             else:
                 lambda_is_obj_appearing = is_obj_appearing.float()
@@ -647,6 +652,7 @@ class SAM2Model(torch.nn.Module):
         if self.num_maskmem == 0:  # Disable memory and skip fusion
             return current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
         num_obj_ptr_tokens = 0
+        tpos_sign_mul = -1 if track_in_reverse else 1
         # Step 1: condition the visual features of the current frame on previous memories
         if not is_init_cond_frame:
             # Retrieve the memories encoded with the maskmem backbone
@@ -664,7 +670,7 @@ class SAM2Model(torch.nn.Module):
             # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
             # We also allow taking the memory frame non-consecutively (with r>1), in which case
             # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame.
-            r = self.memory_temporal_stride_for_eval
+            r = 1 if self.training else self.memory_temporal_stride_for_eval
             for t_pos in range(1, self.num_maskmem):
                 t_rel = self.num_maskmem - t_pos  # how many frames before current frame
                 if t_rel == 1:
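Note: with the signed option, a pointer's temporal offset keeps its direction instead of collapsing past and future into the same value; a toy walk-through of the expression introduced above:

    frame_idx, track_in_reverse = 10, False
    tpos_sign_mul = -1 if track_in_reverse else 1
    for t in (7, 13):  # one past and one future pointer frame
        signed = (frame_idx - t) * tpos_sign_mul
        unsigned = abs(frame_idx - t)
        print(t, signed, unsigned)
    # t=7 -> 3, 3 and t=13 -> -3, 3: only the signed form distinguishes direction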
@@ -718,7 +724,14 @@ class SAM2Model(torch.nn.Module):
                     ptr_cond_outputs = selected_cond_outputs
                 pos_and_ptrs = [
                     # Temporal pos encoding contains how far away each pointer is from current frame
-                    (
+                    (
+                        (
+                            (frame_idx - t) * tpos_sign_mul
+                            if self.use_signed_tpos_enc_to_obj_ptrs
+                            else abs(frame_idx - t)
+                        ),
+                        out["obj_ptr"],
+                    )
                     for t, out in ptr_cond_outputs.items()
                 ]
                 # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
@@ -787,6 +800,7 @@ class SAM2Model(torch.nn.Module):
         current_vision_feats,
         feat_sizes,
         pred_masks_high_res,
+        object_score_logits,
         is_mask_from_pts,
     ):
         """Encodes frame features and masks into a new memory representation for video segmentation."""
@@ -819,10 +833,17 @@ class SAM2Model(torch.nn.Module):
         )
         maskmem_features = maskmem_out["vision_features"]
         maskmem_pos_enc = maskmem_out["vision_pos_enc"]
+        # add a no-object embedding to the spatial memory to indicate that the frame
+        # is predicted to be occluded (i.e. no object is appearing in the frame)
+        if self.no_obj_embed_spatial is not None:
+            is_obj_appearing = (object_score_logits > 0).float()
+            maskmem_features += (1 - is_obj_appearing[..., None, None]) * self.no_obj_embed_spatial[
+                ..., None, None
+            ].expand(*maskmem_features.shape)

         return maskmem_features, maskmem_pos_enc

-    def
+    def _track_step(
         self,
         frame_idx,
         is_init_cond_frame,
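Note: a toy version of the new occlusion write with the shapes spelled out (values are made up; in the model, no_obj_embed_spatial is a learned parameter):

    import torch

    mem_dim = 64
    no_obj_embed = torch.zeros(1, mem_dim)        # stands in for self.no_obj_embed_spatial
    feats = torch.randn(1, mem_dim, 4, 4)         # stands in for maskmem_features (B, C, H, W)
    object_score_logits = torch.tensor([[-2.0]])  # negative logit -> frame predicted occluded
    is_obj_appearing = (object_score_logits > 0).float()
    # occluded frames get the no-object embedding added at every spatial location
    feats += (1 - is_obj_appearing[..., None, None]) * no_obj_embed[..., None, None].expand(*feats.shape)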
@@ -833,15 +854,7 @@ class SAM2Model(torch.nn.Module):
         mask_inputs,
         output_dict,
         num_frames,
-
-        # Whether to run the memory encoder on the predicted masks. Sometimes we might want
-        # to skip the memory encoder with `run_mem_encoder=False`. For example,
-        # in demo we might call `track_step` multiple times for each user click,
-        # and only encode the memory when the user finalizes their clicks. And in ablation
-        # settings like SAM training on static images, we don't need the memory encoder.
-        run_mem_encoder=True,
-        # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
-        prev_sam_mask_logits=None,
+        prev_sam_mask_logits,
     ):
         """Performs a single tracking step, updating object masks and memory features based on current frame inputs."""
         current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs}
@@ -861,7 +874,7 @@ class SAM2Model(torch.nn.Module):
             sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs)
         else:
             # fused the visual feature with previous memory features in the memory bank
-
+            pix_feat = self._prepare_memory_conditioned_features(
                 frame_idx=frame_idx,
                 is_init_cond_frame=is_init_cond_frame,
                 current_vision_feats=current_vision_feats[-1:],
@@ -880,12 +893,78 @@ class SAM2Model(torch.nn.Module):
             mask_inputs = prev_sam_mask_logits
         multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
         sam_outputs = self._forward_sam_heads(
-            backbone_features=
+            backbone_features=pix_feat,
             point_inputs=point_inputs,
             mask_inputs=mask_inputs,
             high_res_features=high_res_features,
             multimask_output=multimask_output,
         )
+        return current_out, sam_outputs, high_res_features, pix_feat
+
+    def _encode_memory_in_output(
+        self,
+        current_vision_feats,
+        feat_sizes,
+        point_inputs,
+        run_mem_encoder,
+        high_res_masks,
+        object_score_logits,
+        current_out,
+    ):
+        """Finally run the memory encoder on the predicted mask to encode, it into a new memory feature (that can be
+        used in future frames).
+        """
+        if run_mem_encoder and self.num_maskmem > 0:
+            high_res_masks_for_mem_enc = high_res_masks
+            maskmem_features, maskmem_pos_enc = self._encode_new_memory(
+                current_vision_feats=current_vision_feats,
+                feat_sizes=feat_sizes,
+                pred_masks_high_res=high_res_masks_for_mem_enc,
+                object_score_logits=object_score_logits,
+                is_mask_from_pts=(point_inputs is not None),
+            )
+            current_out["maskmem_features"] = maskmem_features
+            current_out["maskmem_pos_enc"] = maskmem_pos_enc
+        else:
+            current_out["maskmem_features"] = None
+            current_out["maskmem_pos_enc"] = None
+
+    def track_step(
+        self,
+        frame_idx,
+        is_init_cond_frame,
+        current_vision_feats,
+        current_vision_pos_embeds,
+        feat_sizes,
+        point_inputs,
+        mask_inputs,
+        output_dict,
+        num_frames,
+        track_in_reverse=False,  # tracking in reverse time order (for demo usage)
+        # Whether to run the memory encoder on the predicted masks. Sometimes we might want
+        # to skip the memory encoder with `run_mem_encoder=False`. For example,
+        # in demo we might call `track_step` multiple times for each user click,
+        # and only encode the memory when the user finalizes their clicks. And in ablation
+        # settings like SAM training on static images, we don't need the memory encoder.
+        run_mem_encoder=True,
+        # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
+        prev_sam_mask_logits=None,
+    ):
+        """Performs a single tracking step, updating object masks and memory features based on current frame inputs."""
+        current_out, sam_outputs, _, _ = self._track_step(
+            frame_idx,
+            is_init_cond_frame,
+            current_vision_feats,
+            current_vision_pos_embeds,
+            feat_sizes,
+            point_inputs,
+            mask_inputs,
+            output_dict,
+            num_frames,
+            track_in_reverse,
+            prev_sam_mask_logits,
+        )
+
         (
             _,
             _,
@@ -893,28 +972,28 @@ class SAM2Model(torch.nn.Module):
             low_res_masks,
             high_res_masks,
             obj_ptr,
-
+            object_score_logits,
         ) = sam_outputs

         current_out["pred_masks"] = low_res_masks
         current_out["pred_masks_high_res"] = high_res_masks
         current_out["obj_ptr"] = obj_ptr
+        if not self.training:
+            # Only add this in inference (to avoid unused param in activation checkpointing;
+            # it's mainly used in the demo to encode spatial memories w/ consolidated masks)
+            current_out["object_score_logits"] = object_score_logits

         # Finally run the memory encoder on the predicted mask to encode
         # it into a new memory feature (that can be used in future frames)
-
-
-
-
-
-
-
-
-
-            current_out["maskmem_pos_enc"] = maskmem_pos_enc
-        else:
-            current_out["maskmem_features"] = None
-            current_out["maskmem_pos_enc"] = None
+        self._encode_memory_in_output(
+            current_vision_feats,
+            feat_sizes,
+            point_inputs,
+            run_mem_encoder,
+            high_res_masks,
+            object_score_logits,
+            current_out,
+        )

         return current_out

ultralytics/solutions/parking_management.py
CHANGED
@@ -168,7 +168,6 @@ class ParkingManagement(BaseSolution):
     Examples:
         >>> from ultralytics.solutions import ParkingManagement
         >>> parking_manager = ParkingManagement(model="yolov8n.pt", json_file="parking_regions.json")
-        >>> results = parking_manager(source="parking_lot_video.mp4")
         >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
         >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
     """
ultralytics/solutions/solutions.py
CHANGED
@@ -1,16 +1,13 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

 from collections import defaultdict
-from pathlib import Path

 import cv2

 from ultralytics import YOLO
-from ultralytics.utils import
+from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_SOL_DICT, LOGGER
 from ultralytics.utils.checks import check_imshow, check_requirements

-DEFAULT_SOL_CFG_PATH = Path(__file__).resolve().parents[1] / "cfg/solutions/default.yaml"
-

 class BaseSolution:
     """
@@ -55,15 +52,18 @@ class BaseSolution:
         self.Point = Point

         # Load config and update with args
-
-
-
+        DEFAULT_SOL_DICT.update(kwargs)
+        DEFAULT_CFG_DICT.update(kwargs)
+        self.CFG = {**DEFAULT_SOL_DICT, **DEFAULT_CFG_DICT}
+        LOGGER.info(f"Ultralytics Solutions: ✅ {DEFAULT_SOL_DICT}")

         self.region = self.CFG["region"]  # Store region data for other classes usage
-        self.line_width =
+        self.line_width = (
+            self.CFG["line_width"] if self.CFG["line_width"] is not None else 2
+        )  # Store line_width for usage

         # Load Model and store classes names
-        self.model = YOLO(self.CFG["model"])
+        self.model = YOLO(self.CFG["model"] if self.CFG["model"] else "yolov8n.pt")
         self.names = self.model.names

         # Initialize environment and region setup
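Note: two details of the new config handling are easy to miss: user kwargs are pushed into both module-level dicts, and in the {**DEFAULT_SOL_DICT, **DEFAULT_CFG_DICT} merge the right-hand dict wins on duplicate keys. A quick illustration of that merge rule:

    sol = {"show_in": True, "line_width": 2}
    cfg = {"model": None, "line_width": None}
    print({**sol, **cfg}["line_width"])  # None -> the second dict takes precedence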
ultralytics/utils/__init__.py
CHANGED
@@ -38,6 +38,7 @@ FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLO
 ASSETS = ROOT / "assets"  # default images
 DEFAULT_CFG_PATH = ROOT / "cfg/default.yaml"
+DEFAULT_SOL_CFG_PATH = ROOT / "cfg/solutions/default.yaml"  # Ultralytics solutions yaml path
 NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLO multiprocessing threads
 AUTOINSTALL = str(os.getenv("YOLO_AUTOINSTALL", True)).lower() == "true"  # global auto-install mode
 VERBOSE = str(os.getenv("YOLO_VERBOSE", True)).lower() == "true"  # global verbose mode
@@ -508,6 +509,7 @@ def yaml_print(yaml_file: Union[str, Path, dict]) -> None:

 # Default configuration
 DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH)
+DEFAULT_SOL_DICT = yaml_load(DEFAULT_SOL_CFG_PATH)  # Ultralytics solutions configuration
 for k, v in DEFAULT_CFG_DICT.items():
     if isinstance(v, str) and v.lower() == "none":
         DEFAULT_CFG_DICT[k] = None
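Note: with both constants in place, the solutions defaults are importable alongside the main config; a minimal sketch:

    from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_SOL_DICT

    print(DEFAULT_SOL_DICT["up_angle"])  # 145.0, loaded from cfg/solutions/default.yaml
    print("model" in DEFAULT_CFG_DICT)   # True, loaded from cfg/default.yaml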
@@ -566,12 +568,16 @@ def is_kaggle():

 def is_jupyter():
     """
-    Check if the current script is running inside a Jupyter Notebook.
+    Check if the current script is running inside a Jupyter Notebook.

     Returns:
         (bool): True if running inside a Jupyter Notebook, False otherwise.
+
+    Note:
+        - Only works on Colab and Kaggle, other environments like Jupyterlab and Paperspace are not reliably detectable.
+        - "get_ipython" in globals() method suffers false positives when IPython package installed manually.
     """
-    return
+    return IS_COLAB or IS_KAGGLE


 def is_docker() -> bool:
@@ -799,10 +805,10 @@ def get_user_config_dir(sub_dir="Ultralytics"):
 PROC_DEVICE_MODEL = read_device_model()  # is_jetson() and is_raspberrypi() depend on this constant
 ONLINE = is_online()
 IS_COLAB = is_colab()
+IS_KAGGLE = is_kaggle()
 IS_DOCKER = is_docker()
 IS_JETSON = is_jetson()
 IS_JUPYTER = is_jupyter()
-IS_KAGGLE = is_kaggle()
 IS_PIP_PACKAGE = is_pip_package()
 IS_RASPBERRYPI = is_raspberrypi()
 GIT_DIR = get_git_dir()
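Note: the move is not cosmetic; is_jupyter() now returns IS_COLAB or IS_KAGGLE, so both flags must be assigned before it runs. A self-contained sketch of the ordering constraint, with stub detectors standing in for the real ones:

    def is_colab(): return False   # stub for ultralytics.utils.is_colab
    def is_kaggle(): return True   # stub for ultralytics.utils.is_kaggle
    def is_jupyter(): return IS_COLAB or IS_KAGGLE  # reads the module-level flags

    IS_COLAB = is_colab()
    IS_KAGGLE = is_kaggle()    # must now run before is_jupyter()
    IS_JUPYTER = is_jupyter()  # True; a NameError if IS_KAGGLE were still assigned later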
@@ -1193,7 +1199,7 @@ class SettingsManager(JSONDict):
             "neptune": True,  # Neptune integration
             "raytune": True,  # Ray Tune integration
             "tensorboard": True,  # TensorBoard logging
-            "wandb":
+            "wandb": False,  # Weights & Biases logging
             "vscode_msg": True,  # VSCode messaging
         }

ultralytics/utils/checks.py
CHANGED
@@ -335,7 +335,7 @@ def check_font(font="Arial.ttf"):
     return file


-def check_python(minimum: str = "3.8.0", hard: bool = True, verbose: bool =
+def check_python(minimum: str = "3.8.0", hard: bool = True, verbose: bool = False) -> bool:
     """
     Check current python version against the required minimum version.

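Note: the completed signature shows verbose defaulting to off; a usage sketch (the hard=False return behavior is assumed, not shown in this hunk):

    from ultralytics.utils.checks import check_python

    check_python("3.8.0")               # True on any supported interpreter
    check_python("99.0.0", hard=False)  # False instead of raising, since hard=False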
{ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.
+Version: 8.3.23
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -104,6 +104,7 @@ Requires-Dist: streamlit; extra == "solutions"
 <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
 <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
 <a href="https://www.kaggle.com/models/ultralytics/yolo11"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open Ultralytics In Kaggle"></a>
+<a href="https://mybinder.org/v2/gh/ultralytics/ultralytics/HEAD?labpath=examples%2Ftutorial.ipynb"><img src="https://mybinder.org/badge_logo.svg" alt="Open Ultralytics In Binder"></a>
 </div>
 <br>

{ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/RECORD
CHANGED
@@ -1,16 +1,16 @@
 tests/__init__.py,sha256=iVH5nXrACTDv0_ZIVRPi-9f6oYBl6g-tCkeR2Hb8MFM,666
 tests/conftest.py,sha256=9PFAiwAy6eeORGspr5dOKxVuFDVKqYg8Nn_RxSJ27UI,2919
 tests/test_cli.py,sha256=G7OJ1ErQYsGy2Dx1zP-0p7EZR4aPoAdtLGiY4Hm7jQM,5006
-tests/test_cuda.py,sha256=
+tests/test_cuda.py,sha256=rhHFvKNegN1ChtueKM0JhATJaJDFB377uXo2Kca5JVQ,5943
 tests/test_engine.py,sha256=dcEcJsMQh61rDSNv7l4TIAgybLpzjVwerv9JZC_KCM8,4934
 tests/test_exports.py,sha256=fpTKEVBUGLF3WiZPNKRs-IEcIY4cfxgvgKjUNfodjww,8042
 tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
 tests/test_python.py,sha256=I1RRdCwLdrc3jX06huVxct8HX8ccQOmQgVpuEflRl0U,23560
 tests/test_solutions.py,sha256=sPYhy2d814mIVvojQeVxeZPu0IVy01_Y8zuMcu_9GF0,3790
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=oeMJ2vOq3lfYTtggyngLMUYRmg2YoNKN1G0O3GrrUMo,681
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=
+ultralytics/cfg/__init__.py,sha256=xubvv6UXzRfPGvDyTDLRiZsaRRYXYHY9cHyClRqLNAs,32420
 ultralytics/cfg/default.yaml,sha256=ul49zgSzTegMmc8CFeu9tXkWNvQhETdZMa9EgDNSnY4,8319
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
@@ -85,7 +85,7 @@ ultralytics/cfg/models/v9/yolov9e.yaml,sha256=dhaR47WxuLOrZWDCceS4bQG00sQdrMc8FQ
 ultralytics/cfg/models/v9/yolov9m.yaml,sha256=l6CmivzNu44sRVmkQXk4-tXflbV1nWnk5MSc8su2vhs,1311
 ultralytics/cfg/models/v9/yolov9s.yaml,sha256=lPWcu-6ub1kCBD6zIDFwthYZ3RvdJfODWKy3vEQWRjo,1291
 ultralytics/cfg/models/v9/yolov9t.yaml,sha256=qL__kr6GoefpQWP4jV0jdzwTp46bdFUcqtPRnfDbkY8,1275
-ultralytics/cfg/solutions/default.yaml,sha256=
+ultralytics/cfg/solutions/default.yaml,sha256=irtGM8nxaSBkrWMqcXoJdtKgqAq1YBwyVMGx5csSH2Y,1239
 ultralytics/cfg/trackers/botsort.yaml,sha256=8B0xNbnG_E-9DCUpap72PWkUgBb1AjuApEn7gHiVngE,916
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=8vpTZ2x9mhRXJymoJvs1G8kTXo_HxbSwHup2FQALT3A,721
 ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
@@ -93,13 +93,13 @@ ultralytics/data/annotator.py,sha256=oy87bzQN6ZRYeucoLk8e-jDEo6YJ91FE_zMFtLEVC1I
 ultralytics/data/augment.py,sha256=YCLrwx1mRGeidggo_7GeINay8KdxACqREHJofZeaTHA,120430
 ultralytics/data/base.py,sha256=ZCIhAyFfxXVp5fVnYD8mwbksNALJTayBKIR5FKGV7ZM,15168
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=
+ultralytics/data/converter.py,sha256=wd1u2BPZQRAkobv8ThklpeQAfcYL2xo-i94qNJdbwBU,24286
 ultralytics/data/dataset.py,sha256=D556AW0ZEsW3V8c5zJiHM_prc_YfZqymIkDKPw3k9Io,22936
 ultralytics/data/loaders.py,sha256=Fr70Q9p9t7buLW_8R2_lI_nyCMG033gWSxvwy1M-a-U,28449
 ultralytics/data/split_dota.py,sha256=eFafJ7Vg52wj6KDCHFJAf1tKzyPD5YaPB8kM4VX5Aeg,10688
 ultralytics/data/utils.py,sha256=bmWEIrdogj4kssZQSJdSbIF8QsJU00lo-EY-Mgcqv4M,31073
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=
+ultralytics/engine/exporter.py,sha256=suJeF0W14HEAbjX452btvzgGtdNy-Ay-LtUHvWV7ZZ8,58620
 ultralytics/engine/model.py,sha256=pvL1uf-wwdWL8Iph7VEAYn1-z7wEHzVug21V_0_gO6M,51456
 ultralytics/engine/predictor.py,sha256=keTelEeo23Dcbs-XvmRWAPIs4pbCNDtsMBz88WM1eK8,17534
 ultralytics/engine/results.py,sha256=BxanBI8PhBCfs-9cSy-GS6naScuiD3hdvUAJWPW2mS0,75043
@@ -128,7 +128,7 @@ ultralytics/models/rtdetr/train.py,sha256=3QjchAvLM3gh1sNTOVSVvpyqqsZSYprUQ12e4o
 ultralytics/models/rtdetr/val.py,sha256=xVjZShZ1AvES97wVekl2q_1g20Pq-IIHhkJdWtxMncs,5566
 ultralytics/models/sam/__init__.py,sha256=o4_D6y8YJlOXIK7Lwo9RHnIJJ9xoFNi4zK99QSc1kdM,176
 ultralytics/models/sam/amg.py,sha256=GrmO_8YfIDt_QkPEMF_WFjPZkhwhf7iwx7ig8JgOUnE,8709
-ultralytics/models/sam/build.py,sha256=
+ultralytics/models/sam/build.py,sha256=ac7Pop5f51TVzGgfV6bbXSFDA9fBVxERUc_6WDQ-9Ys,12487
 ultralytics/models/sam/model.py,sha256=2KFUp8SHiqOgwUjkdqdau0oduJwKQxm4N9GHWjdhUFo,7382
 ultralytics/models/sam/predict.py,sha256=LSxys7fuQycrAoqf_EFohk9ftu7cq1F2GY9_fuIl5uE,40384
 ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
@@ -136,7 +136,7 @@ ultralytics/models/sam/modules/blocks.py,sha256=Q-KwhFbdyZhl1tjG_kP2LcQkZbzoNt61
 ultralytics/models/sam/modules/decoders.py,sha256=mODsqnTN_CjE3H0Sh9cd8PfTnHANPjGB1bjqHxfezSg,25830
 ultralytics/models/sam/modules/encoders.py,sha256=Ay3sYeUonCf6URXBdB0dDwyngovevW8hUDgULRnNIoA,34824
 ultralytics/models/sam/modules/memory_attention.py,sha256=XilWBnRfH8wZxIoL2-yEk-dRypCsS0Jf_9t8WJxXKg0,9722
-ultralytics/models/sam/modules/sam.py,sha256=
+ultralytics/models/sam/modules/sam.py,sha256=TEo4iB7i8B3hKr1SaNu-PDIWHl3FZoSvyjnSJZI0V1A,53076
 ultralytics/models/sam/modules/tiny_encoder.py,sha256=NyzeFMLnmqwcFQFs-JBM9PCWSsYoYZ_6h59Un1DeDV0,41332
 ultralytics/models/sam/modules/transformer.py,sha256=nuhF_14LGrr5uYCAP9XCXps-zlVcT4OWO0evXWDxPwI,16081
 ultralytics/models/sam/modules/utils.py,sha256=Y36V6BVy6GeaAvKE8gHmoDIa-f5LjJpmSVwywNkv2yk,12315
@@ -184,9 +184,9 @@ ultralytics/solutions/analytics.py,sha256=G4SKg8OPwGsHdUITOeD3pP11iUce1j8ut6HW7B
 ultralytics/solutions/distance_calculation.py,sha256=KN3CC-dm2dTQylj79IrifCJT8ZhE7hc2EweH3KK31mE,5461
 ultralytics/solutions/heatmap.py,sha256=If9rosSCmE7pAL1HtVnLkx05gQp6nP1K6HzATMcaEEE,5372
 ultralytics/solutions/object_counter.py,sha256=vKB7riRm8NjHA6IXyf557FpmV-b0_XoKbXHqMHziXSM,8264
-ultralytics/solutions/parking_management.py,sha256=
+ultralytics/solutions/parking_management.py,sha256=1DsEE94eauqcnnFxUYI-BX9eA1GbJVNt7oncj1okYpI,11198
 ultralytics/solutions/queue_management.py,sha256=D9TqwJSVrZQFxp_M8O62WfBAxkAuDWWnXe7FFmnp7_w,4881
-ultralytics/solutions/solutions.py,sha256=
+ultralytics/solutions/solutions.py,sha256=Hd_rhqzlrBgrXRGbj5yJiq8-fLs0xe8uxT6BzvjdlW4,6573
 ultralytics/solutions/speed_estimation.py,sha256=A10DmuZlGkoZUyfHhZWcDRjj1-9GXiDhEjyBbAzfaDs,4936
 ultralytics/solutions/streamlit_inference.py,sha256=w4dnvSv2FOrpji9W1Ir86phka3OXc7jd_38-OCbQdZw,5701
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
@@ -198,10 +198,10 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=VcURuY041qGCeWUGMxHZBr10T16LtcMqyv7AmTfE1MY,14557
 ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hHKx_Sx1j7t3oYGs,21349
 ultralytics/trackers/utils/matching.py,sha256=3Ie1WNNRZ4_q3365F03XD7Nr9juZB_08mw4yUKC3w74,7162
-ultralytics/utils/__init__.py,sha256=
+ultralytics/utils/__init__.py,sha256=oUtiHZUVtz-KtequUv15Has85k2BHgP6c-_cAAdf-rM,49060
 ultralytics/utils/autobatch.py,sha256=BO9MCRtrLDtrDQaxqV0BdjaYsgXf-q07Y3_VdGp4URY,4330
 ultralytics/utils/benchmarks.py,sha256=R3_jtwLd48azPSXmtIhqlzduUvflk0FOY8GJOJ6mB6E,25097
-ultralytics/utils/checks.py,sha256
+ultralytics/utils/checks.py,sha256=cEQIYK3ZGQqcQ9uckNF-KbYdjGpfA1FHJHsUim94EoA,29800
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
 ultralytics/utils/downloads.py,sha256=fh7I5toTSowAOXtmx5zIzCEDREfTFG45cLIHmsDmuYw,21974
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
@@ -227,9 +227,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTg
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
 ultralytics/utils/callbacks/wb.py,sha256=oX3JarCJGhzvW556XiEXQNaZblAaK_UETAt3kzkY61w,6869
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics-8.3.23.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.23.dist-info/METADATA,sha256=IatjVT78W2QUeo1wNKGqjRs7DpSoGi35WH9YIlmUx_g,35028
+ultralytics-8.3.23.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+ultralytics-8.3.23.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.23.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.23.dist-info/RECORD,,
{ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/LICENSE
File without changes
{ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/WHEEL
File without changes
{ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/entry_points.txt
File without changes
{ultralytics-8.3.21.dist-info → ultralytics-8.3.23.dist-info}/top_level.txt
File without changes