ultralytics 8.2.69__py3-none-any.whl → 8.2.71__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of ultralytics has been flagged as potentially problematic.

Files changed (35):
  1. ultralytics/__init__.py +3 -2
  2. ultralytics/cfg/__init__.py +4 -0
  3. ultralytics/data/converter.py +81 -0
  4. ultralytics/engine/trainer.py +3 -2
  5. ultralytics/engine/validator.py +2 -2
  6. ultralytics/models/__init__.py +2 -1
  7. ultralytics/models/fastsam/predict.py +1 -0
  8. ultralytics/models/sam/build.py +2 -2
  9. ultralytics/models/sam/model.py +10 -2
  10. ultralytics/models/sam/modules/decoders.py +1 -42
  11. ultralytics/models/sam/modules/encoders.py +3 -1
  12. ultralytics/models/sam/modules/sam.py +5 -7
  13. ultralytics/models/sam/modules/transformer.py +4 -3
  14. ultralytics/models/sam/predict.py +12 -6
  15. ultralytics/models/sam2/__init__.py +6 -0
  16. ultralytics/models/sam2/build.py +156 -0
  17. ultralytics/models/sam2/model.py +97 -0
  18. ultralytics/models/sam2/modules/__init__.py +1 -0
  19. ultralytics/models/sam2/modules/decoders.py +305 -0
  20. ultralytics/models/sam2/modules/encoders.py +332 -0
  21. ultralytics/models/sam2/modules/memory_attention.py +170 -0
  22. ultralytics/models/sam2/modules/sam2.py +804 -0
  23. ultralytics/models/sam2/modules/sam2_blocks.py +715 -0
  24. ultralytics/models/sam2/modules/utils.py +191 -0
  25. ultralytics/models/sam2/predict.py +182 -0
  26. ultralytics/nn/modules/transformer.py +5 -3
  27. ultralytics/utils/__init__.py +9 -9
  28. ultralytics/utils/plotting.py +1 -1
  29. ultralytics/utils/torch_utils.py +11 -7
  30. {ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/METADATA +1 -1
  31. {ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/RECORD +35 -24
  32. {ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/LICENSE +0 -0
  33. {ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/WHEEL +0 -0
  34. {ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/entry_points.txt +0 -0
  35. {ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/top_level.txt +0 -0

ultralytics/models/sam2/modules/utils.py
@@ -0,0 +1,191 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import torch
+ import torch.nn.functional as F
+
+
+ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
+     """
+     Selects the closest conditioning frames to a given frame index.
+
+     Args:
+         frame_idx (int): Current frame index.
+         cond_frame_outputs (Dict[int, Any]): Dictionary of conditioning frame outputs keyed by frame indices.
+         max_cond_frame_num (int): Maximum number of conditioning frames to select.
+
+     Returns:
+         (Tuple[Dict[int, Any], Dict[int, Any]]): A tuple containing two dictionaries:
+             - selected_outputs: Selected items from cond_frame_outputs.
+             - unselected_outputs: Items not selected from cond_frame_outputs.
+
+     Examples:
+         >>> frame_idx = 5
+         >>> cond_frame_outputs = {1: 'a', 3: 'b', 7: 'c', 9: 'd'}
+         >>> max_cond_frame_num = 2
+         >>> selected, unselected = select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num)
+         >>> print(selected)
+         {3: 'b', 7: 'c'}
+         >>> print(unselected)
+         {1: 'a', 9: 'd'}
+     """
+     if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
+         selected_outputs = cond_frame_outputs
+         unselected_outputs = {}
+     else:
+         assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
+         selected_outputs = {}
+
+         # the closest conditioning frame before `frame_idx` (if any)
+         idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
+         if idx_before is not None:
+             selected_outputs[idx_before] = cond_frame_outputs[idx_before]
+
+         # the closest conditioning frame after `frame_idx` (if any)
+         idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
+         if idx_after is not None:
+             selected_outputs[idx_after] = cond_frame_outputs[idx_after]
+
+         # add other temporally closest conditioning frames until reaching a total
+         # of `max_cond_frame_num` conditioning frames.
+         num_remain = max_cond_frame_num - len(selected_outputs)
+         inds_remain = sorted(
+             (t for t in cond_frame_outputs if t not in selected_outputs),
+             key=lambda x: abs(x - frame_idx),
+         )[:num_remain]
+         selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
+         unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
+
+     return selected_outputs, unselected_outputs
+
+
+ def get_1d_sine_pe(pos_inds, dim, temperature=10000):
+     """Generates 1D sinusoidal positional embeddings for given positions and dimensions."""
+     pe_dim = dim // 2
+     dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
+     dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
+
+     pos_embed = pos_inds.unsqueeze(-1) / dim_t
+     pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
+     return pos_embed
+
+
+ def init_t_xy(end_x: int, end_y: int):
+     """Initializes 1D and 2D coordinate tensors for a grid of size end_x by end_y."""
+     t = torch.arange(end_x * end_y, dtype=torch.float32)
+     t_x = (t % end_x).float()
+     t_y = torch.div(t, end_x, rounding_mode="floor").float()
+     return t_x, t_y
+
+
+ def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
+     """Computes axial complex exponential positional encodings for 2D spatial positions."""
+     freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
+     freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
+
+     t_x, t_y = init_t_xy(end_x, end_y)
+     freqs_x = torch.outer(t_x, freqs_x)
+     freqs_y = torch.outer(t_y, freqs_y)
+     freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
+     freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
+     return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
+
+
+ def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
+     """Reshapes frequency tensor for broadcasting, ensuring compatibility with input tensor dimensions."""
+     ndim = x.ndim
+     assert 0 <= 1 < ndim
+     assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
+     shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
+     return freqs_cis.view(*shape)
+
+
+ def apply_rotary_enc(
+     xq: torch.Tensor,
+     xk: torch.Tensor,
+     freqs_cis: torch.Tensor,
+     repeat_freqs_k: bool = False,
+ ):
+     """Applies rotary positional encoding to query and key tensors using complex-valued frequency components."""
+     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
+     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None
+     freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
+     xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
+     if xk_ is None:
+         # no keys to rotate, due to dropout
+         return xq_out.type_as(xq).to(xq.device), xk
+     # repeat freqs along seq_len dim to match k seq_len
+     if repeat_freqs_k:
+         r = xk_.shape[-2] // xq_.shape[-2]
+         freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
+     xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
+     return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
+
+
+ def window_partition(x, window_size):
+     """
+     Partitions input tensor into non-overlapping windows with padding if needed.
+
+     Args:
+         x (torch.Tensor): Input tensor with shape (B, H, W, C).
+         window_size (int): Size of each window.
+
+     Returns:
+         (Tuple[torch.Tensor, Tuple[int, int]]): A tuple containing:
+             - windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
+             - (Hp, Wp) (Tuple[int, int]): Padded height and width before partition.
+
+     Examples:
+         >>> x = torch.randn(1, 16, 16, 3)
+         >>> windows, (Hp, Wp) = window_partition(x, window_size=4)
+         >>> print(windows.shape, Hp, Wp)
+         torch.Size([16, 4, 4, 3]) 16 16
+     """
+     B, H, W, C = x.shape
+
+     pad_h = (window_size - H % window_size) % window_size
+     pad_w = (window_size - W % window_size) % window_size
+     if pad_h > 0 or pad_w > 0:
+         x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+     Hp, Wp = H + pad_h, W + pad_w
+
+     x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+     windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+     return windows, (Hp, Wp)
+
+
+ def window_unpartition(windows, window_size, pad_hw, hw):
+     """
+     Unpartitions windowed sequences into original sequences and removes padding.
+
+     This function reverses the windowing process, reconstructing the original input from windowed segments
+     and removing any padding that was added during the windowing process.
+
+     Args:
+         windows (torch.Tensor): Input tensor of windowed sequences with shape (B * num_windows, window_size,
+             window_size, C), where B is the batch size, num_windows is the number of windows, window_size is
+             the size of each window, and C is the number of channels.
+         window_size (int): Size of each window.
+         pad_hw (Tuple[int, int]): Padded height and width (Hp, Wp) of the input before windowing.
+         hw (Tuple[int, int]): Original height and width (H, W) of the input before padding and windowing.
+
+     Returns:
+         (torch.Tensor): Unpartitioned sequences with shape (B, H, W, C), where B is the batch size, H and W
+             are the original height and width, and C is the number of channels.
+
+     Examples:
+         >>> windows = torch.rand(32, 8, 8, 64)  # 32 windows of size 8x8 with 64 channels
+         >>> pad_hw = (16, 16)  # Padded height and width
+         >>> hw = (15, 14)  # Original height and width
+         >>> x = window_unpartition(windows, window_size=8, pad_hw=pad_hw, hw=hw)
+         >>> print(x.shape)
+         torch.Size([1, 15, 14, 64])
+     """
+     Hp, Wp = pad_hw
+     H, W = hw
+     B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+     x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
+     x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
+
+     if Hp > H or Wp > W:
+         x = x[:, :H, :W, :].contiguous()
+     return x
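
A minimal sketch exercising the new windowing helpers and the 1D sinusoidal embedding above. The import path is taken from the files-changed list (this hunk's +191 lines match ultralytics/models/sam2/modules/utils.py); the shapes in the comments assume the code behaves as written.

import torch

from ultralytics.models.sam2.modules.utils import get_1d_sine_pe, window_partition, window_unpartition

# Window partition/unpartition round-trip on a spatial size that is not divisible by the window size.
x = torch.randn(2, 15, 14, 32)                        # (B, H, W, C)
windows, (Hp, Wp) = window_partition(x, window_size=8)
print(windows.shape, Hp, Wp)                          # torch.Size([8, 8, 8, 32]) 16 16
x_restored = window_unpartition(windows, 8, (Hp, Wp), (15, 14))
assert torch.allclose(x, x_restored)                  # padding is stripped, original values preserved

# 1D sinusoidal embeddings for a few frame indices.
pos = get_1d_sine_pe(torch.tensor([0.0, 1.0, 5.0]), dim=256)
print(pos.shape)                                      # torch.Size([3, 256])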

ultralytics/models/sam2/predict.py
@@ -0,0 +1,182 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import torch
+
+ from ..sam.predict import Predictor
+ from .build import build_sam2
+
+
+ class SAM2Predictor(Predictor):
+     """
+     A predictor class for the Segment Anything Model 2 (SAM2), extending the base Predictor class.
+
+     This class provides an interface for model inference tailored to image segmentation tasks, leveraging SAM2's
+     advanced architecture and promptable segmentation capabilities. It facilitates flexible and real-time mask
+     generation, working with various types of prompts such as bounding boxes, points, and low-resolution masks.
+
+     Attributes:
+         cfg (Dict): Configuration dictionary specifying model and task-related parameters.
+         overrides (Dict): Dictionary containing values that override the default configuration.
+         _callbacks (Dict): Dictionary of user-defined callback functions to augment behavior.
+         args (namespace): Namespace to hold command-line arguments or other operational variables.
+         im (torch.Tensor): Preprocessed input image tensor.
+         features (torch.Tensor): Extracted image features used for inference.
+         prompts (Dict): Collection of various prompt types, such as bounding boxes and points.
+         segment_all (bool): Flag to control whether to segment all objects in the image or only specified ones.
+         model (torch.nn.Module): The loaded SAM2 model.
+         device (torch.device): The device (CPU or GPU) on which the model is loaded.
+         _bb_feat_sizes (List[Tuple[int, int]]): List of feature sizes for different backbone levels.
+
+     Methods:
+         get_model: Builds and returns the SAM2 model.
+         prompt_inference: Performs image segmentation inference based on various prompts.
+         set_image: Preprocesses and sets a single image for inference.
+         get_im_features: Extracts image features from the SAM2 image encoder.
+
+     Examples:
+         >>> predictor = SAM2Predictor(model='sam2_l.pt')
+         >>> predictor.set_image('path/to/image.jpg')
+         >>> masks, scores = predictor.prompt_inference(im=predictor.im, points=[[500, 375]], labels=[1])
+         >>> print(f"Generated {len(masks)} mask(s) with scores: {scores}")
+     """
+
+     _bb_feat_sizes = [
+         (256, 256),
+         (128, 128),
+         (64, 64),
+     ]
+
+     def get_model(self):
+         """Retrieves and initializes the Segment Anything Model (SAM) for image segmentation tasks."""
+         return build_sam2(self.args.model)
+
+     def prompt_inference(
+         self,
+         im,
+         bboxes=None,
+         points=None,
+         labels=None,
+         masks=None,
+         multimask_output=False,
+         img_idx=-1,
+     ):
+         """
+         Performs image segmentation inference based on various prompts using SAM2 architecture.
+
+         Args:
+             im (torch.Tensor): Preprocessed input image tensor with shape (N, C, H, W).
+             bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+             points (np.ndarray | List | None): Points indicating object locations with shape (N, 2), in pixels.
+             labels (np.ndarray | List | None): Labels for point prompts with shape (N,). 1 = foreground, 0 = background.
+             masks (np.ndarray | None): Low-resolution masks from previous predictions with shape (N, H, W).
+             multimask_output (bool): Flag to return multiple masks for ambiguous prompts.
+             img_idx (int): Index of the image in the batch to process.
+
+         Returns:
+             (tuple): Tuple containing:
+                 - np.ndarray: Output masks with shape (C, H, W), where C is the number of generated masks.
+                 - np.ndarray: Quality scores for each mask, with length C.
+                 - np.ndarray: Low-resolution logits with shape (C, 256, 256) for subsequent inference.
+
+         Examples:
+             >>> predictor = SAM2Predictor(cfg)
+             >>> image = torch.rand(1, 3, 640, 640)
+             >>> bboxes = [[100, 100, 200, 200]]
+             >>> masks, scores, logits = predictor.prompt_inference(image, bboxes=bboxes)
+         """
+         features = self.get_im_features(im) if self.features is None else self.features
+
+         src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
+         r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
+         # Transform input prompts
+         if points is not None:
+             points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
+             points = points[None] if points.ndim == 1 else points
+             # Assuming labels are all positive if users don't pass labels.
+             if labels is None:
+                 labels = torch.ones(points.shape[0])
+             labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
+             points *= r
+             # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
+             points, labels = points[:, None], labels[:, None]
+         if bboxes is not None:
+             bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
+             bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
+             bboxes *= r
+         if masks is not None:
+             masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
+
+         points = (points, labels) if points is not None else None
+         # TODO: Embed prompts
+         # if bboxes is not None:
+         #     box_coords = bboxes.reshape(-1, 2, 2)
+         #     box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=bboxes.device)
+         #     box_labels = box_labels.repeat(bboxes.size(0), 1)
+         # # we merge "boxes" and "points" into a single "concat_points" input (where
+         # # boxes are added at the beginning) to sam_prompt_encoder
+         # if concat_points is not None:
+         #     concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
+         #     concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
+         #     concat_points = (concat_coords, concat_labels)
+         # else:
+         #     concat_points = (box_coords, box_labels)
+
+         sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
+             points=points,
+             boxes=bboxes,
+             masks=masks,
+         )
+         # Predict masks
+         batched_mode = points is not None and points[0].shape[0] > 1  # multi object prediction
+         high_res_features = [feat_level[img_idx].unsqueeze(0) for feat_level in features["high_res_feats"]]
+         pred_masks, pred_scores, _, _ = self.model.sam_mask_decoder(
+             image_embeddings=features["image_embed"][img_idx].unsqueeze(0),
+             image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
+             sparse_prompt_embeddings=sparse_embeddings,
+             dense_prompt_embeddings=dense_embeddings,
+             multimask_output=multimask_output,
+             repeat_image=batched_mode,
+             high_res_features=high_res_features,
+         )
+         # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
+         # `d` could be 1 or 3 depends on `multimask_output`.
+         return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+
+     def set_image(self, image):
+         """
+         Preprocesses and sets a single image for inference.
+
+         This function sets up the model if not already initialized, configures the data source to the specified image,
+         and preprocesses the image for feature extraction. Only one image can be set at a time.
+
+         Args:
+             image (str | np.ndarray): Image file path as a string, or a numpy array image read by cv2.
+
+         Raises:
+             AssertionError: If more than one image is set.
+
+         Examples:
+             >>> predictor = SAM2Predictor()
+             >>> predictor.set_image("path/to/image.jpg")
+             >>> predictor.set_image(np.array([...]))  # Using a numpy array
+         """
+         if self.model is None:
+             self.setup_model(model=None)
+         self.setup_source(image)
+         assert len(self.dataset) == 1, "`set_image` only supports setting one image!"
+         for batch in self.dataset:
+             im = self.preprocess(batch[1])
+             self.features = self.get_im_features(im)
+             break
+
+     def get_im_features(self, im):
+         """Extracts and processes image features using SAM2's image encoder for subsequent segmentation tasks."""
+         backbone_out = self.model.forward_image(im)
+         _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
+         if self.model.directly_add_no_mem_embed:
+             vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
+         feats = [
+             feat.permute(1, 2, 0).view(1, -1, *feat_size)
+             for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
+         ][::-1]
+         return {"image_embed": feats[-1], "high_res_feats": feats[:-1]}

ultralytics/nn/modules/transformer.py
@@ -174,18 +174,20 @@ class MLPBlock(nn.Module):
  class MLP(nn.Module):
      """Implements a simple multi-layer perceptron (also called FFN)."""

-     def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+     def __init__(self, input_dim, hidden_dim, output_dim, num_layers, act=nn.ReLU, sigmoid=False):
          """Initialize the MLP with specified input, hidden, output dimensions and number of layers."""
          super().__init__()
          self.num_layers = num_layers
          h = [hidden_dim] * (num_layers - 1)
          self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+         self.sigmoid = sigmoid
+         self.act = act()

      def forward(self, x):
          """Forward pass for the entire MLP."""
          for i, layer in enumerate(self.layers):
-             x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
-         return x
+             x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
+         return x.sigmoid() if self.sigmoid else x


  class LayerNorm2d(nn.Module):
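
A small sketch of the updated MLP: the hidden-layer activation is now configurable and the output can optionally be squashed with a sigmoid. It assumes the class shown above is the MLP in ultralytics.nn.modules.transformer (the file listed with +5 -3 changes).

import torch
import torch.nn as nn

from ultralytics.nn.modules.transformer import MLP

# Three linear layers, GELU between hidden layers, sigmoid on the output.
mlp = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3, act=nn.GELU, sigmoid=True)
x = torch.randn(8, 256)
y = mlp(x)
print(y.shape, float(y.min()), float(y.max()))  # torch.Size([8, 4]); values lie in [0, 1] because sigmoid=True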

ultralytics/utils/__init__.py
@@ -47,7 +47,7 @@ PYTHON_VERSION = platform.python_version()
  TORCH_VERSION = torch.__version__
  TORCHVISION_VERSION = importlib.metadata.version("torchvision") # faster than importing torchvision
  HELP_MSG = """
-     Usage examples for running YOLOv8:
+     Usage examples for running Ultralytics YOLO:

      1. Install the ultralytics package:

@@ -58,25 +58,25 @@ HELP_MSG = """
          from ultralytics import YOLO

          # Load a model
-         model = YOLO('yolov8n.yaml') # build a new model from scratch
+         model = YOLO("yolov8n.yaml") # build a new model from scratch
          model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)

          # Use the model
          results = model.train(data="coco8.yaml", epochs=3) # train the model
          results = model.val() # evaluate model performance on the validation set
-         results = model('https://ultralytics.com/images/bus.jpg') # predict on an image
-         success = model.export(format='onnx') # export the model to ONNX format
+         results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
+         success = model.export(format="onnx") # export the model to ONNX format

      3. Use the command line interface (CLI):

-         YOLOv8 'yolo' CLI commands use the following syntax:
+         Ultralytics 'yolo' CLI commands use the following syntax:

          yolo TASK MODE ARGS

-         Where TASK (optional) is one of [detect, segment, classify]
-             MODE (required) is one of [train, val, predict, export]
-             ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
-             See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
+         Where TASK (optional) is one of [detect, segment, classify, pose, obb]
+             MODE (required) is one of [train, val, predict, export, benchmark]
+             ARGS (optional) are any number of custom "arg=value" pairs like "imgsz=320" that override defaults.
+             See all ARGS at https://docs.ultralytics.com/usage/cfg or with "yolo cfg"

          - Train a detection model for 10 epochs with an initial learning_rate of 0.01
              yolo detect train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01

ultralytics/utils/plotting.py
@@ -280,7 +280,7 @@ class Annotator:
          Args:
              box (tuple): The bounding box coordinates (x1, y1, x2, y2).
              label (str): The text label to be displayed.
-             color (tuple, optional): The background color of the rectangle (R, G, B).
+             color (tuple, optional): The background color of the rectangle (B, G, R).
              txt_color (tuple, optional): The color of the text (R, G, B).
              rotated (bool, optional): Variable used to check if task is OBB
          """

ultralytics/utils/torch_utils.py
@@ -1,5 +1,5 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
-
+ import contextlib
  import gc
  import math
  import os
@@ -48,11 +48,12 @@ TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
  def torch_distributed_zero_first(local_rank: int):
      """Ensures all processes in distributed training wait for the local master (rank 0) to complete a task first."""
      initialized = dist.is_available() and dist.is_initialized()
+
      if initialized and local_rank not in {-1, 0}:
          dist.barrier(device_ids=[local_rank])
      yield
      if initialized and local_rank == 0:
-         dist.barrier(device_ids=[0])
+         dist.barrier(device_ids=[local_rank])


  def smart_inference_mode():
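
The barrier fix above affects torch_distributed_zero_first, a generator used as a context manager (it is wrapped with contextlib.contextmanager upstream). A hedged sketch of the usual DDP pattern: rank 0 performs a one-time task while the other ranks wait at the barrier, then all ranks proceed. The dataset helper is hypothetical.

import os

from ultralytics.utils.torch_utils import torch_distributed_zero_first


def prepare_dataset_cache():
    """Hypothetical one-time setup (e.g. downloading or verifying a dataset) that only rank 0 should run."""
    return "path/to/dataset"


local_rank = int(os.getenv("LOCAL_RANK", -1))

with torch_distributed_zero_first(local_rank):
    # Rank 0 (and single-process runs) executes first; the other ranks block at the barrier until it finishes.
    data_path = prepare_dataset_cache()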

@@ -101,12 +102,15 @@ def autocast(enabled: bool, device: str = "cuda"):

  def get_cpu_info():
      """Return a string with system CPU information, i.e. 'Apple M2'."""
-     import cpuinfo # pip install py-cpuinfo
+     with contextlib.suppress(Exception):
+         import cpuinfo # pip install py-cpuinfo
+
+         k = "brand_raw", "hardware_raw", "arch_string_raw" # keys sorted by preference (not all keys always available)
+         info = cpuinfo.get_cpu_info() # info dict
+         string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
+         return string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")

-     k = "brand_raw", "hardware_raw", "arch_string_raw" # info keys sorted by preference (not all keys always available)
-     info = cpuinfo.get_cpu_info() # info dict
-     string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
-     return string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")
+     return "unknown"


  def select_device(device="", batch=0, newline=False, verbose=True):
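
With the change above, any failure inside py-cpuinfo (or a missing package) makes get_cpu_info() fall through to "unknown" instead of raising. A quick check:

from ultralytics.utils.torch_utils import get_cpu_info

print(get_cpu_info())  # e.g. "Apple M2" or "Intel Core(TM) i7-10750H 2.60GHz"; "unknown" if py-cpuinfo fails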

{ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.69
+ Version: 8.2.71
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu

{ultralytics-8.2.69.dist-info → ultralytics-8.2.71.dist-info}/RECORD
@@ -8,10 +8,10 @@ tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
  tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
  tests/test_python.py,sha256=cLK8dyRf_4H_znFIm-krnOFMydwkxKlVZvHwl9vbck8,21780
  tests/test_solutions.py,sha256=EACnPXbeJe2aVTOKfqMk5jclKKCWCVgFEzjpR6y7Sh8,3304
- ultralytics/__init__.py,sha256=YWRj4FNGuxXRahBpsPRAOxm3h0rYMQTFTPqJcnwUEDE,694
+ ultralytics/__init__.py,sha256=sV3uzVV5yg9sqZSe7JS8hxO873wlJUProzRIeVii45U,712
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=fD3Llw12sIkJo4g667t6b051je9nEpwdBLGgbbVEzHY,32973
+ ultralytics/cfg/__init__.py,sha256=7ce3_bhi7pDw5ZAbSqYR6e3_IYD2JCLCy7fkl5d1WyI,33064
  ultralytics/cfg/default.yaml,sha256=xRKVF-Z9E3imXTU9OCK94kj3jGgYoo67VJQwuYlHiUU,8228
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
@@ -87,7 +87,7 @@ ultralytics/data/annotator.py,sha256=1Hyu6ubrBL8KmRrt1keGn-K4XTqQdAVyIwTsQiBtzLU
  ultralytics/data/augment.py,sha256=ExU4khJfJ_TeczkJRLNUDscN57SJvAjnm-reouJcxGI,119309
  ultralytics/data/base.py,sha256=C3teLnw97ZTbpJHT9P7yYWosAKocMzgJjRe1rxgfpls,13524
  ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
- ultralytics/data/converter.py,sha256=7640xKuf7LPeoTwoCvgbIXM5xbzyq72Hu2Rf2lrgjRY,17554
+ ultralytics/data/converter.py,sha256=R9zrsClQGxDBg035o63jNQRALALY3XVAytD9xksVU1M,21442
  ultralytics/data/dataset.py,sha256=ZBnO9KPVOJXwKQbN2LlmROIxLEb0mtppVQlrC4sX3oE,22879
  ultralytics/data/loaders.py,sha256=vy71TzKAPqohCp4MDNQpia2CR1LaOxAU5eA14DonJoU,24085
  ultralytics/data/split_dota.py,sha256=fWezt1Bo3jiZ6AyUWdBtTUuvLamPv1t7JD-DirM9gQ8,10142
@@ -102,18 +102,18 @@ ultralytics/engine/exporter.py,sha256=EM35MOPWbIKE2ShJsPzdrEmrjzwZSp9gW-rO8GEFal
  ultralytics/engine/model.py,sha256=8YSxLan1OfV_IynCQjAzaGS4gCWTEbGLfUWnfTDxhsE,52047
  ultralytics/engine/predictor.py,sha256=W58kDCFH2AfoFzpGbos3k8zUEVsLunBuM8sc2B64rPY,17449
  ultralytics/engine/results.py,sha256=oNAzSKdKxxx_5QQd9opzCevvgPhspdY5BkWxoz5bQ8E,69882
- ultralytics/engine/trainer.py,sha256=esQhG3XJUF1vsl49GavnqpL0tvMZIY-SwD_hw1XmWdU,35454
+ ultralytics/engine/trainer.py,sha256=BHcwcWgEiN7wBMEkGfO4AVo3O3M0IreVl1OnHXPipuA,35482
  ultralytics/engine/tuner.py,sha256=iZrgMmXSDpfuDu4bdFRflmAsscys2-8W8qAGxSyOVJE,11844
- ultralytics/engine/validator.py,sha256=Y21Uo8_Zto4qjk_YqQk6k7tyfpq_Qk9cfjeXeyDRxs8,14643
+ ultralytics/engine/validator.py,sha256=R0qND144EHTOedGDoqI60MxY9RigbynjvGrRYUjjPsU,14682
  ultralytics/hub/__init__.py,sha256=93bqI8x8-MfDYdKkQVduuocUiQj3WGnk1nIk0li08zA,5663
  ultralytics/hub/auth.py,sha256=FID58NE6fh7Op_B45QOpWBw1qoBN0ponL16uvyb2dZ8,5399
  ultralytics/hub/session.py,sha256=UF_aVwyxnbP-OzpzKXGGhi4i6KGWjjhoj5Qsn46dFpE,16257
  ultralytics/hub/utils.py,sha256=tXfM3QbXBcf4Y6StgHI1pktT4OM7Ic9eF3xiBFHGlhY,9721
  ultralytics/hub/google/__init__.py,sha256=qyvvpGP-4NAtrn7GLqfqxP_aWuRP1T0OvJYafWKvL2Q,7512
- ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqqE,265
+ ultralytics/models/__init__.py,sha256=AlVStwxv5pMrYaPL8dLhu4sY2c2JgqmuK__RlEDKrEo,296
  ultralytics/models/fastsam/__init__.py,sha256=W0rRSJM3vdxcsneuiN6_ajkUw86k6-opUKdLxVhKOoQ,203
  ultralytics/models/fastsam/model.py,sha256=r5VZj-KLKaqZtEKTZxQik8vQI2N9uOF4xpV_gA-P8h0,2101
- ultralytics/models/fastsam/predict.py,sha256=ej1Z93W73hThBxuHTdb-LB-yElijKnAMxrTUMlXJ8Qs,7262
+ ultralytics/models/fastsam/predict.py,sha256=z5j2IMwf4MURuROKeqNXW1WvOSj91UdJa7dLRqN_OFc,7370
  ultralytics/models/fastsam/utils.py,sha256=dCSm6l5yua_PTT5aNvyOvn1Q0h42Ta_NovO7sTbsBxM,715
  ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
  ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
@@ -127,15 +127,26 @@ ultralytics/models/rtdetr/train.py,sha256=20AFYVW9NPxw0-cp-sRdIovWidFL0IIhJRv2oZ
  ultralytics/models/rtdetr/val.py,sha256=4QQArdaGEY8rJsJuvyJ032f8GGVGdV2jURHK2EdMxyk,5566
  ultralytics/models/sam/__init__.py,sha256=9A1iyfPN_ncqq3TMExe_-uPoARjEX3psoHEI1xMG2VE,144
  ultralytics/models/sam/amg.py,sha256=He2c4nIoZ__F_pL18rRl278R8iBjWXBM2Z_vxfuVOkk,7971
- ultralytics/models/sam/build.py,sha256=-i-vj0egQ2idBZUf3Xf-H89QeToM3ky0HTxKP_KEXTs,4944
- ultralytics/models/sam/model.py,sha256=dkEhqJEZFuSoKubMaAjUx1U9Np49AII3nBScdH8rMBI,4707
- ultralytics/models/sam/predict.py,sha256=hachjdcJ175v_oOUPmu_jG_VSe2wCbpLpi4qymUJV34,23575
+ ultralytics/models/sam/build.py,sha256=BSpRgDIQb-kgxaQtSq0C7Zb2UsqkfkFRmErC_bzKYIg,4954
+ ultralytics/models/sam/model.py,sha256=MVO7WqF41Sq1-qbsN8O8Fophe8anYVY67yp17Sudp0k,4979
+ ultralytics/models/sam/predict.py,sha256=GoR8xCwt3VJdBX5wUUhJ-3qY87LoG-R1eG8SziTDLP0,23755
  ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/models/sam/modules/decoders.py,sha256=7NWnBNupxGYvH0S1N0R6NBHxdVFRUrrnL9EqAw09J4E,7816
- ultralytics/models/sam/modules/encoders.py,sha256=pRNZHzt2J2xD_D0Btu8pk4DcItfr6dRr9rcRfxoZZhU,24746
- ultralytics/models/sam/modules/sam.py,sha256=zC4l4kcrIQD_ekczjl2l6dgaABqqjROZxQ-FDb-itt0,2783
+ ultralytics/models/sam/modules/decoders.py,sha256=SkVdfsFwy8g4rOJYXi2rWg2zI5HEttsQJi6E4Uwxs9o,6307
+ ultralytics/models/sam/modules/encoders.py,sha256=JHot5dRyr_d3wAzz7jgc_SOWOWibWKRWOt-IGBXPhW8,24894
+ ultralytics/models/sam/modules/sam.py,sha256=lyB-edOBr85gACTaVqG0WiSIS4FyohTtLqkNMKDwVM0,2695
  ultralytics/models/sam/modules/tiny_encoder.py,sha256=rAY9JuyxUpFivFUUPVjK2aUYlsXEZ0JGKVoEWDGf0Eo,29228
- ultralytics/models/sam/modules/transformer.py,sha256=VINZMb4xkx4IHAbJdhCq2XLDvaFBMup7RGC16DLS7OY,11164
+ ultralytics/models/sam/modules/transformer.py,sha256=a2jsS_J76MvrIKIERb_0flliYFMjpBbwVL4UnsNnoyE,11232
+ ultralytics/models/sam2/__init__.py,sha256=_xqQHLZTLgEdK278YETYR-Fts2hsvXP5q9ddUbuuFvc,154
+ ultralytics/models/sam2/build.py,sha256=m6hv82VKn3Lct_7nztUqdzJzCV9Nbr5mvqpI8nkReQM,5422
+ ultralytics/models/sam2/model.py,sha256=PS-eV78DVNrGZmUq7L7gJHgrGjxnySM1TTHkwfrQM7E,3408
+ ultralytics/models/sam2/predict.py,sha256=gvKf6qcStFiT9SLzo8Ol25suIh-QRVcOcdbyeuM2ORw,8894
+ ultralytics/models/sam2/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
+ ultralytics/models/sam2/modules/decoders.py,sha256=t4SR-0g3HQstk-agiapCsVYTMZBFc2vz24zfgBwZUkw,15376
+ ultralytics/models/sam2/modules/encoders.py,sha256=0VRK2wdl0vZzKA3528_j-Vyn4Iy8XlNHp2ftQRn-aGE,13313
+ ultralytics/models/sam2/modules/memory_attention.py,sha256=4zdvm8_ANM0r8QSN_xBGi9l-9Ugjt3gxBsHv2cHczjc,6214
+ ultralytics/models/sam2/modules/sam2.py,sha256=CgCBrfjhKDHI2n8iM6AIJmXeCEgf2_qUz7rzZT31fB0,44255
+ ultralytics/models/sam2/modules/sam2_blocks.py,sha256=7HmuZTFw8VVdAVDsIStWByxyUHBqytnfgvQMaCNr1GU,28379
+ ultralytics/models/sam2/modules/utils.py,sha256=2H5C3sjBnYoPuoJqflH3AmGeBJoKrhHea136jgwIq_I,8320
  ultralytics/models/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/models/utils/loss.py,sha256=PmlKDe4xQTiYkPSCdNUabxJC7bh43zGxiKVIxsXBVGE,15135
  ultralytics/models/utils/ops.py,sha256=sAeD_koytXDzHibIvQLLAx3vOpGdhdAiQhMiNFUnn5U,13255
@@ -172,7 +183,7 @@ ultralytics/nn/modules/activation.py,sha256=RS0DRDm9r56tojN79X8UBVtiktde9Wasw7GI
  ultralytics/nn/modules/block.py,sha256=jLXQerl4nXfr4MEGMp9S3YgdTqOJzas1GBxryyXyLV0,34582
  ultralytics/nn/modules/conv.py,sha256=Ywe87IhuaS22mR2JJ9xjnW8Sb-m7WTjxuqIxV_Dv8lI,12722
  ultralytics/nn/modules/head.py,sha256=vlp3rMa54kjiuPqP32_RdgOb9KrHItiJx0ih1SFzQec,26853
- ultralytics/nn/modules/transformer.py,sha256=AxD9uURpCl-EqvXe3DiG6JW-pBzB16G-AahLdZ7yayo,17909
+ ultralytics/nn/modules/transformer.py,sha256=8ux2-0ObrafMTYCLucLLVmqk9XWz74bwmWtJGDmgF6Q,18028
  ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQjhU,3196
  ultralytics/solutions/__init__.py,sha256=O_G9jh34NnFsHKSA8zcJH0CHtg1Q01JEiRWGwX3vGJY,631
  ultralytics/solutions/ai_gym.py,sha256=KQdx0RP9t9y1MqYMVlYUSn09SVJSUwKvgxPri_DhczM,4721
@@ -193,7 +204,7 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
  ultralytics/trackers/utils/gmc.py,sha256=-1oBNFRB-9EawJmUOT566AygLCVxJw-jsPSIOl5j_Hk,13683
  ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6Wu5IY2B-TGxJKU,15168
  ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
- ultralytics/utils/__init__.py,sha256=FyRZRncXCAGmtFp1Dd4ACUK5zTrp6sOK-zFuG2Nwm1I,38905
+ ultralytics/utils/__init__.py,sha256=kVU1d73Z6rSjgwyfdOwBaVrT2hYVwijUD54Wl_AWWA4,38942
  ultralytics/utils/autobatch.py,sha256=POJb9f8dioI7lPGnCc7bdxt0ncftXZa0bvOkip-XoWk,3969
  ultralytics/utils/benchmarks.py,sha256=6tdNcBLATllWpmAMUC6TW7DiCx1VKHhnQN4vkoqN3sE,23866
  ultralytics/utils/checks.py,sha256=hBkhOinWRzhpA5SbY1v-wCMdFeOemORRlmKBXgwoHYo,28498
@@ -206,9 +217,9 @@ ultralytics/utils/loss.py,sha256=mDHGmF-gjggAUVhI1dkCm7TtfZHCwz25XKm4M2xJKLs,339
  ultralytics/utils/metrics.py,sha256=UXMhBnTtMcpTANxmQqcYkVnj8NeAt39gZez0g6jbrW0,53786
  ultralytics/utils/ops.py,sha256=WJHyjyTH8xl5bRkBX0JB3K1sHAGONHx_joubUewE0A8,32709
  ultralytics/utils/patches.py,sha256=Oo3DkP7MbXnNGvPfoFSocAkVvaPh9kwMT_9RQUfjVhI,3594
- ultralytics/utils/plotting.py,sha256=5HRfiG2dklWZJheTxGTy0gFRk39utHcZbMJl7j2hnMI,55522
+ ultralytics/utils/plotting.py,sha256=Bc-8SPs6R1BKMW1V8oVeD-ajMsWP0knAydsoFrB_doU,55522
  ultralytics/utils/tal.py,sha256=hia39MhWPFpDWOTAXC_5vz-9cUdiRHZs-UcTnxD4Dlo,16112
- ultralytics/utils/torch_utils.py,sha256=ClXfUpTKFkwD3Zfnjk9YnK0gsA-7YmvEasTDiVN9Wvw,28880
+ ultralytics/utils/torch_utils.py,sha256=fvt3J2Oh1SgUcjUGSFK8sCKhCp826y6S7NBEiDGZpbI,28985
  ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
  ultralytics/utils/tuner.py,sha256=49KAadKZsUeCpwIm5Sn0grb0RPcMNI8vHGLwroDEJNI,6171
  ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -222,9 +233,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
  ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
  ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
  ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
- ultralytics-8.2.69.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.2.69.dist-info/METADATA,sha256=htZwlHV6f-WyWZpx2aAgEhKJYDRhK56EMOs0w0XwhZ4,41337
- ultralytics-8.2.69.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
- ultralytics-8.2.69.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.2.69.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.2.69.dist-info/RECORD,,
+ ultralytics-8.2.71.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.2.71.dist-info/METADATA,sha256=neVOnXCAh1rp6O9ps9cdZbw_Pns6ZW0v8_Va4Prqy8k,41337
+ ultralytics-8.2.71.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+ ultralytics-8.2.71.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.2.71.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.2.71.dist-info/RECORD,,