dgenerate-ultralytics-headless 8.3.227-py3-none-any.whl → 8.3.228-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in the public registry they were fetched from. It is provided for informational purposes only.
- {dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/METADATA +1 -1
- {dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/RECORD +9 -9
- ultralytics/__init__.py +1 -1
- ultralytics/nn/text_model.py +13 -4
- ultralytics/utils/tqdm.py +9 -1
- {dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dgenerate-ultralytics-headless
-Version: 8.3.227
+Version: 8.3.228
 Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.227.dist-info → dgenerate_ultralytics_headless-8.3.228.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-dgenerate_ultralytics_headless-8.3.
+dgenerate_ultralytics_headless-8.3.228.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
 tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
 tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=OMLio2uUhyqo8D8qB5xUwmk7Po2rMeAACRc8WYoxbj4,13147
 tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
 tests/test_python.py,sha256=jhnN-Oie3euE3kfHzUqvnadkWOsQyvFmdmEcse9Rsto,29253
 tests/test_solutions.py,sha256=j_PZZ5tMR1Y5ararY-OTXZr1hYJ7vEVr8H3w4O1tbQs,14153
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=3a_Min7fPzhcqS0xB9RqedP5HwOYBqFtntprkA3lxyM,1302
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -200,7 +200,7 @@ ultralytics/models/yolo/yoloe/val.py,sha256=utUFWeFKRFWZrPr1y3A8ztbTwdoWMYqzlwBN
 ultralytics/nn/__init__.py,sha256=538LZPUKKvc3JCMgiQ4VLGqRN2ZAaVLFcQbeNNHFkEA,545
 ultralytics/nn/autobackend.py,sha256=OBlE1R4ZGBF4JBMqb-ImLgaBZebap0m02qV_uJWiQTA,42673
 ultralytics/nn/tasks.py,sha256=dkfIujXeSaR8FmLYyrhl5Pj2U1h22JMEOkv9T3pIIwc,70367
-ultralytics/nn/text_model.py,sha256=
+ultralytics/nn/text_model.py,sha256=Nz7MJlIL4flNpOnwhS3qqINb_NfANSIOw4ex49yTFt0,16051
 ultralytics/nn/modules/__init__.py,sha256=5Sg_28MDfKwdu14Ty_WCaiIXZyjBSQ-xCNCwnoz_w-w,3198
 ultralytics/nn/modules/activation.py,sha256=J6n-CJKFK0YbhwcRDqm9zEJM9pSAEycj5quQss_3x6E,2219
 ultralytics/nn/modules/block.py,sha256=-Suv96Oo0LM1sqHHKudt5lL5YIcWLkxwrYVBgIAkmTs,69876
@@ -260,7 +260,7 @@ ultralytics/utils/patches.py,sha256=6WDGUokiND76iDbLeul_6Ny-bvvFcy6Bms5f9MkxhfQ,
 ultralytics/utils/plotting.py,sha256=FoGnXc52IvsVtlDvS8Ffee-SszwpepAvrYrusTn21Fs,48283
 ultralytics/utils/tal.py,sha256=w7oi6fp0NmL6hHh-yvCCX1cBuuB4JuX7w1wiR4_SMZs,20678
 ultralytics/utils/torch_utils.py,sha256=o6KMukW6g-mUYrVMPHb5qkcGbQIk8aMMnVrOrsJoL1Q,40220
-ultralytics/utils/tqdm.py,sha256=
+ultralytics/utils/tqdm.py,sha256=sYKcXJDKCgOcMp7KBAB9cmCiJxk9tvoeoto6M8QRW24,16393
 ultralytics/utils/triton.py,sha256=2wZil1PfvOpaBymTzzP8Da6Aam-2MTLumO3uBmTE5FY,5406
 ultralytics/utils/tuner.py,sha256=rN8gFWnQOJFtrGlFcvOo0Eah9dEVFx0nFkpTGrlewZA,6861
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
@@ -279,8 +279,8 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
 ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
 ultralytics/utils/export/imx.py,sha256=9UPA4CwTPADzvJx9dOsh_8fQ-LMeqG7eI9EYIn5ojkc,11621
 ultralytics/utils/export/tensorflow.py,sha256=PyAp0_rXSUcXiqV2RY0H9b_-oFaZ7hZBiSM42X53t0Q,9374
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
+dgenerate_ultralytics_headless-8.3.228.dist-info/METADATA,sha256=MAIGSG3LP-IBAsjkbuN_Mce_h_5_X9-YjMNUG-LNEqI,38811
+dgenerate_ultralytics_headless-8.3.228.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dgenerate_ultralytics_headless-8.3.228.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+dgenerate_ultralytics_headless-8.3.228.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+dgenerate_ultralytics_headless-8.3.228.dist-info/RECORD,,
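
Each RECORD line above follows the wheel format path,sha256=<digest>,<size>, where the digest is the unpadded urlsafe-base64 SHA-256 of the file contents. A minimal sketch of how such a line can be recomputed for a locally installed file; the record_entry helper is illustrative and not part of the package:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Rebuild a wheel RECORD line (path,sha256=<unpadded urlsafe-b64 digest>,size) for a local file."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Run from site-packages (or adjust the path) and compare against the line shown in the diff above
print(record_entry("ultralytics/py.typed"))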
ultralytics/__init__.py
CHANGED
ultralytics/nn/text_model.py
CHANGED
@@ -89,11 +89,13 @@ class CLIP(TextModel):
         self.device = device
         self.eval()

-    def tokenize(self, texts: str | list[str]) -> torch.Tensor:
+    def tokenize(self, texts: str | list[str], truncate: bool = True) -> torch.Tensor:
         """Convert input texts to CLIP tokens.

         Args:
             texts (str | list[str]): Input text or list of texts to tokenize.
+            truncate (bool, optional): Whether to trim texts that exceed CLIP's context length. Defaults to True to
+                avoid RuntimeError from overly long inputs while still allowing explicit opt-out.

         Returns:
             (torch.Tensor): Tokenized text tensor with shape (batch_size, context_length) ready for model processing.
@@ -102,8 +104,10 @@ class CLIP(TextModel):
             >>> model = CLIP("ViT-B/32", device="cpu")
             >>> tokens = model.tokenize("a photo of a cat")
             >>> print(tokens.shape)  # torch.Size([1, 77])
+            >>> strict_tokens = model.tokenize("a photo of a cat", truncate=False)  # Enforce strict length checks
+            >>> print(strict_tokens.shape)  # Same shape/content as tokens since prompt less than 77 tokens
         """
-        return clip.tokenize(texts).to(self.device)
+        return clip.tokenize(texts, truncate=truncate).to(self.device)

     @smart_inference_mode()
     def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
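
The new truncate flag is passed straight through to clip.tokenize from the OpenAI CLIP package, which trims over-long prompts when truncate=True and raises RuntimeError for them otherwise. Because the flag now defaults to True, captions that previously triggered RuntimeError are silently trimmed, and truncate=False restores the strict check. A minimal sketch of the resulting behavior, assuming version 8.3.228 and its CLIP dependency are installed (the long_caption value is illustrative):

from ultralytics.nn.text_model import CLIP

model = CLIP("ViT-B/32", device="cpu")
long_caption = "a photo of a cat sitting on a wall " * 20  # far beyond the 77-token context window

tokens = model.tokenize(long_caption)  # default truncate=True: trimmed to 77 tokens, no error
print(tokens.shape)  # torch.Size([1, 77])

try:
    model.tokenize(long_caption, truncate=False)  # opt out of trimming to catch oversized prompts
except RuntimeError as err:
    print(f"caption rejected: {err}")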
@@ -308,11 +312,13 @@ class MobileCLIPTS(TextModel):
         self.tokenizer = clip.clip.tokenize
         self.device = device

-    def tokenize(self, texts: list[str]) -> torch.Tensor:
+    def tokenize(self, texts: list[str], truncate: bool = True) -> torch.Tensor:
         """Convert input texts to MobileCLIP tokens.

         Args:
             texts (list[str]): List of text strings to tokenize.
+            truncate (bool, optional): Whether to trim texts that exceed the tokenizer context length. Defaults to True,
+                matching CLIP's behavior to prevent runtime failures on long captions.

         Returns:
             (torch.Tensor): Tokenized text inputs with shape (batch_size, sequence_length).
@@ -320,8 +326,11 @@ class MobileCLIPTS(TextModel):
         Examples:
             >>> model = MobileCLIPTS("cpu")
             >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
+            >>> strict_tokens = model.tokenize(
+            ...     ["a very long caption"], truncate=False
+            ... )  # RuntimeError if exceeds 77-token
         """
-        return self.tokenizer(texts).to(self.device)
+        return self.tokenizer(texts, truncate=truncate).to(self.device)

     @smart_inference_mode()
     def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
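
MobileCLIPTS reuses the same CLIP tokenizer (self.tokenizer = clip.clip.tokenize), so the flag behaves identically there. A hedged sketch of one way a caller might keep the strict check while degrading gracefully; the tokenize_strict_or_trim helper is illustrative and not part of the package:

import torch
from ultralytics.nn.text_model import MobileCLIPTS

model = MobileCLIPTS("cpu")

def tokenize_strict_or_trim(texts: list[str]) -> torch.Tensor:
    """Prefer strict tokenization; fall back to truncation only for over-length captions."""
    try:
        return model.tokenize(texts, truncate=False)  # raises RuntimeError past the context limit
    except RuntimeError:
        return model.tokenize(texts)  # default truncate=True trims to the 77-token window

tokens = tokenize_strict_or_trim(["a photo of a cat", "a very long caption " * 40])
print(tokens.shape)  # typically torch.Size([2, 77])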
ultralytics/utils/tqdm.py
CHANGED
@@ -159,9 +159,17 @@ class TQDM:
         self._display()

     def _format_rate(self, rate: float) -> str:
-        """Format rate with units."""
+        """Format rate with units, switching between it/s and s/it for readability."""
         if rate <= 0:
             return ""
+
+        inv_rate = 1 / rate if rate else None
+
+        # Use s/it format when inv_rate > 1 (i.e., rate < 1 it/s) for better readability
+        if inv_rate and inv_rate > 1:
+            return f"{inv_rate:.1f}s/B" if self.is_bytes else f"{inv_rate:.1f}s/{self.unit}"
+
+        # Use it/s format for fast iterations
         fallback = f"{rate:.1f}B/s" if self.is_bytes else f"{rate:.1f}{self.unit}/s"
         return next((f"{rate / t:.1f}{u}" for t, u in self.scales if rate >= t), fallback)

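The new branch only changes how slow rates are rendered: anything below one unit per second is shown as seconds per unit. A standalone sketch of the logic in the hunk above, with is_bytes, unit, and scales standing in for the TQDM instance attributes (the scale thresholds are illustrative):

def format_rate(rate: float, is_bytes: bool = False, unit: str = "it",
                scales=((1e9, "GB/s"), (1e6, "MB/s"), (1e3, "KB/s"))) -> str:
    """Mirror of TQDM._format_rate: slow rates render as s/it (or s/B), fast rates as it/s (or scaled B/s)."""
    if rate <= 0:
        return ""
    inv_rate = 1 / rate if rate else None
    if inv_rate and inv_rate > 1:  # slower than one unit per second: seconds-per-unit reads better
        return f"{inv_rate:.1f}s/B" if is_bytes else f"{inv_rate:.1f}s/{unit}"
    fallback = f"{rate:.1f}B/s" if is_bytes else f"{rate:.1f}{unit}/s"
    return next((f"{rate / t:.1f}{u}" for t, u in scales if rate >= t), fallback)

print(format_rate(0.25))                      # 4.0s/it  (new slow-iteration branch)
print(format_rate(12.5))                      # 12.5it/s
print(format_rate(3_500_000, is_bytes=True))  # 3.5MB/s

The byte-scaling fallback path is unchanged; only the early s/it branch is new.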
The remaining dist-info files (WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged between the two versions.