dgenerate-ultralytics-headless 8.3.236__py3-none-any.whl → 8.3.237__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {dgenerate_ultralytics_headless-8.3.236.dist-info → dgenerate_ultralytics_headless-8.3.237.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.236.dist-info → dgenerate_ultralytics_headless-8.3.237.dist-info}/RECORD +38 -25
  3. ultralytics/__init__.py +1 -1
  4. ultralytics/engine/exporter.py +17 -10
  5. ultralytics/engine/predictor.py +3 -2
  6. ultralytics/engine/trainer.py +8 -0
  7. ultralytics/models/rtdetr/val.py +5 -1
  8. ultralytics/models/sam/__init__.py +14 -1
  9. ultralytics/models/sam/build.py +17 -8
  10. ultralytics/models/sam/build_sam3.py +374 -0
  11. ultralytics/models/sam/model.py +12 -4
  12. ultralytics/models/sam/modules/blocks.py +20 -8
  13. ultralytics/models/sam/modules/decoders.py +2 -3
  14. ultralytics/models/sam/modules/encoders.py +4 -1
  15. ultralytics/models/sam/modules/memory_attention.py +6 -2
  16. ultralytics/models/sam/modules/sam.py +150 -6
  17. ultralytics/models/sam/modules/utils.py +134 -4
  18. ultralytics/models/sam/predict.py +2076 -118
  19. ultralytics/models/sam/sam3/__init__.py +3 -0
  20. ultralytics/models/sam/sam3/decoder.py +546 -0
  21. ultralytics/models/sam/sam3/encoder.py +535 -0
  22. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  23. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  24. ultralytics/models/sam/sam3/model_misc.py +198 -0
  25. ultralytics/models/sam/sam3/necks.py +129 -0
  26. ultralytics/models/sam/sam3/sam3_image.py +357 -0
  27. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  28. ultralytics/models/sam/sam3/tokenizer_ve.py +242 -0
  29. ultralytics/models/sam/sam3/vitdet.py +546 -0
  30. ultralytics/models/sam/sam3/vl_combiner.py +165 -0
  31. ultralytics/models/yolo/obb/val.py +18 -7
  32. ultralytics/nn/modules/transformer.py +21 -1
  33. ultralytics/utils/checks.py +2 -2
  34. ultralytics/utils/ops.py +1 -3
  35. {dgenerate_ultralytics_headless-8.3.236.dist-info → dgenerate_ultralytics_headless-8.3.237.dist-info}/WHEEL +0 -0
  36. {dgenerate_ultralytics_headless-8.3.236.dist-info → dgenerate_ultralytics_headless-8.3.237.dist-info}/entry_points.txt +0 -0
  37. {dgenerate_ultralytics_headless-8.3.236.dist-info → dgenerate_ultralytics_headless-8.3.237.dist-info}/licenses/LICENSE +0 -0
  38. {dgenerate_ultralytics_headless-8.3.236.dist-info → dgenerate_ultralytics_headless-8.3.237.dist-info}/top_level.txt +0 -0
ultralytics/models/sam/sam3/text_encoder_ve.py (new file)
@@ -0,0 +1,307 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+
+ from __future__ import annotations
+
+ from collections import OrderedDict
+ from typing import Callable
+
+ import torch
+ import torch.nn as nn
+ from torch.utils.checkpoint import checkpoint
+
+ from .model_misc import LayerScale
+
+
+ class ResidualAttentionBlock(nn.Module):
+     """Transformer block with multi-head attention, layer normalization, and MLP feed-forward network."""
+
+     def __init__(
+         self,
+         d_model: int,
+         n_head: int,
+         mlp_ratio: float = 4.0,
+         ls_init_value: float | None = None,
+         act_layer: Callable[[], nn.Module] = nn.GELU,
+         norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
+     ):
+         """Initialize residual attention block with configurable dimensions and normalization."""
+         super().__init__()
+         # Attention
+         self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=True)
+
+         # LayerNorm, LayerScale
+         self.ln_1 = norm_layer(d_model)
+         self.ln_2 = norm_layer(d_model)
+
+         self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+         self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+         # MLP
+         mlp_width = int(d_model * mlp_ratio)
+         self.mlp = nn.Sequential(
+             OrderedDict(
+                 [
+                     ("c_fc", nn.Linear(d_model, mlp_width)),
+                     ("gelu", act_layer()),
+                     ("c_proj", nn.Linear(mlp_width, d_model)),
+                 ]
+             )
+         )
+
+     def attention(
+         self, q_x: torch.Tensor, k_x: torch.Tensor = None, v_x: torch.Tensor = None, attn_mask: torch.Tensor = None
+     ) -> torch.Tensor:
+         """Compute multi-head attention with optional cross-attention support and masking."""
+         k_x = k_x if k_x is not None else q_x
+         v_x = v_x if v_x is not None else q_x
+         if attn_mask is not None:
+             # Leave boolean masks as is
+             if not attn_mask.dtype == torch.bool:
+                 attn_mask = attn_mask.to(q_x.dtype)
+
+         return self.attn(q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask)[0]
+
+     def forward(
+         self, q_x: torch.Tensor, k_x: torch.Tensor = None, v_x: torch.Tensor = None, attn_mask: torch.Tensor = None
+     ) -> torch.Tensor:
+         """Apply residual attention with layer normalization and MLP, supporting optional cross-attention."""
+         k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
+         v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
+         x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
+         x = x + self.ls_2(self.mlp(self.ln_2(x)))
+         return x
+
+
+ class Transformer(nn.Module):
+     """Stack of residual attention blocks forming a transformer encoder with optional gradient checkpointing."""
+
+     def __init__(
+         self,
+         width: int,
+         layers: int,
+         heads: int,
+         mlp_ratio: float = 4.0,
+         ls_init_value: float | None = None,
+         act_layer: Callable[[], nn.Module] = nn.GELU,
+         norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
+         compile_mode: str | None = None,
+         use_act_checkpoint: bool = False,
+     ):
+         """Initialize transformer with configurable depth, width, and optional compilation/checkpointing."""
+         super().__init__()
+         self.width = width
+         self.layers = layers
+         self.grad_checkpointing = use_act_checkpoint
+         self.resblocks = nn.ModuleList(
+             [
+                 ResidualAttentionBlock(
+                     width,
+                     heads,
+                     mlp_ratio,
+                     ls_init_value=ls_init_value,
+                     act_layer=act_layer,
+                     norm_layer=norm_layer,
+                 )
+                 for _ in range(layers)
+             ]
+         )
+
+         if compile_mode is not None:
+             self.forward = torch.compile(self.forward, mode=compile_mode, fullgraph=True)
+             if self.grad_checkpointing:
+                 torch._dynamo.config.optimize_ddp = False
+
+     def forward(self, x: torch.Tensor, attn_mask: torch.Tensor = None) -> torch.Tensor:
+         """Process input through all transformer blocks with optional gradient checkpointing during training."""
+         for _, r in enumerate(self.resblocks):
+             if self.grad_checkpointing and not torch.jit.is_scripting() and self.training:
+                 x = checkpoint(r, x, None, None, attn_mask, use_reentrant=False)
+             else:
+                 x = r(x, attn_mask=attn_mask)
+         return x
+
+
+ def text_global_pool(
+     x: torch.Tensor, text: torch.Tensor = None, pool_type: str = "argmax"
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+     """Extract pooled representation and tokens from text embeddings using specified pooling strategy
+     (first/last/argmax/none).
+     """
+     if pool_type == "first":
+         pooled, tokens = x[:, 0], x[:, 1:]
+     elif pool_type == "last":
+         pooled, tokens = x[:, -1], x[:, :-1]
+     elif pool_type == "argmax":
+         # take features from the eot embedding (eot_token is the highest number in each sequence)
+         assert text is not None
+         pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x
+     else:
+         pooled = tokens = x
+     return pooled, tokens
+
+
+ class TextTransformer(nn.Module):
+     """Text transformer encoder with causal masking and flexible pooling strategies."""
+
+     def __init__(
+         self,
+         context_length: int = 77,
+         vocab_size: int = 49408,
+         width: int = 512,
+         heads: int = 8,
+         layers: int = 12,
+         mlp_ratio: float = 4.0,
+         ls_init_value: float | None = None,
+         output_dim: int = 512,
+         no_causal_mask: bool = False,
+         pool_type: str = "none",  # no pooling
+         proj_bias: bool = False,
+         act_layer: Callable = nn.GELU,
+         norm_layer: Callable = nn.LayerNorm,
+         output_tokens: bool = False,
+         use_ln_post: bool = True,
+         compile_mode: str | None = None,
+         use_act_checkpoint: bool = False,
+     ):
+         """Initialize text transformer with embedding layers, transformer blocks, and pooling options."""
+         super().__init__()
+         assert pool_type in ("first", "last", "argmax", "none")
+         self.output_tokens = output_tokens
+         self.num_pos = self.context_length = context_length
+         self.vocab_size = vocab_size
+         self.width = width
+         self.output_dim = output_dim
+         self.heads = heads
+         self.pool_type = pool_type
+
+         self.token_embedding = nn.Embedding(self.vocab_size, width)
+         self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
+         self.transformer = Transformer(
+             width=width,
+             layers=layers,
+             heads=heads,
+             mlp_ratio=mlp_ratio,
+             ls_init_value=ls_init_value,
+             act_layer=act_layer,
+             norm_layer=norm_layer,
+             compile_mode=compile_mode,
+             use_act_checkpoint=use_act_checkpoint,
+         )
+         self.ln_final = norm_layer(width) if use_ln_post else nn.Identity()
+         if no_causal_mask:
+             self.attn_mask = None
+         else:
+             self.register_buffer("attn_mask", self.build_causal_mask(), persistent=False)
+         if proj_bias:
+             self.text_projection = nn.Linear(width, output_dim)
+         else:
+             self.text_projection = nn.Parameter(torch.empty(width, output_dim))
+
+     def build_causal_mask(self) -> torch.Tensor:
+         """Create a causal attention mask to prevent attention to future tokens."""
+         # lazily create causal attention mask, with full attention between the tokens
+         # pytorch uses additive attention mask; fill with -inf
+         mask = torch.empty(self.num_pos, self.num_pos)
+         mask.fill_(float("-inf"))
+         mask.triu_(1)  # zero out the lower diagonal
+         return mask
+
+     def forward(self, text: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+         """Forward pass through the text transformer, returning pooled output and optionally token embeddings."""
+         seq_len = text.shape[1]
+         x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]
+
+         attn_mask = self.attn_mask
+         if attn_mask is not None:
+             attn_mask = attn_mask[:seq_len, :seq_len]
+
+         x = x + self.positional_embedding[:seq_len]
+         x = self.transformer(x, attn_mask=attn_mask)
+
+         x = self.ln_final(x)
+         pooled, tokens = text_global_pool(x, text, pool_type=self.pool_type)
+         if self.text_projection is not None:
+             if isinstance(self.text_projection, nn.Linear):
+                 pooled = self.text_projection(pooled)
+             else:
+                 pooled = pooled @ self.text_projection
+         if self.output_tokens:
+             return pooled, tokens
+         return pooled
+
+
+ class VETextEncoder(nn.Module):
+     """Text encoder for Vision Encoder (VE) models, combining a text transformer and a linear resizer."""
+
+     def __init__(
+         self,
+         d_model: int,
+         tokenizer: Callable,
+         width: int = 1024,
+         heads: int = 16,
+         layers: int = 24,
+         context_length: int = 32,
+         vocab_size: int = 49408,
+         use_ln_post: bool = True,
+         compile_mode: str | None = None,
+         use_act_checkpoint: bool = True,
+     ):
+         """Initialize VE text encoder with a text transformer and a linear resizer to match decoder dimensions."""
+         super().__init__()
+         self.context_length = context_length
+         self.use_ln_post = use_ln_post
+         self.tokenizer = tokenizer
+
+         self.encoder = TextTransformer(
+             context_length=self.context_length,
+             vocab_size=vocab_size,
+             width=width,
+             heads=heads,
+             layers=layers,
+             # we want the tokens, not just the pooled output
+             output_tokens=True,
+             use_ln_post=use_ln_post,
+             compile_mode=compile_mode,
+             use_act_checkpoint=use_act_checkpoint,
+         )
+         self.resizer = nn.Linear(self.encoder.width, d_model)
+
+     def forward(
+         self, text: list[str] | tuple[torch.Tensor, torch.Tensor, dict], input_boxes: list | None = None
+     ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Encode text input, either raw strings or pre-encoded tensors, and resize to match decoder dimensions."""
+         if isinstance(text[0], str):
+             # no use case for this
+             assert input_boxes is None or len(input_boxes) == 0, "not supported"
+
+             # Encode the text
+             tokenized = self.tokenizer(text, context_length=self.context_length).to(
+                 self.resizer.weight.device
+             )  # [b, seq_len]
+             text_attention_mask = (tokenized != 0).bool()
+
+             # manually embed the tokens
+             inputs_embeds = self.encoder.token_embedding(tokenized)  # [b, seq_len, d=1024]
+             _, text_memory = self.encoder(tokenized)  # [b, seq_len, d=1024]
+
+             assert text_memory.shape[1] == inputs_embeds.shape[1]
+             # Invert attention mask because its the opposite in pytorch transformer
+             text_attention_mask = text_attention_mask.ne(1)
+             # Transpose memory because pytorch's attention expects sequence first
+             text_memory = text_memory.transpose(0, 1)
+             # Resize the encoder hidden states to be of the same d_model as the decoder
+             text_memory_resized = self.resizer(text_memory)
+         else:
+             # The text is already encoded, use as is.
+             text_attention_mask, text_memory_resized, tokenized = text
+             inputs_embeds = tokenized["inputs_embeds"]
+             assert input_boxes is None or len(input_boxes) == 0, "Can't replace boxes in text if it's already encoded"
+
+         # Note that the input_embeds are returned in pytorch's convention (sequence first)
+         return (
+             text_attention_mask,
+             text_memory_resized,
+             inputs_embeds.transpose(0, 1),
+         )
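
For orientation on the new text_encoder_ve.py module: TextTransformer embeds token ids, adds positional embeddings, runs the causal-masked transformer stack, applies the final LayerNorm, pools via text_global_pool, and projects the result from width to output_dim. Below is a minimal standalone sketch of that flow, assuming the module imports as ultralytics.models.sam.sam3.text_encoder_ve; the tiny hyperparameters and manual weight initialization are illustrative only and are not values used by SAM3 (positional_embedding and text_projection are allocated with torch.empty, so a freshly built instance needs them initialized before a meaningful forward pass).

import torch
from ultralytics.models.sam.sam3.text_encoder_ve import TextTransformer

# Tiny illustrative configuration (not the real SAM3 text encoder sizes)
model = TextTransformer(context_length=16, vocab_size=1000, width=64, heads=4, layers=2, output_dim=64)

# These parameters are created with torch.empty in __init__, so initialize them
# here to get finite outputs in this standalone sketch
torch.nn.init.normal_(model.positional_embedding, std=0.01)
torch.nn.init.normal_(model.text_projection, std=0.01)

tokens = torch.randint(0, 1000, (2, 16))  # [batch, seq_len] token ids
out = model(tokens)                        # pool_type="none" keeps per-token features
print(out.shape)                           # torch.Size([2, 16, 64]) after the width -> output_dim projection

With the default pool_type="none" and output_tokens=False, the forward pass returns the projected per-token features; VETextEncoder wraps this with output_tokens=True and adds a Linear resizer so the text memory matches the decoder's d_model.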
ultralytics/models/sam/sam3/tokenizer_ve.py (new file)
@@ -0,0 +1,242 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+
+ """
+ Text Tokenizer.
+
+ Copied and lightly adapted from the VE repo, which in turn copied from open_clip and OpenAI CLIP.
+ """
+
+ from __future__ import annotations
+
+ import gzip
+ import html
+ import io
+ import os
+ import string
+ from functools import lru_cache
+
+ import ftfy
+ import regex as re
+ import torch
+ from iopath.common.file_io import g_pathmgr
+
+
+ @lru_cache
+ def bytes_to_unicode():
+     """Return a list of utf-8 bytes and a corresponding list of unicode strings.
+
+     The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
+     vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around
+     5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we
+     want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters
+     the bpe code barfs on.
+     """
+     bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8 + n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
+
+
+ def get_pairs(word):
+     """Return the set of symbol pairs in a word, where a word is a tuple of variable-length symbol strings."""
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ def basic_clean(text):
+     """Basic text cleaning: fix unicode and unescape HTML entities."""
+     text = ftfy.fix_text(text)
+     text = html.unescape(html.unescape(text))
+     return text.strip()
+
+
+ def whitespace_clean(text):
+     """Remove redundant whitespace."""
+     text = re.sub(r"\s+", " ", text)
+     text = text.strip()
+     return text
+
+
+ def _clean_canonicalize(x):
+     """Clean text and canonicalize it."""
+     # basic, remove whitespace, remove punctuation, lower case
+     return canonicalize_text(basic_clean(x))
+
+
+ def _clean_lower(x):
+     """Clean text and return lowercase."""
+     # basic, remove whitespace, lower case
+     return whitespace_clean(basic_clean(x)).lower()
+
+
+ def _clean_whitespace(x):
+     """Clean text and remove redundant whitespace."""
+     # basic, remove whitespace
+     return whitespace_clean(basic_clean(x))
+
+
+ def get_clean_fn(type: str):
+     """Get text cleaning function by name."""
+     if type == "canonicalize":
+         return _clean_canonicalize
+     elif type == "lower":
+         return _clean_lower
+     elif type == "whitespace":
+         return _clean_whitespace
+     else:
+         assert False, f"Invalid clean function ({type})."
+
+
+ def canonicalize_text(text, *, keep_punctuation_exact_string=None):
+     """Return canonicalized `text` (lowercase and punctuation removed).
+
+     From:
+     https://github.com/google-research/big_vision/blob/53f18caf27a9419231bbf08d3388b07671616d3d/big_vision/evaluators/proj/image_text/prompt_engineering.py#L94
+
+     Args:
+         text: string to be canonicalized.
+         keep_punctuation_exact_string: If provided, then this exact string is kept. For example providing '{}' will
+             keep any occurrences of '{}' (but will still remove '{' and '}' that appear separately).
+     """
+     text = text.replace("_", " ")
+     if keep_punctuation_exact_string:
+         text = keep_punctuation_exact_string.join(
+             part.translate(str.maketrans("", "", string.punctuation))
+             for part in text.split(keep_punctuation_exact_string)
+         )
+     else:
+         text = text.translate(str.maketrans("", "", string.punctuation))
+     text = text.lower()
+     text = re.sub(r"\s+", " ", text)
+     return text.strip()
+
+
+ class SimpleTokenizer:
+     """A simple tokenizer for text inputs."""
+
+     def __init__(
+         self,
+         bpe_path: str | os.PathLike,
+         additional_special_tokens: list[str] | None = None,
+         context_length: int = 77,
+         clean: str = "lower",
+     ):
+         """Initialize the tokenizer from a gzipped BPE merges file and optional additional special tokens."""
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         with g_pathmgr.open(bpe_path, "rb") as fh:
+             bpe_bytes = io.BytesIO(fh.read())
+             merges = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
+             # merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
+         merges = merges[1 : 49152 - 256 - 2 + 1]
+         merges = [tuple(merge.split()) for merge in merges]
+         vocab = list(bytes_to_unicode().values())
+         vocab = vocab + [v + "</w>" for v in vocab]
+         for merge in merges:
+             vocab.append("".join(merge))
+         special_tokens = ["<start_of_text>", "<end_of_text>"]
+         if additional_special_tokens:
+             special_tokens += additional_special_tokens
+         vocab.extend(special_tokens)
+         self.encoder = dict(zip(vocab, range(len(vocab))))
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.bpe_ranks = dict(zip(merges, range(len(merges))))
+         self.cache = {t: t for t in special_tokens}
+         special = "|".join(special_tokens)
+         self.pat = re.compile(
+             special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
+             re.IGNORECASE,
+         )
+         self.vocab_size = len(self.encoder)
+         self.all_special_ids = [self.encoder[t] for t in special_tokens]
+         self.sot_token_id = self.all_special_ids[0]
+         self.eot_token_id = self.all_special_ids[1]
+         self.context_length = context_length
+         self.clean_fn = get_clean_fn(clean)
+
+     def bpe(self, token):
+         """Byte Pair Encoding."""
+         if token in self.cache:
+             return self.cache[token]
+         word = (*tuple(token[:-1]), token[-1] + "</w>")
+         pairs = get_pairs(word)
+         if not pairs:
+             return token + "</w>"
+         while True:
+             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                     new_word.extend(word[i:j])
+                     i = j
+                 except Exception:
+                     new_word.extend(word[i:])
+                     break
+                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                     new_word.append(first + second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = " ".join(word)
+         self.cache[token] = word
+         return word
+
+     def encode(self, text):
+         """Encode text to a sequence of BPE tokens."""
+         bpe_tokens = []
+         text = self.clean_fn(text)
+         for token in re.findall(self.pat, text):
+             token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
+             bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
+         return bpe_tokens
+
+     def decode(self, tokens):
+         """Decode a sequence of tokens back into a text string."""
+         text = "".join([self.decoder[token] for token in tokens])
+         text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors="replace").replace("</w>", " ")
+         return text
+
+     def __call__(self, texts: str | list[str], context_length: int | None = None) -> torch.LongTensor:
+         """Return the tokenized representation of the given input string(s).
+
+         Args:
+             texts: An input string or a list of input strings to tokenize.
+             context_length: The context length to use; all CLIP models use 77 as the context length.
+
+         Returns:
+             A two-dimensional tensor containing the resulting tokens, shape = [number of input strings,
+             context_length].
+         """
+         if isinstance(texts, str):
+             texts = [texts]
+         context_length = context_length or self.context_length
+         assert context_length, "Please set a valid context length"
+         all_tokens = [[self.sot_token_id, *self.encode(text), self.eot_token_id] for text in texts]
+         result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+         for i, tokens in enumerate(all_tokens):
+             if len(tokens) > context_length:
+                 tokens = tokens[:context_length]  # Truncate
+                 tokens[-1] = self.eot_token_id
+             result[i, : len(tokens)] = torch.tensor(tokens)
+         return result
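
For orientation on SimpleTokenizer.__call__: each text is cleaned, BPE-encoded, wrapped in <start_of_text>/<end_of_text> ids, and packed into a zero-padded [N, context_length] LongTensor, truncating long sequences and forcing the last id back to end-of-text; the zero padding is what VETextEncoder later converts into an attention mask via (tokenized != 0). The sketch below mirrors only that packing step with placeholder ids, since constructing a real SimpleTokenizer requires the gzipped BPE vocabulary passed as bpe_path; the id values here are hypothetical, not the package's vocabulary.

import torch

# Placeholder ids; a real SimpleTokenizer derives these from the BPE vocab loaded from bpe_path
SOT, EOT = 49406, 49407
context_length = 8

# Pretend encode() produced these BPE ids for two texts; the second exceeds the context window
encoded = [[320, 1125, 539], [320] * 12]

all_tokens = [[SOT, *ids, EOT] for ids in encoded]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
    if len(tokens) > context_length:
        tokens = tokens[:context_length]  # truncate to the context window...
        tokens[-1] = EOT                  # ...but always terminate with the end-of-text id
    result[i, : len(tokens)] = torch.tensor(tokens)

print(result)  # row 0 is zero-padded, row 1 is truncated and ends with EOT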