nexaai-1.0.19rc6-cp310-cp310-macosx_14_0_universal2.whl → nexaai-1.0.19rc8-cp310-cp310-macosx_14_0_universal2.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release.
This version of nexaai might be problematic.
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +1 -1
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-base.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-cpu.so +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-metal.so +0 -0
- nexaai/binds/nexa_llama_cpp/libggml.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libllama.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libmtmd.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexa_mlx/libnexa_plugin.dylib +0 -0
- nexaai/binds/nexa_nexaml/libggml-base.dylib +0 -0
- nexaai/binds/nexa_nexaml/libggml-cpu.so +0 -0
- nexaai/binds/nexa_nexaml/libggml-metal.so +0 -0
- nexaai/binds/nexa_nexaml/libggml.dylib +0 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +276 -0
- nexaai/mlx_backend/vlm/interface.py +21 -4
- nexaai/mlx_backend/vlm/main.py +6 -2
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +117 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +531 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +701 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +255 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +303 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +407 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/processor.py +476 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +1309 -0
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/switch_layers.py +210 -0
- nexaai/utils/manifest_utils.py +222 -15
- nexaai/utils/model_manager.py +83 -7
- nexaai/utils/model_types.py +2 -0
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc8.dist-info}/METADATA +1 -1
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc8.dist-info}/RECORD +35 -24
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc8.dist-info}/WHEEL +0 -0
- {nexaai-1.0.19rc6.dist-info → nexaai-1.0.19rc8.dist-info}/top_level.txt +0 -0

nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py
@@ -0,0 +1,255 @@
+# Copyright © 2023-2024 Apple Inc.
+
+import math
+from typing import List, Optional, Union
+
+import mlx.core as mx
+import mlx.nn as nn
+
+
+class SuScaledRoPE(nn.Module):
+    def __init__(
+        self,
+        dims: int,
+        base: float = 10000.0,
+        max_position_embeddings: int = 131072,
+        original_max_position_embeddings: int = 4096,
+        short_factor: Union[List[float], float] = 1.0,
+        long_factor: Union[List[float], float] = 1.0,
+        short_mscale: float = None,
+        long_mscale: float = None,
+    ):
+        """
+        Su Scaled Rotary Embedding layer.
+
+        Args:
+            dims (int): The feature dimensions to be rotated.
+            base (int, optional): Base for the exponential scaling.
+            max_position_embeddings (int, optional): The maximum sequence
+                length that this model was trained with. This is used to determine
+                the size of the original RoPE embeddings when using long scaling.
+                Default: ``131072``.
+            original_max_position_embeddings (int, optional): The maximum
+                sequence length that this model was trained with. This is used to
+                determine the size of the original RoPE embeddings when using long
+                scaling. Default: ``4096``.
+            short_factor (float or list[float], optional): List of scaling
+                factors for sequences of length lesser than
+                ``original_max_position_embeddings``. Default: ``1.0``.
+            long_factor (float or list[float], optional): List of scaling
+                factors for sequences of length greater than
+                ``original_max_position_embeddings``. Default: ``1.0``.
+            short_mscale (float, optional): Scale the input prior to embedding.
+            long_mscale (float, optional): Scale the input prior to embedding.
+        """
+        super().__init__()
+        freqs = base ** (mx.arange(0, dims, 2, dtype=mx.float32) / dims)
+        self._freqs = mx.array(long_factor, dtype=mx.float32) * freqs
+        self.original_max_position_embeddings = original_max_position_embeddings
+        self.scale = long_mscale or math.sqrt(
+            1
+            + math.log(max_position_embeddings / original_max_position_embeddings)
+            / math.log(original_max_position_embeddings)
+        )
+        self.dim = dims
+
+    def __call__(self, x, offset: int = 0):
+        x[..., : self.dim] = self.scale * x[..., : self.dim]
+        return mx.fast.rope(
+            x,
+            self.dim,
+            traditional=False,
+            base=None,
+            scale=1.0,
+            offset=offset,
+            freqs=self._freqs,
+        )
+
+
+class Llama3RoPE(nn.Module):
+
+    def __init__(
+        self,
+        dims: int,
+        max_position_embeddings: int = 2048,
+        traditional: bool = False,
+        base: float = 10000,
+        scaling_config: dict = None,
+    ):
+        super().__init__()
+        self.dims = dims
+        self.max_position_embeddings = max_position_embeddings
+        self.traditional = traditional
+
+        factor = scaling_config["factor"]
+        low_freq_factor = scaling_config.get("low_freq_factor", 1.0)
+        high_freq_factor = scaling_config.get("high_freq_factor", 4.0)
+        old_context_len = scaling_config.get(
+            "original_max_position_embeddings",
+            8192,
+        )
+
+        low_freq_wavelen = old_context_len / low_freq_factor
+        high_freq_wavelen = old_context_len / high_freq_factor
+
+        freqs = base ** (mx.arange(0, dims, 2) / dims)
+        wavelens = 2 * mx.pi * freqs
+
+        freqs = mx.where(wavelens > low_freq_wavelen, freqs * factor, freqs)
+        is_medium_freq = (wavelens > high_freq_wavelen) & (wavelens < low_freq_wavelen)
+        smooth_factors = (old_context_len / wavelens - low_freq_factor) / (
+            high_freq_factor - low_freq_factor
+        )
+        smooth_freqs = freqs / ((1 - smooth_factors) / factor + smooth_factors)
+        self._freqs = mx.where(is_medium_freq, smooth_freqs, freqs)
+
+    def extra_repr(self):
+        return (
+            f"{self.dims}, traditional={self.traditional}, "
+            f"max_position_embeddings={self.max_position_embeddings}"
+        )
+
+    def __call__(self, x, offset: int = 0):
+        return mx.fast.rope(
+            x,
+            self.dims,
+            traditional=self.traditional,
+            base=None,
+            scale=1.0,
+            offset=offset,
+            freqs=self._freqs,
+        )
+
+
+class YarnRoPE(nn.Module):
+    def __init__(
+        self,
+        dims,
+        traditional=False,
+        max_position_embeddings=2048,
+        base=10000,
+        scaling_factor=1.0,
+        original_max_position_embeddings=4096,
+        beta_fast=32,
+        beta_slow=1,
+        mscale=1,
+        mscale_all_dim=0,
+    ):
+        super().__init__()
+
+        def yarn_find_correction_dim(num_rotations):
+            return (
+                dims
+                * math.log(
+                    original_max_position_embeddings / (num_rotations * 2 * math.pi)
+                )
+            ) / (2 * math.log(base))
+
+        def yarn_find_correction_range():
+            low = math.floor(yarn_find_correction_dim(beta_fast))
+            high = math.ceil(yarn_find_correction_dim(beta_slow))
+            return max(low, 0), min(high, dims - 1)
+
+        def yarn_get_mscale(scale=1, mscale=1):
+            if scale <= 1:
+                return 1.0
+            return 0.1 * mscale * math.log(scale) + 1.0
+
+        def yarn_linear_ramp_mask(min_val, max_val, dim):
+            if min_val == max_val:
+                max_val += 0.001  # Prevent singularity
+
+            linear_func = (mx.arange(dim, dtype=mx.float32) - min_val) / (
+                max_val - min_val
+            )
+            return mx.clip(linear_func, 0, 1)
+
+        self.mscale = yarn_get_mscale(scaling_factor, mscale) / yarn_get_mscale(
+            scaling_factor, mscale_all_dim
+        )
+        freq_extra = base ** (mx.arange(0, dims, 2, dtype=mx.float32) / dims)
+        freq_inter = scaling_factor * base ** (
+            mx.arange(0, dims, 2, dtype=mx.float32) / dims
+        )
+        low, high = yarn_find_correction_range()
+        freq_mask = 1.0 - yarn_linear_ramp_mask(low, high, dims // 2)
+        self._freqs = (freq_inter * freq_extra) / (
+            freq_inter * freq_mask + freq_extra * (1 - freq_mask)
+        )
+        self.dims = dims
+        self.traditional = traditional
+
+    def __call__(self, x, offset=0):
+        if self.mscale != 1.0:
+            x[..., : self.dims] = self.mscale * x[..., : self.dims]
+        return mx.fast.rope(
+            x,
+            self.dims,
+            traditional=self.traditional,
+            base=None,
+            scale=1.0,
+            offset=offset,
+            freqs=self._freqs,
+        )
+
+
+def initialize_rope(
+    dims,
+    base,
+    traditional,
+    scaling_config: Optional[dict] = None,
+    max_position_embeddings: Optional[int] = None,
+):
+    if scaling_config is not None:
+        rope_type = scaling_config.get("type") or scaling_config.get(
+            "rope_type", "default"
+        )
+    else:
+        rope_type = "default"
+
+    if rope_type in ["default", "linear"]:
+        scale = 1 / scaling_config["factor"] if rope_type == "linear" else 1.0
+        return nn.RoPE(dims, traditional=traditional, base=base, scale=scale)
+
+    elif rope_type == "llama3":
+        return Llama3RoPE(
+            dims=dims,
+            max_position_embeddings=max_position_embeddings,
+            traditional=traditional,
+            base=base,
+            scaling_config=scaling_config,
+        )
+    elif rope_type == "yarn":
+        scaling_factor = scaling_config["factor"]
+        rope_kwargs = {
+            key: scaling_config[key]
+            for key in [
+                "original_max_position_embeddings",
+                "beta_fast",
+                "beta_slow",
+                "mscale",
+                "mscale_all_dim",
+            ]
+            if key in scaling_config
+        }
+        return YarnRoPE(
+            dims=dims,
+            max_position_embeddings=max_position_embeddings,
+            traditional=traditional,
+            base=base,
+            **rope_kwargs,
+        )
+    elif rope_type == "longrope":
+        return SuScaledRoPE(
+            dims=dims,
+            base=base,
+            max_position_embeddings=max_position_embeddings,
+            original_max_position_embeddings=scaling_config[
+                "original_max_position_embeddings"
+            ],
+            short_factor=scaling_config["short_factor"],
+            long_factor=scaling_config["long_factor"],
+        )
+
+    else:
+        raise ValueError(f"Unsupported RoPE type {rope_type}")
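For orientation, the hunk above adds a complete RoPE-initialization helper. Below is a minimal usage sketch, not part of the diff: it assumes mlx is installed, that the installed wheel exposes this module path as an importable package, and the scaling_config values are illustrative rather than taken from the package.

import mlx.core as mx

from nexaai.mlx_backend.vlm.modeling.models.qwen3vl_moe.llm_common.rope_utils import (
    initialize_rope,
)

# Hypothetical llama3-style rope_scaling config (illustrative values only).
scaling_config = {
    "rope_type": "llama3",
    "factor": 8.0,
    "low_freq_factor": 1.0,
    "high_freq_factor": 4.0,
    "original_max_position_embeddings": 8192,
}

rope = initialize_rope(
    dims=128,  # per-head dimension; must match the attention head size
    base=500000.0,
    traditional=False,
    scaling_config=scaling_config,
    max_position_embeddings=131072,
)

# Rotate queries or keys shaped (batch, n_heads, seq_len, head_dim);
# offset is the current KV-cache position during decoding.
x = mx.zeros((1, 8, 16, 128))
y = rope(x, offset=0)

The dispatch on "type"/"rope_type" means one helper covers plain and linear RoPE (via nn.RoPE) as well as llama3, yarn, and longrope scaling, driven directly by a model's rope_scaling config dict.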
nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py
@@ -0,0 +1,303 @@
+import math
+from functools import partial
+from typing import Callable, Dict, List, Optional
+
+import mlx.core as mx
+
+
+def make_sampler(
+    temp: float = 0.0,
+    top_p: float = 0.0,
+    min_p: float = 0.0,
+    min_tokens_to_keep: int = 1,
+    top_k: int = 0,
+    xtc_probability: float = 0.0,
+    xtc_threshold: float = 0.0,
+    xtc_special_tokens: List[int] = [],
+) -> Callable[mx.array, mx.array]:
+    """
+    Make a sampler function for use with ``generate_step``.
+
+    Args:
+        temp (float): The temperature for sampling, if 0 the argmax is used.
+          Default: ``0``.
+        top_p (float, optional): Nucleus sampling, higher means model considers
+          more less-likely words.
+        min_p (float, optional): The minimum value (scaled by the top token's
+          probability) that a token probability must have to be considered.
+        min_tokens_to_keep (int, optional): Minimum number of tokens that cannot
+          be filtered by min_p sampling.
+        top_k (int, optional): The top k tokens ranked by probability to constrain
+          the sampling to.
+        xtc_probability (float, optional): The probability of applying XTC
+          sampling.
+        xtc_threshold (float, optional): The threshold the probs need to reach
+          for being sampled.
+        xtc_special_tokens (list(int), optional): List of special tokens IDs to
+          be excluded from XTC sampling.
+
+
+    Returns:
+        Callable[mx.array, mx.array]:
+            A sampler which takes log-probabilities and returns tokens.
+    """
+    if temp == 0:
+        return lambda x: mx.argmax(x, axis=-1)
+
+    # Create sampler chain
+    sampling_methods = []
+    if top_k > 0:
+        sampling_methods.append(lambda x: apply_top_k(x, top_k))
+    if top_p > 0 and top_p < 1.0:
+        sampling_methods.append(lambda x: apply_top_p(x, top_p))
+    if min_p != 0.0:
+        sampling_methods.append(lambda x: apply_min_p(x, min_p, min_tokens_to_keep))
+    if xtc_probability > 0.0:
+        sampling_methods.append(
+            lambda x: apply_xtc(x, xtc_probability, xtc_threshold, xtc_special_tokens)
+        )
+
+    # Apply the sampling methods
+    def sampler(logits):
+        for method in sampling_methods:
+            logits = method(logits)
+
+        # Return the sampled token
+        return categorical_sampling(logits, temp)
+
+    return sampler
+
+
+def make_logits_processors(
+    logit_bias: Optional[Dict[int, float]] = None,
+    repetition_penalty: Optional[float] = None,
+    repetition_context_size: Optional[int] = 20,
+):
+    """
+    Make logits processors for use with ``generate_step``.
+
+    Args:
+        repetition_penalty (float, optional): The penalty factor for repeating
+          tokens.
+        repetition_context_size (int, optional): The number of tokens to
+          consider for repetition penalty. Default: ``20``.
+        logit_bias (dictionary, optional): Additive logit bias.
+
+    Returns:
+        List[Callable[[mx.array, mx.array], mx.array]]:
+            A list of logits processors. Each processor in the list is a
+            callable which takes an array of tokens and an array of logits
+            and returns the updated logits.
+    """
+    logits_processors = []
+    if logit_bias:
+        indices = mx.array(list(logit_bias.keys()))
+        values = mx.array(list(logit_bias.values()))
+
+        def logit_bias_processor(_, logits):
+            logits[:, indices] += values
+            return logits
+
+        logits_processors.append(logit_bias_processor)
+
+    if repetition_penalty and repetition_penalty != 0.0:
+        logits_processors.append(
+            make_repetition_penalty(repetition_penalty, repetition_context_size)
+        )
+    return logits_processors
+
+
+@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
+def apply_top_k(
+    logprobs: mx.array,
+    top_k: int,
+) -> mx.array:
+    """
+    Sample from only the top K tokens ranked by probability.
+
+    Args:
+        logprobs: A vector of log probabilities.
+        top_k (int): Top k tokens to sample from.
+    """
+    vocab_size = logprobs.shape[-1]
+    if not isinstance(top_k, int) or not (0 < top_k < vocab_size):
+        raise ValueError(
+            f"`top_k` has to be an integer in the (0, {vocab_size}] interval," f" but is {top_k}."
+        )
+    mask_idx = mx.argpartition(-logprobs, kth=top_k - 1, axis=-1)[..., top_k:]
+    masked_logprobs = mx.put_along_axis(
+        logprobs, mask_idx, mx.array(-float("inf"), logprobs.dtype), axis=-1
+    )
+    return masked_logprobs
+
+
+@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
+def apply_min_p(
+    logprobs: mx.array,
+    min_p: float,
+    min_tokens_to_keep: int = 1,
+) -> mx.array:
+    """
+    Apply min-p sampling to the logprobs.
+
+    Min-p keeps all tokens that are above a minimum probability, scaled by the
+    probability of the most likely token. As a result, the filter is more
+    aggressive given a very high-probability token.
+
+    Args:
+        logprobs: A vector of log probabilities.
+        min_p (float): Minimum token probability. Typical values are in the
+          0.01-0.2 range, comparably selective as setting `top_p` in the
+          0.99-0.8 range.
+        min_tokens_to_keep (int, optional): Minimum number of tokens that cannot
+          be filtered. Default: ``1``.
+
+    """
+    if not (0 <= min_p <= 1.0):
+        raise ValueError(f"`min_p` has to be a float in the [0, 1] interval, but is {min_p}")
+    if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
+        raise ValueError(
+            f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}"
+        )
+    # reference implementation: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L531-L605
+
+    # Indices sorted in decreasing order
+    sorted_indices = mx.argsort(-logprobs, axis=-1)
+    sorted_logprobs = mx.take_along_axis(logprobs, sorted_indices, axis=-1)
+
+    # Top probability
+    top_logprobs = sorted_logprobs[:, 0:1]
+
+    # Calculate the min_p threshold
+    scaled_min_p = top_logprobs + math.log(min_p)
+
+    # Mask tokens that have a probability less than the scaled min_p
+    tokens_to_remove = sorted_logprobs < scaled_min_p
+    tokens_to_remove[..., :min_tokens_to_keep] = False
+
+    # Create pool of tokens with probability less than scaled min_p
+    selected_logprobs = mx.where(tokens_to_remove, -float("inf"), sorted_logprobs)
+
+    # Create a mapping to rearrange back to original indices
+    inverse_indices = mx.put_along_axis(
+        mx.zeros_like(sorted_indices),
+        sorted_indices,
+        mx.arange(sorted_indices.shape[-1], dtype=sorted_indices.dtype),
+        axis=-1,
+    )
+
+    # Rearrange selected_logprobs back to original order
+    original_order_logprobs = mx.take_along_axis(selected_logprobs, inverse_indices, axis=-1)
+
+    return original_order_logprobs
+
+
+@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
+def apply_top_p(logprobs: mx.array, top_p: float) -> mx.array:
+    """
+    Apply top-p (nucleus) sampling to logits.
+
+    Args:
+        logprobs: A vector of log probabilities.
+        top_p: The cumulative probability threshold for top-p filtering.
+    Returns:
+        token selected based on the top-p criterion.
+    """
+    # referenced implementation from https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L449-L460
+    probs = mx.exp(logprobs)
+    # sort in ascending order
+    sorted_indices = mx.argsort(logprobs, axis=-1)
+    sorted_probs = mx.take_along_axis(probs, sorted_indices, axis=-1)
+
+    cumulative_probs = mx.cumsum(sorted_probs, axis=-1)
+
+    # Rearrange cumulative probs back to original order
+    inverse_indices = mx.put_along_axis(
+        mx.zeros_like(sorted_indices),
+        sorted_indices,
+        mx.arange(sorted_indices.shape[-1], dtype=sorted_indices.dtype),
+        axis=-1,
+    )
+    cumulative_probs = mx.take_along_axis(cumulative_probs, inverse_indices, axis=-1)
+
+    # select tokens with cumulative probs below threshold
+    return mx.where(
+        cumulative_probs > 1 - top_p,
+        logprobs,
+        -float("inf"),
+    )
+
+
+@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
+def apply_xtc(
+    logits: mx.array,
+    xtc_probability: float,
+    xtc_threshold: float,
+    xtc_special_tokens: List[int],
+) -> mx.array:
+    """
+    Apply XTC sampling to the logits.
+
+    Args:
+        logits: The logits from the model's output.
+        xtc_probability (float): Probability of XTC sampling to happen for each token
+        xtc_threshold (float): The threshold the probs need to reach for being sampled.
+        special_tokens_ids (list(int)): List of special tokens IDs to be excluded from XTC sampling.
+    """
+    if not (0 <= xtc_threshold <= 0.5):
+        raise ValueError(
+            f"`threshold` has to be a float in the [0, 0.5] interval, but is {xtc_threshold}"
+        )
+    if not (0 <= xtc_probability <= 1.0):
+        raise ValueError(
+            f"`probability` has to be a float in the [0, 1] interval, but is {xtc_probability}"
+        )
+
+    probs = mx.softmax(logits, -1)
+    mask = probs > mx.where(probs > xtc_threshold, probs, mx.inf).min()
+    if xtc_special_tokens:
+        mask[..., xtc_special_tokens] = False
+
+    return mx.where(
+        mx.random.uniform(0, 1) > xtc_probability,
+        logits,
+        mx.where(mask, -mx.inf, logits),
+    )
+
+
+@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
+def categorical_sampling(logits, temp):
+    return mx.random.categorical(logits * (1 / temp))
+
+
+def make_repetition_penalty(penalty: float, context_size: int = 20):
+    """
+    Make repetition penalty processor.
+
+    Paper: https://arxiv.org/abs/1909.05858
+
+    Args:
+        penalty (float): The repetition penalty factor to be applied.
+        context_size (int): The number of previous tokens to use.
+          Default: ``20``.
+
+    Returns:
+        Callable[[mx.array, List[int]], mx.array]:
+            The repetition penalty processor.
+    """
+    if penalty < 0 or not isinstance(penalty, (int, float)):
+        raise ValueError(f"penalty must be a non-negative float, got {penalty}")
+
+    def repetition_penalty_processor(tokens, logits):
+        if len(tokens) > 0:
+            tokens = tokens[-context_size:]
+            selected_logits = logits[:, tokens]
+            selected_logits = mx.where(
+                selected_logits < 0,
+                selected_logits * penalty,
+                selected_logits / penalty,
+            )
+            logits[:, tokens] = selected_logits
+        return logits
+
+    return repetition_penalty_processor
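Taken together, make_sampler chains the enabled filters (top-k, top-p, min-p, XTC) in a fixed order ahead of a temperature-scaled categorical draw, while make_logits_processors returns per-step logits transforms. A minimal decode-step sketch follows, not part of the diff (it assumes mlx is installed and the wheel's module path is importable; the toy vocabulary and parameter values are hypothetical):

import mlx.core as mx

from nexaai.mlx_backend.vlm.modeling.models.qwen3vl_moe.llm_common.sample_utils import (
    make_logits_processors,
    make_sampler,
)

sampler = make_sampler(temp=0.7, top_p=0.9, top_k=3)
processors = make_logits_processors(
    repetition_penalty=1.1, repetition_context_size=20
)

# Toy decode step over a 5-token vocabulary.
tokens = mx.array([0, 2, 2])                     # previously generated tokens
logits = mx.array([[0.1, 0.5, 2.0, -1.0, 0.3]])  # shape (1, vocab_size)
for proc in processors:
    logits = proc(tokens, logits)

# The sampler expects normalized log-probabilities.
logprobs = logits - mx.logsumexp(logits, axis=-1, keepdims=True)
next_token = sampler(logprobs)

Note that with temp=0 make_sampler short-circuits to a pure argmax, so none of the filters are applied in that case.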