nexaai-1.0.19rc7-cp310-cp310-macosx_14_0_universal2.whl → nexaai-1.0.19rc9-cp310-cp310-macosx_14_0_universal2.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of nexaai might be problematic.
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +1 -1
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl.py +14 -31
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +15 -32
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +7 -23
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +8 -24
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/METADATA +1 -1
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/RECORD +11 -200
- nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +0 -12
- nexaai/binds/nexa_mlx/py-lib/asr/interface.py +0 -122
- nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/common/utils.py +0 -25
- nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/cv/generate.py +0 -195
- nexaai/binds/nexa_mlx/py-lib/cv/interface.py +0 -151
- nexaai/binds/nexa_mlx/py-lib/cv/main.py +0 -81
- nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +0 -1736
- nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +0 -333
- nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +0 -617
- nexaai/binds/nexa_mlx/py-lib/embedding/main.py +0 -173
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +0 -399
- nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +0 -1
- nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +0 -244
- nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +0 -82
- nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +0 -281
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +0 -306
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +0 -116
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +0 -65
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +0 -386
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +0 -105
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +0 -100
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +0 -460
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +0 -274
- nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/llm/generate.py +0 -149
- nexaai/binds/nexa_mlx/py-lib/llm/interface.py +0 -764
- nexaai/binds/nexa_mlx/py-lib/llm/main.py +0 -68
- nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +0 -174
- nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +0 -287
- nexaai/binds/nexa_mlx/py-lib/rerank/main.py +0 -127
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +0 -330
- nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +0 -1
- nexaai/binds/nexa_mlx/py-lib/sd/interface.py +0 -362
- nexaai/binds/nexa_mlx/py-lib/sd/main.py +0 -286
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +0 -306
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +0 -116
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +0 -65
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +0 -385
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +0 -105
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +0 -100
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +0 -460
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +0 -274
- nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +0 -12
- nexaai/binds/nexa_mlx/py-lib/tts/interface.py +0 -276
- nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +0 -3
- nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +0 -572
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +0 -294
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +0 -276
- nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +0 -504
- nexaai/binds/nexa_mlx/py-lib/vlm/main.py +0 -320
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +0 -68
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +0 -193
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +0 -186
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +0 -233
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +0 -503
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +0 -202
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +0 -10
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +0 -472
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +0 -591
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +0 -526
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +0 -356
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +0 -366
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +0 -488
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +0 -591
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +0 -213
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +0 -315
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +0 -238
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +0 -1038
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +0 -139
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +0 -322
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +0 -629
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +0 -1022
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +0 -294
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +0 -191
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +0 -267
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +0 -175
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +0 -192
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +0 -233
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +0 -140
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +0 -220
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +0 -393
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +0 -293
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +0 -307
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +0 -143
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +0 -509
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +0 -522
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +0 -386
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +0 -138
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +0 -560
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +0 -240
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +0 -153
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +0 -259
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +0 -236
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +0 -256
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +0 -283
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +0 -416
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +0 -172
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +0 -499
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +0 -133
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +0 -465
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +0 -10
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +0 -385
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +0 -557
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +0 -526
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +0 -282
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +0 -242
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +0 -21
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +0 -71
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +0 -324
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +0 -229
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +0 -161
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +0 -320
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +0 -108
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +0 -490
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +0 -168
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +0 -414
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +0 -104
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +0 -490
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +0 -167
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +0 -312
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +0 -117
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +0 -531
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +0 -701
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +0 -255
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +0 -407
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +0 -476
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +0 -1223
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +0 -117
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +0 -531
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +0 -701
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +0 -255
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +0 -407
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +0 -476
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +0 -1309
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +0 -210
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +0 -62
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +0 -209
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +0 -215
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +0 -474
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +0 -39
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +0 -344
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +0 -70
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +0 -296
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +0 -928
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/WHEEL +0 -0
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/top_level.txt +0 -0
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py
@@ -1,139 +0,0 @@
-import inspect
-from dataclasses import dataclass
-from typing import Dict, List, Optional, Union
-
-
-@dataclass
-class AudioConfig:
-    input_feat_size: int = 80
-    hidden_size: int = 1536
-    conf_attention_chunk_size: int = 12
-    conf_attention_context_left: int = 13
-    conf_attention_context_right: int = 0
-    conf_attention_invalid_logits_value: float = -1e9
-    conf_attention_logit_cap: float = 50.0
-    conf_num_attention_heads: int = 8
-    conf_num_hidden_layers: int = 12
-    conf_conv_kernel_size: int = 5
-    conf_positional_bias_size: int = 256
-    conf_reduction_factor: int = 4
-    conf_residual_weight: float = 0.5
-    sscp_conv_channel_size: tuple[int, int] = (128, 32)
-    sscp_conv_group_norm_eps: float = 1e-3
-    sscp_conv_kernel_size: tuple[tuple[int, int], tuple[int, int]] = ((3, 3), (3, 3))
-    sscp_conv_stride_size: tuple[tuple[int, int], tuple[int, int]] = ((2, 2), (2, 2))
-    vocab_size: int = 128
-    sscp_conv_eps: float = 1e-3
-    rms_norm_eps: float = 1e-6
-    gradient_clipping: float = 10000000000.0
-    vocab_offset: int = 262_144 + 128  # text vocab size + vision vocab size
-
-    @classmethod
-    def from_dict(cls, params):
-        return cls(
-            **{
-                k: v
-                for k, v in params.items()
-                if k in inspect.signature(cls).parameters
-            }
-        )
-
-
-@dataclass
-class VisionConfig:
-    model_type: str = "gemma3n_vision"
-    num_hidden_layers: int = 12
-    hidden_size: int = 2048
-    intermediate_size: int = 8192
-    num_attention_heads: int = 16
-    patch_size: int = 16
-    image_size: int = 224
-    num_channels: int = 3
-    rms_norm_eps: float = 1e-6
-    vocab_size: int = 128
-    vocab_offset: int = 262_144
-
-    @classmethod
-    def from_dict(cls, params):
-        return cls(
-            **{
-                k: v
-                for k, v in params.items()
-                if k in inspect.signature(cls).parameters
-            }
-        )
-
-
-@dataclass
-class TextConfig:
-    model_type: str
-    hidden_size: int
-    num_hidden_layers: int
-    intermediate_size: int
-    num_attention_heads: int = 2
-    head_dim: int = 256
-    rms_norm_eps: float = 1.0e-6
-    vocab_size: int = 262400
-    vocab_size_per_layer_input: int = 262144
-    num_key_value_heads: int = 4
-    laurel_rank: int = 64
-    frac_shared_layers: float = 0.5
-    altup_active_idx: int = 0
-    pad_token_id: int = 0
-    altup_num_inputs: int = 4
-    altup_coef_clip: Optional[float] = None
-    altup_correct_scale: bool = True
-    hidden_size_per_layer_input: int = 1024
-    rope_local_base_freq: float = 10000.0
-    rope_traditional: bool = False
-    rope_theta: float = 1000000.0
-    query_pre_attn_scalar: float = 0.0625
-    sliding_window: int = 1024
-    rope_scaling: Optional[Dict[str, Union[float, List[float]]]] = None
-    mm_tokens_per_image: int = 256
-    sliding_window_pattern: int = 5
-    activation_sparsity_pattern: Optional[List[float]] = None
-    final_logit_softcapping: float = 30.0
-    query_rescale_scalar: float = 1.0
-    num_kv_shared_layers: int = 0
-    max_position_embeddings: int = 32768
-    attn_logit_softcapping: float = 0.0
-    layer_types: List[str] = None
-
-    @classmethod
-    def from_dict(cls, params):
-        return cls(
-            **{
-                k: v
-                for k, v in params.items()
-                if k in inspect.signature(cls).parameters
-            }
-        )
-
-
-@dataclass
-class ModelConfig:
-    text_config: TextConfig
-    vision_config: VisionConfig
-    audio_config: AudioConfig
-    model_type: str
-    vocab_size: int = 257152
-    ignore_index: int = -100
-    image_token_index: int = 262145
-    audio_token_id: int = 262273
-    image_token_id: int = 262145
-    hidden_size: int = 2048
-    pad_token_id: int = 0
-    vision_soft_tokens_per_image: int = 256
-    audio_soft_tokens_per_image: int = 188
-    eos_token_id: Optional[List[int]] = None
-
-    @classmethod
-    def from_dict(cls, params):
-        return cls(
-            **{
-                k: v
-                for k, v in params.items()
-                if k in inspect.signature(cls).parameters
-            }
-        )
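All four deleted config dataclasses share one `from_dict` idiom: the incoming dict is filtered against `inspect.signature(cls).parameters`, so keys the dataclass does not declare are dropped instead of raising `TypeError` on an unexpected keyword argument. A minimal runnable sketch of that pattern, with the field list trimmed to two entries and a made-up `params` dict:

```python
import inspect
from dataclasses import dataclass


@dataclass
class VisionConfig:
    # Trimmed to two of the deleted class's fields for brevity.
    hidden_size: int = 2048
    patch_size: int = 16

    @classmethod
    def from_dict(cls, params):
        # Keep only keys that name a declared field; everything else is ignored.
        return cls(
            **{k: v for k, v in params.items()
               if k in inspect.signature(cls).parameters}
        )


# Hypothetical checkpoint config: "unknown_key" is not a VisionConfig field.
params = {"hidden_size": 1024, "patch_size": 14, "unknown_key": True}
print(VisionConfig.from_dict(params))  # VisionConfig(hidden_size=1024, patch_size=14)
```

The practical effect is that a config.json carrying extra or newer fields still loads cleanly.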
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py
@@ -1,322 +0,0 @@
-from typing import Optional
-
-import mlx.core as mx
-import mlx.nn as nn
-
-from .audio import AudioModel
-from .config import ModelConfig, TextConfig
-from .language import Gemma3nRMSNorm, LanguageModel
-from .vision import VisionModel
-
-
-def masked_scatter(input_tensor, mask, source):
-    """MLX implementation of PyTorch's masked_scatter"""
-
-    # Convert mask to boolean once
-    mask = mask.astype(mx.bool_)
-
-    # Early exit
-    if not mask.any():
-        return mx.broadcast_to(input_tensor, mask.shape)
-
-    # Flatten everything once
-    input_shape = mask.shape
-    result_flat = mx.broadcast_to(input_tensor, input_shape).flatten()
-    mask_flat = mask.flatten()
-    source_flat = source.flatten()
-
-    # Create selection indices using cumulative sum
-    selection_mask = mx.cumsum(mask_flat.astype(mx.int32)) - 1
-
-    # Bound check and create source selection
-    source_len = len(source_flat)
-    bounded_indices = selection_mask % source_len
-
-    # Vectorized selection from source
-    selected_values = source_flat[bounded_indices]
-
-    result_flat = mx.where(mask_flat, selected_values, result_flat)
-
-    return result_flat.reshape(input_shape)
-
-
-class Gemma3nMultimodalEmbedder(nn.Module):
-    """Embeds token ids or soft tokens into language model space."""
-
-    def __init__(self, multimodal_config: ModelConfig, text_config: TextConfig):
-        super().__init__()
-
-        self.multimodal_hidden_size = multimodal_config.hidden_size
-        self.eps = multimodal_config.rms_norm_eps
-        self.vocab_offset = multimodal_config.vocab_offset
-        self.vocab_size = multimodal_config.vocab_size
-        self.text_hidden_size = text_config.hidden_size
-
-        self.embedding = nn.Embedding(self.vocab_size, self.multimodal_hidden_size)
-        self.hard_embedding_norm = Gemma3nRMSNorm(
-            self.multimodal_hidden_size, eps=self.eps
-        )
-        self.soft_embedding_norm = Gemma3nRMSNorm(
-            self.multimodal_hidden_size, eps=self.eps
-        )
-        self.embedding_projection = nn.Linear(
-            self.multimodal_hidden_size, self.text_hidden_size, bias=False
-        )
-        self.embedding_post_projection_norm = Gemma3nRMSNorm(
-            self.text_hidden_size, eps=self.eps, with_scale=False
-        )
-
-    def __call__(
-        self, input_ids: mx.array = None, inputs_embeds: mx.array = None
-    ) -> mx.array:
-        if (input_ids is None) ^ (inputs_embeds is not None):
-            raise ValueError(
-                "You must specify exactly one of input_ids or inputs_embeds"
-            )
-
-        if inputs_embeds is not None:
-            emb_norm = self.soft_embedding_norm(inputs_embeds)
-        else:
-
-            hard_emb = self.embedding(input_ids - self.vocab_offset)
-            emb_norm = self.hard_embedding_norm(hard_emb)
-
-        emb_norm_proj = self.embedding_projection(emb_norm)
-        projected = self.embedding_post_projection_norm(emb_norm_proj)
-        return projected
-
-
-class Model(nn.Module):
-    def __init__(self, config: ModelConfig):
-        super().__init__()
-        self.model_type = config.model_type
-        self.config = config
-
-        # Text
-        self.language_model = LanguageModel(config.text_config)
-        self.vocab_size = config.text_config.vocab_size
-        self.vocab_size_per_layer_input = config.text_config.vocab_size_per_layer_input
-
-        # Vision
-        self.vision_tower = VisionModel(config.vision_config)
-        self.embed_vision = Gemma3nMultimodalEmbedder(
-            config.vision_config, text_config=config.text_config
-        )
-
-        # Audio
-        self.audio_tower = AudioModel(config.audio_config)
-        self.embed_audio = Gemma3nMultimodalEmbedder(
-            config.audio_config, text_config=config.text_config
-        )
-
-    def get_input_embeddings(
-        self,
-        input_ids: Optional[mx.array] = None,
-        pixel_values: Optional[mx.array] = None,
-        input_features: Optional[mx.array] = None,
-        input_features_mask: Optional[mx.array] = None,
-        **kwargs,
-    ):
-
-        inputs_embeds = self.language_model.model.embed_tokens(input_ids)
-
-        per_layer_inputs_mask = mx.logical_and(
-            input_ids >= 0, input_ids < self.vocab_size_per_layer_input
-        )
-        per_layer_inputs_tokens = mx.where(
-            per_layer_inputs_mask, input_ids, mx.zeros_like(input_ids)
-        )
-        per_layer_inputs = self.language_model.model.get_per_layer_inputs(
-            per_layer_inputs_tokens
-        )
-        if pixel_values is None and input_features is None:
-            return inputs_embeds, per_layer_inputs
-
-        if input_ids is not None:
-
-            # Handle vision tokens (>= embed_vision.vocab_offset and < embed_audio.vocab_offset)
-            vision_mask = mx.logical_and(
-                input_ids >= self.embed_vision.vocab_offset,
-                input_ids < self.embed_audio.vocab_offset,
-            )
-            dummy_vision_token_id = (
-                self.embed_vision.vocab_offset + self.embed_vision.vocab_size - 1
-            )
-            vision_tokens = mx.where(vision_mask, input_ids, dummy_vision_token_id)
-            vision_embeds_flat = self.embed_vision(input_ids=vision_tokens)
-            inputs_embeds = mx.where(
-                vision_mask[..., None], vision_embeds_flat, inputs_embeds
-            )
-
-            # Handle audio tokens (>= embed_audio.vocab_offset)
-            audio_mask = input_ids >= self.embed_audio.vocab_offset
-            dummy_audio_token_id = (
-                self.embed_audio.vocab_offset + self.embed_audio.vocab_size - 1
-            )
-
-            audio_tokens = mx.where(audio_mask, input_ids, dummy_audio_token_id)
-            audio_embeds_flat = self.embed_audio(input_ids=audio_tokens)
-            inputs_embeds = mx.where(
-                audio_mask[..., None], audio_embeds_flat, inputs_embeds
-            )
-        else:
-            per_layer_inputs = None
-
-        # Vision features
-        if pixel_values is not None:
-            image_features = self.get_image_features(
-                pixel_values, self.vision_tower, self.config, self.embed_vision
-            )
-
-            modality = "image"
-            inputs_embeds = self.merge_multimodal_and_text(
-                inputs_embeds,
-                image_features,
-                self.construct_special_modality_mask(
-                    input_ids,
-                    inputs_embeds,
-                    self.config.image_token_id,
-                    modality=modality,
-                ),
-                modality=modality,
-            )
-
-        # Audio features
-        if input_features is not None:
-            audio_features, audio_mask = self.get_audio_features(
-                input_features, ~input_features_mask
-            )
-            audio_padding_ids = mx.array([[self.vocab_size - 1]])
-            audio_padding_embs = self.embed_audio(input_ids=audio_padding_ids)
-            audio_features = mx.where(
-                audio_mask[..., None], audio_padding_embs, audio_features
-            )
-
-            audio_batch_size, audio_seq_len, audio_embed_dim = audio_features.shape
-            extra_padding_tokens = (
-                self.config.audio_soft_tokens_per_image - audio_seq_len
-            )
-            extra_padding_features = mx.broadcast_to(
-                audio_padding_embs,
-                (audio_batch_size, extra_padding_tokens, audio_embed_dim),
-            )
-
-            audio_features = mx.concatenate(
-                (audio_features, extra_padding_features), axis=1
-            )
-            modality = "audio"
-            inputs_embeds = self.merge_multimodal_and_text(
-                inputs_embeds,
-                audio_features,
-                self.construct_special_modality_mask(
-                    input_ids,
-                    inputs_embeds,
-                    self.config.audio_token_id,
-                    modality=modality,
-                ),
-                modality=modality,
-            )
-
-        return inputs_embeds, per_layer_inputs
-
-    def get_audio_features(self, input_features, input_features_mask):
-        audio_outputs, audio_mask = self.audio_tower(
-            input_features, input_features_mask
-        )
-        return self.embed_audio(inputs_embeds=audio_outputs), audio_mask
-
-    @staticmethod
-    def get_image_features(pixel_values, vision_tower, config, embed_vision):
-        vision_outputs = vision_tower(
-            pixel_values,
-            output_hidden_states=True,
-        )
-        vision_outputs = vision_outputs.transpose(0, 3, 1, 2)
-        vision_outputs = vision_outputs.reshape(
-            vision_outputs.shape[0],
-            config.vision_config.hidden_size,
-            config.vision_soft_tokens_per_image,
-        ).transpose(0, 2, 1)
-
-        # Normalize and embed the soft tokens into language model space.
-        vision_outputs *= config.vision_config.hidden_size**0.5
-        return embed_vision(inputs_embeds=vision_outputs)
-
-    def construct_special_modality_mask(
-        self, input_ids, inputs_embeds, token_id, modality="image"
-    ):
-        if input_ids is None:
-            embed_fn = (
-                self.embed_audio
-                if modality == "audio"
-                else self.language_model.model.embed_tokens
-            )
-            special_modality_mask = inputs_embeds == embed_fn(
-                input_ids=mx.array([token_id])
-            )
-        else:
-            special_modality_mask = mx.expand_dims(input_ids == token_id, -1)
-            special_modality_mask = mx.broadcast_to(
-                special_modality_mask, inputs_embeds.shape
-            )
-        return special_modality_mask
-
-    @staticmethod
-    def merge_multimodal_and_text(
-        inputs_embeds, features, special_modality_mask, modality="image"
-    ):
-        # Count special tokens by summing the mask
-        modality_tokens_in_text = special_modality_mask.sum()
-        feature_tokens = features.size
-
-        if modality_tokens_in_text != feature_tokens:
-            raise ValueError(
-                f"Number of {modality}s does not match number of special {modality} tokens in the input text. "
-                f"Got {modality_tokens_in_text} {modality} tokens in the text and "
-                f"{feature_tokens} tokens from {modality} embeddings."
-            )
-        features = features.astype(inputs_embeds.dtype)
-
-        inputs_embeds = masked_scatter(inputs_embeds, special_modality_mask, features)
-        return inputs_embeds
-
-    def __call__(
-        self,
-        input_ids: mx.array,
-        pixel_values: mx.array,
-        mask: Optional[mx.array] = None,
-        cache: Optional[mx.array] = None,
-        **kwargs,
-    ):
-        # Audio features
-        input_features = kwargs.pop("input_features", None)
-        input_features_mask = kwargs.pop("input_features_mask", None)
-        inputs_embeds, per_layer_inputs = self.get_input_embeddings(
-            input_ids=input_ids,
-            pixel_values=pixel_values,
-            input_features=input_features,
-            input_features_mask=input_features_mask,
-            **kwargs,
-        )
-
-        logits = self.language_model(
-            input_ids=None,
-            cache=cache,
-            inputs_embeds=inputs_embeds,
-            per_layer_inputs=per_layer_inputs,
-        )
-        return logits
-
-    def sanitize(self, weights):
-        sanitized_weights = {}
-        for k, v in weights.items():
-            # if "vision_tower" not in k and "embed_vision" not in k:
-            if k.startswith("model."):
-                sanitized_weights[".".join(k.split(".")[1:])] = v
-            else:
-                sanitized_weights[k] = v
-        return sanitized_weights
-
-    @property
-    def layers(self):
-        return self.language_model.model.layers