nexaai 1.0.19rc7__cp310-cp310-macosx_14_0_universal2.whl → 1.0.19rc8__cp310-cp310-macosx_14_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nexaai has been flagged as potentially problematic; see the advisory details accompanying this diff for more information.
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +1 -1
- nexaai/binds/libnexa_bridge.dylib +0 -0
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/METADATA +1 -1
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/RECORD +7 -196
- nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +0 -12
- nexaai/binds/nexa_mlx/py-lib/asr/interface.py +0 -122
- nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/common/utils.py +0 -25
- nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/cv/generate.py +0 -195
- nexaai/binds/nexa_mlx/py-lib/cv/interface.py +0 -151
- nexaai/binds/nexa_mlx/py-lib/cv/main.py +0 -81
- nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +0 -1736
- nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +0 -333
- nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +0 -617
- nexaai/binds/nexa_mlx/py-lib/embedding/main.py +0 -173
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +0 -399
- nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +0 -1
- nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +0 -244
- nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +0 -82
- nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +0 -281
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +0 -306
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +0 -116
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +0 -65
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +0 -386
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +0 -105
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +0 -100
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +0 -460
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +0 -274
- nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/llm/generate.py +0 -149
- nexaai/binds/nexa_mlx/py-lib/llm/interface.py +0 -764
- nexaai/binds/nexa_mlx/py-lib/llm/main.py +0 -68
- nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +0 -174
- nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +0 -287
- nexaai/binds/nexa_mlx/py-lib/rerank/main.py +0 -127
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +0 -330
- nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +0 -1
- nexaai/binds/nexa_mlx/py-lib/sd/interface.py +0 -362
- nexaai/binds/nexa_mlx/py-lib/sd/main.py +0 -286
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +0 -306
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +0 -116
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +0 -65
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +0 -385
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +0 -105
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +0 -100
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +0 -460
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +0 -274
- nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +0 -12
- nexaai/binds/nexa_mlx/py-lib/tts/interface.py +0 -276
- nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +0 -3
- nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +0 -572
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +0 -294
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +0 -276
- nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +0 -504
- nexaai/binds/nexa_mlx/py-lib/vlm/main.py +0 -320
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +0 -68
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +0 -193
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +0 -186
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +0 -233
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +0 -503
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +0 -202
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +0 -10
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +0 -472
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +0 -591
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +0 -526
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +0 -356
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +0 -366
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +0 -488
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +0 -591
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +0 -213
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +0 -315
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +0 -238
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +0 -1038
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +0 -139
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +0 -322
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +0 -629
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +0 -1022
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +0 -294
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +0 -191
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +0 -267
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +0 -175
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +0 -192
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +0 -233
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +0 -140
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +0 -220
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +0 -393
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +0 -293
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +0 -307
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +0 -143
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +0 -509
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +0 -522
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +0 -386
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +0 -138
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +0 -560
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +0 -240
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +0 -153
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +0 -259
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +0 -236
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +0 -256
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +0 -283
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +0 -416
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +0 -172
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +0 -499
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +0 -133
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +0 -465
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +0 -10
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +0 -385
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +0 -557
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +0 -526
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +0 -282
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +0 -242
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +0 -21
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +0 -71
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +0 -324
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +0 -229
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +0 -161
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +0 -320
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +0 -108
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +0 -490
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +0 -168
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +0 -414
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +0 -104
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +0 -490
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +0 -167
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +0 -312
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +0 -117
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +0 -531
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +0 -701
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +0 -255
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +0 -407
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +0 -476
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +0 -1223
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +0 -117
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +0 -531
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +0 -701
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +0 -255
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +0 -407
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +0 -476
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +0 -1309
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +0 -210
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +0 -62
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +0 -209
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +0 -215
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +0 -474
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +0 -39
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +0 -344
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +0 -70
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +0 -296
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +0 -928
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/WHEEL +0 -0
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc8.dist-info}/top_level.txt +0 -0
|
File without changes
|
|
@@ -1,333 +0,0 @@
|
|
|
1
|
-
# Copyright © Nexa AI
|
|
2
|
-
#
|
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
-
# you may not use this file except in compliance with the License.
|
|
5
|
-
# You may obtain a copy of the License at
|
|
6
|
-
#
|
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
-
#
|
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
-
# See the License for the specific language governing permissions and
|
|
13
|
-
# limitations under the License.
|
|
14
|
-
|
|
15
|
-
import sys
|
|
16
|
-
import os
|
|
17
|
-
import json
|
|
18
|
-
import mlx.core as mx
|
|
19
|
-
import numpy as np
|
|
20
|
-
|
|
21
|
-
curr_dir = os.path.dirname(os.path.abspath(__file__))
|
|
22
|
-
from .modeling.nexa_jina_v2 import Model, ModelArgs
|
|
23
|
-
from tokenizers import Tokenizer
|
|
24
|
-
from huggingface_hub import snapshot_download
|
|
25
|
-
|
|
26
|
-
# Try to import mlx_embeddings for general embedding support
|
|
27
|
-
try:
|
|
28
|
-
import mlx_embeddings
|
|
29
|
-
MLX_EMBEDDINGS_AVAILABLE = True
|
|
30
|
-
except ImportError:
|
|
31
|
-
MLX_EMBEDDINGS_AVAILABLE = False
|
|
32
|
-
# Suppress warning during import to avoid interfering with C++ tests
|
|
33
|
-
# The warning will be shown when actually trying to use mlx_embeddings functionality
|
|
34
|
-
pass
|
|
35
|
-
|
|
36
|
-
def detect_model_type(model_path):
    """Classify the local model as Jina V2 ("jina_v2") or generic ("generic").

    Reads config.json under *model_path* (falling back to the bundled
    ``modelfiles`` directory next to this script) and reports "jina_v2"
    only when "JinaBertModel" appears in the config's "architectures"
    list.  Any missing file, unreadable file, or parse error is treated
    as "generic".
    """
    if os.path.isdir(model_path):
        cfg_file = os.path.join(model_path, "config.json")
    else:
        cfg_file = f"{model_path}/config.json"

    if not os.path.exists(cfg_file):
        # Fall back to the default modelfiles directory.
        cfg_file = f"{curr_dir}/modelfiles/config.json"
        if not os.path.exists(cfg_file):
            return "generic"

    try:
        with open(cfg_file, "r") as handle:
            parsed = json.load(handle)
        # Jina V2 checkpoints declare the JinaBertModel architecture.
        if "JinaBertModel" in parsed.get("architectures", []):
            return "jina_v2"
        return "generic"
    except Exception:
        return "generic"
|
|
59
|
-
# ========== Jina V2 Direct Implementation ==========
|
|
60
|
-
|
|
61
|
-
def load_jina_model(model_id):
    """Download (if needed) and build the FP16 Jina V2 model.

    On first use the checkpoint is fetched from the Hugging Face Hub into
    the local ``modelfiles`` directory; afterwards the cached copy is used.
    The model configuration is read from config.json, a ``ModelArgs`` is
    constructed from it, and the safetensors weights are loaded strictly.

    Raises whatever ``snapshot_download`` raises when the download fails
    (after printing a hint about authentication).
    """
    model_dir = f"{curr_dir}/modelfiles"

    # A present config.json marks an already-downloaded model.
    if not os.path.exists(f"{model_dir}/config.json"):
        print(f"📥 Downloading Jina V2 model {model_id}...")

        os.makedirs(model_dir, exist_ok=True)

        try:
            snapshot_download(
                repo_id=model_id,
                local_dir=model_dir,
                resume_download=True,  # Resume partial downloads
                local_dir_use_symlinks=False,  # Use actual files instead of symlinks
            )
            print("✅ Model download completed!")
        except Exception as e:
            print(f"❌ Failed to download model: {e}")
            print("💡 Try: huggingface-cli login (if authentication required)")
            raise

    with open(f"{model_dir}/config.json", "r") as handle:
        raw_config = json.load(handle)

    # Forward exactly the fields ModelArgs declares, in declaration order;
    # all of them are required keys in the Jina V2 config.json.
    arg_names = (
        "model_type", "vocab_size", "hidden_size", "num_hidden_layers",
        "num_attention_heads", "intermediate_size", "hidden_act",
        "hidden_dropout_prob", "attention_probs_dropout_prob",
        "max_position_embeddings", "type_vocab_size", "initializer_range",
        "layer_norm_eps", "pad_token_id", "position_embedding_type",
        "use_cache", "classifier_dropout", "feed_forward_type",
        "emb_pooler", "attn_implementation",
    )
    config = ModelArgs(**{name: raw_config[name] for name in arg_names})

    model = Model(config)

    # Load FP16 weights; strict=True rejects missing/unexpected tensors.
    model.load_weights(f"{model_dir}/model.safetensors", strict=True)
    model.eval()

    return model
|
-
|
|
120
|
-
def load_jina_tokenizer():
    """Build the Jina V2 tokenizer from the bundled tokenizer.json.

    Padding (pad id 0, token "[PAD]") and truncation at 512 tokens are
    enabled so encoded batches come out rectangular and bounded in length.
    """
    tokenizer_file = f"{curr_dir}/modelfiles/tokenizer.json"
    jina_tokenizer = Tokenizer.from_file(tokenizer_file)
    jina_tokenizer.enable_padding(pad_id=0, pad_token="[PAD]")
    jina_tokenizer.enable_truncation(max_length=512)
    return jina_tokenizer
126
|
-
|
|
127
|
-
def encode_jina_text(model, tokenizer, text):
    """Embed a single string with the Jina V2 model.

    The tokenizer output (ids, attention mask, segment ids) is converted to
    MLX arrays with a batch dimension of 1 and fed through ``model.encode``;
    the resulting embedding array is returned unchanged.
    """
    enc = tokenizer.encode(text)

    # Some tokenizer outputs carry empty type_ids; fall back to all-zero
    # segment ids of the same length.
    type_ids = enc.type_ids or [0] * len(enc.ids)

    # Build numpy batches and hand them to MLX in one step.
    return model.encode(
        input_ids=mx.array(np.array([enc.ids], dtype=np.int32)),
        attention_mask=mx.array(np.array([enc.attention_mask], dtype=np.float32)),
        token_type_ids=mx.array(np.array([type_ids], dtype=np.int32)),
    )
|
-
|
|
151
|
-
# ========== MLX Embeddings Direct Implementation ==========
|
|
152
|
-
|
|
153
|
-
def load_mlx_embeddings_model(model_id):
    """Load a generic embedding model and tokenizer via mlx_embeddings.

    On first use the checkpoint is fetched from the Hugging Face Hub into
    the local ``modelfiles`` directory, then handed to
    ``mlx_embeddings.load``.

    Returns:
        (model, tokenizer) tuple as produced by ``mlx_embeddings.load``.

    Raises:
        ImportError: if the optional mlx_embeddings package is missing.
    """
    # Guard clause: the rest of the function is useless without the package.
    if not MLX_EMBEDDINGS_AVAILABLE:
        print("Warning: mlx_embeddings not available. Please install it to use general embedding models.")
        raise ImportError("mlx_embeddings package is not available. Please install it first.")

    local_dir = f"{curr_dir}/modelfiles"

    # A present config.json marks an already-downloaded model.
    if not os.path.exists(f"{local_dir}/config.json"):
        print(f"📥 Downloading model {model_id}...")
        os.makedirs(local_dir, exist_ok=True)

        try:
            snapshot_download(
                repo_id=model_id,
                local_dir=local_dir,
                resume_download=True,
                local_dir_use_symlinks=False,
            )
            print("✅ Model download completed!")
        except Exception as e:
            print(f"❌ Failed to download model: {e}")
            raise

    return mlx_embeddings.load(local_dir)
|
-
|
|
182
|
-
def encode_mlx_embeddings_text(model, tokenizer, texts, model_path=None):
    """Embed one or more texts using the mlx_embeddings package.

    Args:
        model: model object returned by ``mlx_embeddings.load``.
        tokenizer: the matching tokenizer object.
        texts: a single string or a list of strings.
        model_path: optional directory whose config.json is inspected to
            detect Gemma3 models needing the call-signature workaround;
            defaults to the bundled ``modelfiles`` directory.

    Returns:
        The ``text_embeds`` array produced by the model.
    """
    if isinstance(texts, str):
        texts = [texts]

    # WORKAROUND: Gemma3TextModel expects 'inputs' as a positional argument,
    # while mlx_embeddings.generate passes 'input_ids' as a keyword.  Detect
    # Gemma from config.json so those models can be called directly.
    if model_path:
        if os.path.isdir(model_path):
            cfg_file = os.path.join(model_path, "config.json")
        else:
            cfg_file = f"{model_path}/config.json"
    else:
        cfg_file = f"{curr_dir}/modelfiles/config.json"

    is_gemma = False
    if os.path.exists(cfg_file):
        try:
            with open(cfg_file, "r") as handle:
                cfg = json.load(handle)
            is_gemma = "Gemma3TextModel" in cfg.get("architectures", [])
        except Exception:
            pass

    if is_gemma:
        # Tokenize ourselves and call the model positionally, matching
        # Gemma3TextModel's signature:
        #   __call__(self, inputs: mx.array, attention_mask=None)
        batch = tokenizer.batch_encode_plus(
            texts,
            padding=True,
            truncation=True,
            return_tensors='mlx',
            max_length=512,
        )
        output = model(batch['input_ids'], attention_mask=batch.get('attention_mask', None))
    else:
        # Standard path: let mlx_embeddings drive tokenization and the
        # forward pass.
        output = mlx_embeddings.generate(
            model,
            tokenizer,
            texts=texts,
            max_length=512,
            padding=True,
            truncation=True,
        )

    # Both paths yield an output object carrying normalized embeddings.
    return output.text_embeds
|
-
|
|
246
|
-
def main(model_id):
    """Download *model_id* if needed, detect its type, and print sample embeddings.

    For Jina V2 checkpoints the direct implementation is used; anything else
    goes through mlx_embeddings (if installed).  Results for three fixed test
    sentences are printed to stdout.
    """
    print(f"🔍 Loading model: {model_id}")

    modelfiles_dir = f"{curr_dir}/modelfiles"
    model_type = detect_model_type(modelfiles_dir)

    # Fetch the model first when it is not cached, then re-detect its type
    # from the downloaded config.json.
    if not os.path.exists(f"{modelfiles_dir}/config.json"):
        print("Model not found locally. Downloading...")
        os.makedirs(modelfiles_dir, exist_ok=True)
        try:
            snapshot_download(
                repo_id=model_id,
                local_dir=modelfiles_dir,
                resume_download=True,
                local_dir_use_symlinks=False,
            )
            print("✅ Model download completed!")
            model_type = detect_model_type(modelfiles_dir)
        except Exception as e:
            print(f"❌ Failed to download model: {e}")
            raise

    print(f"📦 Detected model type: {model_type}")

    test_texts = [
        "Hello, how are you?",
        "What is machine learning?",
        "The weather is nice today.",
    ]

    if model_type == "jina_v2":
        print("Using Jina V2 direct implementation")

        model = load_jina_model(model_id)
        tokenizer = load_jina_tokenizer()

        print("\nGenerating embeddings for test texts:")
        for text in test_texts:
            vec = encode_jina_text(model, tokenizer, text)
            print(f"\nText: '{text}'")
            print(f" Embedding shape: {vec.shape}")
            print(f" Sample values (first 5): {vec.flatten()[:5].tolist()}")
            print(f" Stats - Min: {vec.min():.4f}, Max: {vec.max():.4f}, Mean: {vec.mean():.4f}")
    else:
        print("Using mlx_embeddings direct implementation")

        if not MLX_EMBEDDINGS_AVAILABLE:
            print("❌ mlx_embeddings is not installed. Please install it to use generic models.")
            return

        model, tokenizer = load_mlx_embeddings_model(model_id)

        print("\nGenerating embeddings for test texts:")
        # Pass model_path so the Gemma workaround can inspect config.json.
        all_vecs = encode_mlx_embeddings_text(model, tokenizer, test_texts, model_path=modelfiles_dir)

        for idx, text in enumerate(test_texts):
            vec = all_vecs[idx]
            print(f"\nText: '{text}'")
            print(f" Embedding shape: {vec.shape}")
            print(f" Sample values (first 5): {vec[:5].tolist()}")

            # Normalize to an MLX array before computing the summary stats.
            stats = vec if isinstance(vec, mx.array) else mx.array(vec)
            print(f" Stats - Min: {stats.min():.4f}, Max: {stats.max():.4f}, Mean: {stats.mean():.4f}")

    print("\n✅ Direct embedding generation completed!")
322
|
-
|
|
323
|
-
if __name__ == "__main__":
    import argparse

    # CLI entry point: a single optional --model_id selects the checkpoint.
    cli = argparse.ArgumentParser(description="Generate embeddings using direct implementation")
    cli.add_argument(
        "--model_id",
        type=str,
        default="nexaml/jina-v2-fp16-mlx",
        help="Model ID from Hugging Face Hub (e.g., 'nexaml/jina-v2-fp16-mlx' or 'mlx-community/embeddinggemma-300m-bf16')",
    )
    main(cli.parse_args().model_id)