nexaai 1.0.19rc7-cp310-cp310-macosx_14_0_universal2.whl → 1.0.19rc9-cp310-cp310-macosx_14_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nexaai might be problematic.
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +1 -1
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/mlx_backend/vlm/generate_qwen3_vl.py +14 -31
- nexaai/mlx_backend/vlm/generate_qwen3_vl_moe.py +15 -32
- nexaai/mlx_backend/vlm/modeling/models/qwen3_vl/qwen3vl.py +7 -23
- nexaai/mlx_backend/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +8 -24
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/METADATA +1 -1
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/RECORD +11 -200
- nexaai/binds/nexa_mlx/py-lib/asr/__init__.py +0 -12
- nexaai/binds/nexa_mlx/py-lib/asr/interface.py +0 -122
- nexaai/binds/nexa_mlx/py-lib/common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/common/utils.py +0 -25
- nexaai/binds/nexa_mlx/py-lib/cv/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/cv/generate.py +0 -195
- nexaai/binds/nexa_mlx/py-lib/cv/interface.py +0 -151
- nexaai/binds/nexa_mlx/py-lib/cv/main.py +0 -81
- nexaai/binds/nexa_mlx/py-lib/cv/modeling/pp_ocr_v4.py +0 -1736
- nexaai/binds/nexa_mlx/py-lib/embedding/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/generate.py +0 -333
- nexaai/binds/nexa_mlx/py-lib/embedding/interface.py +0 -617
- nexaai/binds/nexa_mlx/py-lib/embedding/main.py +0 -173
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/embedding/modeling/nexa_jina_v2.py +0 -399
- nexaai/binds/nexa_mlx/py-lib/image_gen/__init__.py +0 -1
- nexaai/binds/nexa_mlx/py-lib/image_gen/generate_sd.py +0 -244
- nexaai/binds/nexa_mlx/py-lib/image_gen/interface.py +0 -82
- nexaai/binds/nexa_mlx/py-lib/image_gen/main.py +0 -281
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/__init__.py +0 -306
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/clip.py +0 -116
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/config.py +0 -65
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/model_io.py +0 -386
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/sampler.py +0 -105
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/tokenizer.py +0 -100
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/unet.py +0 -460
- nexaai/binds/nexa_mlx/py-lib/image_gen/stable_diffusion/vae.py +0 -274
- nexaai/binds/nexa_mlx/py-lib/llm/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/llm/generate.py +0 -149
- nexaai/binds/nexa_mlx/py-lib/llm/interface.py +0 -764
- nexaai/binds/nexa_mlx/py-lib/llm/main.py +0 -68
- nexaai/binds/nexa_mlx/py-lib/rerank/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/generate.py +0 -174
- nexaai/binds/nexa_mlx/py-lib/rerank/interface.py +0 -287
- nexaai/binds/nexa_mlx/py-lib/rerank/main.py +0 -127
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/rerank/modeling/nexa_jina_rerank.py +0 -330
- nexaai/binds/nexa_mlx/py-lib/sd/__init__.py +0 -1
- nexaai/binds/nexa_mlx/py-lib/sd/interface.py +0 -362
- nexaai/binds/nexa_mlx/py-lib/sd/main.py +0 -286
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/__init__.py +0 -306
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/clip.py +0 -116
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/config.py +0 -65
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/model_io.py +0 -385
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/sampler.py +0 -105
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/tokenizer.py +0 -100
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/unet.py +0 -460
- nexaai/binds/nexa_mlx/py-lib/sd/modeling/vae.py +0 -274
- nexaai/binds/nexa_mlx/py-lib/tts/__init__.py +0 -12
- nexaai/binds/nexa_mlx/py-lib/tts/interface.py +0 -276
- nexaai/binds/nexa_mlx/py-lib/vlm/__init__.py +0 -3
- nexaai/binds/nexa_mlx/py-lib/vlm/generate.py +0 -572
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl.py +0 -294
- nexaai/binds/nexa_mlx/py-lib/vlm/generate_qwen3_vl_moe.py +0 -276
- nexaai/binds/nexa_mlx/py-lib/vlm/interface.py +0 -504
- nexaai/binds/nexa_mlx/py-lib/vlm/main.py +0 -320
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/convert.py +0 -68
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/aya_vision.py +0 -193
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/interpolate.py +0 -186
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/language.py +0 -233
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/aya_vision/vision.py +0 -503
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/base.py +0 -202
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/__init__.py +0 -10
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/deepseek_vl_v2.py +0 -472
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/language.py +0 -591
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/processing_deepsek_vl_v2.py +0 -526
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/vision.py +0 -356
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/florence2.py +0 -366
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/language.py +0 -488
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/florence2/vision.py +0 -591
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/gemma3.py +0 -213
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/language.py +0 -315
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3/vision.py +0 -238
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/audio.py +0 -1038
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/config.py +0 -139
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/gemma3n.py +0 -322
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/language.py +0 -629
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/gemma3n/vision.py +0 -1022
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/idefics2.py +0 -294
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/language.py +0 -191
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics2/vision.py +0 -267
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/idefics3.py +0 -175
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/language.py +0 -192
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/idefics3/vision.py +0 -233
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/internvl_chat.py +0 -140
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/language.py +0 -220
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/processor.py +0 -393
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/internvl_chat/vision.py +0 -293
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kernels.py +0 -307
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/kimi_vl.py +0 -143
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/language.py +0 -509
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/kimi_vl/vision.py +0 -522
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/language.py +0 -386
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/llama4.py +0 -138
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llama4/vision.py +0 -560
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/language.py +0 -240
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/llava.py +0 -153
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava/vision.py +0 -259
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/language.py +0 -236
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/llava_bunny.py +0 -256
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_bunny/vision.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/language.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/llava_next.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/llava_next/vision.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mistral3/mistral3.py +0 -283
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/language.py +0 -416
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/mllama.py +0 -172
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/mllama/vision.py +0 -499
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/language.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/molmo.py +0 -133
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/molmo/vision.py +0 -465
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/__init__.py +0 -10
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/language.py +0 -230
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/multi_modality.py +0 -385
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/sam.py +0 -557
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/multi_modality/vision.py +0 -526
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/language.py +0 -282
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/paligemma.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/paligemma/vision.py +0 -242
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/language.py +0 -21
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/phi3_v.py +0 -243
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/su_rope.py +0 -71
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/phi3_v/vision.py +0 -324
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/language.py +0 -229
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/pixtral.py +0 -161
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/pixtral/vision.py +0 -320
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/config.py +0 -108
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/language.py +0 -490
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/qwen2_5_vl.py +0 -168
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_5_vl/vision.py +0 -414
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/__init__.py +0 -2
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/config.py +0 -104
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/language.py +0 -490
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/qwen2_vl.py +0 -167
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen2_vl/vision.py +0 -312
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/base.py +0 -117
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/cache.py +0 -531
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/generate.py +0 -701
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/rope_utils.py +0 -255
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/sample_utils.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/llm_common/tokenizer_utils.py +0 -407
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/processor.py +0 -476
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3_vl/qwen3vl.py +0 -1223
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/__init__.py +0 -0
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/base.py +0 -117
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/cache.py +0 -531
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/generate.py +0 -701
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/rope_utils.py +0 -255
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/sample_utils.py +0 -303
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/llm_common/tokenizer_utils.py +0 -407
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/processor.py +0 -476
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/qwen3vl_moe.py +0 -1309
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/qwen3vl_moe/switch_layers.py +0 -210
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/__init__.py +0 -8
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/smolvlm/smolvlm.py +0 -62
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_5_vl.py +0 -209
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/processing_qwen2_vl.py +0 -215
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/prompt_utils.py +0 -474
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/sample_utils.py +0 -39
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/tokenizer_utils.py +0 -344
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/__init__.py +0 -9
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/lora.py +0 -70
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/trainer.py +0 -296
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/trainer/utils.py +0 -160
- nexaai/binds/nexa_mlx/py-lib/vlm/modeling/utils.py +0 -928
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/WHEEL +0 -0
- {nexaai-1.0.19rc7.dist-info → nexaai-1.0.19rc9.dist-info}/top_level.txt +0 -0
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/cache.py +0 -230
@@ -1,230 +0,0 @@
-from typing import Any, List, Optional, Tuple
-
-import mlx.core as mx
-import mlx.nn as nn
-from mlx_lm.models.cache import ChunkedKVCache, KVCache, RotatingKVCache, _BaseCache
-
-
-def make_prompt_cache(
-    model: nn.Module,
-    max_kv_size: Optional[int] = None,
-) -> List[Any]:
-    """
-    Construct the model's cache for use in generation.
-
-    This function will defer the cache construction to the model if it has a
-    ``make_cache`` method, otherwise it will make a default KV cache.
-
-    Args:
-        model (nn.Module): The language model.
-        max_kv_size (Optional[int]): If provided and the model does not have a
-            ``make_cache`` method, a ``RotatingKVCache`` is used with a maximum
-            size of ``max_kv_size``
-    """
-    if hasattr(model, "make_cache"):
-        return model.make_cache()
-
-    num_layers = len(model.layers)
-
-    if max_kv_size is not None:
-        return [
-            RotatingKVCache(max_size=max_kv_size, keep=4) for _ in range(num_layers)
-        ]
-    else:
-        return [KVCache() for _ in range(num_layers)]
-
-
-class SimpleKVCache:
-    """A simple key-value cache for transformer attention layers.
-
-    Stores and concatenates key/value tensors along sequence dimension.
-    """
-
-    def __init__(self):
-        self.keys = None
-        self.values = None
-        self.cache_length = 0
-
-    def update_and_fetch(self, keys, values):
-        """Update cache with new key/value tensors and return full cache.
-
-        Args:
-            keys: New key tensor to add [batch, heads, seq_len, head_dim]
-            values: New value tensor to add [batch, heads, seq_len, head_dim]
-
-        Returns:
-            Tuple of (cached_keys, cached_values) containing full cache history
-        """
-        if self.cache_length == 0:
-            # First update - just store tensors
-            self.keys = keys
-            self.values = values
-        else:
-            # Concatenate with existing cache along sequence dimension
-            self.keys = mx.concatenate([self.keys, keys], axis=2)
-            self.values = mx.concatenate([self.values, values], axis=2)
-
-        self.cache_length += keys.shape[2]
-        return self.keys, self.values
-
-    def fetch(self):
-        return self.keys, self.values
-
-    def update(self, keys, values):
-        """Update cache with new key/value tensors without returning.
-
-        Args:
-            keys: New key tensor to store
-            values: New value tensor to store
-        """
-        self.keys = keys
-        self.values = values
-        self.cache_length += keys.shape[2]
-
-
-class SlidingWindowCache(_BaseCache):
-    """A sliding window cache for local attention layers."""
-
-    def __init__(self, max_size: int, step: int = 256):
-        self.max_size = max_size
-        self.step = step
-        self.keys = None
-        self.values = None
-        self.offset = 0
-
-    def update_and_fetch(
-        self, keys: mx.array, values: mx.array
-    ) -> Tuple[mx.array, mx.array]:
-        B, n_kv_heads, seq_len, k_head_dim = keys.shape
-        v_head_dim = values.shape[-1]
-
-        if self.keys is None:
-            # Initialize cache
-            k_shape = (B, n_kv_heads, self.max_size, k_head_dim)
-            v_shape = (B, n_kv_heads, self.max_size, v_head_dim)
-            self.keys = mx.zeros(k_shape, dtype=keys.dtype)
-            self.values = mx.zeros(v_shape, dtype=values.dtype)
-
-        # Simple sliding window: keep only the last max_size tokens
-        if self.offset + seq_len <= self.max_size:
-            # Fits within current window
-            start_idx = self.offset
-            end_idx = self.offset + seq_len
-            self.keys[:, :, start_idx:end_idx, :] = keys
-            self.values[:, :, start_idx:end_idx, :] = values
-            self.offset += seq_len
-        else:
-            # Need to slide the window
-            if seq_len < self.max_size:
-                # Shift existing content left
-                shift_amount = min(seq_len, self.max_size - 1)
-                self.keys[:, :, :-shift_amount, :] = self.keys[:, :, shift_amount:, :]
-                self.values[:, :, :-shift_amount, :] = self.values[
-                    :, :, shift_amount:, :
-                ]
-                # Add new tokens at the end
-                self.keys[:, :, -shift_amount:, :] = keys[:, :, -shift_amount:, :]
-                self.values[:, :, -shift_amount:, :] = values[:, :, -shift_amount:, :]
-            else:
-                # New sequence is larger than cache, just keep the last max_size tokens
-                self.keys = keys[:, :, -self.max_size :, :]
-                self.values = values[:, :, -self.max_size :, :]
-                self.offset = self.max_size
-
-        return self.keys, self.values
-
-    @property
-    def state(self):
-        if self.keys is None:
-            return None, None
-        return self.keys, self.values
-
-    @state.setter
-    def state(self, v):
-        if v is not None and len(v) == 2:
-            self.keys, self.values = v
-            if self.keys is not None:
-                self.offset = self.max_size
-
-    def get_max_cache_shape(self):
-        return self.max_size
-
-    @property
-    def meta_state(self):
-        return tuple(map(str, (self.max_size, self.step, self.offset)))
-
-    @meta_state.setter
-    def meta_state(self, v):
-        self.max_size, self.step, self.offset = map(int, v)
-
-    def is_trimmable(self):
-        return False
-
-    def trim(self, n):
-        return 0
-
-
-class StaticKVCache(_BaseCache):
-    """A static cache that grows to accommodate all tokens."""
-
-    def __init__(self, max_size: int, step: int = 256):
-        self.max_size = max_size
-        self.step = step
-        self.keys = None
-        self.values = None
-        self.offset = 0
-
-    def update_and_fetch(
-        self, keys: mx.array, values: mx.array
-    ) -> Tuple[mx.array, mx.array]:
-        B, n_kv_heads, seq_len, k_head_dim = keys.shape
-        v_head_dim = values.shape[-1]
-
-        # Initialize cache if needed
-        if self.keys is None:
-            k_shape = (B, n_kv_heads, self.max_size, k_head_dim)
-            v_shape = (B, n_kv_heads, self.max_size, v_head_dim)
-            self.keys = mx.zeros(k_shape, dtype=keys.dtype)
-            self.values = mx.zeros(v_shape, dtype=values.dtype)
-
-        # Update cache
-        end_pos = min(self.offset + seq_len, self.max_size)
-        actual_seq_len = end_pos - self.offset
-
-        if actual_seq_len > 0:
-            self.keys[:, :, self.offset : end_pos, :] = keys[:, :, :actual_seq_len, :]
-            self.values[:, :, self.offset : end_pos, :] = values[
-                :, :, :actual_seq_len, :
-            ]
-            self.offset = end_pos
-
-        return self.keys, self.values
-
-    @property
-    def state(self):
-        if self.keys is None:
-            return None, None
-        return self.keys, self.values
-
-    @state.setter
-    def state(self, v):
-        if v is not None and len(v) == 2:
-            self.keys, self.values = v
-            if self.keys is not None:
-                self.offset = self.max_size
-
-    @property
-    def meta_state(self):
-        return tuple(map(str, (self.max_size, self.step, self.offset)))
-
-    @meta_state.setter
-    def meta_state(self, v):
-        self.max_size, self.step, self.offset = map(int, v)
-
-    def is_trimmable(self):
-        return True
-
-    def trim(self, n):
-        n = min(self.offset, n)
-        self.offset -= n
-        return n
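The removed cache.py provided the per-layer KV caches used by the bundled VLM runner: make_prompt_cache defers to a model's own make_cache method when present and otherwise builds one KVCache (or RotatingKVCache) per layer, while SimpleKVCache simply grows by concatenating new keys/values along the sequence axis. A minimal sketch of that update_and_fetch contract, using a trimmed copy of the class from the diff above; the decode loop and tensor shapes are illustrative, not part of the package:

import mlx.core as mx


class SimpleKVCache:
    """Trimmed copy of the removed SimpleKVCache: grow by concatenation on axis 2."""

    def __init__(self):
        self.keys = None
        self.values = None
        self.cache_length = 0

    def update_and_fetch(self, keys, values):
        if self.cache_length == 0:
            self.keys, self.values = keys, values
        else:
            self.keys = mx.concatenate([self.keys, keys], axis=2)
            self.values = mx.concatenate([self.values, values], axis=2)
        self.cache_length += keys.shape[2]
        return self.keys, self.values


# Illustrative shapes only: batch=1, 4 KV heads, head_dim=64.
cache = SimpleKVCache()
for step_len in (8, 1, 1, 1):  # 8-token prefill, then 3 single-token decode steps
    k = mx.random.normal((1, 4, step_len, 64))
    v = mx.random.normal((1, 4, step_len, 64))
    k_all, v_all = cache.update_and_fetch(k, v)

print(cache.cache_length, k_all.shape)  # 11 and (1, 4, 11, 64)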
nexaai/binds/nexa_mlx/py-lib/vlm/modeling/models/deepseek_vl_v2/conversation.py +0 -264
@@ -1,264 +0,0 @@
-"""
-From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
-"""
-
-import dataclasses
-from enum import IntEnum, auto
-from typing import Dict, List
-
-
-class SeparatorStyle(IntEnum):
-    """Separator styles."""
-
-    DeepSeek = auto()
-    DeepSeekV2 = auto()
-    PLAIN = auto()
-    ALIGNMENT = auto()
-
-
-@dataclasses.dataclass
-class Conversation:
-    """A class that manages prompt templates and keeps all conversation history."""
-
-    # The name of this template
-    name: str
-    # The template of the system prompt
-    system_template: str = "{system_message}"
-    # The system message
-    system_message: str = ""
-    # The names of two roles
-    roles: List[str] = (("USER", "ASSISTANT"),)
-    # All messages. Each item is (role, message).
-    messages: List[List[str]] = ()
-    # The number of few shot examples
-    offset: int = 0
-    # The separator style and configurations
-    sep_style: SeparatorStyle = SeparatorStyle.DeepSeek
-    sep: str = "\n"
-    sep2: str = None
-    # Stop criteria (the default one is EOS token)
-    stop_str: str = None
-    # Stops generation if meeting any token in this list
-    stop_token_ids: List[int] = None
-
-    def get_prompt(self) -> str:
-        """Get the prompt for generation."""
-        system_prompt = self.system_template.format(system_message=self.system_message)
-        if self.sep_style == SeparatorStyle.DeepSeek:
-            seps = [self.sep, self.sep2]
-            if system_prompt == "" or system_prompt is None:
-                ret = ""
-            else:
-                ret = system_prompt + seps[0]
-            for i, (role, message) in enumerate(self.messages):
-                if message:
-                    ret += role + ": " + message + seps[i % 2]
-                else:
-                    ret += role + ":"
-            return ret
-        elif self.sep_style == SeparatorStyle.DeepSeekV2:
-            seps = [self.sep, self.sep2]
-            if system_prompt == "" or system_prompt is None:
-                ret = ""
-            else:
-                ret = system_prompt + seps[0]
-            for i, (role, message) in enumerate(self.messages):
-                if message:
-                    if role == "User":
-                        ret += "<|sft▁begin|>\n" + message + self.sep
-                    else:
-                        ret += message + self.sep2
-                else:
-                    ret = ret
-            return ret
-
-        elif self.sep_style == SeparatorStyle.PLAIN:
-            seps = [self.sep, self.sep2]
-            ret = ""
-            for i, (role, message) in enumerate(self.messages):
-                if message:
-                    if type(message) is tuple:
-                        message, _, _ = message
-                    if i % 2 == 0:
-                        ret += message + seps[i % 2]
-                    else:
-                        ret += message + seps[i % 2]
-                else:
-                    ret += ""
-            return ret
-        elif self.sep_style == SeparatorStyle.ALIGNMENT:
-            seps = [self.sep, self.sep2]
-            ret = ""
-            for i, (role, message) in enumerate(self.messages):
-                if message:
-                    if type(message) is tuple:
-                        message, _, _ = message
-                    if i % 2 == 0:
-                        ret += "<image>\n" + seps[i % 2]
-                    else:
-                        ret += message + seps[i % 2]
-                else:
-                    ret += ""
-            return ret
-        else:
-            raise ValueError(f"Invalid style: {self.sep_style}")
-
-    def set_system_message(self, system_message: str):
-        """Set the system message."""
-        self.system_message = system_message
-
-    def append_message(self, role: str, message: str):
-        """Append a new message."""
-        self.messages.append([role, message])
-
-    def update_last_message(self, message: str):
-        """Update the last output.
-
-        The last message is typically set to be None when constructing the prompt,
-        so we need to update it in-place after getting the response from a model.
-        """
-        self.messages[-1][1] = message
-
-    def reset_message(self):
-        """Reset a new message."""
-        self.messages = []
-
-    def to_gradio_chatbot(self):
-        """Convert the conversation to gradio chatbot format."""
-        ret = []
-        for i, (role, msg) in enumerate(self.messages[self.offset :]):
-            if i % 2 == 0:
-                ret.append([msg, None])
-            else:
-                ret[-1][-1] = msg
-        return ret
-
-    def to_openai_api_messages(self):
-        """Convert the conversation to OpenAI chat completion format."""
-        system_prompt = self.system_template.format(system_message=self.system_message)
-        ret = [{"role": "system", "content": system_prompt}]
-
-        for i, (_, msg) in enumerate(self.messages[self.offset :]):
-            if i % 2 == 0:
-                ret.append({"role": "user", "content": msg})
-            else:
-                if msg is not None:
-                    ret.append({"role": "assistant", "content": msg})
-        return ret
-
-    def copy(self):
-        return Conversation(
-            name=self.name,
-            system_template=self.system_template,
-            system_message=self.system_message,
-            roles=self.roles,
-            messages=[[x, y] for x, y in self.messages],
-            offset=self.offset,
-            sep_style=self.sep_style,
-            sep=self.sep,
-            sep2=self.sep2,
-            stop_str=self.stop_str,
-            stop_token_ids=self.stop_token_ids,
-        )
-
-    def dict(self):
-        return {
-            "template_name": self.name,
-            "system_message": self.system_message,
-            "roles": self.roles,
-            "messages": self.messages,
-            "offset": self.offset,
-        }
-
-
-# A global registry for all conversation templates
-conv_templates: Dict[str, Conversation] = {}
-
-
-def register_conv_template(template: Conversation, override: bool = False):
-    """Register a new conversation template."""
-    if not override:
-        assert (
-            template.name not in conv_templates
-        ), f"{template.name} has been registered."
-
-    conv_templates[template.name] = template
-
-
-def get_conv_template(name: str) -> Conversation:
-    """Get a conversation template."""
-    return conv_templates[name].copy()
-
-
-register_conv_template(
-    Conversation(
-        name="deepseek",
-        system_template="{system_message}",
-        # system_message="You are a helpful assistant. Please answer truthfully and write out your "
-        # "thinking step by step to be sure you get the right answer.",
-        system_message="",
-        roles=("<|User|>", "<|Assistant|>"),
-        messages=(),
-        offset=0,
-        sep_style=SeparatorStyle.DeepSeek,
-        sep="\n\n",
-        sep2="<|end▁of▁sentence|>",
-        stop_token_ids=[100001],
-        stop_str=["User:", "<|end▁of▁sentence|>"],
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name="deepseekv2",
-        system_template="{system_message}",
-        system_message="",
-        roles=("|<User>|", "|<Assistant>|"),
-        messages=(),
-        offset=0,
-        sep_style=SeparatorStyle.DeepSeekV2,
-        sep="\n<|sft▁end|>",
-        sep2="<|end▁of▁sentence|>",
-        stop_token_ids=[100001],
-        stop_str=["User:", "<|end▁of▁sentence|>"],
-    )
-)
-
-
-register_conv_template(
-    Conversation(
-        name="plain",
-        system_template="",
-        system_message="",
-        roles=("", ""),
-        messages=(),
-        offset=0,
-        sep_style=SeparatorStyle.PLAIN,
-        sep="",
-        sep2="",
-        stop_token_ids=[100001],
-        stop_str=["</s>"],
-    )
-)
-
-
-register_conv_template(
-    Conversation(
-        name="alignment",
-        system_template="",
-        system_message="",
-        roles=("", ""),
-        messages=(),
-        offset=0,
-        sep_style=SeparatorStyle.ALIGNMENT,
-        sep="",
-        sep2="",
-        stop_token_ids=[100001],
-        stop_str=["</s>"],
-    )
-)
-
-
-if __name__ == "__main__":
-    print("deepseek template:")
-    conv = get_conv_template("deepseek")
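The removed conversation.py carried the FastChat-style prompt templating used by the DeepSeek-VL2 path: templates are registered once in the global conv_templates dict, and callers fetch a copy via get_conv_template, append (role, message) pairs, and render the prompt string with get_prompt. A short usage sketch against that API; the import line is hypothetical, since rc9 no longer ships the module:

# Hypothetical import of the removed deepseek_vl_v2/conversation.py shown above.
from conversation import get_conv_template

conv = get_conv_template("deepseek")  # returns a copy of the registered template
conv.append_message(conv.roles[0], "Describe this image.")
conv.append_message(conv.roles[1], None)  # None leaves the assistant turn open
prompt = conv.get_prompt()
# With the "deepseek" template (sep="\n\n") this renders:
# "<|User|>: Describe this image.\n\n<|Assistant|>:"
messages = conv.to_openai_api_messages()
# [{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'Describe this image.'}]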