nexaai 1.0.4rc15-cp310-cp310-macosx_13_0_x86_64.whl → 1.0.4rc16-cp310-cp310-macosx_13_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry has flagged this version of nexaai as potentially problematic; see the registry advisory for details.
- nexaai/__init__.py +6 -1
- nexaai/_stub.cpython-310-darwin.so +0 -0
- nexaai/_version.py +1 -1
- nexaai/asr.py +7 -3
- nexaai/asr_impl/mlx_asr_impl.py +3 -2
- nexaai/asr_impl/pybind_asr_impl.py +3 -2
- nexaai/binds/libcrypto.dylib +0 -0
- nexaai/binds/libnexa_bridge.dylib +0 -0
- nexaai/binds/libssl.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libggml-base.dylib +0 -0
- nexaai/binds/nexa_llama_cpp/libnexa_plugin.dylib +0 -0
- nexaai/common.py +7 -0
- nexaai/cv.py +7 -3
- nexaai/cv_impl/mlx_cv_impl.py +3 -2
- nexaai/cv_impl/pybind_cv_impl.py +3 -2
- nexaai/embedder.py +7 -3
- nexaai/embedder_impl/mlx_embedder_impl.py +3 -2
- nexaai/embedder_impl/pybind_embedder_impl.py +6 -3
- nexaai/image_gen.py +6 -2
- nexaai/image_gen_impl/mlx_image_gen_impl.py +3 -2
- nexaai/image_gen_impl/pybind_image_gen_impl.py +3 -2
- nexaai/llm.py +8 -5
- nexaai/llm_impl/mlx_llm_impl.py +19 -6
- nexaai/llm_impl/pybind_llm_impl.py +7 -5
- nexaai/mlx_backend/llm/interface.py +2 -2
- nexaai/rerank.py +7 -3
- nexaai/rerank_impl/mlx_rerank_impl.py +3 -2
- nexaai/rerank_impl/pybind_rerank_impl.py +3 -2
- nexaai/tts.py +7 -3
- nexaai/tts_impl/mlx_tts_impl.py +3 -2
- nexaai/tts_impl/pybind_tts_impl.py +3 -2
- nexaai/vlm.py +6 -3
- nexaai/vlm_impl/mlx_vlm_impl.py +3 -3
- nexaai/vlm_impl/pybind_vlm_impl.py +5 -3
- {nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/METADATA +1 -1
- {nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/RECORD +38 -36
- {nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/WHEEL +0 -0
- {nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/top_level.txt +0 -0
nexaai/__init__.py
CHANGED
@@ -19,7 +19,10 @@ except ImportError:
     __version__ = "0.0.1"
 
 # Import common configuration classes first (no external dependencies)
-from .common import ModelConfig, GenerationConfig, ChatMessage, SamplerConfig
+from .common import ModelConfig, GenerationConfig, ChatMessage, SamplerConfig, PluginID
+
+# Create alias for PluginID to be accessible as plugin_id
+plugin_id = PluginID
 
 # Import new feature classes (no external dependencies in base classes)
 from .llm import LLM
@@ -40,6 +43,8 @@ __all__ = [
     "ChatMessage",
     "SamplerConfig",
     "EmbeddingConfig",
+    "PluginID",
+    "plugin_id",
 
     "LLM",
     "Embedder",
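The two hunks above add PluginID to the package's public surface and expose a lowercase plugin_id alias for it. A minimal import sketch, assuming the 1.0.4rc16 wheel is installed on a matching platform; it uses only the names the diff itself exports:

```python
# Minimal sketch: the alias and the enum are the same object.
from nexaai import PluginID, plugin_id

assert plugin_id is PluginID
print([member.value for member in PluginID])  # ['mlx', 'llama_cpp']
```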
nexaai/_stub.cpython-310-darwin.so
CHANGED
Binary file
nexaai/_version.py
CHANGED
nexaai/asr.py
CHANGED
@@ -1,8 +1,9 @@
-from typing import List, Optional, Sequence, Tuple
+from typing import List, Optional, Sequence, Tuple, Union
 from abc import abstractmethod
 from dataclasses import dataclass
 
 from nexaai.base import BaseModel
+from nexaai.common import PluginID
 
 
 @dataclass
@@ -33,11 +34,14 @@ class ASR(BaseModel):
                    model_path: str,
                    tokenizer_path: Optional[str] = None,
                    language: Optional[str] = None,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'ASR':
         """Load ASR model from local path, routing to appropriate implementation."""
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.asr_impl.mlx_asr_impl import MLXASRImpl
             return MLXASRImpl._load_from(model_path, tokenizer_path, language, plugin_id, device_id)
         else:
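The routing check added to ASR._load_from (and repeated in the other loaders below) normalizes the argument before comparing it. A standalone sketch of that pattern; the enum here is a local copy mirroring nexaai.common.PluginID, not an import from the wheel:

```python
from enum import Enum


class PluginID(str, Enum):
    # Local copy of the enum added in nexaai/common.py.
    MLX = "mlx"
    LLAMA_CPP = "llama_cpp"


def resolve_plugin_value(plugin_id):
    """Accept either a PluginID member or a plain string and return the raw string."""
    return plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id


# The loaders compare the normalized value: "mlx" selects the MLX implementation,
# anything else falls through to the PyBind (llama.cpp) implementation.
assert resolve_plugin_value(PluginID.MLX) == "mlx"
assert resolve_plugin_value("mlx") == "mlx"
assert resolve_plugin_value(PluginID.LLAMA_CPP) == "llama_cpp"
```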
nexaai/asr_impl/mlx_asr_impl.py
CHANGED
@@ -1,7 +1,8 @@
 # Note: This code is generated by Cursor, not tested yet.
 
-from typing import List, Optional
+from typing import List, Optional, Union
 
+from nexaai.common import PluginID
 from nexaai.asr import ASR, ASRConfig, ASRResult
 from nexaai.mlx_backend.asr.interface import MlxAsr as MLXASRInterface
 from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
@@ -18,7 +19,7 @@ class MLXASRImpl(ASR):
                    model_path: str,
                    tokenizer_path: Optional[str] = None,
                    language: Optional[str] = None,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None
                    ) -> 'MLXASRImpl':
         """Load ASR model from local path using MLX backend."""
nexaai/asr_impl/pybind_asr_impl.py
CHANGED
@@ -1,5 +1,6 @@
-from typing import List, Optional
+from typing import List, Optional, Union
 
+from nexaai.common import PluginID
 from nexaai.asr import ASR, ASRConfig, ASRResult
 
 
@@ -14,7 +15,7 @@ class PyBindASRImpl(ASR):
                    model_path: str,
                    tokenizer_path: Optional[str] = None,
                    language: Optional[str] = None,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'PyBindASRImpl':
         """Load ASR model from local path using PyBind backend."""
Binary files changed: nexaai/binds/libcrypto.dylib, nexaai/binds/libnexa_bridge.dylib, nexaai/binds/libssl.dylib, nexaai/binds/nexa_llama_cpp/libggml-base.dylib, nexaai/binds/nexa_llama_cpp/libnexa_plugin.dylib
nexaai/common.py
CHANGED
@@ -1,5 +1,12 @@
 from dataclasses import dataclass
 from typing import TypedDict, Literal, Optional, List
+from enum import Enum
+
+
+class PluginID(str, Enum):
+    """Enum for plugin identifiers."""
+    MLX = "mlx"
+    LLAMA_CPP = "llama_cpp"
 
 
 class ChatMessage(TypedDict):
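Because PluginID subclasses str, its members behave like the plain strings callers were already passing. A short behavioural sketch using a standalone copy of the enum above (not an import from the wheel):

```python
from enum import Enum


class PluginID(str, Enum):
    """Standalone copy of the enum added to nexaai/common.py."""
    MLX = "mlx"
    LLAMA_CPP = "llama_cpp"


# str subclassing means members compare equal to their raw values...
assert PluginID.MLX == "mlx"
assert isinstance(PluginID.LLAMA_CPP, str)
# ...and .value yields the exact plain string that the compiled bindings receive.
assert PluginID.LLAMA_CPP.value == "llama_cpp"
# Round-tripping from a string back to the enum also works:
assert PluginID("mlx") is PluginID.MLX
```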
nexaai/cv.py
CHANGED
@@ -1,8 +1,9 @@
-from typing import List, Optional
+from typing import List, Optional, Union
 from abc import abstractmethod
 from dataclasses import dataclass
 
 from nexaai.base import BaseModel
+from nexaai.common import PluginID
 
 
 @dataclass
@@ -71,11 +72,14 @@ class CVModel(BaseModel):
     def _load_from(cls,
                    _: str,  # TODO: remove this argument, this is a hack to make api design happy
                    config: CVModelConfig,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'CVModel':
         """Load CV model from configuration, routing to appropriate implementation."""
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.cv_impl.mlx_cv_impl import MLXCVImpl
             return MLXCVImpl._load_from(config, plugin_id, device_id)
         else:
nexaai/cv_impl/mlx_cv_impl.py
CHANGED
@@ -1,8 +1,9 @@
 # Note: This code is generated by Cursor, not tested yet.
 
-from typing import Optional
+from typing import Optional, Union
 import os
 
+from nexaai.common import PluginID
 from nexaai.cv import CVModel, CVModelConfig, CVResults
 from nexaai.mlx_backend.cv.interface import CVModel as MLXCVInterface, create_cv_model
 
@@ -16,7 +17,7 @@ class MLXCVImpl(CVModel):
     @classmethod
     def _load_from(cls,
                    config: CVModelConfig,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None
                    ) -> 'MLXCVImpl':
         """Load CV model from configuration using MLX backend."""
nexaai/cv_impl/pybind_cv_impl.py
CHANGED
@@ -1,5 +1,6 @@
-from typing import Optional
+from typing import Optional, Union
 
+from nexaai.common import PluginID
 from nexaai.cv import CVModel, CVModelConfig, CVResults
 
 
@@ -12,7 +13,7 @@ class PyBindCVImpl(CVModel):
     @classmethod
     def _load_from(cls,
                    config: CVModelConfig,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'PyBindCVImpl':
         """Load CV model from configuration using PyBind backend."""
nexaai/embedder.py
CHANGED
@@ -4,6 +4,7 @@ from abc import abstractmethod
 import numpy as np
 
 from nexaai.base import BaseModel
+from nexaai.common import PluginID
 
 
 @dataclass
@@ -21,19 +22,22 @@ class Embedder(BaseModel):
         pass
 
     @classmethod
-    def _load_from(cls, model_path: str, tokenizer_file: str = "tokenizer.json", plugin_id: str =
+    def _load_from(cls, model_path: str, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP):
        """
        Load an embedder from model files, routing to appropriate implementation.
 
        Args:
            model_path: Path to the model file
            tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
-           plugin_id: Plugin ID to use for the model (default:
+           plugin_id: Plugin ID to use for the model (default: PluginID.LLAMA_CPP)
 
        Returns:
            Embedder instance
        """
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
            from nexaai.embedder_impl.mlx_embedder_impl import MLXEmbedderImpl
            return MLXEmbedderImpl._load_from(model_path, tokenizer_file, plugin_id)
        else:
nexaai/embedder_impl/mlx_embedder_impl.py
CHANGED
@@ -1,6 +1,7 @@
 from typing import List, Union
 import numpy as np
 
+from nexaai.common import PluginID
 from nexaai.embedder import Embedder, EmbeddingConfig
 from nexaai.mlx_backend.embedding.interface import Embedder as MLXEmbedderInterface
 from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
@@ -13,14 +14,14 @@ class MLXEmbedderImpl(Embedder):
         self._mlx_embedder = None
 
     @classmethod
-    def _load_from(cls, model_path: str, tokenizer_file: str = "tokenizer.json", plugin_id: str =
+    def _load_from(cls, model_path: str, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.MLX):
        """
        Load an embedder from model files using MLX backend.
 
        Args:
            model_path: Path to the model file
            tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
-           plugin_id: Plugin ID to use for the model (default:
+           plugin_id: Plugin ID to use for the model (default: PluginID.MLX)
 
        Returns:
            MLXEmbedderImpl instance
nexaai/embedder_impl/pybind_embedder_impl.py
CHANGED
@@ -1,6 +1,7 @@
 from typing import List, Union
 import numpy as np
 
+from nexaai.common import PluginID
 from nexaai.embedder import Embedder, EmbeddingConfig
 from nexaai.binds import embedder_bind
 from nexaai.runtime import _ensure_runtime
@@ -15,20 +16,22 @@ class PyBindEmbedderImpl(Embedder):
         self._handle = _handle_ptr
 
     @classmethod
-    def _load_from(cls, model_path: str, tokenizer_file: str = "tokenizer.json", plugin_id: str =
+    def _load_from(cls, model_path: str, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP):
        """
        Load an embedder from model files
 
        Args:
            model_path: Path to the model file
            tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
-           plugin_id: Plugin ID to use for the model (default:
+           plugin_id: Plugin ID to use for the model (default: PluginID.LLAMA_CPP)
 
        Returns:
            PyBindEmbedderImpl instance
        """
        _ensure_runtime()
-
+        # Convert enum to string for C++ binding
+        plugin_id_str = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+        handle = embedder_bind.ml_embedder_create(model_path, tokenizer_file, plugin_id_str)
        return cls(handle)
 
    def eject(self):
nexaai/image_gen.py
CHANGED
@@ -3,6 +3,7 @@ from abc import abstractmethod
 from dataclasses import dataclass
 
 from nexaai.base import BaseModel
+from nexaai.common import PluginID
 
 
 @dataclass
@@ -67,13 +68,16 @@ class ImageGen(BaseModel):
     def _load_from(cls,
                    model_path: str,
                    scheduler_config_path: str = "",
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None,
                    float16: bool = True,
                    quantize: bool = False
                    ) -> 'ImageGen':
         """Load image generation model from local path, routing to appropriate implementation."""
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.image_gen_impl.mlx_image_gen_impl import MLXImageGenImpl
             return MLXImageGenImpl._load_from(model_path, scheduler_config_path, plugin_id, device_id, float16, quantize)
         else:
nexaai/image_gen_impl/mlx_image_gen_impl.py
CHANGED
@@ -1,8 +1,9 @@
 # Note: This code is generated by Cursor, not tested yet.
 
-from typing import List, Optional
+from typing import List, Optional, Union
 import os
 
+from nexaai.common import PluginID
 from nexaai.image_gen import ImageGen, ImageGenerationConfig, ImageSamplerConfig, SchedulerConfig, Image
 from nexaai.mlx_backend.sd.interface import ImageGen as MLXImageGenInterface
 
@@ -17,7 +18,7 @@ class MLXImageGenImpl(ImageGen):
     def _load_from(cls,
                    model_path: str,
                    scheduler_config_path: str = "",
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None,
                    float16: bool = True,
                    quantize: bool = False
nexaai/image_gen_impl/pybind_image_gen_impl.py
CHANGED
@@ -1,5 +1,6 @@
-from typing import List, Optional
+from typing import List, Optional, Union
 
+from nexaai.common import PluginID
 from nexaai.image_gen import ImageGen, ImageGenerationConfig, ImageSamplerConfig, SchedulerConfig, Image
 
 
@@ -13,7 +14,7 @@ class PyBindImageGenImpl(ImageGen):
     def _load_from(cls,
                    model_path: str,
                    scheduler_config_path: str = "",
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None,
                    float16: bool = True,
                    quantize: bool = False
nexaai/llm.py
CHANGED
@@ -1,9 +1,9 @@
-from typing import Generator, Optional
+from typing import Generator, Optional, Union
 from abc import abstractmethod
 import queue
 import threading
 
-from nexaai.common import ModelConfig, GenerationConfig, ChatMessage
+from nexaai.common import ModelConfig, GenerationConfig, ChatMessage, PluginID
 from nexaai.base import BaseModel
 
 class LLM(BaseModel):
@@ -17,11 +17,14 @@ class LLM(BaseModel):
                    local_path: str,
                    tokenizer_path: Optional[str] = None,
                    m_cfg: ModelConfig = ModelConfig(),
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'LLM':
         """Load model from local path, routing to appropriate implementation."""
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.llm_impl.mlx_llm_impl import MLXLLMImpl
             return MLXLLMImpl._load_from(local_path, tokenizer_path, m_cfg, plugin_id, device_id)
         else:
@@ -37,7 +40,7 @@ class LLM(BaseModel):
         self._cancel_event.clear()
 
     @abstractmethod
-    def apply_chat_template(self, messages: list[ChatMessage]) -> str:
+    def apply_chat_template(self, messages: list[ChatMessage], tools: Optional[str] = None, enable_thinking: bool = True, add_generation_prompt: bool = True) -> str:
         """Apply the chat template to messages."""
         pass
 
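The abstract apply_chat_template now also accepts a JSON string of tool definitions plus two prompt-shaping flags. A hypothetical, shape-compatible stub to show the calling convention; this is not the nexaai implementation, only the signature matches the abstract method above:

```python
import json
from typing import Optional


class DummyLLM:
    """Hypothetical stand-in that only mimics the widened signature."""

    def apply_chat_template(self, messages: list, tools: Optional[str] = None,
                            enable_thinking: bool = True,
                            add_generation_prompt: bool = True) -> str:
        lines = [f"<|{m['role']}|> {m['content']}" for m in messages]
        if tools is not None:
            lines.insert(0, f"<|tools|> {tools}")  # tools arrive as a JSON string
        if add_generation_prompt:
            lines.append("<|assistant|>")          # leave room for the model's reply
        return "\n".join(lines)


tools = json.dumps([{"name": "get_weather", "parameters": {"city": "string"}}])
prompt = DummyLLM().apply_chat_template(
    [{"role": "user", "content": "Weather in Paris?"}],
    tools=tools,
    enable_thinking=True,
    add_generation_prompt=True,
)
print(prompt)
```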
nexaai/llm_impl/mlx_llm_impl.py
CHANGED
@@ -1,6 +1,6 @@
-from typing import Generator, Optional, Any
+from typing import Generator, Optional, Any, Sequence, Union
 
-from nexaai.common import ModelConfig, GenerationConfig, ChatMessage
+from nexaai.common import ModelConfig, GenerationConfig, ChatMessage, PluginID
 from nexaai.llm import LLM
 from nexaai.mlx_backend.llm.interface import LLM as MLXLLMInterface
 from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
@@ -17,7 +17,7 @@ class MLXLLMImpl(LLM):
                    local_path: str,
                    tokenizer_path: Optional[str] = None,
                    m_cfg: ModelConfig = ModelConfig(),
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None
                    ) -> 'MLXLLMImpl':
         """Load model from local path using MLX backend."""
@@ -54,7 +54,13 @@ class MLXLLMImpl(LLM):
         self._mlx_llm.destroy()
         self._mlx_llm = None
 
-    def apply_chat_template(
+    def apply_chat_template(
+        self,
+        messages: Sequence[ChatMessage],
+        tools: Optional[str] = None,
+        enable_thinking: bool = True,
+        add_generation_prompt: bool = True
+    ) -> str:
         """Apply the chat template to messages."""
         if not self._mlx_llm:
             raise RuntimeError("MLX LLM not loaded")
@@ -68,9 +74,16 @@ class MLXLLMImpl(LLM):
                 def __init__(self, role, content):
                     self.role = role
                     self.content = content
-
+
+                # Handle both dict-style and attribute-style access
+                if hasattr(msg, 'role') and hasattr(msg, 'content'):
+                    # Message is already an object with attributes
+                    mlx_messages.append(MLXChatMessage(msg.role, msg.content))
+                else:
+                    # Message is a dict
+                    mlx_messages.append(MLXChatMessage(msg["role"], msg["content"]))
 
-            return self._mlx_llm.apply_chat_template(mlx_messages)
+            return self._mlx_llm.apply_chat_template(mlx_messages, tools=tools, enable_thinking=enable_thinking, add_generation_prompt=add_generation_prompt)
         except Exception as e:
             raise RuntimeError(f"Failed to apply chat template: {str(e)}")
 
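The new branch above accepts both dict-style messages (the ChatMessage TypedDict) and objects that already carry .role/.content attributes. A standalone sketch of that normalization; MLXChatMessage here is a local stand-in for the class built inside the method:

```python
class MLXChatMessage:
    """Local stand-in for the message object the MLX backend expects."""

    def __init__(self, role, content):
        self.role = role
        self.content = content


def normalize_messages(messages):
    out = []
    for msg in messages:
        if hasattr(msg, "role") and hasattr(msg, "content"):
            # Already attribute-style (e.g. a message object from another backend).
            out.append(MLXChatMessage(msg.role, msg.content))
        else:
            # Dict / TypedDict style, as produced by nexaai.common.ChatMessage.
            out.append(MLXChatMessage(msg["role"], msg["content"]))
    return out


msgs = normalize_messages([
    {"role": "user", "content": "hi"},
    MLXChatMessage("assistant", "hello"),
])
assert [(m.role, m.content) for m in msgs] == [("user", "hi"), ("assistant", "hello")]
```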
nexaai/llm_impl/pybind_llm_impl.py
CHANGED
@@ -1,8 +1,8 @@
-from typing import Generator, Optional
+from typing import Generator, Optional, Union
 import queue
 import threading
 
-from nexaai.common import ModelConfig, GenerationConfig, ChatMessage
+from nexaai.common import ModelConfig, GenerationConfig, ChatMessage, PluginID
 from nexaai.binds import llm_bind, common_bind
 from nexaai.runtime import _ensure_runtime
 from nexaai.llm import LLM
@@ -19,7 +19,7 @@ class PyBindLLMImpl(LLM):
                    local_path: str,
                    tokenizer_path: Optional[str] = None,
                    m_cfg: ModelConfig = ModelConfig(),
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'PyBindLLMImpl':
         """Load model from local path."""
@@ -49,11 +49,13 @@ class PyBindLLMImpl(LLM):
         config.chat_template_content = m_cfg.chat_template_content
 
         # Create handle : returns py::capsule with automatic cleanup
+        # Convert enum to string for C++ binding
+        plugin_id_str = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
         handle = llm_bind.ml_llm_create(
             model_path=local_path,
             tokenizer_path=tokenizer_path,
             model_config=config,
-            plugin_id=
+            plugin_id=plugin_id_str,
             device_id=device_id
         )
         return cls(handle, m_cfg)
@@ -64,7 +66,7 @@ class PyBindLLMImpl(LLM):
         del self._handle
         self._handle = None
 
-    def apply_chat_template(self, messages: list[ChatMessage]) -> str:
+    def apply_chat_template(self, messages: list[ChatMessage], tools: Optional[str] = None, enable_thinking: bool = True, add_generation_prompt: bool = True) -> str:
         """Apply the chat template to messages."""
         # Convert TypedDict to list of dicts for binding
         message_dicts = [
nexaai/mlx_backend/llm/interface.py
CHANGED
@@ -467,7 +467,7 @@ class LLM(BaseLLM, ProfilingMixin):
         # We'll ignore the argument for now.
         return self.tokenizer.chat_template
 
-    def apply_chat_template(self, messages: Sequence[ChatMessage], tools: Optional[str] = None, enable_thinking: bool = True) -> str:
+    def apply_chat_template(self, messages: Sequence[ChatMessage], tools: Optional[str] = None, enable_thinking: bool = True, add_generation_prompt: bool = True) -> str:
         """
         Apply chat template to messages with incremental prompt support and optional tools.
 
@@ -526,7 +526,7 @@ class LLM(BaseLLM, ProfilingMixin):
                 incremental_messages,
                 tokenize=False,
                 enable_thinking=enable_thinking,
-                add_generation_prompt=
+                add_generation_prompt=add_generation_prompt,
                 tools=parsed_tools
             )
         except Exception as e:
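The second hunk appears to forward the new flag to a Hugging Face-style tokenizer.apply_chat_template (the call site is only partially visible here, but tokenize=False and self.tokenizer.chat_template suggest it). A hedged sketch of what the flag changes; the model id is illustrative and the transformers dependency is assumed, neither comes from this diff:

```python
from transformers import AutoTokenizer

# Illustrative model id; most chat templates only append an assistant header
# when add_generation_prompt is enabled.
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
messages = [{"role": "user", "content": "Hello"}]

with_header = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
without_header = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)

# With the flag on, the rendered prompt ends with the assistant header so the model
# starts a fresh reply; with it off, the transcript simply ends after the user turn.
print(with_header[len(without_header):])
```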
nexaai/rerank.py
CHANGED
@@ -1,8 +1,9 @@
-from typing import List, Optional, Sequence
+from typing import List, Optional, Sequence, Union
 from abc import abstractmethod
 from dataclasses import dataclass
 
 from nexaai.base import BaseModel
+from nexaai.common import PluginID
 
 
 @dataclass
@@ -24,11 +25,14 @@ class Reranker(BaseModel):
     def _load_from(cls,
                    model_path: str,
                    tokenizer_file: str = "tokenizer.json",
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'Reranker':
         """Load reranker model from local path, routing to appropriate implementation."""
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.rerank_impl.mlx_rerank_impl import MLXRerankImpl
             return MLXRerankImpl._load_from(model_path, tokenizer_file, plugin_id, device_id)
         else:
nexaai/rerank_impl/mlx_rerank_impl.py
CHANGED
@@ -1,8 +1,9 @@
 # Note: This code is generated by Cursor, not tested yet.
 
-from typing import List, Optional, Sequence
+from typing import List, Optional, Sequence, Union
 import os
 
+from nexaai.common import PluginID
 from nexaai.rerank import Reranker, RerankConfig
 from nexaai.mlx_backend.rerank.interface import Reranker as MLXRerankInterface, create_reranker
 
@@ -17,7 +18,7 @@ class MLXRerankImpl(Reranker):
     def _load_from(cls,
                    model_path: str,
                    tokenizer_file: str = "tokenizer.json",
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None
                    ) -> 'MLXRerankImpl':
         """Load reranker model from local path using MLX backend."""
nexaai/rerank_impl/pybind_rerank_impl.py
CHANGED
@@ -1,5 +1,6 @@
-from typing import List, Optional, Sequence
+from typing import List, Optional, Sequence, Union
 
+from nexaai.common import PluginID
 from nexaai.rerank import Reranker, RerankConfig
 
 
@@ -13,7 +14,7 @@ class PyBindRerankImpl(Reranker):
     def _load_from(cls,
                    model_path: str,
                    tokenizer_file: str = "tokenizer.json",
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'PyBindRerankImpl':
         """Load reranker model from local path using PyBind backend."""
nexaai/tts.py
CHANGED
@@ -1,8 +1,9 @@
-from typing import List, Optional
+from typing import List, Optional, Union
 from abc import abstractmethod
 from dataclasses import dataclass
 
 from nexaai.base import BaseModel
+from nexaai.common import PluginID
 
 
 @dataclass
@@ -43,11 +44,14 @@ class TTS(BaseModel):
     def _load_from(cls,
                    model_path: str,
                    vocoder_path: str,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'TTS':
         """Load TTS model from local path, routing to appropriate implementation."""
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.tts_impl.mlx_tts_impl import MLXTTSImpl
             return MLXTTSImpl._load_from(model_path, vocoder_path, plugin_id, device_id)
         else:
nexaai/tts_impl/mlx_tts_impl.py
CHANGED
@@ -1,8 +1,9 @@
 # Note: This code is generated by Cursor, not tested yet.
 
-from typing import List, Optional
+from typing import List, Optional, Union
 import os
 
+from nexaai.common import PluginID
 from nexaai.tts import TTS, TTSConfig, TTSResult
 from nexaai.mlx_backend.tts.interface import MlxTts as MLXTTSInterface
 
@@ -17,7 +18,7 @@ class MLXTTSImpl(TTS):
     def _load_from(cls,
                    model_path: str,
                    vocoder_path: str,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None
                    ) -> 'MLXTTSImpl':
         """Load TTS model from local path using MLX backend."""
nexaai/tts_impl/pybind_tts_impl.py
CHANGED
@@ -1,5 +1,6 @@
-from typing import List, Optional
+from typing import List, Optional, Union
 
+from nexaai.common import PluginID
 from nexaai.tts import TTS, TTSConfig, TTSResult
 
 
@@ -13,7 +14,7 @@ class PyBindTTSImpl(TTS):
     def _load_from(cls,
                    model_path: str,
                    vocoder_path: str,
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'PyBindTTSImpl':
         """Load TTS model from local path using PyBind backend."""
nexaai/vlm.py
CHANGED
@@ -5,7 +5,7 @@ import threading
 import base64
 from pathlib import Path
 
-from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage
+from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage, PluginID
 from nexaai.base import BaseModel
 
 
@@ -20,7 +20,7 @@ class VLM(BaseModel):
                    local_path: str,
                    mmproj_path: str,
                    m_cfg: ModelConfig = ModelConfig(),
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'VLM':
         """Load VLM model from local path, routing to appropriate implementation.
@@ -35,7 +35,10 @@ class VLM(BaseModel):
         Returns:
             VLM instance
         """
-
+        # Check plugin_id value for routing - handle both enum and string
+        plugin_value = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+
+        if plugin_value == "mlx":
             from nexaai.vlm_impl.mlx_vlm_impl import MlxVlmImpl
             return MlxVlmImpl._load_from(local_path, mmproj_path, m_cfg, plugin_id, device_id)
         else:
nexaai/vlm_impl/mlx_vlm_impl.py
CHANGED
@@ -1,6 +1,6 @@
-from typing import Generator, Optional, List, Dict, Any
+from typing import Generator, Optional, List, Dict, Any, Union
 
-from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage
+from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage, PluginID
 from nexaai.vlm import VLM
 from nexaai.mlx_backend.vlm.interface import VLM as MLXVLMInterface
 from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
@@ -17,7 +17,7 @@ class MlxVlmImpl(VLM):
                    local_path: str,
                    mmproj_path: str,
                    m_cfg: ModelConfig = ModelConfig(),
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.MLX,
                    device_id: Optional[str] = None
                    ) -> 'MlxVlmImpl':
         """Load VLM model from local path using MLX backend.
nexaai/vlm_impl/pybind_vlm_impl.py
CHANGED
@@ -4,7 +4,7 @@ import threading
 import base64
 from pathlib import Path
 
-from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage
+from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage, PluginID
 from nexaai.binds import vlm_bind, common_bind
 from nexaai.runtime import _ensure_runtime
 from nexaai.vlm import VLM
@@ -21,7 +21,7 @@ class PyBindVLMImpl(VLM):
                    local_path: str,
                    mmproj_path: str,
                    m_cfg: ModelConfig = ModelConfig(),
-                   plugin_id: str =
+                   plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP,
                    device_id: Optional[str] = None
                    ) -> 'PyBindVLMImpl':
         """Load VLM model from local path.
@@ -61,11 +61,13 @@ class PyBindVLMImpl(VLM):
         config.chat_template_content = m_cfg.chat_template_content
 
         # Create handle : returns py::capsule with automatic cleanup
+        # Convert enum to string for C++ binding
+        plugin_id_str = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
         handle = vlm_bind.create_vlm(
             model_path=local_path,
             mmproj_path=mmproj_path,
             model_config=config,
-            plugin_id=
+            plugin_id=plugin_id_str,
             device_id=device_id
         )
         return cls(handle, m_cfg)
{nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/RECORD
CHANGED
@@ -1,44 +1,46 @@
-nexaai/__init__.py,sha256=
-nexaai/_stub.cpython-310-darwin.so,sha256=
-nexaai/_version.py,sha256=
-nexaai/asr.py,sha256=
+nexaai/__init__.py,sha256=jXdC4vv6DBK1fVewYTYSUhOOYfvf_Mk81UIeMGGIKUg,2029
+nexaai/_stub.cpython-310-darwin.so,sha256=7KDZERgfp9KR_dpQvZ7SMrGjlJpPyezLP7v-rYCdqFA,49832
+nexaai/_version.py,sha256=NGCgH5JHTkWsbmkVT9FhcM7m4cxgmEZiw51TUG210EA,143
+nexaai/asr.py,sha256=NljMXDErwPNMOPaRkJZMEDka9Nk8xyur7L8i924TStY,2054
 nexaai/base.py,sha256=N8PRgDFA-XPku2vWnQIofQ7ipz3pPlO6f8YZGnuhquE,982
-nexaai/common.py,sha256=
-nexaai/cv.py,sha256=
-nexaai/embedder.py,sha256=
-nexaai/image_gen.py,sha256=
-nexaai/llm.py,sha256=
-nexaai/rerank.py,sha256=
+nexaai/common.py,sha256=5ElYo4uDP2CT3Kqxoo7XzqcJtDBuwwbIi_Wr14aT9Z4,1659
+nexaai/cv.py,sha256=RHCDo8gvBH8BkGZx7qVyp-OKxqi7E1GG9XzyaXehCNA,3273
+nexaai/embedder.py,sha256=Cw0tSHkPgd-RI62afCqQAcTHMnQhaI2CvfTMO-1JKOg,2452
+nexaai/image_gen.py,sha256=0C_5Tjj4BYmxLbmMmvwajp-yy2mmEEOKwBFnDQNPzx4,4356
+nexaai/llm.py,sha256=QQDRg8zlu-xHmWjtSOsK1vhQBHaqRIdL3T9I4cVX7W4,3416
+nexaai/rerank.py,sha256=vWaBucoQ1wz-2iYnZqyFIcEjm-4Xcs1KDbFN5X8zzDQ,1872
 nexaai/runtime.py,sha256=mxxHYsb5iBUAm2K_u-XJWr_U-spJ9S4eApc8kf9myjw,1957
-nexaai/tts.py,sha256=
-nexaai/vlm.py,sha256=
+nexaai/tts.py,sha256=ZnBpWUxIfHhh7KfEjddtH7hHOTa91zg7ogGLakMIALo,2167
+nexaai/vlm.py,sha256=pZcMWkF2Ml9liVNbHxLqBJxwm2bxVNM1dkoelwWMyIE,4500
 nexaai/asr_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/asr_impl/mlx_asr_impl.py,sha256=
-nexaai/asr_impl/pybind_asr_impl.py,sha256=
+nexaai/asr_impl/mlx_asr_impl.py,sha256=eosd8-TIWAOwV0HltmoFrLwzXHcU4jyxtncvuZE9pgA,3257
+nexaai/asr_impl/pybind_asr_impl.py,sha256=pE9Hb_hMi5yAc4MF83bLVOb8zDtreCkB3_u7XED9YpA,1516
 nexaai/binds/__init__.py,sha256=T9Ua7SzHNglSeEqXlfH5ymYXRyXhNKkC9z_y_bWCNMo,80
 nexaai/binds/common_bind.cpython-310-darwin.so,sha256=FF5WuJj0fNCim_HjseBQu38vL-1M5zI_7EVTD7Bs-Bc,233960
 nexaai/binds/embedder_bind.cpython-310-darwin.so,sha256=mU6hP0SyH8vcmPpC2GIr7ioK7539dsg_YbmrBdmj7l0,202032
-nexaai/binds/
+nexaai/binds/libcrypto.dylib,sha256=ysW8ydmDPnnNRy3AHESjJwMTFfmGDKU9eLIaiR37ca0,5091432
+nexaai/binds/libnexa_bridge.dylib,sha256=8wjwefnWZLAzEqLlnFdjEWXNmTlRD9y9ogO0_ArRUB4,250712
+nexaai/binds/libssl.dylib,sha256=JHPTSbRFnImmoWDO9rFdiKb0lJMT3q78VEsx-5-S0sk,889520
 nexaai/binds/llm_bind.cpython-310-darwin.so,sha256=g4erKCUm2qdMZk1WUrr3IAXixRNp78ViUEkbE5jDOfE,182872
-nexaai/binds/nexa_llama_cpp/libggml-base.dylib,sha256=
+nexaai/binds/nexa_llama_cpp/libggml-base.dylib,sha256=ChIX99NoLhsYVXJvv8iGMIpx-5Rst2gYwux-bEektB4,626992
 nexaai/binds/nexa_llama_cpp/libggml-cpu.so,sha256=tZcQGr6aWSQmTN12ieC2nIJ0lID5-mTkqoGjxJh07b4,1039744
 nexaai/binds/nexa_llama_cpp/libggml-metal.so,sha256=eDWuZ4ui8LsahlU05sNEMZ7lTtZfswKtcGcGvWTB0ro,713680
 nexaai/binds/nexa_llama_cpp/libggml.dylib,sha256=Z2ZvkyEEpPtHhMYap-44p9Q0M6TXJbLcMy-smR2X5sk,58336
 nexaai/binds/nexa_llama_cpp/libllama.dylib,sha256=9pJFMHFlKHiQgLzi8YXextf5dPCYylQkpDv0EvCEssM,1958384
 nexaai/binds/nexa_llama_cpp/libmtmd.dylib,sha256=Etc0ZuYVNo9l1OTQRjZY4cTkgH2S2EL84DpxpWJeoJ4,682480
-nexaai/binds/nexa_llama_cpp/libnexa_plugin.dylib,sha256=
+nexaai/binds/nexa_llama_cpp/libnexa_plugin.dylib,sha256=GzhOq5vFIQAii3zh4oRN_TjcpqNLYEstsBiUAa96fUA,2589576
 nexaai/cv_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/cv_impl/mlx_cv_impl.py,sha256=
-nexaai/cv_impl/pybind_cv_impl.py,sha256=
+nexaai/cv_impl/mlx_cv_impl.py,sha256=gKECQOv8iaWwG3bl7xeqVy2NN_9K7tYerIFzfn4eLo4,3228
+nexaai/cv_impl/pybind_cv_impl.py,sha256=uSmwBste4cT7c8DQmXzRLmzwDf773PAbXNYWW1UzVls,1064
 nexaai/embedder_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/embedder_impl/mlx_embedder_impl.py,sha256=
-nexaai/embedder_impl/pybind_embedder_impl.py,sha256=
+nexaai/embedder_impl/mlx_embedder_impl.py,sha256=OsDzsc_2wZkSoWu6yCOZadMkaYdBW3uyjF11hDKTaX8,4383
+nexaai/embedder_impl/pybind_embedder_impl.py,sha256=Ga1JYauVkRq6jwAGL7Xx5HDaIx483_v9gZVoTyd3xNU,3495
 nexaai/image_gen_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/image_gen_impl/mlx_image_gen_impl.py,sha256=
-nexaai/image_gen_impl/pybind_image_gen_impl.py,sha256=
+nexaai/image_gen_impl/mlx_image_gen_impl.py,sha256=BuDkksvXyb4J02GsdnbGAmYckfUU0Eah6BimoMD3QqY,11219
+nexaai/image_gen_impl/pybind_image_gen_impl.py,sha256=ms34VYoD5AxZFG6cIG0QAJDjCtfphaZ1bHzKzey1xF8,3692
 nexaai/llm_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/llm_impl/mlx_llm_impl.py,sha256=
-nexaai/llm_impl/pybind_llm_impl.py,sha256=
+nexaai/llm_impl/mlx_llm_impl.py,sha256=2Ifc_mfTHDX64BWVHLjOhFCIMqM_Z-Cn4RfExlMtq0s,10865
+nexaai/llm_impl/pybind_llm_impl.py,sha256=DpO38rlGcvf0Zpe4bPKsbPD3EguBf0dDS9Ve64bgdvo,7653
 nexaai/mlx_backend/ml.py,sha256=LafDM_TeXmuQkld2tdQxUBGgooT0JPMXngLam2TADqU,23179
 nexaai/mlx_backend/profiling.py,sha256=Dc-mybFwBdCIKFWL7CbSHjkOJGAoYHG7r_e_XPhzwBU,9361
 nexaai/mlx_backend/asr/__init__.py,sha256=fuT_9_xpYJ28m4yjly5L2jChUrzlSQz-b_S7nujxkSM,451
@@ -58,7 +60,7 @@ nexaai/mlx_backend/embedding/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCe
 nexaai/mlx_backend/embedding/modeling/nexa_jina_v2.py,sha256=F9Z_9r-Dh0wNThiMp5W5hqE2dt5bf4ps5_c6h4BuWGw,15218
 nexaai/mlx_backend/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nexaai/mlx_backend/llm/generate.py,sha256=Phes0tzxbbEWA2hDylQvD0LjorMaPwvcfZq9RKCAOt0,4399
-nexaai/mlx_backend/llm/interface.py,sha256=
+nexaai/mlx_backend/llm/interface.py,sha256=YBLAdz_5gQ1VF9o98Tuj6xB_M2nUB9kX9VkM-Mp6ryc,29310
 nexaai/mlx_backend/llm/main.py,sha256=gFDE4VZv_CLKMCTn0N521OfCKH_Ys26bHDh6g9VEFNc,1982
 nexaai/mlx_backend/mlx_audio/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nexaai/mlx_backend/mlx_audio/server.py,sha256=Pqy13Fafq4WX_cTuvRFz1jq89beQm2QQGpXmhK4b9jc,17547
@@ -349,19 +351,19 @@ nexaai/mlx_backend/vlm/modeling/trainer/lora.py,sha256=tGjvenjEQ8_1Az8Nz3smz5Mgv
 nexaai/mlx_backend/vlm/modeling/trainer/trainer.py,sha256=h16SaHt76JzFruXuidgXDx7_2evx4L0SecvzqLmhyZw,9081
 nexaai/mlx_backend/vlm/modeling/trainer/utils.py,sha256=29oHf_7946YeJKP_-Dt-NPeN4xJq8Fj7Yv4jZKO9RWA,4909
 nexaai/rerank_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/rerank_impl/mlx_rerank_impl.py,sha256=
-nexaai/rerank_impl/pybind_rerank_impl.py,sha256=
+nexaai/rerank_impl/mlx_rerank_impl.py,sha256=h37PKSIRBY8mwzVeLeP4ix9ui3waIsg4gorzelYLJbM,3243
+nexaai/rerank_impl/pybind_rerank_impl.py,sha256=CtwkG7YrW58GPMDERJSnISGTVCXWNju5__R2W837t7c,1513
 nexaai/tts_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/tts_impl/mlx_tts_impl.py,sha256=
-nexaai/tts_impl/pybind_tts_impl.py,sha256=
+nexaai/tts_impl/mlx_tts_impl.py,sha256=i_uNPdvlXYtL3e01oKjDlP9jgkWCRt1bBHsExaaiJi8,3101
+nexaai/tts_impl/pybind_tts_impl.py,sha256=mpn44r6pfYLIl-NrEy2dXHjGtWtNCmM7HRyxiANxUI4,1444
 nexaai/utils/avatar_fetcher.py,sha256=bWy8ujgbOiTHFCjFxTwkn3uXbZ84PgEGUkXkR3MH4bI,3821
 nexaai/utils/decode.py,sha256=61n4Zf6c5QLyqGoctEitlI9BX3tPlP2a5aaKNHbw3T4,404
 nexaai/utils/model_manager.py,sha256=c07ocxxw1IHCQw6esbmYK0dX2R2OajfEIGsC_2teHXo,48572
 nexaai/utils/progress_tracker.py,sha256=76HlPkyN41IMHSsH56-qdlN_aY_oBfJz50J16Cx67R0,15102
 nexaai/vlm_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nexaai/vlm_impl/mlx_vlm_impl.py,sha256=
-nexaai/vlm_impl/pybind_vlm_impl.py,sha256=
-nexaai-1.0.
-nexaai-1.0.
-nexaai-1.0.
-nexaai-1.0.
+nexaai/vlm_impl/mlx_vlm_impl.py,sha256=Dm-N38wqK3Cjdk3n7wfVGKC7hwxHvaM8pz37VzvJC-Y,10443
+nexaai/vlm_impl/pybind_vlm_impl.py,sha256=mvydHMHNWtkmyqouLIj1XSYZgsro3tcp3s_aqkjljE0,8510
+nexaai-1.0.4rc16.dist-info/METADATA,sha256=NuLsDWtJssKVjTNP4oo-tFItIBxIbiq-0hTq1rv706s,883
+nexaai-1.0.4rc16.dist-info/WHEEL,sha256=0KYp5feZ1CMUhsfFXKpSQTbSmQbXy4mv6yPPVBXg2EM,110
+nexaai-1.0.4rc16.dist-info/top_level.txt,sha256=LRE2YERlrZk2vfuygnSzsEeqSknnZbz3Z1MHyNmBU4w,7
+nexaai-1.0.4rc16.dist-info/RECORD,,
{nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/WHEEL
File without changes
{nexaai-1.0.4rc15.dist-info → nexaai-1.0.4rc16.dist-info}/top_level.txt
File without changes