nexaai 1.0.21rc5-cp313-cp313-win_arm64.whl → 1.0.21rc16-cp313-cp313-win_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of nexaai might be problematic.
Files changed (104)
  1. nexaai/__init__.py +95 -95
  2. nexaai/_stub.cp313-win_arm64.pyd +0 -0
  3. nexaai/_version.py +4 -1
  4. nexaai/asr.py +68 -65
  5. nexaai/asr_impl/mlx_asr_impl.py +92 -92
  6. nexaai/asr_impl/pybind_asr_impl.py +127 -44
  7. nexaai/base.py +39 -39
  8. nexaai/binds/__init__.py +6 -5
  9. nexaai/binds/asr_bind.cp313-win_arm64.pyd +0 -0
  10. nexaai/binds/common_bind.cp313-win_arm64.pyd +0 -0
  11. nexaai/binds/cpu_gpu/ggml-base.dll +0 -0
  12. nexaai/binds/cpu_gpu/ggml-cpu.dll +0 -0
  13. nexaai/binds/cpu_gpu/ggml-opencl.dll +0 -0
  14. nexaai/binds/cpu_gpu/ggml.dll +0 -0
  15. nexaai/binds/cpu_gpu/mtmd.dll +0 -0
  16. nexaai/binds/cpu_gpu/nexa_cpu_gpu.dll +0 -0
  17. nexaai/binds/cpu_gpu/nexa_plugin.dll +0 -0
  18. nexaai/binds/embedder_bind.cp313-win_arm64.pyd +0 -0
  19. nexaai/binds/libcrypto-3-arm64.dll +0 -0
  20. nexaai/binds/libssl-3-arm64.dll +0 -0
  21. nexaai/binds/llm_bind.cp313-win_arm64.pyd +0 -0
  22. nexaai/binds/nexa_bridge.dll +0 -0
  23. nexaai/binds/npu/convnext-sdk.dll +0 -0
  24. nexaai/binds/npu/embed-gemma-sdk.dll +0 -0
  25. nexaai/binds/npu/ggml-base.dll +0 -0
  26. nexaai/binds/npu/ggml-cpu.dll +0 -0
  27. nexaai/binds/{nexaml → npu}/ggml-opencl.dll +0 -0
  28. nexaai/binds/npu/ggml.dll +0 -0
  29. nexaai/binds/npu/granite-nano-sdk.dll +0 -0
  30. nexaai/binds/npu/granite4-sdk.dll +0 -0
  31. nexaai/binds/npu/jina-rerank-sdk.dll +0 -0
  32. nexaai/binds/npu/liquid-sdk.dll +0 -0
  33. nexaai/binds/npu/llama3-3b-sdk.dll +0 -0
  34. nexaai/binds/npu/nexa-mm-process.dll +0 -0
  35. nexaai/binds/npu/nexa-sampling.dll +0 -0
  36. nexaai/binds/npu/nexa_plugin.dll +0 -0
  37. nexaai/binds/npu/omni-neural-sdk.dll +0 -0
  38. nexaai/binds/npu/openblas.dll +0 -0
  39. nexaai/binds/npu/paddleocr-sdk.dll +0 -0
  40. nexaai/binds/npu/parakeet-sdk.dll +0 -0
  41. nexaai/binds/npu/phi3-5-sdk.dll +0 -0
  42. nexaai/binds/npu/phi4-sdk.dll +0 -0
  43. nexaai/binds/npu/pyannote-sdk.dll +0 -0
  44. nexaai/binds/npu/qwen3-4b-sdk.dll +0 -0
  45. nexaai/binds/npu/qwen3vl-sdk.dll +0 -0
  46. nexaai/binds/npu/qwen3vl-vision.dll +0 -0
  47. nexaai/binds/npu/yolov12-sdk.dll +0 -0
  48. nexaai/binds/npu/zlib1.dll +0 -0
  49. nexaai/binds/rerank_bind.cp313-win_arm64.pyd +0 -0
  50. nexaai/binds/vlm_bind.cp313-win_arm64.pyd +0 -0
  51. nexaai/common.py +105 -105
  52. nexaai/cv.py +93 -93
  53. nexaai/cv_impl/mlx_cv_impl.py +89 -89
  54. nexaai/cv_impl/pybind_cv_impl.py +32 -32
  55. nexaai/embedder.py +73 -73
  56. nexaai/embedder_impl/mlx_embedder_impl.py +118 -118
  57. nexaai/embedder_impl/pybind_embedder_impl.py +96 -96
  58. nexaai/image_gen.py +141 -141
  59. nexaai/image_gen_impl/mlx_image_gen_impl.py +292 -292
  60. nexaai/image_gen_impl/pybind_image_gen_impl.py +85 -85
  61. nexaai/llm.py +98 -98
  62. nexaai/llm_impl/mlx_llm_impl.py +271 -271
  63. nexaai/llm_impl/pybind_llm_impl.py +220 -220
  64. nexaai/log.py +92 -92
  65. nexaai/rerank.py +57 -57
  66. nexaai/rerank_impl/mlx_rerank_impl.py +94 -94
  67. nexaai/rerank_impl/pybind_rerank_impl.py +136 -136
  68. nexaai/runtime.py +68 -68
  69. nexaai/runtime_error.py +24 -24
  70. nexaai/tts.py +75 -75
  71. nexaai/tts_impl/mlx_tts_impl.py +94 -94
  72. nexaai/tts_impl/pybind_tts_impl.py +43 -43
  73. nexaai/utils/decode.py +17 -17
  74. nexaai/utils/manifest_utils.py +531 -531
  75. nexaai/utils/model_manager.py +1562 -1562
  76. nexaai/utils/model_types.py +49 -49
  77. nexaai/utils/progress_tracker.py +384 -384
  78. nexaai/utils/quantization_utils.py +245 -245
  79. nexaai/vlm.py +129 -129
  80. nexaai/vlm_impl/mlx_vlm_impl.py +258 -258
  81. nexaai/vlm_impl/pybind_vlm_impl.py +256 -256
  82. {nexaai-1.0.21rc5.dist-info → nexaai-1.0.21rc16.dist-info}/METADATA +1 -1
  83. nexaai-1.0.21rc16.dist-info/RECORD +154 -0
  84. nexaai/binds/nexaml/FLAC.dll +0 -0
  85. nexaai/binds/nexaml/fftw3.dll +0 -0
  86. nexaai/binds/nexaml/fftw3f.dll +0 -0
  87. nexaai/binds/nexaml/ggml-base.dll +0 -0
  88. nexaai/binds/nexaml/ggml-cpu.dll +0 -0
  89. nexaai/binds/nexaml/ggml.dll +0 -0
  90. nexaai/binds/nexaml/libmp3lame.DLL +0 -0
  91. nexaai/binds/nexaml/mpg123.dll +0 -0
  92. nexaai/binds/nexaml/nexa-mm-process.dll +0 -0
  93. nexaai/binds/nexaml/nexa-sampling.dll +0 -0
  94. nexaai/binds/nexaml/nexa_plugin.dll +0 -0
  95. nexaai/binds/nexaml/nexaproc.dll +0 -0
  96. nexaai/binds/nexaml/ogg.dll +0 -0
  97. nexaai/binds/nexaml/opus.dll +0 -0
  98. nexaai/binds/nexaml/qwen3-vl.dll +0 -0
  99. nexaai/binds/nexaml/qwen3vl-vision.dll +0 -0
  100. nexaai/binds/nexaml/vorbis.dll +0 -0
  101. nexaai/binds/nexaml/vorbisenc.dll +0 -0
  102. nexaai-1.0.21rc5.dist-info/RECORD +0 -162
  103. {nexaai-1.0.21rc5.dist-info → nexaai-1.0.21rc16.dist-info}/WHEEL +0 -0
  104. {nexaai-1.0.21rc5.dist-info → nexaai-1.0.21rc16.dist-info}/top_level.txt +0 -0
nexaai/embedder_impl/mlx_embedder_impl.py
@@ -1,118 +1,118 @@
- from typing import List, Union
- import numpy as np
-
- from nexaai.common import PluginID
- from nexaai.embedder import Embedder, EmbeddingConfig
- from nexaai.mlx_backend.embedding.interface import create_embedder
- from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
-
-
- class MLXEmbedderImpl(Embedder):
-     def __init__(self):
-         """Initialize MLX Embedder implementation."""
-         super().__init__()
-         self._mlx_embedder = None
-
-     @classmethod
-     def _load_from(cls, model_path: str, model_name: str = None, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.MLX):
-         """
-         Load an embedder from model files using MLX backend.
-
-         Args:
-             model_path: Path to the model file
-             model_name: Name of the model
-             tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
-             plugin_id: Plugin ID to use for the model (default: PluginID.MLX)
-
-         Returns:
-             MLXEmbedderImpl instance
-         """
-         try:
-             # Create instance
-             instance = cls()
-
-             # Use the factory function to create the appropriate embedder based on model type
-             # This will automatically detect if it's JinaV2 or generic model and route correctly
-             instance._mlx_embedder = create_embedder(
-                 model_path=model_path,
-                 # model_name=model_name, # FIXME: For MLX Embedder, model_name is not used
-                 tokenizer_path=tokenizer_file
-             )
-
-             # Load the model
-             success = instance._mlx_embedder.load_model(model_path)
-             if not success:
-                 raise RuntimeError("Failed to load MLX embedder model")
-
-             return instance
-         except Exception as e:
-             raise RuntimeError(f"Failed to load MLX Embedder: {str(e)}")
-
-     def eject(self):
-         """
-         Clean up resources and destroy the embedder
-         """
-         if self._mlx_embedder:
-             self._mlx_embedder.destroy()
-             self._mlx_embedder = None
-
-     def generate(self, texts: Union[List[str], str] = None, config: EmbeddingConfig = EmbeddingConfig(), input_ids: Union[List[int], List[List[int]]] = None) -> np.ndarray:
-         """
-         Generate embeddings for the given texts or input_ids.
-
-         Args:
-             texts: List of strings or single string to embed
-             input_ids: Pre-tokenized input as:
-                 - Single sequence: list of integers [1, 2, 3, 4]
-                 - Multiple sequences: list of lists [[1, 2, 3], [4, 5, 6]]
-             config: Configuration for embedding generation
-
-         Returns:
-             numpy array of embeddings with shape (num_sequences, embedding_dim)
-         """
-         if not self._mlx_embedder:
-             raise RuntimeError("MLX Embedder not loaded")
-
-         if texts is None and input_ids is None:
-             raise ValueError("Either texts or input_ids must be provided")
-
-         # MLX embedder currently only supports text input, not pre-tokenized input_ids
-         if input_ids is not None:
-             raise NotImplementedError("MLX embedder does not support input_ids, only text input")
-
-         try:
-             # Convert single string to list if needed
-             if isinstance(texts, str):
-                 texts = [texts]
-
-             # MLX config classes are already imported
-
-             # Convert our config to MLX config
-             mlx_config = EmbeddingConfig()
-             mlx_config.batch_size = config.batch_size
-             mlx_config.normalize = config.normalize
-             mlx_config.normalize_method = config.normalize_method
-
-             # Generate embeddings using MLX
-             embeddings = self._mlx_embedder.embed(texts, mlx_config)
-
-             # Convert to numpy array
-             return np.array(embeddings, dtype=np.float32)
-
-         except Exception as e:
-             raise RuntimeError(f"Failed to generate embeddings: {str(e)}")
-
-     def get_embedding_dim(self) -> int:
-         """
-         Get the embedding dimension of the model
-
-         Returns:
-             The embedding dimension in int
-         """
-         if not self._mlx_embedder:
-             raise RuntimeError("MLX Embedder not loaded")
-
-         try:
-             return self._mlx_embedder.embedding_dim()
-         except Exception as e:
-             raise RuntimeError(f"Failed to get embedding dimension: {str(e)}")
+ from typing import List, Union
+ import numpy as np
+
+ from nexaai.common import PluginID
+ from nexaai.embedder import Embedder, EmbeddingConfig
+ from nexaai.mlx_backend.embedding.interface import create_embedder
+ from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
+
+
+ class MLXEmbedderImpl(Embedder):
+     def __init__(self):
+         """Initialize MLX Embedder implementation."""
+         super().__init__()
+         self._mlx_embedder = None
+
+     @classmethod
+     def _load_from(cls, model_path: str, model_name: str = None, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.MLX):
+         """
+         Load an embedder from model files using MLX backend.
+
+         Args:
+             model_path: Path to the model file
+             model_name: Name of the model
+             tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
+             plugin_id: Plugin ID to use for the model (default: PluginID.MLX)
+
+         Returns:
+             MLXEmbedderImpl instance
+         """
+         try:
+             # Create instance
+             instance = cls()
+
+             # Use the factory function to create the appropriate embedder based on model type
+             # This will automatically detect if it's JinaV2 or generic model and route correctly
+             instance._mlx_embedder = create_embedder(
+                 model_path=model_path,
+                 # model_name=model_name, # FIXME: For MLX Embedder, model_name is not used
+                 tokenizer_path=tokenizer_file
+             )
+
+             # Load the model
+             success = instance._mlx_embedder.load_model(model_path)
+             if not success:
+                 raise RuntimeError("Failed to load MLX embedder model")
+
+             return instance
+         except Exception as e:
+             raise RuntimeError(f"Failed to load MLX Embedder: {str(e)}")
+
+     def eject(self):
+         """
+         Clean up resources and destroy the embedder
+         """
+         if self._mlx_embedder:
+             self._mlx_embedder.destroy()
+             self._mlx_embedder = None
+
+     def generate(self, texts: Union[List[str], str] = None, config: EmbeddingConfig = EmbeddingConfig(), input_ids: Union[List[int], List[List[int]]] = None) -> np.ndarray:
+         """
+         Generate embeddings for the given texts or input_ids.
+
+         Args:
+             texts: List of strings or single string to embed
+             input_ids: Pre-tokenized input as:
+                 - Single sequence: list of integers [1, 2, 3, 4]
+                 - Multiple sequences: list of lists [[1, 2, 3], [4, 5, 6]]
+             config: Configuration for embedding generation
+
+         Returns:
+             numpy array of embeddings with shape (num_sequences, embedding_dim)
+         """
+         if not self._mlx_embedder:
+             raise RuntimeError("MLX Embedder not loaded")
+
+         if texts is None and input_ids is None:
+             raise ValueError("Either texts or input_ids must be provided")
+
+         # MLX embedder currently only supports text input, not pre-tokenized input_ids
+         if input_ids is not None:
+             raise NotImplementedError("MLX embedder does not support input_ids, only text input")
+
+         try:
+             # Convert single string to list if needed
+             if isinstance(texts, str):
+                 texts = [texts]
+
+             # MLX config classes are already imported
+
+             # Convert our config to MLX config
+             mlx_config = EmbeddingConfig()
+             mlx_config.batch_size = config.batch_size
+             mlx_config.normalize = config.normalize
+             mlx_config.normalize_method = config.normalize_method
+
+             # Generate embeddings using MLX
+             embeddings = self._mlx_embedder.embed(texts, mlx_config)
+
+             # Convert to numpy array
+             return np.array(embeddings, dtype=np.float32)
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to generate embeddings: {str(e)}")
+
+     def get_embedding_dim(self) -> int:
+         """
+         Get the embedding dimension of the model
+
+         Returns:
+             The embedding dimension in int
+         """
+         if not self._mlx_embedder:
+             raise RuntimeError("MLX Embedder not loaded")
+
+         try:
+             return self._mlx_embedder.embedding_dim()
+         except Exception as e:
+             raise RuntimeError(f"Failed to get embedding dimension: {str(e)}")
nexaai/embedder_impl/pybind_embedder_impl.py
@@ -1,96 +1,96 @@
- from typing import List, Union
- import numpy as np
-
- from nexaai.common import PluginID
- from nexaai.embedder import Embedder, EmbeddingConfig
- from nexaai.binds import embedder_bind
- from nexaai.runtime import _ensure_runtime
-
-
- class PyBindEmbedderImpl(Embedder):
-     def __init__(self, _handle_ptr):
-         """
-         Internal initializer
-         """
-         super().__init__()
-         self._handle = _handle_ptr
-
-     @classmethod
-     def _load_from(cls, model_path: str, model_name: str = None, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP):
-         """
-         Load an embedder from model files
-
-         Args:
-             model_path: Path to the model file
-             model_name: Name of the model
-             tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
-             plugin_id: Plugin ID to use for the model (default: PluginID.LLAMA_CPP)
-
-         Returns:
-             PyBindEmbedderImpl instance
-         """
-         _ensure_runtime()
-         # Convert enum to string for C++ binding
-         plugin_id_str = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
-         # New parameter order: model_path, plugin_id, tokenizer_path (optional)
-         handle = embedder_bind.ml_embedder_create(model_path, model_name, plugin_id_str, tokenizer_file)
-         return cls(handle)
-
-     def eject(self):
-         """
-         Clean up resources and destroy the embedder
-         """
-         # Destructor of the handle will unload the model correctly
-         del self._handle
-         self._handle = None
-
-     def generate(self, texts: Union[List[str], str] = None, config: EmbeddingConfig = EmbeddingConfig(), input_ids: Union[List[int], List[List[int]]] = None) -> np.ndarray:
-         """
-         Generate embeddings for the given texts or input_ids.
-
-         Args:
-             texts: List of strings or single string to embed
-             input_ids: Pre-tokenized input as:
-                 - Single sequence: list of integers [1, 2, 3, 4]
-                 - Multiple sequences: list of lists [[1, 2, 3], [4, 5, 6]]
-             config: Configuration for embedding generation
-
-         Returns:
-             numpy array of embeddings with shape (num_sequences, embedding_dim)
-         """
-         if texts is None and input_ids is None:
-             raise ValueError("Either texts or input_ids must be provided")
-
-         # Create bind config
-         bind_config = embedder_bind.EmbeddingConfig()
-         bind_config.batch_size = config.batch_size
-         bind_config.normalize = config.normalize
-         bind_config.normalize_method = config.normalize_method
-
-         # Convert single string to list if needed
-         if isinstance(texts, str):
-             texts = [texts]
-
-         # Convert input_ids to 2D format if needed
-         processed_input_ids = None
-         if input_ids is not None:
-             if len(input_ids) > 0 and isinstance(input_ids[0], int):
-                 # Single sequence: convert [1, 2, 3] to [[1, 2, 3]]
-                 processed_input_ids = [input_ids]
-             else:
-                 # Multiple sequences: already in correct format [[1, 2], [3, 4]]
-                 processed_input_ids = input_ids
-
-         # Pass both parameters, let the ABI handle validation
-         embeddings = embedder_bind.ml_embedder_embed(self._handle, bind_config, texts, processed_input_ids)
-
-         return embeddings
-
-     def get_embedding_dim(self) -> int:
-         """
-         Get the embedding dimension of the model
-
-         Returns:
-             The embedding dimension in int
-         """
-         return embedder_bind.ml_embedder_embedding_dim(self._handle)
+ from typing import List, Union
+ import numpy as np
+
+ from nexaai.common import PluginID
+ from nexaai.embedder import Embedder, EmbeddingConfig
+ from nexaai.binds import embedder_bind
+ from nexaai.runtime import _ensure_runtime
+
+
+ class PyBindEmbedderImpl(Embedder):
+     def __init__(self, _handle_ptr):
+         """
+         Internal initializer
+         """
+         super().__init__()
+         self._handle = _handle_ptr
+
+     @classmethod
+     def _load_from(cls, model_path: str, model_name: str = None, tokenizer_file: str = "tokenizer.json", plugin_id: Union[PluginID, str] = PluginID.LLAMA_CPP):
+         """
+         Load an embedder from model files
+
+         Args:
+             model_path: Path to the model file
+             model_name: Name of the model
+             tokenizer_file: Path to the tokenizer file (default: "tokenizer.json")
+             plugin_id: Plugin ID to use for the model (default: PluginID.LLAMA_CPP)
+
+         Returns:
+             PyBindEmbedderImpl instance
+         """
+         _ensure_runtime()
+         # Convert enum to string for C++ binding
+         plugin_id_str = plugin_id.value if isinstance(plugin_id, PluginID) else plugin_id
+         # New parameter order: model_path, plugin_id, tokenizer_path (optional)
+         handle = embedder_bind.ml_embedder_create(model_path, model_name, plugin_id_str, tokenizer_file)
+         return cls(handle)
+
+     def eject(self):
+         """
+         Clean up resources and destroy the embedder
+         """
+         # Destructor of the handle will unload the model correctly
+         del self._handle
+         self._handle = None
+
+     def generate(self, texts: Union[List[str], str] = None, config: EmbeddingConfig = EmbeddingConfig(), input_ids: Union[List[int], List[List[int]]] = None) -> np.ndarray:
+         """
+         Generate embeddings for the given texts or input_ids.
+
+         Args:
+             texts: List of strings or single string to embed
+             input_ids: Pre-tokenized input as:
+                 - Single sequence: list of integers [1, 2, 3, 4]
+                 - Multiple sequences: list of lists [[1, 2, 3], [4, 5, 6]]
+             config: Configuration for embedding generation
+
+         Returns:
+             numpy array of embeddings with shape (num_sequences, embedding_dim)
+         """
+         if texts is None and input_ids is None:
+             raise ValueError("Either texts or input_ids must be provided")
+
+         # Create bind config
+         bind_config = embedder_bind.EmbeddingConfig()
+         bind_config.batch_size = config.batch_size
+         bind_config.normalize = config.normalize
+         bind_config.normalize_method = config.normalize_method
+
+         # Convert single string to list if needed
+         if isinstance(texts, str):
+             texts = [texts]
+
+         # Convert input_ids to 2D format if needed
+         processed_input_ids = None
+         if input_ids is not None:
+             if len(input_ids) > 0 and isinstance(input_ids[0], int):
+                 # Single sequence: convert [1, 2, 3] to [[1, 2, 3]]
+                 processed_input_ids = [input_ids]
+             else:
+                 # Multiple sequences: already in correct format [[1, 2], [3, 4]]
+                 processed_input_ids = input_ids
+
+         # Pass both parameters, let the ABI handle validation
+         embeddings = embedder_bind.ml_embedder_embed(self._handle, bind_config, texts, processed_input_ids)
+
+         return embeddings
+
+     def get_embedding_dim(self) -> int:
+         """
+         Get the embedding dimension of the model
+
+         Returns:
+             The embedding dimension in int
+         """
+         return embedder_bind.ml_embedder_embedding_dim(self._handle)
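
The input_ids branch in generate above normalizes the two accepted shapes into the 2-D batch layout the binding expects. A self-contained mirror of just that normalization, in plain Python so it runs without nexaai installed (the function name normalize_input_ids is ours, not the package's):

from typing import List, Union

def normalize_input_ids(input_ids: Union[List[int], List[List[int]]]) -> List[List[int]]:
    """Mirror of the 2-D normalization in PyBindEmbedderImpl.generate:
    a flat list of ints becomes a one-row batch; a list of lists passes
    through unchanged."""
    if len(input_ids) > 0 and isinstance(input_ids[0], int):
        return [input_ids]  # single sequence: [1, 2, 3] -> [[1, 2, 3]]
    return input_ids        # already batched: [[1, 2], [3, 4]]

assert normalize_input_ids([1, 2, 3]) == [[1, 2, 3]]
assert normalize_input_ids([[1, 2], [3, 4]]) == [[1, 2], [3, 4]]
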