nexaai-1.0.4rc13-cp310-cp310-win_amd64.whl

This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package exactly as it appears in its public registry.

Potentially problematic release: this version of nexaai might be problematic.
Files changed (59)
  1. nexaai/__init__.py +71 -0
  2. nexaai/_stub.cp310-win_amd64.pyd +0 -0
  3. nexaai/_version.py +4 -0
  4. nexaai/asr.py +60 -0
  5. nexaai/asr_impl/__init__.py +0 -0
  6. nexaai/asr_impl/mlx_asr_impl.py +91 -0
  7. nexaai/asr_impl/pybind_asr_impl.py +43 -0
  8. nexaai/base.py +39 -0
  9. nexaai/binds/__init__.py +3 -0
  10. nexaai/binds/common_bind.cp310-win_amd64.pyd +0 -0
  11. nexaai/binds/embedder_bind.cp310-win_amd64.pyd +0 -0
  12. nexaai/binds/llm_bind.cp310-win_amd64.pyd +0 -0
  13. nexaai/binds/nexa_bridge.dll +0 -0
  14. nexaai/binds/nexa_llama_cpp/ggml-base.dll +0 -0
  15. nexaai/binds/nexa_llama_cpp/ggml-cpu.dll +0 -0
  16. nexaai/binds/nexa_llama_cpp/ggml-cuda.dll +0 -0
  17. nexaai/binds/nexa_llama_cpp/ggml-vulkan.dll +0 -0
  18. nexaai/binds/nexa_llama_cpp/ggml.dll +0 -0
  19. nexaai/binds/nexa_llama_cpp/llama.dll +0 -0
  20. nexaai/binds/nexa_llama_cpp/mtmd.dll +0 -0
  21. nexaai/binds/nexa_llama_cpp/nexa_plugin.dll +0 -0
  22. nexaai/common.py +61 -0
  23. nexaai/cv.py +87 -0
  24. nexaai/cv_impl/__init__.py +0 -0
  25. nexaai/cv_impl/mlx_cv_impl.py +88 -0
  26. nexaai/cv_impl/pybind_cv_impl.py +31 -0
  27. nexaai/embedder.py +68 -0
  28. nexaai/embedder_impl/__init__.py +0 -0
  29. nexaai/embedder_impl/mlx_embedder_impl.py +114 -0
  30. nexaai/embedder_impl/pybind_embedder_impl.py +91 -0
  31. nexaai/image_gen.py +136 -0
  32. nexaai/image_gen_impl/__init__.py +0 -0
  33. nexaai/image_gen_impl/mlx_image_gen_impl.py +291 -0
  34. nexaai/image_gen_impl/pybind_image_gen_impl.py +84 -0
  35. nexaai/llm.py +89 -0
  36. nexaai/llm_impl/__init__.py +0 -0
  37. nexaai/llm_impl/mlx_llm_impl.py +249 -0
  38. nexaai/llm_impl/pybind_llm_impl.py +207 -0
  39. nexaai/rerank.py +51 -0
  40. nexaai/rerank_impl/__init__.py +0 -0
  41. nexaai/rerank_impl/mlx_rerank_impl.py +91 -0
  42. nexaai/rerank_impl/pybind_rerank_impl.py +42 -0
  43. nexaai/runtime.py +64 -0
  44. nexaai/tts.py +70 -0
  45. nexaai/tts_impl/__init__.py +0 -0
  46. nexaai/tts_impl/mlx_tts_impl.py +93 -0
  47. nexaai/tts_impl/pybind_tts_impl.py +42 -0
  48. nexaai/utils/avatar_fetcher.py +104 -0
  49. nexaai/utils/decode.py +18 -0
  50. nexaai/utils/model_manager.py +1195 -0
  51. nexaai/utils/progress_tracker.py +372 -0
  52. nexaai/vlm.py +120 -0
  53. nexaai/vlm_impl/__init__.py +0 -0
  54. nexaai/vlm_impl/mlx_vlm_impl.py +205 -0
  55. nexaai/vlm_impl/pybind_vlm_impl.py +228 -0
  56. nexaai-1.0.4rc13.dist-info/METADATA +26 -0
  57. nexaai-1.0.4rc13.dist-info/RECORD +59 -0
  58. nexaai-1.0.4rc13.dist-info/WHEEL +5 -0
  59. nexaai-1.0.4rc13.dist-info/top_level.txt +1 -0
nexaai/vlm_impl/mlx_vlm_impl.py
@@ -0,0 +1,205 @@
+ from typing import Generator, Optional, List, Dict, Any
+
+ from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage
+ from nexaai.vlm import VLM
+ from nexaai.mlx_backend.vlm.interface import VLM as MLXVLMInterface
+ from nexaai.mlx_backend.ml import ModelConfig as MLXModelConfig, SamplerConfig as MLXSamplerConfig, GenerationConfig as MLXGenerationConfig, EmbeddingConfig
+
+
+ class MlxVlmImpl(VLM):
+     def __init__(self, m_cfg: ModelConfig = ModelConfig()):
+         """Initialize MLX VLM implementation."""
+         super().__init__(m_cfg)
+         self._mlx_vlm = None
+
+     @classmethod
+     def _load_from(cls,
+                    local_path: str,
+                    mmproj_path: str,
+                    m_cfg: ModelConfig = ModelConfig(),
+                    plugin_id: str = "mlx",
+                    device_id: Optional[str] = None
+                    ) -> 'MlxVlmImpl':
+         """Load VLM model from local path using MLX backend.
+
+         Args:
+             local_path: Path to the main model file
+             mmproj_path: Path to the multimodal projection file (not used in MLX VLM)
+             m_cfg: Model configuration
+             plugin_id: Plugin identifier
+             device_id: Optional device ID
+
+         Returns:
+             MlxVlmImpl instance
+         """
+         try:
+             # MLX interface is already imported
+
+             # Create instance and load MLX VLM
+             instance = cls(m_cfg)
+             instance._mlx_vlm = MLXVLMInterface(
+                 model_path=local_path,
+                 mmproj_path=mmproj_path,  # MLX VLM may not use this, but pass it anyway
+                 context_length=m_cfg.n_ctx,
+                 device=device_id
+             )
+
+             return instance
+         except Exception as e:
+             raise RuntimeError(f"Failed to load MLX VLM: {str(e)}")
+
+     def eject(self):
+         """Release the model from memory."""
+         if self._mlx_vlm:
+             self._mlx_vlm.destroy()
+             self._mlx_vlm = None
+
+     def reset(self):
+         """
+         Reset the VLM model context and KV cache.
+         """
+         if not self._mlx_vlm:
+             raise RuntimeError("MLX VLM not loaded")
+
+         try:
+             self._mlx_vlm.reset()
+         except Exception as e:
+             raise RuntimeError(f"Failed to reset MLX VLM: {str(e)}")
+
+     def apply_chat_template(
+         self,
+         messages: List[MultiModalMessage],
+         tools: Optional[List[Dict[str, Any]]] = None
+     ) -> str:
+         """Apply the chat template to multimodal messages."""
+         if not self._mlx_vlm:
+             raise RuntimeError("MLX VLM not loaded")
+
+         try:
+             # Convert MultiModalMessage to MLX format
+             mlx_messages = []
+             for msg in messages:
+                 # Create a simple object with role and content attributes
+                 class MLXChatMessage:
+                     def __init__(self, role, content):
+                         self.role = role
+                         self.content = content
+
+                 # For MLX VLM, we need to extract text content from multimodal messages
+                 # This is a simplified approach - the actual implementation may need
+                 # more sophisticated handling of different content types
+                 text_content = ""
+                 for content_item in msg["content"]:
+                     if content_item["type"] == "text":
+                         text_content += content_item.get("text", "")
+                     # Note: image/audio/video content is typically handled separately
+                     # in the generation phase, not in the chat template
+
+                 mlx_messages.append(MLXChatMessage(msg["role"], text_content))
+
+             return self._mlx_vlm.apply_chat_template(mlx_messages)
+         except Exception as e:
+             raise RuntimeError(f"Failed to apply chat template: {str(e)}")
+
+     def generate_stream(self, prompt: str, g_cfg: GenerationConfig = GenerationConfig()) -> Generator[str, None, None]:
+         """Generate text with streaming."""
+         if not self._mlx_vlm:
+             raise RuntimeError("MLX VLM not loaded")
+
+         try:
+             # MLXSamplerConfig and MLXGenerationConfig are imported
+             # at module top from nexaai.mlx_backend.ml
+
+             # Convert GenerationConfig to MLX format
+             mlx_gen_config = MLXGenerationConfig()
+             mlx_gen_config.max_tokens = g_cfg.max_tokens
+             mlx_gen_config.stop = g_cfg.stop_words
+             mlx_gen_config.image_paths = g_cfg.image_paths
+             mlx_gen_config.audio_paths = g_cfg.audio_paths
+
+             if g_cfg.sampler_config:
+                 mlx_sampler_config = MLXSamplerConfig()
+                 mlx_sampler_config.temperature = g_cfg.sampler_config.temperature
+                 mlx_sampler_config.top_p = g_cfg.sampler_config.top_p
+                 mlx_sampler_config.top_k = g_cfg.sampler_config.top_k
+                 mlx_sampler_config.repetition_penalty = g_cfg.sampler_config.repetition_penalty
+                 mlx_sampler_config.presence_penalty = g_cfg.sampler_config.presence_penalty
+                 mlx_sampler_config.frequency_penalty = g_cfg.sampler_config.frequency_penalty
+                 mlx_sampler_config.seed = g_cfg.sampler_config.seed
+                 mlx_sampler_config.grammar_path = g_cfg.sampler_config.grammar_path
+                 mlx_sampler_config.grammar_string = g_cfg.sampler_config.grammar_string
+                 mlx_gen_config.sampler_config = mlx_sampler_config
+
+             # Create a token callback for streaming
+             def token_callback(token: str) -> bool:
+                 # Check if generation should be cancelled
+                 return not self._cancel_event.is_set()
+
+             # Use MLX VLM streaming generation
+             result = self._mlx_vlm.generate_stream(prompt, mlx_gen_config, token_callback)
+
+             # MLX VLM interface returns a GenerationResult, extract the text
+             if hasattr(result, 'text') and result.text:
+                 # Split the result into words and yield them
+                 words = result.text.split()
+                 for i, word in enumerate(words):
+                     if self._cancel_event.is_set():
+                         break
+                     if i == 0:
+                         yield word
+                     else:
+                         yield " " + word
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to generate streaming text: {str(e)}")
+
+     def generate(self, prompt: str, g_cfg: GenerationConfig = GenerationConfig()) -> str:
+         """
+         Generate text without streaming.
+
+         Args:
+             prompt (str): The prompt to generate text from.
+             g_cfg (GenerationConfig): Generation configuration.
+
+         Returns:
+             str: The generated text.
+         """
+         if not self._mlx_vlm:
+             raise RuntimeError("MLX VLM not loaded")
+
+         try:
+             # MLXSamplerConfig and MLXGenerationConfig are imported
+             # at module top from nexaai.mlx_backend.ml
+
+             # Convert GenerationConfig to MLX format
+             mlx_gen_config = MLXGenerationConfig()
+             mlx_gen_config.max_tokens = g_cfg.max_tokens
+             mlx_gen_config.stop = g_cfg.stop_words
+             mlx_gen_config.image_paths = g_cfg.image_paths
+             mlx_gen_config.audio_paths = g_cfg.audio_paths
+
+             if g_cfg.sampler_config:
+                 mlx_sampler_config = MLXSamplerConfig()
+                 mlx_sampler_config.temperature = g_cfg.sampler_config.temperature
+                 mlx_sampler_config.top_p = g_cfg.sampler_config.top_p
+                 mlx_sampler_config.top_k = g_cfg.sampler_config.top_k
+                 mlx_sampler_config.repetition_penalty = g_cfg.sampler_config.repetition_penalty
+                 mlx_sampler_config.presence_penalty = g_cfg.sampler_config.presence_penalty
+                 mlx_sampler_config.frequency_penalty = g_cfg.sampler_config.frequency_penalty
+                 mlx_sampler_config.seed = g_cfg.sampler_config.seed
+                 mlx_sampler_config.grammar_path = g_cfg.sampler_config.grammar_path
+                 mlx_sampler_config.grammar_string = g_cfg.sampler_config.grammar_string
+                 mlx_gen_config.sampler_config = mlx_sampler_config
+
+             # Use MLX VLM generation
+             result = self._mlx_vlm.generate(prompt, mlx_gen_config)
+
+             # MLX VLM interface returns a GenerationResult, extract the text
+             if hasattr(result, 'text'):
+                 return result.text
+             else:
+                 # Fallback if result is just a string
+                 return str(result)
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to generate text: {str(e)}")
nexaai/vlm_impl/pybind_vlm_impl.py
@@ -0,0 +1,228 @@
+ from typing import Generator, Optional, List, Dict, Any, Union
+ import queue
+ import threading
+ import base64
+ from pathlib import Path
+
+ from nexaai.common import ModelConfig, GenerationConfig, MultiModalMessage
+ from nexaai.binds import vlm_bind, common_bind
+ from nexaai.runtime import _ensure_runtime
+ from nexaai.vlm import VLM
+
+
+ class PyBindVLMImpl(VLM):
+     def __init__(self, handle: Any, m_cfg: ModelConfig = ModelConfig()):
+         """Private constructor, should not be called directly."""
+         super().__init__(m_cfg)
+         self._handle = handle  # This is a py::capsule
+
+     @classmethod
+     def _load_from(cls,
+                    local_path: str,
+                    mmproj_path: str,
+                    m_cfg: ModelConfig = ModelConfig(),
+                    plugin_id: str = "llama_cpp",
+                    device_id: Optional[str] = None
+                    ) -> 'PyBindVLMImpl':
+         """Load VLM model from local path.
+
+         Args:
+             local_path: Path to the main model file
+             mmproj_path: Path to the multimodal projection file
+             m_cfg: Model configuration
+             plugin_id: Plugin identifier
+             device_id: Optional device ID (not used in current binding)
+
+         Returns:
+             PyBindVLMImpl instance
+         """
+         _ensure_runtime()
+
+         config = common_bind.ModelConfig()
+
+         config.n_ctx = m_cfg.n_ctx
+         if m_cfg.n_threads is not None:
+             config.n_threads = m_cfg.n_threads
+         if m_cfg.n_threads_batch is not None:
+             config.n_threads_batch = m_cfg.n_threads_batch
+         if m_cfg.n_batch is not None:
+             config.n_batch = m_cfg.n_batch
+         if m_cfg.n_ubatch is not None:
+             config.n_ubatch = m_cfg.n_ubatch
+         if m_cfg.n_seq_max is not None:
+             config.n_seq_max = m_cfg.n_seq_max
+         config.n_gpu_layers = m_cfg.n_gpu_layers
+
+         # handle chat template strings
+         if m_cfg.chat_template_path:
+             config.chat_template_path = m_cfg.chat_template_path
+
+         if m_cfg.chat_template_content:
+             config.chat_template_content = m_cfg.chat_template_content
+
+         # Create handle: returns py::capsule with automatic cleanup
+         handle = vlm_bind.create_vlm(
+             model_path=local_path,
+             mmproj_path=mmproj_path,
+             model_config=config,
+             plugin_id=plugin_id,
+             device_id=device_id
+         )
+         return cls(handle, m_cfg)
+
+     def eject(self):
+         """Release the model from memory."""
+         # py::capsule handles cleanup automatically
+         del self._handle
+         self._handle = None
+
+     def reset(self):
+         """
+         Reset the VLM model context and KV cache. If not reset, the model will skip the number of evaluated tokens and treat tokens after those as the new incremental tokens.
+         If your past chat history changed, or you are starting a new chat, you should always reset the model before running generate.
+         """
+         vlm_bind.ml_vlm_reset(self._handle)
+
+     def apply_chat_template(
+         self,
+         messages: List[MultiModalMessage],
+         tools: Optional[List[Dict[str, Any]]] = None
+     ) -> str:
+         """Apply the chat template to multimodal messages."""
+         payload = []
+         for msg in messages:
+             role = msg["role"]
+             blocks = []
+
+             for c in msg["content"]:
+                 t = c["type"]
+                 if t == "text":
+                     blocks.append({"type": "text", "text": c.get("text", "") or ""})
+                 else:
+                     # image/audio/video
+                     src = c.get("url") or c.get("path")
+                     if not src:
+                         raise ValueError(f"No url/path for {t}")
+                     # read local file or strip data URI
+                     if Path(src).exists():
+                         raw = Path(src).read_bytes()
+                         b64 = base64.b64encode(raw).decode("ascii")
+                         blocks.append({"type": t, "text": b64})
+                     elif src.startswith("data:"):
+                         b64 = src.split(",", 1)[1]
+                         blocks.append({"type": t, "text": b64})
+                     else:
+                         # remote URL
+                         blocks.append({"type": t, "text": src})
+
+             payload.append({"role": role, "content": blocks})
+
+         return vlm_bind.ml_vlm_apply_chat_template(self._handle, payload, tools)
+
+     def generate_stream(self, prompt: str, g_cfg: GenerationConfig = GenerationConfig()) -> Generator[str, None, None]:
+         """Generate text with streaming."""
+         token_queue = queue.Queue()
+         exception_container = [None]
+         self.reset_cancel()  # Reset cancel flag before generation
+
+         def on_token(token: str, user_data) -> bool:
+             if self._cancel_event.is_set():
+                 token_queue.put(('end', None))
+                 return False  # Stop generation
+             try:
+                 token_queue.put(('token', token))
+                 return True  # Continue generation
+             except Exception as e:
+                 exception_container[0] = e
+                 return False  # Stop generation
+
+         config = self._convert_generation_config(g_cfg)
+
+         # Run generation in thread
+         def generate():
+             try:
+                 vlm_bind.ml_vlm_generate(
+                     handle=self._handle,
+                     prompt=prompt,
+                     config=config,
+                     on_token=on_token,
+                     user_data=None
+                 )
+             except Exception as e:
+                 exception_container[0] = e
+             finally:
+                 token_queue.put(('end', None))
+
+         thread = threading.Thread(target=generate)
+         thread.start()
+
+         # Yield tokens as they come
+         try:
+             while True:
+                 msg_type, token = token_queue.get()
+                 if msg_type == 'token':
+                     yield token
+                 elif msg_type in ('error', 'end'):
+                     break
+         finally:
+             thread.join()
+
+         if exception_container[0]:
+             raise exception_container[0]
+
+     def generate(self, prompt: str, g_cfg: GenerationConfig = GenerationConfig()) -> str:
+         """
+         Generate text without streaming.
+
+         Args:
+             prompt (str): The prompt to generate text from. For chat models, this is the chat messages after chat template is applied.
+             g_cfg (GenerationConfig): Generation configuration.
+
+         Returns:
+             str: The generated text.
+         """
+         config = self._convert_generation_config(g_cfg)
+         result = vlm_bind.ml_vlm_generate(
+             handle=self._handle,
+             prompt=prompt,
+             config=config,
+             on_token=None,  # No callback for non-streaming
+             user_data=None
+         )
+         return result.get("text", "")
+
+     def _convert_generation_config(self, g_cfg: GenerationConfig):
+         """Convert GenerationConfig to binding format."""
+         config = common_bind.GenerationConfig()
+
+         # Set basic generation parameters
+         config.max_tokens = g_cfg.max_tokens
+
+         if g_cfg.stop_words:
+             config.stop = g_cfg.stop_words
+
+         if g_cfg.image_paths:
+             config.image_paths = g_cfg.image_paths
+
+         if g_cfg.audio_paths:
+             config.audio_paths = g_cfg.audio_paths
+
+         if g_cfg.sampler_config:
+             sampler = common_bind.SamplerConfig()
+             sampler.temperature = g_cfg.sampler_config.temperature
+             sampler.top_p = g_cfg.sampler_config.top_p
+             sampler.top_k = g_cfg.sampler_config.top_k
+             sampler.repetition_penalty = g_cfg.sampler_config.repetition_penalty
+             sampler.presence_penalty = g_cfg.sampler_config.presence_penalty
+             sampler.frequency_penalty = g_cfg.sampler_config.frequency_penalty
+             sampler.seed = g_cfg.sampler_config.seed
+
+             if g_cfg.sampler_config.grammar_path:
+                 sampler.grammar_path = g_cfg.sampler_config.grammar_path
+
+             if g_cfg.sampler_config.grammar_string:
+                 sampler.grammar_string = g_cfg.sampler_config.grammar_string
+
+             config.sampler_config = sampler
+
+         return config
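`generate_stream` above runs `vlm_bind.ml_vlm_generate` on a worker thread and relays tokens through a `queue.Queue`, so callers simply iterate the generator. A consumption sketch, assuming direct use of the private loader with illustrative model files:

from nexaai.common import GenerationConfig
from nexaai.vlm_impl.pybind_vlm_impl import PyBindVLMImpl

vlm = PyBindVLMImpl._load_from(
    local_path="model.gguf",    # assumed GGUF weights
    mmproj_path="mmproj.gguf",  # assumed multimodal projector
)

vlm.reset()  # fresh context/KV cache before a new chat, per the reset() docstring
for token in vlm.generate_stream("Describe the image.", GenerationConfig()):
    print(token, end="", flush=True)
vlm.eject()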
nexaai-1.0.4rc13.dist-info/METADATA
@@ -0,0 +1,26 @@
+ Metadata-Version: 2.4
+ Name: nexaai
+ Version: 1.0.4rc13
+ Summary: Python bindings for NexaSDK C-lib backend
+ Author-email: "Nexa AI, Inc." <dev@nexa.ai>
+ Project-URL: Homepage, https://github.com/NexaAI/nexasdk-bridge
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ Requires-Dist: huggingface_hub
+ Requires-Dist: tqdm
+ Requires-Dist: hf_xet
+ Requires-Dist: numpy
+ Requires-Dist: httpx
+ Provides-Extra: mlx
+ Requires-Dist: mlx; extra == "mlx"
+ Requires-Dist: mlx-lm; extra == "mlx"
+ Requires-Dist: mlx-vlm; extra == "mlx"
+ Requires-Dist: tokenizers; extra == "mlx"
+ Requires-Dist: safetensors; extra == "mlx"
+ Requires-Dist: Pillow; extra == "mlx"
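The METADATA above declares `mlx` as an optional extra, matching the two implementation families in this wheel (`plugin_id` "mlx" vs. "llama_cpp"). One plausible way a caller could pick a backend is sketched below; this is only an illustration, since the package's actual selection logic is not shown in this diff:

import importlib.util

def pick_plugin_id() -> str:
    # Prefer MLX when the optional extra's core dependency is importable
    return "mlx" if importlib.util.find_spec("mlx") is not None else "llama_cpp"

print(pick_plugin_id())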
nexaai-1.0.4rc13.dist-info/RECORD
@@ -0,0 +1,59 @@
+ nexaai/__init__.py,sha256=d1bC_PUNduXYYPLrzKLyS0RapvcrKzLQGJREsoBZvXM,1977
+ nexaai/_stub.cp310-win_amd64.pyd,sha256=bG8ot1cOKH_TxHDyUSwTDV8_EX7CZNzg9m9Ct-r5xwM,10752
+ nexaai/_version.py,sha256=F0fwBUpI_XB5EnjfaxQUgzhflriF0jZ8Y2aXQLsPf3o,147
+ nexaai/asr.py,sha256=1XnwbrSoweBfIVAH6LbILv0DMStTQe_Uq5U_f-EyArY,1873
+ nexaai/base.py,sha256=qQBCiQVNzgpkQjZX9aiFDEdbAAe56TROKC3WnWra2Zg,1021
+ nexaai/common.py,sha256=00cP8uT9NdldBI3dRNHrQFx-uhdgtOGGxRAx4p96nw4,1586
+ nexaai/cv.py,sha256=qGDVK0pKAEx0DrSi2rpSVLV4Kf53UAVIst9GdCMAGN0,3021
+ nexaai/embedder.py,sha256=FtJtMKrniejTCi8_-ePLOymfkH8j1VzUqteOqGy5cO4,2279
+ nexaai/image_gen.py,sha256=oliLxFN7Bd_3wzP4F6frMJ7GPvRn-1kn_8kAtdcy_pY,4258
+ nexaai/llm.py,sha256=7V60E1cI1tt6CZ1ti2-tPqkYS56TcJE_kIhvyRyIBeQ,3194
+ nexaai/rerank.py,sha256=l6KtNX9TMGjrtighTQrpXpG_XUlSoZ21AMDDFwR1acI,1682
+ nexaai/runtime.py,sha256=LxAUejH9uaci8IGz9_h0l-MMeYcwTlBjVKN_0u4Q4Qo,2021
+ nexaai/tts.py,sha256=1tJ3dSwphqPIAQmhMvigcyVFMki6_CC3yofCmaV_H9I,1996
+ nexaai/vlm.py,sha256=Cnnw9gy9PaC46WvrCoF5MwFA_iUPaBQGGQ98THSn1jA,4410
+ nexaai/asr_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/asr_impl/mlx_asr_impl.py,sha256=UpGOtl4JZmcmDRa56z5OyIozFVjfsYpKw_vYU_7HoWk,3282
+ nexaai/asr_impl/pybind_asr_impl.py,sha256=C0Qnx-WDNmyC72dxuZVfUwuAoUSMvpo8IfOOkEbqsFA,1493
+ nexaai/binds/__init__.py,sha256=tYvy0pFhoY29GstDT5r-oRiPRarPLECvJAkcamJItOg,83
+ nexaai/binds/common_bind.cp310-win_amd64.pyd,sha256=vkhfHfpXxexJ8WyG0ywOLTzBqaai6N-1NFNneo8d1_4,201216
+ nexaai/binds/embedder_bind.cp310-win_amd64.pyd,sha256=vcrvVGXfR4Sztr5EScCKUo74ficwW2UbrKxostcbSRg,182784
+ nexaai/binds/llm_bind.cp310-win_amd64.pyd,sha256=IkAJTCZ4cKijHnRykXxdplEuBDnZdty9lDpe161B88Q,160256
+ nexaai/binds/nexa_bridge.dll,sha256=QemBCln56Ufmg2o2-dIWqYBvzgFBJhGUze0xu1vnA-M,176640
+ nexaai/binds/nexa_llama_cpp/ggml-base.dll,sha256=Y1XivFm8RVuCPzasooUQQ9QOnWCF2HWQY9t03-ao8_M,513536
+ nexaai/binds/nexa_llama_cpp/ggml-cpu.dll,sha256=tNaqpthvVb4m59kV6JXlAICycbvYseiLPteI0N6ILlU,656384
+ nexaai/binds/nexa_llama_cpp/ggml-cuda.dll,sha256=g0hXgrVcmb8se6AD_49cYvUKJgalfki4f9ZWGlWP2p4,302319616
+ nexaai/binds/nexa_llama_cpp/ggml-vulkan.dll,sha256=QBQbbTnfJ-nauvE1TcbxowAhylx-uuGmrUEE-6NEtFs,24686080
+ nexaai/binds/nexa_llama_cpp/ggml.dll,sha256=rOGVfcbwCnR40tcZBufcOhyeTEzRQcQgWA7YGeeUpAM,66560
+ nexaai/binds/nexa_llama_cpp/llama.dll,sha256=DPd4Ft4AZi4aCfKFMNOS_hyWUH3fGV5TK0USmlfCewA,1587712
+ nexaai/binds/nexa_llama_cpp/mtmd.dll,sha256=xsGO62lzfwT6JCiO-xy3qynU26tAb0gL9poSXf_vDvs,557056
+ nexaai/binds/nexa_llama_cpp/nexa_plugin.dll,sha256=s572VcKxaGBIC8svoc-8xUqff7OUWBdd3G9qr_c_BuE,1059840
+ nexaai/cv_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/cv_impl/mlx_cv_impl.py,sha256=vYN8ASbDr-VlQcia0ydpE3tUfnRcRIoRVQDAOhlZB_4,3250
+ nexaai/cv_impl/pybind_cv_impl.py,sha256=oXT7Hcurg2YH_qgvwpGtgeQcIFxt6uzT9xN-cLvRHcU,1029
+ nexaai/embedder_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/embedder_impl/mlx_embedder_impl.py,sha256=9tQ2-t31r7oo0WcvN3yWIYsk6VQgqSgcILEg0iz3x2I,4431
+ nexaai/embedder_impl/pybind_embedder_impl.py,sha256=-sQkWxtwU_SpcSW6OcddmTn4YVTTRBTNlZRDdktKagk,3377
+ nexaai/image_gen_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/image_gen_impl/mlx_image_gen_impl.py,sha256=q-kXDj7Pg0xTuI1XKCbCigU5HtXV2HNJOXWx85YznLI,11444
+ nexaai/image_gen_impl/pybind_image_gen_impl.py,sha256=M3l9cgQStbStmJ2mtS2NRBZcjp0FJjDlHOhh8xhoaGI,3710
+ nexaai/llm_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/llm_impl/mlx_llm_impl.py,sha256=oJePBkpvQijIxTNgF6sj_lmWKbV9Egh0wJIkGrTHzFc,10459
+ nexaai/llm_impl/pybind_llm_impl.py,sha256=ecDlzQ_4WE_xBE2pA98nkHdDdJgctpzK9IcpFINCdPc,7581
+ nexaai/rerank_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/rerank_impl/mlx_rerank_impl.py,sha256=litO0iDf7KUrl8hE3ADETIvLkZcosroSgMpa3HEVFvQ,3268
+ nexaai/rerank_impl/pybind_rerank_impl.py,sha256=F-bsWJAS5ZBr4ZOwecZJwiZeB4uEGg0u3bSJV035NDA,1489
+ nexaai/tts_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/tts_impl/mlx_tts_impl.py,sha256=I7_ladKIhzn3C7zcx5_dN8EcGlgmRjWSFfwQeqTza9s,3128
+ nexaai/tts_impl/pybind_tts_impl.py,sha256=JmWWoluN6I8325IwPL_Np8RLUq_yMENPoyszQ9G3B3M,1420
+ nexaai/utils/avatar_fetcher.py,sha256=D01f8je-37Nd68zGw8MYK2m7y3fvGlC6h0KR-aN9kdU,3925
+ nexaai/utils/decode.py,sha256=0Z9jDH4ICzw4YXj8nD4L-sMouDaev-TISGRQ4KzidWE,421
+ nexaai/utils/model_manager.py,sha256=Ksl-tKq-a3miTUxEn6-SSOC_KVdn6RPjcUdkWmDDwCk,49767
+ nexaai/utils/progress_tracker.py,sha256=eLJfgzXifTfJZzjkTH1ruUO0PDfvFad4ytYx8bs0NT0,14745
+ nexaai/vlm_impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nexaai/vlm_impl/mlx_vlm_impl.py,sha256=DobOLnbf1v2uQrcB0TByTzJDp2Vhrjyjk3DvW4feahQ,9152
+ nexaai/vlm_impl/pybind_vlm_impl.py,sha256=WGC21LaQnr4LEiytXX8wVBMLJDG75GIweW3kEMxJGXE,8561
+ nexaai-1.0.4rc13.dist-info/METADATA,sha256=2a97uuKz6hRHcOZ-_AM6RL4p7jTKl_f2RSNfb2vvYIs,978
+ nexaai-1.0.4rc13.dist-info/WHEEL,sha256=KUuBC6lxAbHCKilKua8R9W_TM71_-9Sg5uEP3uDWcoU,101
+ nexaai-1.0.4rc13.dist-info/top_level.txt,sha256=LRE2YERlrZk2vfuygnSzsEeqSknnZbz3Z1MHyNmBU4w,7
+ nexaai-1.0.4rc13.dist-info/RECORD,,
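Each RECORD entry above pairs a path with `sha256=<digest>` and a byte size, where the digest is the urlsafe base64 of the file's SHA-256 with `=` padding stripped (PEP 376 style). A sketch of recomputing such an entry for verification (the file name is illustrative):

import base64
import hashlib

def record_hash(path: str) -> str:
    # RECORD-style hash: urlsafe base64 of the SHA-256 digest, padding stripped
    digest = hashlib.sha256(open(path, "rb").read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print("sha256=" + record_hash("nexaai/_version.py"))  # compare against the RECORD line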
nexaai-1.0.4rc13.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: false
+ Tag: cp310-cp310-win_amd64
+
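The WHEEL tag `cp310-cp310-win_amd64` restricts installation to CPython 3.10 on 64-bit Windows. A sketch checking that tag against the running interpreter with the third-party `packaging` library (the library choice is an assumption, not something this wheel uses):

from packaging.tags import Tag, sys_tags

wheel_tag = Tag("cp310", "cp310", "win_amd64")  # from the Tag line above
print("installable here:", wheel_tag in set(sys_tags()))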
nexaai-1.0.4rc13.dist-info/top_level.txt
@@ -0,0 +1 @@
+ nexaai