lollms-client 1.5.6__py3-none-any.whl → 1.7.10__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (62)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
  3. lollms_client/llm_bindings/claude/__init__.py +125 -34
  4. lollms_client/llm_bindings/gemini/__init__.py +261 -159
  5. lollms_client/llm_bindings/grok/__init__.py +52 -14
  6. lollms_client/llm_bindings/groq/__init__.py +2 -2
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
  8. lollms_client/llm_bindings/litellm/__init__.py +1 -1
  9. lollms_client/llm_bindings/llamacpp/__init__.py +18 -11
  10. lollms_client/llm_bindings/lollms/__init__.py +76 -21
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
  12. lollms_client/llm_bindings/mistral/__init__.py +2 -2
  13. lollms_client/llm_bindings/novita_ai/__init__.py +142 -6
  14. lollms_client/llm_bindings/ollama/__init__.py +307 -89
  15. lollms_client/llm_bindings/open_router/__init__.py +2 -2
  16. lollms_client/llm_bindings/openai/__init__.py +81 -20
  17. lollms_client/llm_bindings/openllm/__init__.py +362 -506
  18. lollms_client/llm_bindings/openwebui/__init__.py +333 -171
  19. lollms_client/llm_bindings/perplexity/__init__.py +2 -2
  20. lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
  21. lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
  22. lollms_client/llm_bindings/transformers/__init__.py +428 -632
  23. lollms_client/llm_bindings/vllm/__init__.py +1 -1
  24. lollms_client/lollms_agentic.py +4 -2
  25. lollms_client/lollms_base_binding.py +61 -0
  26. lollms_client/lollms_core.py +512 -1890
  27. lollms_client/lollms_discussion.py +25 -11
  28. lollms_client/lollms_llm_binding.py +112 -261
  29. lollms_client/lollms_mcp_binding.py +34 -75
  30. lollms_client/lollms_stt_binding.py +85 -52
  31. lollms_client/lollms_tti_binding.py +23 -37
  32. lollms_client/lollms_ttm_binding.py +24 -42
  33. lollms_client/lollms_tts_binding.py +28 -17
  34. lollms_client/lollms_ttv_binding.py +24 -42
  35. lollms_client/lollms_types.py +4 -2
  36. lollms_client/stt_bindings/whisper/__init__.py +108 -23
  37. lollms_client/stt_bindings/whispercpp/__init__.py +7 -1
  38. lollms_client/tti_bindings/diffusers/__init__.py +418 -810
  39. lollms_client/tti_bindings/diffusers/server/main.py +1051 -0
  40. lollms_client/tti_bindings/gemini/__init__.py +182 -239
  41. lollms_client/tti_bindings/leonardo_ai/__init__.py +6 -3
  42. lollms_client/tti_bindings/lollms/__init__.py +4 -1
  43. lollms_client/tti_bindings/novita_ai/__init__.py +5 -2
  44. lollms_client/tti_bindings/openai/__init__.py +10 -11
  45. lollms_client/tti_bindings/stability_ai/__init__.py +5 -3
  46. lollms_client/ttm_bindings/audiocraft/__init__.py +7 -12
  47. lollms_client/ttm_bindings/beatoven_ai/__init__.py +7 -3
  48. lollms_client/ttm_bindings/lollms/__init__.py +4 -17
  49. lollms_client/ttm_bindings/replicate/__init__.py +7 -4
  50. lollms_client/ttm_bindings/stability_ai/__init__.py +7 -4
  51. lollms_client/ttm_bindings/topmediai/__init__.py +6 -3
  52. lollms_client/tts_bindings/bark/__init__.py +7 -10
  53. lollms_client/tts_bindings/lollms/__init__.py +6 -1
  54. lollms_client/tts_bindings/piper_tts/__init__.py +8 -11
  55. lollms_client/tts_bindings/xtts/__init__.py +157 -74
  56. lollms_client/tts_bindings/xtts/server/main.py +241 -280
  57. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/METADATA +113 -5
  58. lollms_client-1.7.10.dist-info/RECORD +89 -0
  59. lollms_client-1.5.6.dist-info/RECORD +0 -87
  60. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/WHEEL +0 -0
  61. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/licenses/LICENSE +0 -0
  62. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/top_level.txt +0 -0
@@ -19,8 +19,6 @@ BindingName = "NovitaAIBinding"
 API_BASE_URL = "https://api.novita.ai"
 
 # A hardcoded list of models based on Novita AI's documentation.
-# The API is OpenAI-compatible but does not provide a models listing endpoint.
-# Sourced from: https://docs.novita.ai/language-model/models
 _FALLBACK_MODELS = [
     {'model_name': 'meta-llama/Llama-3-8B-Instruct', 'display_name': 'Llama 3 8B Instruct', 'description': 'Meta\'s Llama 3 8B instruction-tuned model.', 'owned_by': 'Meta'},
     {'model_name': 'meta-llama/Llama-3-70B-Instruct', 'display_name': 'Llama 3 70B Instruct', 'description': 'Meta\'s Llama 3 70B instruction-tuned model.', 'owned_by': 'Meta'},
@@ -28,6 +26,7 @@ _FALLBACK_MODELS = [
     {'model_name': 'mistralai/Mistral-7B-Instruct-v0.2', 'display_name': 'Mistral 7B Instruct v0.2', 'description': 'Mistral AI\'s 7B instruction-tuned model.', 'owned_by': 'Mistral AI'},
     {'model_name': 'google/gemma-7b-it', 'display_name': 'Gemma 7B IT', 'description': 'Google\'s Gemma 7B instruction-tuned model.', 'owned_by': 'Google'},
     {'model_name': 'google/gemma-2-9b-it', 'display_name': 'Gemma 2 9B IT', 'description': 'Google\'s next-generation Gemma 2 9B instruction-tuned model.', 'owned_by': 'Google'},
+    {'model_name': 'deepseek/deepseek-r1', 'display_name': 'Deepseek R1', 'description': 'Deepseek R1 reasoning model.', 'owned_by': 'Deepseek AI'},
     {'model_name': 'deepseek-ai/deepseek-coder-33b-instruct', 'display_name': 'Deepseek Coder 33B Instruct', 'description': 'A powerful coding model from Deepseek AI.', 'owned_by': 'Deepseek AI'},
 ]
 
@@ -73,6 +72,119 @@ class NovitaAIBinding(LollmsLLMBinding):
         if frequency_penalty is not None: params['frequency_penalty'] = frequency_penalty
         return params
 
+    def generate_text(self,
+                      prompt: str,
+                      images: Optional[List[str]] = None,
+                      system_prompt: str = "",
+                      n_predict: Optional[int] = 2048,
+                      stream: Optional[bool] = False,
+                      temperature: float = 0.7,
+                      top_k: int = 50,  # Not supported by the Novita API
+                      top_p: float = 0.9,
+                      repeat_penalty: float = 1.1,  # maps to frequency_penalty
+                      repeat_last_n: int = 64,  # Not supported
+                      seed: Optional[int] = None,  # Not supported
+                      n_threads: Optional[int] = None,  # Not applicable
+                      ctx_size: int | None = None,  # Determined by model
+                      streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                      split: Optional[bool] = False,
+                      user_keyword: Optional[str] = "!@>user:",
+                      ai_keyword: Optional[str] = "!@>assistant:",
+                      think: Optional[bool] = False,
+                      reasoning_effort: Optional[str] = "low",  # low, medium, high
+                      reasoning_summary: Optional[bool] = False,  # auto
+                      ) -> Union[str, dict]:
+        """
+        Generate text using Novita AI.
+        """
+        # Build the messages list.
+        messages = []
+        if system_prompt and system_prompt.strip():
+            messages.append({"role": "system", "content": system_prompt})
+
+        if split:
+            # Basic fallback that splits a prompt string into a message history;
+            # chat() is usually preferred when a real history is available.
+            msgs = self.split_discussion(prompt, user_keyword, ai_keyword)
+            messages.extend(msgs)
+        else:
+            messages.append({"role": "user", "content": prompt})
+
+        if images:
+            ASCIIColors.warning("Novita AI API does not support images in this binding yet. They will be ignored.")
+
+        # Novita accepts the standard OpenAI parameters, so repeat_penalty is
+        # passed through as frequency_penalty.
+        api_params = self._construct_parameters(
+            temperature, top_p, n_predict, 0.0, repeat_penalty
+        )
+
+        payload = {
+            "model": self.model_name,
+            "messages": messages,
+            "stream": stream,
+            **api_params
+        }
+
+        url = f"{API_BASE_URL}/v1/chat/completions"
+        full_response_text = ""
+
+        try:
+            if stream:
+                with requests.post(url, headers=self.headers, json=payload, stream=True) as response:
+                    response.raise_for_status()
+                    for line in response.iter_lines():
+                        if line:
+                            decoded_line = line.decode('utf-8')
+                            if decoded_line.startswith("data:"):
+                                content = decoded_line[len("data: "):].strip()
+                                if content == "[DONE]":
+                                    break
+                                try:
+                                    chunk = json.loads(content)
+                                    delta = chunk.get("choices", [{}])[0].get("delta", {})
+                                    text_chunk = delta.get("content", "")
+                                    # Deepseek R1 may emit its reasoning inline as <think>
+                                    # tags in content, or in a separate reasoning_content
+                                    # field on the delta.
+                                    reasoning_chunk = delta.get("reasoning_content", "")
+
+                                    if reasoning_chunk and think:
+                                        # Wrapping each chunk in <think> tags would produce
+                                        # broken tags mid-stream, so the raw reasoning text
+                                        # is forwarded as a regular chunk. When think is
+                                        # disabled, reasoning chunks are dropped.
+                                        if streaming_callback:
+                                            streaming_callback(reasoning_chunk, MSG_TYPE.MSG_TYPE_CHUNK)
+
+                                    if text_chunk:
+                                        full_response_text += text_chunk
+                                        if streaming_callback:
+                                            if not streaming_callback(text_chunk, MSG_TYPE.MSG_TYPE_CHUNK):
+                                                break
+                                except json.JSONDecodeError:
+                                    continue
+                return full_response_text
+            else:
+                response = requests.post(url, headers=self.headers, json=payload)
+                response.raise_for_status()
+                data = response.json()
+                choice = data["choices"][0]["message"]
+                content = choice.get("content", "")
+                reasoning = choice.get("reasoning_content", "")
+
+                if think and reasoning:
+                    return f"<think>\n{reasoning}\n</think>\n{content}"
+                return content
+
+        except Exception as e:
+            trace_exception(e)
+            return {"status": False, "error": str(e)}
+
     def chat(self,
              discussion: LollmsDiscussion,
              branch_tip_id: Optional[str] = None,
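
The new generate_text() path above streams text chunks and, for reasoning models such as Deepseek R1, forwards reasoning_content chunks when think=True. A minimal usage sketch follows; it is not part of this diff, and the constructor arguments are assumptions, so check the binding's __init__ for the real signature.

# Minimal usage sketch for the new generate_text() path. Not part of the
# package diff; the constructor arguments here are hypothetical.
from lollms_client.llm_bindings.novita_ai import NovitaAIBinding
from lollms_client.lollms_types import MSG_TYPE

binding = NovitaAIBinding(model_name="deepseek/deepseek-r1")  # hypothetical args

def on_chunk(chunk: str, msg_type: MSG_TYPE) -> bool:
    print(chunk, end="", flush=True)
    return True  # returning False stops the stream

result = binding.generate_text(
    "Why is the sky blue?",
    stream=True,
    think=True,  # forward reasoning_content chunks to the callback
    streaming_callback=on_chunk,
)
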
@@ -86,7 +198,10 @@
              seed: Optional[int] = None,  # Not supported
              n_threads: Optional[int] = None,  # Not applicable
              ctx_size: Optional[int] = None,  # Determined by model
-             streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+             streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+             think: Optional[bool] = False,
+             reasoning_effort: Optional[str] = "low",  # low, medium, high
+             reasoning_summary: Optional[bool] = False,  # auto
              ) -> Union[str, dict]:
         """
         Conduct a chat session with a Novita AI model using a LollmsDiscussion object.
@@ -139,6 +254,19 @@
                                     chunk = json.loads(content)
                                     delta = chunk.get("choices", [{}])[0].get("delta", {})
                                     text_chunk = delta.get("content", "")
+
+                                    # Forward reasoning content when the model provides it
+                                    # (e.g. Deepseek R1). Injecting <think> tags into a
+                                    # stateless streaming loop would produce broken tags,
+                                    # so the reasoning text is streamed as a regular chunk;
+                                    # models that tag their own output pass through as-is.
+                                    reasoning_chunk = delta.get("reasoning_content", "")
+                                    if reasoning_chunk and think:
+                                        if streaming_callback:
+                                            streaming_callback(reasoning_chunk, MSG_TYPE.MSG_TYPE_CHUNK)
+
                                     if text_chunk:
                                         full_response_text += text_chunk
                                         if streaming_callback:
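
The comments in this hunk note that well-formed <think> tags cannot be injected into a stateless streaming loop. If tagged streaming output were wanted, a small stateful wrapper would do it; the following is a sketch of a hypothetical helper, not code from the package.

# Hypothetical helper, not part of this diff: opens a <think> block on the
# first reasoning chunk and closes it when regular content starts, so the
# streamed text carries well-formed tags.
class ThinkTagger:
    def __init__(self):
        self.in_think = False

    def reasoning(self, chunk: str) -> str:
        if not self.in_think:
            self.in_think = True
            return "<think>" + chunk
        return chunk

    def content(self, chunk: str) -> str:
        if self.in_think:
            self.in_think = False
            return "</think>\n" + chunk
        return chunk
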
@@ -152,7 +280,15 @@
                 response = requests.post(url, headers=self.headers, json=payload)
                 response.raise_for_status()
                 data = response.json()
-                return data["choices"][0]["message"]["content"]
+                choice = data["choices"][0]["message"]
+                content = choice.get("content", "")
+                reasoning = choice.get("reasoning_content", "")
+
+                if think and reasoning:
+                    return f"<think>\n{reasoning}\n</think>\n{content}"
+
+                return content
+
         except requests.exceptions.HTTPError as e:
             try:
                 error_details = e.response.json()
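
In the non-streaming path, think=True now prepends the reasoning to the answer in a <think> block. The following shows the shape of the return value with illustrative values, not output from the package.

# Illustrative only: the string shape returned when think=True and the API
# supplied reasoning_content.
reasoning = "Recall Rayleigh scattering, then relate it to wavelength."
content = "Shorter blue wavelengths scatter more, so the sky looks blue."
formatted = f"<think>\n{reasoning}\n</think>\n{content}"
assert formatted.startswith("<think>\n") and "</think>\n" in formatted
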
@@ -211,7 +347,7 @@
             "supports_vision": False
         }
 
-    def listModels(self) -> List[Dict[str, str]]:
+    def list_models(self) -> List[Dict[str, str]]:
         """
         Lists available models. Novita AI API does not have a models endpoint,
         so a hardcoded list from their documentation is returned.
@@ -242,7 +378,7 @@ if __name__ == '__main__':
 
     # --- List Models ---
     ASCIIColors.cyan("\n--- Listing Models (static list) ---")
-    models = binding.listModels()
+    models = binding.list_models()
     if models:
         ASCIIColors.green(f"Found {len(models)} models.")
         for m in models:
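
listModels() is renamed to list_models() in this release, so downstream callers of the Novita binding need a one-line rename, e.g.:

# Downstream migration for the rename in this diff:
# models = binding.listModels()  # 1.5.6 name
models = binding.list_models()   # 1.7.10 name
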