lollms-client 1.4.1__py3-none-any.whl → 1.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
- __version__ = "1.4.1" # Updated version
+ __version__ = "1.4.5" # Updated version
 
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
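
The only code change in this file is the version bump. As a quick post-upgrade sanity check (an illustrative snippet, not part of the package), the new version string can be read directly:

    import lollms_client

    # The upgraded wheel should report the bumped version string.
    assert lollms_client.__version__ == "1.4.5", lollms_client.__version__
    print(f"lollms_client {lollms_client.__version__} is installed")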

lollms_client/llm_bindings/novita_ai/__init__.py ADDED
@@ -0,0 +1,303 @@
+ import os
+ import json
+ import requests
+ from typing import Optional, Callable, List, Union, Dict
+
+ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
+ from lollms_client.lollms_llm_binding import LollmsLLMBinding
+ from lollms_client.lollms_types import MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+
+ import pipmaster as pm
+
+ # Ensure the required packages are installed
+ pm.ensure_packages(["requests", "tiktoken"])
+
+ import tiktoken
+
+ BindingName = "NovitaAIBinding"
+ API_BASE_URL = "https://api.novita.ai"
+
+ # A hardcoded list of models based on Novita AI's documentation.
+ # The API is OpenAI-compatible but does not provide a models listing endpoint.
+ # Sourced from: https://docs.novita.ai/language-model/models
+ _FALLBACK_MODELS = [
+     {'model_name': 'meta-llama/Llama-3-8B-Instruct', 'display_name': 'Llama 3 8B Instruct', 'description': 'Meta\'s Llama 3 8B instruction-tuned model.', 'owned_by': 'Meta'},
+     {'model_name': 'meta-llama/Llama-3-70B-Instruct', 'display_name': 'Llama 3 70B Instruct', 'description': 'Meta\'s Llama 3 70B instruction-tuned model.', 'owned_by': 'Meta'},
+     {'model_name': 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'display_name': 'Mixtral 8x7B Instruct', 'description': 'Mistral AI\'s Mixtral 8x7B instruction-tuned model.', 'owned_by': 'Mistral AI'},
+     {'model_name': 'mistralai/Mistral-7B-Instruct-v0.2', 'display_name': 'Mistral 7B Instruct v0.2', 'description': 'Mistral AI\'s 7B instruction-tuned model.', 'owned_by': 'Mistral AI'},
+     {'model_name': 'google/gemma-7b-it', 'display_name': 'Gemma 7B IT', 'description': 'Google\'s Gemma 7B instruction-tuned model.', 'owned_by': 'Google'},
+     {'model_name': 'google/gemma-2-9b-it', 'display_name': 'Gemma 2 9B IT', 'description': 'Google\'s next-generation Gemma 2 9B instruction-tuned model.', 'owned_by': 'Google'},
+     {'model_name': 'deepseek-ai/deepseek-coder-33b-instruct', 'display_name': 'Deepseek Coder 33B Instruct', 'description': 'A powerful coding model from Deepseek AI.', 'owned_by': 'Deepseek AI'},
+ ]
+
+ class NovitaAIBinding(LollmsLLMBinding):
+     """Novita AI-specific binding implementation using their OpenAI-compatible API."""
+
+     def __init__(self, **kwargs):
+         """
+         Initialize the Novita AI binding.
+
+         Args:
+             model_name (str): Name of the Novita AI model to use.
+             service_key (str): Novita AI API key.
+         """
+         super().__init__(BindingName, **kwargs)
+         self.model_name = kwargs.get("model_name")
+         self.service_key = kwargs.get("service_key")
+
+         if not self.service_key:
+             self.service_key = os.getenv("NOVITA_API_KEY")
+
+         if not self.service_key:
+             raise ValueError("Novita AI API key is required. Please set it via the 'service_key' parameter or the NOVITA_API_KEY environment variable.")
+
+         self.headers = {
+             "Authorization": f"Bearer {self.service_key}",
+             "Content-Type": "application/json",
+             "Accept": "application/json"
+         }
+
+     def _construct_parameters(self,
+                               temperature: float,
+                               top_p: float,
+                               n_predict: int,
+                               presence_penalty: float,
+                               frequency_penalty: float) -> Dict[str, any]:
+         """Builds a parameters dictionary for the Novita AI API."""
+         params = {}
+         if temperature is not None: params['temperature'] = float(temperature)
+         if top_p is not None: params['top_p'] = top_p
+         if n_predict is not None: params['max_tokens'] = n_predict
+         if presence_penalty is not None: params['presence_penalty'] = presence_penalty
+         if frequency_penalty is not None: params['frequency_penalty'] = frequency_penalty
+         return params
+
+     def chat(self,
+              discussion: LollmsDiscussion,
+              branch_tip_id: Optional[str] = None,
+              n_predict: Optional[int] = 2048,
+              stream: Optional[bool] = False,
+              temperature: float = 0.7,
+              top_k: int = 50, # Not supported by Novita API
+              top_p: float = 0.9,
+              repeat_penalty: float = 1.1, # maps to frequency_penalty
+              presence_penalty: Optional[float] = 0.0,
+              seed: Optional[int] = None, # Not supported
+              n_threads: Optional[int] = None, # Not applicable
+              ctx_size: Optional[int] = None, # Determined by model
+              streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+              ) -> Union[str, dict]:
+         """
+         Conduct a chat session with a Novita AI model using a LollmsDiscussion object.
+         """
+         system_prompt = discussion.system_prompt
+         messages = discussion.get_messages(branch_tip_id)
+
+         history = []
+         if system_prompt and system_prompt.strip():
+             history.append({"role": "system", "content": system_prompt})
+
+         for msg in messages:
+             role = 'user' if msg.sender_type == "user" else 'assistant'
+
+             if msg.images:
+                 ASCIIColors.warning("Novita AI API does not support images. They will be ignored.")
+
+             if msg.content and msg.content.strip():
+                 history.append({"role": role, "content": msg.content})
+
+         if not history:
+             return {"status": "error", "message": "Cannot start chat with an empty discussion."}
+
+         api_params = self._construct_parameters(
+             temperature, top_p, n_predict, presence_penalty, repeat_penalty
+         )
+
+         payload = {
+             "model": self.model_name,
+             "messages": history,
+             "stream": stream,
+             **api_params
+         }
+
+         url = f"{API_BASE_URL}/v1/chat/completions"
+         full_response_text = ""
+
+         try:
+             if stream:
+                 with requests.post(url, headers=self.headers, json=payload, stream=True) as response:
+                     response.raise_for_status()
+                     for line in response.iter_lines():
+                         if line:
+                             decoded_line = line.decode('utf-8')
+                             if decoded_line.startswith("data:"):
+                                 content = decoded_line[len("data: "):].strip()
+                                 if content == "[DONE]":
+                                     break
+                                 try:
+                                     chunk = json.loads(content)
+                                     delta = chunk.get("choices", [{}])[0].get("delta", {})
+                                     text_chunk = delta.get("content", "")
+                                     if text_chunk:
+                                         full_response_text += text_chunk
+                                         if streaming_callback:
+                                             if not streaming_callback(text_chunk, MSG_TYPE.MSG_TYPE_CHUNK):
+                                                 break
+                                 except json.JSONDecodeError:
+                                     ASCIIColors.error(f"Failed to decode JSON chunk: {content}")
+                                     continue
+                 return full_response_text
+             else:
+                 response = requests.post(url, headers=self.headers, json=payload)
+                 response.raise_for_status()
+                 data = response.json()
+                 return data["choices"][0]["message"]["content"]
+         except requests.exceptions.HTTPError as e:
+             try:
+                 error_details = e.response.json()
+                 error_message = error_details.get("error", {}).get("message", e.response.text)
+             except json.JSONDecodeError:
+                 error_message = e.response.text
+             ASCIIColors.error(f"HTTP Error received from Novita AI API: {e.response.status_code} - {error_message}")
+             return {"status": "error", "message": f"HTTP Error: {e.response.status_code} - {error_message}"}
+         except requests.exceptions.RequestException as e:
+             error_message = f"An error occurred with the Novita AI API: {e}"
+             trace_exception(e)
+             return {"status": "error", "message": str(e)}
+
+     def tokenize(self, text: str) -> list:
+         """
+         Tokenize the input text. Novita uses an OpenAI-compatible API,
+         so we use the same tokenizer as GPT-4.
+         """
+         try:
+             encoding = tiktoken.get_encoding("cl100k_base")
+             return encoding.encode(text)
+         except Exception as e:
+             ASCIIColors.error(f"Could not use tiktoken, falling back to simple encoding: {e}")
+             return list(text.encode('utf-8'))
+
+     def detokenize(self, tokens: list) -> str:
+         """
+         Detokenize a list of tokens.
+         """
+         try:
+             encoding = tiktoken.get_encoding("cl100k_base")
+             return encoding.decode(tokens)
+         except Exception as e:
+             ASCIIColors.error(f"Could not use tiktoken, falling back to simple decoding: {e}")
+             return bytes(tokens).decode('utf-8', errors='ignore')
+
+     def count_tokens(self, text: str) -> int:
+         """
+         Count tokens from a text.
+         """
+         return len(self.tokenize(text))
+
+     def embed(self, text: str, **kwargs) -> List[float]:
+         """
+         Get embeddings for the input text.
+         """
+         ASCIIColors.warning("Novita AI does not offer a public embedding API via this binding. This method is not implemented.")
+         raise NotImplementedError("Novita AI binding does not support embeddings.")
+
+     def get_model_info(self) -> dict:
+         """Return information about the current model setup."""
+         return {
+             "name": self.binding_name,
+             "host_address": API_BASE_URL,
+             "model_name": self.model_name,
+             "supports_vision": False
+         }
+
+     def listModels(self) -> List[Dict[str, str]]:
+         """
+         Lists available models. Novita AI API does not have a models endpoint,
+         so a hardcoded list from their documentation is returned.
+         """
+         return sorted(_FALLBACK_MODELS, key=lambda x: x['display_name'])
+
+     def load_model(self, model_name: str) -> bool:
+         """Set the model name for subsequent operations."""
+         self.model_name = model_name
+         ASCIIColors.info(f"Novita AI model set to: {model_name}.")
+         return True
+
+ if __name__ == '__main__':
+     if 'NOVITA_API_KEY' not in os.environ:
+         ASCIIColors.red("Error: NOVITA_API_KEY environment variable not set.")
+         print("Please get your key from novita.ai and set it.")
+         exit(1)
+
+     ASCIIColors.yellow("--- Testing NovitaAIBinding ---")
+
+     test_model_name = "meta-llama/Llama-3-8B-Instruct"
+
+     try:
+         # --- Initialization ---
+         ASCIIColors.cyan("\n--- Initializing Binding ---")
+         binding = NovitaAIBinding(model_name=test_model_name)
+         ASCIIColors.green("Binding initialized successfully.")
+
+         # --- List Models ---
+         ASCIIColors.cyan("\n--- Listing Models (static list) ---")
+         models = binding.listModels()
+         if models:
+             ASCIIColors.green(f"Found {len(models)} models.")
+             for m in models:
+                 print(f"- {m['model_name']} ({m['display_name']})")
+         else:
+             ASCIIColors.error("Failed to list models.")
+
+         # --- Count Tokens ---
+         ASCIIColors.cyan("\n--- Counting Tokens ---")
+         sample_text = "Hello, world! This is a test."
+         token_count = binding.count_tokens(sample_text)
+         ASCIIColors.green(f"Token count for '{sample_text}': {token_count}")
+
+         # --- Chat (Non-Streaming) ---
+         ASCIIColors.cyan("\n--- Chat (Non-Streaming) ---")
+         discussion_non_stream = LollmsDiscussion.from_messages(
+             messages=[
+                 {"sender":"user", "content": "What is the largest planet in our solar system?"}
+             ],
+             system_prompt="You are a helpful and concise astronomical assistant."
+         )
+         ASCIIColors.info(f"Prompt: What is the largest planet in our solar system?")
+         generated_text = binding.chat(discussion_non_stream, n_predict=50, stream=False)
+         if isinstance(generated_text, str):
+             ASCIIColors.green(f"Generated text:\n{generated_text}")
+         else:
+             ASCIIColors.error(f"Generation failed: {generated_text}")
+
+         # --- Chat (Streaming) ---
+         ASCIIColors.cyan("\n--- Chat (Streaming) ---")
+
+         captured_chunks = []
+         def stream_callback(chunk: str, msg_type: int):
+             ASCIIColors.green(chunk, end="", flush=True)
+             captured_chunks.append(chunk)
+             return True
+
+         discussion_stream = LollmsDiscussion.from_messages(
+             messages=[
+                 {"sender":"user", "content": "Explain the concept of photosynthesis in one short paragraph."}
+             ]
+         )
+         ASCIIColors.info(f"Prompt: Explain the concept of photosynthesis in one short paragraph.")
+         result = binding.chat(
+             discussion_stream,
+             n_predict=150,
+             stream=True,
+             streaming_callback=stream_callback
+         )
+         print("\n--- End of Stream ---")
+         full_streamed_text = "".join(captured_chunks)
+         assert result == full_streamed_text
+
+     except Exception as e:
+         ASCIIColors.error(f"An error occurred during testing: {e}")
+         trace_exception(e)
+
+     ASCIIColors.yellow("\nNovitaAIBinding test finished.")
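
For readers who want to exercise the new binding, here is a minimal non-streaming usage sketch. It assumes NOVITA_API_KEY is exported and mirrors the module's own __main__ harness above (in particular the LollmsDiscussion.from_messages call); treat it as a sketch rather than official documentation:

    from lollms_client.lollms_discussion import LollmsDiscussion
    from lollms_client.llm_bindings.novita_ai import NovitaAIBinding

    # Assumes NOVITA_API_KEY is set in the environment.
    binding = NovitaAIBinding(model_name="meta-llama/Llama-3-8B-Instruct")

    discussion = LollmsDiscussion.from_messages(
        messages=[{"sender": "user", "content": "Say hello in one sentence."}],
        system_prompt="You are a terse assistant."
    )

    # Non-streaming chat returns a plain string on success and an error dict on failure.
    reply = binding.chat(discussion, n_predict=64, stream=False)
    print(reply)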

lollms_client/llm_bindings/perplexity/__init__.py ADDED
@@ -0,0 +1,326 @@
+ import base64
+ import os
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Optional, Callable, List, Union, Dict
+ import json
+ import requests
+
+ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
+ from lollms_client.lollms_llm_binding import LollmsLLMBinding
+ from lollms_client.lollms_types import MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+
+ import pipmaster as pm
+
+ # Ensure the required packages are installed
+ pm.ensure_packages(["requests", "tiktoken"])
+
+ import tiktoken
+
+ BindingName = "PerplexityBinding"
+ API_BASE_URL = "https://api.perplexity.ai"
+
+ # A hardcoded list of models based on Perplexity's documentation
+ # The API does not provide a models listing endpoint.
+ # Sourced from: https://docs.perplexity.ai/docs/models
+ _FALLBACK_MODELS = [
+     # Sonar Models
+     {'model_name': 'llama-3.1-sonar-small-128k-chat', 'display_name': 'Llama 3.1 Sonar Small Chat (128k)', 'description': 'Fast and cost-effective conversational model.', 'owned_by': 'Perplexity'},
+     {'model_name': 'llama-3.1-sonar-small-128k-online', 'display_name': 'Llama 3.1 Sonar Small Online (128k)', 'description': 'Fast and cost-effective conversational model with web access.', 'owned_by': 'Perplexity'},
+     {'model_name': 'llama-3.1-sonar-large-128k-chat', 'display_name': 'Llama 3.1 Sonar Large Chat (128k)', 'description': 'State-of-the-art conversational model.', 'owned_by': 'Perplexity'},
+     {'model_name': 'llama-3.1-sonar-large-128k-online', 'display_name': 'Llama 3.1 Sonar Large Online (128k)', 'description': 'State-of-the-art conversational model with web access.', 'owned_by': 'Perplexity'},
+     # Llama 3 Instruct Models
+     {'model_name': 'llama-3-8b-instruct', 'display_name': 'Llama 3 8B Instruct', 'description': 'Meta\'s Llama 3 8B instruction-tuned model.', 'owned_by': 'Meta'},
+     {'model_name': 'llama-3-70b-instruct', 'display_name': 'Llama 3 70B Instruct', 'description': 'Meta\'s Llama 3 70B instruction-tuned model.', 'owned_by': 'Meta'},
+     # Mixtral Model
+     {'model_name': 'mixtral-8x7b-instruct', 'display_name': 'Mixtral 8x7B Instruct', 'description': 'Mistral AI\'s Mixtral 8x7B instruction-tuned model.', 'owned_by': 'Mistral AI'},
+     # Legacy Sonar Models
+     {'model_name': 'sonar-small-32k-chat', 'display_name': 'Sonar Small Chat (32k)', 'description': 'Legacy small conversational model.', 'owned_by': 'Perplexity'},
+     {'model_name': 'sonar-small-32k-online', 'display_name': 'Sonar Small Online (32k)', 'description': 'Legacy small conversational model with web access.', 'owned_by': 'Perplexity'},
+     {'model_name': 'sonar-medium-32k-chat', 'display_name': 'Sonar Medium Chat (32k)', 'description': 'Legacy medium conversational model.', 'owned_by': 'Perplexity'},
+     {'model_name': 'sonar-medium-32k-online', 'display_name': 'Sonar Medium Online (32k)', 'description': 'Legacy medium conversational model with web access.', 'owned_by': 'Perplexity'},
+ ]
+
+ class PerplexityBinding(LollmsLLMBinding):
+     """Perplexity AI-specific binding implementation."""
+
+     def __init__(self, **kwargs):
+         """
+         Initialize the Perplexity binding.
+
+         Args:
+             model_name (str): Name of the Perplexity model to use.
+             service_key (str): Perplexity API key.
+         """
+         super().__init__(BindingName, **kwargs)
+         self.model_name = kwargs.get("model_name")
+         self.service_key = kwargs.get("service_key")
+
+         if not self.service_key:
+             self.service_key = os.getenv("PERPLEXITY_API_KEY")
+
+         if not self.service_key:
+             raise ValueError("Perplexity API key is required. Please set it via the 'service_key' parameter or the PERPLEXITY_API_KEY environment variable.")
+
+         self.headers = {
+             "Authorization": f"Bearer {self.service_key}",
+             "Content-Type": "application/json",
+             "Accept": "application/json"
+         }
+
+     def _construct_parameters(self,
+                               temperature: float,
+                               top_p: float,
+                               top_k: int,
+                               n_predict: int,
+                               presence_penalty: float,
+                               frequency_penalty: float) -> Dict[str, any]:
+         """Builds a parameters dictionary for the Perplexity API."""
+         params = {}
+         if temperature is not None: params['temperature'] = float(temperature)
+         if top_p is not None: params['top_p'] = top_p
+         if top_k is not None: params['top_k'] = top_k
+         if n_predict is not None: params['max_tokens'] = n_predict
+         if presence_penalty is not None: params['presence_penalty'] = presence_penalty
+         if frequency_penalty is not None: params['frequency_penalty'] = frequency_penalty
+         return params
+
+     def chat(self,
+              discussion: LollmsDiscussion,
+              branch_tip_id: Optional[str] = None,
+              n_predict: Optional[int] = 2048,
+              stream: Optional[bool] = False,
+              temperature: float = 0.7,
+              top_k: int = 50,
+              top_p: float = 0.9,
+              repeat_penalty: float = 1.1, # maps to frequency_penalty
+              presence_penalty: Optional[float] = 0.0,
+              seed: Optional[int] = None, # Not supported
+              n_threads: Optional[int] = None, # Not applicable
+              ctx_size: Optional[int] = None, # Determined by model
+              streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+              ) -> Union[str, dict]:
+         """
+         Conduct a chat session with the Perplexity model using a LollmsDiscussion object.
+         """
+         system_prompt = discussion.system_prompt
+         messages = discussion.get_messages(branch_tip_id)
+
+         history = []
+         if system_prompt and system_prompt.strip():
+             history.append({"role": "system", "content": system_prompt})
+
+         for msg in messages:
+             if msg.sender_type == "user":
+                 role = "user"
+             else:
+                 role = "assistant"
+
+             if msg.images:
+                 ASCIIColors.warning("Perplexity API does not support images. They will be ignored.")
+
+             if msg.content and msg.content.strip():
+                 history.append({"role": role, "content": msg.content})
+
+         if not history:
+             return {"status": "error", "message": "Cannot start chat with an empty discussion."}
+
+         api_params = self._construct_parameters(
+             temperature, top_p, top_k, n_predict, presence_penalty, repeat_penalty
+         )
+
+         payload = {
+             "model": self.model_name,
+             "messages": history,
+             "stream": stream,
+             **api_params
+         }
+
+         url = f"{API_BASE_URL}/chat/completions"
+         full_response_text = ""
+
+         try:
+             if stream:
+                 with requests.post(url, headers=self.headers, json=payload, stream=True) as response:
+                     response.raise_for_status()
+                     for line in response.iter_lines():
+                         if line:
+                             decoded_line = line.decode('utf-8')
+                             if decoded_line.startswith("data:"):
+                                 content = decoded_line[len("data: "):].strip()
+                                 if content == "[DONE]":
+                                     break
+                                 try:
+                                     chunk = json.loads(content)
+                                     delta = chunk.get("choices", [{}])[0].get("delta", {})
+                                     text_chunk = delta.get("content", "")
+                                     if text_chunk:
+                                         full_response_text += text_chunk
+                                         if streaming_callback:
+                                             if not streaming_callback(text_chunk, MSG_TYPE.MSG_TYPE_CHUNK):
+                                                 break
+                                 except json.JSONDecodeError:
+                                     ASCIIColors.error(f"Failed to decode JSON chunk: {content}")
+                                     continue
+                 return full_response_text
+             else:
+                 response = requests.post(url, headers=self.headers, json=payload)
+                 response.raise_for_status()
+                 data = response.json()
+                 return data["choices"][0]["message"]["content"]
+         except requests.exceptions.RequestException as e:
+             error_message = f"An error occurred with the Perplexity API: {e}"
+             trace_exception(e)
+             return {"status": "error", "message": str(e)}
+         except Exception as ex:
+             error_message = f"An unexpected error occurred: {str(ex)}"
+             trace_exception(ex)
+             return {"status": "error", "message": error_message}
+
+     def tokenize(self, text: str) -> list:
+         """
+         Tokenize the input text. Perplexity uses the same tokenizer as GPT-4.
+         """
+         try:
+             encoding = tiktoken.get_encoding("cl100k_base")
+             return encoding.encode(text)
+         except Exception as e:
+             ASCIIColors.error(f"Could not use tiktoken, falling back to simple encoding: {e}")
+             return list(text.encode('utf-8'))
+
+     def detokenize(self, tokens: list) -> str:
+         """
+         Detokenize a list of tokens.
+         """
+         try:
+             encoding = tiktoken.get_encoding("cl100k_base")
+             return encoding.decode(tokens)
+         except Exception as e:
+             ASCIIColors.error(f"Could not use tiktoken, falling back to simple decoding: {e}")
+             return bytes(tokens).decode('utf-8', errors='ignore')
+
+     def count_tokens(self, text: str) -> int:
+         """
+         Count tokens from a text.
+         """
+         return len(self.tokenize(text))
+
+     def embed(self, text: str, **kwargs) -> List[float]:
+         """
+         Get embeddings for the input text.
+         """
+         ASCIIColors.warning("Perplexity does not offer a public embedding API. This method is not implemented.")
+         raise NotImplementedError("Perplexity binding does not support embeddings.")
+
+     def get_model_info(self) -> dict:
+         """Return information about the current model setup."""
+         return {
+             "name": self.binding_name,
+             "version": "1.0",
+             "host_address": API_BASE_URL,
+             "model_name": self.model_name,
+             "supports_vision": False,
+             "supports_structured_output": False
+         }
+
+     def listModels(self) -> List[Dict[str, str]]:
+         """
+         Lists available models. Perplexity API does not have a models endpoint,
+         so a hardcoded list is returned.
+         """
+         return sorted(_FALLBACK_MODELS, key=lambda x: x['display_name'])
+
+     def load_model(self, model_name: str) -> bool:
+         """Set the model name for subsequent operations."""
+         self.model_name = model_name
+         ASCIIColors.info(f"Perplexity model set to: {model_name}.")
+         return True
+
+ if __name__ == '__main__':
+     if 'PERPLEXITY_API_KEY' not in os.environ:
+         ASCIIColors.red("Error: PERPLEXITY_API_KEY environment variable not set.")
+         print("Please get your key from Perplexity AI and set it.")
+         exit(1)
+
+     ASCIIColors.yellow("--- Testing PerplexityBinding ---")
+
+     test_model_name = "llama-3.1-sonar-small-128k-online"
+
+     try:
+         # --- Initialization ---
+         ASCIIColors.cyan("\n--- Initializing Binding ---")
+         binding = PerplexityBinding(model_name=test_model_name)
+         ASCIIColors.green("Binding initialized successfully.")
+
+         # --- List Models ---
+         ASCIIColors.cyan("\n--- Listing Models (static list) ---")
+         models = binding.listModels()
+         if models:
+             ASCIIColors.green(f"Found {len(models)} models.")
+             for m in models:
+                 print(f"- {m['model_name']} ({m['display_name']})")
+         else:
+             ASCIIColors.error("Failed to list models.")
+
+         # --- Count Tokens ---
+         ASCIIColors.cyan("\n--- Counting Tokens ---")
+         sample_text = "Hello, world! This is a test."
+         token_count = binding.count_tokens(sample_text)
+         ASCIIColors.green(f"Token count for '{sample_text}': {token_count}")
+
+         # --- Chat (Non-Streaming) ---
+         ASCIIColors.cyan("\n--- Chat (Non-Streaming) ---")
+         discussion_non_stream = LollmsDiscussion.from_messages(
+             messages=[
+                 {"sender":"user", "content": "What is the capital of France?"}
+             ],
+             system_prompt="You are a helpful and concise assistant."
+         )
+         ASCIIColors.info(f"Prompt: What is the capital of France?")
+         generated_text = binding.chat(discussion_non_stream, n_predict=50, stream=False)
+         if isinstance(generated_text, str):
+             ASCIIColors.green(f"Generated text:\n{generated_text}")
+         else:
+             ASCIIColors.error(f"Generation failed: {generated_text}")
+
+         # --- Chat (Streaming) ---
+         ASCIIColors.cyan("\n--- Chat (Streaming) ---")
+
+         captured_chunks = []
+         def stream_callback(chunk: str, msg_type: int):
+             ASCIIColors.green(chunk, end="", flush=True)
+             captured_chunks.append(chunk)
+             return True
+
+         discussion_stream = LollmsDiscussion.from_messages(
+             messages=[
+                 {"sender":"user", "content": "Explain the importance of bees in one short paragraph."}
+             ],
+             system_prompt="You are a helpful assistant."
+         )
+         ASCIIColors.info(f"Prompt: Explain the importance of bees in one short paragraph.")
+         result = binding.chat(
+             discussion_stream,
+             n_predict=150,
+             stream=True,
+             streaming_callback=stream_callback
+         )
+         print("\n--- End of Stream ---")
+         full_streamed_text = "".join(captured_chunks)
+         assert result == full_streamed_text
+
+         # --- Embeddings (Expected to fail) ---
+         ASCIIColors.cyan("\n--- Embeddings ---")
+         try:
+             binding.embed("This should not work.")
+         except NotImplementedError as e:
+             ASCIIColors.green(f"Successfully caught expected error for embeddings: {e}")
+         except Exception as e:
+             ASCIIColors.error(f"Caught an unexpected error for embeddings: {e}")
+
+     except Exception as e:
+         ASCIIColors.error(f"An error occurred during testing: {e}")
+         trace_exception(e)
+
+     ASCIIColors.yellow("\nPerplexityBinding test finished.")
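
A companion streaming sketch for the Perplexity binding, again modeled on the __main__ harness above and assuming PERPLEXITY_API_KEY is set. Returning False from the callback aborts the stream, per the chunk loop in chat:

    from lollms_client.lollms_discussion import LollmsDiscussion
    from lollms_client.llm_bindings.perplexity import PerplexityBinding

    # Assumes PERPLEXITY_API_KEY is set in the environment.
    binding = PerplexityBinding(model_name="llama-3.1-sonar-small-128k-online")

    chunks = []
    def on_chunk(chunk: str, msg_type) -> bool:
        chunks.append(chunk)
        return True  # returning False stops the stream early

    discussion = LollmsDiscussion.from_messages(
        messages=[{"sender": "user", "content": "Name three pollinators."}]
    )
    full_text = binding.chat(discussion, stream=True, streaming_callback=on_chunk)
    # On success the return value equals the concatenation of the streamed chunks.
    assert full_text == "".join(chunks)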

lollms_client/tti_bindings/leonardo_ai/__init__.py ADDED
@@ -0,0 +1,124 @@
+ import os
+ import requests
+ import time
+ import base64
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any, Union
+
+ from lollms_client.lollms_tti_binding import LollmsTTIBinding
+ from ascii_colors import trace_exception, ASCIIColors
+ import pipmaster as pm
+
+ pm.ensure_packages(["requests", "Pillow"])
+ from PIL import Image
+
+ BindingName = "LeonardoAITTIBinding"
+
+ # Sourced from https://docs.leonardo.ai/docs/models
+ LEONARDO_AI_MODELS = [
+     {"model_name": "ac4f3991-8a40-42cd-b174-14a8e33738e4", "display_name": "Leonardo Phoenix", "description": "Fast, high-quality photorealism."},
+     {"model_name": "1e65d070-22c9-4aed-a5be-ce58a1b65b38", "display_name": "Leonardo Diffusion XL", "description": "The flagship general-purpose SDXL model."},
+     {"model_name": "b24e16ff-06e3-43eb-a255-db4322b0f345", "display_name": "AlbedoBase XL", "description": "Versatile model for photorealism and artistic styles."},
+     {"model_name": "6bef9f1b-29cb-40c7-b9df-32b51c1f67d3", "display_name": "Absolute Reality v1.6", "description": "Classic photorealistic model."},
+     {"model_name": "f3296a34-a868-4665-8b2f-f4313f8c8533", "display_name": "RPG v5", "description": "Specialized in RPG characters and assets."},
+     {"model_name": "2067ae58-a02e-4318-9742-2b55b2a4c813", "display_name": "DreamShaper v7", "description": "Popular versatile artistic model."},
+ ]
+
+ class LeonardoAITTIBinding(LollmsTTIBinding):
+     """Leonardo.ai TTI binding for LoLLMS"""
+
+     def __init__(self, **kwargs):
+         super().__init__(binding_name=BindingName)
+         self.config = kwargs
+         self.api_key = self.config.get("api_key") or os.environ.get("LEONARDO_API_KEY")
+         if not self.api_key:
+             raise ValueError("Leonardo.ai API key is required.")
+         self.model_name = self.config.get("model_name", "ac4f3991-8a40-42cd-b174-14a8e33738e4")
+         self.base_url = "https://cloud.leonardo.ai/api/rest/v1"
+         self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+
+     def listModels(self) -> list:
+         # You could also fetch this dynamically from /models endpoint
+         return LEONARDO_AI_MODELS
+
+     def _wait_for_generation(self, generation_id: str) -> List[bytes]:
+         while True:
+             url = f"{self.base_url}/generations/{generation_id}"
+             response = requests.get(url, headers=self.headers)
+             response.raise_for_status()
+             data = response.json().get("generations_by_pk", {})
+             status = data.get("status")
+
+             if status == "COMPLETE":
+                 ASCIIColors.green("Generation complete.")
+                 images_data = []
+                 for img in data.get("generated_images", []):
+                     img_url = img.get("url")
+                     if img_url:
+                         img_response = requests.get(img_url)
+                         img_response.raise_for_status()
+                         images_data.append(img_response.content)
+                 return images_data
+             elif status == "FAILED":
+                 raise Exception("Leonardo.ai generation failed.")
+             else:
+                 ASCIIColors.info(f"Generation status: {status}. Waiting...")
+                 time.sleep(3)
+
+     def generate_image(self, prompt: str, negative_prompt: str = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
+         url = f"{self.base_url}/generations"
+         payload = {
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "modelId": self.model_name,
+             "width": width,
+             "height": height,
+             "num_images": 1,
+             "guidance_scale": kwargs.get("guidance_scale", 7),
+             "seed": kwargs.get("seed"),
+             "sd_version": "SDXL" # Most models are SDXL based
+         }
+
+         try:
+             ASCIIColors.info(f"Submitting generation job to Leonardo.ai ({self.model_name})...")
+             response = requests.post(url, json=payload, headers=self.headers)
+             response.raise_for_status()
+             generation_id = response.json()["sdGenerationJob"]["generationId"]
+             ASCIIColors.info(f"Job submitted with ID: {generation_id}")
+             images = self._wait_for_generation(generation_id)
+             return images[0]
+         except Exception as e:
+             trace_exception(e)
+             try:
+                 error_msg = response.json()
+                 raise Exception(f"Leonardo.ai API error: {error_msg}")
+             except:
+                 raise Exception(f"Leonardo.ai API request failed: {e}")
+
+     def edit_image(self, **kwargs) -> bytes:
+         ASCIIColors.warning("Leonardo.ai edit_image (inpainting/img2img) is not yet implemented in this binding.")
+         raise NotImplementedError("This binding does not yet support image editing.")
+
+ if __name__ == '__main__':
+     ASCIIColors.magenta("--- Leonardo.ai TTI Binding Test ---")
+     if "LEONARDO_API_KEY" not in os.environ:
+         ASCIIColors.error("LEONARDO_API_KEY environment variable not set. Cannot run test.")
+         exit(1)
+
+     try:
+         binding = LeonardoAITTIBinding()
+
+         ASCIIColors.cyan("\n--- Test: Text-to-Image ---")
+         prompt = "A majestic lion wearing a crown, hyperrealistic, 8k"
+         img_bytes = binding.generate_image(prompt, width=1024, height=1024)
+
+         assert len(img_bytes) > 1000
+         output_path = Path(__file__).parent / "tmp_leonardo_t2i.png"
+         with open(output_path, "wb") as f:
+             f.write(img_bytes)
+         ASCIIColors.green(f"Text-to-Image generation OK. Image saved to {output_path}")
+
+     except Exception as e:
+         trace_exception(e)
+         ASCIIColors.error(f"Leonardo.ai binding test failed: {e}")
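
A minimal text-to-image sketch for the Leonardo.ai binding, assuming LEONARDO_API_KEY is set; the model_name is the Leonardo Diffusion XL UUID from the LEONARDO_AI_MODELS table above, and the output file name is arbitrary:

    from pathlib import Path
    from lollms_client.tti_bindings.leonardo_ai import LeonardoAITTIBinding

    # Assumes LEONARDO_API_KEY is set in the environment.
    binding = LeonardoAITTIBinding(model_name="1e65d070-22c9-4aed-a5be-ce58a1b65b38")

    # generate_image blocks, polling _wait_for_generation until the job
    # completes, then returns the raw bytes of the first generated image.
    png_bytes = binding.generate_image(
        prompt="A watercolor lighthouse at dusk",
        negative_prompt="blurry, low quality",
        width=1024,
        height=1024,
        guidance_scale=7
    )
    Path("lighthouse.png").write_bytes(png_bytes)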

lollms_client/tti_bindings/novita_ai/__init__.py ADDED
@@ -0,0 +1,102 @@
+ import os
+ import requests
+ import base64
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any, Union
+
+ from lollms_client.lollms_tti_binding import LollmsTTIBinding
+ from ascii_colors import trace_exception, ASCIIColors
+ import pipmaster as pm
+
+ pm.ensure_packages(["requests"])
+
+ BindingName = "NovitaAITTIBinding"
+
+ # Sourced from https://docs.novita.ai/image-generation/models
+ NOVITA_AI_MODELS = [
+     {"model_name": "sd_xl_base_1.0.safetensors", "display_name": "Stable Diffusion XL 1.0", "description": "Official SDXL 1.0 Base model."},
+     {"model_name": "dreamshaper_xl_1_0.safetensors", "display_name": "DreamShaper XL 1.0", "description": "Versatile artistic SDXL model."},
+     {"model_name": "juggernaut_xl_v9_rundiffusion.safetensors", "display_name": "Juggernaut XL v9", "description": "High-quality realistic and cinematic model."},
+     {"model_name": "realistic_vision_v5.1.safetensors", "display_name": "Realistic Vision v5.1", "description": "Popular photorealistic SD1.5 model."},
+     {"model_name": "absolutereality_v1.8.1.safetensors", "display_name": "Absolute Reality v1.8.1", "description": "General-purpose realistic SD1.5 model."},
+     {"model_name": "meinamix_meina_v11.safetensors", "display_name": "MeinaMix v11", "description": "High-quality anime illustration model."},
+ ]
+
+ class NovitaAITTIBinding(LollmsTTIBinding):
+     """Novita.ai TTI binding for LoLLMS"""
+
+     def __init__(self, **kwargs):
+         super().__init__(binding_name=BindingName)
+         self.config = kwargs
+         self.api_key = self.config.get("api_key") or os.environ.get("NOVITA_API_KEY")
+         if not self.api_key:
+             raise ValueError("Novita.ai API key is required.")
+         self.model_name = self.config.get("model_name", "juggernaut_xl_v9_rundiffusion.safetensors")
+         self.base_url = "https://api.novita.ai/v3"
+         self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+
+     def listModels(self) -> list:
+         return NOVITA_AI_MODELS
+
+     def generate_image(self, prompt: str, negative_prompt: str = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
+         url = f"{self.base_url}/text2img"
+         payload = {
+             "model_name": self.model_name,
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "width": width,
+             "height": height,
+             "sampler_name": "DPM++ 2M Karras",
+             "cfg_scale": kwargs.get("guidance_scale", 7.0),
+             "steps": kwargs.get("num_inference_steps", 25),
+             "seed": kwargs.get("seed", -1),
+             "n_iter": 1,
+             "batch_size": 1
+         }
+
+         try:
+             ASCIIColors.info(f"Requesting image from Novita.ai ({self.model_name})...")
+             response = requests.post(url, json=payload, headers=self.headers)
+             response.raise_for_status()
+             data = response.json()
+             if "images" not in data or not data["images"]:
+                 raise Exception(f"API returned no images. Response: {data}")
+
+             b64_image = data["images"][0]["image_base64"]
+             return base64.b64decode(b64_image)
+
+         except Exception as e:
+             trace_exception(e)
+             try:
+                 error_msg = response.json()
+                 raise Exception(f"Novita.ai API error: {error_msg}")
+             except:
+                 raise Exception(f"Novita.ai API request failed: {e}")
+
+     def edit_image(self, **kwargs) -> bytes:
+         ASCIIColors.warning("Novita.ai edit_image (inpainting/img2img) is not yet implemented in this binding.")
+         raise NotImplementedError("This binding does not yet support image editing.")
+
+ if __name__ == '__main__':
+     ASCIIColors.magenta("--- Novita.ai TTI Binding Test ---")
+     if "NOVITA_API_KEY" not in os.environ:
+         ASCIIColors.error("NOVITA_API_KEY environment variable not set. Cannot run test.")
+         exit(1)
+
+     try:
+         binding = NovitaAITTIBinding()
+
+         ASCIIColors.cyan("\n--- Test: Text-to-Image ---")
+         prompt = "A cute capybara wearing a top hat, sitting in a field of flowers, painterly style"
+         img_bytes = binding.generate_image(prompt, width=1024, height=1024, num_inference_steps=30)
+
+         assert len(img_bytes) > 1000
+         output_path = Path(__file__).parent / "tmp_novita_t2i.png"
+         with open(output_path, "wb") as f:
+             f.write(img_bytes)
+         ASCIIColors.green(f"Text-to-Image generation OK. Image saved to {output_path}")
+
+     except Exception as e:
+         trace_exception(e)
+         ASCIIColors.error(f"Novita.ai binding test failed: {e}")
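
A matching sketch for the Novita.ai image binding, assuming NOVITA_API_KEY is set. Note how the guidance_scale and num_inference_steps kwargs map onto the cfg_scale and steps fields of the /v3/text2img payload in generate_image:

    from pathlib import Path
    from lollms_client.tti_bindings.novita_ai import NovitaAITTIBinding

    # Assumes NOVITA_API_KEY is set in the environment.
    binding = NovitaAITTIBinding(model_name="sd_xl_base_1.0.safetensors")

    png_bytes = binding.generate_image(
        prompt="A paper-craft fox in a forest, studio lighting",
        width=1024,
        height=1024,
        guidance_scale=7.0,        # becomes cfg_scale in the payload
        num_inference_steps=30,    # becomes steps in the payload
        seed=42
    )
    Path("fox.png").write_bytes(png_bytes)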

lollms_client/tti_bindings/stability_ai/__init__.py ADDED
@@ -0,0 +1,176 @@
+ import os
+ import requests
+ import base64
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any, Union
+
+ from lollms_client.lollms_tti_binding import LollmsTTIBinding
+ from ascii_colors import trace_exception, ASCIIColors
+ import pipmaster as pm
+
+ pm.ensure_packages(["requests", "Pillow"])
+
+ from PIL import Image
+
+ BindingName = "StabilityAITTIBinding"
+
+ # Sourced from https://platform.stability.ai/docs/getting-started/models
+ STABILITY_AI_MODELS = [
+     # SD3
+     {"model_name": "stable-diffusion-3-medium", "display_name": "Stable Diffusion 3 Medium", "description": "Most advanced text-to-image model.", "owned_by": "Stability AI"},
+     {"model_name": "stable-diffusion-3-large", "display_name": "Stable Diffusion 3 Large", "description": "Most advanced model with higher quality.", "owned_by": "Stability AI"},
+     {"model_name": "stable-diffusion-3-large-turbo", "display_name": "Stable Diffusion 3 Large Turbo", "description": "Fast, high-quality generation.", "owned_by": "Stability AI"},
+     # SDXL
+     {"model_name": "stable-diffusion-xl-1024-v1-0", "display_name": "Stable Diffusion XL 1.0", "description": "High-quality 1024x1024 generation.", "owned_by": "Stability AI"},
+     {"model_name": "stable-diffusion-xl-beta-v2-2-2", "display_name": "SDXL Beta", "description": "Legacy anime-focused SDXL model.", "owned_by": "Stability AI"},
+     # SD 1.x & 2.x
+     {"model_name": "stable-diffusion-v1-6", "display_name": "Stable Diffusion 1.6", "description": "Improved version of SD 1.5.", "owned_by": "Stability AI"},
+     {"model_name": "stable-diffusion-2-1", "display_name": "Stable Diffusion 2.1", "description": "768x768 native resolution model.", "owned_by": "Stability AI"},
+ ]
+
+ class StabilityAITTIBinding(LollmsTTIBinding):
+     """Stability AI TTI binding for LoLLMS"""
+
+     def __init__(self, **kwargs):
+         super().__init__(binding_name=BindingName)
+         self.config = kwargs
+         self.api_key = self.config.get("api_key") or os.environ.get("STABILITY_API_KEY")
+         if not self.api_key:
+             raise ValueError("Stability AI API key is required. Please set it in the configuration or as STABILITY_API_KEY environment variable.")
+         self.model_name = self.config.get("model_name", "stable-diffusion-3-medium")
+
+     def listModels(self) -> list:
+         return STABILITY_AI_MODELS
+
+     def _get_api_url(self, task: str) -> str:
+         base_url = "https://api.stability.ai/v2beta/stable-image"
+         # SD3 models use a different endpoint structure
+         if "stable-diffusion-3" in self.model_name:
+             return f"{base_url}/generate/sd3"
+
+         task_map = {
+             "text2image": "generate/core",
+             "image2image": "edit/image-to-image",
+             "inpainting": "edit/in-painting",
+             "upscale": "edit/upscale"
+         }
+         if task not in task_map:
+             raise ValueError(f"Unsupported task for this model family: {task}")
+         return f"{base_url}/{task_map[task]}"
+
+     def _decode_image_input(self, item: Union[str, Path, bytes]) -> Image.Image:
+         if isinstance(item, bytes):
+             return Image.open(BytesIO(item))
+         s = str(item).strip()
+         if s.startswith("data:image/") and ";base64," in s:
+             b64 = s.split(";base64,")[-1]
+             return Image.open(BytesIO(base64.b64decode(b64)))
+         try:
+             p = Path(s)
+             if p.exists():
+                 return Image.open(p)
+         except:
+             pass
+         if s.startswith("http"):
+             response = requests.get(s, stream=True)
+             response.raise_for_status()
+             return Image.open(response.raw)
+         # Fallback for raw base64
+         return Image.open(BytesIO(base64.b64decode(s)))
+
+     def generate_image(self, prompt: str, negative_prompt: str = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
+         url = self._get_api_url("text2image")
+
+         data = {
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "output_format": "png",
+             "seed": kwargs.get("seed", 0)
+         }
+
+         # SD3 uses aspect_ratio, older models use width/height
+         if "stable-diffusion-3" in self.model_name:
+             data["aspect_ratio"] = f"{width}:{height}"
+             data["model"] = self.model_name
+         else:
+             data["width"] = width
+             data["height"] = height
+             data["style_preset"] = kwargs.get("style_preset", "photographic")
+
+         headers = {"authorization": f"Bearer {self.api_key}", "accept": "image/*"}
+
+         try:
+             ASCIIColors.info(f"Requesting image from Stability AI ({self.model_name})...")
+             response = requests.post(url, headers=headers, files={"none": ''}, data=data)
+             response.raise_for_status()
+             return response.content
+         except Exception as e:
+             trace_exception(e)
+             try:
+                 error_msg = response.json()
+                 raise Exception(f"Stability AI API error: {error_msg}")
+             except:
+                 raise Exception(f"Stability AI API request failed: {e}")
+
+     def edit_image(self, images: Union[str, List[str]], prompt: str, negative_prompt: Optional[str] = "", mask: Optional[str] = None, **kwargs) -> bytes:
+         init_image_bytes = BytesIO()
+         init_image = self._decode_image_input(images[0] if isinstance(images, list) else images)
+         init_image.save(init_image_bytes, format="PNG")
+
+         task = "inpainting" if mask else "image2image"
+         url = self._get_api_url(task)
+
+         files = {"image": init_image_bytes.getvalue()}
+         data = {
+             "prompt": prompt,
+             "negative_prompt": negative_prompt or "",
+             "output_format": "png",
+             "seed": kwargs.get("seed", 0)
+         }
+
+         if task == "inpainting":
+             mask_image_bytes = BytesIO()
+             mask_image = self._decode_image_input(mask)
+             mask_image.save(mask_image_bytes, format="PNG")
+             files["mask"] = mask_image_bytes.getvalue()
+         else: # image2image
+             data["strength"] = kwargs.get("strength", 0.6) # mode IMAGE_STRENGTH
+
+         headers = {"authorization": f"Bearer {self.api_key}", "accept": "image/*"}
+
+         try:
+             ASCIIColors.info(f"Requesting image edit from Stability AI ({self.model_name})...")
+             response = requests.post(url, headers=headers, files=files, data=data)
+             response.raise_for_status()
+             return response.content
+         except Exception as e:
+             trace_exception(e)
+             try:
+                 error_msg = response.json()
+                 raise Exception(f"Stability AI API error: {error_msg}")
+             except:
+                 raise Exception(f"Stability AI API request failed: {e}")
+
+ if __name__ == '__main__':
+     ASCIIColors.magenta("--- Stability AI TTI Binding Test ---")
+     if "STABILITY_API_KEY" not in os.environ:
+         ASCIIColors.error("STABILITY_API_KEY environment variable not set. Cannot run test.")
+         exit(1)
+
+     try:
+         binding = StabilityAITTIBinding(model_name="stable-diffusion-3-medium")
+
+         ASCIIColors.cyan("\n--- Test: Text-to-Image ---")
+         prompt = "a cinematic photo of a robot drinking coffee in a Parisian cafe"
+         img_bytes = binding.generate_image(prompt, width=1024, height=1024)
+
+         assert len(img_bytes) > 1000, "Generated image bytes are too small."
+         output_path = Path(__file__).parent / "tmp_stability_t2i.png"
+         with open(output_path, "wb") as f:
+             f.write(img_bytes)
+         ASCIIColors.green(f"Text-to-Image generation OK. Image saved to {output_path}")
+
+     except Exception as e:
+         trace_exception(e)
+         ASCIIColors.error(f"Stability AI binding test failed: {e}")
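
Finally, a sketch of the Stability AI binding's editing path, assuming STABILITY_API_KEY is set and a local input.png exists (a hypothetical file name). With no mask argument, edit_image routes to the image-to-image endpoint; passing mask=... would route to in-painting instead. An SDXL model is chosen here because _get_api_url sends every stable-diffusion-3 model to the SD3 generate endpoint regardless of task:

    from pathlib import Path
    from lollms_client.tti_bindings.stability_ai import StabilityAITTIBinding

    # Assumes STABILITY_API_KEY is set in the environment.
    binding = StabilityAITTIBinding(model_name="stable-diffusion-xl-1024-v1-0")

    edited = binding.edit_image(
        images="input.png",   # path, URL, data URI, raw base64, or bytes all decode
        prompt="the same scene at night, neon signage",
        strength=0.5          # image-to-image only; ignored for in-painting
    )
    Path("edited.png").write_bytes(edited)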

{lollms_client-1.4.1.dist-info → lollms_client-1.4.5.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.4.1
+ Version: 1.4.5
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License

{lollms_client-1.4.1.dist-info → lollms_client-1.4.5.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- lollms_client/__init__.py,sha256=G2ENRPwIlHb_nTaBEbn_AvUQvlsBYpIuQXGWYkYmyo0,1146
+ lollms_client/__init__.py,sha256=1ATuXFW5bb2NSxUu0nl6c3tXgX30-GS0pYWv6hkA8Vg,1146
  lollms_client/lollms_agentic.py,sha256=pQiMEuB_XkG29-SW6u4KTaMFPr6eKqacInggcCuCW3k,13914
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
  lollms_client/lollms_core.py,sha256=aCEoxmEF6ZmkBgJgZd74lKkM4A3PVVyt2IwMvLfScWw,315053
@@ -29,10 +29,12 @@ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=llPF85AzYgMp7Cpo_4OvEHKlx
  lollms_client/llm_bindings/lollms/__init__.py,sha256=XFQKtTJnkW8OwF1IoyzHqAZ8JAJ0PnAUKDdeOLGcbrE,24310
  lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=iuDfhZZoLC-PDEPLHrcjk5-962S5c7OeCI7PMdJxI_A,17753
  lollms_client/llm_bindings/mistral/__init__.py,sha256=cddz9xIj8NRFLKHe2JMxzstpUrNIu5s9juci3mhiHfo,14133
+ lollms_client/llm_bindings/novita_ai/__init__.py,sha256=NOg6_NBCxuz9gwrijTCzrp9a78AbmBdT4k67baCTtuc,13877
  lollms_client/llm_bindings/ollama/__init__.py,sha256=a6cgzXPuo8ZLhIZHJFy8QF0n5ZTk0X4OC1JSyXG1enk,46013
  lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
  lollms_client/llm_bindings/openai/__init__.py,sha256=ElLbtHLwR61Uj3W6G4g6QIhxtCqUGOCQBYwhQyN60us,26142
  lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
+ lollms_client/llm_bindings/perplexity/__init__.py,sha256=lMRPdbVbGX_weByAdcsZakdxDg7nFF3uCbdzakQmBOc,15006
  lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=ZTuVa5ngu9GPVImjs_g8ArV7Bx7a1Rze518Tz8AFJ3U,31807
  lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=xiT-JAyNI_jo6CE0nle9Xoc7U8-UHAfEHrnCwmDTiOE,32023
  lollms_client/llm_bindings/transformers/__init__.py,sha256=hEonNvmrVSc9YWg_1uVwxe31rC-fsjVGh6QvyBc0TEE,37598
@@ -51,8 +53,11 @@ lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmd
  lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lollms_client/tti_bindings/diffusers/__init__.py,sha256=Pi5Zw4nHGXVc0Vcb0ib7KkoiOx__0JukWtL01BUzd7c,41692
  lollms_client/tti_bindings/gemini/__init__.py,sha256=f9fPuqnrBZ1Z-obcoP6EVvbEXNbNCSg21cd5efLCk8U,16707
+ lollms_client/tti_bindings/leonardo_ai/__init__.py,sha256=EvjKyV8kM7-tmLwC1agSQ-v7thrNgflrmFxhDLqzT8U,5884
  lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
+ lollms_client/tti_bindings/novita_ai/__init__.py,sha256=1PZbM_R7cZwImbYxeuLqTlrc6Sfg43_YCbjw2ZJLnhc,4802
  lollms_client/tti_bindings/openai/__init__.py,sha256=YWJolJSQfIzTJvrLQVe8rQewP7rddf6z87g4rnp-lTs,4932
+ lollms_client/tti_bindings/stability_ai/__init__.py,sha256=GJxE0joQ0UZbheozysbN96AvQq60pjY2UjnSLFmRh4g,8025
  lollms_client/ttm_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lollms_client/ttm_bindings/audiocraft/__init__.py,sha256=a0k6wTrHth6GaVOiNnVboeFY3oKVvCQPbQlqO38XEyc,14328
  lollms_client/ttm_bindings/bark/__init__.py,sha256=Pr3ou2a-7hNYDqbkxrAbghZpO5HvGUhz7e-7VGXIHHA,18976
@@ -71,8 +76,8 @@ lollms_client/tts_bindings/xtts/server/main.py,sha256=T-Kn5NM-u1FJMygeV8rOoZKlqn
  lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-1.4.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-1.4.1.dist-info/METADATA,sha256=eBfpms3EJ5sD7D-xBTXggnqOc1g8IE0inftnXGQmb6w,58689
- lollms_client-1.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lollms_client-1.4.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
- lollms_client-1.4.1.dist-info/RECORD,,
+ lollms_client-1.4.5.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-1.4.5.dist-info/METADATA,sha256=IJc5k53zOajOIIvuq-dhLQQ67bHpLwzNyTqxlQZNCz8,58689
+ lollms_client-1.4.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-1.4.5.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+ lollms_client-1.4.5.dist-info/RECORD,,