lollms-client 1.5.6__py3-none-any.whl → 1.7.10__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Files changed (62)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
  3. lollms_client/llm_bindings/claude/__init__.py +125 -34
  4. lollms_client/llm_bindings/gemini/__init__.py +261 -159
  5. lollms_client/llm_bindings/grok/__init__.py +52 -14
  6. lollms_client/llm_bindings/groq/__init__.py +2 -2
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
  8. lollms_client/llm_bindings/litellm/__init__.py +1 -1
  9. lollms_client/llm_bindings/llamacpp/__init__.py +18 -11
  10. lollms_client/llm_bindings/lollms/__init__.py +76 -21
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
  12. lollms_client/llm_bindings/mistral/__init__.py +2 -2
  13. lollms_client/llm_bindings/novita_ai/__init__.py +142 -6
  14. lollms_client/llm_bindings/ollama/__init__.py +307 -89
  15. lollms_client/llm_bindings/open_router/__init__.py +2 -2
  16. lollms_client/llm_bindings/openai/__init__.py +81 -20
  17. lollms_client/llm_bindings/openllm/__init__.py +362 -506
  18. lollms_client/llm_bindings/openwebui/__init__.py +333 -171
  19. lollms_client/llm_bindings/perplexity/__init__.py +2 -2
  20. lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
  21. lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
  22. lollms_client/llm_bindings/transformers/__init__.py +428 -632
  23. lollms_client/llm_bindings/vllm/__init__.py +1 -1
  24. lollms_client/lollms_agentic.py +4 -2
  25. lollms_client/lollms_base_binding.py +61 -0
  26. lollms_client/lollms_core.py +512 -1890
  27. lollms_client/lollms_discussion.py +25 -11
  28. lollms_client/lollms_llm_binding.py +112 -261
  29. lollms_client/lollms_mcp_binding.py +34 -75
  30. lollms_client/lollms_stt_binding.py +85 -52
  31. lollms_client/lollms_tti_binding.py +23 -37
  32. lollms_client/lollms_ttm_binding.py +24 -42
  33. lollms_client/lollms_tts_binding.py +28 -17
  34. lollms_client/lollms_ttv_binding.py +24 -42
  35. lollms_client/lollms_types.py +4 -2
  36. lollms_client/stt_bindings/whisper/__init__.py +108 -23
  37. lollms_client/stt_bindings/whispercpp/__init__.py +7 -1
  38. lollms_client/tti_bindings/diffusers/__init__.py +418 -810
  39. lollms_client/tti_bindings/diffusers/server/main.py +1051 -0
  40. lollms_client/tti_bindings/gemini/__init__.py +182 -239
  41. lollms_client/tti_bindings/leonardo_ai/__init__.py +6 -3
  42. lollms_client/tti_bindings/lollms/__init__.py +4 -1
  43. lollms_client/tti_bindings/novita_ai/__init__.py +5 -2
  44. lollms_client/tti_bindings/openai/__init__.py +10 -11
  45. lollms_client/tti_bindings/stability_ai/__init__.py +5 -3
  46. lollms_client/ttm_bindings/audiocraft/__init__.py +7 -12
  47. lollms_client/ttm_bindings/beatoven_ai/__init__.py +7 -3
  48. lollms_client/ttm_bindings/lollms/__init__.py +4 -17
  49. lollms_client/ttm_bindings/replicate/__init__.py +7 -4
  50. lollms_client/ttm_bindings/stability_ai/__init__.py +7 -4
  51. lollms_client/ttm_bindings/topmediai/__init__.py +6 -3
  52. lollms_client/tts_bindings/bark/__init__.py +7 -10
  53. lollms_client/tts_bindings/lollms/__init__.py +6 -1
  54. lollms_client/tts_bindings/piper_tts/__init__.py +8 -11
  55. lollms_client/tts_bindings/xtts/__init__.py +157 -74
  56. lollms_client/tts_bindings/xtts/server/main.py +241 -280
  57. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/METADATA +113 -5
  58. lollms_client-1.7.10.dist-info/RECORD +89 -0
  59. lollms_client-1.5.6.dist-info/RECORD +0 -87
  60. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/WHEEL +0 -0
  61. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/licenses/LICENSE +0 -0
  62. {lollms_client-1.5.6.dist-info → lollms_client-1.7.10.dist-info}/top_level.txt +0 -0
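The bulk of the change in this release lands in the Gemini LLM binding (item 4 above); its full diff follows. The most visible addition is a generate_from_messages method that accepts OpenAI-style message dictionaries, including multimodal content parts. A minimal usage sketch, assuming the binding class is imported directly and that the model name and service_key shown here are caller-supplied placeholders (in normal use the binding is constructed through LollmsClient rather than by hand):

# Sketch only: direct use of the updated GeminiBinding, based on the diff below.
from lollms_client.llm_bindings.gemini import GeminiBinding

binding = GeminiBinding(
    model_name="gemini-1.5-pro-latest",      # new default model name in 1.7.10
    service_key="YOUR_GOOGLE_AI_STUDIO_KEY", # placeholder; the binding errors without a key
)

messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": [
        {"type": "text", "text": "Describe this image in one sentence."},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64_DATA>"}},
    ]},
]

# Returns the generated text on success, or a {"status": False, "error": ...} dict on failure.
response = binding.generate_from_messages(messages, n_predict=128)
print(response)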
@@ -1,6 +1,7 @@
- # bindings/gemini/binding.py
+ # bindings/gemini/__init__.py
  import base64
  import os
+ import re
  from io import BytesIO
  from pathlib import Path
  from typing import Optional, Callable, List, Union, Dict
@@ -16,7 +17,7 @@ import pipmaster as pm
  pm.ensure_packages(["google-generativeai", "pillow", "tiktoken", "protobuf"])

  import google.generativeai as genai
- from PIL import Image, ImageDraw # ImageDraw is used in the test script below
+ from PIL import Image, ImageDraw
  import tiktoken

  BindingName = "GeminiBinding"
@@ -43,7 +44,7 @@ class GeminiBinding(LollmsLLMBinding):
  service_key (str): Google AI Studio API key.
  """
  super().__init__(BindingName, **kwargs)
- self.model_name = kwargs.get("model_name", None)
+ self.model_name = kwargs.get("model_name", "gemini-1.5-pro-latest")
  self.service_key = kwargs.get("service_key", None)

  if not self.service_key:
@@ -91,35 +92,34 @@ class GeminiBinding(LollmsLLMBinding):
  split:Optional[bool]=False,
  user_keyword:Optional[str]="!@>user:",
  ai_keyword:Optional[str]="!@>assistant:",
+ think: Optional[bool] = False,
+ reasoning_effort: Optional[str] = "low", # low, medium, high
+ reasoning_summary: Optional[bool] = False, # auto
  ) -> Union[str, dict]:
  """
  Generate text using the Gemini model.
-
- Args:
- prompt (str): The input prompt for text generation.
- images (Optional[List[str]]): List of image file paths or base64 strings.
- system_prompt (str): The system prompt to guide the model.
- ... other LollmsLLMBinding parameters ...
-
- Returns:
- Union[str, dict]: Generated text or error dictionary.
  """
  if not self.client:
  return {"status": False, "error": "Gemini client not initialized."}

- # Gemini uses 'system_instruction' for GenerativeModel, not part of the regular message list.
- model = self.client.GenerativeModel(
- model_name=self.model_name,
- system_instruction=system_prompt if system_prompt else None
- )
+ # Handle 'think' parameter logging
+ if think and "thinking" not in str(self.model_name).lower():
+ ASCIIColors.info(f"Thinking requested but model '{self.model_name}' may not be a thinking model. Proceeding.")
+
+ # Gemini uses 'system_instruction' for GenerativeModel
+ try:
+ model = self.client.GenerativeModel(
+ model_name=self.model_name,
+ system_instruction=system_prompt if system_prompt else None
+ )
+ except Exception as e:
+ return {"status": False, "error": f"Failed to initialize GenerativeModel: {e}"}

  generation_config = self.get_generation_config(temperature, top_p, top_k, n_predict)

  # Prepare content for the API call
  content_parts = []
  if split:
- # Note: The 'split' logic for Gemini should ideally build a multi-turn history,
- # but for `generate_text`, we'll treat the last user part as the main prompt.
  discussion_messages = self.split_discussion(prompt, user_keyword, ai_keyword)
  if discussion_messages:
  last_message = discussion_messages[-1]['content']
@@ -135,6 +135,9 @@ class GeminiBinding(LollmsLLMBinding):
  if is_image_path(image_data):
  img = Image.open(image_data)
  else: # Assume base64
+ if image_data.startswith("data:image"):
+ # Remove prefix if present
+ image_data = image_data.split(",")[1]
  img = Image.open(BytesIO(base64.b64decode(image_data)))
  content_parts.append(img)
  except Exception as e:
@@ -155,17 +158,15 @@ class GeminiBinding(LollmsLLMBinding):
  try:
  chunk_text = chunk.text
  except ValueError:
- # Handle potential empty parts in the stream
  chunk_text = ""

  if chunk_text:
  full_response_text += chunk_text
  if streaming_callback:
  if not streaming_callback(chunk_text, MSG_TYPE.MSG_TYPE_CHUNK):
- break # Callback requested stop
+ break
  return full_response_text
  else:
- # Check for safety blocks
  if response.prompt_feedback.block_reason:
  error_msg = f"Content blocked due to: {response.prompt_feedback.block_reason.name}"
  ASCIIColors.warning(error_msg)
@@ -177,6 +178,133 @@ class GeminiBinding(LollmsLLMBinding):
  trace_exception(ex)
  return {"status": False, "error": error_message}

+ def generate_from_messages(self,
+ messages: List[Dict],
+ n_predict: Optional[int] = None,
+ stream: Optional[bool] = None,
+ temperature: Optional[float] = None,
+ top_k: Optional[int] = None,
+ top_p: Optional[float] = None,
+ repeat_penalty: Optional[float] = None,
+ repeat_last_n: Optional[int] = None,
+ seed: Optional[int] = None,
+ n_threads: Optional[int] = None,
+ ctx_size: int | None = None,
+ streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+ think: Optional[bool] = False,
+ reasoning_effort: Optional[bool] = "low",
+ reasoning_summary: Optional[bool] = "auto",
+ **kwargs
+ ) -> Union[str, dict]:
+ """
+ Generate content using a list of messages. This is the low-level method
+ that handles chat history structure for Gemini.
+ """
+ if not self.client:
+ return {"status": False, "error": "Gemini client not initialized."}
+
+ gen_config = self.get_generation_config(temperature, top_p, top_k, n_predict)
+
+ system_instruction = None
+ gemini_contents = []
+
+ # Parse messages
+ for msg in messages:
+ role = msg.get("role", "user")
+ content = msg.get("content", "")
+
+ # Gemini specific role mapping
+ if role == "system":
+ # Gemini takes system prompt at initialization, not in the content list
+ if isinstance(content, str):
+ system_instruction = content
+ elif isinstance(content, list):
+ # Extract text from list if system prompt is complex (rare)
+ text_parts = [p.get("text", "") for p in content if p.get("type") == "text"]
+ system_instruction = "\n".join(text_parts)
+ continue
+
+ gemini_role = "model" if role == "assistant" else "user"
+ parts = []
+
+ # Parse content (Text or List of multimodal)
+ if isinstance(content, str):
+ parts.append(content)
+ elif isinstance(content, list):
+ for item in content:
+ item_type = item.get("type")
+ if item_type == "text":
+ parts.append(item.get("text", ""))
+ elif item_type in ["input_image", "image_url"]:
+ # Extract Base64
+ base64_data = None
+ url_data = item.get("image_url", item.get("input_image"))
+
+ if isinstance(url_data, dict):
+ # Format: {"url": "data:image/..."} or {"base64": "..."}
+ if "base64" in url_data:
+ base64_data = url_data["base64"]
+ elif "url" in url_data:
+ base64_data = url_data["url"]
+ elif isinstance(url_data, str):
+ base64_data = url_data
+
+ if base64_data:
+ # Clean "data:image/png;base64," prefix
+ if "base64," in base64_data:
+ base64_data = base64_data.split("base64,")[1]
+ try:
+ img = Image.open(BytesIO(base64.b64decode(base64_data)))
+ parts.append(img)
+ except Exception as e:
+ ASCIIColors.warning(f"Failed to decode image in message: {e}")
+
+ if parts:
+ gemini_contents.append({"role": gemini_role, "parts": parts})
+
+ # Initialize Model with system instruction
+ try:
+ model = self.client.GenerativeModel(
+ model_name=self.model_name,
+ system_instruction=system_instruction
+ )
+ except Exception as e:
+ return {"status": False, "error": f"Failed to initialize GenerativeModel: {e}"}
+
+ full_response_text = ""
+
+ try:
+ # Generate content based on the full history
+ response = model.generate_content(
+ contents=gemini_contents,
+ generation_config=gen_config,
+ stream=stream
+ )
+
+ if stream:
+ for chunk in response:
+ try:
+ chunk_text = chunk.text
+ except ValueError:
+ chunk_text = "" # Safety filter or empty chunk
+
+ if chunk_text:
+ full_response_text += chunk_text
+ if streaming_callback:
+ if not streaming_callback(chunk_text, MSG_TYPE.MSG_TYPE_CHUNK):
+ break
+ return full_response_text
+ else:
+ if response.prompt_feedback.block_reason:
+ error_msg = f"Content blocked due to: {response.prompt_feedback.block_reason.name}"
+ return {"status": False, "error": error_msg}
+ return response.text
+
+ except Exception as ex:
+ error_message = f"An unexpected error occurred with Gemini API: {str(ex)}"
+ trace_exception(ex)
+ return {"status": False, "error": error_message}
+
  def chat(self,
  discussion: LollmsDiscussion,
  branch_tip_id: Optional[str] = None,
@@ -190,7 +318,10 @@ class GeminiBinding(LollmsLLMBinding):
  seed: Optional[int] = None,
  n_threads: Optional[int] = None,
  ctx_size: Optional[int] = None,
- streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+ streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+ think: Optional[bool] = False,
+ reasoning_effort: Optional[str] = "low", # low, medium, high
+ reasoning_summary: Optional[bool] = False, # auto
  ) -> Union[str, dict]:
  """
  Conduct a chat session with the Gemini model using a LollmsDiscussion object.
@@ -198,22 +329,17 @@ class GeminiBinding(LollmsLLMBinding):
  if not self.client:
  return {"status": "error", "message": "Gemini client not initialized."}

- # 1. Manually export discussion to Gemini's format.
- # Gemini uses 'user' and 'model' roles.
- # The system prompt is handled separately at model initialization.
  system_prompt = discussion.system_prompt
  messages = discussion.get_messages(branch_tip_id)

  history = []
  for msg in messages:
- role = 'user' if msg.sender_type == "user" else 'assistant'
+ role = 'user' if msg.sender_type == "user" else 'model'

- # Handle multimodal content in the message
  content_parts = []
  if msg.content:
  content_parts.append(msg.content)

- # Check for images associated with this message
  if msg.images:
  for file_path in msg.images:
  if is_image_path(file_path):
@@ -221,39 +347,61 @@ class GeminiBinding(LollmsLLMBinding):
  content_parts.append(Image.open(file_path))
  except Exception as e:
  ASCIIColors.warning(f"Could not load image {file_path}: {e}")
+ else:
+ try:
+ b64_data = file_path
+ if b64_data.startswith("data:image"):
+ b64_data = b64_data.split(",")[1]
+ content_parts.append(Image.open(BytesIO(base64.b64decode(b64_data))))
+ except:
+ pass

  if content_parts:
  history.append({'role': role, 'parts': content_parts})

- model = self.client.GenerativeModel(
- model_name=self.model_name,
- system_instruction=system_prompt
- )
+ try:
+ model = self.client.GenerativeModel(
+ model_name=self.model_name,
+ system_instruction=system_prompt if system_prompt else None
+ )
+ except Exception as e:
+ return {"status": "error", "message": f"Failed to initialize GenerativeModel: {e}"}

- # History must not be empty and should not contain consecutive roles of the same type.
- # We also need to separate the final prompt from the history.
+ # Organize history for chat session
+ # ChatSession expects history excluding the very last message if we use send_message
  if not history:
  return {"status": "error", "message": "Cannot start chat with an empty discussion."}

- chat_history = history[:-1] if len(history) > 1 else []
- last_prompt_parts = history[-1]['parts']
+ # Filter and consolidate consecutive roles
+ consolidated_history = []
+ current_role = None
+ current_parts = []

- # Ensure history is valid (no consecutive same roles)
- valid_history = []
- if chat_history:
- valid_history.append(chat_history[0])
- for i in range(1, len(chat_history)):
- if chat_history[i]['role'] != chat_history[i-1]['role']:
- valid_history.append(chat_history[i])
+ for msg in history:
+ if msg['role'] == current_role:
+ current_parts.extend(msg['parts'])
+ consolidated_history[-1]['parts'] = current_parts
+ else:
+ current_role = msg['role']
+ current_parts = list(msg['parts'])
+ consolidated_history.append({'role': current_role, 'parts': current_parts})
+
+ # Separate the last message for the send_message call
+ last_message = consolidated_history[-1]
+ chat_history = consolidated_history[:-1]
+
+ # Only 'user' can send_message in Gemini chat.
+ # If the last message in history is from 'model', the flow is slightly broken for standard chat,
+ # but we assume the standard Lollms loop (User -> AI).

- chat_session = model.start_chat(history=valid_history)
+ chat_session = model.start_chat(history=chat_history)

  generation_config = self.get_generation_config(temperature, top_p, top_k, n_predict)

  full_response_text = ""
  try:
  response = chat_session.send_message(
- content=last_prompt_parts,
+ content=last_message['parts'],
  generation_config=generation_config,
  stream=stream
  )
@@ -284,11 +432,6 @@ class GeminiBinding(LollmsLLMBinding):
  return {"status": "error", "message": error_message}

  def tokenize(self, text: str) -> list:
- """
- Tokenize the input text.
- Note: Gemini doesn't expose a public tokenizer API.
- Using tiktoken for a rough estimate, NOT accurate for Gemini.
- """
  try:
  encoding = tiktoken.get_encoding("cl100k_base")
  return encoding.encode(text)
@@ -296,10 +439,6 @@ class GeminiBinding(LollmsLLMBinding):
  return list(text.encode('utf-8'))

  def detokenize(self, tokens: list) -> str:
- """
- Detokenize a list of tokens.
- Note: Based on the placeholder tokenizer.
- """
  try:
  encoding = tiktoken.get_encoding("cl100k_base")
  return encoding.decode(tokens)
@@ -307,9 +446,6 @@ class GeminiBinding(LollmsLLMBinding):
  return bytes(tokens).decode('utf-8', errors='ignore')

  def count_tokens(self, text: str) -> int:
- """
- Count tokens from a text using the Gemini API.
- """
  if not self.client or not self.model_name:
  ASCIIColors.warning("Cannot count tokens, Gemini client or model_name not set.")
  return -1
@@ -318,24 +454,19 @@ class GeminiBinding(LollmsLLMBinding):
  return model.count_tokens(text).total_tokens
  except Exception as e:
  ASCIIColors.error(f"Failed to count tokens with Gemini API: {e}")
- # Fallback to tiktoken for a rough estimate
  return len(self.tokenize(text))

  def embed(self, text: str, **kwargs) -> List[float]:
- """
- Get embeddings for the input text using Gemini API.
- """
  if not self.client:
  raise Exception("Gemini client not initialized.")

- # Default to a known Gemini embedding model
  model_to_use = kwargs.get("model", "models/embedding-001")

  try:
  response = self.client.embed_content(
  model=model_to_use,
  content=text,
- task_type="retrieval_document" # or "semantic_similarity", etc.
+ task_type="retrieval_document"
  )
  return response['embedding']
  except Exception as ex:
@@ -343,18 +474,16 @@ class GeminiBinding(LollmsLLMBinding):
  raise Exception(f"Gemini embedding failed: {str(ex)}") from ex

  def get_model_info(self) -> dict:
- """Return information about the current Gemini model setup."""
  return {
  "name": self.binding_name,
  "version": genai.__version__,
  "host_address": "https://generativelanguage.googleapis.com",
  "model_name": self.model_name,
  "supports_structured_output": False,
- "supports_vision": "vision" in self.model_name or "gemini-1.5" in self.model_name,
+ "supports_vision": True,
  }

- def listModels(self) -> List[Dict[str, str]]:
- """Lists available generative models from the Gemini service."""
+ def list_models(self) -> List[Dict[str, str]]:
  if not self.client:
  ASCIIColors.error("Gemini client not initialized. Cannot list models.")
  return []
@@ -362,7 +491,6 @@ class GeminiBinding(LollmsLLMBinding):
  ASCIIColors.debug("Listing Gemini models...")
  model_info_list = []
  for m in self.client.list_models():
- # We are interested in models that can generate content.
  if 'generateContent' in m.supported_generation_methods:
  model_info_list.append({
  'model_name': m.name,
@@ -376,7 +504,6 @@ class GeminiBinding(LollmsLLMBinding):
  return []

  def load_model(self, model_name: str) -> bool:
- """Set the model name for subsequent operations."""
  self.model_name = model_name
  ASCIIColors.info(f"Gemini model set to: {model_name}. It will be used on the next API call.")
  return True
@@ -392,106 +519,81 @@ if __name__ == '__main__':

  # --- Configuration ---
  test_model_name = "gemini-1.5-pro-latest"
- test_vision_model_name = "gemini-1.5-pro-latest" # or gemini-pro-vision
- test_embedding_model = "models/embedding-001"
-
- # This variable is global to the script's execution
  full_streamed_text = ""

  try:
  # --- Initialization ---
- ASCIIColors.cyan("\n--- Initializing Binding ---")
  binding = GeminiBinding(model_name=test_model_name)
  ASCIIColors.green("Binding initialized successfully.")
- ASCIIColors.info(f"Using google-generativeai version: {genai.__version__}")
-
- # --- List Models ---
- ASCIIColors.cyan("\n--- Listing Models ---")
- models = binding.listModels()
- if models:
- ASCIIColors.green(f"Found {len(models)} generative models. First 5:")
- for m in models[:5]:
- print(m['model_name'])
- else:
- ASCIIColors.warning("No models found or failed to list models.")
-
- # --- Count Tokens ---
- ASCIIColors.cyan("\n--- Counting Tokens ---")
- sample_text = "Hello, world! This is a test."
- token_count = binding.count_tokens(sample_text)
- ASCIIColors.green(f"Token count for '{sample_text}': {token_count}")
-
- # --- Text Generation (Non-Streaming) ---
- ASCIIColors.cyan("\n--- Text Generation (Non-Streaming) ---")
- prompt_text = "Explain the importance of bees in one paragraph."
- ASCIIColors.info(f"Prompt: {prompt_text}")
- generated_text = binding.generate_text(prompt_text, n_predict=100, stream=False)
- if isinstance(generated_text, str):
- ASCIIColors.green(f"Generated text:\n{generated_text}")
- else:
- ASCIIColors.error(f"Generation failed: {generated_text}")

- # --- Text Generation (Streaming) ---
- ASCIIColors.cyan("\n--- Text Generation (Streaming) ---")
-
- def stream_callback(chunk: str, msg_type: int):
- # FIX: Use 'global' to modify the variable in the module's scope
- global full_streamed_text
- ASCIIColors.green(chunk, end="", flush=True)
- full_streamed_text += chunk
- return True
-
- # Reset for this test
- full_streamed_text = ""
- ASCIIColors.info(f"Prompt: {prompt_text}")
- result = binding.generate_text(prompt_text, n_predict=150, stream=True, streaming_callback=stream_callback)
- print("\n--- End of Stream ---")
- # 'result' is the full text after streaming, which should match our captured text.
- ASCIIColors.green(f"Full streamed text (for verification): {result}")
-
- # --- Embeddings ---
- ASCIIColors.cyan("\n--- Embeddings ---")
+ # --- Test generate_from_messages ---
+ ASCIIColors.cyan("\n--- Testing generate_from_messages ---")
+ messages = [
+ {"role": "system", "content": "You are a pirate."},
+ {"role": "user", "content": "How are you today?"}
+ ]
+ response = binding.generate_from_messages(messages, n_predict=50)
+ ASCIIColors.green(f"Pirate Response: {response}")
+
+ except Exception as e:
+ ASCIIColors.error(f"An error occurred during testing: {e}")
+ trace_exception(e)
+
+ ASCIIColors.yellow("\nGeminiBinding test finished.")
+
+ def test_connection(self) -> dict:
+ """
+ Tests the connection to the Gemini API using the provided key.
+ """
+ if not self.client:
+ return {"status": False, "message": "Client not configured. Check API Key."}
  try:
- embedding_text = "Lollms is a cool project."
- embedding_vector = binding.embed(embedding_text, model=test_embedding_model)
- ASCIIColors.green(f"Embedding for '{embedding_text}' (first 5 dims): {embedding_vector[:5]}...")
- ASCIIColors.info(f"Embedding vector dimension: {len(embedding_vector)}")
+ # Attempt to list 1 model to verify auth
+ list(self.client.list_models(page_size=1))
+ return {"status": True, "message": "Connection successful! API Key is valid."}
  except Exception as e:
- ASCIIColors.warning(f"Could not get embedding: {e}")
+ return {"status": False, "message": f"Connection failed: {str(e)}"}

- # --- Vision Model Test ---
- dummy_image_path = "gemini_dummy_test_image.png"
+ def list_tuned_models(self) -> dict:
+ """
+ Lists tuned models available for this API key.
+ """
+ if not self.client:
+ return {"status": False, "message": "Client not configured."}
  try:
- img = Image.new('RGB', (200, 50), color = ('blue'))
- d = ImageDraw.Draw(img)
- d.text((10,10), "Test Image", fill=('yellow'))
- img.save(dummy_image_path)
- ASCIIColors.info(f"Created dummy image: {dummy_image_path}")
-
- ASCIIColors.cyan(f"\n--- Vision Generation (using {test_vision_model_name}) ---")
- binding.load_model(test_vision_model_name)
- vision_prompt = "What color is the text and what does it say?"
- ASCIIColors.info(f"Vision Prompt: {vision_prompt} with image {dummy_image_path}")
+ tuned_models = []
+ # iterate over the tuned models generator
+ for model in self.client.list_tuned_models():
+ tuned_models.append(model.name)

- vision_response = binding.generate_text(
- prompt=vision_prompt,
- images=[dummy_image_path],
- n_predict=50,
- stream=False
- )
- if isinstance(vision_response, str):
- ASCIIColors.green(f"Vision model response: {vision_response}")
- else:
- ASCIIColors.error(f"Vision generation failed: {vision_response}")
+ if not tuned_models:
+ return {"status": True, "message": "No tuned models found on this account.", "models": []}
+
+ # Join them for a pretty message, but return the list for the UI
+ msg = f"Found: {', '.join(tuned_models)}"
+ return {"status": True, "message": msg, "models": tuned_models}
  except Exception as e:
- ASCIIColors.error(f"Error during vision test: {e}")
- trace_exception(e)
- finally:
- if os.path.exists(dummy_image_path):
- os.remove(dummy_image_path)
+ return {"status": False, "message": f"Could not list tuned models: {str(e)}", "models": []}

- except Exception as e:
- ASCIIColors.error(f"An error occurred during testing: {e}")
- trace_exception(e)
-
- ASCIIColors.yellow("\nGeminiBinding test finished.")
+ def get_active_model_info(self) -> dict:
+ """
+ Retrieves details about the currently selected model name.
+ """
+ if not self.client or not self.model_name:
+ return {"status": False, "message": "Client or model name not set."}
+
+ try:
+ model_info = self.client.get_model(self.model_name)
+
+ info = {
+ "display_name": model_info.display_name,
+ "description": model_info.description,
+ "input_token_limit": model_info.input_token_limit,
+ "output_token_limit": model_info.output_token_limit,
+ "supported_methods": model_info.supported_generation_methods
+ }
+
+ msg = f"Input Limit: {info['input_token_limit']}, Output Limit: {info['output_token_limit']}"
+ return {"status": True, "message": msg, "info": info}
+ except Exception as e:
+ return {"status": False, "message": f"Failed to get info for {self.model_name}: {str(e)}"}
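One rename worth flagging from the hunks above: the model-listing method changed from listModels to list_models, so code that called the old camelCase name directly on this binding needs updating. A small caller-side sketch, assuming direct access to the binding instance (whether any wrapper still exposes the old name is not shown in this diff):

# Sketch only: tolerate either method name while migrating from 1.5.6 to 1.7.10.
list_fn = getattr(binding, "list_models", None) or getattr(binding, "listModels", None)
models = list_fn() if list_fn else []
for m in models[:5]:
    print(m["model_name"])  # each entry carries a 'model_name' key per the diff above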