lollms-client 0.25.0.tar.gz → 0.25.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. {lollms_client-0.25.0/lollms_client.egg-info → lollms_client-0.25.5}/PKG-INFO +7 -7
  2. {lollms_client-0.25.0 → lollms_client-0.25.5}/README.md +7 -7
  3. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/__init__.py +1 -1
  4. lollms_client-0.25.5/lollms_client/llm_bindings/gemini/__init__.py +501 -0
  5. lollms_client-0.25.5/lollms_client/llm_bindings/litellm/__init__.py +201 -0
  6. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/openai/__init__.py +27 -7
  7. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_core.py +38 -21
  8. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_discussion.py +29 -0
  9. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/remote_mcp/__init__.py +80 -2
  10. {lollms_client-0.25.0 → lollms_client-0.25.5/lollms_client.egg-info}/PKG-INFO +7 -7
  11. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client.egg-info/SOURCES.txt +2 -0
  12. {lollms_client-0.25.0 → lollms_client-0.25.5}/LICENSE +0 -0
  13. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/article_summary/article_summary.py +0 -0
  14. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/console_discussion/console_app.py +0 -0
  15. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/console_discussion.py +0 -0
  16. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/deep_analyze/deep_analyse.py +0 -0
  17. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  18. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/function_calling_with_local_custom_mcp.py +0 -0
  19. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  20. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/generate_and_speak/generate_and_speak.py +0 -0
  21. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  22. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/generate_text_with_multihop_rag_example.py +0 -0
  23. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/gradio_chat_app.py +0 -0
  24. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/gradio_lollms_chat.py +0 -0
  25. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/internet_search_with_rag.py +0 -0
  26. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/lollms_discussions_test.py +0 -0
  27. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/mcp_examples/external_mcp.py +0 -0
  28. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/mcp_examples/local_mcp.py +0 -0
  29. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/mcp_examples/openai_mcp.py +0 -0
  30. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
  31. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
  32. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/simple_text_gen_test.py +0 -0
  33. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/simple_text_gen_with_image_test.py +0 -0
  34. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/test_local_models/local_chat.py +0 -0
  35. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/text_2_audio.py +0 -0
  36. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/text_2_image.py +0 -0
  37. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/text_2_image_diffusers.py +0 -0
  38. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/text_and_image_2_audio.py +0 -0
  39. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/text_gen.py +0 -0
  40. {lollms_client-0.25.0 → lollms_client-0.25.5}/examples/text_gen_system_prompt.py +0 -0
  41. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/__init__.py +0 -0
  42. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  43. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  44. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  45. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  46. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  47. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  48. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  49. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  50. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_config.py +0 -0
  51. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_js_analyzer.py +0 -0
  52. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_llm_binding.py +0 -0
  53. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_mcp_binding.py +0 -0
  54. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_personality.py +0 -0
  55. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_python_analyzer.py +0 -0
  56. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_stt_binding.py +0 -0
  57. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_tti_binding.py +0 -0
  58. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_ttm_binding.py +0 -0
  59. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_tts_binding.py +0 -0
  60. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_ttv_binding.py +0 -0
  61. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_types.py +0 -0
  62. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/lollms_utilities.py +0 -0
  63. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  64. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  65. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  66. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  67. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  68. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  69. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/stt_bindings/__init__.py +0 -0
  70. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  71. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  72. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  73. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tti_bindings/__init__.py +0 -0
  74. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  75. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  76. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  77. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  78. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/ttm_bindings/__init__.py +0 -0
  79. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  80. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  81. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  82. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tts_bindings/__init__.py +0 -0
  83. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  84. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  85. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  86. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  87. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/ttv_bindings/__init__.py +0 -0
  88. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  89. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client.egg-info/dependency_links.txt +0 -0
  90. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client.egg-info/requires.txt +0 -0
  91. {lollms_client-0.25.0 → lollms_client-0.25.5}/lollms_client.egg-info/top_level.txt +0 -0
  92. {lollms_client-0.25.0 → lollms_client-0.25.5}/pyproject.toml +0 -0
  93. {lollms_client-0.25.0 → lollms_client-0.25.5}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.25.0
+ Version: 0.25.5
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -186,7 +186,7 @@ graph LR
  LC -- Manages --> LLB[LLM Binding];
  LC -- Manages --> MCPB[MCP Binding];
  LC -- Orchestrates --> MCP_Interaction[generate_with_mcp];
- LC -- Provides --> HighLevelOps[High-Level Ops<br>(summarize, deep_analyze etc.)];
+ LC -- Provides --> HighLevelOps["High-Level Ops(summarize, deep_analyze etc.)"];
  LC -- Provides Access To --> DM[DiscussionManager];
  LC -- Provides Access To --> ModalityBindings[TTS, TTI, STT etc.];
  end
@@ -195,16 +195,16 @@ graph LR
  LLB --> LollmsServer[LoLLMs Server];
  LLB --> OllamaServer[Ollama];
  LLB --> OpenAPIServer[OpenAI API];
- LLB --> LocalGGUF[Local GGUF<br>(pythonllamacpp / llamacpp server)];
- LLB --> LocalHF[Local HuggingFace<br>(transformers / vLLM)];
+ LLB --> LocalGGUF["Local GGUF<br>(pythonllamacpp / llamacpp server)"];
+ LLB --> LocalHF["Local HuggingFace<br>(transformers / vLLM)"];
  end

  MCP_Interaction --> MCPB;
- MCPB --> LocalTools[Local Python Tools<br>(via local_mcp)];
- MCPB --> RemoteTools[Remote MCP Tool Servers<br>(Future Potential)];
+ MCPB --> LocalTools["Local Python Tools<br>(via local_mcp)"];
+ MCPB --> RemoteTools["Remote MCP Tool Servers<br>(Future Potential)"];


- ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)];
+ ModalityBindings --> ModalityServices["Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)"];
  ```

  * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding, an optional MCP binding, and provides access to modality bindings and high-level operations.
@@ -155,7 +155,7 @@ graph LR
  LC -- Manages --> LLB[LLM Binding];
  LC -- Manages --> MCPB[MCP Binding];
  LC -- Orchestrates --> MCP_Interaction[generate_with_mcp];
- LC -- Provides --> HighLevelOps[High-Level Ops<br>(summarize, deep_analyze etc.)];
+ LC -- Provides --> HighLevelOps["High-Level Ops(summarize, deep_analyze etc.)"];
  LC -- Provides Access To --> DM[DiscussionManager];
  LC -- Provides Access To --> ModalityBindings[TTS, TTI, STT etc.];
  end
@@ -164,16 +164,16 @@ graph LR
  LLB --> LollmsServer[LoLLMs Server];
  LLB --> OllamaServer[Ollama];
  LLB --> OpenAPIServer[OpenAI API];
- LLB --> LocalGGUF[Local GGUF<br>(pythonllamacpp / llamacpp server)];
- LLB --> LocalHF[Local HuggingFace<br>(transformers / vLLM)];
+ LLB --> LocalGGUF["Local GGUF<br>(pythonllamacpp / llamacpp server)"];
+ LLB --> LocalHF["Local HuggingFace<br>(transformers / vLLM)"];
  end

  MCP_Interaction --> MCPB;
- MCPB --> LocalTools[Local Python Tools<br>(via local_mcp)];
- MCPB --> RemoteTools[Remote MCP Tool Servers<br>(Future Potential)];
+ MCPB --> LocalTools["Local Python Tools<br>(via local_mcp)"];
+ MCPB --> RemoteTools["Remote MCP Tool Servers<br>(Future Potential)"];


- ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)];
+ ModalityBindings --> ModalityServices["Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)"];
  ```

  * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding, an optional MCP binding, and provides access to modality bindings and high-level operations.
@@ -205,4 +205,4 @@ This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LIC

  ## Changelog

- For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
+ For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager


- __version__ = "0.25.0" # Updated version
+ __version__ = "0.25.5" # Updated version

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
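A quick way to confirm which release is installed after an upgrade is to read the `__version__` attribute bumped in the hunk above (a minimal sketch, assuming the package is installed from PyPI):

```python
# Minimal check: the version string should match the bumped value above.
import lollms_client

print(lollms_client.__version__)  # expected: "0.25.5"
```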
@@ -0,0 +1,501 @@
+ # bindings/gemini/binding.py
+ import base64
+ import os
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Optional, Callable, List, Union, Dict
+
+ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
+ from lollms_client.lollms_llm_binding import LollmsLLMBinding
+ from lollms_client.lollms_types import MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+
+ import pipmaster as pm
+
+ # Ensure the required packages are installed
+ pm.ensure_packages(["google-generativeai", "pillow", "tiktoken", "protobuf"])
+
+ import google.generativeai as genai
+ from PIL import Image, ImageDraw # ImageDraw is used in the test script below
+ import tiktoken
+
+ BindingName = "GeminiBinding"
+
+ # Helper to check if a string is a valid path to an image
+ def is_image_path(path_str: str) -> bool:
+     try:
+         p = Path(path_str)
+         return p.is_file() and p.suffix.lower() in ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp']
+     except Exception:
+         return False
+
+ class GeminiBinding(LollmsLLMBinding):
+     """Google Gemini-specific binding implementation."""
+
+     def __init__(self,
+                  host_address: str = None, # Ignored, for compatibility
+                  model_name: str = "gemini-1.5-pro-latest",
+                  service_key: str = None,
+                  verify_ssl_certificate: bool = True, # Ignored, for compatibility
+                  **kwargs
+                  ):
+         """
+         Initialize the Gemini binding.
+
+         Args:
+             model_name (str): Name of the Gemini model to use.
+             service_key (str): Google AI Studio API key.
+         """
+         super().__init__(binding_name=BindingName)
+         self.model_name = model_name
+         self.service_key = service_key
+
+         if not self.service_key:
+             self.service_key = os.getenv("GOOGLE_API_KEY")
+
+         if not self.service_key:
+             raise ValueError("Google API key is required. Please set it via the 'service_key' parameter or the GOOGLE_API_KEY environment variable.")
+
+         try:
+             genai.configure(api_key=self.service_key)
+             self.client = genai # Alias for consistency
+         except Exception as e:
+             ASCIIColors.error(f"Failed to configure Gemini client: {e}")
+             self.client = None
+             raise ConnectionError(f"Could not configure Gemini client: {e}") from e
+
+     def get_generation_config(self,
+                               temperature: float,
+                               top_p: float,
+                               top_k: int,
+                               n_predict: int) -> genai.types.GenerationConfig:
+         """Builds a GenerationConfig object from parameters."""
+         config = {}
+         if temperature is not None: config['temperature'] = float(temperature)
+         if top_p is not None: config['top_p'] = top_p
+         if top_k is not None: config['top_k'] = top_k
+         if n_predict is not None: config['max_output_tokens'] = n_predict
+         return genai.types.GenerationConfig(**config)
+
+     def generate_text(self,
+                       prompt: str,
+                       images: Optional[List[str]] = None,
+                       system_prompt: str = "",
+                       n_predict: Optional[int] = 2048,
+                       stream: Optional[bool] = False,
+                       temperature: float = 0.7,
+                       top_k: int = 40,
+                       top_p: float = 0.9,
+                       repeat_penalty: float = 1.1, # Not directly supported by Gemini API
+                       repeat_last_n: int = 64, # Not directly supported
+                       seed: Optional[int] = None, # Not directly supported
+                       n_threads: Optional[int] = None, # Not applicable
+                       ctx_size: int | None = None, # Determined by model, not settable per-call
+                       streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                       split:Optional[bool]=False,
+                       user_keyword:Optional[str]="!@>user:",
+                       ai_keyword:Optional[str]="!@>assistant:",
+                       ) -> Union[str, dict]:
+         """
+         Generate text using the Gemini model.
+
+         Args:
+             prompt (str): The input prompt for text generation.
+             images (Optional[List[str]]): List of image file paths or base64 strings.
+             system_prompt (str): The system prompt to guide the model.
+             ... other LollmsLLMBinding parameters ...
+
+         Returns:
+             Union[str, dict]: Generated text or error dictionary.
+         """
+         if not self.client:
+             return {"status": False, "error": "Gemini client not initialized."}
+
+         # Gemini uses 'system_instruction' for GenerativeModel, not part of the regular message list.
+         model = self.client.GenerativeModel(
+             model_name=self.model_name,
+             system_instruction=system_prompt if system_prompt else None
+         )
+
+         generation_config = self.get_generation_config(temperature, top_p, top_k, n_predict)
+
+         # Prepare content for the API call
+         content_parts = []
+         if split:
+             # Note: The 'split' logic for Gemini should ideally build a multi-turn history,
+             # but for `generate_text`, we'll treat the last user part as the main prompt.
+             discussion_messages = self.split_discussion(prompt, user_keyword, ai_keyword)
+             if discussion_messages:
+                 last_message = discussion_messages[-1]['content']
+                 content_parts.append(last_message)
+             else:
+                 content_parts.append(prompt)
+         else:
+             content_parts.append(prompt)
+
+         if images:
+             for image_data in images:
+                 try:
+                     if is_image_path(image_data):
+                         img = Image.open(image_data)
+                     else: # Assume base64
+                         img = Image.open(BytesIO(base64.b64decode(image_data)))
+                     content_parts.append(img)
+                 except Exception as e:
+                     error_msg = f"Failed to process image: {e}"
+                     ASCIIColors.error(error_msg)
+                     return {"status": False, "error": error_msg}
+
+         full_response_text = ""
+         try:
+             response = model.generate_content(
+                 contents=content_parts,
+                 generation_config=generation_config,
+                 stream=stream
+             )
+
+             if stream:
+                 for chunk in response:
+                     try:
+                         chunk_text = chunk.text
+                     except ValueError:
+                         # Handle potential empty parts in the stream
+                         chunk_text = ""
+
+                     if chunk_text:
+                         full_response_text += chunk_text
+                         if streaming_callback:
+                             if not streaming_callback(chunk_text, MSG_TYPE.MSG_TYPE_CHUNK):
+                                 break # Callback requested stop
+                 return full_response_text
+             else:
+                 # Check for safety blocks
+                 if response.prompt_feedback.block_reason:
+                     error_msg = f"Content blocked due to: {response.prompt_feedback.block_reason.name}"
+                     ASCIIColors.warning(error_msg)
+                     return {"status": False, "error": error_msg}
+                 return response.text
+
+         except Exception as ex:
+             error_message = f"An unexpected error occurred with Gemini API: {str(ex)}"
+             trace_exception(ex)
+             return {"status": False, "error": error_message}
+
+     def chat(self,
+              discussion: LollmsDiscussion,
+              branch_tip_id: Optional[str] = None,
+              n_predict: Optional[int] = 2048,
+              stream: Optional[bool] = False,
+              temperature: float = 0.7,
+              top_k: int = 40,
+              top_p: float = 0.9,
+              repeat_penalty: float = 1.1,
+              repeat_last_n: int = 64,
+              seed: Optional[int] = None,
+              n_threads: Optional[int] = None,
+              ctx_size: Optional[int] = None,
+              streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+              ) -> Union[str, dict]:
+         """
+         Conduct a chat session with the Gemini model using a LollmsDiscussion object.
+         """
+         if not self.client:
+             return {"status": "error", "message": "Gemini client not initialized."}
+
+         # 1. Manually export discussion to Gemini's format.
+         # Gemini uses 'user' and 'model' roles.
+         # The system prompt is handled separately at model initialization.
+         system_prompt = discussion.system_prompt
+         messages = discussion.get_messages(branch_tip_id)
+
+         history = []
+         for msg in messages:
+             role = 'user' if msg.sender_type == "user" else 'model'
+
+             # Handle multimodal content in the message
+             content_parts = []
+             if msg.content:
+                 content_parts.append(msg.content)
+
+             # Check for images associated with this message
+             if msg.images:
+                 for file_path in msg.images:
+                     if is_image_path(file_path):
+                         try:
+                             content_parts.append(Image.open(file_path))
+                         except Exception as e:
+                             ASCIIColors.warning(f"Could not load image {file_path}: {e}")
+
+             if content_parts:
+                 history.append({'role': role, 'parts': content_parts})
+
+         model = self.client.GenerativeModel(
+             model_name=self.model_name,
+             system_instruction=system_prompt
+         )
+
+         # History must not be empty and should not contain consecutive roles of the same type.
+         # We also need to separate the final prompt from the history.
+         if not history:
+             return {"status": "error", "message": "Cannot start chat with an empty discussion."}
+
+         chat_history = history[:-1] if len(history) > 1 else []
+         last_prompt_parts = history[-1]['parts']
+
+         # Ensure history is valid (no consecutive same roles)
+         valid_history = []
+         if chat_history:
+             valid_history.append(chat_history[0])
+             for i in range(1, len(chat_history)):
+                 if chat_history[i]['role'] != chat_history[i-1]['role']:
+                     valid_history.append(chat_history[i])
+
+         chat_session = model.start_chat(history=valid_history)
+
+         generation_config = self.get_generation_config(temperature, top_p, top_k, n_predict)
+
+         full_response_text = ""
+         try:
+             response = chat_session.send_message(
+                 content=last_prompt_parts,
+                 generation_config=generation_config,
+                 stream=stream
+             )
+
+             if stream:
+                 for chunk in response:
+                     try:
+                         chunk_text = chunk.text
+                     except ValueError:
+                         chunk_text = ""
+
+                     if chunk_text:
+                         full_response_text += chunk_text
+                         if streaming_callback:
+                             if not streaming_callback(chunk_text, MSG_TYPE.MSG_TYPE_CHUNK):
+                                 break
+                 return full_response_text
+             else:
+                 if response.prompt_feedback.block_reason:
+                     error_msg = f"Content blocked due to: {response.prompt_feedback.block_reason.name}"
+                     ASCIIColors.warning(error_msg)
+                     return {"status": "error", "message": error_msg}
+                 return response.text
+
+         except Exception as ex:
+             error_message = f"An unexpected error occurred with Gemini API: {str(ex)}"
+             trace_exception(ex)
+             return {"status": "error", "message": error_message}
+
+     def tokenize(self, text: str) -> list:
+         """
+         Tokenize the input text.
+         Note: Gemini doesn't expose a public tokenizer API.
+         Using tiktoken for a rough estimate, NOT accurate for Gemini.
+         """
+         try:
+             encoding = tiktoken.get_encoding("cl100k_base")
+             return encoding.encode(text)
+         except:
+             return list(text.encode('utf-8'))
+
+     def detokenize(self, tokens: list) -> str:
+         """
+         Detokenize a list of tokens.
+         Note: Based on the placeholder tokenizer.
+         """
+         try:
+             encoding = tiktoken.get_encoding("cl100k_base")
+             return encoding.decode(tokens)
+         except:
+             return bytes(tokens).decode('utf-8', errors='ignore')
+
+     def count_tokens(self, text: str) -> int:
+         """
+         Count tokens from a text using the Gemini API.
+         """
+         if not self.client or not self.model_name:
+             ASCIIColors.warning("Cannot count tokens, Gemini client or model_name not set.")
+             return -1
+         try:
+             model = self.client.GenerativeModel(self.model_name)
+             return model.count_tokens(text).total_tokens
+         except Exception as e:
+             ASCIIColors.error(f"Failed to count tokens with Gemini API: {e}")
+             # Fallback to tiktoken for a rough estimate
+             return len(self.tokenize(text))
+
+     def embed(self, text: str, **kwargs) -> List[float]:
+         """
+         Get embeddings for the input text using Gemini API.
+         """
+         if not self.client:
+             raise Exception("Gemini client not initialized.")
+
+         # Default to a known Gemini embedding model
+         model_to_use = kwargs.get("model", "models/embedding-001")
+
+         try:
+             response = self.client.embed_content(
+                 model=model_to_use,
+                 content=text,
+                 task_type="retrieval_document" # or "semantic_similarity", etc.
+             )
+             return response['embedding']
+         except Exception as ex:
+             trace_exception(ex)
+             raise Exception(f"Gemini embedding failed: {str(ex)}") from ex
+
+     def get_model_info(self) -> dict:
+         """Return information about the current Gemini model setup."""
+         return {
+             "name": self.binding_name,
+             "version": genai.__version__,
+             "host_address": "https://generativelanguage.googleapis.com",
+             "model_name": self.model_name,
+             "supports_structured_output": False,
+             "supports_vision": "vision" in self.model_name or "gemini-1.5" in self.model_name,
+         }
+
+     def listModels(self) -> List[Dict[str, str]]:
+         """Lists available generative models from the Gemini service."""
+         if not self.client:
+             ASCIIColors.error("Gemini client not initialized. Cannot list models.")
+             return []
+         try:
+             ASCIIColors.debug("Listing Gemini models...")
+             model_info_list = []
+             for m in self.client.list_models():
+                 # We are interested in models that can generate content.
+                 if 'generateContent' in m.supported_generation_methods:
+                     model_info_list.append({
+                         'model_name': m.name,
+                         'display_name': m.display_name,
+                         'description': m.description,
+                         'owned_by': 'Google'
+                     })
+             return model_info_list
+         except Exception as ex:
+             trace_exception(ex)
+             return []
+
+     def load_model(self, model_name: str) -> bool:
+         """Set the model name for subsequent operations."""
+         self.model_name = model_name
+         ASCIIColors.info(f"Gemini model set to: {model_name}. It will be used on the next API call.")
+         return True
+
+ if __name__ == '__main__':
+     # Example Usage (requires GOOGLE_API_KEY environment variable)
+     if 'GOOGLE_API_KEY' not in os.environ:
+         ASCIIColors.red("Error: GOOGLE_API_KEY environment variable not set.")
+         print("Please get your key from Google AI Studio and set it.")
+         exit(1)
+
+     ASCIIColors.yellow("--- Testing GeminiBinding ---")
+
+     # --- Configuration ---
+     test_model_name = "gemini-1.5-pro-latest"
+     test_vision_model_name = "gemini-1.5-pro-latest" # or gemini-pro-vision
+     test_embedding_model = "models/embedding-001"
+
+     # This variable is global to the script's execution
+     full_streamed_text = ""
+
+     try:
+         # --- Initialization ---
+         ASCIIColors.cyan("\n--- Initializing Binding ---")
+         binding = GeminiBinding(model_name=test_model_name)
+         ASCIIColors.green("Binding initialized successfully.")
+         ASCIIColors.info(f"Using google-generativeai version: {genai.__version__}")
+
+         # --- List Models ---
+         ASCIIColors.cyan("\n--- Listing Models ---")
+         models = binding.listModels()
+         if models:
+             ASCIIColors.green(f"Found {len(models)} generative models. First 5:")
+             for m in models[:5]:
+                 print(m['model_name'])
+         else:
+             ASCIIColors.warning("No models found or failed to list models.")
+
+         # --- Count Tokens ---
+         ASCIIColors.cyan("\n--- Counting Tokens ---")
+         sample_text = "Hello, world! This is a test."
+         token_count = binding.count_tokens(sample_text)
+         ASCIIColors.green(f"Token count for '{sample_text}': {token_count}")
+
+         # --- Text Generation (Non-Streaming) ---
+         ASCIIColors.cyan("\n--- Text Generation (Non-Streaming) ---")
+         prompt_text = "Explain the importance of bees in one paragraph."
+         ASCIIColors.info(f"Prompt: {prompt_text}")
+         generated_text = binding.generate_text(prompt_text, n_predict=100, stream=False)
+         if isinstance(generated_text, str):
+             ASCIIColors.green(f"Generated text:\n{generated_text}")
+         else:
+             ASCIIColors.error(f"Generation failed: {generated_text}")
+
+         # --- Text Generation (Streaming) ---
+         ASCIIColors.cyan("\n--- Text Generation (Streaming) ---")
+
+         def stream_callback(chunk: str, msg_type: int):
+             # FIX: Use 'global' to modify the variable in the module's scope
+             global full_streamed_text
+             ASCIIColors.green(chunk, end="", flush=True)
+             full_streamed_text += chunk
+             return True
+
+         # Reset for this test
+         full_streamed_text = ""
+         ASCIIColors.info(f"Prompt: {prompt_text}")
+         result = binding.generate_text(prompt_text, n_predict=150, stream=True, streaming_callback=stream_callback)
+         print("\n--- End of Stream ---")
+         # 'result' is the full text after streaming, which should match our captured text.
+         ASCIIColors.green(f"Full streamed text (for verification): {result}")
+
+         # --- Embeddings ---
+         ASCIIColors.cyan("\n--- Embeddings ---")
+         try:
+             embedding_text = "Lollms is a cool project."
+             embedding_vector = binding.embed(embedding_text, model=test_embedding_model)
+             ASCIIColors.green(f"Embedding for '{embedding_text}' (first 5 dims): {embedding_vector[:5]}...")
+             ASCIIColors.info(f"Embedding vector dimension: {len(embedding_vector)}")
+         except Exception as e:
+             ASCIIColors.warning(f"Could not get embedding: {e}")
+
+         # --- Vision Model Test ---
+         dummy_image_path = "gemini_dummy_test_image.png"
+         try:
+             img = Image.new('RGB', (200, 50), color = ('blue'))
+             d = ImageDraw.Draw(img)
+             d.text((10,10), "Test Image", fill=('yellow'))
+             img.save(dummy_image_path)
+             ASCIIColors.info(f"Created dummy image: {dummy_image_path}")
+
+             ASCIIColors.cyan(f"\n--- Vision Generation (using {test_vision_model_name}) ---")
+             binding.load_model(test_vision_model_name)
+             vision_prompt = "What color is the text and what does it say?"
+             ASCIIColors.info(f"Vision Prompt: {vision_prompt} with image {dummy_image_path}")
+
+             vision_response = binding.generate_text(
+                 prompt=vision_prompt,
+                 images=[dummy_image_path],
+                 n_predict=50,
+                 stream=False
+             )
+             if isinstance(vision_response, str):
+                 ASCIIColors.green(f"Vision model response: {vision_response}")
+             else:
+                 ASCIIColors.error(f"Vision generation failed: {vision_response}")
+         except Exception as e:
+             ASCIIColors.error(f"Error during vision test: {e}")
+             trace_exception(e)
+         finally:
+             if os.path.exists(dummy_image_path):
+                 os.remove(dummy_image_path)
+
+     except Exception as e:
+         ASCIIColors.error(f"An error occurred during testing: {e}")
+         trace_exception(e)
+
+     ASCIIColors.yellow("\nGeminiBinding test finished.")
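A minimal usage sketch for the Gemini binding added in this release. It instantiates `GeminiBinding` directly from the new `lollms_client.llm_bindings.gemini` module using the constructor and `generate_text` signature shown above; the model name and prompts are illustrative, and a valid `GOOGLE_API_KEY` is assumed.

```python
# Minimal sketch, assuming lollms_client 0.25.5 and a valid GOOGLE_API_KEY.
import os

from lollms_client.llm_bindings.gemini import GeminiBinding

# The binding falls back to the GOOGLE_API_KEY environment variable if service_key is omitted.
binding = GeminiBinding(model_name="gemini-1.5-pro-latest",
                        service_key=os.getenv("GOOGLE_API_KEY"))

# Non-streaming generation: returns the generated string, or an error dictionary.
result = binding.generate_text("Summarize what an LLM binding is in one sentence.",
                               n_predict=64, stream=False)
print(result)

# Streaming generation: the callback receives each chunk; returning False stops the stream.
def on_chunk(chunk, msg_type):
    print(chunk, end="", flush=True)
    return True

binding.generate_text("List three backends a lollms_client binding might wrap.",
                      stream=True, streaming_callback=on_chunk)
```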