lollms-client 0.32.1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
- lollms_client/llm_bindings/claude/__init__.py +4 -7
- lollms_client/llm_bindings/gemini/__init__.py +3 -7
- lollms_client/llm_bindings/grok/__init__.py +3 -7
- lollms_client/llm_bindings/groq/__init__.py +4 -7
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
- lollms_client/llm_bindings/litellm/__init__.py +15 -6
- lollms_client/llm_bindings/llamacpp/__init__.py +214 -388
- lollms_client/llm_bindings/lollms/__init__.py +24 -14
- lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
- lollms_client/llm_bindings/mistral/__init__.py +58 -29
- lollms_client/llm_bindings/ollama/__init__.py +6 -11
- lollms_client/llm_bindings/open_router/__init__.py +45 -14
- lollms_client/llm_bindings/openai/__init__.py +7 -14
- lollms_client/llm_bindings/openllm/__init__.py +12 -12
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
- lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
- lollms_client/llm_bindings/transformers/__init__.py +14 -6
- lollms_client/llm_bindings/vllm/__init__.py +16 -12
- lollms_client/lollms_core.py +296 -487
- lollms_client/lollms_discussion.py +436 -78
- lollms_client/lollms_llm_binding.py +223 -11
- lollms_client/lollms_mcp_binding.py +33 -2
- lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
- lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
- lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
- lollms_client/stt_bindings/lollms/__init__.py +6 -8
- lollms_client/stt_bindings/whisper/__init__.py +2 -4
- lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
- lollms_client/tti_bindings/dalle/__init__.py +29 -28
- lollms_client/tti_bindings/diffusers/__init__.py +25 -21
- lollms_client/tti_bindings/gemini/__init__.py +215 -0
- lollms_client/tti_bindings/lollms/__init__.py +8 -9
- lollms_client-1.0.0.dist-info/METADATA +1214 -0
- lollms_client-1.0.0.dist-info/RECORD +69 -0
- {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/top_level.txt +0 -2
- examples/article_summary/article_summary.py +0 -58
- examples/console_discussion/console_app.py +0 -266
- examples/console_discussion.py +0 -448
- examples/deep_analyze/deep_analyse.py +0 -30
- examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
- examples/function_calling_with_local_custom_mcp.py +0 -250
- examples/generate_a_benchmark_for_safe_store.py +0 -89
- examples/generate_and_speak/generate_and_speak.py +0 -251
- examples/generate_game_sfx/generate_game_fx.py +0 -240
- examples/generate_text_with_multihop_rag_example.py +0 -210
- examples/gradio_chat_app.py +0 -228
- examples/gradio_lollms_chat.py +0 -259
- examples/internet_search_with_rag.py +0 -226
- examples/lollms_chat/calculator.py +0 -59
- examples/lollms_chat/derivative.py +0 -48
- examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
- examples/lollms_discussions_test.py +0 -155
- examples/mcp_examples/external_mcp.py +0 -267
- examples/mcp_examples/local_mcp.py +0 -171
- examples/mcp_examples/openai_mcp.py +0 -203
- examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
- examples/mcp_examples/run_standard_mcp_example.py +0 -204
- examples/simple_text_gen_test.py +0 -173
- examples/simple_text_gen_with_image_test.py +0 -178
- examples/test_local_models/local_chat.py +0 -9
- examples/text_2_audio.py +0 -77
- examples/text_2_image.py +0 -144
- examples/text_2_image_diffusers.py +0 -274
- examples/text_and_image_2_audio.py +0 -59
- examples/text_gen.py +0 -30
- examples/text_gen_system_prompt.py +0 -29
- lollms_client-0.32.1.dist-info/METADATA +0 -854
- lollms_client-0.32.1.dist-info/RECORD +0 -101
- test/test_lollms_discussion.py +0 -368
- {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/licenses/LICENSE +0 -0
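Most of the removed examples shown below build on the same minimal client pattern: construct a LollmsClient, optionally register a streaming callback, and call generate_text. A minimal sketch of that pattern, distilled from the deleted files further down (the server address, prompt, and sampling parameters are illustrative):

from lollms_client import LollmsClient

# Connect to a lollms server (address is illustrative; the deleted examples
# also show binding-based construction, e.g. LollmsClient("ollama", ...))
lc = LollmsClient("http://localhost:9600")

# Streaming callback: receives each generated chunk and its message type
def cb(chunk, type):
    print(chunk, end="", flush=True)

# Generate text; the parameters mirror the calls in the deleted examples
response = lc.generate_text(prompt="Once upon a time", stream=False,
                            temperature=0.5, streaming_callback=cb)
print(response)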

@@ -1,274 +0,0 @@
# lollms_client/examples/test_tti_bindings.py
from lollms_client import LollmsClient
from lollms_client.lollms_types import MSG_TYPE # If using callbacks
from ascii_colors import ASCIIColors, trace_exception
from PIL import Image
from pathlib import Path
import io
import os
import platform # For opening image
import subprocess # For opening image
import shutil # For cleanup of diffusers env
import json # For pretty printing dicts
from typing import Optional
try:
    from huggingface_hub import snapshot_download
    HUGGINGFACE_HUB_AVAILABLE = True
except ImportError:
    HUGGINGFACE_HUB_AVAILABLE = False
    snapshot_download = None
    ASCIIColors.warning("huggingface_hub library not found. Diffusers model download test will be skipped.")
    ASCIIColors.warning("Please install with: pip install huggingface-hub")

LOLLMS_CLIENT_ID = "my_lollms_test_client_id"

# --- Diffusers Test Specific Configuration ---
# Using a standard Stable Diffusion model
DIFFUSERS_MODEL_ID = "runwayml/stable-diffusion-v1-5" # Standard SD 1.5 model
DIFFUSERS_LOCAL_MODEL_NAME = "sd-v1-5-test-model" # Folder name for the downloaded model
TEMP_DIFFUSERS_ENV_DIR_NAME = "temp_diffusers_lollms_env_for_test"
BASE_TEST_PATH = Path(__file__).parent

def setup_diffusers_environment(base_path: Path):
    ASCIIColors.cyan(f"\n--- Setting up Diffusers Test Environment for {DIFFUSERS_MODEL_ID} at {base_path} ---")
    if not HUGGINGFACE_HUB_AVAILABLE:
        raise ImportError("huggingface_hub is not available. Cannot set up Diffusers environment.")

    temp_env_root = base_path / TEMP_DIFFUSERS_ENV_DIR_NAME
    if temp_env_root.exists():
        shutil.rmtree(temp_env_root)
    temp_env_root.mkdir(parents=True, exist_ok=True)

    lollms_paths = {
        "personal_models_path": temp_env_root / "personal_models",
        "models_zoo_path": temp_env_root / "models_zoo",
        "shared_cache_path": temp_env_root / "shared_cache",
        "tti_bindings_path": base_path.parent / "tti_bindings"
    }
    for p_key, p_val in lollms_paths.items():
        if p_key != "tti_bindings_path":
            Path(p_val).mkdir(parents=True, exist_ok=True)

    diffusers_models_dir = lollms_paths["personal_models_path"] / "diffusers_models"
    diffusers_models_dir.mkdir(parents=True, exist_ok=True)

    model_target_dir = diffusers_models_dir / DIFFUSERS_LOCAL_MODEL_NAME

    ASCIIColors.info(f"Attempting to download {DIFFUSERS_MODEL_ID} to {model_target_dir}...")
    try:
        # SD 1.5 often has fp16 revision, which is smaller and faster if GPU is used
        # For CPU test, main revision is fine. Safetensors are preferred.
        snapshot_download(
            repo_id=DIFFUSERS_MODEL_ID,
            local_dir=str(model_target_dir),
            local_dir_use_symlinks=False,
            cache_dir=str(lollms_paths["shared_cache_path"] / "huggingface_hub_cache"),
            # revision="fp16", # Optional: if you want the fp16 variant specifically
            allow_patterns=["*.json", "*.txt", "*.safetensors"], # Prefer safetensors
            # ignore_patterns=["*.bin", "*.ckpt"], # Ignore older formats if safetensors exist
        )
        ASCIIColors.green(f"Model {DIFFUSERS_MODEL_ID} downloaded successfully.")

    except Exception as e:
        trace_exception(e)
        ASCIIColors.error(f"Failed to download model {DIFFUSERS_MODEL_ID} or assertion failed: {e}")
        if model_target_dir.exists():
            ASCIIColors.info(f"Contents of {model_target_dir} ({model_target_dir.resolve()}):")
            for item_path in model_target_dir.rglob('*'):
                if item_path.is_file(): ASCIIColors.info(f"  - {item_path.relative_to(model_target_dir)}")
                elif item_path.is_dir(): ASCIIColors.info(f"  DIR: {item_path.relative_to(model_target_dir)}")
        raise

    binding_instance_config = {
        "model_id_or_path": DIFFUSERS_LOCAL_MODEL_NAME,
        "pipeline_class_name": None, # Let AutoPipelineForText2Image handle SD 1.5
        "device": "cpu", # Keep CPU for stable CI/testing
        "torch_dtype_str": "float32", # float32 for CPU
        "num_inference_steps": 10, # Fewer steps for faster test
        "default_width": 512, # Standard for SD 1.5
        "default_height": 512,
        "safety_checker_on": False, # Commonly disabled for local testing
        "lollms_paths": lollms_paths,
        "hf_variant": None # If using main revision, no variant needed. Use "fp16" if you downloaded that revision.
    }
    return binding_instance_config, lollms_paths

def cleanup_diffusers_environment(base_path: Path):
    ASCIIColors.cyan("\n--- Cleaning up Diffusers Test Environment ---")
    temp_env_root = base_path / TEMP_DIFFUSERS_ENV_DIR_NAME
    if temp_env_root.exists():
        try:
            shutil.rmtree(temp_env_root)
            ASCIIColors.info(f"Cleaned up Diffusers temp environment: {temp_env_root}")
        except Exception as e:
            ASCIIColors.warning(f"Could not fully clean up {temp_env_root}: {e}")
            trace_exception(e)

def test_list_tti_services(lc: LollmsClient):
    ASCIIColors.cyan("\n--- Testing List TTI Services ---")
    try:
        services = lc.tti.list_services()
        if services:
            ASCIIColors.green(f"Available TTI Services for binding '{lc.tti.binding_name}':")
            for i, service in enumerate(services):
                print(f"  {i+1}. Name: {service.get('name')}, Caption: {service.get('caption')}, Help: {service.get('help')}")
            assert len(services) > 0, "Expected at least one service to be listed."
        else:
            ASCIIColors.yellow("No TTI services listed or an empty list was returned.")
    except Exception as e:
        ASCIIColors.error(f"Error listing TTI services: {e}")
        trace_exception(e)
        raise

def test_get_tti_settings(lc: LollmsClient):
    ASCIIColors.cyan("\n--- Testing Get Active TTI Settings ---")
    try:
        settings = lc.tti.get_settings() # This should be a list of dicts
        if settings:
            ASCIIColors.green(f"Current Active TTI Settings/Template for binding '{lc.tti.binding_name}':")
            for setting_item in settings:
                # Ensure setting_item is a dictionary before trying to access .get()
                if isinstance(setting_item, dict):
                    print(f"  - Name: {setting_item.get('name')}, Type: {setting_item.get('type')}, Value: {setting_item.get('value')}")
                else:
                    ASCIIColors.warning(f"Found non-dict item in settings list: {setting_item}")
            assert isinstance(settings, list) and len(settings) > 0, "Expected settings to be a non-empty list of dicts."
        elif isinstance(settings, dict) and not settings:
            ASCIIColors.yellow("No active TTI service or settings configured on the server (empty dict).")
        else:
            ASCIIColors.yellow("Could not retrieve TTI settings or format unexpected.")
            print(f"Received: {settings}")
    except Exception as e:
        ASCIIColors.error(f"Error getting TTI settings: {e}")
        trace_exception(e)
        raise

def test_set_tti_settings(lc: LollmsClient):
    ASCIIColors.cyan("\n--- Testing Set Active TTI Settings ---")
    if lc.tti.binding_name == "diffusers":
        ASCIIColors.info("Attempting to change 'num_inference_steps' for Diffusers.")
        try:
            original_settings = lc.tti.get_settings()
            original_steps = None
            for s_item in original_settings:
                if isinstance(s_item, dict) and s_item.get('name') == 'num_inference_steps':
                    original_steps = s_item['value']
                    break
            assert original_steps is not None, "Could not find 'num_inference_steps' in Diffusers settings."

            new_steps = int(original_steps) + 1
            settings_to_set = [{"name": "num_inference_steps", "value": new_steps}]

            success = lc.tti.set_settings(settings_to_set)
            if success:
                ASCIIColors.green(f"Successfully sent request to set 'num_inference_steps' to {new_steps}.")
                current_settings_after_set = lc.tti.get_settings()
                current_config_steps = None
                for s_item_after in current_settings_after_set:
                    if isinstance(s_item_after, dict) and s_item_after.get('name') == 'num_inference_steps':
                        current_config_steps = s_item_after['value']
                        break
                assert current_config_steps == new_steps, f"Verification failed: settings show {current_config_steps}, expected {new_steps}"
                ASCIIColors.green("Setting change verified in binding's settings.")
            else:
                ASCIIColors.red("Failed to set TTI settings (binding indicated failure or no change).")
                assert False, "set_settings returned False"
        except Exception as e:
            ASCIIColors.error(f"Error setting Diffusers TTI settings: {e}")
            trace_exception(e)
            raise
    else:
        ASCIIColors.yellow(f"Skipping actual setting change for '{lc.tti.binding_name}' in this detailed test.")

def test_generate_image(lc: LollmsClient, output_filename: Path, prompt: str, negative_prompt: Optional[str], width: int, height: int):
    ASCIIColors.cyan(f"\n--- Testing Generate Image ({lc.tti.binding_name}) ---")
    ASCIIColors.info(f"Output to: {output_filename}")
    ASCIIColors.info(f"Prompt: {prompt}")
    if negative_prompt: ASCIIColors.info(f"Negative Prompt: {negative_prompt}")
    ASCIIColors.info(f"Dimensions: {width}x{height}")

    try:
        image_bytes = lc.tti.generate_image(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height
        )

        if image_bytes:
            ASCIIColors.green(f"Image generated successfully ({len(image_bytes)} bytes).")
            try:
                image = Image.open(io.BytesIO(image_bytes))
                image.save(output_filename)
                ASCIIColors.green(f"Image saved as {output_filename}")
                if os.name == 'nt':
                    os.startfile(str(output_filename))
                elif os.name == 'posix':
                    try:
                        opener = "open" if platform.system() == "Darwin" else "xdg-open"
                        subprocess.run([opener, str(output_filename)], check=False, timeout=5)
                    except Exception:
                        ASCIIColors.yellow(f"Could not auto-open image. Please find it at {output_filename}")
            except Exception as e:
                ASCIIColors.error(f"Error processing or saving image: {e}")
                trace_exception(e)
                raw_output_filename = output_filename.with_suffix(".raw_data")
                with open(raw_output_filename, "wb") as f_raw: f_raw.write(image_bytes)
                ASCIIColors.yellow(f"Raw image data saved as {raw_output_filename} for inspection.")
                raise
        else:
            ASCIIColors.red("Image generation returned empty bytes.")
            assert False, "Image generation returned empty bytes"
    except Exception as e:
        ASCIIColors.error(f"Error during image generation: {e}")
        trace_exception(e)
        raise

if __name__ == "__main__":
    # --- DALL-E Test ---
    ASCIIColors.magenta("\n\n========== DALL-E Binding Test ==========")
    if not os.environ.get("OPENAI_API_KEY"):
        ASCIIColors.warning("OPENAI_API_KEY environment variable not set. Skipping DALL-E tests.")
    else:
        try:
            lc_dalle = LollmsClient(tti_binding_name="dalle", service_key=LOLLMS_CLIENT_ID)
            if not lc_dalle.tti: ASCIIColors.error("DALL-E TTI binding could not be initialized.")
            else:
                test_list_tti_services(lc_dalle)
                test_get_tti_settings(lc_dalle)
                test_set_tti_settings(lc_dalle)
                test_generate_image(lc_dalle, BASE_TEST_PATH / "generated_dalle_image.png",
                                    "A vibrant oil painting of a mythical creature in an enchanted forest",
                                    "photorealistic, modern, ugly, deformed", 1024, 1024)
        except Exception as e:
            ASCIIColors.error(f"DALL-E test block failed: {e}"); trace_exception(e)

    # --- Diffusers Test ---
    ASCIIColors.magenta("\n\n========== Diffusers Binding Test ==========")
    if not HUGGINGFACE_HUB_AVAILABLE:
        ASCIIColors.warning("Skipping Diffusers tests as huggingface_hub is not available.")
    else:
        diffusers_binding_config = None
        try:
            diffusers_binding_config, _ = setup_diffusers_environment(BASE_TEST_PATH)
            lc_diffusers = LollmsClient(tti_binding_name="diffusers", binding_config=diffusers_binding_config, service_key=LOLLMS_CLIENT_ID)
            if not lc_diffusers.tti: raise RuntimeError("Diffusers TTI binding failed to initialize")

            test_list_tti_services(lc_diffusers)
            test_get_tti_settings(lc_diffusers)
            test_set_tti_settings(lc_diffusers)

            gen_width = lc_diffusers.tti.config.get("default_width", 512)
            gen_height = lc_diffusers.tti.config.get("default_height", 512)
            diffusers_prompt = "A majestic griffin soaring through a cloudy sky, detailed feathers, fantasy art"
            diffusers_negative_prompt = "ugly, blurry, low quality, watermark, text, simple background"

            test_generate_image(lc_diffusers, BASE_TEST_PATH / "generated_diffusers_image.png",
                                diffusers_prompt, diffusers_negative_prompt,
                                gen_width, gen_height)
        except Exception as e:
            ASCIIColors.error(f"Diffusers test block failed: {e}"); trace_exception(e)
        finally:
            cleanup_diffusers_environment(BASE_TEST_PATH)

    ASCIIColors.magenta("\n\n========== All Tests Finished ==========")
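Distilled from the test above, the minimal happy-path TTI call is a client with a TTI binding plus a single generate_image call; a sketch mirroring the test's DALL-E branch (assumes the dalle binding and an OPENAI_API_KEY in the environment, with an illustrative output path):

from lollms_client import LollmsClient

# Construct a client with a TTI binding, as in the test's DALL-E branch
lc = LollmsClient(tti_binding_name="dalle", service_key="my_lollms_test_client_id")

# generate_image returns raw image bytes on success
image_bytes = lc.tti.generate_image(
    prompt="A vibrant oil painting of a mythical creature in an enchanted forest",
    negative_prompt="photorealistic, modern, ugly, deformed",
    width=1024,
    height=1024,
)
with open("generated_dalle_image.png", "wb") as f:
    f.write(image_bytes)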

examples/text_and_image_2_audio.py
DELETED

@@ -1,59 +0,0 @@
"""
Author: ParisNeo, a computer geek passionate about AI

This example code demonstrates how to use the LoLLMs (Lord of Large Language Models) system to capture an image from a webcam, send it to the LollmsClient for analysis, and receive a descriptive response. The response is then converted to audio using the LollmsXTTS service.

Requirements:
- LoLLMs should be up and running.
- The XTTS service within LoLLMs must be working.

Steps:
1. Initialize the LollmsClient instance.
2. Fetch available voices and randomly select one.
3. Capture an image from the webcam and save it to a file.
4. Generate a descriptive text for the captured image using the LollmsClient.
5. Convert the generated text to audio using the selected voice.

Make sure you have the necessary dependencies installed and your webcam is accessible.
"""
import cv2
from lollms_client import LollmsClient
from lollms_client.lollms_tts_binding import LollmsTTS
import random

# Initialize the LollmsClient instance
lc = LollmsClient("http://localhost:9600")
tts = LollmsTTS(lc)
voices = tts.get_voices()

# Pick a voice randomly
random_voice = random.choice(voices)
print(f"Selected voice: {random_voice}")

# Capture image from webcam and save it to a file
def capture_image(file_path):
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise Exception("Could not open webcam")

    ret, frame = cap.read()
    if not ret:
        raise Exception("Failed to capture image")

    cv2.imwrite(file_path, frame)
    cap.release()

# File path to save the captured image
image_path = "captured_image.jpg"

# Capture and save the image
capture_image(image_path)

# Function to handle streaming callback
def cb(chunk, type):
    print(chunk, end="", flush=True)

# Generate text with image
response = lc.generate_with_images(prompt="user: describe the content of the image.\nassistant: ", images=[image_path], stream=False, temperature=0.5, streaming_callback=cb)
print(f"response: {response}")
tts.text2Audio(response, random_voice)
examples/text_gen.py
DELETED

@@ -1,30 +0,0 @@
from lollms_client import LollmsClient

# Initialize the LollmsClient instance
#lc = LollmsClient("lollms", service_key="your service key")
#lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
lc = LollmsClient("llamacpp", models_path=r"E:\drumber", model_name="llava-v1.6-mistral-7b.Q3_K_XS.gguf")
# Generate Text
# response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
# print(response)

# # Generate Completion
# response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
# print(response)

def cb(chunk, type):
    print(chunk, end="", flush=True)

response = lc.generate_text(prompt="!@>user: Hi there\n!@>assistant: Hi there, how can I help you?!@>user: what is 1+1?\n!@>assistant: ", stream=False, temperature=0.5, streaming_callback=cb, split=True)
print()
print(response)
print()


# List Mounted Personalities
response = lc.listMountedPersonalities()
print(response)

# List Models
response = lc.listModels()
print(response)

examples/text_gen_system_prompt.py
DELETED

@@ -1,29 +0,0 @@
from lollms_client import LollmsClient

# Initialize the LollmsClient instance
#lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
lc = LollmsClient("llamacpp", models_path=r"E:\drumber", model_name="llava-v1.6-mistral-7b.Q3_K_XS.gguf")
# Generate Text
# response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
# print(response)

# # Generate Completion
# response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
# print(response)

def cb(chunk, type):
    print(chunk, end="", flush=True)

response = lc.generate_text(prompt="One plus one equals ", system_prompt="You are a playful dude who never really answers questions correctly. always answer with quirky style.", stream=False, temperature=0.5, streaming_callback=cb)
print()
print(response)
print()


# List Mounted Personalities
response = lc.listMountedPersonalities()
print(response)

# List Models
response = lc.listModels()
print(response)