lollms-client 1.5.7__tar.gz → 1.5.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- {lollms_client-1.5.7/src/lollms_client.egg-info → lollms_client-1.5.9}/PKG-INFO +106 -4
- {lollms_client-1.5.7 → lollms_client-1.5.9}/README.md +104 -2
- {lollms_client-1.5.7 → lollms_client-1.5.9}/pyproject.toml +1 -1
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/__init__.py +1 -1
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_core.py +1 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/diffusers/__init__.py +58 -14
- {lollms_client-1.5.7 → lollms_client-1.5.9/src/lollms_client.egg-info}/PKG-INFO +106 -4
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client.egg-info/requires.txt +1 -1
- {lollms_client-1.5.7 → lollms_client-1.5.9}/LICENSE +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/setup.cfg +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_agentic.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_config.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_llm_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_mcp_security.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_personality.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_types.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/xtts/server/main.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client.egg-info/SOURCES.txt +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-1.5.7 → lollms_client-1.5.9}/test/test_lollms_discussion.py +0 -0
{lollms_client-1.5.7/src/lollms_client.egg-info → lollms_client-1.5.9}/PKG-INFO RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.5.7
+Version: 1.5.9
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License
@@ -222,7 +222,7 @@ License-File: LICENSE
 Requires-Dist: httpx
 Requires-Dist: requests
 Requires-Dist: ascii-colors
-Requires-Dist: pipmaster
+Requires-Dist: pipmaster>=1.0.5
 Requires-Dist: pyyaml
 Requires-Dist: tiktoken
 Requires-Dist: pydantic
@@ -249,7 +249,8 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
 ## Key Features
 
 * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM, Gemini, Claude, Groq, OpenRouter, Hugging Face Inference API) using a unified `llm_binding_config` dictionary for all parameters.
-* 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS),
+* 🗣️ **Comprehensive Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS), video (TTV), and music (TTM).
+* 🎨 **Advanced Image Generation and Editing:** A new `diffusers` binding provides powerful text-to-image capabilities. It supports a wide range of models from Hugging Face and Civitai, including specialized models like `Qwen-Image-Edit` for single-image editing and the cutting-edge `Qwen-Image-Edit-2509` for **multi-image fusion, pose transfer, and character swapping**.
 * 🖼️ **Selective Image Activation:** Control which images in a message are active and sent to the model, allowing for fine-grained multimodal context management without deleting the original data.
 * 🤖 **Agentic Workflows with MCP:** Empower LLMs to act as sophisticated agents, breaking down complex tasks, selecting and executing external tools (e.g., internet search, code interpreter, file I/O, image generation) through the Model Context Protocol (MCP) using a robust "observe-think-act" loop.
 * 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
@@ -1300,9 +1301,110 @@ try:
 
 except Exception as e:
     ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
-    ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")
+    ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
+
+---
+
+### 4. Local Multimodal and Advanced Bindings
+
+#### **Diffusers (Local Text-to-Image Generation and Editing)**
+
+The `diffusers` binding leverages the Hugging Face `diffusers` library to run a vast array of text-to-image models locally on your own hardware (CPU or GPU). It supports models from Hugging Face and Civitai, providing everything from basic image generation to advanced, state-of-the-art image editing.
+
+**Prerequisites:**
+* `torch` and `torchvision` must be installed. For GPU acceleration, it's critical to install the version that matches your CUDA toolkit.
+* The binding will attempt to auto-install other requirements like `diffusers`, `transformers`, and `safetensors`.
+
+**Usage:**
+
+**Example 1: Basic Text-to-Image Generation**
+This example shows how to generate an image from a simple text prompt using a classic Stable Diffusion model.
+
+```python
+from lollms_client import LollmsClient
+from ascii_colors import ASCIIColors
+from pathlib import Path
+
+try:
+    # Initialize the client with the diffusers TTI binding
+    # Let's use a classic Stable Diffusion model for this example
+    lc = LollmsClient(
+        tti_binding_name="diffusers",
+        tti_binding_config={
+            "model_name": "runwayml/stable-diffusion-v1-5",
+            # Other options: "device", "torch_dtype_str", "enable_xformers"
+        }
+    )
+
+    prompt = "A high-quality photograph of an astronaut riding a horse on Mars."
+    ASCIIColors.yellow(f"Generating image for prompt: '{prompt}'")
+
+    # Generate the image. The result is returned as bytes.
+    image_bytes = lc.generate_image(prompt, width=512, height=512)
+
+    if image_bytes:
+        output_path = Path("./astronaut_on_mars.png")
+        with open(output_path, "wb") as f:
+            f.write(image_bytes)
+        ASCIIColors.green(f"Image saved successfully to: {output_path.resolve()}")
+    else:
+        ASCIIColors.error("Image generation failed.")
+
+except Exception as e:
+    ASCIIColors.error(f"An error occurred with the Diffusers binding: {e}")
+    ASCIIColors.info("Please ensure torch is installed correctly for your hardware (CPU/GPU).")
 ```
 
+**Example 2: Advanced Multi-Image Fusion with Qwen-Image-Edit-2509**
+This example demonstrates a cutting-edge capability: using a specialized model to fuse elements from multiple input images based on a text prompt. Here, we'll ask the model to take a person from one image and place them in the background of another.
+
+```python
+from lollms_client import LollmsClient
+from ascii_colors import ASCIIColors
+from pathlib import Path
+
+# --- IMPORTANT ---
+# Replace these with actual paths to your local images
+path_to_person_image = "./path/to/your/person.jpg"
+path_to_background_image = "./path/to/your/background.jpg"
+
+if not Path(path_to_person_image).exists() or not Path(path_to_background_image).exists():
+    ASCIIColors.warning("Input images not found. Skipping multi-image fusion example.")
+    ASCIIColors.warning(f"Please update 'path_to_person_image' and 'path_to_background_image'.")
+else:
+    try:
+        # Initialize with the advanced Qwen multi-image editing model
+        lc = LollmsClient(
+            tti_binding_name="diffusers",
+            tti_binding_config={
+                "model_name": "Qwen/Qwen-Image-Edit-2509",
+                "torch_dtype_str": "bfloat16" # Recommended for this model
+            }
+        )
+
+        # The prompt guides how the images are combined
+        prompt = "Place the person from the first image into the scenic background of the second image."
+        ASCIIColors.yellow(f"Fusing images with prompt: '{prompt}'")
+
+        # The edit_image method can accept a list of image paths for fusion
+        fused_image_bytes = lc.edit_image(
+            images=[path_to_person_image, path_to_background_image],
+            prompt=prompt,
+            num_inference_steps=50
+        )
+
+        if fused_image_bytes:
+            output_path = Path("./fused_image_result.png")
+            with open(output_path, "wb") as f:
+                f.write(fused_image_bytes)
+            ASCIIColors.green(f"Fused image saved successfully to: {output_path.resolve()}")
+        else:
+            ASCIIColors.error("Multi-image editing failed.")
+
+    except Exception as e:
+        ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
+```This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
+
 ### Listing Available Models
 
 You can query the active LLM binding to get a list of models it supports or has available. The exact information returned depends on the binding (e.g., Ollama lists local models, OpenAI lists all its API models).
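The raised dependency floor in this diff (pipmaster >= 1.0.5) pairs with the diffusers binding change further down, where `ensure_packages` is called with a dict-style specification in addition to plain strings. A minimal sketch of the two call forms, assuming the structured form (with `vcs` and `condition` keys) is what requires the newer pipmaster; both calls are copied from the binding's new import preamble shown later in this diff:

```python
# Sketch of the two pipmaster call forms used by lollms_client 1.5.9.
# Assumption: the dict form with "vcs"/"condition" keys is the reason for
# the pipmaster>=1.0.5 floor; the call bodies themselves are taken from
# the diffusers binding diff below.
import pipmaster as pm

# String specs: install each package only if it is missing.
pm.ensure_packages(["pillow", "transformers", "safetensors", "requests", "tqdm"])

# Dict spec: if the installed diffusers does not satisfy ">=0.35.1",
# (re)install it from the git repository rather than from PyPI.
pm.ensure_packages([
    {
        "name": "diffusers",
        "vcs": "git+https://github.com/huggingface/diffusers.git",
        "condition": ">=0.35.1",
    }
])
```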
{lollms_client-1.5.7 → lollms_client-1.5.9}/README.md RENAMED

@@ -16,7 +16,8 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
@@ -1067,9 +1068,110 @@ try:

Both hunks carry exactly the same additions and removals as the README portion of the PKG-INFO diff above (the rewritten multimodal bullet, the new image-editing bullet, and the new Diffusers section with its two examples); only the line offsets differ, because PKG-INFO prepends 233 lines of package metadata before the embedded README.
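The README's examples cover text-to-image and multi-image fusion; the new feature bullet also promises single-image editing with `Qwen/Qwen-Image-Edit`. A hedged sketch of that case, assuming `edit_image` accepts a one-element image list exactly like the two-element fusion call in the README (the binding diff below does route a single `pil_images[0]` through its image-to-image path); the file names here are illustrative:

```python
# Hedged sketch: single-image editing with the dedicated editor model.
# Assumptions: edit_image takes a one-element list the same way the
# README's fusion example takes two; file names are illustrative.
from pathlib import Path
from lollms_client import LollmsClient

lc = LollmsClient(
    tti_binding_name="diffusers",
    tti_binding_config={"model_name": "Qwen/Qwen-Image-Edit"},
)

edited_bytes = lc.edit_image(
    images=["./path/to/your/photo.jpg"],  # a single input image
    prompt="Replace the sky with a dramatic sunset.",
    num_inference_steps=50,
)

if edited_bytes:
    Path("./edited_photo.png").write_bytes(edited_bytes)
```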
{lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/__init__.py RENAMED

@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "1.5.7"
+__version__ = "1.5.9" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
{lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/lollms_core.py RENAMED

@@ -5736,6 +5736,7 @@ Provide the final aggregated answer in {output_format} format, directly addressi
         self,
         text_to_process: str,
         contextual_prompt: Optional[str] = None,
+        system_prompt: str= None,
         context_fill_percentage: float = 0.75,
         overlap_tokens: int = 0,
         expected_generation_tokens: int = 1500,
{lollms_client-1.5.7 → lollms_client-1.5.9}/src/lollms_client/tti_bindings/diffusers/__init__.py RENAMED

@@ -19,8 +19,14 @@ from lollms_client.lollms_tti_binding import LollmsTTIBinding
 from ascii_colors import trace_exception, ASCIIColors
 
 pm.ensure_packages(["torch","torchvision"],index_url="https://download.pytorch.org/whl/cu126")
-pm.ensure_packages(["
-
+pm.ensure_packages(["pillow","transformers","safetensors","requests","tqdm"])
+pm.ensure_packages([
+    {
+        "name": "diffusers",
+        "vcs": "git+https://github.com/huggingface/diffusers.git",
+        "condition": ">=0.35.1"
+    }
+])
 try:
     import torch
     from diffusers import (
@@ -29,7 +35,8 @@ try:
         AutoPipelineForInpainting,
         DiffusionPipeline,
         StableDiffusionPipeline,
-
+        QwenImageEditPipeline,
+        QwenImageEditPlusPipeline
     )
     from diffusers.utils import load_image
     from PIL import Image
@@ -41,6 +48,8 @@ except ImportError:
     AutoPipelineForInpainting = None
     DiffusionPipeline = None
     StableDiffusionPipeline = None
+    QwenImageEditPipeline = None
+    QwenImageEditPlusPipeline = None
     Image = None
     load_image = None
     DIFFUSERS_AVAILABLE = False
@@ -353,7 +362,12 @@ class ModelManager:
             common_args["safety_checker"] = None
         if self.config.get("hf_cache_path"):
             common_args["cache_dir"] = str(self.config["hf_cache_path"])
-        if task == "text2image":
+
+        if "Qwen-Image-Edit-2509" in str(model_path):
+            self.pipeline = QwenImageEditPlusPipeline.from_pretrained(model_path, **common_args)
+        elif "Qwen-Image-Edit" in str(model_path):
+            self.pipeline = QwenImageEditPipeline.from_pretrained(model_path, **common_args)
+        elif task == "text2image":
             self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **common_args)
         elif task == "image2image":
             self.pipeline = AutoPipelineForImage2Image.from_pretrained(model_path, **common_args)
@@ -492,9 +506,10 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         {"family": "SD 1.x", "model_name": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion 1.5", "desc": "Classic SD1.5."},
         {"family": "SD 2.x", "model_name": "stabilityai/stable-diffusion-2-1", "display_name": "Stable Diffusion 2.1", "desc": "SD2.1 base."},
         {"family": "SD3", "model_name": "stabilityai/stable-diffusion-3-medium-diffusers", "display_name": "Stable Diffusion 3 Medium", "desc": "SD3 medium."},
-        {"family": "Qwen", "model_name": "Qwen/Qwen-Image", "display_name": "Qwen Image
+        {"family": "Qwen", "model_name": "Qwen/Qwen-Image", "display_name": "Qwen Image", "desc": "Dedicated image generation."},
         {"family": "Specialized", "model_name": "playgroundai/playground-v2.5-1024px-aesthetic", "display_name": "Playground v2.5", "desc": "High aesthetic 1024."},
-        {"family": "Editors", "model_name": "Qwen/Qwen-Image-Edit", "display_name": "Qwen Image Edit", "desc": "Dedicated image editing."}
+        {"family": "Editors", "model_name": "Qwen/Qwen-Image-Edit", "display_name": "Qwen Image Edit", "desc": "Dedicated image editing."},
+        {"family": "Editors", "model_name": "Qwen/Qwen-Image-Edit-2509", "display_name": "Qwen Image Edit Plus (Multi-Image)", "desc": "Advanced multi-image editing, fusion, and pose transfer."}
     ]
 
     def __init__(self, **kwargs):
@@ -673,6 +688,25 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         generator = self._prepare_seed(kwargs)
         steps = kwargs.pop("num_inference_steps", self.config["num_inference_steps"])
         guidance = kwargs.pop("guidance_scale", self.config["guidance_scale"])
+
+        # Handle multi-image fusion for Qwen-Image-Edit-2509
+        if "Qwen-Image-Edit-2509" in self.model_name and len(pil_images) > 1:
+            pipeline_args = {
+                "image": pil_images,
+                "prompt": prompt,
+                "negative_prompt": negative_prompt or " ",
+                "width": out_w, "height": out_h,
+                "num_inference_steps": steps,
+                "true_cfg_scale": guidance,
+                "generator": generator
+            }
+            pipeline_args.update(kwargs)
+            future = Future()
+            self.manager.queue.put((future, "image2image", pipeline_args))
+            ASCIIColors.info(f"Job (multi-image fusion with {len(pil_images)} images) queued.")
+            return future.result()
+
+        # Handle inpainting (single image with mask)
         if mask is not None and len(pil_images) == 1:
             try:
                 mask_img = self._decode_image_input(mask).convert("L")
@@ -683,36 +717,46 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                     "mask_image": mask_img,
                     "prompt": prompt,
                     "negative_prompt": negative_prompt or None,
-                    "width": out_w,
-                    "height": out_h,
+                    "width": out_w, "height": out_h,
                     "num_inference_steps": steps,
                     "guidance_scale": guidance,
                     "generator": generator
                 }
                 pipeline_args.update(kwargs)
+                if "Qwen-Image-Edit" in self.model_name:
+                    pipeline_args["true_cfg_scale"] = pipeline_args.pop("guidance_scale", 7.0)
+                    if not pipeline_args.get("negative_prompt"): pipeline_args["negative_prompt"] = " "
+
                 future = Future()
                 self.manager.queue.put((future, "inpainting", pipeline_args))
                 ASCIIColors.info("Job (inpaint) queued.")
                 return future.result()
+
+        # Handle standard image-to-image (single image)
         try:
             pipeline_args = {
-                "image": pil_images
+                "image": pil_images[0],
                 "prompt": prompt,
                 "negative_prompt": negative_prompt or None,
                 "strength": kwargs.pop("strength", 0.6),
-                "width": out_w,
-                "height": out_h,
+                "width": out_w, "height": out_h,
                 "num_inference_steps": steps,
                 "guidance_scale": guidance,
                 "generator": generator
             }
             pipeline_args.update(kwargs)
+            if "Qwen-Image-Edit" in self.model_name:
+                pipeline_args["true_cfg_scale"] = pipeline_args.pop("guidance_scale", 7.0)
+                if not pipeline_args.get("negative_prompt"): pipeline_args["negative_prompt"] = " "
+
             future = Future()
             self.manager.queue.put((future, "image2image", pipeline_args))
             ASCIIColors.info("Job (i2i) queued.")
             return future.result()
         except Exception:
             pass
+
+        # Fallback to latent-based generation if i2i fails for some reason
         try:
             base = pil_images[0]
             latents, _ = self._encode_image_to_latents(base, out_w, out_h)
@@ -723,8 +767,7 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                 "num_inference_steps": steps,
                 "guidance_scale": guidance,
                 "generator": generator,
-                "width": out_w,
-                "height": out_h
+                "width": out_w, "height": out_h
             }
             pipeline_args.update(kwargs)
             future = Future()
@@ -734,6 +777,7 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         except Exception as e:
             raise Exception(f"Image edit failed: {e}") from e
 
+
     def list_local_models(self) -> List[str]:
         if not self.models_path.exists():
             return []
@@ -840,4 +884,4 @@ if __name__ == '__main__':
     ASCIIColors.cyan("\nCleaning up temporary directories...")
     if temp_paths_dir.exists():
         shutil.rmtree(temp_paths_dir)
-    ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
+    ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
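Two small patterns recur in this binding diff and are worth isolating: pipeline selection by substring match on the model path (most specific name first, since "Qwen-Image-Edit-2509" also contains "Qwen-Image-Edit"), and remapping `guidance_scale` to the `true_cfg_scale` argument that the binding assumes the Qwen edit pipelines expect. A condensed sketch; the standalone wrapper functions are ours, but the class names and branching mirror the diff, and the imports presume the `diffusers>=0.35.1` requirement installed above:

```python
# Condensed sketch of two patterns from the diff above. The wrapper
# functions are ours; the class names and branching mirror the binding.
from diffusers import (
    AutoPipelineForText2Image,
    QwenImageEditPipeline,
    QwenImageEditPlusPipeline,
)

def select_pipeline_class(model_path: str):
    # Check the most specific substring first: "Qwen-Image-Edit-2509"
    # also contains "Qwen-Image-Edit", so the order of checks matters.
    if "Qwen-Image-Edit-2509" in model_path:
        return QwenImageEditPlusPipeline   # multi-image editing/fusion
    if "Qwen-Image-Edit" in model_path:
        return QwenImageEditPipeline       # single-image editing
    return AutoPipelineForText2Image       # generic text2image fallback

def adapt_args_for_qwen(model_name: str, pipeline_args: dict) -> dict:
    # The binding assumes Qwen edit pipelines take `true_cfg_scale`
    # instead of `guidance_scale`, and substitutes a single space for
    # an empty negative prompt.
    if "Qwen-Image-Edit" in model_name:
        pipeline_args["true_cfg_scale"] = pipeline_args.pop("guidance_scale", 7.0)
        if not pipeline_args.get("negative_prompt"):
            pipeline_args["negative_prompt"] = " "
    return pipeline_args
```

The queuing idiom wrapped around these in the diff (`future = Future()`, `self.manager.queue.put((future, task, pipeline_args))`, `return future.result()`) serializes every job through the single loaded pipeline on a worker thread while keeping the public call blocking.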
{lollms_client-1.5.7 → lollms_client-1.5.9/src/lollms_client.egg-info}/PKG-INFO RENAMED

Identical to the PKG-INFO diff at the top of this page, hunk for hunk (@@ -1,6 +1,6 @@, @@ -222,7 +222,7 @@, @@ -249,7 +249,8 @@, @@ -1300,9 +1301,110 @@); the sdist carries the same generated metadata file both at the package root and under src/lollms_client.egg-info.