lollms-client 1.5.8__tar.gz → 1.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (93)
  1. {lollms_client-1.5.8/src/lollms_client.egg-info → lollms_client-1.6.0}/PKG-INFO +105 -3
  2. {lollms_client-1.5.8 → lollms_client-1.6.0}/README.md +104 -2
  3. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/__init__.py +1 -1
  4. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_core.py +271 -311
  5. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/diffusers/__init__.py +50 -12
  6. {lollms_client-1.5.8 → lollms_client-1.6.0/src/lollms_client.egg-info}/PKG-INFO +105 -3
  7. {lollms_client-1.5.8 → lollms_client-1.6.0}/LICENSE +0 -0
  8. {lollms_client-1.5.8 → lollms_client-1.6.0}/pyproject.toml +0 -0
  9. {lollms_client-1.5.8 → lollms_client-1.6.0}/setup.cfg +0 -0
  10. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
  11. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/__init__.py +0 -0
  12. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  13. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
  14. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  15. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
  16. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
  17. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  18. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  19. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  20. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  21. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
  22. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  23. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
  24. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  25. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  26. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
  27. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  28. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
  29. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
  30. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  31. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  32. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  33. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  34. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_agentic.py +0 -0
  35. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_config.py +0 -0
  36. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_discussion.py +0 -0
  37. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_js_analyzer.py +0 -0
  38. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_llm_binding.py +0 -0
  39. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_mcp_binding.py +0 -0
  40. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_mcp_security.py +0 -0
  41. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_personality.py +0 -0
  42. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_python_analyzer.py +0 -0
  43. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_stt_binding.py +0 -0
  44. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_tti_binding.py +0 -0
  45. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_ttm_binding.py +0 -0
  46. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_tts_binding.py +0 -0
  47. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_ttv_binding.py +0 -0
  48. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_types.py +0 -0
  49. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/lollms_utilities.py +0 -0
  50. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  51. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  52. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  53. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  54. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  55. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  56. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  57. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/__init__.py +0 -0
  58. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  59. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  60. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  61. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/__init__.py +0 -0
  62. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  63. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
  64. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  65. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
  66. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
  67. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
  68. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/__init__.py +0 -0
  69. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  70. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
  71. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  72. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
  73. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
  74. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
  75. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/__init__.py +0 -0
  76. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
  77. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
  78. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
  79. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  80. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  81. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
  82. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
  83. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
  84. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  85. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/xtts/server/main.py +0 -0
  86. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
  87. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttv_bindings/__init__.py +0 -0
  88. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  89. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client.egg-info/SOURCES.txt +0 -0
  90. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client.egg-info/dependency_links.txt +0 -0
  91. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client.egg-info/requires.txt +0 -0
  92. {lollms_client-1.5.8 → lollms_client-1.6.0}/src/lollms_client.egg-info/top_level.txt +0 -0
  93. {lollms_client-1.5.8 → lollms_client-1.6.0}/test/test_lollms_discussion.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.5.8
+ Version: 1.6.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache License
@@ -249,7 +249,8 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
  ## Key Features
  
  * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM, Gemini, Claude, Groq, OpenRouter, Hugging Face Inference API) using a unified `llm_binding_config` dictionary for all parameters.
- * 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS), images (TTI), video (TTV), and music (TTM).
+ * 🗣️ **Comprehensive Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS), video (TTV), and music (TTM).
+ * 🎨 **Advanced Image Generation and Editing:** A new `diffusers` binding provides powerful text-to-image capabilities. It supports a wide range of models from Hugging Face and Civitai, including specialized models like `Qwen-Image-Edit` for single-image editing and the cutting-edge `Qwen-Image-Edit-2509` for **multi-image fusion, pose transfer, and character swapping**.
  * 🖼️ **Selective Image Activation:** Control which images in a message are active and sent to the model, allowing for fine-grained multimodal context management without deleting the original data.
  * 🤖 **Agentic Workflows with MCP:** Empower LLMs to act as sophisticated agents, breaking down complex tasks, selecting and executing external tools (e.g., internet search, code interpreter, file I/O, image generation) through the Model Context Protocol (MCP) using a robust "observe-think-act" loop.
  * 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
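In practice, the unified binding-config style from the first bullet looks roughly like the sketch below. It assumes a local Ollama backend and the `host_address`/`model_name` keys; the exact keys each binding accepts may differ, and `generate_text` is the method name used elsewhere in this README.

```python
from lollms_client import LollmsClient

# Minimal sketch of the unified binding-config style described above.
# The config keys shown here are assumptions; each binding documents
# its own accepted parameters.
lc = LollmsClient(
    llm_binding_name="ollama",
    llm_binding_config={
        "host_address": "http://localhost:11434",  # assumed key name
        "model_name": "mistral",                   # assumed key name
    },
)

print(lc.generate_text("Say hello in one short sentence."))
```

Switching backends is then a matter of changing `llm_binding_name` and the matching config dictionary; the rest of the calling code stays the same.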
@@ -1300,9 +1301,110 @@ try:
  
  except Exception as e:
      ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
-     ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")
+     ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
+
+ ---
+
+ ### 4. Local Multimodal and Advanced Bindings
+
+ #### **Diffusers (Local Text-to-Image Generation and Editing)**
+
+ The `diffusers` binding leverages the Hugging Face `diffusers` library to run a vast array of text-to-image models locally on your own hardware (CPU or GPU). It supports models from Hugging Face and Civitai, providing everything from basic image generation to advanced, state-of-the-art image editing.
+
+ **Prerequisites:**
+ * `torch` and `torchvision` must be installed. For GPU acceleration, it's critical to install the version that matches your CUDA toolkit.
+ * The binding will attempt to auto-install other requirements like `diffusers`, `transformers`, and `safetensors`.
+
+ **Usage:**
+
+ **Example 1: Basic Text-to-Image Generation**
+ This example shows how to generate an image from a simple text prompt using a classic Stable Diffusion model.
+
+ ```python
+ from lollms_client import LollmsClient
+ from ascii_colors import ASCIIColors
+ from pathlib import Path
+
+ try:
+     # Initialize the client with the diffusers TTI binding
+     # Let's use a classic Stable Diffusion model for this example
+     lc = LollmsClient(
+         tti_binding_name="diffusers",
+         tti_binding_config={
+             "model_name": "runwayml/stable-diffusion-v1-5",
+             # Other options: "device", "torch_dtype_str", "enable_xformers"
+         }
+     )
+
+     prompt = "A high-quality photograph of an astronaut riding a horse on Mars."
+     ASCIIColors.yellow(f"Generating image for prompt: '{prompt}'")
+
+     # Generate the image. The result is returned as bytes.
+     image_bytes = lc.generate_image(prompt, width=512, height=512)
+
+     if image_bytes:
+         output_path = Path("./astronaut_on_mars.png")
+         with open(output_path, "wb") as f:
+             f.write(image_bytes)
+         ASCIIColors.green(f"Image saved successfully to: {output_path.resolve()}")
+     else:
+         ASCIIColors.error("Image generation failed.")
+
+ except Exception as e:
+     ASCIIColors.error(f"An error occurred with the Diffusers binding: {e}")
+     ASCIIColors.info("Please ensure torch is installed correctly for your hardware (CPU/GPU).")
  ```
  
+ **Example 2: Advanced Multi-Image Fusion with Qwen-Image-Edit-2509**
+ This example demonstrates a cutting-edge capability: using a specialized model to fuse elements from multiple input images based on a text prompt. Here, we'll ask the model to take a person from one image and place them in the background of another.
+
+ ```python
+ from lollms_client import LollmsClient
+ from ascii_colors import ASCIIColors
+ from pathlib import Path
+
+ # --- IMPORTANT ---
+ # Replace these with actual paths to your local images
+ path_to_person_image = "./path/to/your/person.jpg"
+ path_to_background_image = "./path/to/your/background.jpg"
+
+ if not Path(path_to_person_image).exists() or not Path(path_to_background_image).exists():
+     ASCIIColors.warning("Input images not found. Skipping multi-image fusion example.")
+     ASCIIColors.warning(f"Please update 'path_to_person_image' and 'path_to_background_image'.")
+ else:
+     try:
+         # Initialize with the advanced Qwen multi-image editing model
+         lc = LollmsClient(
+             tti_binding_name="diffusers",
+             tti_binding_config={
+                 "model_name": "Qwen/Qwen-Image-Edit-2509",
+                 "torch_dtype_str": "bfloat16" # Recommended for this model
+             }
+         )
+
+         # The prompt guides how the images are combined
+         prompt = "Place the person from the first image into the scenic background of the second image."
+         ASCIIColors.yellow(f"Fusing images with prompt: '{prompt}'")
+
+         # The edit_image method can accept a list of image paths for fusion
+         fused_image_bytes = lc.edit_image(
+             images=[path_to_person_image, path_to_background_image],
+             prompt=prompt,
+             num_inference_steps=50
+         )
+
+         if fused_image_bytes:
+             output_path = Path("./fused_image_result.png")
+             with open(output_path, "wb") as f:
+                 f.write(fused_image_bytes)
+             ASCIIColors.green(f"Fused image saved successfully to: {output_path.resolve()}")
+         else:
+             ASCIIColors.error("Multi-image editing failed.")
+
+     except Exception as e:
+         ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
+ ```This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
+
  
  ### Listing Available Models
  
  You can query the active LLM binding to get a list of models it supports or has available. The exact information returned depends on the binding (e.g., Ollama lists local models, OpenAI lists all its API models).
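Both diffusers examples above write the returned bytes straight to disk. Since `generate_image` and `edit_image` return raw encoded image bytes, they can also be decoded in memory for post-processing. A small sketch using Pillow, which is an extra dependency for this snippet and not something the binding itself requires:

```python
import io

from PIL import Image  # Pillow; optional, used only for this sketch


def bytes_to_image(image_bytes: bytes) -> Image.Image:
    """Decode raw image bytes, as returned by generate_image/edit_image."""
    return Image.open(io.BytesIO(image_bytes)).convert("RGB")


# Example: shrink a generated image before saving it.
# image_bytes = lc.generate_image(prompt, width=512, height=512)
# img = bytes_to_image(image_bytes)
# img.thumbnail((256, 256))
# img.save("astronaut_thumbnail.png")
```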
@@ -16,7 +16,8 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
  ## Key Features
  
  * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM, Gemini, Claude, Groq, OpenRouter, Hugging Face Inference API) using a unified `llm_binding_config` dictionary for all parameters.
- * 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS), images (TTI), video (TTV), and music (TTM).
+ * 🗣️ **Comprehensive Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS), video (TTV), and music (TTM).
+ * 🎨 **Advanced Image Generation and Editing:** A new `diffusers` binding provides powerful text-to-image capabilities. It supports a wide range of models from Hugging Face and Civitai, including specialized models like `Qwen-Image-Edit` for single-image editing and the cutting-edge `Qwen-Image-Edit-2509` for **multi-image fusion, pose transfer, and character swapping**.
  * 🖼️ **Selective Image Activation:** Control which images in a message are active and sent to the model, allowing for fine-grained multimodal context management without deleting the original data.
  * 🤖 **Agentic Workflows with MCP:** Empower LLMs to act as sophisticated agents, breaking down complex tasks, selecting and executing external tools (e.g., internet search, code interpreter, file I/O, image generation) through the Model Context Protocol (MCP) using a robust "observe-think-act" loop.
  * 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
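The "Selective Image Activation" bullet comes with no example in this diff, and the concrete API is not shown. The sketch below is purely illustrative: it assumes a message stores its images as dicts carrying an `active` flag, as the feature description implies, and the helper name is hypothetical.

```python
# Illustrative only: the concrete API is not shown in this diff. We assume
# a message keeps its images as dicts with an "active" flag, per the
# feature description above; set_image_active is a hypothetical helper.
def set_image_active(images: list, index: int, active: bool) -> None:
    """Toggle whether one image is sent to the model, without deleting it."""
    images[index]["active"] = active


message_images = [
    {"data": "<base64 of image 1>", "active": True},
    {"data": "<base64 of image 2>", "active": True},
]

# Keep image 2 in the discussion but exclude it from the next request.
set_image_active(message_images, 1, False)
print([img["active"] for img in message_images])  # [True, False]
```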
@@ -1067,9 +1068,110 @@ try:
  
  except Exception as e:
      ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
-     ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")
+     ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
+
+ ---
+
+ ### 4. Local Multimodal and Advanced Bindings
+
+ #### **Diffusers (Local Text-to-Image Generation and Editing)**
+
+ The `diffusers` binding leverages the Hugging Face `diffusers` library to run a vast array of text-to-image models locally on your own hardware (CPU or GPU). It supports models from Hugging Face and Civitai, providing everything from basic image generation to advanced, state-of-the-art image editing.
+
+ **Prerequisites:**
+ * `torch` and `torchvision` must be installed. For GPU acceleration, it's critical to install the version that matches your CUDA toolkit.
+ * The binding will attempt to auto-install other requirements like `diffusers`, `transformers`, and `safetensors`.
+
+ **Usage:**
+
+ **Example 1: Basic Text-to-Image Generation**
+ This example shows how to generate an image from a simple text prompt using a classic Stable Diffusion model.
+
+ ```python
+ from lollms_client import LollmsClient
+ from ascii_colors import ASCIIColors
+ from pathlib import Path
+
+ try:
+     # Initialize the client with the diffusers TTI binding
+     # Let's use a classic Stable Diffusion model for this example
+     lc = LollmsClient(
+         tti_binding_name="diffusers",
+         tti_binding_config={
+             "model_name": "runwayml/stable-diffusion-v1-5",
+             # Other options: "device", "torch_dtype_str", "enable_xformers"
+         }
+     )
+
+     prompt = "A high-quality photograph of an astronaut riding a horse on Mars."
+     ASCIIColors.yellow(f"Generating image for prompt: '{prompt}'")
+
+     # Generate the image. The result is returned as bytes.
+     image_bytes = lc.generate_image(prompt, width=512, height=512)
+
+     if image_bytes:
+         output_path = Path("./astronaut_on_mars.png")
+         with open(output_path, "wb") as f:
+             f.write(image_bytes)
+         ASCIIColors.green(f"Image saved successfully to: {output_path.resolve()}")
+     else:
+         ASCIIColors.error("Image generation failed.")
+
+ except Exception as e:
+     ASCIIColors.error(f"An error occurred with the Diffusers binding: {e}")
+     ASCIIColors.info("Please ensure torch is installed correctly for your hardware (CPU/GPU).")
  ```
  
+ **Example 2: Advanced Multi-Image Fusion with Qwen-Image-Edit-2509**
+ This example demonstrates a cutting-edge capability: using a specialized model to fuse elements from multiple input images based on a text prompt. Here, we'll ask the model to take a person from one image and place them in the background of another.
+
+ ```python
+ from lollms_client import LollmsClient
+ from ascii_colors import ASCIIColors
+ from pathlib import Path
+
+ # --- IMPORTANT ---
+ # Replace these with actual paths to your local images
+ path_to_person_image = "./path/to/your/person.jpg"
+ path_to_background_image = "./path/to/your/background.jpg"
+
+ if not Path(path_to_person_image).exists() or not Path(path_to_background_image).exists():
+     ASCIIColors.warning("Input images not found. Skipping multi-image fusion example.")
+     ASCIIColors.warning(f"Please update 'path_to_person_image' and 'path_to_background_image'.")
+ else:
+     try:
+         # Initialize with the advanced Qwen multi-image editing model
+         lc = LollmsClient(
+             tti_binding_name="diffusers",
+             tti_binding_config={
+                 "model_name": "Qwen/Qwen-Image-Edit-2509",
+                 "torch_dtype_str": "bfloat16" # Recommended for this model
+             }
+         )
+
+         # The prompt guides how the images are combined
+         prompt = "Place the person from the first image into the scenic background of the second image."
+         ASCIIColors.yellow(f"Fusing images with prompt: '{prompt}'")
+
+         # The edit_image method can accept a list of image paths for fusion
+         fused_image_bytes = lc.edit_image(
+             images=[path_to_person_image, path_to_background_image],
+             prompt=prompt,
+             num_inference_steps=50
+         )
+
+         if fused_image_bytes:
+             output_path = Path("./fused_image_result.png")
+             with open(output_path, "wb") as f:
+                 f.write(fused_image_bytes)
+             ASCIIColors.green(f"Fused image saved successfully to: {output_path.resolve()}")
+         else:
+             ASCIIColors.error("Multi-image editing failed.")
+
+     except Exception as e:
+         ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
+ ```This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
+
  
  ### Listing Available Models
  
  You can query the active LLM binding to get a list of models it supports or has available. The exact information returned depends on the binding (e.g., Ollama lists local models, OpenAI lists all its API models).
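The context line closing this hunk points at the "Listing Available Models" section. A minimal sketch of such a query, assuming the `listModels()` helper that earlier lollms_client releases exposed; check your binding's documentation for the exact method name and return shape:

```python
from lollms_client import LollmsClient

lc = LollmsClient(llm_binding_name="ollama")

# listModels() is an assumption carried over from earlier lollms_client
# releases; what each entry contains depends on the active binding
# (local models for Ollama, API models for OpenAI, etc.).
for model in lc.listModels():
    print(model)
```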
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
  
- __version__ = "1.5.8" # Updated version
+ __version__ = "1.6.0" # Updated version
  
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
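Since this final hunk shows `__version__` being bumped in the package root, the upgrade can be verified directly from Python:

```python
import lollms_client

# __version__ is defined in src/lollms_client/__init__.py, as the hunk
# above shows.
print(lollms_client.__version__)  # expected: 1.6.0
```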