lollms-client 1.5.7__tar.gz → 1.10.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lollms_client-1.5.7/src/lollms_client.egg-info → lollms_client-1.10.0}/PKG-INFO +364 -5
- {lollms_client-1.5.7 → lollms_client-1.10.0}/README.md +362 -4
- {lollms_client-1.5.7 → lollms_client-1.10.0}/pyproject.toml +5 -4
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/__init__.py +5 -2
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/assets/models_ctx_sizes.json +2 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/azure_openai/__init__.py +3 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/claude/__init__.py +128 -36
- lollms_client-1.10.0/src/lollms_client/llm_bindings/gemini/__init__.py +542 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/grok/__init__.py +53 -16
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/groq/__init__.py +3 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +3 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/litellm/__init__.py +2 -2
- lollms_client-1.10.0/src/lollms_client/llm_bindings/llama_cpp_server/__init__.py +736 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/lollms/__init__.py +76 -21
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +2 -2
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/mistral/__init__.py +3 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/novita_ai/__init__.py +143 -7
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/ollama/__init__.py +352 -90
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/open_router/__init__.py +39 -10
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/openai/__init__.py +82 -21
- lollms_client-1.10.0/src/lollms_client/llm_bindings/openllm/__init__.py +406 -0
- lollms_client-1.10.0/src/lollms_client/llm_bindings/openwebui/__init__.py +465 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/perplexity/__init__.py +3 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
- lollms_client-1.10.0/src/lollms_client/llm_bindings/transformers/__init__.py +503 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/vllm/__init__.py +1 -1
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_agentic.py +4 -2
- lollms_client-1.10.0/src/lollms_client/lollms_base_binding.py +61 -0
- lollms_client-1.10.0/src/lollms_client/lollms_bindings_utils.py +101 -0
- lollms_client-1.10.0/src/lollms_client/lollms_core.py +1110 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_discussion.py +251 -83
- lollms_client-1.10.0/src/lollms_client/lollms_llm_binding.py +536 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_mcp_binding.py +48 -77
- lollms_client-1.10.0/src/lollms_client/lollms_stt_binding.py +180 -0
- lollms_client-1.10.0/src/lollms_client/lollms_text_processing.py +1725 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_tti_binding.py +143 -37
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_ttm_binding.py +37 -41
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_tts_binding.py +43 -18
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_ttv_binding.py +37 -41
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_types.py +4 -2
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +143 -138
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/stt_bindings/lollms/__init__.py +20 -13
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/stt_bindings/whisper/__init__.py +155 -59
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/stt_bindings/whispercpp/__init__.py +103 -90
- lollms_client-1.10.0/src/lollms_client/tti_bindings/diffusers/__init__.py +504 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/diffusers/config.py +43 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/diffusers/server/main.py +1070 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/gemini/__init__.py +289 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/gguf_diffusion/__init__.py +207 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/gguf_diffusion/server/dequant.py +251 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/gguf_diffusion/server/main.py +289 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/gguf_diffusion/server/ops.py +118 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +6 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tti_bindings/lollms/__init__.py +4 -1
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tti_bindings/novita_ai/__init__.py +5 -2
- lollms_client-1.10.0/src/lollms_client/tti_bindings/open_router/__init__.py +388 -0
- lollms_client-1.10.0/src/lollms_client/tti_bindings/openai/__init__.py +94 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tti_bindings/stability_ai/__init__.py +5 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +7 -12
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +7 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/lollms/__init__.py +4 -17
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/replicate/__init__.py +7 -4
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +7 -4
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/topmediai/__init__.py +6 -3
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/bark/__init__.py +7 -10
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/lollms/__init__.py +12 -12
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/piper_tts/__init__.py +8 -11
- lollms_client-1.10.0/src/lollms_client/tts_bindings/vibevoice/__init__.py +197 -0
- lollms_client-1.10.0/src/lollms_client/tts_bindings/vibevoice/server/main.py +116 -0
- lollms_client-1.10.0/src/lollms_client/tts_bindings/xtts/__init__.py +210 -0
- lollms_client-1.10.0/src/lollms_client/tts_bindings/xtts/server/main.py +284 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0/src/lollms_client.egg-info}/PKG-INFO +364 -5
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client.egg-info/SOURCES.txt +13 -2
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client.egg-info/requires.txt +2 -1
- lollms_client-1.5.7/src/lollms_client/llm_bindings/gemini/__init__.py +0 -497
- lollms_client-1.5.7/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -1148
- lollms_client-1.5.7/src/lollms_client/llm_bindings/openllm/__init__.py +0 -550
- lollms_client-1.5.7/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -303
- lollms_client-1.5.7/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -608
- lollms_client-1.5.7/src/lollms_client/llm_bindings/transformers/__init__.py +0 -707
- lollms_client-1.5.7/src/lollms_client/lollms_core.py +0 -6165
- lollms_client-1.5.7/src/lollms_client/lollms_llm_binding.py +0 -577
- lollms_client-1.5.7/src/lollms_client/lollms_stt_binding.py +0 -125
- lollms_client-1.5.7/src/lollms_client/tti_bindings/diffusers/__init__.py +0 -843
- lollms_client-1.5.7/src/lollms_client/tti_bindings/gemini/__init__.py +0 -320
- lollms_client-1.5.7/src/lollms_client/tti_bindings/openai/__init__.py +0 -124
- lollms_client-1.5.7/src/lollms_client/tts_bindings/xtts/__init__.py +0 -111
- lollms_client-1.5.7/src/lollms_client/tts_bindings/xtts/server/main.py +0 -314
- {lollms_client-1.5.7 → lollms_client-1.10.0}/LICENSE +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/setup.cfg +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_config.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_mcp_security.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_personality.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/src/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-1.5.7 → lollms_client-1.10.0}/test/test_lollms_discussion.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.5.7
+Version: 1.10.0
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License
@@ -222,13 +222,14 @@ License-File: LICENSE
 Requires-Dist: httpx
 Requires-Dist: requests
 Requires-Dist: ascii-colors
-Requires-Dist: pipmaster
+Requires-Dist: pipmaster>=1.0.5
 Requires-Dist: pyyaml
 Requires-Dist: tiktoken
 Requires-Dist: pydantic
 Requires-Dist: numpy
 Requires-Dist: pillow
 Requires-Dist: sqlalchemy
+Requires-Dist: jsonschema
 Dynamic: license-file

 # LoLLMs Client Library
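The new `jsonschema` requirement most likely backs the schema-constrained generation features introduced later in this diff (the Text Processor's `schema=` parameters). For reference, this is how a payload is checked against one of those schemas with the standard `jsonschema` API; the schema and payload here are illustrative:

```python
from jsonschema import ValidationError, validate

# Illustrative schema matching the shape used by the Text Processor examples
schema = {
    "type": "object",
    "properties": {
        "people": {"type": "array", "items": {"type": "object"}}
    },
    "required": ["people"],
}

payload = {"people": [{"name": "Ada", "role": "engineer"}]}

try:
    validate(instance=payload, schema=schema)  # raises ValidationError on mismatch
    print("payload conforms to the schema")
except ValidationError as e:
    print("schema violation:", e.message)
```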
@@ -249,7 +250,8 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
 ## Key Features

 * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM, Gemini, Claude, Groq, OpenRouter, Hugging Face Inference API) using a unified `llm_binding_config` dictionary for all parameters.
-* 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS),
+* 🗣️ **Comprehensive Multimodal Support:** Interact with models capable of processing images and generating various outputs like speech (TTS), video (TTV), and music (TTM).
+* 🎨 **Advanced Image Generation and Editing:** A new `diffusers` binding provides powerful text-to-image capabilities. It supports a wide range of models from Hugging Face and Civitai, including specialized models like `Qwen-Image-Edit` for single-image editing and the cutting-edge `Qwen-Image-Edit-2509` for **multi-image fusion, pose transfer, and character swapping**.
 * 🖼️ **Selective Image Activation:** Control which images in a message are active and sent to the model, allowing for fine-grained multimodal context management without deleting the original data.
 * 🤖 **Agentic Workflows with MCP:** Empower LLMs to act as sophisticated agents, breaking down complex tasks, selecting and executing external tools (e.g., internet search, code interpreter, file I/O, image generation) through the Model Context Protocol (MCP) using a robust "observe-think-act" loop.
 * 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
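To make the unified `llm_binding_config` claim concrete: switching backends only changes the binding name and its config dictionary. A minimal sketch, assuming the `ollama` binding accepts the same `model_name`/`host_address` keys shown for the `lollms` binding elsewhere in this README:

```python
from lollms_client import LollmsClient

# Same client class, different backend: swap the binding name and its config.
# The config keys below mirror the lollms binding examples; whether the
# ollama binding uses exactly these keys is an assumption.
lc = LollmsClient(
    llm_binding_name="ollama",
    llm_binding_config={
        "model_name": "llama3",
        "host_address": "http://localhost:11434",  # Ollama's default port
    }
)
```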
@@ -303,6 +305,7 @@ try:
         llm_binding_config={
             "host_address": "http://localhost:9642", # Default port for LoLLMs server
             # "service_key": "your_lollms_api_key_here" # Get key from LoLLMs UI -> User Settings if security is enabled
+            # "verify_ssl_certificate": True # if False, SSL certificate verification is skipped (only relevant when the LoLLMs service address uses https)
         }
     )

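Both configuration hunks in this diff add the same new option. A minimal sketch of using it against a LoLLMs server behind a self-signed HTTPS certificate (the host address is a placeholder):

```python
from lollms_client import LollmsClient

# Placeholder HTTPS address with a self-signed certificate: disable
# verification explicitly; keep the default (True) for properly signed certs.
lc = LollmsClient(
    llm_binding_name="lollms",
    llm_binding_config={
        "host_address": "https://my-lollms-host:9642",
        "verify_ssl_certificate": False,
    }
)
```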
@@ -962,6 +965,7 @@ try:
     config = {
         "host_address": "http://localhost:9642",
         # "service_key": "your_lollms_api_key_here" # Uncomment and replace if security is enabled
+        # "verify_ssl_certificate": True # if False, SSL certificate verification is skipped (only relevant when the LoLLMs service address uses https)
     }

     lc = LollmsClient(
@@ -1300,9 +1304,113 @@ try:

 except Exception as e:
     ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
-    ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")
+    ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
 ```

+---
+
+### 4. Local Multimodal and Advanced Bindings
+
+#### **Diffusers (Local Text-to-Image Generation and Editing)**
+
+The `diffusers` binding leverages the Hugging Face `diffusers` library to run a vast array of text-to-image models locally on your own hardware (CPU or GPU). It supports models from Hugging Face and Civitai, providing everything from basic image generation to advanced, state-of-the-art image editing.
+
+**Prerequisites:**
+* `torch` and `torchvision` must be installed. For GPU acceleration, it's critical to install the version that matches your CUDA toolkit.
+* The binding will attempt to auto-install other requirements like `diffusers`, `transformers`, and `safetensors`.
+
+**Usage:**
+
+**Example 1: Basic Text-to-Image Generation**
+This example shows how to generate an image from a simple text prompt using a classic Stable Diffusion model.
+
+```python
+from lollms_client import LollmsClient
+from ascii_colors import ASCIIColors
+from pathlib import Path
+
+try:
+    # Initialize the client with the diffusers TTI binding
+    # Let's use a classic Stable Diffusion model for this example
+    lc = LollmsClient(
+        tti_binding_name="diffusers",
+        tti_binding_config={
+            "model_name": "runwayml/stable-diffusion-v1-5",
+            # Other options: "device", "torch_dtype_str", "enable_xformers"
+        }
+    )
+
+    prompt = "A high-quality photograph of an astronaut riding a horse on Mars."
+    ASCIIColors.yellow(f"Generating image for prompt: '{prompt}'")
+
+    # Generate the image. The result is returned as bytes.
+    image_bytes = lc.generate_image(prompt, width=512, height=512)
+
+    if image_bytes:
+        output_path = Path("./astronaut_on_mars.png")
+        with open(output_path, "wb") as f:
+            f.write(image_bytes)
+        ASCIIColors.green(f"Image saved successfully to: {output_path.resolve()}")
+    else:
+        ASCIIColors.error("Image generation failed.")
+
+except Exception as e:
+    ASCIIColors.error(f"An error occurred with the Diffusers binding: {e}")
+    ASCIIColors.info("Please ensure torch is installed correctly for your hardware (CPU/GPU).")
+```
+
+**Example 2: Advanced Multi-Image Fusion with Qwen-Image-Edit-2509**
+This example demonstrates a cutting-edge capability: using a specialized model to fuse elements from multiple input images based on a text prompt. Here, we'll ask the model to take a person from one image and place them in the background of another.
+
+```python
+from lollms_client import LollmsClient
+from ascii_colors import ASCIIColors
+from pathlib import Path
+
+# --- IMPORTANT ---
+# Replace these with actual paths to your local images
+path_to_person_image = "./path/to/your/person.jpg"
+path_to_background_image = "./path/to/your/background.jpg"
+
+if not Path(path_to_person_image).exists() or not Path(path_to_background_image).exists():
+    ASCIIColors.warning("Input images not found. Skipping multi-image fusion example.")
+    ASCIIColors.warning(f"Please update 'path_to_person_image' and 'path_to_background_image'.")
+else:
+    try:
+        # Initialize with the advanced Qwen multi-image editing model
+        lc = LollmsClient(
+            tti_binding_name="diffusers",
+            tti_binding_config={
+                "model_name": "Qwen/Qwen-Image-Edit-2509",
+                "torch_dtype_str": "bfloat16" # Recommended for this model
+            }
+        )
+
+        # The prompt guides how the images are combined
+        prompt = "Place the person from the first image into the scenic background of the second image."
+        ASCIIColors.yellow(f"Fusing images with prompt: '{prompt}'")
+
+        # The edit_image method can accept a list of image paths for fusion
+        fused_image_bytes = lc.edit_image(
+            images=[path_to_person_image, path_to_background_image],
+            prompt=prompt,
+            num_inference_steps=50
+        )
+
+        if fused_image_bytes:
+            output_path = Path("./fused_image_result.png")
+            with open(output_path, "wb") as f:
+                f.write(fused_image_bytes)
+            ASCIIColors.green(f"Fused image saved successfully to: {output_path.resolve()}")
+        else:
+            ASCIIColors.error("Multi-image editing failed.")
+
+    except Exception as e:
+        ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
+```
+
+This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
+
 ### Listing Available Models

 You can query the active LLM binding to get a list of models it supports or has available. The exact information returned depends on the binding (e.g., Ollama lists local models, OpenAI lists all its API models).
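Since the prerequisites above hinge on a `torch` build that matches your hardware, a quick sanity check before loading any diffusers model can save debugging time (standard PyTorch API):

```python
import torch

# Report the installed torch build and whether CUDA is usable; a CPU-only
# build here would explain slow generations or device errors in the binding.
print("torch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))
```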
@@ -1323,7 +1431,7 @@ try:
     )

     ASCIIColors.yellow("\nListing available models for the current binding:")
-    available_models = lc.
+    available_models = lc.list_models()

     if isinstance(available_models, list):
         for model in available_models:
@@ -1402,6 +1510,257 @@ try:
 except Exception as e:
     ASCIIColors.error(f"An error occurred during long context processing: {e}")
 ```
+## Low-Level Text Processing
+
+---
+
+## 🧠 Lollms Text Processor
+
+The **Lollms Text Processor** is a high-level utility designed to turn raw LLM generations into **production-ready workflows**.
+It handles long documents, structured outputs, robust code generation, intelligent editing, and reliable parsing.
+
+It is directly accessible via:
+
+```python
+lc.llm.tp
+```
+
+---
+
+## 🔧 Initialization
+
+```python
+from lollms_client import LollmsClient
+
+lc = LollmsClient(
+    llm_binding_name="lollms",
+    llm_binding_config={
+        "model_name": "llama3",
+        "host_address": "http://localhost:9642",
+        "service_key": "the service key"
+    }
+)
+
+llm = lc.llm
+tp = lc.llm.tp
+```
+
+* `llm` provides low-level text generation primitives
+* `tp` is the **Text Processor**, ready to use out of the box
+
+---
+
+## 📚 1. Long Context Processing
+
+The Text Processor automatically handles documents that exceed the model’s context window by chunking, synthesizing intermediate results, and producing a final consolidated output.
+
+### Text generation from a very long document
+
+```python
+summary = tp.long_context_processing(
+    text_to_process=long_document,
+    contextual_prompt="Summarize the main findings about climate change",
+    processing_type="text"
+)
+```
+
+### Structured extraction from long context
+
+```python
+result = tp.long_context_processing(
+    text_to_process=long_document,
+    contextual_prompt="Extract all people mentioned with their roles",
+    processing_type="structured",
+    schema={
+        "type": "object",
+        "properties": {
+            "people": {
+                "type": "array",
+                "items": {
+                    "type": "object",
+                    "properties": {
+                        "name": {"type": "string"},
+                        "role": {"type": "string"}
+                    }
+                }
+            }
+        }
+    }
+)
+```
+
+### Yes / No question over long documents
+
+```python
+answer = tp.long_context_processing(
+    text_to_process=long_document,
+    contextual_prompt="Does this document mention Marie Curie?",
+    processing_type="yes_no",
+    return_explanation=True
+)
+```
+
+---
+
+## 💻 2. Code Generation and Editing
+
+### Single-file code generation
+
+```python
+code = tp.generate_code(
+    prompt="Create a binary search function",
+    language="python"
+)
+```
+
+### Multi-file project generation
+
+```python
+files = tp.generate_codes(
+    prompt="Create a Flask web app with an HTML frontend"
+)
+```
+
+### Efficient code editing (non-destructive)
+
+```python
+updated_code = tp.edit_code(
+    original_code=existing_code,
+    edit_instruction="Add error handling and logging",
+    language="python"
+)
+```
+
+Unlike naïve prompting, edits are **structural**, not full rewrites.
+
+---
+
+## 🧩 3. Structured Content Generation
+
+### Using JSON Schema
+
+```python
+data = tp.generate_structured_content(
+    prompt="Create a presentation about AI",
+    schema={
+        "type": "object",
+        "properties": {
+            "slides": {
+                "type": "array",
+                "items": {"type": "object"}
+            }
+        }
+    }
+)
+```
+
+### Using Pydantic models
+
+```python
+from pydantic import BaseModel
+
+class Person(BaseModel):
+    name: str
+    age: int
+
+person = tp.generate_structured_content_pydantic(
+    prompt="Create a person named Alice, age 30",
+    pydantic_model=Person
+)
+```
+
+✔ Automatic validation
+✔ Truncation recovery
+✔ Agent-friendly outputs
+
+---
+
+## 🧠 4. LLM Helper Utilities
+
+### Yes / No questions
+
+```python
+answer = tp.yes_no(
+    question="Is Marie Curie a scientist?",
+    context="Marie Curie was a physicist...",
+    return_explanation=True
+)
+```
+
+### Multiple-choice questions
+
+```python
+choice = tp.multichoice_question(
+    question="What field did Marie Curie work in?",
+    possible_answers=["Biology", "Physics", "Chemistry"]
+)
+```
+
+### Text summarization
+
+```python
+summary = tp.summerize_text(text="Long article...")
+```
+
+### Keyword extraction
+
+```python
+keywords = tp.extract_keywords(
+    text="Long article...",
+    num_keywords=5
+)
+```
+
+---
+
+## 🧪 5. Response Parsing and Cleanup
+
+### Extract reasoning / thinking blocks
+
+```python
+thoughts = tp.extract_thinking_blocks(llm_response)
+```
+
+### Remove reasoning blocks
+
+```python
+clean_text = tp.remove_thinking_blocks(llm_response)
+```
+
+### Extract code blocks (legacy support)
+
+```python
+blocks = tp.extract_code_blocks(
+    text=llm_response,
+    format="markdown"
+)
+```
+
+---
+
+## ✨ Key Features
+
+* ✅ Automatic **long-context handling**
+* ✅ XML-based code generation (no fragile backticks)
+* ✅ Truncation recovery for JSON and code
+* ✅ Non-destructive, structured code editing
+* ✅ JSON Schema & Pydantic support
+* ✅ Decision helpers (yes/no, multichoice, ranking)
+* ✅ Graceful fallback strategies
+
+---
+
+## 🏁 Summary
+
+The **Lollms Text Processor** turns a raw LLM into a **reliable production tool**.
+
+> `lc.llm` generates
+> `lc.llm.tp` structures, validates, and secures
+
+A core component of **LOLLMS — one tool to rule them all** 🚀
+

 ## Contributing
