lollms-client 1.6.7__tar.gz → 1.10.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lollms_client-1.6.7/src/lollms_client.egg-info → lollms_client-1.10.1}/PKG-INFO +255 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/README.md +253 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/pyproject.toml +4 -3
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/__init__.py +5 -2
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/assets/models_ctx_sizes.json +2 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/azure_openai/__init__.py +1 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/claude/__init__.py +126 -34
- lollms_client-1.10.1/src/lollms_client/llm_bindings/gemini/__init__.py +542 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/grok/__init__.py +51 -14
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/groq/__init__.py +1 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +1 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/litellm/__init__.py +1 -1
- lollms_client-1.10.1/src/lollms_client/llm_bindings/llama_cpp_server/__init__.py +736 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/lollms/__init__.py +75 -20
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/mistral/__init__.py +1 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/novita_ai/__init__.py +141 -5
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/ollama/__init__.py +345 -86
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/open_router/__init__.py +37 -8
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/openai/__init__.py +81 -20
- lollms_client-1.10.1/src/lollms_client/llm_bindings/openllm/__init__.py +406 -0
- lollms_client-1.10.1/src/lollms_client/llm_bindings/openwebui/__init__.py +465 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/perplexity/__init__.py +1 -1
- lollms_client-1.10.1/src/lollms_client/llm_bindings/transformers/__init__.py +503 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_agentic.py +4 -2
- lollms_client-1.10.1/src/lollms_client/lollms_base_binding.py +61 -0
- lollms_client-1.10.1/src/lollms_client/lollms_bindings_utils.py +101 -0
- lollms_client-1.10.1/src/lollms_client/lollms_core.py +1110 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_discussion.py +251 -83
- lollms_client-1.10.1/src/lollms_client/lollms_llm_binding.py +536 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_mcp_binding.py +48 -77
- lollms_client-1.10.1/src/lollms_client/lollms_stt_binding.py +180 -0
- lollms_client-1.10.1/src/lollms_client/lollms_text_processing.py +1725 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_tti_binding.py +142 -36
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_ttm_binding.py +37 -41
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_tts_binding.py +35 -12
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_ttv_binding.py +37 -41
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_types.py +4 -2
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +143 -138
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/stt_bindings/lollms/__init__.py +20 -13
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/stt_bindings/whisper/__init__.py +155 -59
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/stt_bindings/whispercpp/__init__.py +103 -90
- lollms_client-1.10.1/src/lollms_client/tti_bindings/diffusers/__init__.py +545 -0
- lollms_client-1.10.1/src/lollms_client/tti_bindings/diffusers/config.py +43 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/diffusers/server/main.py +190 -111
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/gemini/__init__.py +53 -24
- lollms_client-1.10.1/src/lollms_client/tti_bindings/gguf_diffusion/__init__.py +207 -0
- lollms_client-1.10.1/src/lollms_client/tti_bindings/gguf_diffusion/server/dequant.py +251 -0
- lollms_client-1.10.1/src/lollms_client/tti_bindings/gguf_diffusion/server/main.py +289 -0
- lollms_client-1.10.1/src/lollms_client/tti_bindings/gguf_diffusion/server/ops.py +118 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +5 -2
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/lollms/__init__.py +4 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/novita_ai/__init__.py +4 -1
- lollms_client-1.10.1/src/lollms_client/tti_bindings/open_router/__init__.py +388 -0
- lollms_client-1.10.1/src/lollms_client/tti_bindings/openai/__init__.py +94 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/stability_ai/__init__.py +4 -2
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +7 -12
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +7 -3
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/lollms/__init__.py +4 -17
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/replicate/__init__.py +7 -4
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +7 -4
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/topmediai/__init__.py +6 -3
- lollms_client-1.10.1/src/lollms_client/tts_bindings/FishSpeech/__init__.py +281 -0
- lollms_client-1.10.1/src/lollms_client/tts_bindings/FishSpeech/server/main.py +260 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/bark/__init__.py +7 -10
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/lollms/__init__.py +7 -12
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/piper_tts/__init__.py +7 -10
- lollms_client-1.10.1/src/lollms_client/tts_bindings/vibevoice/__init__.py +197 -0
- lollms_client-1.10.1/src/lollms_client/tts_bindings/vibevoice/server/main.py +116 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/xtts/__init__.py +72 -23
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/xtts/server/main.py +10 -1
- lollms_client-1.10.1/src/lollms_client/ttv_bindings/diffusers/__init__.py +255 -0
- lollms_client-1.10.1/src/lollms_client/ttv_bindings/diffusers/server/main.py +194 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1/src/lollms_client.egg-info}/PKG-INFO +255 -1
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client.egg-info/SOURCES.txt +16 -2
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client.egg-info/requires.txt +1 -0
- lollms_client-1.6.7/src/lollms_client/llm_bindings/gemini/__init__.py +0 -497
- lollms_client-1.6.7/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -1148
- lollms_client-1.6.7/src/lollms_client/llm_bindings/openllm/__init__.py +0 -550
- lollms_client-1.6.7/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -303
- lollms_client-1.6.7/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -608
- lollms_client-1.6.7/src/lollms_client/llm_bindings/transformers/__init__.py +0 -707
- lollms_client-1.6.7/src/lollms_client/lollms_core.py +0 -4722
- lollms_client-1.6.7/src/lollms_client/lollms_llm_binding.py +0 -577
- lollms_client-1.6.7/src/lollms_client/lollms_stt_binding.py +0 -125
- lollms_client-1.6.7/src/lollms_client/tti_bindings/diffusers/__init__.py +0 -377
- lollms_client-1.6.7/src/lollms_client/tti_bindings/openai/__init__.py +0 -124
- {lollms_client-1.6.7 → lollms_client-1.10.1}/LICENSE +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/setup.cfg +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_config.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_mcp_security.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_personality.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-1.6.7 → lollms_client-1.10.1}/test/test_lollms_discussion.py +0 -0
{lollms_client-1.6.7/src/lollms_client.egg-info → lollms_client-1.10.1}/PKG-INFO
RENAMED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.6.7
+Version: 1.10.1
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License
```
```diff
@@ -229,6 +229,7 @@ Requires-Dist: pydantic
 Requires-Dist: numpy
 Requires-Dist: pillow
 Requires-Dist: sqlalchemy
+Requires-Dist: jsonschema
 Dynamic: license-file
 
 # LoLLMs Client Library
```
```diff
@@ -304,6 +305,7 @@ try:
     llm_binding_config={
         "host_address": "http://localhost:9642", # Default port for LoLLMs server
         # "service_key": "your_lollms_api_key_here" # Get key from LoLLMs UI -> User Settings if security is enabled
+        # "verify_ssl_certificate": True #if false the ssl certifcate verification will be ignored (only used when using https in lollms service address)
     }
 )
 
```
```diff
@@ -963,6 +965,7 @@ try:
     config = {
         "host_address": "http://localhost:9642",
         # "service_key": "your_lollms_api_key_here" # Uncomment and replace if security is enabled
+        # "verify_ssl_certificate": True #if false the ssl certifcate verification will be ignored (only used when using https in lollms service address)
     }
 
     lc = LollmsClient(
```
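Both hunks add the same optional flag to the documented configuration. As a hedged sketch of how it would be used (the host address below is a placeholder, not taken from the package), a client pointed at a LoLLMs server with a self-signed HTTPS certificate could disable verification:

```python
from lollms_client import LollmsClient

lc = LollmsClient(
    llm_binding_name="lollms",
    llm_binding_config={
        # Placeholder address; only the flag below comes from the diff.
        "host_address": "https://my-lollms.local:9642",
        # Skips SSL certificate checks, per the comment added above.
        # Only sensible for hosts you trust.
        "verify_ssl_certificate": False,
    }
)
```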
````diff
@@ -1507,6 +1510,257 @@ try:
 except Exception as e:
     ASCIIColors.error(f"An error occurred during long context processing: {e}")
 ```
+## low level text processing
+Here is the **English, README-ready version**, clean and aligned with LOLLMS documentation standards.
+
+---
+
+## 🧠 Lollms Text Processor
+
+The **Lollms Text Processor** is a high-level utility designed to turn raw LLM generations into **production-ready workflows**.
+It handles long documents, structured outputs, robust code generation, intelligent editing, and reliable parsing.
+
+It is directly accessible via:
+
+```python
+lc.llm.tp
+```
+
+---
+
+## 🔧 Initialization
+
+```python
+from lollms_client import LollmsClient
+
+lc = LollmsClient(
+    llm_binding_name="lollms",
+    llm_binding_config={
+        "model_name": "llama3",
+        "host_address": "http://localhost:9642",
+        "service_key": "the service key"
+    }
+)
+
+llm = lc.llm
+tp = lc.llm.tp
+```
+
+* `llm` provides low-level text generation primitives
+* `tp` is the **Text Processor**, ready to use out of the box
+
+---
+
+## 📚 1. Long Context Processing
+
+The Text Processor automatically handles documents that exceed the model’s context window by chunking, synthesizing intermediate results, and producing a final consolidated output.
+
+### Text generation from a very long document
+
+```python
+summary = tp.long_context_processing(
+    text_to_process=long_document,
+    contextual_prompt="Summarize the main findings about climate change",
+    processing_type="text"
+)
+```
+
+### Structured extraction from long context
+
+```python
+result = tp.long_context_processing(
+    text_to_process=long_document,
+    contextual_prompt="Extract all people mentioned with their roles",
+    processing_type="structured",
+    schema={
+        "type": "object",
+        "properties": {
+            "people": {
+                "type": "array",
+                "items": {
+                    "type": "object",
+                    "properties": {
+                        "name": {"type": "string"},
+                        "role": {"type": "string"}
+                    }
+                }
+            }
+        }
+    }
+)
+```
+
+### Yes / No question over long documents
+
+```python
+answer = tp.long_context_processing(
+    text_to_process=long_document,
+    contextual_prompt="Does this document mention Marie Curie?",
+    processing_type="yes_no",
+    return_explanation=True
+)
+```
+
+---
+
+## 💻 2. Code Generation and Editing
+
+### Single-file code generation
+
+```python
+code = tp.generate_code(
+    prompt="Create a binary search function",
+    language="python"
+)
+```
+
+### Multi-file project generation
+
+```python
+files = tp.generate_codes(
+    prompt="Create a Flask web app with an HTML frontend"
+)
+```
+
+### Efficient code editing (non-destructive)
+
+```python
+updated_code = tp.edit_code(
+    original_code=existing_code,
+    edit_instruction="Add error handling and logging",
+    language="python"
+)
+```
+
+Unlike naïve prompting, edits are **structural**, not full rewrites.
+
+---
+
+## 🧩 3. Structured Content Generation
+
+### Using JSON Schema
+
+```python
+data = tp.generate_structured_content(
+    prompt="Create a presentation about AI",
+    schema={
+        "type": "object",
+        "properties": {
+            "slides": {
+                "type": "array",
+                "items": {"type": "object"}
+            }
+        }
+    }
+)
+```
+
+### Using Pydantic models
+
+```python
+from pydantic import BaseModel
+
+class Person(BaseModel):
+    name: str
+    age: int
+
+person = tp.generate_structured_content_pydantic(
+    prompt="Create a person named Alice, age 30",
+    pydantic_model=Person
+)
+```
+
+✔ Automatic validation
+✔ Truncation recovery
+✔ Agent-friendly outputs
+
+---
+
+## 🧠 4. LLM Helper Utilities
+
+### Yes / No questions
+
+```python
+answer = tp.yes_no(
+    question="Is Marie Curie a scientist?",
+    context="Marie Curie was a physicist...",
+    return_explanation=True
+)
+```
+
+### Multiple-choice questions
+
+```python
+choice = tp.multichoice_question(
+    question="What field did Marie Curie work in?",
+    possible_answers=["Biology", "Physics", "Chemistry"]
+)
+```
+
+### Text summarization
+
+```python
+summary = tp.summerize_text(text="Long article...")
+```
+
+### Keyword extraction
+
+```python
+keywords = tp.extract_keywords(
+    text="Long article...",
+    num_keywords=5
+)
+```
+
+---
+
+## 🧪 5. Response Parsing and Cleanup
+
+### Extract reasoning / thinking blocks
+
+```python
+thoughts = tp.extract_thinking_blocks(llm_response)
+```
+
+### Remove reasoning blocks
+
+```python
+clean_text = tp.remove_thinking_blocks(llm_response)
+```
+
+### Extract code blocks (legacy support)
+
+```python
+blocks = tp.extract_code_blocks(
+    text=llm_response,
+    format="markdown"
+)
+```
+
+---
+
+## ✨ Key Features
+
+* ✅ Automatic **long-context handling**
+* ✅ XML-based code generation (no fragile backticks)
+* ✅ Truncation recovery for JSON and code
+* ✅ Non-destructive, structured code editing
+* ✅ JSON Schema & Pydantic support
+* ✅ Decision helpers (yes/no, multichoice, ranking)
+* ✅ Graceful fallback strategies
+
+---
+
+## 🏁 Summary
+
+The **Lollms Text Processor** turns a raw LLM into a **reliable production tool**.
+
+> `lc.llm` generates
+> `lc.llm.tp` structures, validates, and secures
+
+A core component of **LOLLMS — one tool to rule them all** 🚀
+
+
 
 ## Contributing
 
````
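The added documentation shows the `tp` helper calls but not how their results are consumed. A minimal usage sketch, with `lc` and `tp` initialized as in the quoted Initialization section; the boolean return of `tp.yes_no` (when `return_explanation` is left off) and the string return of `tp.summerize_text` are assumptions inferred from their names, not shown in this diff:

```python
# Hypothetical gate built on the helpers documented in the added README section.
report_text = "Q3 report: spending exceeded the approved budget by 12%..."

if tp.yes_no(question="Does the report mention budget overruns?", context=report_text):
    # Only summarize documents that pass the cheap yes/no check first.
    print(tp.summerize_text(text=report_text))
```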
{lollms_client-1.6.7 → lollms_client-1.10.1}/README.md
RENAMED

```diff
@@ -71,6 +71,7 @@ try:
     llm_binding_config={
         "host_address": "http://localhost:9642", # Default port for LoLLMs server
         # "service_key": "your_lollms_api_key_here" # Get key from LoLLMs UI -> User Settings if security is enabled
+        # "verify_ssl_certificate": True #if false the ssl certifcate verification will be ignored (only used when using https in lollms service address)
     }
 )
 
```
```diff
@@ -730,6 +731,7 @@ try:
     config = {
         "host_address": "http://localhost:9642",
         # "service_key": "your_lollms_api_key_here" # Uncomment and replace if security is enabled
+        # "verify_ssl_certificate": True #if false the ssl certifcate verification will be ignored (only used when using https in lollms service address)
     }
 
     lc = LollmsClient(
```
```diff
@@ -1274,6 +1276,257 @@ try:
```

(The 251 lines added by this hunk are the same "Lollms Text Processor" section shown in the PKG-INFO diff above; PKG-INFO embeds the README, so the addition is identical and is not repeated here.)
{lollms_client-1.6.7 → lollms_client-1.10.1}/pyproject.toml
RENAMED

```diff
@@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "lollms_client"
-dynamic = ["version"]
+dynamic = ["version"]
 authors = [
     {name = "ParisNeo", email = "parisneoai@gmail.com"},
 ]
```
```diff
@@ -36,7 +36,8 @@ dependencies = [
     "pydantic",
     "numpy",
     "pillow",
-    "sqlalchemy"
+    "sqlalchemy",
+    "jsonschema"
 ]
 
 [project.urls]
```
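The new `jsonschema` dependency lines up with the schema-driven helpers documented in the README addition above. A standalone sketch of the kind of validation that library enables (the schema and candidate data here are illustrative, not taken from the package, and how lollms_client wires this in internally is not shown by the diff):

```python
from jsonschema import ValidationError, validate

# Same shape as the "people" schema from the quoted README example.
schema = {
    "type": "object",
    "properties": {
        "people": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "role": {"type": "string"},
                },
            },
        },
    },
}

candidate = {"people": [{"name": "Marie Curie", "role": "physicist"}]}
try:
    validate(instance=candidate, schema=schema)  # raises on mismatch
except ValidationError as e:
    print(f"Model output failed schema validation: {e.message}")
```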
```diff
@@ -44,7 +45,7 @@ Homepage = "https://github.com/ParisNeo/lollms_client"
 
 [tool.setuptools.packages.find]
 where = ["src"]
-exclude = ["lollms_client.egg-info*"]
+exclude = ["lollms_client.egg-info*", "lollms_client.llm_bindings.llama_cpp_server.bin"]
 
 [tool.setuptools.dynamic]
 version = {attr = "lollms_client.__version__"}
```
{lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/__init__.py
RENAMED

```diff
@@ -1,4 +1,3 @@
-# lollms_client/__init__.py
 from lollms_client.lollms_core import LollmsClient, ELF_COMPLETION_FORMAT
 from lollms_client.lollms_types import MSG_TYPE # Assuming ELF_GENERATION_FORMAT is not directly used by users from here
 from lollms_client.lollms_discussion import LollmsDiscussion, LollmsDataManager, LollmsMessage
```
```diff
@@ -7,8 +6,10 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 # Import new MCP binding classes
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
+# Import new bindings utils
+from lollms_client.lollms_bindings_utils import list_bindings, get_binding_desc
 
-__version__ = "1.6.7"
+__version__ = "1.10.1" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
```
```diff
@@ -23,4 +24,6 @@ __all__ = [
     "LollmsMCPBinding", # Export LollmsMCPBinding ABC
     "LollmsLLMBindingManager",
     "LollmsMCPBindingManager", # Export LollmsMCPBindingManager
+    "list_bindings",
+    "get_binding_desc"
 ]
```
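The new top-level exports, together with the added `lollms_client/lollms_bindings_utils.py` module listed above, suggest a discovery API for the binding catalog. A hedged sketch; only the import is confirmed by the diff, and the call signatures below are assumptions:

```python
from lollms_client import get_binding_desc, list_bindings

# Assumption: list_bindings enumerates available binding names and
# get_binding_desc returns a description for one of them. The exact
# signatures are not visible in this diff.
for name in list_bindings():
    print(name, get_binding_desc(name))
```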
{lollms_client-1.6.7 → lollms_client-1.10.1}/src/lollms_client/llm_bindings/azure_openai/__init__.py
RENAMED

```diff
@@ -138,7 +138,7 @@ class AzureOpenAIBinding(LollmsLLMBinding):
         # Pass all relevant kwargs to the chat method
         return self.chat(temp_discussion, **kwargs)
 
-    def chat(self,
+    def _chat(self,
         discussion: LollmsDiscussion,
         branch_tip_id: Optional[str] = None,
         n_predict: Optional[int] = 2048,
```