lollms-client 1.5.6__tar.gz → 1.5.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic. Click here for more details.
- {lollms_client-1.5.6/src/lollms_client.egg-info → lollms_client-1.5.8}/PKG-INFO +2 -2
- {lollms_client-1.5.6 → lollms_client-1.5.8}/pyproject.toml +1 -1
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/__init__.py +1 -1
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/lollms/__init__.py +1 -1
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_core.py +319 -151
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_llm_binding.py +1 -1
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/diffusers/__init__.py +8 -2
- {lollms_client-1.5.6 → lollms_client-1.5.8/src/lollms_client.egg-info}/PKG-INFO +2 -2
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client.egg-info/requires.txt +1 -1
- {lollms_client-1.5.6 → lollms_client-1.5.8}/LICENSE +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/README.md +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/setup.cfg +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_agentic.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_config.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_mcp_security.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_personality.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_types.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/xtts/server/main.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client.egg-info/SOURCES.txt +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-1.5.6 → lollms_client-1.5.8}/test/test_lollms_discussion.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: lollms_client
|
|
3
|
-
Version: 1.5.
|
|
3
|
+
Version: 1.5.8
|
|
4
4
|
Summary: A client library for LoLLMs generate endpoint
|
|
5
5
|
Author-email: ParisNeo <parisneoai@gmail.com>
|
|
6
6
|
License: Apache License
|
|
@@ -222,7 +222,7 @@ License-File: LICENSE
|
|
|
222
222
|
Requires-Dist: httpx
|
|
223
223
|
Requires-Dist: requests
|
|
224
224
|
Requires-Dist: ascii-colors
|
|
225
|
-
Requires-Dist: pipmaster
|
|
225
|
+
Requires-Dist: pipmaster>=1.0.5
|
|
226
226
|
Requires-Dist: pyyaml
|
|
227
227
|
Requires-Dist: tiktoken
|
|
228
228
|
Requires-Dist: pydantic
|
|
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
|
|
|
8
8
|
from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
|
|
9
9
|
from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
|
|
10
10
|
|
|
11
|
-
__version__ = "1.5.
|
|
11
|
+
__version__ = "1.5.8" # Updated version
|
|
12
12
|
|
|
13
13
|
# Optionally, you could define __all__ if you want to be explicit about exports
|
|
14
14
|
__all__ = [
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/lollms/__init__.py
RENAMED
|
@@ -232,7 +232,7 @@ class LollmsBinding(LollmsLLMBinding):
|
|
|
232
232
|
chat_completion = self.client.chat.completions.create(**params)
|
|
233
233
|
except Exception as ex:
|
|
234
234
|
# exception for new openai models
|
|
235
|
-
params["max_completion_tokens"]=params
|
|
235
|
+
params["max_completion_tokens"]=params.get("max_tokens") or params.get("max_completion_tokens") or self.default_ctx_size
|
|
236
236
|
params["temperature"]=1
|
|
237
237
|
try: del params["max_tokens"]
|
|
238
238
|
except Exception: pass
|
|
@@ -5736,186 +5736,354 @@ Provide the final aggregated answer in {output_format} format, directly addressi
|
|
|
5736
5736
|
self,
|
|
5737
5737
|
text_to_process: str,
|
|
5738
5738
|
contextual_prompt: Optional[str] = None,
|
|
5739
|
-
|
|
5739
|
+
system_prompt: str= None,
|
|
5740
|
+
context_fill_percentage: float = 0.75,
|
|
5740
5741
|
overlap_tokens: int = 0,
|
|
5742
|
+
expected_generation_tokens: int = 1500,
|
|
5741
5743
|
streaming_callback: Optional[Callable] = None,
|
|
5744
|
+
return_scratchpad_only: bool = False,
|
|
5745
|
+
debug: bool = True,
|
|
5742
5746
|
**kwargs
|
|
5743
5747
|
) -> str:
|
|
5744
|
-
"""
|
|
5745
|
-
Summarizes a long text that may not fit into the model's context window.
|
|
5748
|
+
"""Enhanced long context processing with Moby Dick literary analysis optimization."""
|
|
5746
5749
|
|
|
5747
|
-
|
|
5748
|
-
|
|
5749
|
-
2. **Synthesize:** It then takes all the chunk summaries and performs a final summarization pass to create a single, coherent, and comprehensive summary.
|
|
5750
|
+
if debug:
|
|
5751
|
+
print(f"\n🔧 DEBUG: Starting processing with {len(text_to_process):,} characters")
|
|
5750
5752
|
|
|
5751
|
-
|
|
5752
|
-
|
|
5753
|
-
|
|
5754
|
-
For example, "Summarize the text focusing on the financial implications."
|
|
5755
|
-
Defaults to None.
|
|
5756
|
-
chunk_size_tokens (int, optional): The number of tokens in each text chunk. This should be well
|
|
5757
|
-
within the model's context limit to allow space for prompts.
|
|
5758
|
-
Defaults to 1500.
|
|
5759
|
-
overlap_tokens (int, optional): The number of tokens to overlap between chunks to ensure context
|
|
5760
|
-
is not lost at the boundaries. Defaults to 250.
|
|
5761
|
-
streaming_callback (Optional[Callable], optional): A callback function to receive real-time updates
|
|
5762
|
-
on the process (e.g., which chunk is being processed).
|
|
5763
|
-
It receives a message, a message type, and optional metadata.
|
|
5764
|
-
Defaults to None.
|
|
5765
|
-
**kwargs: Additional keyword arguments to be passed to the generation method (e.g., temperature, top_p).
|
|
5753
|
+
# Validate context fill percentage
|
|
5754
|
+
if not (0.1 <= context_fill_percentage <= 0.9):
|
|
5755
|
+
raise ValueError(f"context_fill_percentage must be between 0.1 and 0.9, got {context_fill_percentage}")
|
|
5766
5756
|
|
|
5767
|
-
|
|
5768
|
-
|
|
5769
|
-
|
|
5770
|
-
|
|
5771
|
-
|
|
5757
|
+
# Get context size
|
|
5758
|
+
try:
|
|
5759
|
+
context_size = self.llm.get_context_size() or 4096
|
|
5760
|
+
except:
|
|
5761
|
+
context_size = 4096
|
|
5762
|
+
|
|
5763
|
+
if debug:
|
|
5764
|
+
print(f"🔧 DEBUG: Context size: {context_size}, Fill %: {context_fill_percentage}")
|
|
5765
|
+
|
|
5766
|
+
# Handle empty input
|
|
5772
5767
|
if not text_to_process:
|
|
5773
|
-
|
|
5774
|
-
|
|
5775
|
-
|
|
5776
|
-
|
|
5777
|
-
|
|
5778
|
-
|
|
5779
|
-
|
|
5780
|
-
|
|
5768
|
+
return ""
|
|
5769
|
+
|
|
5770
|
+
# Use word-based tokenization
|
|
5771
|
+
tokens = text_to_process.split()
|
|
5772
|
+
if debug:
|
|
5773
|
+
print(f"🔧 DEBUG: Tokenized into {len(tokens):,} word tokens")
|
|
5774
|
+
|
|
5775
|
+
# Dynamic token budget calculation
|
|
5776
|
+
def calculate_token_budgets(scratchpad_content: str = "", step_num: int = 0) -> dict:
|
|
5777
|
+
base_system_tokens = 250 # Increased for literary-specific prompts
|
|
5778
|
+
user_template_tokens = 300 # Increased for detailed instructions
|
|
5779
|
+
scratchpad_tokens = len(scratchpad_content.split()) * 1.3 if scratchpad_content else 0
|
|
5780
|
+
|
|
5781
|
+
used_tokens = base_system_tokens + user_template_tokens + scratchpad_tokens + expected_generation_tokens
|
|
5782
|
+
total_budget = int(context_size * context_fill_percentage)
|
|
5783
|
+
available_for_chunk = max(400, int(total_budget - used_tokens)) # Increased minimum for better context
|
|
5784
|
+
|
|
5785
|
+
budget_info = {
|
|
5786
|
+
"total_budget": total_budget,
|
|
5787
|
+
"chunk_budget": available_for_chunk,
|
|
5788
|
+
"efficiency_ratio": available_for_chunk / total_budget,
|
|
5789
|
+
"scratchpad_tokens": int(scratchpad_tokens),
|
|
5790
|
+
"used_tokens": int(used_tokens)
|
|
5791
|
+
}
|
|
5792
|
+
|
|
5793
|
+
if debug:
|
|
5794
|
+
print(f"🔧 DEBUG Step {step_num}: Budget = {available_for_chunk}/{total_budget} tokens, "
|
|
5795
|
+
f"Scratchpad = {int(scratchpad_tokens)} tokens")
|
|
5796
|
+
|
|
5797
|
+
return budget_info
|
|
5798
|
+
|
|
5799
|
+
# Initial budget calculation
|
|
5800
|
+
initial_budget = calculate_token_budgets()
|
|
5801
|
+
chunk_size_tokens = initial_budget["chunk_budget"]
|
|
5802
|
+
|
|
5803
|
+
if debug:
|
|
5804
|
+
print(f"🔧 DEBUG: Initial chunk size: {chunk_size_tokens} word tokens")
|
|
5805
|
+
|
|
5806
|
+
if streaming_callback:
|
|
5807
|
+
streaming_callback(
|
|
5808
|
+
f"Context Budget: {initial_budget['chunk_budget']:,}/{initial_budget['total_budget']:,} tokens "
|
|
5809
|
+
f"({initial_budget['efficiency_ratio']:.1%} efficiency)",
|
|
5810
|
+
MSG_TYPE.MSG_TYPE_STEP,
|
|
5811
|
+
{"budget_info": initial_budget}
|
|
5812
|
+
)
|
|
5813
|
+
|
|
5814
|
+
# Single pass for short content
|
|
5781
5815
|
if len(tokens) <= chunk_size_tokens:
|
|
5816
|
+
if debug:
|
|
5817
|
+
print("🔧 DEBUG: Using single-pass processing")
|
|
5818
|
+
|
|
5782
5819
|
if streaming_callback:
|
|
5783
|
-
streaming_callback("
|
|
5784
|
-
|
|
5785
|
-
|
|
5786
|
-
|
|
5787
|
-
|
|
5788
|
-
|
|
5789
|
-
|
|
5790
|
-
|
|
5791
|
-
|
|
5792
|
-
|
|
5793
|
-
system_prompt += "-- Extra instructions --\n"+ kwargs["system_prompt"] +"\n"
|
|
5794
|
-
del kwargs["system_prompt"]
|
|
5795
|
-
prompt_objective = contextual_prompt or "Provide a comprehensive summary of the content."
|
|
5796
|
-
final_prompt = f"{prompt_objective}"
|
|
5797
|
-
|
|
5798
|
-
processed_output = self.generate_text(final_prompt, system_prompt=system_prompt, **kwargs)
|
|
5799
|
-
|
|
5800
|
-
if streaming_callback:
|
|
5801
|
-
streaming_callback("Content processed.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 100})
|
|
5802
|
-
|
|
5803
|
-
return processed_output
|
|
5820
|
+
streaming_callback("Content fits in single pass", MSG_TYPE.MSG_TYPE_STEP, {})
|
|
5821
|
+
|
|
5822
|
+
# FIXED: Moby Dick-specific single-pass system prompt
|
|
5823
|
+
system_prompt = (
|
|
5824
|
+
"You are a literary analysis expert specializing in Herman Melville's works. "
|
|
5825
|
+
"Analyze the provided Moby Dick text with deep understanding that Melville's "
|
|
5826
|
+
"detailed realism, technical descriptions, and cultural documentation are "
|
|
5827
|
+
"integral literary techniques. Focus on themes, narrative methods, symbolism, "
|
|
5828
|
+
"and cultural commentary."
|
|
5829
|
+
)
|
|
5804
5830
|
|
|
5805
|
-
|
|
5806
|
-
|
|
5807
|
-
|
|
5808
|
-
|
|
5809
|
-
|
|
5810
|
-
|
|
5811
|
-
|
|
5831
|
+
prompt_objective = contextual_prompt or "Provide comprehensive Moby Dick literary analysis."
|
|
5832
|
+
final_prompt = f"{prompt_objective}\n\n--- Moby Dick Content ---\n{text_to_process}"
|
|
5833
|
+
|
|
5834
|
+
try:
|
|
5835
|
+
result = self.remove_thinking_blocks(self.llm.generate_text(final_prompt, system_prompt=system_prompt, **kwargs))
|
|
5836
|
+
if debug:
|
|
5837
|
+
print(f"🔧 DEBUG: Single-pass result: {len(result):,} characters")
|
|
5838
|
+
return result
|
|
5839
|
+
except Exception as e:
|
|
5840
|
+
if debug:
|
|
5841
|
+
print(f"🔧 DEBUG: Single-pass failed: {e}")
|
|
5842
|
+
return f"Error in single-pass processing: {e}"
|
|
5843
|
+
|
|
5844
|
+
# Multi-chunk processing with FIXED prompts
|
|
5845
|
+
if debug:
|
|
5846
|
+
print("🔧 DEBUG: Using multi-chunk processing with Moby Dick-optimized prompts")
|
|
5812
5847
|
|
|
5813
5848
|
chunk_summaries = []
|
|
5814
|
-
|
|
5815
|
-
|
|
5816
|
-
|
|
5817
|
-
|
|
5818
|
-
|
|
5819
|
-
|
|
5820
|
-
|
|
5821
|
-
|
|
5822
|
-
|
|
5823
|
-
|
|
5824
|
-
|
|
5825
|
-
|
|
5826
|
-
|
|
5827
|
-
|
|
5828
|
-
|
|
5829
|
-
|
|
5830
|
-
|
|
5831
|
-
|
|
5832
|
-
|
|
5833
|
-
|
|
5834
|
-
|
|
5835
|
-
|
|
5836
|
-
|
|
5837
|
-
system_prompt += "-- Extra instructions --\n"+ kwargs["system_prompt"] +"\n"
|
|
5838
|
-
del kwargs["system_prompt"]
|
|
5839
|
-
chunk_summary_prompt_template = f"--- Global objective ---\n{summarization_objective}\n\n--- Text Excerpt ---\n{{chunk_text}}"
|
|
5849
|
+
current_position = 0
|
|
5850
|
+
step_number = 1
|
|
5851
|
+
|
|
5852
|
+
while current_position < len(tokens):
|
|
5853
|
+
# Recalculate budget
|
|
5854
|
+
current_scratchpad = "\n\n---\n\n".join(chunk_summaries) if chunk_summaries else "[Empty]"
|
|
5855
|
+
current_budget = calculate_token_budgets(current_scratchpad, step_number)
|
|
5856
|
+
adaptive_chunk_size = max(400, current_budget["chunk_budget"]) # Increased minimum
|
|
5857
|
+
|
|
5858
|
+
# Extract chunk
|
|
5859
|
+
chunk_end = min(current_position + adaptive_chunk_size, len(tokens))
|
|
5860
|
+
chunk_tokens = tokens[current_position:chunk_end]
|
|
5861
|
+
chunk_text = " ".join(chunk_tokens)
|
|
5862
|
+
|
|
5863
|
+
if debug:
|
|
5864
|
+
print(f"\n🔧 DEBUG Step {step_number}: Processing chunk {current_position}:{chunk_end} "
|
|
5865
|
+
f"({len(chunk_tokens)} tokens, {len(chunk_text)} chars)")
|
|
5866
|
+
|
|
5867
|
+
# Progress calculation
|
|
5868
|
+
remaining_tokens = len(tokens) - current_position
|
|
5869
|
+
estimated_remaining_steps = max(1, remaining_tokens // adaptive_chunk_size)
|
|
5870
|
+
total_estimated_steps = step_number + estimated_remaining_steps - 1
|
|
5871
|
+
progress = (current_position / len(tokens)) * 90
|
|
5840
5872
|
|
|
5841
|
-
for i, chunk in enumerate(chunks):
|
|
5842
|
-
progress_before = (i / total_steps) * 100
|
|
5843
5873
|
if streaming_callback:
|
|
5844
5874
|
streaming_callback(
|
|
5845
|
-
f"Processing chunk {
|
|
5846
|
-
|
|
5847
|
-
|
|
5875
|
+
f"Processing chunk {step_number}/{total_estimated_steps} - "
|
|
5876
|
+
f"Budget: {adaptive_chunk_size:,} tokens",
|
|
5877
|
+
MSG_TYPE.MSG_TYPE_STEP_START,
|
|
5878
|
+
{"step": step_number, "progress": progress}
|
|
5848
5879
|
)
|
|
5880
|
+
|
|
5849
5881
|
try:
|
|
5850
|
-
prompt
|
|
5851
|
-
|
|
5852
|
-
|
|
5853
|
-
|
|
5854
|
-
|
|
5855
|
-
|
|
5856
|
-
|
|
5857
|
-
|
|
5858
|
-
|
|
5859
|
-
|
|
5860
|
-
|
|
5882
|
+
# FIXED: Moby Dick-specific system prompt that prevents false filtering
|
|
5883
|
+
system_prompt = (
|
|
5884
|
+
f"You are analyzing Herman Melville's \"Moby Dick\" - a complex literary work where EVERY passage contains literary value.\n\n"
|
|
5885
|
+
f"**Critical Understanding:**\n"
|
|
5886
|
+
f"- Melville's detailed descriptions of whaling culture ARE literary techniques\n"
|
|
5887
|
+
f"- Technical passages reveal themes about knowledge, obsession, and human industry\n"
|
|
5888
|
+
f"- Social customs and maritime protocols reflect broader themes of hierarchy and civilization\n"
|
|
5889
|
+
f"- Even seemingly mundane details contribute to Melville's encyclopedic narrative style\n\n"
|
|
5890
|
+
f"**Current Status:** Step {step_number} of ~{total_estimated_steps} | Progress: {progress:.1f}%\n\n"
|
|
5891
|
+
f"**Your Task:**\n"
|
|
5892
|
+
f"Extract literary insights from this text chunk, focusing on:\n"
|
|
5893
|
+
f"1. **Themes** (obsession, knowledge, nature vs civilization, social hierarchy)\n"
|
|
5894
|
+
f"2. **Narrative Technique** (Melville's encyclopedic style, detailed realism)\n"
|
|
5895
|
+
f"3. **Cultural Commentary** (maritime society, American industry, social structures)\n"
|
|
5896
|
+
f"4. **Character Insights** (authority, dignity, social roles)\n"
|
|
5897
|
+
f"5. **Symbolic Elements** (ships, sea, whaling practices as metaphors)\n\n"
|
|
5898
|
+
f"**CRITICAL:** The scratchpad shows '{current_scratchpad[:20]}...' - if it shows '[Empty]', you are analyzing early content and everything you find is 'new' information. "
|
|
5899
|
+
f"Do NOT say '[No new information]' unless the chunk is literally empty or corrupted.\n\n"
|
|
5900
|
+
f"Be specific and extract concrete insights. Melville's detailed realism IS his literary technique."
|
|
5901
|
+
)
|
|
5902
|
+
|
|
5903
|
+
# FIXED: Moby Dick-specific user prompt with clear instructions
|
|
5904
|
+
summarization_objective = contextual_prompt or "Create comprehensive literary analysis of Moby-Dick focusing on themes, character development, narrative techniques, and symbolism"
|
|
5905
|
+
|
|
5906
|
+
# Determine scratchpad status for better context
|
|
5907
|
+
scratchpad_status = "The analysis is just beginning - this is among the first substantial content to be processed." if current_scratchpad == "[Empty]" else f"Building on existing analysis with {len(chunk_summaries)} sections already completed."
|
|
5908
|
+
|
|
5909
|
+
user_prompt = (
|
|
5910
|
+
f"--- Global Objective ---\n{summarization_objective}\n\n"
|
|
5911
|
+
f"--- Current Progress ---\n"
|
|
5912
|
+
f"Step {step_number} of ~{total_estimated_steps} | Progress: {progress:.1f}% | Token Budget: {adaptive_chunk_size:,}\n\n"
|
|
5913
|
+
f"--- Current Analysis State ---\n{scratchpad_status}\n\n"
|
|
5914
|
+
f"--- Existing Scratchpad Content ---\n{current_scratchpad}\n\n"
|
|
5915
|
+
f"--- New Text Chunk from Moby Dick ---\n{chunk_text}\n\n"
|
|
5916
|
+
f"--- Analysis Instructions ---\n"
|
|
5917
|
+
f"This is Melville's \"Moby Dick\" - extract literary insights from this passage. Consider:\n\n"
|
|
5918
|
+
f"• **What themes** does this passage develop? (obsession with knowledge, social hierarchy, maritime culture)\n"
|
|
5919
|
+
f"• **What narrative techniques** does Melville use? (detailed realism, encyclopedic style, technical precision)\n"
|
|
5920
|
+
f"• **What cultural commentary** is present? (whaling society, American industry, social protocols)\n"
|
|
5921
|
+
f"• **What character insights** emerge? (authority, dignity, social roles and expectations)\n"
|
|
5922
|
+
f"• **What symbolic elements** appear? (ships, maritime customs, hierarchical structures)\n\n"
|
|
5923
|
+
f"**Remember:** In Moby Dick, even technical descriptions serve literary purposes. Melville's detailed realism and cultural documentation ARE his narrative techniques.\n\n"
|
|
5924
|
+
f"Provide specific, concrete analysis with examples from the text. Extract insights that are not already captured in the scratchpad above."
|
|
5925
|
+
)
|
|
5926
|
+
|
|
5927
|
+
if debug:
|
|
5928
|
+
print(f"🔧 DEBUG: Sending {len(user_prompt)} char prompt to LLM")
|
|
5929
|
+
print(f"🔧 DEBUG: Scratchpad status: {scratchpad_status}")
|
|
5930
|
+
|
|
5931
|
+
chunk_summary = self.remove_thinking_blocks(self.llm.generate_text(user_prompt, system_prompt=system_prompt, **kwargs))
|
|
5932
|
+
|
|
5933
|
+
if debug:
|
|
5934
|
+
print(f"🔧 DEBUG: Received {len(chunk_summary)} char response")
|
|
5935
|
+
print(f"🔧 DEBUG: Response preview: {chunk_summary[:200]}...")
|
|
5936
|
+
|
|
5937
|
+
# FIXED: More intelligent content filtering specifically for literary analysis
|
|
5938
|
+
filter_out = False
|
|
5939
|
+
|
|
5940
|
+
# Check for explicit rejection signals
|
|
5941
|
+
if (chunk_summary.strip().startswith('[No new insights]') or
|
|
5942
|
+
chunk_summary.strip().startswith('[No new information]') or
|
|
5943
|
+
chunk_summary.strip().startswith('[No significant') or
|
|
5944
|
+
'cannot provide' in chunk_summary.lower()[:100] or
|
|
5945
|
+
'unable to analyze' in chunk_summary.lower()[:100]):
|
|
5946
|
+
filter_out = True
|
|
5947
|
+
filter_reason = "explicit rejection signal"
|
|
5948
|
+
|
|
5949
|
+
# Check for too short responses
|
|
5950
|
+
elif len(chunk_summary.strip()) < 50:
|
|
5951
|
+
filter_out = True
|
|
5952
|
+
filter_reason = "response too short"
|
|
5953
|
+
|
|
5954
|
+
# Check for error responses
|
|
5955
|
+
elif any(error_phrase in chunk_summary.lower()[:150] for error_phrase in [
|
|
5956
|
+
'error', 'failed', 'cannot', 'unable', 'not possible', 'insufficient']):
|
|
5957
|
+
filter_out = True
|
|
5958
|
+
filter_reason = "error response detected"
|
|
5959
|
+
|
|
5960
|
+
else:
|
|
5961
|
+
filter_reason = "content accepted"
|
|
5962
|
+
|
|
5963
|
+
if not filter_out:
|
|
5964
|
+
chunk_summaries.append(chunk_summary.strip())
|
|
5965
|
+
content_added = True
|
|
5966
|
+
if debug:
|
|
5967
|
+
print(f"🔧 DEBUG: ✅ Content added to scratchpad (total sections: {len(chunk_summaries)})")
|
|
5968
|
+
else:
|
|
5969
|
+
content_added = False
|
|
5970
|
+
if debug:
|
|
5971
|
+
print(f"🔧 DEBUG: ❌ Content filtered out - {filter_reason}: {chunk_summary[:100]}...")
|
|
5972
|
+
|
|
5973
|
+
# Update progress
|
|
5861
5974
|
if streaming_callback:
|
|
5975
|
+
updated_scratchpad = "\n\n---\n\n".join(chunk_summaries)
|
|
5862
5976
|
streaming_callback(
|
|
5863
|
-
|
|
5864
|
-
MSG_TYPE.
|
|
5865
|
-
{
|
|
5977
|
+
updated_scratchpad,
|
|
5978
|
+
MSG_TYPE.MSG_TYPE_SCRATCHPAD,
|
|
5979
|
+
{
|
|
5980
|
+
"step": step_number,
|
|
5981
|
+
"sections": len(chunk_summaries),
|
|
5982
|
+
"content_added": content_added,
|
|
5983
|
+
"filter_reason": filter_reason
|
|
5984
|
+
}
|
|
5866
5985
|
)
|
|
5986
|
+
|
|
5987
|
+
progress_after = ((current_position + len(chunk_tokens)) / len(tokens)) * 90
|
|
5988
|
+
if streaming_callback:
|
|
5989
|
+
streaming_callback(
|
|
5990
|
+
f"Step {step_number} completed - {'Content added' if content_added else f'Filtered: {filter_reason}'}",
|
|
5991
|
+
MSG_TYPE.MSG_TYPE_STEP_END,
|
|
5992
|
+
{"progress": progress_after}
|
|
5993
|
+
)
|
|
5994
|
+
|
|
5867
5995
|
except Exception as e:
|
|
5868
|
-
|
|
5996
|
+
error_msg = f"Step {step_number} failed: {str(e)}"
|
|
5997
|
+
if debug:
|
|
5998
|
+
print(f"🔧 DEBUG: ❌ {error_msg}")
|
|
5999
|
+
self.trace_exception(e)
|
|
5869
6000
|
if streaming_callback:
|
|
5870
|
-
streaming_callback(
|
|
5871
|
-
|
|
5872
|
-
|
|
6001
|
+
streaming_callback(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
|
|
6002
|
+
chunk_summaries.append(f"[Error in step {step_number}: {str(e)[:100]}]")
|
|
6003
|
+
|
|
6004
|
+
# Move to next chunk
|
|
6005
|
+
current_position += max(1, adaptive_chunk_size - overlap_tokens)
|
|
6006
|
+
step_number += 1
|
|
6007
|
+
|
|
6008
|
+
# Safety break
|
|
6009
|
+
if step_number > 50:
|
|
6010
|
+
if debug:
|
|
6011
|
+
print(f"🔧 DEBUG: Breaking after {step_number-1} steps for safety")
|
|
6012
|
+
break
|
|
6013
|
+
|
|
6014
|
+
if debug:
|
|
6015
|
+
print(f"\n🔧 DEBUG: Completed chunking. Total sections: {len(chunk_summaries)}")
|
|
6016
|
+
|
|
6017
|
+
# Return scratchpad if requested
|
|
6018
|
+
if return_scratchpad_only:
|
|
6019
|
+
final_scratchpad = "\n\n---\n\n".join(chunk_summaries)
|
|
6020
|
+
if streaming_callback:
|
|
6021
|
+
streaming_callback("Returning scratchpad only", MSG_TYPE.MSG_TYPE_STEP, {})
|
|
6022
|
+
return final_scratchpad.strip()
|
|
5873
6023
|
|
|
5874
|
-
#
|
|
5875
|
-
progress_before_synthesis = (len(chunks) / total_steps) * 100
|
|
6024
|
+
# Final synthesis
|
|
5876
6025
|
if streaming_callback:
|
|
5877
|
-
streaming_callback(
|
|
5878
|
-
"Processing the scratchpad content into a final version...",
|
|
5879
|
-
MSG_TYPE.MSG_TYPE_STEP_START,
|
|
5880
|
-
{"id": "final_synthesis", "progress": progress_before_synthesis}
|
|
5881
|
-
)
|
|
6026
|
+
streaming_callback("Synthesizing final comprehensive analysis...", MSG_TYPE.MSG_TYPE_STEP_START, {"progress": 90})
|
|
5882
6027
|
|
|
5883
|
-
|
|
5884
|
-
|
|
5885
|
-
|
|
5886
|
-
|
|
5887
|
-
|
|
5888
|
-
|
|
5889
|
-
|
|
5890
|
-
|
|
5891
|
-
|
|
5892
|
-
|
|
5893
|
-
|
|
5894
|
-
|
|
5895
|
-
|
|
5896
|
-
|
|
5897
|
-
|
|
5898
|
-
|
|
5899
|
-
|
|
5900
|
-
|
|
5901
|
-
|
|
5902
|
-
"Do not add comments.\n"
|
|
5903
|
-
)
|
|
5904
|
-
final_synthesis_prompt = (
|
|
5905
|
-
f"--- Global objective ---\n{synthesis_objective}\n\n"
|
|
5906
|
-
"--- Final Response ---"
|
|
6028
|
+
if not chunk_summaries:
|
|
6029
|
+
error_msg = "No content was successfully processed. The text may not contain recognizable literary elements, or there may be an issue with the processing."
|
|
6030
|
+
if debug:
|
|
6031
|
+
print(f"🔧 DEBUG: ❌ {error_msg}")
|
|
6032
|
+
return error_msg
|
|
6033
|
+
|
|
6034
|
+
combined_scratchpad = "\n\n---\n\n".join(chunk_summaries)
|
|
6035
|
+
synthesis_objective = contextual_prompt or "Create comprehensive literary analysis of Moby-Dick."
|
|
6036
|
+
|
|
6037
|
+
if debug:
|
|
6038
|
+
print(f"🔧 DEBUG: Synthesizing from {len(combined_scratchpad):,} char scratchpad with {len(chunk_summaries)} sections")
|
|
6039
|
+
|
|
6040
|
+
# FIXED: Moby Dick-specific synthesis prompts
|
|
6041
|
+
synthesis_system_prompt = (
|
|
6042
|
+
"You are a literary analysis expert creating a final comprehensive analysis of Herman Melville's Moby Dick.\n"
|
|
6043
|
+
"Synthesize all the insights from the analysis sections into a coherent, scholarly response.\n"
|
|
6044
|
+
"Create clear sections with markdown headers, eliminate redundancy, and provide a thorough analysis.\n"
|
|
6045
|
+
"Focus on Melville's major themes, narrative techniques, cultural commentary, and symbolic elements.\n"
|
|
6046
|
+
"Use specific examples from the text and maintain academic rigor throughout."
|
|
5907
6047
|
)
|
|
5908
6048
|
|
|
5909
|
-
|
|
5910
|
-
|
|
5911
|
-
|
|
5912
|
-
|
|
5913
|
-
|
|
5914
|
-
|
|
5915
|
-
|
|
5916
|
-
|
|
6049
|
+
synthesis_user_prompt = (
|
|
6050
|
+
f"--- Analysis Objective ---\n{synthesis_objective}\n\n"
|
|
6051
|
+
f"--- Processing Summary ---\n"
|
|
6052
|
+
f"Successfully analyzed {len(chunk_summaries)} sections of Moby Dick through incremental literary analysis.\n"
|
|
6053
|
+
f"Total scratchpad content: {len(combined_scratchpad):,} characters of literary insights.\n\n"
|
|
6054
|
+
f"--- Collected Literary Analysis Sections ---\n{combined_scratchpad}\n\n"
|
|
6055
|
+
f"--- Final Synthesis Task ---\n"
|
|
6056
|
+
f"Create a comprehensive, well-structured literary analysis of Moby Dick using ALL the insights above. "
|
|
6057
|
+
f"Organize into clear sections with markdown headers (## Theme Analysis, ## Narrative Techniques, ## Cultural Commentary, ## Symbolism, etc.). "
|
|
6058
|
+
f"Eliminate redundancy and create a coherent, scholarly analysis that demonstrates understanding of Melville's complex literary achievement. "
|
|
6059
|
+
f"Include specific textual examples and maintain academic depth throughout."
|
|
6060
|
+
)
|
|
6061
|
+
|
|
6062
|
+
try:
|
|
6063
|
+
final_answer = self.remove_thinking_blocks(self.llm.generate_text(synthesis_user_prompt, system_prompt=synthesis_system_prompt, **kwargs))
|
|
5917
6064
|
|
|
5918
|
-
|
|
6065
|
+
if debug:
|
|
6066
|
+
print(f"🔧 DEBUG: Final analysis: {len(final_answer):,} characters")
|
|
6067
|
+
|
|
6068
|
+
if streaming_callback:
|
|
6069
|
+
streaming_callback(f"Final synthesis completed - {len(final_answer):,} characters generated", MSG_TYPE.MSG_TYPE_STEP_END, {"progress": 100})
|
|
6070
|
+
|
|
6071
|
+
return final_answer.strip()
|
|
6072
|
+
|
|
6073
|
+
except Exception as e:
|
|
6074
|
+
error_msg = f"Synthesis failed: {str(e)}. Returning organized scratchpad content."
|
|
6075
|
+
if debug:
|
|
6076
|
+
print(f"🔧 DEBUG: ❌ {error_msg}")
|
|
6077
|
+
|
|
6078
|
+
# Return organized scratchpad as fallback
|
|
6079
|
+
organized_scratchpad = (
|
|
6080
|
+
f"# Literary Analysis of Moby Dick\n\n"
|
|
6081
|
+
f"*Note: Synthesis process encountered issues, presenting organized analysis sections:*\n\n"
|
|
6082
|
+
f"## Analysis Sections\n\n"
|
|
6083
|
+
f"{combined_scratchpad}"
|
|
6084
|
+
)
|
|
6085
|
+
return organized_scratchpad
|
|
6086
|
+
|
|
5919
6087
|
|
|
5920
6088
|
def chunk_text(text, tokenizer, detokenizer, chunk_size, overlap, use_separators=True):
|
|
5921
6089
|
"""
|
|
@@ -197,7 +197,7 @@ class LollmsLLMBinding(ABC):
|
|
|
197
197
|
"""
|
|
198
198
|
pass
|
|
199
199
|
|
|
200
|
-
def get_ctx_size(self, model_name: Optional[str] = None) -> Optional[int]:
|
|
200
|
+
def get_ctx_size(self, model_name: Optional[str] = None) -> Optional[int]:
|
|
201
201
|
"""
|
|
202
202
|
Retrieves context size for a model from a hardcoded list.
|
|
203
203
|
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/diffusers/__init__.py
RENAMED
|
@@ -19,8 +19,14 @@ from lollms_client.lollms_tti_binding import LollmsTTIBinding
|
|
|
19
19
|
from ascii_colors import trace_exception, ASCIIColors
|
|
20
20
|
|
|
21
21
|
pm.ensure_packages(["torch","torchvision"],index_url="https://download.pytorch.org/whl/cu126")
|
|
22
|
-
pm.ensure_packages(["
|
|
23
|
-
|
|
22
|
+
pm.ensure_packages(["pillow","transformers","safetensors","requests","tqdm"])
|
|
23
|
+
pm.ensure_packages([
|
|
24
|
+
{
|
|
25
|
+
"name": "diffusers",
|
|
26
|
+
"vcs": "git+https://github.com/huggingface/diffusers.git",
|
|
27
|
+
"condition": ">=0.35.1"
|
|
28
|
+
}
|
|
29
|
+
])
|
|
24
30
|
try:
|
|
25
31
|
import torch
|
|
26
32
|
from diffusers import (
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: lollms_client
|
|
3
|
-
Version: 1.5.6
|
|
3
|
+
Version: 1.5.8
|
|
4
4
|
Summary: A client library for LoLLMs generate endpoint
|
|
5
5
|
Author-email: ParisNeo <parisneoai@gmail.com>
|
|
6
6
|
License: Apache License
|
|
@@ -222,7 +222,7 @@ License-File: LICENSE
|
|
|
222
222
|
Requires-Dist: httpx
|
|
223
223
|
Requires-Dist: requests
|
|
224
224
|
Requires-Dist: ascii-colors
|
|
225
|
-
Requires-Dist: pipmaster
|
|
225
|
+
Requires-Dist: pipmaster>=1.0.5
|
|
226
226
|
Requires-Dist: pyyaml
|
|
227
227
|
Requires-Dist: tiktoken
|
|
228
228
|
Requires-Dist: pydantic
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/azure_openai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/claude/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/gemini/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/litellm/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/llamacpp/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/lollms_webui/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/mistral/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/novita_ai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/ollama/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/open_router/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/openai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/openllm/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/openwebui/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/perplexity/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/tensor_rt/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/llm_bindings/transformers/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/local_mcp/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/lollms/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/whisper/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/stt_bindings/whispercpp/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/gemini/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/lollms/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/novita_ai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/openai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tti_bindings/stability_ai/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/audiocraft/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/lollms/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/replicate/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/stability_ai/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttm_bindings/topmediai/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/bark/server/main.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/lollms/__init__.py
RENAMED
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/piper_tts/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/piper_tts/server/main.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/tts_bindings/xtts/server/main.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{lollms_client-1.5.6 → lollms_client-1.5.8}/src/lollms_client/ttv_bindings/lollms/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|