lollms-client 1.5.9__tar.gz → 1.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

Files changed (93)
  1. {lollms_client-1.5.9/src/lollms_client.egg-info → lollms_client-1.6.0}/PKG-INFO +1 -1
  2. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/__init__.py +1 -1
  3. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_core.py +271 -311
  4. {lollms_client-1.5.9 → lollms_client-1.6.0/src/lollms_client.egg-info}/PKG-INFO +1 -1
  5. {lollms_client-1.5.9 → lollms_client-1.6.0}/LICENSE +0 -0
  6. {lollms_client-1.5.9 → lollms_client-1.6.0}/README.md +0 -0
  7. {lollms_client-1.5.9 → lollms_client-1.6.0}/pyproject.toml +0 -0
  8. {lollms_client-1.5.9 → lollms_client-1.6.0}/setup.cfg +0 -0
  9. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
  10. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/__init__.py +0 -0
  11. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  12. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
  13. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  14. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
  15. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
  16. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  17. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  18. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  19. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  20. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
  21. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  22. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
  23. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  24. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  25. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
  26. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  27. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
  28. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
  29. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  30. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  31. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  32. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  33. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_agentic.py +0 -0
  34. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_config.py +0 -0
  35. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_discussion.py +0 -0
  36. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_js_analyzer.py +0 -0
  37. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_llm_binding.py +0 -0
  38. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_mcp_binding.py +0 -0
  39. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_mcp_security.py +0 -0
  40. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_personality.py +0 -0
  41. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_python_analyzer.py +0 -0
  42. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_stt_binding.py +0 -0
  43. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_tti_binding.py +0 -0
  44. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_ttm_binding.py +0 -0
  45. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_tts_binding.py +0 -0
  46. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_ttv_binding.py +0 -0
  47. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_types.py +0 -0
  48. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_utilities.py +0 -0
  49. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  50. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  51. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  52. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  53. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  54. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  55. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  56. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/__init__.py +0 -0
  57. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  58. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  59. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  60. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/__init__.py +0 -0
  61. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  62. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  63. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
  64. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  65. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
  66. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
  67. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
  68. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/__init__.py +0 -0
  69. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  70. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
  71. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  72. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
  73. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
  74. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
  75. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/__init__.py +0 -0
  76. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
  77. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
  78. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
  79. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  80. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  81. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
  82. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
  83. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
  84. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  85. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/xtts/server/main.py +0 -0
  86. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
  87. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttv_bindings/__init__.py +0 -0
  88. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  89. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client.egg-info/SOURCES.txt +0 -0
  90. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client.egg-info/dependency_links.txt +0 -0
  91. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client.egg-info/requires.txt +0 -0
  92. {lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client.egg-info/top_level.txt +0 -0
  93. {lollms_client-1.5.9 → lollms_client-1.6.0}/test/test_lollms_discussion.py +0 -0

{lollms_client-1.5.9/src/lollms_client.egg-info → lollms_client-1.6.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.5.9
+ Version: 1.6.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache License

{lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/__init__.py

@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

- __version__ = "1.5.9" # Updated version
+ __version__ = "1.6.0" # Updated version

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [

{lollms_client-1.5.9 → lollms_client-1.6.0}/src/lollms_client/lollms_core.py

@@ -5732,357 +5732,317 @@ Provide the final aggregated answer in {output_format} format, directly addressi
  callback("Deep analysis complete.", MSG_TYPE.MSG_TYPE_STEP_END)
  return final_output

- def long_context_processing(
- self,
- text_to_process: str,
- contextual_prompt: Optional[str] = None,
- system_prompt: str= None,
- context_fill_percentage: float = 0.75,
- overlap_tokens: int = 0,
- expected_generation_tokens: int = 1500,
- streaming_callback: Optional[Callable] = None,
- return_scratchpad_only: bool = False,
- debug: bool = True,
- **kwargs
- ) -> str:
- """Enhanced long context processing with Moby Dick literary analysis optimization."""
+ def long_context_processing(
+ self,
+ text_to_process: str,
+ contextual_prompt: Optional[str] = None,
+ system_prompt: str | None = None,
+ context_fill_percentage: float = 0.75,
+ overlap_tokens: int = 150, # Added a default for better context continuity
+ expected_generation_tokens: int = 1500,
+ streaming_callback: Optional[Callable] = None,
+ return_scratchpad_only: bool = False,
+ debug: bool = True,
+ **kwargs
+ ) -> str:
+ """
+ Processes long text by breaking it down into chunks, analyzing each one incrementally,
+ and synthesizing the results into a comprehensive final response based on a user-defined objective.
+ """
+
+ if debug:
+ print(f"\n🔧 DEBUG: Starting processing with {len(text_to_process):,} characters")
+
+ # Validate context fill percentage
+ if not (0.1 <= context_fill_percentage <= 0.9):
+ raise ValueError(f"context_fill_percentage must be between 0.1 and 0.9, got {context_fill_percentage}")
+
+ # Get context size
+ try:
+ context_size = self.llm.get_context_size() or 8192 # Using a more modern default
+ except:
+ context_size = 8192
+
+ if debug:
+ print(f"🔧 DEBUG: Context size: {context_size}, Fill %: {context_fill_percentage}")
+
+ # Handle empty input
+ if not text_to_process:
+ return ""
+
+ # Use a simple word-based split for token estimation
+ tokens = text_to_process.split()
+ if debug:
+ print(f"🔧 DEBUG: Tokenized into {len(tokens):,} word tokens")
+
+ # Dynamic token budget calculation
+ def calculate_token_budgets(scratchpad_content: str = "", step_num: int = 0) -> dict:
+ # Generic prompt templates are more concise
+ base_system_tokens = 150
+ user_template_tokens = 250
+ scratchpad_tokens = len(scratchpad_content.split()) * 1.3 if scratchpad_content else 0
+
+ used_tokens = base_system_tokens + user_template_tokens + scratchpad_tokens + expected_generation_tokens
+ total_budget = int(context_size * context_fill_percentage)
+ available_for_chunk = max(500, int(total_budget - used_tokens)) # Ensure a reasonable minimum chunk size
+
+ budget_info = {
+ "total_budget": total_budget,
+ "chunk_budget": available_for_chunk,
+ "efficiency_ratio": available_for_chunk / total_budget if total_budget > 0 else 0,
+ "scratchpad_tokens": int(scratchpad_tokens),
+ "used_tokens": int(used_tokens)
+ }

  if debug:
- print(f"\n🔧 DEBUG: Starting processing with {len(text_to_process):,} characters")
+ print(f"🔧 DEBUG Step {step_num}: Budget = {available_for_chunk}/{total_budget} tokens, "
+ f"Scratchpad = {int(scratchpad_tokens)} tokens")

- # Validate context fill percentage
- if not (0.1 <= context_fill_percentage <= 0.9):
- raise ValueError(f"context_fill_percentage must be between 0.1 and 0.9, got {context_fill_percentage}")
+ return budget_info

- # Get context size
- try:
- context_size = self.llm.get_context_size() or 4096
- except:
- context_size = 4096
+ # Initial budget calculation
+ initial_budget = calculate_token_budgets()
+ chunk_size_tokens = initial_budget["chunk_budget"]

- if debug:
- print(f"🔧 DEBUG: Context size: {context_size}, Fill %: {context_fill_percentage}")
+ if debug:
+ print(f"🔧 DEBUG: Initial chunk size: {chunk_size_tokens} word tokens")

- # Handle empty input
- if not text_to_process:
- return ""
+ if streaming_callback:
+ streaming_callback(
+ f"Context Budget: {initial_budget['chunk_budget']:,}/{initial_budget['total_budget']:,} tokens "
+ f"({initial_budget['efficiency_ratio']:.1%} efficiency)",
+ MSG_TYPE.MSG_TYPE_STEP,
+ {"budget_info": initial_budget}
+ )

- # Use word-based tokenization
- tokens = text_to_process.split()
+ # Single pass for short content
+ if len(tokens) <= chunk_size_tokens:
  if debug:
- print(f"🔧 DEBUG: Tokenized into {len(tokens):,} word tokens")
-
- # Dynamic token budget calculation
- def calculate_token_budgets(scratchpad_content: str = "", step_num: int = 0) -> dict:
- base_system_tokens = 250 # Increased for literary-specific prompts
- user_template_tokens = 300 # Increased for detailed instructions
- scratchpad_tokens = len(scratchpad_content.split()) * 1.3 if scratchpad_content else 0
-
- used_tokens = base_system_tokens + user_template_tokens + scratchpad_tokens + expected_generation_tokens
- total_budget = int(context_size * context_fill_percentage)
- available_for_chunk = max(400, int(total_budget - used_tokens)) # Increased minimum for better context
-
- budget_info = {
- "total_budget": total_budget,
- "chunk_budget": available_for_chunk,
- "efficiency_ratio": available_for_chunk / total_budget,
- "scratchpad_tokens": int(scratchpad_tokens),
- "used_tokens": int(used_tokens)
- }
+ print("🔧 DEBUG: Content is short enough for single-pass processing")

+ if streaming_callback:
+ streaming_callback("Content fits in a single pass", MSG_TYPE.MSG_TYPE_STEP, {})
+
+ # Generic single-pass system prompt
+ system_prompt = (
+ "You are an expert AI assistant for text analysis and summarization. "
+ "Your task is to carefully analyze the provided text and generate a comprehensive, "
+ "accurate, and well-structured response that directly addresses the user's objective. "
+ "Focus on extracting key information, identifying main themes, and synthesizing the content effectively."
+ )
+
+ prompt_objective = contextual_prompt or "Provide a comprehensive summary and analysis of the provided text."
+ final_prompt = f"Objective: {prompt_objective}\n\n--- Full Text Content ---\n{text_to_process}"
+
+ try:
+ result = self.remove_thinking_blocks(self.llm.generate_text(final_prompt, system_prompt=system_prompt, **kwargs))
  if debug:
- print(f"🔧 DEBUG Step {step_num}: Budget = {available_for_chunk}/{total_budget} tokens, "
- f"Scratchpad = {int(scratchpad_tokens)} tokens")
+ print(f"🔧 DEBUG: Single-pass result: {len(result):,} characters")
+ return result
+ except Exception as e:
+ if debug:
+ print(f"🔧 DEBUG: Single-pass processing failed: {e}")
+ return f"Error in single-pass processing: {e}"
+
+ # Multi-chunk processing for long content
+ if debug:
+ print("🔧 DEBUG: Using multi-chunk processing for long content")

- return budget_info
+ chunk_summaries = []
+ current_position = 0
+ step_number = 1

- # Initial budget calculation
- initial_budget = calculate_token_budgets()
- chunk_size_tokens = initial_budget["chunk_budget"]
+ while current_position < len(tokens):
+ # Recalculate budget for each step for dynamic adaptation
+ current_scratchpad = "\n\n---\n\n".join(chunk_summaries)
+ current_budget = calculate_token_budgets(current_scratchpad, step_number)
+ adaptive_chunk_size = max(500, current_budget["chunk_budget"])
+
+ # Extract the next chunk of text
+ chunk_end = min(current_position + adaptive_chunk_size, len(tokens))
+ chunk_tokens = tokens[current_position:chunk_end]
+ chunk_text = " ".join(chunk_tokens)

  if debug:
- print(f"🔧 DEBUG: Initial chunk size: {chunk_size_tokens} word tokens")
+ print(f"\n🔧 DEBUG Step {step_number}: Processing chunk from {current_position} to {chunk_end} "
+ f"({len(chunk_tokens)} tokens)")
+
+ # Progress calculation
+ remaining_tokens = len(tokens) - current_position
+ estimated_remaining_steps = max(1, -(-remaining_tokens // adaptive_chunk_size)) # Ceiling division
+ total_estimated_steps = step_number + estimated_remaining_steps -1
+ progress = (current_position / len(tokens)) * 90 if len(tokens) > 0 else 0

  if streaming_callback:
  streaming_callback(
- f"Context Budget: {initial_budget['chunk_budget']:,}/{initial_budget['total_budget']:,} tokens "
- f"({initial_budget['efficiency_ratio']:.1%} efficiency)",
- MSG_TYPE.MSG_TYPE_STEP,
- {"budget_info": initial_budget}
+ f"Processing chunk {step_number}/{total_estimated_steps} - "
+ f"Budget: {adaptive_chunk_size:,} tokens",
+ MSG_TYPE.MSG_TYPE_STEP_START,
+ {"step": step_number, "progress": progress}
  )

- # Single pass for short content
- if len(tokens) <= chunk_size_tokens:
- if debug:
- print("🔧 DEBUG: Using single-pass processing")
-
- if streaming_callback:
- streaming_callback("Content fits in single pass", MSG_TYPE.MSG_TYPE_STEP, {})
-
- # FIXED: Moby Dick-specific single-pass system prompt
+ try:
+ # Generic, state-aware system prompt
  system_prompt = (
- "You are a literary analysis expert specializing in Herman Melville's works. "
- "Analyze the provided Moby Dick text with deep understanding that Melville's "
- "detailed realism, technical descriptions, and cultural documentation are "
- "integral literary techniques. Focus on themes, narrative methods, symbolism, "
- "and cultural commentary."
+ f"You are a component in a multi-step text processing pipeline. Your role is to analyze a chunk of text and extract key information relevant to a global objective.\n\n"
+ f"**Current Status:** You are on step {step_number} of approximately {total_estimated_steps} steps. Progress is at {progress:.1f}%.\n\n"
+ f"**Your Task:**\n"
+ f"Analyze the 'New Text Chunk' provided below. Extract and summarize any information, data points, or key ideas that are relevant to the 'Global Objective'.\n"
+ f"Review the 'Existing Scratchpad Content' to understand what has already been found. Your goal is to add *new* insights that are not already captured.\n\n"
+ f"**CRITICAL:** Do NOT repeat information already present in the scratchpad. Focus only on new, relevant details from the current chunk. If the chunk contains no new relevant information, respond with '[No new information found in this chunk.]'."
  )

- prompt_objective = contextual_prompt or "Provide comprehensive Moby Dick literary analysis."
- final_prompt = f"{prompt_objective}\n\n--- Moby Dick Content ---\n{text_to_process}"
-
- try:
- result = self.remove_thinking_blocks(self.llm.generate_text(final_prompt, system_prompt=system_prompt, **kwargs))
- if debug:
- print(f"🔧 DEBUG: Single-pass result: {len(result):,} characters")
- return result
- except Exception as e:
- if debug:
- print(f"🔧 DEBUG: Single-pass failed: {e}")
- return f"Error in single-pass processing: {e}"
-
- # Multi-chunk processing with FIXED prompts
- if debug:
- print("🔧 DEBUG: Using multi-chunk processing with Moby Dick-optimized prompts")
-
- chunk_summaries = []
- current_position = 0
- step_number = 1
+ # Generic, context-aware user prompt
+ summarization_objective = contextual_prompt or "Create a comprehensive summary by extracting all key facts, concepts, and conclusions from the text."
+ scratchpad_status = "The analysis is just beginning; this is the first chunk." if not chunk_summaries else f"Building on existing analysis with {len(chunk_summaries)} sections already completed."
+
+ user_prompt = (
+ f"--- Global Objective ---\n{summarization_objective}\n\n"
+ f"--- Current Progress ---\n"
+ f"{scratchpad_status} (Step {step_number}/{total_estimated_steps})\n\n"
+ f"--- Existing Scratchpad Content (for context) ---\n{current_scratchpad}\n\n"
+ f"--- New Text Chunk to Analyze ---\n{chunk_text}\n\n"
+ f"--- Your Instructions ---\n"
+ f"Extract key information from the 'New Text Chunk' that aligns with the 'Global Objective'. "
+ f"Provide a concise summary of the new findings. Do not repeat what is already in the scratchpad. "
+ f"If no new relevant information is found, state that clearly."
+ )

- while current_position < len(tokens):
- # Recalculate budget
- current_scratchpad = "\n\n---\n\n".join(chunk_summaries) if chunk_summaries else "[Empty]"
- current_budget = calculate_token_budgets(current_scratchpad, step_number)
- adaptive_chunk_size = max(400, current_budget["chunk_budget"]) # Increased minimum
+ if debug:
+ print(f"🔧 DEBUG: Sending {len(user_prompt)} char prompt to LLM")

- # Extract chunk
- chunk_end = min(current_position + adaptive_chunk_size, len(tokens))
- chunk_tokens = tokens[current_position:chunk_end]
- chunk_text = " ".join(chunk_tokens)
+ chunk_summary = self.remove_thinking_blocks(self.llm.generate_text(user_prompt, system_prompt=system_prompt, **kwargs))

  if debug:
- print(f"\n🔧 DEBUG Step {step_number}: Processing chunk {current_position}:{chunk_end} "
- f"({len(chunk_tokens)} tokens, {len(chunk_text)} chars)")
-
- # Progress calculation
- remaining_tokens = len(tokens) - current_position
- estimated_remaining_steps = max(1, remaining_tokens // adaptive_chunk_size)
- total_estimated_steps = step_number + estimated_remaining_steps - 1
- progress = (current_position / len(tokens)) * 90
+ print(f"🔧 DEBUG: Received {len(chunk_summary)} char response preview: {chunk_summary[:200]}...")
+
+ # Generic content filtering
+ filter_out = False
+ filter_reason = "content accepted"
+
+ # Check for explicit rejection signals
+ if (chunk_summary.strip().lower().startswith('[no new') or
+ chunk_summary.strip().lower().startswith('no new information')):
+ filter_out = True
+ filter_reason = "explicit rejection signal"
+ # Check for overly short or generic refusal responses
+ elif len(chunk_summary.strip()) < 25:
+ filter_out = True
+ filter_reason = "response too short to be useful"
+ # Check for common error phrases
+ elif any(error_phrase in chunk_summary.lower()[:150] for error_phrase in [
+ 'error', 'failed', 'cannot provide', 'unable to analyze', 'not possible', 'insufficient information']):
+ filter_out = True
+ filter_reason = "error or refusal response detected"
+
+ if not filter_out:
+ chunk_summaries.append(chunk_summary.strip())
+ content_added = True
+ if debug:
+ print(f"🔧 DEBUG: ✅ Content added to scratchpad (total sections: {len(chunk_summaries)})")
+ else:
+ content_added = False
+ if debug:
+ print(f"🔧 DEBUG: ❌ Content filtered out - {filter_reason}: {chunk_summary[:100]}...")

+ # Update progress via callback
  if streaming_callback:
+ updated_scratchpad = "\n\n---\n\n".join(chunk_summaries)
  streaming_callback(
- f"Processing chunk {step_number}/{total_estimated_steps} - "
- f"Budget: {adaptive_chunk_size:,} tokens",
- MSG_TYPE.MSG_TYPE_STEP_START,
- {"step": step_number, "progress": progress}
- )
-
- try:
- # FIXED: Moby Dick-specific system prompt that prevents false filtering
- system_prompt = (
- f"You are analyzing Herman Melville's \"Moby Dick\" - a complex literary work where EVERY passage contains literary value.\n\n"
- f"**Critical Understanding:**\n"
- f"- Melville's detailed descriptions of whaling culture ARE literary techniques\n"
- f"- Technical passages reveal themes about knowledge, obsession, and human industry\n"
- f"- Social customs and maritime protocols reflect broader themes of hierarchy and civilization\n"
- f"- Even seemingly mundane details contribute to Melville's encyclopedic narrative style\n\n"
- f"**Current Status:** Step {step_number} of ~{total_estimated_steps} | Progress: {progress:.1f}%\n\n"
- f"**Your Task:**\n"
- f"Extract literary insights from this text chunk, focusing on:\n"
- f"1. **Themes** (obsession, knowledge, nature vs civilization, social hierarchy)\n"
- f"2. **Narrative Technique** (Melville's encyclopedic style, detailed realism)\n"
- f"3. **Cultural Commentary** (maritime society, American industry, social structures)\n"
- f"4. **Character Insights** (authority, dignity, social roles)\n"
- f"5. **Symbolic Elements** (ships, sea, whaling practices as metaphors)\n\n"
- f"**CRITICAL:** The scratchpad shows '{current_scratchpad[:20]}...' - if it shows '[Empty]', you are analyzing early content and everything you find is 'new' information. "
- f"Do NOT say '[No new information]' unless the chunk is literally empty or corrupted.\n\n"
- f"Be specific and extract concrete insights. Melville's detailed realism IS his literary technique."
+ updated_scratchpad,
+ MSG_TYPE.MSG_TYPE_SCRATCHPAD,
+ {"step": step_number, "sections": len(chunk_summaries), "content_added": content_added, "filter_reason": filter_reason}
  )
-
- # FIXED: Moby Dick-specific user prompt with clear instructions
- summarization_objective = contextual_prompt or "Create comprehensive literary analysis of Moby-Dick focusing on themes, character development, narrative techniques, and symbolism"
-
- # Determine scratchpad status for better context
- scratchpad_status = "The analysis is just beginning - this is among the first substantial content to be processed." if current_scratchpad == "[Empty]" else f"Building on existing analysis with {len(chunk_summaries)} sections already completed."
-
- user_prompt = (
- f"--- Global Objective ---\n{summarization_objective}\n\n"
- f"--- Current Progress ---\n"
- f"Step {step_number} of ~{total_estimated_steps} | Progress: {progress:.1f}% | Token Budget: {adaptive_chunk_size:,}\n\n"
- f"--- Current Analysis State ---\n{scratchpad_status}\n\n"
- f"--- Existing Scratchpad Content ---\n{current_scratchpad}\n\n"
- f"--- New Text Chunk from Moby Dick ---\n{chunk_text}\n\n"
- f"--- Analysis Instructions ---\n"
- f"This is Melville's \"Moby Dick\" - extract literary insights from this passage. Consider:\n\n"
- f"• **What themes** does this passage develop? (obsession with knowledge, social hierarchy, maritime culture)\n"
- f"• **What narrative techniques** does Melville use? (detailed realism, encyclopedic style, technical precision)\n"
- f"• **What cultural commentary** is present? (whaling society, American industry, social protocols)\n"
- f"• **What character insights** emerge? (authority, dignity, social roles and expectations)\n"
- f"• **What symbolic elements** appear? (ships, maritime customs, hierarchical structures)\n\n"
- f"**Remember:** In Moby Dick, even technical descriptions serve literary purposes. Melville's detailed realism and cultural documentation ARE his narrative techniques.\n\n"
- f"Provide specific, concrete analysis with examples from the text. Extract insights that are not already captured in the scratchpad above."
+ progress_after = ((current_position + len(chunk_tokens)) / len(tokens)) * 90 if len(tokens) > 0 else 90
+ streaming_callback(
+ f"Step {step_number} completed - {'Content added' if content_added else f'Filtered: {filter_reason}'}",
+ MSG_TYPE.MSG_TYPE_STEP_END,
+ {"progress": progress_after}
  )

- if debug:
- print(f"🔧 DEBUG: Sending {len(user_prompt)} char prompt to LLM")
- print(f"🔧 DEBUG: Scratchpad status: {scratchpad_status}")
-
- chunk_summary = self.remove_thinking_blocks(self.llm.generate_text(user_prompt, system_prompt=system_prompt, **kwargs))
-
- if debug:
- print(f"🔧 DEBUG: Received {len(chunk_summary)} char response")
- print(f"🔧 DEBUG: Response preview: {chunk_summary[:200]}...")
-
- # FIXED: More intelligent content filtering specifically for literary analysis
- filter_out = False
-
- # Check for explicit rejection signals
- if (chunk_summary.strip().startswith('[No new insights]') or
- chunk_summary.strip().startswith('[No new information]') or
- chunk_summary.strip().startswith('[No significant') or
- 'cannot provide' in chunk_summary.lower()[:100] or
- 'unable to analyze' in chunk_summary.lower()[:100]):
- filter_out = True
- filter_reason = "explicit rejection signal"
-
- # Check for too short responses
- elif len(chunk_summary.strip()) < 50:
- filter_out = True
- filter_reason = "response too short"
-
- # Check for error responses
- elif any(error_phrase in chunk_summary.lower()[:150] for error_phrase in [
- 'error', 'failed', 'cannot', 'unable', 'not possible', 'insufficient']):
- filter_out = True
- filter_reason = "error response detected"
-
- else:
- filter_reason = "content accepted"
-
- if not filter_out:
- chunk_summaries.append(chunk_summary.strip())
- content_added = True
- if debug:
- print(f"🔧 DEBUG: ✅ Content added to scratchpad (total sections: {len(chunk_summaries)})")
- else:
- content_added = False
- if debug:
- print(f"🔧 DEBUG: ❌ Content filtered out - {filter_reason}: {chunk_summary[:100]}...")
-
- # Update progress
- if streaming_callback:
- updated_scratchpad = "\n\n---\n\n".join(chunk_summaries)
- streaming_callback(
- updated_scratchpad,
- MSG_TYPE.MSG_TYPE_SCRATCHPAD,
- {
- "step": step_number,
- "sections": len(chunk_summaries),
- "content_added": content_added,
- "filter_reason": filter_reason
- }
- )
-
- progress_after = ((current_position + len(chunk_tokens)) / len(tokens)) * 90
- if streaming_callback:
- streaming_callback(
- f"Step {step_number} completed - {'Content added' if content_added else f'Filtered: {filter_reason}'}",
- MSG_TYPE.MSG_TYPE_STEP_END,
- {"progress": progress_after}
- )
-
- except Exception as e:
- error_msg = f"Step {step_number} failed: {str(e)}"
- if debug:
- print(f"🔧 DEBUG: ❌ {error_msg}")
- self.trace_exception(e)
- if streaming_callback:
- streaming_callback(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
- chunk_summaries.append(f"[Error in step {step_number}: {str(e)[:100]}]")
-
- # Move to next chunk
- current_position += max(1, adaptive_chunk_size - overlap_tokens)
- step_number += 1
-
- # Safety break
- if step_number > 50:
- if debug:
- print(f"🔧 DEBUG: Breaking after {step_number-1} steps for safety")
- break
-
- if debug:
- print(f"\n🔧 DEBUG: Completed chunking. Total sections: {len(chunk_summaries)}")
-
- # Return scratchpad if requested
- if return_scratchpad_only:
- final_scratchpad = "\n\n---\n\n".join(chunk_summaries)
+ except Exception as e:
+ error_msg = f"Step {step_number} failed: {str(e)}"
+ if debug:
+ print(f"🔧 DEBUG: ❌ {error_msg}")
+ self.trace_exception(e)
  if streaming_callback:
- streaming_callback("Returning scratchpad only", MSG_TYPE.MSG_TYPE_STEP, {})
- return final_scratchpad.strip()
+ streaming_callback(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
+ chunk_summaries.append(f"[Error processing chunk at step {step_number}: {str(e)[:150]}]")

- # Final synthesis
+ # Move to the next chunk, allowing for overlap
+ current_position += max(1, adaptive_chunk_size - overlap_tokens)
+ step_number += 1
+
+ # Safety break for excessively long documents
+ if step_number > 200:
+ if debug: print(f"🔧 DEBUG: Safety break after {step_number-1} steps.")
+ chunk_summaries.append("[Processing halted due to exceeding maximum step limit.]")
+ break
+
+ if debug:
+ print(f"\n🔧 DEBUG: Chunk processing complete. Total sections gathered: {len(chunk_summaries)}")
+
+ # Return only the scratchpad content if requested
+ if return_scratchpad_only:
+ final_scratchpad = "\n\n---\n\n".join(chunk_summaries)
  if streaming_callback:
- streaming_callback("Synthesizing final comprehensive analysis...", MSG_TYPE.MSG_TYPE_STEP_START, {"progress": 90})
-
- if not chunk_summaries:
- error_msg = "No content was successfully processed. The text may not contain recognizable literary elements, or there may be an issue with the processing."
- if debug:
- print(f"🔧 DEBUG: ❌ {error_msg}")
- return error_msg
+ streaming_callback("Returning scratchpad content as final output.", MSG_TYPE.MSG_TYPE_STEP, {})
+ return final_scratchpad.strip()

- combined_scratchpad = "\n\n---\n\n".join(chunk_summaries)
- synthesis_objective = contextual_prompt or "Create comprehensive literary analysis of Moby-Dick."
+ # Final Synthesis Step
+ if streaming_callback:
+ streaming_callback("Synthesizing final comprehensive response...", MSG_TYPE.MSG_TYPE_STEP_START, {"progress": 95})

+ if not chunk_summaries:
+ error_msg = "No content was successfully processed or extracted from the document. The input might be empty or an issue occurred during processing."
  if debug:
- print(f"🔧 DEBUG: Synthesizing from {len(combined_scratchpad):,} char scratchpad with {len(chunk_summaries)} sections")
-
- # FIXED: Moby Dick-specific synthesis prompts
- synthesis_system_prompt = (
- "You are a literary analysis expert creating a final comprehensive analysis of Herman Melville's Moby Dick.\n"
- "Synthesize all the insights from the analysis sections into a coherent, scholarly response.\n"
- "Create clear sections with markdown headers, eliminate redundancy, and provide a thorough analysis.\n"
- "Focus on Melville's major themes, narrative techniques, cultural commentary, and symbolic elements.\n"
- "Use specific examples from the text and maintain academic rigor throughout."
- )
+ print(f"🔧 DEBUG: {error_msg}")
+ return error_msg
+
+ combined_scratchpad = "\n\n---\n\n".join(chunk_summaries)
+ synthesis_objective = contextual_prompt or "Provide a comprehensive, well-structured summary and analysis of the provided text."
+
+ if debug:
+ print(f"🔧 DEBUG: Synthesizing from {len(combined_scratchpad):,} char scratchpad with {len(chunk_summaries)} sections.")
+
+ # Generic synthesis prompts
+ synthesis_system_prompt = (
+ "You are an expert AI assistant specializing in synthesizing information. "
+ "Your task is to consolidate a series of text analysis sections from a scratchpad into a single, coherent, and well-structured final response. "
+ "Eliminate redundancy, organize the content logically, and ensure the final output directly and comprehensively addresses the user's primary objective. "
+ "Use markdown for clear formatting (e.g., headers, lists, bold text)."
+ )
+
+ synthesis_user_prompt = (
+ f"--- Final Objective ---\n{synthesis_objective}\n\n"
+ f"--- Collected Analysis Sections (Scratchpad) ---\n{combined_scratchpad}\n\n"
+ f"--- Your Final Task ---\n"
+ f"Synthesize all the information from the 'Collected Analysis Sections' into a single, high-quality, and comprehensive response. "
+ f"Your response must directly address the 'Final Objective'. "
+ f"Organize your answer logically with clear sections using markdown headers. "
+ f"Ensure all key information is included, remove any repetitive statements, and produce a polished, final document."
+ )
+
+ try:
+ final_answer = self.remove_thinking_blocks(self.llm.generate_text(synthesis_user_prompt, system_prompt=synthesis_system_prompt, **kwargs))
+ if debug:
+ print(f"🔧 DEBUG: Final synthesis generated: {len(final_answer):,} characters")
+ if streaming_callback:
+ streaming_callback("Final synthesis complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"progress": 100})
+ return final_answer.strip()

- synthesis_user_prompt = (
- f"--- Analysis Objective ---\n{synthesis_objective}\n\n"
- f"--- Processing Summary ---\n"
- f"Successfully analyzed {len(chunk_summaries)} sections of Moby Dick through incremental literary analysis.\n"
- f"Total scratchpad content: {len(combined_scratchpad):,} characters of literary insights.\n\n"
- f"--- Collected Literary Analysis Sections ---\n{combined_scratchpad}\n\n"
- f"--- Final Synthesis Task ---\n"
- f"Create a comprehensive, well-structured literary analysis of Moby Dick using ALL the insights above. "
- f"Organize into clear sections with markdown headers (## Theme Analysis, ## Narrative Techniques, ## Cultural Commentary, ## Symbolism, etc.). "
- f"Eliminate redundancy and create a coherent, scholarly analysis that demonstrates understanding of Melville's complex literary achievement. "
- f"Include specific textual examples and maintain academic depth throughout."
+ except Exception as e:
+ error_msg = f"The final synthesis step failed: {str(e)}. Returning the organized scratchpad content as a fallback."
+ if debug: print(f"🔧 DEBUG: {error_msg}")
+
+ # Fallback to returning the organized scratchpad
+ organized_scratchpad = (
+ f"# Analysis Summary\n\n"
+ f"*Note: The final synthesis process encountered an error. The raw, organized analysis sections are provided below.*\n\n"
+ f"## Collected Sections\n\n"
+ f"{combined_scratchpad}"
  )
-
- try:
- final_answer = self.remove_thinking_blocks(self.llm.generate_text(synthesis_user_prompt, system_prompt=synthesis_system_prompt, **kwargs))
-
- if debug:
- print(f"🔧 DEBUG: Final analysis: {len(final_answer):,} characters")
-
- if streaming_callback:
- streaming_callback(f"Final synthesis completed - {len(final_answer):,} characters generated", MSG_TYPE.MSG_TYPE_STEP_END, {"progress": 100})
-
- return final_answer.strip()
-
- except Exception as e:
- error_msg = f"Synthesis failed: {str(e)}. Returning organized scratchpad content."
- if debug:
- print(f"🔧 DEBUG: ❌ {error_msg}")
-
- # Return organized scratchpad as fallback
- organized_scratchpad = (
- f"# Literary Analysis of Moby Dick\n\n"
- f"*Note: Synthesis process encountered issues, presenting organized analysis sections:*\n\n"
- f"## Analysis Sections\n\n"
- f"{combined_scratchpad}"
- )
- return organized_scratchpad
+ return organized_scratchpad


  def chunk_text(text, tokenizer, detokenizer, chunk_size, overlap, use_separators=True):

{lollms_client-1.5.9 → lollms_client-1.6.0/src/lollms_client.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.5.9
+ Version: 1.6.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache License
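
For orientation, the lollms_core.py hunk above reworks long_context_processing from a Moby Dick-specific analyzer into a generic chunk-and-synthesize pipeline. Below is a minimal usage sketch, not part of the diff: it assumes the method is exposed on LollmsClient, that MSG_TYPE is importable from lollms_client.lollms_types, and that a streaming callback receives (message, msg_type, optional params) as the calls in the new code suggest; the client configuration, file name, and prompt are illustrative placeholders.

from lollms_client import LollmsClient
from lollms_client.lollms_types import MSG_TYPE

# Hypothetical client setup; binding and model configuration are omitted here.
lc = LollmsClient()

def on_progress(message, msg_type, params=None):
    # The diff shows callbacks invoked with (text, MSG_TYPE, optional dict).
    if msg_type == MSG_TYPE.MSG_TYPE_SCRATCHPAD:
        print(f"[scratchpad updated: {len(message):,} chars]")
    else:
        print(message)

with open("big_document.txt", "r", encoding="utf-8") as f:
    long_text = f.read()

summary = lc.long_context_processing(
    text_to_process=long_text,
    contextual_prompt="Extract the key facts and conclusions from this document.",
    context_fill_percentage=0.75,   # must stay within the 0.1-0.9 range validated above
    overlap_tokens=150,             # the new 1.6.0 default, passed explicitly for clarity
    streaming_callback=on_progress,
    return_scratchpad_only=False,
    debug=False,
)
print(summary)

The caller only supplies the objective and the raw text; chunk sizing is derived per step from the model context size and the scratchpad growth, as the calculate_token_budgets helper in the hunk shows.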