lollms-client 0.29.2.tar.gz → 0.29.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (104)
  1. {lollms_client-0.29.2/lollms_client.egg-info → lollms_client-0.29.3}/PKG-INFO +1 -1
  2. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/__init__.py +1 -1
  3. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_core.py +35 -10
  4. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_discussion.py +53 -28
  5. {lollms_client-0.29.2 → lollms_client-0.29.3/lollms_client.egg-info}/PKG-INFO +1 -1
  6. {lollms_client-0.29.2 → lollms_client-0.29.3}/LICENSE +0 -0
  7. {lollms_client-0.29.2 → lollms_client-0.29.3}/README.md +0 -0
  8. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/article_summary/article_summary.py +0 -0
  9. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/console_discussion/console_app.py +0 -0
  10. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/console_discussion.py +0 -0
  11. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/deep_analyze/deep_analyse.py +0 -0
  12. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  13. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/function_calling_with_local_custom_mcp.py +0 -0
  14. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  15. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/generate_and_speak/generate_and_speak.py +0 -0
  16. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  17. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/generate_text_with_multihop_rag_example.py +0 -0
  18. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/gradio_chat_app.py +0 -0
  19. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/gradio_lollms_chat.py +0 -0
  20. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/internet_search_with_rag.py +0 -0
  21. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/lollms_chat/calculator.py +0 -0
  22. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/lollms_chat/derivative.py +0 -0
  23. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -0
  24. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/lollms_discussions_test.py +0 -0
  25. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/mcp_examples/external_mcp.py +0 -0
  26. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/mcp_examples/local_mcp.py +0 -0
  27. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/mcp_examples/openai_mcp.py +0 -0
  28. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
  29. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
  30. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/simple_text_gen_test.py +0 -0
  31. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/simple_text_gen_with_image_test.py +0 -0
  32. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/test_local_models/local_chat.py +0 -0
  33. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/text_2_audio.py +0 -0
  34. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/text_2_image.py +0 -0
  35. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/text_2_image_diffusers.py +0 -0
  36. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/text_and_image_2_audio.py +0 -0
  37. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/text_gen.py +0 -0
  38. {lollms_client-0.29.2 → lollms_client-0.29.3}/examples/text_gen_system_prompt.py +0 -0
  39. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/__init__.py +0 -0
  40. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  41. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/claude/__init__.py +0 -0
  42. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  43. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/grok/__init__.py +0 -0
  44. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/groq/__init__.py +0 -0
  45. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  46. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  47. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  48. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  49. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
  50. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  51. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  52. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  53. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  54. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  55. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  56. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  57. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  58. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  59. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_config.py +0 -0
  60. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_js_analyzer.py +0 -0
  61. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_llm_binding.py +0 -0
  62. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_mcp_binding.py +0 -0
  63. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_personality.py +0 -0
  64. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_python_analyzer.py +0 -0
  65. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_stt_binding.py +0 -0
  66. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_tti_binding.py +0 -0
  67. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_ttm_binding.py +0 -0
  68. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_tts_binding.py +0 -0
  69. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_ttv_binding.py +0 -0
  70. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_types.py +0 -0
  71. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/lollms_utilities.py +0 -0
  72. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  73. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  74. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  75. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  76. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  77. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  78. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  79. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/stt_bindings/__init__.py +0 -0
  80. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  81. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  82. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  83. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tti_bindings/__init__.py +0 -0
  84. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  85. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  86. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  87. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  88. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/ttm_bindings/__init__.py +0 -0
  89. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  90. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  91. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  92. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tts_bindings/__init__.py +0 -0
  93. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  94. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  95. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  96. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  97. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/ttv_bindings/__init__.py +0 -0
  98. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  99. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client.egg-info/SOURCES.txt +0 -0
  100. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client.egg-info/dependency_links.txt +0 -0
  101. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client.egg-info/requires.txt +0 -0
  102. {lollms_client-0.29.2 → lollms_client-0.29.3}/lollms_client.egg-info/top_level.txt +0 -0
  103. {lollms_client-0.29.2 → lollms_client-0.29.3}/pyproject.toml +0 -0
  104. {lollms_client-0.29.2 → lollms_client-0.29.3}/setup.cfg +0 -0
--- lollms_client-0.29.2/lollms_client.egg-info/PKG-INFO
+++ lollms_client-0.29.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.29.2
+Version: 0.29.3
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
--- lollms_client-0.29.2/lollms_client/__init__.py
+++ lollms_client-0.29.3/lollms_client/__init__.py
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

-__version__ = "0.29.2" # Updated version
+__version__ = "0.29.3" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
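Only version metadata changes in these first two files. A quick post-upgrade sanity check (a minimal sketch, assuming the 0.29.3 package is installed):

    # Minimal post-upgrade check; assumes lollms_client 0.29.3 is installed.
    import lollms_client
    assert lollms_client.__version__ == "0.29.3", lollms_client.__version__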
--- lollms_client-0.29.2/lollms_client/lollms_core.py
+++ lollms_client-0.29.3/lollms_client/lollms_core.py
@@ -147,9 +147,6 @@ class LollmsClient():
             available = self.binding_manager.get_available_bindings()
             raise ValueError(f"Failed to create LLM binding: {binding_name}. Available: {available}")

-        # Determine the effective host address (use LLM binding's if initial was None)
-        effective_host_address = self.host_address
-
         # --- Modality Binding Setup ---
         self.tts_binding_manager = LollmsTTSBindingManager(tts_bindings_dir)
         self.tti_binding_manager = LollmsTTIBindingManager(tti_bindings_dir)
@@ -2961,7 +2958,6 @@ Provide the final aggregated answer in {output_format} format, directly addressi
             callback("Deep analysis complete.", MSG_TYPE.MSG_TYPE_STEP_END)
         return final_output

-
    def summarize(
        self,
        text_to_summarize: str,
@@ -2990,6 +2986,7 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                 is not lost at the boundaries. Defaults to 250.
             streaming_callback (Optional[Callable], optional): A callback function to receive real-time updates
                 on the process (e.g., which chunk is being processed).
+                It receives a message, a message type, and optional metadata.
                 Defaults to None.
             **kwargs: Additional keyword arguments to be passed to the generation method (e.g., temperature, top_p).
@@ -3004,12 +3001,17 @@ Provide the final aggregated answer in {output_format} format, directly addressi

         if len(tokens) <= chunk_size_tokens:
             if streaming_callback:
-                streaming_callback("Text is short enough for a single summary.", MSG_TYPE.MSG_TYPE_STEP)
+                streaming_callback("Text is short enough for a single summary.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 0})

             prompt_objective = contextual_prompt or "Provide a comprehensive summary of the following text."
             final_prompt = f"{prompt_objective}\n\n--- Text to Summarize ---\n{text_to_summarize}"

-            return self.generate_text(final_prompt, **kwargs)
+            summary = self.generate_text(final_prompt, **kwargs)
+
+            if streaming_callback:
+                streaming_callback("Summary generated.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 100})
+
+            return summary

         # --- Stage 1: Chunking and Independent Summarization ---
         chunks = []
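The chunk-building loop itself is unchanged and sits between these hunks, so the viewer elides it. As a rough illustration only, not the package's exact code, overlapping token windows of this kind are commonly built like the sketch below, where overlap_tokens keeps boundary context shared between neighbouring chunks (the docstring above gives 250 as the default):

    # Illustrative sketch of overlapping token chunking; NOT the package's exact loop.
    def make_chunks(tokens, chunk_size_tokens, overlap_tokens=250):
        chunks = []
        step = max(1, chunk_size_tokens - overlap_tokens)
        for start in range(0, len(tokens), step):
            chunks.append(tokens[start:start + chunk_size_tokens])
            if start + chunk_size_tokens >= len(tokens):
                break  # the last window already reaches the end of the text
        return chunks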
@@ -3021,13 +3023,21 @@ Provide the final aggregated answer in {output_format} format, directly addressi

         chunk_summaries = []

+        # Total steps include each chunk plus the final synthesis step
+        total_steps = len(chunks) + 1
+
         # Define the prompt for summarizing each chunk
         summarization_objective = contextual_prompt or "Summarize the key points of the following text excerpt."
         chunk_summary_prompt_template = f"{summarization_objective}\n\n--- Text Excerpt ---\n{{chunk_text}}"

         for i, chunk in enumerate(chunks):
+            progress_before = (i / total_steps) * 100
             if streaming_callback:
-                streaming_callback(f"Summarizing chunk {i + 1} of {len(chunks)}...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": f"chunk_{i+1}"})
+                streaming_callback(
+                    f"Summarizing chunk {i + 1} of {len(chunks)}...",
+                    MSG_TYPE.MSG_TYPE_STEP_START,
+                    {"id": f"chunk_{i+1}", "progress": progress_before}
+                )

             prompt = chunk_summary_prompt_template.format(chunk_text=chunk)

@@ -3035,8 +3045,14 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                 # Generate summary for the current chunk
                 chunk_summary = self.generate_text(prompt, **kwargs)
                 chunk_summaries.append(chunk_summary)
+
+                progress_after = ((i + 1) / total_steps) * 100
                 if streaming_callback:
-                    streaming_callback(f"Chunk {i + 1} summarized.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"chunk_{i+1}", "summary_snippet": chunk_summary[:100]})
+                    streaming_callback(
+                        f"Chunk {i + 1} summarized. Progress: {progress_after:.0f}%",
+                        MSG_TYPE.MSG_TYPE_STEP_END,
+                        {"id": f"chunk_{i+1}", "summary_snippet": chunk_summary[:100], "progress": progress_after}
+                    )
             except Exception as e:
                 trace_exception(e)
                 if streaming_callback:
@@ -3045,8 +3061,13 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                 chunk_summaries.append(f"[Error summarizing chunk {i+1}]")

         # --- Stage 2: Final Synthesis of All Chunk Summaries ---
+        progress_before_synthesis = (len(chunks) / total_steps) * 100
         if streaming_callback:
-            streaming_callback("Synthesizing all chunk summaries into a final version...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "final_synthesis"})
+            streaming_callback(
+                "Synthesizing all chunk summaries into a final version...",
+                MSG_TYPE.MSG_TYPE_STEP_START,
+                {"id": "final_synthesis", "progress": progress_before_synthesis}
+            )

         combined_summaries = "\n\n---\n\n".join(chunk_summaries)

@@ -3064,7 +3085,11 @@ Provide the final aggregated answer in {output_format} format, directly addressi
         final_summary = self.generate_text(final_synthesis_prompt, **kwargs)

         if streaming_callback:
-            streaming_callback("Final summary synthesized.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "final_synthesis"})
+            streaming_callback(
+                "Final summary synthesized.",
+                MSG_TYPE.MSG_TYPE_STEP_END,
+                {"id": "final_synthesis", "progress": 100}
+            )

         return final_summary.strip()

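Net effect of the summarize() hunks: every streaming_callback invocation now carries a "progress" value from 0 to 100 in its metadata dict. With total_steps = len(chunks) + 1, three chunks report STEP_END progress of 25, 50, and 75, and the synthesis step closes at 100. A minimal consumer sketch, assuming lc is an initialized LollmsClient, long_text is the input, and MSG_TYPE is importable from lollms_client.lollms_types as elsewhere in the package:

    # Hedged sketch: consuming the new "progress" metadata from summarize().
    from lollms_client.lollms_types import MSG_TYPE  # assumed import path

    def on_progress(message, msg_type, metadata=None):
        pct = (metadata or {}).get("progress")
        tag = "done" if msg_type == MSG_TYPE.MSG_TYPE_STEP_END else "step"
        if pct is not None:
            print(f"[{tag} {pct:5.1f}%] {message}")
        else:
            print(f"[{tag}] {message}")

    summary = lc.summarize(long_text, streaming_callback=on_progress)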
--- lollms_client-0.29.2/lollms_client/lollms_discussion.py
+++ lollms_client-0.29.3/lollms_client/lollms_discussion.py
@@ -1180,6 +1180,7 @@ class LollmsDiscussion:
             "- Key decisions or conclusions reached.\n"
             "- Important entities, projects, or topics mentioned that are likely to recur.\n"
             "Format the output as a concise list of bullet points. Be brief and factual. "
+            "Do not repeat information that is already in the User Data Zone or the Memory"
             "If no new, significant long-term information is present, output the single word: 'NOTHING'."
         )

@@ -1272,8 +1273,8 @@
                 "content": str,
                 "tokens": int,
                 "breakdown": {
-                    "system_prompt": str,
-                    "memory": str,
+                    "system_prompt": {"content": str, "tokens": int},
+                    "memory": {"content": str, "tokens": int},
                     ...
                 }
             },
@@ -1291,40 +1292,66 @@
             "current_tokens": 0,
             "zones": {}
         }
+        tokenizer = self.lollmsClient.count_tokens

         # --- 1. Assemble and Tokenize the Entire System Context Block ---
         system_prompt_text = (self._system_prompt or "").strip()
-        data_zone_text = self.get_full_data_zone() # This already formats all zones correctly
-
-        pruning_summary_text = ""
-        if self.pruning_summary and self.pruning_point_id:
-            pruning_summary_text = f"--- Conversation Summary ---\n{self.pruning_summary.strip()}"
+        data_zone_text = self.get_full_data_zone()
+        pruning_summary_content = (self.pruning_summary or "").strip()
+
+        pruning_summary_block = ""
+        if pruning_summary_content and self.pruning_point_id:
+            pruning_summary_block = f"--- Conversation Summary ---\n{pruning_summary_content}"

-        # Combine all parts that go into the system block, separated by newlines
         full_system_content_parts = [
-            part for part in [system_prompt_text, data_zone_text, pruning_summary_text] if part
+            part for part in [system_prompt_text, data_zone_text, pruning_summary_block] if part
         ]
         full_system_content = "\n\n".join(full_system_content_parts).strip()

         if full_system_content:
-            # Create the final system block as it would be exported
             system_block = f"!@>system:\n{full_system_content}\n"
-            system_tokens = self.lollmsClient.count_tokens(system_block)
+            system_tokens = tokenizer(system_block)

-            # Create the breakdown for user visibility
             breakdown = {}
             if system_prompt_text:
-                breakdown["system_prompt"] = system_prompt_text
-            if self.memory and self.memory.strip():
-                breakdown["memory"] = self.memory.strip()
-            if self.user_data_zone and self.user_data_zone.strip():
-                breakdown["user_data_zone"] = self.user_data_zone.strip()
-            if self.discussion_data_zone and self.discussion_data_zone.strip():
-                breakdown["discussion_data_zone"] = self.discussion_data_zone.strip()
-            if self.personality_data_zone and self.personality_data_zone.strip():
-                breakdown["personality_data_zone"] = self.personality_data_zone.strip()
-            if self.pruning_summary and self.pruning_summary.strip():
-                breakdown["pruning_summary"] = self.pruning_summary.strip()
+                breakdown["system_prompt"] = {
+                    "content": system_prompt_text,
+                    "tokens": tokenizer(system_prompt_text)
+                }
+
+            memory_text = (self.memory or "").strip()
+            if memory_text:
+                breakdown["memory"] = {
+                    "content": memory_text,
+                    "tokens": tokenizer(memory_text)
+                }
+
+            user_data_text = (self.user_data_zone or "").strip()
+            if user_data_text:
+                breakdown["user_data_zone"] = {
+                    "content": user_data_text,
+                    "tokens": tokenizer(user_data_text)
+                }
+
+            discussion_data_text = (self.discussion_data_zone or "").strip()
+            if discussion_data_text:
+                breakdown["discussion_data_zone"] = {
+                    "content": discussion_data_text,
+                    "tokens": tokenizer(discussion_data_text)
+                }
+
+            personality_data_text = (self.personality_data_zone or "").strip()
+            if personality_data_text:
+                breakdown["personality_data_zone"] = {
+                    "content": personality_data_text,
+                    "tokens": tokenizer(personality_data_text)
+                }
+
+            if pruning_summary_content:
+                breakdown["pruning_summary"] = {
+                    "content": pruning_summary_content,
+                    "tokens": tokenizer(pruning_summary_content)
+                }

             result["zones"]["system_context"] = {
                 "content": full_system_content,
@@ -1340,7 +1367,6 @@ class LollmsDiscussion:
         branch = self.get_branch(branch_tip_id)
         messages_to_render = branch

-        # Adjust for pruning to get the active set of messages
         if self.pruning_summary and self.pruning_point_id:
             pruning_index = -1
             for i, msg in enumerate(branch):
@@ -1363,7 +1389,7 @@
         message_count = len(messages_to_render)

         if messages_text:
-            tokens = self.lollmsClient.count_tokens(messages_text)
+            tokens = tokenizer(messages_text)
             result["zones"]["message_history"] = {
                 "content": messages_text,
                 "tokens": tokens,
@@ -1371,11 +1397,10 @@
             }

         # --- 3. Finalize the Total Count ---
-        # This remains the most accurate way to get the final count, as it uses the
-        # exact same export logic as the chat method.
         result["current_tokens"] = self.count_discussion_tokens("lollms_text", branch_tip_id)

         return result
+
     def switch_to_branch(self, branch_id):
         self.active_branch_id = branch_id

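With each breakdown entry now a {"content": ..., "tokens": ...} dict rather than a bare string, and message_history reported as its own zone, per-zone token accounting can be read straight off the result. A hedged sketch, assuming discussion is a LollmsDiscussion bound to a live LollmsClient:

    # Hedged sketch: reading the restructured get_context_status() output.
    status = discussion.get_context_status()

    print(f"total tokens: {status['current_tokens']}")
    for zone_name, zone in status["zones"].items():
        print(f"{zone_name}: {zone['tokens']} tokens")
        # breakdown entries now carry their own token counts
        for part, info in zone.get("breakdown", {}).items():
            print(f"  {part}: {info['tokens']} tokens")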
@@ -1412,4 +1437,4 @@
         new_metadata = (self.metadata or {}).copy()
         new_metadata[itemname] = item_value
         self.metadata = new_metadata
-        self.commit()
+        self.commit()
--- lollms_client-0.29.2/PKG-INFO
+++ lollms_client-0.29.3/lollms_client.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.29.2
+Version: 0.29.3
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License