lollms-client 0.28.0__py3-none-any.whl → 0.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "0.28.0" # Updated version
+__version__ = "0.29.0" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/lollms_core.py CHANGED
@@ -2008,7 +2008,7 @@ Do not split the code in multiple tags.
         self,
         prompt,
         output_format,
-        extra_system_prompt=None,
+        system_prompt=None,
         **kwargs
     ):
         """
@@ -2025,7 +2025,7 @@ Do not split the code in multiple tags.
                 A Python dictionary or a JSON string representing the desired output
                 structure. This will be used as a template for the LLM.
                 Example: {"name": "string", "age": "integer", "city": "string"}
-            extra_system_prompt (str, optional):
+            system_prompt (str, optional):
                 Additional instructions for the system prompt, to be appended to the
                 main instructions. Defaults to None.
            **kwargs:
@@ -2048,7 +2048,7 @@ Do not split the code in multiple tags.
             raise TypeError("output_format must be a dict or a JSON string.")
 
         # 2. Construct a specialized system prompt for structured data generation
-        system_prompt = (
+        full_system_prompt = (
             "You are a highly skilled AI assistant that processes user requests "
             "and returns structured data in JSON format. You must strictly adhere "
             "to the provided JSON template, filling in the values accurately based "
@@ -2056,8 +2056,8 @@ Do not split the code in multiple tags.
             "outside of the final JSON code block. Your entire response must be a single "
             "valid JSON object within a markdown code block."
         )
-        if extra_system_prompt:
-            system_prompt += f"\n\nAdditional instructions:\n{extra_system_prompt}"
+        if system_prompt:
+            system_prompt += f"\n\nAdditional instructions:\n{system_prompt}"
 
         # 3. Call the underlying generate_code method with JSON-specific settings
         if kwargs.get('debug'):
@@ -2065,7 +2065,7 @@ Do not split the code in multiple tags.
 
         json_string = self.generate_code(
             prompt=prompt,
-            system_prompt=system_prompt,
+            system_prompt=full_system_prompt,
             template=template_str,
             language="json",
             code_tag_format="markdown", # Sticking to markdown is generally more reliable
@@ -2083,7 +2083,7 @@ Do not split the code in multiple tags.
 
         try:
             # Use the provided robust parser
-            parsed_json = self.robust_json_parser(json_string)
+            parsed_json = robust_json_parser(json_string)
 
             if parsed_json is None:
                 ASCIIColors.warning("Failed to robustly parse the generated JSON.")
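
Taken together, these hunks rename the structured-generation keyword from `extra_system_prompt` to `system_prompt` and switch to the module-level `robust_json_parser`. A minimal sketch of a call against the new signature (the binding and model names are placeholders, not part of this diff):

```python
from lollms_client import LollmsClient

# Placeholder backend; any configured binding behaves the same here.
lc = LollmsClient(binding_name="ollama", model_name="llama3")

# As of 0.29.0 the extra instructions travel as `system_prompt`
# (formerly `extra_system_prompt`); internally they are appended to the
# built-in JSON-generation instructions.
event = lc.generate_structured_content(
    prompt="The meeting moved to 3 pm on Friday.",
    output_format={"event": "string", "time": "string", "day": "string"},
    system_prompt="Answer in English only.",
)
print(event)
```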
lollms_client/lollms_discussion.py CHANGED
@@ -31,6 +31,7 @@ if False:
 
 from lollms_client.lollms_utilities import build_image_dicts, robust_json_parser
 from ascii_colors import ASCIIColors, trace_exception
+from .lollms_types import MSG_TYPE
 
 class EncryptedString(TypeDecorator):
     """A SQLAlchemy TypeDecorator for field-level database encryption.
@@ -127,6 +128,7 @@ def create_dynamic_models(
         __abstract__ = True
         id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
         system_prompt = Column(EncryptedText, nullable=True)
+        data_zone = Column(EncryptedText, nullable=True) # New field for persistent data
         participants = Column(JSON, nullable=True, default=dict)
         active_branch_id = Column(String, nullable=True)
         discussion_metadata = Column(JSON, nullable=True, default=dict)
@@ -223,6 +225,10 @@ class LollmsDataManager:
                 if 'pruning_point_id' not in columns:
                     print(" -> Upgrading 'discussions' table: Adding 'pruning_point_id' column.")
                     connection.execute(text("ALTER TABLE discussions ADD COLUMN pruning_point_id VARCHAR"))
+
+                if 'data_zone' not in columns:
+                    print(" -> Upgrading 'discussions' table: Adding 'data_zone' column.")
+                    connection.execute(text("ALTER TABLE discussions ADD COLUMN data_zone TEXT"))
 
                 print("Database schema is up to date.")
                 # This is important to apply the ALTER TABLE statements
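
The upgrade follows the same guarded pattern as the existing `pruning_point_id` migration: inspect the live table and only issue the `ALTER TABLE` when the column is missing, so running it repeatedly is harmless. A standalone sketch of that pattern in SQLAlchemy 2.x style (the database path is illustrative):

```python
from sqlalchemy import create_engine, inspect, text

engine = create_engine("sqlite:///discussions.db")  # illustrative path

with engine.connect() as connection:
    # Names of the columns that already exist on the 'discussions' table.
    columns = [c["name"] for c in inspect(engine).get_columns("discussions")]
    if "data_zone" not in columns:
        # The guard makes this a no-op once the column exists,
        # so the migration can run on every startup.
        connection.execute(text("ALTER TABLE discussions ADD COLUMN data_zone TEXT"))
        connection.commit()
```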
@@ -484,6 +490,7 @@ class LollmsDiscussion:
             proxy = SimpleNamespace()
             proxy.id = id or str(uuid.uuid4())
             proxy.system_prompt = None
+            proxy.data_zone = None
             proxy.participants = {}
             proxy.active_branch_id = None
             proxy.discussion_metadata = {}
@@ -642,21 +649,91 @@ class LollmsDiscussion:
             A dictionary with 'user_message' and 'ai_message' LollmsMessage objects,
             where the 'ai_message' will contain rich metadata if an agentic turn was used.
         """
+        callback = kwargs.get("streaming_callback")
+
         if personality is not None:
             object.__setattr__(self, '_system_prompt', personality.system_prompt)
+
+            # --- New Data Source Handling Logic ---
+            if hasattr(personality, 'data_source') and personality.data_source is not None:
+                if isinstance(personality.data_source, str):
+                    # --- Static Data Source ---
+                    if callback:
+                        callback("Loading static personality data...", MSG_TYPE.MSG_TYPE_STEP, {"id": "static_data_loading"})
+                    current_data_zone = self.data_zone or ""
+                    self.data_zone = (current_data_zone + "\n\n--- Personality Static Data ---\n" + personality.data_source).strip()
+
+                elif callable(personality.data_source):
+                    # --- Dynamic Data Source ---
+                    qg_id = None
+                    if callback:
+                        qg_id = callback("Generating query for dynamic personality data...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "dynamic_data_query_gen"})
+
+                    context_for_query = self.export('markdown')
+                    query_prompt = (
+                        "You are an expert query generator. Based on the current conversation, formulate a concise and specific query to retrieve relevant information from a knowledge base. "
+                        "The query will be used to fetch data that will help you answer the user's latest request.\n\n"
+                        f"--- Conversation History ---\n{context_for_query}\n\n"
+                        "--- Instructions ---\n"
+                        "Generate a single query string."
+                    )
+
+                    try:
+                        query_json = self.lollmsClient.generate_structured_content(
+                            prompt=query_prompt,
+                            output_format={"query": "Your generated search query here."},
+                            system_prompt="You are an AI assistant that generates search queries in JSON format.",
+                            temperature=0.0
+                        )
+
+                        if not query_json or "query" not in query_json:
+                            if callback:
+                                callback("Failed to generate data query.", MSG_TYPE.MSG_TYPE_EXCEPTION, {"id": qg_id})
+                        else:
+                            generated_query = query_json["query"]
+                            if callback:
+                                callback(f"Generated query: '{generated_query}'", MSG_TYPE.MSG_TYPE_STEP_END, {"id": qg_id, "query": generated_query})
+
+                            dr_id = None
+                            if callback:
+                                dr_id = callback("Retrieving dynamic data from personality source...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "dynamic_data_retrieval"})
+
+                            try:
+                                retrieved_data = personality.data_source(generated_query)
+                                if callback:
+                                    callback(f"Retrieved data successfully.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": dr_id, "data_snippet": retrieved_data[:200]})
+
+                                current_data_zone = self.data_zone or ""
+                                self.data_zone = (current_data_zone + "\n\n--- Retrieved Dynamic Data ---\n" + retrieved_data).strip()
+
+                            except Exception as e:
+                                trace_exception(e)
+                                if callback:
+                                    callback(f"Error retrieving dynamic data: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, {"id": dr_id})
+                    except Exception as e:
+                        trace_exception(e)
+                        if callback:
+                            callback(f"An error occurred during query generation: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, {"id": qg_id})
+
+        # Determine effective MCPs by combining personality defaults and turn-specific overrides
+        effective_use_mcps = use_mcps
+        if personality and hasattr(personality, 'active_mcps') and personality.active_mcps:
+            if effective_use_mcps in [None, False]:
+                effective_use_mcps = personality.active_mcps
+            elif isinstance(effective_use_mcps, list):
+                effective_use_mcps = list(set(effective_use_mcps + personality.active_mcps))
 
         if self.max_context_size is not None:
             self.summarize_and_prune(self.max_context_size)
 
         # Step 1: Add user message, now including any images.
         if add_user_message:
-            # Pass kwargs through to capture images and other potential message attributes
             user_msg = self.add_message(
                 sender="user",
                 sender_type="user",
                 content=user_message,
                 images=images,
-                **kwargs # Use kwargs to allow other fields to be set from the caller
+                **kwargs
             )
         else: # Regeneration logic
             if self.active_branch_id not in self._message_index:
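
The dynamic branch above expects `data_source` to be a callable that accepts the auto-generated query string and returns a block of text. A toy retriever in that shape (the note table is invented for illustration):

```python
def project_lookup(query: str) -> str:
    """Toy retriever: returns every note whose keyword appears in the query."""
    notes = {
        "budget": "Project budget: $500,000.",
        "schedule": "Next milestone: end of Q3.",
    }
    hits = [text for key, text in notes.items() if key in query.lower()]
    return "\n".join(hits) if hits else "No matching notes found."

# Attached to a personality, this gets called once per turn with the
# generated query, and the returned text is appended to discussion.data_zone:
# personality = LollmsPersonality(..., data_source=project_lookup)
```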
@@ -665,11 +742,9 @@ class LollmsDiscussion:
             if user_msg_orm.sender_type != 'user':
                 raise ValueError(f"Regeneration failed: active branch tip is a '{user_msg_orm.sender_type}' message, not 'user'.")
             user_msg = LollmsMessage(self, user_msg_orm)
-            # For regeneration, we use the images from the original user message
             images = user_msg.images
 
-        # Step 2: Determine if this is a simple chat or a complex agentic turn.
-        is_agentic_turn = (use_mcps is not None and use_mcps) or (use_data_store is not None and use_data_store)
+        is_agentic_turn = (effective_use_mcps is not None and effective_use_mcps) or (use_data_store is not None and use_data_store)
 
         start_time = datetime.now()
 
@@ -678,79 +753,56 @@ class LollmsDiscussion:
         final_raw_response = ""
         final_content = ""
 
-        # Step 3: Execute the appropriate generation logic.
         if is_agentic_turn:
-            # --- AGENTIC TURN ---
             prompt_for_agent = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
             if debug:
-                ASCIIColors.cyan("\n" + "="*50)
-                ASCIIColors.cyan("--- DEBUG: AGENTIC TURN TRIGGERED ---")
-                ASCIIColors.cyan(f"--- PROMPT FOR AGENT (from discussion history) ---")
-                ASCIIColors.magenta(prompt_for_agent)
-                ASCIIColors.cyan("="*50 + "\n")
+                ASCIIColors.cyan("\n" + "="*50 + "\n--- DEBUG: AGENTIC TURN TRIGGERED ---\n" + f"--- PROMPT FOR AGENT (from discussion history) ---\n{prompt_for_agent}\n" + "="*50 + "\n")
 
             agent_result = self.lollmsClient.generate_with_mcp_rag(
                 prompt=prompt_for_agent,
-                use_mcps=use_mcps,
+                use_mcps=effective_use_mcps,
                 use_data_store=use_data_store,
                 max_reasoning_steps=max_reasoning_steps,
                 images=images,
                 system_prompt = self._system_prompt,
-                debug=debug, # Pass the debug flag down
+                debug=debug,
                 **kwargs
             )
             final_content = agent_result.get("final_answer", "The agent did not produce a final answer.")
             final_scratchpad = agent_result.get("final_scratchpad", "")
             final_raw_response = json.dumps(agent_result, indent=2)
-
         else:
-            # --- SIMPLE CHAT TURN ---
             if debug:
                 prompt_for_chat = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
-                ASCIIColors.cyan("\n" + "="*50)
-                ASCIIColors.cyan("--- DEBUG: SIMPLE CHAT PROMPT ---")
-                ASCIIColors.magenta(prompt_for_chat)
-                ASCIIColors.cyan("="*50 + "\n")
+                ASCIIColors.cyan("\n" + "="*50 + f"\n--- DEBUG: SIMPLE CHAT PROMPT ---\n{prompt_for_chat}\n" + "="*50 + "\n")
 
-            # For simple chat, we also need to consider images if the model is multi-modal
             final_raw_response = self.lollmsClient.chat(self, images=images, **kwargs) or ""
 
             if debug:
-                ASCIIColors.cyan("\n" + "="*50)
-                ASCIIColors.cyan("--- DEBUG: RAW SIMPLE CHAT RESPONSE ---")
-                ASCIIColors.magenta(final_raw_response)
-                ASCIIColors.cyan("="*50 + "\n")
+                ASCIIColors.cyan("\n" + "="*50 + f"\n--- DEBUG: RAW SIMPLE CHAT RESPONSE ---\n{final_raw_response}\n" + "="*50 + "\n")
 
             if isinstance(final_raw_response, dict) and final_raw_response.get("status") == "error":
                 raise Exception(final_raw_response.get("message", "Unknown error from lollmsClient.chat"))
             else:
                 final_content = self.lollmsClient.remove_thinking_blocks(final_raw_response)
+            final_scratchpad = None
 
-            final_scratchpad = None # No agentic scratchpad in a simple turn
-
-        # Step 4: Post-generation processing and statistics.
         end_time = datetime.now()
         duration = (end_time - start_time).total_seconds()
         token_count = self.lollmsClient.count_tokens(final_content)
         tok_per_sec = (token_count / duration) if duration > 0 else 0
 
-        # Step 5: Collect metadata from the agentic turn for storage.
         message_meta = {}
         if is_agentic_turn and isinstance(agent_result, dict):
-            if "tool_calls" in agent_result:
-                message_meta["tool_calls"] = agent_result["tool_calls"]
-            if "sources" in agent_result:
-                message_meta["sources"] = agent_result["sources"]
-            if agent_result.get("clarification_required", False):
-                message_meta["clarification_required"] = True
-
-        # Step 6: Add the final AI message to the discussion.
+            if "tool_calls" in agent_result: message_meta["tool_calls"] = agent_result["tool_calls"]
+            if "sources" in agent_result: message_meta["sources"] = agent_result["sources"]
+            if agent_result.get("clarification_required", False): message_meta["clarification_required"] = True
+
         ai_message_obj = self.add_message(
             sender=personality.name if personality else "assistant",
             sender_type="assistant",
             content=final_content,
             raw_content=final_raw_response,
-            # Store the agent's full reasoning log in the message's dedicated scratchpad field
             scratchpad=final_scratchpad,
             tokens=token_count,
             generation_speed=tok_per_sec,
@@ -892,7 +944,13 @@ class LollmsDiscussion:
             return "" if format_type == "lollms_text" else []
 
         branch = self.get_branch(branch_tip_id)
-        full_system_prompt = self._system_prompt # Simplified for clarity
+
+        # Combine system prompt and the new data_zone if it exists
+        full_system_prompt = (self._system_prompt or "").strip()
+        if hasattr(self, 'data_zone') and self.data_zone:
+            data_zone_text = f"\n\n--- data ---\n{self.data_zone.strip()}"
+            full_system_prompt = (full_system_prompt + data_zone_text).strip()
+
         participants = self.participants or {}
 
         def get_full_content(msg: 'LollmsMessage') -> str:
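
With this change, whatever sits in `data_zone` rides along in every export under a `--- data ---` separator. A quick string-assembly sketch that mirrors the new lines in the hunk above:

```python
system_prompt = "You are a Python expert."
data_zone = "# Current script:\nimport os"

# Same assembly as the export() change above.
full_system_prompt = (system_prompt or "").strip()
if data_zone:
    full_system_prompt = (full_system_prompt + f"\n\n--- data ---\n{data_zone.strip()}").strip()

print(full_system_prompt)
# You are a Python expert.
#
# --- data ---
# # Current script:
# import os
```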
lollms_client/lollms_personality.py CHANGED
@@ -22,6 +22,8 @@ class LollmsPersonality:
         # Core behavioral instruction
         system_prompt: str,
         icon: Optional[str] = None, # Base64 encoded image string
+        active_mcps: Optional[List[str]] = None, # The list of MCPs to activate with this personality
+        data_source: Optional[Union[str, Callable[[str], str]]] = None, # Static string data or a callable for dynamic data retrieval
 
 
         # RAG - Data Files and Application-provided Callbacks
@@ -46,6 +48,8 @@ class LollmsPersonality:
         description: A brief description of what the personality does.
         icon: An optional base64 encoded string for a display icon.
         system_prompt: The core system prompt that defines the AI's behavior.
+        active_mcps: An optional list of MCP (tool) names to be automatically activated with this personality.
+        data_source: A source of knowledge. Can be a static string or a callable function that takes a query and returns a string.
         data_files: A list of file paths to be used as a knowledge base for RAG.
         vectorize_chunk_callback: A function provided by the host app to vectorize and store a text chunk.
         is_vectorized_callback: A function provided by the host app to check if a chunk is already vectorized.
@@ -59,6 +63,8 @@ class LollmsPersonality:
         self.description = description
         self.icon = icon
         self.system_prompt = system_prompt
+        self.active_mcps = active_mcps or []
+        self.data_source = data_source
         self.data_files = [Path(f) for f in data_files] if data_files else []
 
         # RAG Callbacks provided by the host application
@@ -177,6 +183,8 @@ class LollmsPersonality:
             "category": self.category,
             "description": self.description,
             "system_prompt": self.system_prompt,
+            "active_mcps": self.active_mcps,
+            "has_data_source": self.data_source is not None,
             "data_files": [str(p) for p in self.data_files],
             "has_script": self.script is not None
         }
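
With the two new constructor arguments, a personality can carry both its tools and its knowledge. A construction sketch (keyword usage mirrors the README example added later in this diff; the values are illustrative):

```python
from lollms_client import LollmsPersonality

coder = LollmsPersonality(
    name="Coder",
    author="me",
    category="Coding",
    description="Writes and runs Python.",
    system_prompt="You are a careful Python programmer.",
    active_mcps=["python_code_interpreter"],  # tools auto-activated each turn (new in 0.29.0)
    data_source="Always add type hints.",     # static knowledge injected into the data_zone (new in 0.29.0)
)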
lollms_client-0.29.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.28.0
+Version: 0.29.0
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
@@ -49,8 +49,10 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope
 * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM) without major code changes.
 * 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS) and images (TTI).
 * 🤖 **Function Calling with MCP:** Empowers LLMs to use external tools and functions through the Model Context Protocol (MCP), with built-in support for local Python tool execution via `local_mcp` binding and its default tools (file I/O, internet search, Python interpreter, image generation).
+* 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
 * 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions, including during MCP interactions.
-* 💬 **Discussion Management:** Utilities to easily manage and format conversation histories for chat applications.
+* 📝 **Advanced Structured Content Generation:** Reliably generate structured JSON output from natural language prompts using the `generate_structured_content` helper method.
+* 💬 **Discussion Management:** Utilities to easily manage and format conversation histories, including a persistent `data_zone` for context that is always present in the system prompt.
 * ⚙️ **Configuration Management:** Flexible ways to configure bindings and generation parameters.
 * 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services, including custom MCP toolsets.
 * 📝 **High-Level Operations:** Includes convenience methods for complex tasks like sequential summarization and deep text analysis directly within `LollmsClient`.
@@ -120,156 +122,174 @@ except Exception as e:
 
 ```
 
-### Function Calling with MCP
+### Advanced Structured Content Generation
 
-`lollms-client` supports robust function calling via the Model Context Protocol (MCP), allowing LLMs to interact with your custom Python tools or pre-defined utilities.
+The `generate_structured_content` method is a powerful utility for forcing an LLM's output into a specific JSON format. It's ideal for extracting information, getting consistent tool parameters, or any task requiring reliable, machine-readable output.
 
 ```python
-from lollms_client import LollmsClient, MSG_TYPE
-from ascii_colors import ASCIIColors
-import json # For pretty printing results
-
-# Example callback for MCP streaming
-def mcp_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
-    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.success(chunk, end="", flush=True) # LLM's final answer or thought process
-    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START: ASCIIColors.info(f"\n>> MCP Step Start: {metadata.get('tool_name', chunk)}", flush=True)
-    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: ASCIIColors.success(f"\n<< MCP Step End: {metadata.get('tool_name', chunk)} -> Result: {json.dumps(metadata.get('result', ''))}", flush=True)
-    elif msg_type == MSG_TYPE.MSG_TYPE_INFO and metadata and metadata.get("type") == "tool_call_request": ASCIIColors.info(f"\nAI requests: {metadata.get('name')}({metadata.get('params')})", flush=True)
-    return True
+from lollms_client import LollmsClient
+import json
 
-try:
-    # Initialize LollmsClient with an LLM binding and the local_mcp binding
-    lc = LollmsClient(
-        binding_name="ollama", model_name="mistral", # Example LLM
-        mcp_binding_name="local_mcp" # Enables default tools (file_writer, internet_search, etc.)
-                                     # or custom tools if mcp_binding_config.tools_folder_path is set.
-    )
+lc = LollmsClient(binding_name="ollama", model_name="llama3")
 
-    user_query = "What were the main AI headlines last week and write a summary to 'ai_news.txt'?"
-    ASCIIColors.blue(f"User Query: {user_query}")
-    ASCIIColors.yellow("AI Processing with MCP (streaming):")
+text_block = "John Doe is a 34-year-old software engineer from New York. He loves hiking and Python programming."
 
-    mcp_result = lc.generate_with_mcp(
-        prompt=user_query,
-        streaming_callback=mcp_stream_callback
-    )
-    print("\n--- End of MCP Interaction ---")
+# Define the exact JSON structure you want
+output_template = {
+    "full_name": "string",
+    "age": "integer",
+    "profession": "string",
+    "city": "string",
+    "hobbies": ["list", "of", "strings"]
+}
 
-    if mcp_result.get("error"):
-        ASCIIColors.error(f"MCP Error: {mcp_result['error']}")
-    else:
-        ASCIIColors.cyan(f"\nFinal Answer from AI: {mcp_result.get('final_answer', 'N/A')}")
-        ASCIIColors.magenta("\nTool Calls Made:")
-        for tc in mcp_result.get("tool_calls", []):
-            print(f"  - Tool: {tc.get('name')}, Params: {tc.get('params')}, Result (first 50 chars): {str(tc.get('result'))[:50]}...")
+# Generate the structured data
+extracted_data = lc.generate_structured_content(
+    prompt=f"Extract the relevant information from the following text:\n\n{text_block}",
+    output_format=output_template
+)
 
-except Exception as e:
-    ASCIIColors.error(f"An error occurred in MCP example: {e}")
-    trace_exception(e) # Assuming you have trace_exception utility
+if extracted_data:
+    print(json.dumps(extracted_data, indent=2))
+    # Expected output:
+    # {
+    #   "full_name": "John Doe",
+    #   "age": 34,
+    #   "profession": "software engineer",
+    #   "city": "New York",
+    #   "hobbies": ["hiking", "Python programming"]
+    # }
 ```
-For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).
 
-### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+### Putting It All Together: An Advanced Agentic Example
 
-For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+Let's create a **Python Coder Agent**. This agent will use a set of coding rules from a local file as its knowledge base and will be equipped with a tool to execute the code it writes. This demonstrates the synergy between `LollmsPersonality` (with `data_source` and `active_mcps`), `LollmsDiscussion`, and the MCP system.
 
-**Key Agent Capabilities:**
+#### Step 1: Create the Knowledge Base (`coding_rules.txt`)
 
-*   **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
-*   **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
-*   **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
-*   **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
-*   **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real-time.
-*   **Self-Correction:** Includes a `refactor_scratchpad` tool for the agent to clean up its own thought process if it becomes cluttered.
+Create a simple text file with the rules our agent must follow.
 
-Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+```text
+# File: coding_rules.txt
 
-```python
-import json
-from lollms_client import LollmsClient, MSG_TYPE
-from ascii_colors import ASCIIColors
+1. All Python functions must include a Google-style docstring.
+2. Use type hints for all function parameters and return values.
+3. The main execution block should be protected by `if __name__ == "__main__":`.
+4. After defining a function, add a simple example of its usage inside the main block.
+5. Print the output of the example usage to the console.
+```
 
-# 1. Define a mock RAG data store and retrieval function
-project_notes = {
-    "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
-}
+#### Step 2: The Main Script (`agent_example.py`)
 
-def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
-    """A simple keyword-based retriever for our mock data store."""
-    results = []
-    for key, text in project_notes.items():
-        if query.lower() in text.lower():
-            results.append({"source": key, "content": text})
-    return results[:top_k]
+This script will define the personality, initialize the client, and run the agent.
 
-# 2. Define a detailed streaming callback to visualize the agent's process
-def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+```python
+from pathlib import Path
+from lollms_client import LollmsClient, LollmsPersonality, LollmsDiscussion, MSG_TYPE
+from ascii_colors import ASCIIColors
+import json
+
+# A detailed callback to visualize the agent's process
+def agent_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, **kwargs) -> bool:
     if not params: params = {}
-    msg_id = params.get("id", "")
 
-    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
-        ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+    if msg_type == MSG_TYPE.MSG_TYPE_STEP:
+        ASCIIColors.yellow(f"\n>> Agent Step: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+        ASCIIColors.yellow(f"\n>> Agent Step Start: {chunk}")
     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
-        ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
-        if params.get('result'):
-            ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+        result = params.get('result', '')
+        ASCIIColors.green(f"<< Agent Step End: {chunk} -> Result: {json.dumps(result)[:150]}...")
    elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
-        ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+        ASCIIColors.magenta(f"🤔 Agent Thought: {chunk}")
     elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
-        ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+        ASCIIColors.blue(f"🛠️ Agent Action: {chunk}")
     elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
-        ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+        ASCIIColors.cyan(f"👀 Agent Observation: {chunk}")
     elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
         print(chunk, end="", flush=True) # Final answer stream
     return True
 
 try:
-    # 3. Initialize LollmsClient with an LLM and local tools enabled
-    lc = LollmsClient(
-        binding_name="ollama",       # Use Ollama
-        model_name="llama3",         # Or any capable model like mistral, gemma, etc.
-        mcp_binding_name="local_mcp" # Enable local tools like python_code_interpreter
+    # --- 1. Load the knowledge base from the file ---
+    rules_path = Path("coding_rules.txt")
+    if not rules_path.exists():
+        raise FileNotFoundError("Please create the 'coding_rules.txt' file.")
+    coding_rules = rules_path.read_text()
+
+    # --- 2. Define the Coder Agent Personality ---
+    coder_personality = LollmsPersonality(
+        name="Python Coder Agent",
+        author="lollms-client",
+        category="Coding",
+        description="An agent that writes and executes Python code according to specific rules.",
+        system_prompt=(
+            "You are an expert Python programmer. Your task is to write clean, executable Python code based on the user's request. "
+            "You MUST strictly follow all rules provided in the 'Personality Static Data' section. "
+            "First, think about the plan. Then, use the `python_code_interpreter` tool to write and execute the code. "
+            "Finally, present the code and its output to the user."
+        ),
+        # A) Attach the static knowledge base
+        data_source=coding_rules,
+        # B) Equip the agent with a code execution tool
+        active_mcps=["python_code_interpreter"]
     )
 
-    # 4. Define the user prompt and the RAG data store
-    prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+    # --- 3. Initialize the Client and Discussion ---
+    lc = LollmsClient(
+        binding_name="ollama",        # Or any capable model binding
+        model_name="codellama",       # A code-specialized model is recommended
+        mcp_binding_name="local_mcp"  # Enable the local tool execution engine
+    )
+    discussion = LollmsDiscussion.create_new(lollms_client=lc)
 
-    rag_data_store = {
-        "project_notes": {"callable": retrieve_from_notes}
-    }
+    # --- 4. The User's Request ---
+    user_prompt = "Write a Python function that takes two numbers and returns their sum."
 
-    ASCIIColors.yellow(f"User Prompt: {prompt}")
+    ASCIIColors.yellow(f"User Prompt: {user_prompt}")
     print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
 
-    # 5. Run the agent
-    agent_output = lc.generate_with_mcp_rag(
-        prompt=prompt,
-        use_data_store=rag_data_store,
-        use_mcps=["python_code_interpreter"], # Make specific tools available
-        streaming_callback=agent_streaming_callback,
-        max_reasoning_steps=5
+    # --- 5. Run the Agentic Chat Turn ---
+    response = discussion.chat(
+        user_message=user_prompt,
+        personality=coder_personality,
+        streaming_callback=agent_callback
     )
 
-    print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
-
-    # 6. Print the final results
-    if agent_output.get("error"):
-        ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
-    else:
-        ASCIIColors.green("\n--- Final Answer ---")
-        print(agent_output.get("final_answer"))
-
-        ASCIIColors.magenta("\n--- Tool Calls ---")
-        print(json.dumps(agent_output.get("tool_calls", []), indent=2))
-
-        ASCIIColors.cyan("\n--- RAG Sources ---")
-        print(json.dumps(agent_output.get("sources", []), indent=2))
+    print("\n\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+    # --- 6. Inspect the results ---
+    ai_message = response['ai_message']
+    ASCIIColors.green("\n--- Final Answer from Agent ---")
+    print(ai_message.content)
+
+    ASCIIColors.magenta("\n--- Tool Calls Made ---")
+    print(json.dumps(ai_message.metadata.get("tool_calls", []), indent=2))
 
 except Exception as e:
-    ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+    trace_exception(e)
 
 ```
 
+#### Step 3: What Happens Under the Hood
+
+When you run `agent_example.py`, a sophisticated process unfolds:
+
+1. **Initialization:** The `LollmsDiscussion.chat()` method is called with the `coder_personality`.
+2. **Knowledge Injection:** The `chat` method sees that `personality.data_source` is a string. It automatically takes the content of `coding_rules.txt` and injects it into the `discussion.data_zone`.
+3. **Tool Activation:** The method also sees `personality.active_mcps`. It enables the `python_code_interpreter` tool for this turn.
+4. **Context Assembly:** The `LollmsClient` assembles a rich prompt for the LLM that includes:
+   * The personality's `system_prompt`.
+   * The content of `coding_rules.txt` (from the `data_zone`).
+   * The list of available tools (including `python_code_interpreter`).
+   * The user's request ("Write a function...").
+5. **Reason and Act:** The LLM, now fully briefed, reasons that it needs to use the `python_code_interpreter` tool. It formulates the Python code *according to the rules it was given*.
+6. **Tool Execution:** The `local_mcp` binding receives the code and executes it in a secure local environment. It captures any output (`stdout`, `stderr`) and results.
+7. **Observation:** The execution results are sent back to the LLM as an "observation."
+8. **Final Synthesis:** The LLM now has the user's request, the rules, the code it wrote, and the code's output. It synthesizes all of this into a final, comprehensive answer for the user.
+
+This example showcases how `lollms-client` allows you to build powerful, knowledgeable, and capable agents by simply composing personalities with data and tools.
+
 ## Documentation
 
 For more in-depth information, please refer to:
@@ -602,3 +622,141 @@ This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LIC
 ## Changelog
 
 For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
+```
+
+---
+### Phase 2: Update `docs/md/lollms_discussion.md`
+
+`[UPDATE] docs/md/lollms_discussion.md`
+```markdown
+# LollmsDiscussion Class
+
+The `LollmsDiscussion` class is a cornerstone of the `lollms-client` library, designed to represent and manage a single conversation. It provides a robust interface for handling message history, conversation branching, context formatting, and persistence.
+
+## Overview
+
+A `LollmsDiscussion` can be either **in-memory** or **database-backed**, offering flexibility for different use cases.
+
+- **In-Memory:** Ideal for temporary or transient conversations. The discussion exists only for the duration of the application's runtime.
+- **Database-Backed:** Provides persistence by saving the entire conversation, including all branches and metadata, to a database file (e.g., SQLite). This is perfect for applications that need to retain user chat history.
+
+## Key Features
+
+- **Message Management:** Add user and AI messages, which are automatically linked to form a conversation tree.
+- **Branching:** The conversation is a tree, not a simple list. This allows for exploring different conversational paths from any point. You can regenerate an AI response, and it will create a new branch.
+- **Context Exporting:** The `export()` method formats the conversation history for various LLM backends (`openai_chat`, `ollama_chat`, `lollms_text`, `markdown`), ensuring compatibility.
+- **Automatic Pruning:** To prevent exceeding the model's context window, it can automatically summarize older parts of the conversation without losing the original data.
+- **Persistent Data Zone:** A special field to hold context that is always included in the system prompt, separate from the main conversation flow.
+
+## Creating a Discussion
+
+The recommended way to create a discussion is using the `LollmsDiscussion.create_new()` class method.
+
+```python
+from lollms_client import LollmsClient, LollmsDataManager, LollmsDiscussion
+
+# For an in-memory discussion (lost when the app closes)
+lc = LollmsClient(binding_name="ollama", model_name="llama3")
+discussion = LollmsDiscussion.create_new(lollms_client=lc, id="my-temp-discussion")
+
+# For a persistent, database-backed discussion
+# This will create a 'discussions.db' file if it doesn't exist
+db_manager = LollmsDataManager('sqlite:///discussions.db')
+discussion_db = LollmsDiscussion.create_new(
+    lollms_client=lc,
+    db_manager=db_manager,
+    discussion_metadata={"title": "My First DB Chat"}
+)
+```
+
+## Core Properties
+
+### `data_zone`
+
+The `data_zone` is a string property where you can store persistent information that should always be visible to the AI as part of its system instructions. This is incredibly useful for providing context that doesn't change, such as user profiles, complex instructions, or data that the AI should always reference.
+
+The content of `data_zone` is automatically appended to the system prompt during context export. This is also where data from a personality's `data_source` is loaded before generation.
+
+#### Example: Using the Data Zone
+
+Imagine you are building a Python coding assistant. You can use the `data_zone` to hold the current state of a script the user is working on.
+
+```python
+from lollms_client import LollmsClient, LollmsDiscussion
+
+lc = LollmsClient(binding_name="ollama", model_name="codellama")
+discussion = LollmsDiscussion.create_new(lollms_client=lc)
+
+# Set the system prompt and initial data_zone
+discussion.system_prompt = "You are a Python expert. Help the user with their code."
+discussion.data_zone = "# Current script content:\n\nimport os\n\ndef list_files(path):\n    pass"
+
+# The user asks for help
+user_prompt = "Flesh out the list_files function to print all files in the given path."
+
+# When you generate a response, the AI will see the system prompt AND the data_zone
+# The effective system prompt becomes:
+# """
+# You are a Python expert. Help the user with their code.
+#
+# --- data ---
+# # Current script content:
+#
+# import os
+#
+# def list_files(path):
+#     pass
+# """
+response = discussion.chat(user_prompt)
+print(response['ai_message'].content)
+
+# The calling application can then parse the AI's response and update the data_zone
+# for the next turn.
+updated_code = "# ... updated code from AI ...\nimport os\n\ndef list_files(path):\n    for f in os.listdir(path):\n        print(f)"
+discussion.data_zone = updated_code
+discussion.commit() # If DB-backed
+```
+
+### Other Important Properties
+
+- `id`: The unique identifier for the discussion.
+- `system_prompt`: The main system prompt defining the AI's persona and core instructions.
+- `metadata`: A dictionary for storing any custom metadata, like a title.
+- `active_branch_id`: The ID of the message at the "tip" of the current conversation branch.
+- `messages`: A list of all `LollmsMessage` objects in the discussion.
+
+## Main Methods
+
+### `chat()`
+The `chat()` method is the primary way to interact with the discussion. It handles a full user-to-AI turn, including invoking the advanced agentic capabilities of the `LollmsClient`.
+
+#### Personalities, Tools, and Data Sources
+
+The `chat` method intelligently handles tool activation and data loading when a `LollmsPersonality` is provided. This allows personalities to be configured as self-contained agents with their own default tools and knowledge bases.
+
+**Tool Activation (`use_mcps`):**
+
+1. **Personality has tools, `use_mcps` is not set:** The agent will use the tools defined in `personality.active_mcps`.
+2. **Personality has tools, `use_mcps` is also set:** The agent will use a *combination* of tools from both the personality and the `use_mcps` parameter for that specific turn. Duplicates are automatically handled. This allows you to augment a personality's default tools on the fly.
+3. **Personality has no tools, `use_mcps` is set:** The agent will use only the tools specified in the `use_mcps` parameter.
+4. **Neither are set:** The agentic turn is not triggered (unless a data store is used), and a simple chat generation occurs.
+
+**Knowledge Loading (`data_source`):**
+
+Before generation, the `chat` method checks for `personality.data_source`:
+
+- **If it's a `str` (static data):** The string is appended to the `discussion.data_zone`, making it part of the system context for the current turn.
+- **If it's a `Callable` (dynamic data):**
+  1. The AI first generates a query based on the current conversation.
+  2. The `chat` method calls your function with this query.
+  3. The returned string is appended to the `discussion.data_zone`.
+  4. The final response generation proceeds with this newly added context.
+
+This makes it easy to create powerful, reusable agents. For a complete, runnable example of building a **Python Coder Agent** that uses both `active_mcps` and a static `data_source`, **please see the "Putting It All Together" section in the main `README.md` file.**
+
+### Other Methods
+- `add_message(sender, content, ...)`: Adds a new message.
+- `export(format_type, ...)`: Exports the discussion to a specific format.
+- `commit()`: Saves changes to the database (if DB-backed).
+- `summarize_and_prune()`: Automatically handles context window limits.
+- `count_discussion_tokens()`: Counts the tokens for a given format.
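
The four tool-activation rules documented above reduce to a small merge, matching the `effective_use_mcps` logic added to `chat()` earlier in this diff. A sketch, assuming `use_mcps` is either None/False or a list of tool names:

```python
def merge_mcps(use_mcps, personality_mcps):
    """Combine turn-level tools with a personality's defaults, deduplicated."""
    effective = use_mcps
    if personality_mcps:
        if effective in [None, False]:
            effective = personality_mcps  # rule 1: personality tools only
        elif isinstance(effective, list):
            effective = list(set(effective + personality_mcps))  # rule 2: union of both
    return effective  # rules 3-4: the caller's value (or None) passes through

print(sorted(merge_mcps(["internet_search"], ["python_code_interpreter"])))
# ['internet_search', 'python_code_interpreter']
```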
lollms_client-0.29.0.dist-info/RECORD CHANGED
@@ -29,14 +29,14 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=eAfY5Ik399J8AVHNU6vLjqiWNNmT88GX-h3NTKzLo2g,1147
+lollms_client/__init__.py,sha256=DiIvlC4e7dCOcFGBxtAmgUNIpbaIMRTiPDhEuadweGg,1147
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=QliAE6hYiIGqQSDWWmjii1Hj8hsj3IxwBuUFshxJDlM,169099
-lollms_client/lollms_discussion.py,sha256=tvANNvpTkUr4L6GKowosIyfV7l3SA6cXnzElt36e2s8,52133
+lollms_client/lollms_core.py,sha256=U-o16h7BZT7H1tu-aZNM-14H-OuObPG6qpLsikU1Jw8,169080
+lollms_client/lollms_discussion.py,sha256=RVGeFyPKeLpTJEUjx2IdWFYg-d8zjPhWLQGnFFiKNvQ,56138
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=cU0cmxZfIrp-ofutbRLx7W_59dxzPXpU-vO98MqVnQA,14788
 lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
-lollms_client/lollms_personality.py,sha256=dILUI5DZdzJ3NDDQiIsK2UptVF-jZK3XYXZ2bpXP_ew,8035
+lollms_client/lollms_personality.py,sha256=O-9nqZhazcITOkxjT24ENTxTmIoZLgqIsQ9WtWs0Id0,8719
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
 lollms_client/lollms_stt_binding.py,sha256=jAUhLouEhh2hmm1bK76ianfw_6B59EHfY3FmLv6DU-g,5111
 lollms_client/lollms_tti_binding.py,sha256=afO0-d-Kqsmh8UHTijTvy6dZAt-XDB6R-IHmdbf-_fs,5928
@@ -92,8 +92,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.28.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-0.28.0.dist-info/METADATA,sha256=Esa4mMjN3hLYTMUR8aGk9jNlE_kqCyYLLuWx-tL9Vzo,25778
-lollms_client-0.28.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-0.28.0.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
-lollms_client-0.28.0.dist-info/RECORD,,
+lollms_client-0.29.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.29.0.dist-info/METADATA,sha256=pykrb85WMir4magsF-_qpPvAJ0H9uKAR_iPr6lX7lXw,33456
+lollms_client-0.29.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.29.0.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.29.0.dist-info/RECORD,,