lollms-client 0.24.2__py3-none-any.whl → 0.25.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/lollms/__init__.py +1 -0
- lollms_client/llm_bindings/openai/__init__.py +30 -9
- lollms_client/lollms_core.py +232 -161
- lollms_client/lollms_discussion.py +102 -34
- lollms_client/lollms_types.py +9 -1
- lollms_client/lollms_utilities.py +68 -0
- lollms_client/mcp_bindings/remote_mcp/__init__.py +82 -4
- {lollms_client-0.24.2.dist-info → lollms_client-0.25.1.dist-info}/METADATA +1 -1
- {lollms_client-0.24.2.dist-info → lollms_client-0.25.1.dist-info}/RECORD +13 -13
- {lollms_client-0.24.2.dist-info → lollms_client-0.25.1.dist-info}/WHEEL +0 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.25.1.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.25.1.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager


- __version__ = "0.
+ __version__ = "0.25.1" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/llm_bindings/lollms/__init__.py
CHANGED

@@ -4,6 +4,7 @@ from lollms_client.lollms_llm_binding import LollmsLLMBinding
 from lollms_client.lollms_types import MSG_TYPE
 from lollms_client.lollms_utilities import encode_image
 from lollms_client.lollms_types import ELF_COMPLETION_FORMAT
+ from lollms_client.lollms_discussion import LollmsDiscussion
 from ascii_colors import ASCIIColors, trace_exception
 from typing import Optional, Callable, List, Union
 import json

lollms_client/llm_bindings/openai/__init__.py
CHANGED

@@ -30,7 +30,8 @@ class OpenAIBinding(LollmsLLMBinding):
 model_name: str = "",
 service_key: str = None,
 verify_ssl_certificate: bool = True,
- default_completion_format: ELF_COMPLETION_FORMAT = ELF_COMPLETION_FORMAT.Chat
+ default_completion_format: ELF_COMPLETION_FORMAT = ELF_COMPLETION_FORMAT.Chat,
+ **kwargs):
 """
 Initialize the OpenAI binding.

@@ -52,7 +53,7 @@ class OpenAIBinding(LollmsLLMBinding):

 if not self.service_key:
     self.service_key = os.getenv("OPENAI_API_KEY", self.service_key)
- self.client = openai.OpenAI(api_key=self.service_key, base_url=host_address)
+ self.client = openai.OpenAI(api_key=self.service_key, base_url=None if host_address is None else host_address if len(host_address)>0 else None)
 self.completion_format = ELF_COMPLETION_FORMAT.Chat


@@ -103,15 +104,15 @@ class OpenAIBinding(LollmsLLMBinding):
 """
 count = 0
 output = ""
+ messages = [
+     {
+         "role": "system",
+         "content": system_prompt or "You are a helpful assistant.",
+     }
+ ]

 # Prepare messages based on whether images are provided
 if images:
-     messages = [
-         {
-             "role": "system",
-             "content": system_prompt,
-         }
-     ]
     if split:
         messages += self.split_discussion(prompt,user_keyword=user_keyword, ai_keyword=ai_keyword)
         if images:

@@ -150,7 +151,27 @@ class OpenAIBinding(LollmsLLMBinding):
     )

 else:
-
+
+     if split:
+         messages += self.split_discussion(prompt,user_keyword=user_keyword, ai_keyword=ai_keyword)
+         if images:
+             messages[-1]["content"] = [
+                 {
+                     "type": "text",
+                     "text": messages[-1]["content"]
+                 }
+             ]
+     else:
+         messages.append({
+             'role': 'user',
+             'content': [
+                 {
+                     "type": "text",
+                     "text": prompt
+                 }
+             ]
+         }
+         )

 # Generate text using the OpenAI API
 if self.completion_format == ELF_COMPLETION_FORMAT.Chat:
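Note on the base_url change above: an empty or missing host_address now resolves to None, so the client falls back to the official OpenAI endpoint instead of receiving an empty string. A minimal standalone sketch of that normalization (the helper name is ours, not the package's):

    from typing import Optional

    def normalize_base_url(host_address: Optional[str]) -> Optional[str]:
        # None or "" -> None, so openai.OpenAI() uses its default endpoint;
        # any non-empty string is forwarded unchanged (e.g. a local proxy URL).
        if host_address is None or len(host_address) == 0:
            return None
        return host_address

    assert normalize_base_url("") is None
    assert normalize_base_url(None) is None
    assert normalize_base_url("http://localhost:8080/v1") == "http://localhost:8080/v1"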
lollms_client/lollms_core.py
CHANGED
@@ -13,6 +13,8 @@ from lollms_client.lollms_ttm_binding import LollmsTTMBinding, LollmsTTMBindingM
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager

 from lollms_client.lollms_discussion import LollmsDiscussion
+
+ from lollms_client.lollms_utilities import build_image_dicts, dict_to_markdown
 import json, re
 from enum import Enum
 import base64
@@ -846,7 +848,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
 "2. **Check for a Single-Step Solution:** Scrutinize the available tools. Can a single tool call directly achieve the user's current goal? \n"
 "3. **Formulate a Plan:** Based on your analysis, create a concise, numbered list of steps to achieve the goal. If the goal is simple, this may be only one step. If it is complex or multi-turn, it may be several steps.\n\n"
 "**CRITICAL RULES:**\n"
- "* **MANDATORY:
+ "* **MANDATORY: Be helpful, curious and creative.\n"
 "* **Focus on the Goal:** Your plan should directly address the user's request as it stands now in the conversation.\n\n"
 "---\n"
 "**Available Tools:**\n"

@@ -1078,7 +1080,7 @@ Provide your response as a single JSON object with one key, "query".
 """
 try:
     raw_initial_query_response = self.generate_code(initial_query_gen_prompt, system_prompt="You are a query generation expert.", temperature=0.0)
-     initial_plan =
+     initial_plan = robust_json_parser(raw_initial_query_response)
     current_query_for_rag = initial_plan.get("query")
     if not current_query_for_rag:
         raise ValueError("LLM returned an empty initial query.")

@@ -1434,7 +1436,6 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 new_scratchpad_text = self.generate_text(prompt=synthesis_prompt, n_predict=1024, temperature=0.0)
 return self.remove_thinking_blocks(new_scratchpad_text).strip()

- # In lollms_client/lollms_discussion.py -> LollmsClient class

 def generate_with_mcp_rag(
     self,
@@ -1442,15 +1443,16 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 use_mcps: Union[None, bool, List[str]] = None,
 use_data_store: Union[None, Dict[str, Callable]] = None,
 system_prompt: str = None,
- reasoning_system_prompt: str = "You are a logical and
+ reasoning_system_prompt: str = "You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.",
 images: Optional[List[str]] = None,
- max_reasoning_steps: int =
- decision_temperature: float =
+ max_reasoning_steps: int = None,
+ decision_temperature: float = None,
 final_answer_temperature: float = None,
 streaming_callback: Optional[Callable[[str, 'MSG_TYPE', Optional[Dict], Optional[List]], bool]] = None,
- rag_top_k: int =
- rag_min_similarity_percent: float =
- output_summarization_threshold: int =
+ rag_top_k: int = None,
+ rag_min_similarity_percent: float = None,
+ output_summarization_threshold: int = None, # In tokens
+ debug: bool = False,
 **llm_generation_kwargs
 ) -> Dict[str, Any]:
 """Generates a response using a dynamic agent with stateful, ID-based step tracking.
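With the new signature above, every tuning knob is optional and is filled with a default inside the method (shown in the next hunks). A hedged example call, assuming `client` is an already-initialized LollmsClient with an MCP binding attached; the tool name mirrors the example used later in the diff:

    result = client.generate_with_mcp_rag(
        prompt="What time is it right now?",
        use_mcps=["time_machine::get_current_time"],
        max_reasoning_steps=5,   # falls back to 10 when left as None
        debug=True,              # new in 0.25.x: dumps prompts and raw responses
    )
    print(result["final_answer"])
    print(result["tool_calls"])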
@@ -1483,6 +1485,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 rag_min_similarity_percent: Minimum similarity for RAG results.
 output_summarization_threshold: The token count that triggers automatic
     summarization of a tool's text output.
+ debug : If true, we'll report the detailed promptin and response information
 **llm_generation_kwargs: Additional keyword arguments for LLM calls.

 Returns:

@@ -1490,12 +1493,28 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
     answer, the complete internal scratchpad, a log of tool calls,
     any retrieved RAG sources, and other metadata.
 """
+ reasoning_step_id = None
 if not self.binding:
     return {"final_answer": "", "tool_calls": [], "sources": [], "error": "LLM binding not initialized."}

+ if not max_reasoning_steps:
+     max_reasoning_steps= 10
+ if not rag_min_similarity_percent:
+     rag_min_similarity_percent= 50
+ if not rag_top_k:
+     rag_top_k = 5
+ if not decision_temperature:
+     decision_temperature = 0.7
+ if not output_summarization_threshold:
+     output_summarization_threshold = 500
+
+ events = []
+
+
 # --- Initialize Agent State ---
 sources_this_turn: List[Dict[str, Any]] = []
 tool_calls_this_turn: List[Dict[str, Any]] = []
+ generated_code_store: Dict[str, str] = {} # NEW: Store for UUID -> code
 original_user_prompt = prompt

 initial_state_parts = [
@@ -1507,41 +1526,50 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 initial_state_parts.append(f"- The user has provided {len(images)} image(s) for context.")
 current_scratchpad = "\n".join(initial_state_parts)

-
-
+ def log_prompt(prompt, type="prompt"):
+     ASCIIColors.cyan(f"** DEBUG: {type} **")
+     ASCIIColors.magenta(prompt[-15000:])
+     prompt_size = self.count_tokens(prompt)
+     ASCIIColors.red(f"Prompt size:{prompt_size}/{self.default_ctx_size}")
+     ASCIIColors.cyan(f"** DEBUG: DONE **")
+
+ # --- Define Inner Helper Functions ---
+ def log_event(
     description: str,
-
+     event_type: MSG_TYPE = MSG_TYPE.MSG_TYPE_CHUNK,
     metadata: Optional[Dict] = None,
-
+     event_id=None
 ) -> Optional[str]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-     streaming_callback(description, MSG_TYPE.MSG_TYPE_STEP, params)
-     return None
+     if not streaming_callback: return None
+     event_id = str(uuid.uuid4()) if event_type==MSG_TYPE.MSG_TYPE_STEP_START else event_id
+     params = {"type": event_type, "description": description, **(metadata or {})}
+     params["id"] = event_id
+     streaming_callback(description, event_type, params)
+     return event_id
+
+ def _substitute_code_uuids_recursive(data: Any, code_store: Dict[str, str]):
+     """Recursively finds and replaces code UUIDs in tool parameters."""
+     if isinstance(data, dict):
+         for key, value in data.items():
+             if isinstance(value, str) and value in code_store:
+                 data[key] = code_store[value]
+             else:
+                 _substitute_code_uuids_recursive(value, code_store)
+     elif isinstance(data, list):
+         for i, item in enumerate(data):
+             if isinstance(item, str) and item in code_store:
+                 data[i] = code_store[item]
+             else:
+                 _substitute_code_uuids_recursive(item, code_store)

+ discovery_step_id = log_event("Discovering tools",MSG_TYPE.MSG_TYPE_STEP_START)
 # --- 1. Discover Available Tools ---
 available_tools = []
 if use_mcps and self.mcp:
-
+     discovered_tools = self.mcp.discover_tools(force_refresh=True)
+     if isinstance(use_mcps, list):
+         available_tools.extend([t for t in discovered_tools if t["name"] in use_mcps])
+
 if use_data_store:
     for store_name in use_data_store:
         available_tools.append({
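The _substitute_code_uuids_recursive helper above swaps any parameter value that matches a buffered-code ID for the buffered code itself, recursing through nested dicts and lists. A standalone re-statement of the same traversal, with a shortened hypothetical ID, shows the effect:

    from typing import Any, Dict

    def substitute_code_uuids(data: Any, code_store: Dict[str, str]) -> None:
        # Same traversal as _substitute_code_uuids_recursive above: replace any string
        # value that is a known buffer ID, and recurse into nested dicts and lists.
        if isinstance(data, dict):
            for key, value in data.items():
                if isinstance(value, str) and value in code_store:
                    data[key] = code_store[value]
                else:
                    substitute_code_uuids(value, code_store)
        elif isinstance(data, list):
            for i, item in enumerate(data):
                if isinstance(item, str) and item in code_store:
                    data[i] = code_store[item]
                else:
                    substitute_code_uuids(item, code_store)

    code_store = {"3a6e7c1d": "print('hello from buffered code')"}
    tool_params = {"language": "python", "code": "3a6e7c1d", "args": ["3a6e7c1d", "verbose"]}
    substitute_code_uuids(tool_params, code_store)
    assert tool_params["code"] == "print('hello from buffered code')"
    assert tool_params["args"][0] == "print('hello from buffered code')"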
@@ -1550,20 +1578,33 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 "input_schema": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}
 })

-
-
-
+ # Add the new put_code_in_buffer tool definition
+ available_tools.append({
+     "name": "generate_code",
+     "description": """Generates and stores code into a buffer to be used by another tool. You can put the uuid of the generated code into the fields that require long code among the tools. If no tool requires code as input do not use generate_code. generate_code do not execute the code nor does it audit it.""",
+     "input_schema": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed natural language description of the code's purpose and requirements."}, "language": {"type": "string", "description": "The programming language of the generated code. By default it uses python."}}, "required": ["prompt"]}
+ })
+ # Add the new refactor_scratchpad tool definition
+ available_tools.append({
+     "name": "refactor_scratchpad",
+     "description": "Rewrites the scratchpad content to clean it and reorganize it. Only use if the scratchpad is messy or contains too much information compared to what you need.",
+     "input_schema": {"type": "object", "properties": {}}
+ })
+
+ formatted_tools_list = "\n".join([f"**{t['name']}**:\n{t['description']}\ninput schema:\n{json.dumps(t['input_schema'])}" for t in available_tools])
+ formatted_tools_list += "\n**request_clarification**:\nUse if the user's request is ambiguous and you can not infer a clear idea of his intent. this tool has no parameters."
+ formatted_tools_list += "\n**final_answer**:\nUse when you are ready to respond to the user. this tool has no parameters."
+
+ if discovery_step_id: log_event("Discovering tools",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)

 # --- 2. Dynamic Reasoning Loop ---
 for i in range(max_reasoning_steps):
-
-
-
-
-
-
-     reasoning_prompt_template = f"""You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.
-
+     try:
+         reasoning_step_id = log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_START)
+         user_context = f'Original User Request: "{original_user_prompt}"'
+         if images: user_context += f'\n(Note: {len(images)} image(s) were provided with this request.)'
+
+         reasoning_prompt_template = f"""
 --- AVAILABLE TOOLS ---
 {formatted_tools_list}
 --- CONTEXT ---
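These definitions register the code-buffer and scratchpad-refactoring tools with the agent; the reasoning hunk below then asks the model to answer each step with a thought/action JSON object (action_template). A hypothetical decision that uses the buffer could look like the following, with the returned code_id later substituted into whichever tool actually needs the code:

    # Hypothetical decision object matching the action_template shown below; the
    # follow-up step would pass the returned code_id into a code-consuming tool.
    action_data = {
        "thought": "The user wants a script; I will buffer the code first, then hand "
                   "its ID to the execution tool in the next step.",
        "action": {
            "tool_name": "put_code_in_buffer",
            "tool_params": {
                "prompt": "Write a Python script that prints the current date.",
                "language": "python",
            },
        },
    }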
@@ -1577,122 +1618,152 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 2. **THINK:**
    - Does the latest observation completely fulfill the user's original request?
    - If YES, your next action MUST be to use the `final_answer` tool.
-    - If NO, what is the single next logical step needed?
+    - If NO, what is the single next logical step needed? This may involve writing code first with `put_code_in_buffer`, then using another tool.
    - If you are stuck or the request is ambiguous, use `request_clarification`.
 3. **ACT:** Formulate your decision as a JSON object.
 """
-
-
-
-
-
-
+         action_template = {
+             "thought": "My detailed analysis of the last observation and my reasoning for the next action and how it integrates with my global plan.",
+             "action": {
+                 "tool_name": "The single tool to use (e.g., 'put_code_in_buffer', 'time_machine::get_current_time', 'final_answer').",
+                 "tool_params": {"param1": "value1"},
+                 "clarification_question": "(string, ONLY if tool_name is 'request_clarification')"
+             }
 }
-
-
-
-
-
-
-
-             images=images if i == 0 else None
-         )
-
-         try:
-             action_data = json.loads(structured_action_response)
-             thought = action_data.get("thought", "No thought was generated.")
-             action = action_data.get("action", {})
-             tool_name = action.get("tool_name")
-             tool_params = action.get("tool_params", {})
-         except (json.JSONDecodeError, TypeError) as e:
-             current_scratchpad += f"\n\n### Step {i+1} Failure\n- **Error:** Failed to generate a valid JSON action: {e}"
-             log_step(f"\n\n### Step {i+1} Failure\n- **Error:** Failed to generate a valid JSON action: {e}", "scratchpad", is_start=False)
-             if reasoning_step_id:
-                 log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id, "error": str(e)}, is_start=False)
-             break
+         if debug: log_prompt(reasoning_prompt_template, f"REASONING PROMPT (Step {i+1})")
+         structured_action_response = self.generate_code(
+             prompt=reasoning_prompt_template, template=json.dumps(action_template, indent=2),
+             system_prompt=reasoning_system_prompt, temperature=decision_temperature,
+             images=images if i == 0 else None
+         )
+         if debug: log_prompt(structured_action_response, f"RAW REASONING RESPONSE (Step {i+1})")

-
-
+         try:
+             action_data = robust_json_parser(structured_action_response)
+             thought = action_data.get("thought", "No thought was generated.")
+             action = action_data.get("action", {})
+             if isinstance(action,str):
+                 tool_name = action
+                 tool_params = {}
+             else:
+                 tool_name = action.get("tool_name")
+                 tool_params = action.get("tool_params", {})
+         except (json.JSONDecodeError, TypeError) as e:
+             current_scratchpad += f"\n\n### Step {i+1} Failure\n- **Error:** Failed to generate a valid JSON action: {e}"
+             log_event(f"Step Failure: Invalid JSON action.", MSG_TYPE.MSG_TYPE_EXCEPTION, metadata={"details": str(e)})
+             if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, metadata={"error": str(e)}, event_id=reasoning_step_id)
+

-
-
-             log_step(f"\n\n### Step {i+1} Failure\n- **Error:** Did not specify a tool name.", "scratchpad", is_start=False)
-             if reasoning_step_id:
-                 log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id}, is_start=False)
-             break
+         current_scratchpad += f"\n\n### Step {i+1}: Thought\n{thought}"
+         log_event(f"Thought: {thought}", MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
+         if not tool_name:
+             # Handle error...
+             break
+
+         # --- Handle special, non-executing tools ---
+         if tool_name == "request_clarification":
+             # Handle clarification...
+             return {"final_answer": action.get("clarification_question", "Could you please provide more details?"), "final_scratchpad": current_scratchpad, "tool_calls": tool_calls_this_turn, "sources": sources_this_turn, "clarification_required": True, "error": None}
+
+         if tool_name == "final_answer":
+             current_scratchpad += f"\n\n### Step {i+1}: Action\n- **Action:** Decided to formulate the final answer."
+             log_event("Action: Formulate final answer.", MSG_TYPE.MSG_TYPE_THOUGHT_CHUNK)
+             if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}",MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
+             break

-
-
-
-
-
-
-
-
-
-
-
+         # --- Handle the `put_code_in_buffer` tool specifically ---
+         if tool_name == 'put_code_in_buffer':
+             code_gen_id = log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_STEP_START, metadata={"name": "put_code_in_buffer", "id": "gencode"})
+             code_prompt = tool_params.get("prompt", "Generate the requested code.")
+
+             # Use a specific system prompt to get raw code
+             code_generation_system_prompt = "You are a code generation assistant. Generate ONLY the raw code based on the user's request. Do not add any explanations, markdown code fences, or other text outside of the code itself."
+             generated_code = self.generate_code(prompt=code_prompt, system_prompt=code_generation_system_prompt + "\n----\n" + reasoning_prompt_template, **llm_generation_kwargs)
+
+             code_uuid = str(uuid.uuid4())
+             generated_code_store[code_uuid] = generated_code
+
+             tool_result = {"status": "success", "code_id": code_uuid, "summary": f"Code generated successfully. Use this ID in the next tool call that requires code."}
+             tool_calls_this_turn.append({"name": "put_code_in_buffer", "params": tool_params, "result": tool_result})
+             observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
+             current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
+             log_event(f"Observation: Code generated with ID: {code_uuid}", MSG_TYPE.MSG_TYPE_OBSERVATION)
+             if code_gen_id: log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
+             if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, event_id= reasoning_step_id)
+             continue # Go to the next reasoning step immediately
+         if tool_name == 'refactor_scratchpad':
+             scratchpad_cleaning_prompt = f"""Enhance this scratchpad content to be more organized and comprehensive. Keep relevant experience information and remove any useless redundancies. Try to log learned things from the context so that you won't make the same mistakes again. Do not remove the main objective information or any crucial information that may be useful for the next iterations. Answer directly with the new scratchpad content without any comments.
+ --- YOUR INTERNAL SCRATCHPAD (Work History & Analysis) ---
+ {current_scratchpad}
+ --- END OF SCRATCHPAD ---"""
+             current_scratchpad = self.generate_text(scratchpad_cleaning_prompt)
+             log_event(f"New scratchpad:\n{current_scratchpad}")
+
+         # --- Substitute UUIDs and Execute Standard Tools ---
+         log_event(f"Calling tool: `{tool_name}` with params:\n{dict_to_markdown(tool_params)}", MSG_TYPE.MSG_TYPE_STEP)
+         _substitute_code_uuids_recursive(tool_params, generated_code_store)
+
+         tool_call_id = log_event(f"Executing tool: {tool_name}",MSG_TYPE.MSG_TYPE_STEP_START, metadata={"name": tool_name, "parameters": tool_params, "id":"executing tool"})
+         tool_result = None
+         try:
+             if tool_name.startswith("research::") and use_data_store:
+                 store_name = tool_name.split("::")[1]
+                 rag_callable = use_data_store.get(store_name, {}).get("callable")
+                 query = tool_params.get("query", "")
+                 retrieved_chunks = rag_callable(query, rag_top_k=rag_top_k, rag_min_similarity_percent=rag_min_similarity_percent)
+                 if retrieved_chunks:
+                     sources_this_turn.extend(retrieved_chunks)
+                     tool_result = {"status": "success", "summary": f"Found {len(retrieved_chunks)} relevant chunks.", "chunks": retrieved_chunks}
+                 else:
+                     tool_result = {"status": "success", "summary": "No relevant documents found."}
+             elif use_mcps and self.mcp:
+                 mcp_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
+                 tool_result = {"status": "success", "output": mcp_result} if not (isinstance(mcp_result, dict) and "error" in mcp_result) else {"status": "failure", **mcp_result}
             else:
-                 tool_result = {"status": "
-
-
-                 tool_result = {"status": "
+                 tool_result = {"status": "failure", "error": f"Tool '{tool_name}' not found."}
+         except Exception as e:
+             trace_exception(e)
+             tool_result = {"status": "failure", "error": f"Exception executing tool: {str(e)}"}
+
+         if tool_call_id: log_event(f"Executing tool: {tool_name}", MSG_TYPE.MSG_TYPE_STEP_END, metadata={"result": tool_result}, event_id= tool_call_id)
+
+         observation_text = ""
+         sanitized_result = {}
+         if isinstance(tool_result, dict):
+             sanitized_result = tool_result.copy()
+             summarized_fields = {}
+             for key, value in tool_result.items():
+                 if isinstance(value, str) and key.endswith("_base64") and len(value) > 256:
+                     sanitized_result[key] = f"[Image was generated. Size: {len(value)} bytes]"
+                     continue
+                 if isinstance(value, str) and len(self.tokenize(value)) > output_summarization_threshold:
+                     if streaming_callback: streaming_callback(f"Summarizing long output from field '{key}'...", MSG_TYPE.MSG_TYPE_STEP, {"type": "summarization"})
+                     summary = self.sequential_summarize(text=value, chunk_processing_prompt=f"Summarize key info from this chunk of '{key}'.", callback=streaming_callback)
+                     summarized_fields[key] = summary
+                     sanitized_result[key] = f"[Content summarized, see summary below. Original length: {len(value)} chars]"
+             observation_text = f"```json\n{json.dumps(sanitized_result, indent=2)}\n```"
+             if summarized_fields:
+                 observation_text += "\n\n**Summaries of Long Outputs:**"
+                 for key, summary in summarized_fields.items():
+                     observation_text += f"\n- **Summary of '{key}':**\n{summary}"
         else:
-
-
-
-
-
-
-
-
-
-
-
-
-             for key, value in tool_result.items():
-                 if isinstance(value, str) and key.endswith("_base64") and len(value) > 256:
-                     sanitized_result[key] = f"[Image was generated. Size: {len(value)} bytes]"
-                     continue
-                 if isinstance(value, str) and len(self.tokenize(value)) > output_summarization_threshold:
-                     if streaming_callback: streaming_callback(f"Summarizing long output from field '{key}'...", MSG_TYPE.MSG_TYPE_STEP, {"type": "summarization"})
-                     summary = self.sequential_summarize(text=value, chunk_processing_prompt=f"Summarize key info from this chunk of '{key}'.", callback=streaming_callback)
-                     summarized_fields[key] = summary
-                     sanitized_result[key] = f"[Content summarized, see summary below. Original length: {len(value)} chars]"
-             observation_text = f"```json\n{json.dumps(sanitized_result, indent=2)}\n```"
-             if summarized_fields:
-                 observation_text += "\n\n**Summaries of Long Outputs:**"
-                 for key, summary in summarized_fields.items():
-                     observation_text += f"\n- **Summary of '{key}':**\n{summary}"
-         else:
-             observation_text = f"Tool returned non-dictionary output: {str(tool_result)}"
-
-         tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
-         current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
-         log_step(f"### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n", "scratchpad", is_start=False)
-
-         if reasoning_step_id:
-             log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id}, is_start=False)
-
+             observation_text = f"Tool returned non-dictionary output: {str(tool_result)}"
+
+         tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
+         current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
+         log_event(f"Observation: Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)
+
+         if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
+     except Exception as ex:
+         trace_exception(ex)
+         current_scratchpad += f"\n\n### Error : {ex}"
+         if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
+
 # --- Final Answer Synthesis ---
- synthesis_id =
+ synthesis_id = log_event("Synthesizing final answer...", MSG_TYPE.MSG_TYPE_STEP_START)

- final_answer_prompt = f"""
+ final_answer_prompt = f"""
 --- Original User Request ---
 "{original_user_prompt}"
 --- Your Internal Scratchpad (Actions Taken & Findings) ---
@@ -1702,11 +1773,14 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 - If images were provided by the user, incorporate your analysis of them into the answer.
 - Do not talk about your internal process unless it's necessary to explain why you couldn't find an answer.
 """
+ if debug: log_prompt(final_answer_prompt, "FINAL ANSWER SYNTHESIS PROMPT")
+
+
 final_answer_text = self.generate_text(prompt=final_answer_prompt, system_prompt=system_prompt, images=images, stream=streaming_callback is not None, streaming_callback=streaming_callback, temperature=final_answer_temperature, **llm_generation_kwargs)
 final_answer = self.remove_thinking_blocks(final_answer_text)
+ if debug: log_prompt(final_answer_text, "FINAL ANSWER RESPONSE")

- if synthesis_id:
-     log_step("Synthesizing final answer...", "final_answer_synthesis", metadata={"id": synthesis_id}, is_start=False)
+ if synthesis_id: log_event("Synthesizing final answer...", MSG_TYPE.MSG_TYPE_STEP_END, event_id= synthesis_id)

 return {
     "final_answer": final_answer,

@@ -1716,7 +1790,6 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 "clarification_required": False,
 "error": None
 }
-
 def generate_code(
     self,
     prompt,

@@ -1795,7 +1868,7 @@ Do not split the code in multiple tags.
 while not last_code["is_complete"] and retries < max_retries:
     retries += 1
     ASCIIColors.info(f"Code block seems incomplete. Attempting continuation ({retries}/{max_retries})...")
-     continuation_prompt = f"{
+     continuation_prompt = f"{prompt}\n\nAssistant:\n{code_content}\n\n{self.user_full_header}The previous code block was incomplete. Continue the code exactly from where it left off. Do not repeat the previous part. Only provide the continuation inside a single {code_tag_format} code tag.\n{self.ai_full_header}"

     continuation_response = self.generate_text(
         continuation_prompt,
@@ -2065,7 +2138,7 @@ Do not split the code in multiple tags.
 response_json_str = re.sub(r",\s*}", "}", response_json_str)
 response_json_str = re.sub(r",\s*]", "]", response_json_str)

- parsed_response =
+ parsed_response = robust_json_parser(response_json_str)
 answer = parsed_response.get("answer")
 explanation = parsed_response.get("explanation", "")

@@ -2159,7 +2232,7 @@ Do not split the code in multiple tags.
 response_json_str = re.sub(r",\s*}", "}", response_json_str)
 response_json_str = re.sub(r",\s*]", "]", response_json_str)

- result =
+ result = robust_json_parser(response_json_str)
 index = result.get("index")
 explanation = result.get("explanation", "")

@@ -2232,7 +2305,7 @@ Do not split the code in multiple tags.
 response_json_str = re.sub(r",\s*}", "}", response_json_str)
 response_json_str = re.sub(r",\s*]", "]", response_json_str)

- result =
+ result = robust_json_parser(response_json_str)
 ranking = result.get("ranking")
 explanations = result.get("explanations", []) if return_explanation else None

@@ -2856,5 +2929,3 @@ def chunk_text(text, tokenizer, detokenizer, chunk_size, overlap, use_separators
 break

 return chunks
-
-
lollms_client/lollms_discussion.py
CHANGED

@@ -29,6 +29,8 @@ if False:
     from lollms_client import LollmsClient
     from lollms_personality import LollmsPersonality

+ from lollms_client.lollms_utilities import build_image_dicts, robust_json_parser
+ from ascii_colors import ASCIIColors, trace_exception

 class EncryptedString(TypeDecorator):
     """A SQLAlchemy TypeDecorator for field-level database encryption.

@@ -421,6 +423,10 @@ class LollmsDiscussion:
 else:
     return cls(lollmsClient=lollms_client, discussion_id=kwargs.get('id'), **init_args)

+ def get_messages(self):
+     """Returns the list of messages"""
+     return self._db_discussion.messages
+
 def __getattr__(self, name: str) -> Any:
     """Proxies attribute getting to the underlying discussion object."""
     if name == 'metadata':
@@ -564,11 +570,13 @@ class LollmsDiscussion:
 self,
 user_message: str,
 personality: Optional['LollmsPersonality'] = None,
+ branch_tip_id: Optional[str | None] = None,
 use_mcps: Union[None, bool, List[str]] = None,
 use_data_store: Union[None, Dict[str, Callable]] = None,
 add_user_message: bool = True,
- max_reasoning_steps: int =
+ max_reasoning_steps: int = 20,
 images: Optional[List[str]] = None,
+ debug: bool = False,
 **kwargs
 ) -> Dict[str, 'LollmsMessage']:
 """Main interaction method that can invoke the dynamic, multi-modal agent.

@@ -597,6 +605,7 @@ class LollmsDiscussion:
     before it must provide a final answer.
 images: A list of base64-encoded images provided by the user, which will
     be passed to the agent or a multi-modal LLM.
+ debug: If True, prints full prompts and raw AI responses to the console.
 **kwargs: Additional keyword arguments passed to the underlying generation
     methods, such as 'streaming_callback'.
@@ -640,12 +649,21 @@ class LollmsDiscussion:
 # Step 3: Execute the appropriate generation logic.
 if is_agentic_turn:
     # --- AGENTIC TURN ---
+     prompt_for_agent = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
+     if debug:
+         ASCIIColors.cyan("\n" + "="*50)
+         ASCIIColors.cyan("--- DEBUG: AGENTIC TURN TRIGGERED ---")
+         ASCIIColors.cyan(f"--- PROMPT FOR AGENT (from discussion history) ---")
+         ASCIIColors.magenta(prompt_for_agent)
+         ASCIIColors.cyan("="*50 + "\n")
+
     agent_result = self.lollmsClient.generate_with_mcp_rag(
-         prompt=
+         prompt=prompt_for_agent,
         use_mcps=use_mcps,
         use_data_store=use_data_store,
         max_reasoning_steps=max_reasoning_steps,
         images=images,
+         debug=debug, # Pass the debug flag down
         **kwargs
     )
     final_content = agent_result.get("final_answer", "The agent did not produce a final answer.")

@@ -654,9 +672,27 @@ class LollmsDiscussion:

 else:
     # --- SIMPLE CHAT TURN ---
+     if debug:
+         prompt_for_chat = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
+         ASCIIColors.cyan("\n" + "="*50)
+         ASCIIColors.cyan("--- DEBUG: SIMPLE CHAT PROMPT ---")
+         ASCIIColors.magenta(prompt_for_chat)
+         ASCIIColors.cyan("="*50 + "\n")
+
     # For simple chat, we also need to consider images if the model is multi-modal
     final_raw_response = self.lollmsClient.chat(self, images=images, **kwargs) or ""
-
+
+     if debug:
+         ASCIIColors.cyan("\n" + "="*50)
+         ASCIIColors.cyan("--- DEBUG: RAW SIMPLE CHAT RESPONSE ---")
+         ASCIIColors.magenta(final_raw_response)
+         ASCIIColors.cyan("="*50 + "\n")
+
+     if isinstance(final_raw_response, dict) and final_raw_response.get("status") == "error":
+         raise Exception(final_raw_response.get("message", "Unknown error from lollmsClient.chat"))
+     else:
+         final_content = self.lollmsClient.remove_thinking_blocks(final_raw_response)
+
 final_scratchpad = None # No agentic scratchpad in a simple turn

 # Step 4: Post-generation processing and statistics.
@@ -694,7 +730,7 @@ class LollmsDiscussion:

 return {"user_message": user_msg, "ai_message": ai_message_obj}

- def regenerate_branch(self, **kwargs) -> Dict[str, 'LollmsMessage']:
+ def regenerate_branch(self, branch_tip_id=None, **kwargs) -> Dict[str, 'LollmsMessage']:
     """Regenerates the last AI response in the active branch.

     It deletes the previous AI response and calls chat() again with the

@@ -706,8 +742,15 @@ class LollmsDiscussion:
 Returns:
     A dictionary with the user and the newly generated AI message.
 """
+ if not branch_tip_id:
+     branch_tip_id = self.active_branch_id
 if not self.active_branch_id or self.active_branch_id not in self._message_index:
-
+     if len(self._message_index)>0:
+         ASCIIColors.warning("No active message to regenerate from.\n")
+         ASCIIColors.warning(f"Using last available message:{list(self._message_index.keys())[-1]}\n")
+     else:
+         branch_tip_id = list(self._message_index.keys())[-1]
+         raise ValueError("No active message to regenerate from.")

 last_message_orm = self._message_index[self.active_branch_id]
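A hedged usage sketch of the reworked method, assuming `discussion` is an existing LollmsDiscussion and the message ID is hypothetical:

    # Regenerate the reply at a specific branch tip instead of the active one;
    # the return dict keys follow the chat() return shown above.
    result = discussion.regenerate_branch(branch_tip_id="9f4c2e-example-id")
    print(result["ai_message"].content)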
@@ -722,11 +765,8 @@ class LollmsDiscussion:
 if self._is_db_backed:
     self._messages_to_delete_from_db.add(last_message_id)

-
-
-
- prompt_to_regenerate = self._message_index[self.active_branch_id].content
- return self.chat(user_message=prompt_to_regenerate, add_user_message=False, **kwargs)
+ return self.chat(user_message="", add_user_message=False, branch_tip_id=branch_tip_id, **kwargs)
+
 def delete_branch(self, message_id: str):
     """Deletes a message and its entire descendant branch.

@@ -801,7 +841,7 @@ class LollmsDiscussion:

 Args:
     format_type: The target format. Can be "lollms_text", "openai_chat",
-         or "
+         "ollama_chat", or "markdown".
     branch_tip_id: The ID of the message to use as the end of the context.
         Defaults to the active branch ID.
     max_allowed_tokens: The maximum number of tokens the final prompt can contain.
@@ -809,15 +849,15 @@ class LollmsDiscussion:

 Returns:
     A string for "lollms_text" or a list of dictionaries for "openai_chat"
-     and "ollama_chat".
+     and "ollama_chat". For "markdown", returns a Markdown-formatted string.

 Raises:
     ValueError: If an unsupported format_type is provided.
 """
 branch_tip_id = branch_tip_id or self.active_branch_id
- if not branch_tip_id and format_type in ["lollms_text", "openai_chat", "ollama_chat"]:
+ if not branch_tip_id and format_type in ["lollms_text", "openai_chat", "ollama_chat", "markdown"]:
     return "" if format_type == "lollms_text" else []
-
+
 branch = self.get_branch(branch_tip_id)
 full_system_prompt = self.system_prompt # Simplified for clarity
 participants = self.participants or {}

@@ -829,14 +869,12 @@ class LollmsDiscussion:

 # --- NATIVE LOLLMS_TEXT FORMAT ---
 if format_type == "lollms_text":
-     # --- FIX STARTS HERE ---
     final_prompt_parts = []
     message_parts = [] # Temporary list for correctly ordered messages
-
+
     current_tokens = 0
     messages_to_render = branch

-     # 1. Handle non-destructive pruning summary
     summary_text = ""
     if self.pruning_summary and self.pruning_point_id:
         pruning_index = -1

@@ -848,7 +886,6 @@ class LollmsDiscussion:
 messages_to_render = branch[pruning_index:]
 summary_text = f"!@>system:\n--- Conversation Summary ---\n{self.pruning_summary.strip()}\n"

- # 2. Add main system prompt to the final list
 sys_msg_text = ""
 if full_system_prompt:
     sys_msg_text = f"!@>system:\n{full_system_prompt.strip()}\n"

@@ -856,15 +893,13 @@ class LollmsDiscussion:
 if max_allowed_tokens is None or sys_tokens <= max_allowed_tokens:
     final_prompt_parts.append(sys_msg_text)
     current_tokens += sys_tokens
-
- # 3. Add pruning summary (if it exists) to the final list
+
 if summary_text:
     summary_tokens = self.lollmsClient.count_tokens(summary_text)
     if max_allowed_tokens is None or current_tokens + summary_tokens <= max_allowed_tokens:
         final_prompt_parts.append(summary_text)
         current_tokens += summary_tokens

- # 4. Build the message list in correct order, respecting token limits
 for msg in reversed(messages_to_render):
     sender_str = msg.sender.replace(':', '').replace('!@>', '')
     content = get_full_content(msg)
@@ -872,24 +907,21 @@ class LollmsDiscussion:
 content += f"\n({len(msg.images)} image(s) attached)"
 msg_text = f"!@>{sender_str}:\n{content}\n"
 msg_tokens = self.lollmsClient.count_tokens(msg_text)
-
+
 if max_allowed_tokens is not None and current_tokens + msg_tokens > max_allowed_tokens:
     break
-
- # Always insert at the beginning of the temporary list
+
 message_parts.insert(0, msg_text)
 current_tokens += msg_tokens
-
- # 5. Combine system/summary prompts with the message parts
+
 final_prompt_parts.extend(message_parts)
 return "".join(final_prompt_parts).strip()
-
-
- # --- OPENAI & OLLAMA CHAT FORMATS (remains the same and is correct) ---
+
+ # --- OPENAI & OLLAMA CHAT FORMATS ---
 messages = []
 if full_system_prompt:
     messages.append({"role": "system", "content": full_system_prompt})
-
+
 for msg in branch:
     if msg.sender_type == 'user':
         role = participants.get(msg.sender, "user")

@@ -897,6 +929,8 @@ class LollmsDiscussion:
 role = participants.get(msg.sender, "assistant")

 content, images = get_full_content(msg), msg.images or []
+ images = build_image_dicts(images)
+

 if format_type == "openai_chat":
     if images:
@@ -908,18 +942,29 @@ class LollmsDiscussion:
 messages.append({"role": role, "content": content_parts})
 else:
     messages.append({"role": role, "content": content})
-
+
 elif format_type == "ollama_chat":
     message_dict = {"role": role, "content": content}
+
     base64_images = [img['data'] for img in images if img['type'] == 'base64']
     if base64_images:
         message_dict["images"] = base64_images
     messages.append(message_dict)

+ elif format_type == "markdown":
+     # Create Markdown content based on the role and content
+     markdown_line = f"**{role.capitalize()}**: {content}\n"
+     if images:
+         for img in images:
+             img_data = img['data']
+             url = f"" if img['type'] == 'base64' else f""
+             markdown_line += f"\n{url}\n"
+     messages.append(markdown_line)
+
 else:
     raise ValueError(f"Unsupported export format_type: {format_type}")
-
- return messages
+
+ return "\n".join(messages) if format_type == "markdown" else messages
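The new "markdown" branch renders each message as a `**Role**: content` line and, per the final return statement above, joins them with newlines. A quick usage sketch, assuming `discussion` already holds messages:

    md = discussion.export("markdown")   # whole active branch as a single string
    print(md.splitlines()[0])
    # e.g. **User**: Hello there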
@@ -966,4 +1011,27 @@ class LollmsDiscussion:
 self.pruning_point_id = pruning_point_message.id

 self.touch()
- print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
+ print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
+
+ def switch_to_branch(self, branch_id):
+     self.active_branch_id = branch_id
+
+ def auto_title(self):
+     try:
+         if self.metadata is None:
+             self.metadata = {}
+         discussion = self.export("markdown")[0:1000]
+         prompt = f"""You are a title builder. Your oibjective is to build a title for the following discussion:
+ {discussion}
+ ...
+ """
+         template = """{
+     "title": "An short but comprehensive discussion title"
+ }"""
+         infos = self.lollmsClient.generate_code(prompt = prompt, template = template)
+         discussion_title = robust_json_parser(infos)["title"]
+         self.metadata['title'] = discussion_title
+         self.commit()
+         return discussion_title
+     except Exception as ex:
+         trace_exception(ex)
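A hedged sketch of the two new helpers, assuming a database-backed discussion (auto_title calls commit()) and a hypothetical branch ID:

    discussion.switch_to_branch("9f4c2e-example-id")  # just moves the active branch pointer
    title = discussion.auto_title()                   # asks the LLM for a title, stores it in metadata, commits
    print(title)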
lollms_client/lollms_types.py
CHANGED
@@ -1,7 +1,7 @@
 from enum import Enum
 class MSG_TYPE(Enum):
     # Messaging
-     MSG_TYPE_CHUNK
+     MSG_TYPE_CHUNK = 0 # A chunk of a message (used for classical chat)
     MSG_TYPE_CONTENT = 1 # A full message (for some personality the answer is sent in bulk)
     MSG_TYPE_CONTENT_INVISIBLE_TO_AI = 2 # A full message (for some personality the answer is sent in bulk)
     MSG_TYPE_CONTENT_INVISIBLE_TO_USER = 3 # A full message (for some personality the answer is sent in bulk)

@@ -36,6 +36,14 @@ class MSG_TYPE(Enum):
     MSG_TYPE_TOOL_CALL = 19# a tool call
     MSG_TYPE_TOOL_OUTPUT = 20# the output of the tool

+     MSG_TYPE_REASONING = 21# the ai shows its reasoning
+     MSG_TYPE_SCRATCHPAD = 22# the ai shows its scratchpad
+     MSG_TYPE_OBSERVATION = 23# the ai shows its reasoning
+
+     MSG_TYPE_ERROR = 24#a severe error hapened
+     MSG_TYPE_GENERATING_TITLE_START = 25#a severe error hapened
+     MSG_TYPE_GENERATING_TITLE_END = 26#a severe error hapened
+

 class SENDER_TYPES(Enum):
     SENDER_TYPES_USER = 0 # Sent by user
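The new message types are mostly consumed through the streaming callback of generate_with_mcp_rag. A minimal callback sketch following the Callable[[str, MSG_TYPE, Optional[Dict], Optional[List]], bool] signature shown earlier (the last parameter name is our guess):

    from lollms_client.lollms_types import MSG_TYPE

    def on_event(text, msg_type, params=None, turn_history=None):
        # Route agent events to the console; return True to keep generation going.
        if msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
            print(f"[observation] {text}")
        elif msg_type in (MSG_TYPE.MSG_TYPE_STEP_START, MSG_TYPE.MSG_TYPE_STEP_END):
            print(f"[step] {text}")
        elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
            print(text, end="", flush=True)
        return True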
lollms_client/lollms_utilities.py
CHANGED

@@ -11,6 +11,74 @@ import numpy as np
 import json
 from ascii_colors import ASCIIColors, trace_exception

+ def dict_to_markdown(d, indent=0):
+     """
+     Formats a dictionary (with potential nested lists and dicts) as a markdown list.
+
+     Args:
+         d (dict): The dictionary to format.
+         indent (int): Current indentation level (used recursively).
+
+     Returns:
+         str: The formatted markdown string.
+     """
+     lines = []
+     indent_str = ' ' * (indent * 2)
+
+     for key, value in d.items():
+         if isinstance(value, dict):
+             # Recursively handle nested dictionary
+             lines.append(f"{indent_str}- {key}:")
+             lines.append(dict_to_markdown(value, indent + 1))
+         elif isinstance(value, list):
+             lines.append(f"{indent_str}- {key}:")
+             for item in value:
+                 if isinstance(item, dict):
+                     # Render nested dicts in the list
+                     lines.append(dict_to_markdown(item, indent + 1))
+                 else:
+                     # Render strings or other simple items in the list
+                     lines.append(f"{' ' * (indent + 1) * 2}- {item}")
+         else:
+             # Simple key-value pair
+             lines.append(f"{indent_str}- {key}: {value}")
+
+     return "\n".join(lines)
+
+ def is_base64(s):
+     """Check if the string is a valid base64 encoded string."""
+     try:
+         # Try to decode and then encode back to check for validity
+         import base64
+         base64.b64decode(s)
+         return True
+     except Exception as e:
+         return False
+
+ def build_image_dicts(images):
+     """
+     Convert a list of image strings (base64 or URLs) into a list of dictionaries with type and data.
+
+     Args:
+         images (list): List of image strings (either base64-encoded or URLs).
+
+     Returns:
+         list: List of dictionaries in the format {'type': 'base64'/'url', 'data': <image string>}.
+     """
+     result = []
+
+     for img in images:
+         if isinstance(img, str):
+             if is_base64(img):
+                 result.append({'type': 'base64', 'data': img})
+             else:
+                 # Assuming it's a URL if not base64
+                 result.append({'type': 'url', 'data': img})
+         else:
+             result.append(img)
+
+     return result
+
 def robust_json_parser(json_string: str) -> dict:
     """
     Parses a possibly malformed JSON string using a series of corrective strategies.
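A quick standalone check of what the two new helpers produce (values chosen for illustration):

    from lollms_client.lollms_utilities import dict_to_markdown, build_image_dicts

    print(dict_to_markdown({"status": "success", "chunks": [{"text": "abc"}, "raw"]}))
    # - status: success
    # - chunks:
    #   - text: abc
    #   - raw

    print(build_image_dicts(["aGk="]))   # a well-formed base64 payload
    # [{'type': 'base64', 'data': 'aGk='}]  (strings that fail base64 decoding are tagged 'url')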
lollms_client/mcp_bindings/remote_mcp/__init__.py
CHANGED

@@ -119,7 +119,26 @@ class RemoteMCPBinding(LollmsMCPBinding):
 future = asyncio.run_coroutine_threadsafe(coro, self._loop)
 return future.result(timeout)

-
+ def _prepare_headers(self, alias: str) -> Dict[str, str]:
+     """Prepares the headers dictionary from the server's auth_config."""
+     server_info = self.servers[alias]
+     auth_config = server_info.get("auth_config", {})
+     headers = {}
+     auth_type = auth_config.get("type")
+     if auth_type == "api_key":
+         api_key = auth_config.get("key")
+         header_name = auth_config.get("header_name", "X-API-Key") # Default to X-API-Key
+         if api_key:
+             headers[header_name] = api_key
+             ASCIIColors.info(f"{self.binding_name}: Using API Key authentication for server '{alias}'.")
+
+     elif auth_type == "bearer": # <-- NEW BLOCK
+         token = auth_config.get("token")
+         if token:
+             headers["Authorization"] = f"Bearer {token}"
+
+     return headers
+
 async def _initialize_connection_async(self, alias: str) -> bool:
     server_info = self.servers[alias]
     if server_info["initialized"]:
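_prepare_headers reads the server's auth_config and turns it into HTTP headers. Hedged examples of the two supported shapes and the headers they would produce (key and token values are placeholders):

    api_key_auth = {"type": "api_key", "key": "sk-placeholder", "header_name": "X-API-Key"}
    bearer_auth  = {"type": "bearer", "token": "token-placeholder"}
    # api_key  -> {"X-API-Key": "sk-placeholder"}
    # bearer   -> {"Authorization": "Bearer token-placeholder"}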
@@ -128,10 +147,13 @@ class RemoteMCPBinding(LollmsMCPBinding):
 server_url = server_info["url"]
 ASCIIColors.info(f"{self.binding_name}: Initializing connection to '{alias}' ({server_url})...")
 try:
+     # Prepare authentication headers
+     auth_headers = self._prepare_headers(alias)
+
     exit_stack = AsyncExitStack()

     client_streams = await exit_stack.enter_async_context(
-         streamablehttp_client(server_url)
+         streamablehttp_client(url=server_url, headers=auth_headers) # Pass the headers here
     )
     read_stream, write_stream, _ = client_streams

@@ -294,7 +316,7 @@ class RemoteMCPBinding(LollmsMCPBinding):

 try:
     # Ensure this specific server is connected before executing
-     self._ensure_initialized_sync(alias, timeout=
+     self._ensure_initialized_sync(alias, timeout=timeout)
     return self._run_async(self._execute_tool_async(alias, actual_tool_name, params), timeout=timeout)
 except (ConnectionError, RuntimeError) as e:
     return {"error": f"{self.binding_name}: Connection issue for server '{alias}': {e}", "status_code": 503}
@@ -342,4 +364,60 @@ class RemoteMCPBinding(LollmsMCPBinding):
         ASCIIColors.info(f"{self.binding_name}: Remote connection binding closed.")
 
     def get_binding_config(self) -> Dict[str, Any]:
-        return self.config
+        return self.config
+
+
+    def set_auth_config(self, alias: str, auth_config: Dict[str, Any]):
+        """
+        Dynamically updates the authentication configuration for a specific server.
+
+        If a connection was already active for this server, it will be closed to force
+        a new connection with the new authentication details on the next call.
+
+        Args:
+            alias (str): The alias of the server to update (the key in servers_infos).
+            auth_config (Dict[str, Any]): The new authentication configuration dictionary.
+                                          Example: {"type": "bearer", "token": "new-token-here"}
+        """
+        ASCIIColors.info(f"{self.binding_name}: Updating auth_config for server '{alias}'.")
+
+        server_info = self.servers.get(alias)
+        if not server_info:
+            raise ValueError(f"Server alias '{alias}' does not exist in the configuration.")
+
+        # Update the configuration in the binding's internal state
+        server_info["config"]["auth_config"] = auth_config
+
+        # If the server was already initialized, its connection is now obsolete.
+        # We must close it and mark it as uninitialized.
+        if server_info["initialized"]:
+            ASCIIColors.warning(f"{self.binding_name}: Existing connection for '{alias}' is outdated due to new authentication. It will be reset.")
+            try:
+                # Execute the close operation asynchronously on the event loop thread
+                self._run_async(self._close_connection_async(alias), timeout=10.0)
+            except Exception as e:
+                ASCIIColors.error(f"{self.binding_name}: Error while closing the outdated connection for '{alias}': {e}")
+            # Even on error, reset the state to force a new connection attempt
+            server_info.update({"session": None, "exit_stack": None, "initialized": False})
+
+
+    # --- NEW INTERNAL HELPER METHOD ---
+    async def _close_connection_async(self, alias: str):
+        """Cleanly closes the connection for a specific server alias."""
+        server_info = self.servers.get(alias)
+        if not server_info or not server_info.get("exit_stack"):
+            return # Nothing to do.
+
+        ASCIIColors.info(f"{self.binding_name}: Closing connection for '{alias}'...")
+        try:
+            await server_info["exit_stack"].aclose()
+        except Exception as e:
+            trace_exception(e)
+            ASCIIColors.error(f"{self.binding_name}: Exception while closing the exit_stack for '{alias}': {e}")
+        finally:
+            # Reset the state for this alias, no matter what.
+            server_info.update({
+                "session": None,
+                "exit_stack": None,
+                "initialized": False
+            })
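The new set_auth_config method lets callers swap credentials at runtime: the stored auth_config is replaced and any live connection for that alias is closed, so the next tool call reconnects with the new headers. A hedged usage sketch follows; the helper name, the binding variable, and the alias "remote_tools" are assumptions, while set_auth_config and the {"type": "bearer", "token": ...} shape come from the diff above:

from typing import Any

def rotate_bearer_token(binding: Any, alias: str, new_token: str) -> None:
    # After this call the old connection for `alias` is reset; the next
    # execute_tool call reconnects and sends "Authorization: Bearer <new_token>".
    binding.set_auth_config(alias, {"type": "bearer", "token": new_token})

# Example (assuming `binding` is an initialized RemoteMCPBinding and
# "remote_tools" is one of its registered server aliases):
# rotate_bearer_token(binding, "remote_tools", "new-token-here")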
@@ -26,10 +26,10 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=Vt2zeJ4Ekn2UWxfSKbn_pjE-QGL7uwoTnbTFuFIOyUk,1047
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
-lollms_client/lollms_discussion.py,sha256=
+lollms_client/lollms_core.py,sha256=m_qfzybasY61KgAPVa84tdkqJWIog9iuIZc88pQQ-vw,158842
+lollms_client/lollms_discussion.py,sha256=JqKx--a6YMzL6ec6N9OD0B9oRlmkSV_KDKXjqP8291Y,47636
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=Kpzhs5Jx8eAlaaUacYnKV7qIq2wbME5lOEtKSfJKbpg,12161
 lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
@@ -40,13 +40,13 @@ lollms_client/lollms_tti_binding.py,sha256=afO0-d-Kqsmh8UHTijTvy6dZAt-XDB6R-IHmd
 lollms_client/lollms_ttm_binding.py,sha256=FjVVSNXOZXK1qvcKEfxdiX6l2b4XdGOSNnZ0utAsbDg,4167
 lollms_client/lollms_tts_binding.py,sha256=5cJYECj8PYLJAyB6SEH7_fhHYK3Om-Y3arkygCnZ24o,4342
 lollms_client/lollms_ttv_binding.py,sha256=KkTaHLBhEEdt4sSVBlbwr5i_g_TlhcrwrT-7DjOsjWQ,4131
-lollms_client/lollms_types.py,sha256=
-lollms_client/lollms_utilities.py,sha256=
+lollms_client/lollms_types.py,sha256=0iSH1QHRRD-ddBqoL9EEKJ8wWCuwDUlN_FrfbCdg7Lw,3522
+lollms_client/lollms_utilities.py,sha256=zx1X4lAXQ2eCUM4jDpu_1QV5oMGdFkpaSEdTASmaiqE,13545
 lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
 lollms_client/llm_bindings/llamacpp/__init__.py,sha256=Qj5RvsgPeHGNfb5AEwZSzFwAp4BOWjyxmm9qBNtstrc,63716
-lollms_client/llm_bindings/lollms/__init__.py,sha256=
+lollms_client/llm_bindings/lollms/__init__.py,sha256=jfiCGJqMensJ7RymeGDDJOsdokEdlORpw9ND_Q30GYc,17831
 lollms_client/llm_bindings/ollama/__init__.py,sha256=QufsYqak2VlA2XGbzks8u55yNJFeDH2V35NGeZABkm8,32554
-lollms_client/llm_bindings/openai/__init__.py,sha256=
+lollms_client/llm_bindings/openai/__init__.py,sha256=4Mk8eBdc9VScI0Sdh4g4p_0eU2afJeCEUEJnCQO-QkM,20014
 lollms_client/llm_bindings/openllm/__init__.py,sha256=xv2XDhJNCYe6NPnWBboDs24AQ1VJBOzsTuMcmuQ6xYY,29864
 lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=7dM42TCGKh0eV0njNL1tc9cInhyvBRIXzN3dcy12Gl0,33551
 lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=nPaNhGRd-bsG0UlYwcEqjd_UagCMEf5VEbBUW-GWu6A,32203
@@ -57,7 +57,7 @@ lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py,sh
 lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py,sha256=THtZsMxNnXZiBdkwoBlfbWY2C5hhDdmPtnM-8cSKN6s,9488
 lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py,sha256=PLC31-D04QKTOTb1uuCHnrAlpysQjsk89yIJngK0VGc,4586
 lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py,sha256=McDCBVoVrMDYgU7EYtyOY7mCk1uEeTea0PSD69QqDsQ,6228
-lollms_client/mcp_bindings/remote_mcp/__init__.py,sha256=
+lollms_client/mcp_bindings/remote_mcp/__init__.py,sha256=6ebENOqO-oUk3IpitVyiMGRICSl_X5DKKaGG52BdiT8,20388
 lollms_client/mcp_bindings/standard_mcp/__init__.py,sha256=zpF4h8cTUxoERI-xcVjmS_V772LK0V4jegjz2k1PK98,31658
 lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lollms_client/stt_bindings/lollms/__init__.py,sha256=jBz3285atdPRqQe9ZRrb-AvjqKRB4f8tjLXjma0DLfE,6082
@@ -79,8 +79,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
+lollms_client-0.25.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.25.1.dist-info/METADATA,sha256=4yR9ohOc_JjNnJeDRTdbzfYbKkmMpl0wbw0Y9D2P0gc,13401
+lollms_client-0.25.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.25.1.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.25.1.dist-info/RECORD,,
File without changes
File without changes
File without changes