lollms-client 1.3.0__py3-none-any.whl → 1.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/ollama/__init__.py +80 -23
- lollms_client/lollms_agentic.py +361 -0
- lollms_client/lollms_core.py +305 -308
- lollms_client/tti_bindings/diffusers/__init__.py +20 -18
- {lollms_client-1.3.0.dist-info → lollms_client-1.3.2.dist-info}/METADATA +1 -1
- {lollms_client-1.3.0.dist-info → lollms_client-1.3.2.dist-info}/RECORD +10 -9
- {lollms_client-1.3.0.dist-info → lollms_client-1.3.2.dist-info}/WHEEL +0 -0
- {lollms_client-1.3.0.dist-info → lollms_client-1.3.2.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.3.0.dist-info → lollms_client-1.3.2.dist-info}/top_level.txt +0 -0
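
The headline change is in lollms_client/lollms_core.py: generate_with_mcp_rag now triages each request into a strategy (DIRECT_ANSWER, REQUEST_CLARIFICATION, SINGLE_TOOL or COMPLEX_PLAN) and hands multi-step work to a new _execute_complex_reasoning_loop built on the planner, memory and tool-tracking helpers from the new lollms_client/lollms_agentic.py module. A minimal usage sketch based on the new signature visible in the diff below; the client constructor arguments, the MSG_TYPE import path and the callback body are illustrative assumptions, not part of this diff:

    from lollms_client import LollmsClient
    from lollms_client.lollms_types import MSG_TYPE  # assumed import path

    lc = LollmsClient("ollama", model_name="mistral")  # hypothetical constructor arguments

    def on_event(text, msg_type, params=None, metadata=None) -> bool:
        # Matches streaming_callback: Callable[[str, 'MSG_TYPE', Optional[Dict], Optional[List]], bool]
        if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
            print(text, end="", flush=True)
        return True  # returning False asks generation to stop

    result = lc.generate_with_mcp_rag(
        prompt="Find the latest sales figures and summarize them.",
        use_mcps=True,             # True exposes all discovered MCP tools; a list filters them by name
        max_reasoning_steps=15,    # default in the new signature
        streaming_callback=on_event,
    )
    print(result["final_answer"])
    print(result["tool_calls"], result["sources"])
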
lollms_client/lollms_core.py
CHANGED
@@ -14,6 +14,8 @@ from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingM
 
 from lollms_client.lollms_discussion import LollmsDiscussion
 
+from lollms_client.lollms_agentic import TaskStatus, TaskPlanner, MemoryManager, UncertaintyManager, ToolPerformanceTracker
+
 from lollms_client.lollms_utilities import build_image_dicts, dict_to_markdown
 import json, re
 from enum import Enum
@@ -23,7 +25,8 @@ from typing import List, Optional, Callable, Union, Dict, Any
 import numpy as np
 from pathlib import Path
 import uuid
-
+import hashlib
+import time
 class LollmsClient():
     """
     Core client class for interacting with LOLLMS services, including LLM, TTS, TTI, STT, TTV, and TTM.
@@ -1442,375 +1445,369 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
         )
         new_scratchpad_text = self.generate_text(prompt=synthesis_prompt, n_predict=1024, temperature=0.0)
         return self.remove_thinking_blocks(new_scratchpad_text).strip()
+
 
+    def _get_friendly_action_description(self, tool_name: str, requires_code: bool, requires_image: bool) -> str:
+        """Convert technical tool names to user-friendly descriptions for logging."""
+        # Handle specific, high-priority built-in tools first
+        if tool_name == "local_tools::final_answer":
+            return "📋 Ready to provide your final answer"
+        elif tool_name == "local_tools::request_clarification":
+            return "❓ Asking for more information to proceed"
+        elif tool_name == "local_tools::generate_image":
+            return "🎨 Creating an image based on your request"
+
+        # Handle RAG (data store) tools by their pattern
+        elif "research::" in tool_name:
+            # Extract the friendly name of the data source
+            source_name = tool_name.split("::")[-1].replace("_", " ").title()
+            return f"🔍 Searching {source_name} for relevant information"
+
+        # Handle generic actions based on their input requirements
+        elif requires_code:
+            return "💻 Working on a coding solution"
+        elif requires_image:
+            return "🖼️ Analyzing the provided image(s)"
+
+        # Default fallback for any other tool
+        else:
+            # Clean up the technical tool name for a more readable display
+            clean_name = tool_name.replace("_", " ").replace("::", " - ").title()
+            return f"🔧 Using the {clean_name} tool"
     def generate_with_mcp_rag(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            self,
+            prompt: str,
+            context: Optional[str] = None,
+            use_mcps: Union[None, bool, List[str]] = None,
+            use_data_store: Union[None, Dict[str, Callable]] = None,
+            system_prompt: str|None = None,
+            reasoning_system_prompt: str = "You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.",
+            images: Optional[List[str]] = None,
+            max_reasoning_steps: int = 15,
+            decision_temperature: float = 0.5,
+            final_answer_temperature: float = 0.7,
+            streaming_callback: Optional[Callable[[str, 'MSG_TYPE', Optional[Dict], Optional[List]], bool]] = None,
+            rag_top_k: int = 5,
+            rag_min_similarity_percent: float = 50.0,
+            output_summarization_threshold: int = 500,
+            force_mcp_use: bool = False,
+            debug: bool = False,
+            enable_parallel_execution: bool = True,
+            enable_self_reflection: bool = True,
+            **llm_generation_kwargs
+    ) -> Dict[str, Any]:
+
         if not self.llm:
             return {"final_answer": "", "tool_calls": [], "sources": [], "error": "LLM binding not initialized."}
-        if max_reasoning_steps is None:
-            max_reasoning_steps = 10
 
         def log_event(desc, event_type=MSG_TYPE.MSG_TYPE_CHUNK, meta=None, event_id=None) -> Optional[str]:
-            if not streaming_callback:
-                return None
+            if not streaming_callback: return None
            is_start = event_type == MSG_TYPE.MSG_TYPE_STEP_START
             event_id = str(uuid.uuid4()) if is_start and not event_id else event_id
             params = {"type": event_type, "description": desc, **(meta or {})}
-            if event_id:
-                params["id"] = event_id
+            if event_id: params["id"] = event_id
             streaming_callback(desc, event_type, params)
             return event_id
 
         def log_prompt(title: str, prompt_text: str):
-            if not debug:
-                return
+            if not debug: return
             ASCIIColors.cyan(f"** DEBUG: {title} **")
             ASCIIColors.magenta(prompt_text[-15000:])
             prompt_size = self.count_tokens(prompt_text)
             ASCIIColors.red(f"Prompt size:{prompt_size}/{self.llm.default_ctx_size}")
             ASCIIColors.cyan(f"** DEBUG: DONE **")
 
-
-
-        initial_state_parts = ["### Initial State", "- My goal is to address the user's request comprehensively."]
-        if images:
-            for img_b64 in images:
-                img_uuid = str(uuid.uuid4())
-                asset_store[img_uuid] = {"type": "image", "content": img_b64}
-                initial_state_parts.append(f"- User provided image, asset ID: {img_uuid}")
-        if context:
-            code_blocks = re.findall(r"```(?:\w+)?\n([\s\S]+?)\n```", context)
-            if code_blocks:
-                last_code_block = code_blocks[-1]
-                code_uuid = str(uuid.uuid4())
-                asset_store[code_uuid] = {"type": "code", "content": last_code_block}
-                initial_state_parts.append(f"- A code block was found in the context. It has been registered as asset ID: {code_uuid}")
-        current_scratchpad = "\n".join(initial_state_parts)
-
-        discovery_step_id = log_event("Discovering tools...", MSG_TYPE.MSG_TYPE_STEP_START)
-        all_discovered_tools, visible_tools = [], []
-        rag_registry: Dict[str, Callable] = {}
-        rag_tool_specs: Dict[str, Dict] = {}
-
+        discovery_step_id = log_event("🔧 Setting up capabilities...", MSG_TYPE.MSG_TYPE_STEP_START)
+        all_discovered_tools, visible_tools, rag_registry, rag_tool_specs = [], [], {}, {}
         if use_mcps and hasattr(self, 'mcp'):
             mcp_tools = self.mcp.discover_tools(force_refresh=True)
-            if isinstance(use_mcps, list):
-
-            elif use_mcps is True:
-                all_discovered_tools.extend(mcp_tools)
-
+            if isinstance(use_mcps, list): all_discovered_tools.extend([t for t in mcp_tools if t["name"] in use_mcps])
+            elif use_mcps is True: all_discovered_tools.extend(mcp_tools)
         if use_data_store:
             for name, info in use_data_store.items():
-                tool_name = f"research::{name}"
-
-                call_fn = None
-                if callable(info):
-                    call_fn = info
+                tool_name, description, call_fn = f"research::{name}", f"Queries the '{name}' knowledge base.", None
+                if callable(info): call_fn = info
                 elif isinstance(info, dict):
-                    if "
-                        call_fn = info["call"]
+                    if "callable" in info and callable(info["callable"]): call_fn = info["callable"]
                     description = info.get("description", description)
                 if call_fn:
-                    visible_tools.append({
-                        "name": tool_name,
-                        "description": description,
-                        "input_schema": {
-                            "type": "object",
-                            "properties": {
-                                "query": {"type": "string"},
-                                "top_k": {"type": "integer"},
-                                "min_similarity_percent": {"type": "number"},
-                                "filters": {"type": "object"}
-                            },
-                            "required": ["query"]
-                        }
-                    })
+                    visible_tools.append({"name": tool_name, "description": description, "input_schema": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}})
                     rag_registry[tool_name] = call_fn
                     rag_tool_specs[tool_name] = {"default_top_k": rag_top_k, "default_min_sim": rag_min_similarity_percent}
-                else:
-                    log_event("RAG tool registration failed", MSG_TYPE.MSG_TYPE_WARNING, meta={"store_name": name})
-
         visible_tools.extend(all_discovered_tools)
+        built_in_tools = [{"name": "local_tools::final_answer", "description": "Provide the final answer directly to the user.", "input_schema": {}}]
+        if getattr(self, "tti", None): built_in_tools.append({"name": "local_tools::generate_image", "description": "Generate an image from a text description.", "input_schema": {"type": "object", "properties": {"prompt": {"type": "string"}}, "required": ["prompt"]}})
+        all_visible_tools = visible_tools + built_in_tools
+        tool_summary = "\n".join([f"- {t['name']}: {t['description']}" for t in all_visible_tools[:15]])
+        log_event(f"✅ Ready with {len(all_visible_tools)} capabilities", MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
 
-
-
-
-
+        triage_step_id = log_event("🤔 Analyzing the best approach...", MSG_TYPE.MSG_TYPE_STEP_START)
+        strategy = "COMPLEX_PLAN"
+        strategy_data = {}
+        try:
+            triage_prompt = f"""Analyze the user's request and determine the most efficient strategy.
+USER REQUEST: "{prompt}"
+AVAILABLE TOOLS:\n{tool_summary}
+Choose a strategy:
+- "DIRECT_ANSWER": For greetings or simple questions that need no tools.
+- "REQUEST_CLARIFICATION": If the request is ambiguous and you need more information from the user.
+- "SINGLE_TOOL": If the request can be resolved with one tool call.
+- "COMPLEX_PLAN": For multi-step requests requiring multiple tools or complex reasoning.
+
+Provide your decision as JSON: {{"thought": "...", "strategy": "...", "text_output": "Your direct answer or clarification question.", "required_tool_name": "..."}}"""
+
+            triage_schema = {
+                "thought": "string", "strategy": "string",
+                "text_output": "string", "required_tool_name": "string"
+            }
+            strategy_data = self.generate_structured_content(prompt=triage_prompt, schema=triage_schema, temperature=0.1, **llm_generation_kwargs)
+            strategy = strategy_data.get("strategy") if strategy_data else "COMPLEX_PLAN"
+        except Exception as e:
+            log_event(f"Triage failed, defaulting to complex plan. Error: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=triage_step_id)
 
-        if
-
-
-                "description": "Generate an image from a text description. Returns a base64-encoded image.",
-                "input_schema": {"type": "object", "properties": {"prompt": {"type": "string"}}, "required": ["prompt"]}
-            })
+        if force_mcp_use and strategy == "DIRECT_ANSWER":
+            strategy = "COMPLEX_PLAN"
+        log_event(f"✅ Approach decided: {strategy.replace('_', ' ').title()}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=triage_step_id)
 
-
-
-
-            f"
-            MSG_TYPE.MSG_TYPE_STEP_END,
-            meta={"visible": len(all_visible_tools), "rag_tools": list(rag_registry.keys())},
-            event_id=discovery_step_id
-        )
+        if strategy == "DIRECT_ANSWER":
+            final_answer = strategy_data.get("text_output", "I can help with that.")
+            if streaming_callback: streaming_callback(final_answer, MSG_TYPE.MSG_TYPE_FULL, {})
+            return {"final_answer": final_answer, "tool_calls": [], "sources": [], "error": None, "clarification_required": False, "final_scratchpad": f"Strategy: DIRECT_ANSWER\nThought: {strategy_data.get('thought')}"}
 
-
-
+        if strategy == "REQUEST_CLARIFICATION":
+            clarification_question = strategy_data.get("text_output", "Could you please provide more details?")
+            return {"final_answer": clarification_question, "tool_calls": [], "sources": [], "error": None, "clarification_required": True, "final_scratchpad": f"Strategy: REQUEST_CLARIFICATION\nThought: {strategy_data.get('thought')}"}
+
+        if strategy == "SINGLE_TOOL":
+            synthesis_id = log_event("⚡ Taking a direct approach...", MSG_TYPE.MSG_TYPE_STEP_START)
             try:
-
-
+                tool_name = strategy_data.get("required_tool_name")
+                tool_spec = next((t for t in all_visible_tools if t['name'] == tool_name), None)
+                if not tool_spec:
+                    raise ValueError(f"LLM chose an unavailable tool: '{tool_name}'")
+
+                param_prompt = f"""Given the user request, generate the correct parameters for the selected tool.
+USER REQUEST: "{prompt}"
+SELECTED TOOL: {json.dumps(tool_spec, indent=2)}
+Output ONLY the JSON for the tool's parameters: {{"tool_params": {{...}}}}"""
+                param_data = self.generate_structured_content(prompt=param_prompt, schema={"tool_params": "object"}, temperature=0.1, **llm_generation_kwargs)
+                tool_params = param_data.get("tool_params", {}) if param_data else {}
+
+                start_time, sources, tool_result = time.time(), [], {}
+                if tool_name in rag_registry:
+                    query = tool_params.get("query", prompt)
+                    rag_fn = rag_registry[tool_name]
+                    raw_results = rag_fn(query=query, rag_top_k=rag_top_k, rag_min_similarity_percent=rag_min_similarity_percent)
+                    docs = [d for d in (raw_results.get("results", []) if isinstance(raw_results, dict) else raw_results or [])]
+                    tool_result = {"status": "success", "results": docs}
+                    sources = [{"source": tool_name, "metadata": d.get("metadata", {}), "score": d.get("score", 0.0)} for d in docs]
+                elif hasattr(self, "mcp") and "local_tools" not in tool_name:
+                    tool_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
+                else:
+                    tool_result = {"status": "failure", "error": f"Tool '{tool_name}' could not be executed in single-step mode."}
+
+                if tool_result.get("status") != "success":
+                    error_detail = tool_result.get("error", "Unknown tool error in single-step mode.")
+                    raise RuntimeError(error_detail)
 
-
-                {
-
+                response_time = time.time() - start_time
+                tool_calls_this_turn = [{"name": tool_name, "params": tool_params, "result": tool_result, "response_time": response_time}]
+
+                synthesis_prompt = f"""The user asked: "{prompt}"
+I used the tool '{tool_name}' and got this result: {json.dumps(tool_result, indent=2)}
+Synthesize a direct, user-friendly final answer."""
+                final_answer = self.generate_text(prompt=synthesis_prompt, system_prompt=system_prompt, stream=streaming_callback is not None, streaming_callback=streaming_callback, temperature=final_answer_temperature, **llm_generation_kwargs)
+                final_answer = self.remove_thinking_blocks(final_answer)
+
+                log_event("✅ Direct answer ready!", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id)
+                return {"final_answer": final_answer, "tool_calls": tool_calls_this_turn, "sources": sources, "error": None, "clarification_required": False, "final_scratchpad": f"Strategy: SINGLE_TOOL\nTool: {tool_name}\nResult: {json.dumps(tool_result)}"}
 
-
-
-
-
+            except Exception as e:
+                log_event(f"Direct approach failed: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=synthesis_id)
+                log_event("Escalating to a more detailed plan.", MSG_TYPE.MSG_TYPE_INFO)
+
+        return self._execute_complex_reasoning_loop(
+            prompt=prompt, context=context, system_prompt=system_prompt,
+            reasoning_system_prompt=reasoning_system_prompt, images=images,
+            max_reasoning_steps=max_reasoning_steps, decision_temperature=decision_temperature,
+            final_answer_temperature=final_answer_temperature, streaming_callback=streaming_callback,
+            debug=debug, enable_self_reflection=enable_self_reflection,
+            all_visible_tools=all_visible_tools, rag_registry=rag_registry, rag_tool_specs=rag_tool_specs,
+            log_event_fn=log_event, log_prompt_fn=log_prompt,
+            **llm_generation_kwargs
+        )
 
-
-
-
-
-
-
-
-
+    def _execute_complex_reasoning_loop(
+        self, prompt, context, system_prompt, reasoning_system_prompt, images,
+        max_reasoning_steps, decision_temperature, final_answer_temperature,
+        streaming_callback, debug, enable_self_reflection, all_visible_tools,
+        rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, **llm_generation_kwargs
+    ) -> Dict[str, Any]:
+
+        planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
+
+        def _get_friendly_action_description(tool_name, requires_code, requires_image):
+            if tool_name == "local_tools::final_answer": return "📋 Ready to provide your answer"
+            if tool_name == "local_tools::request_clarification": return "❓ Need to ask for clarification"
+            if tool_name == "local_tools::generate_image": return "🎨 Creating an image for you"
+            if "research::" in tool_name: return f"🔍 Searching {tool_name.split('::')[-1]} for information"
+            if requires_code: return "💻 Working on a coding solution"
+            if requires_image: return "🖼️ Analyzing the provided images"
+            return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-            log_event("Invalid decision JSON", MSG_TYPE.MSG_TYPE_WARNING, meta={"decision_raw": str(decision_data)}, event_id=reasoning_step_id)
-            current_scratchpad += "\n\n### Step Failure\n- Error: Invalid decision JSON."
-            continue
+        original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
+        asset_store: Dict[str, Dict] = {}
+
+        planning_step_id = log_event_fn("📋 Creating a detailed plan...", MSG_TYPE.MSG_TYPE_STEP_START)
+        execution_plan = planner.decompose_task(original_user_prompt, context or "")
+        log_event_fn(f"✅ Plan ready ({len(execution_plan.tasks)} steps)", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
+
+        initial_state_parts = [f"### Execution Plan\n- Total tasks: {len(execution_plan.tasks)}"]
+        for i, task in enumerate(execution_plan.tasks): initial_state_parts.append(f"  {i+1}. {task.description}")
+        if images:
+            for img_b64 in images:
+                img_uuid = str(uuid.uuid4())
+                asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
+                initial_state_parts.append(f"- User provided image, asset ID: {img_uuid}")
+        current_scratchpad = "\n".join(initial_state_parts)
 
-
-
-
-
-
-
+        formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
+        completed_tasks, current_task_index = set(), 0
+
+        for i in range(max_reasoning_steps):
+            step_desc = f"🤔 Working on: {execution_plan.tasks[current_task_index].description}" if current_task_index < len(execution_plan.tasks) else f"🤔 Analyzing next steps... ({i+1}/{max_reasoning_steps})"
+            reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
+
+            try:
+                if len(current_scratchpad) > 12000:
+                    current_scratchpad = memory_manager.compress_scratchpad(current_scratchpad, original_user_prompt, 8000)
+
+                reasoning_prompt = f"""--- AVAILABLE ACTIONS ---\n{formatted_tools_list}\n--- YOUR INTERNAL SCRATCHPAD ---\n{current_scratchpad}\n--- END SCRATCHPAD ---\n
+INSTRUCTIONS: Observe, think, and then act. Choose the single best next action to achieve: "{original_user_prompt}".
+Produce ONLY this JSON: {{"thought": "short reasoning", "action": {{"tool_name": "...", "requires_code_input": false, "requires_image_input": false}}}}"""
+                decision_data = self.generate_structured_content(prompt=reasoning_prompt, schema={"thought": "string", "action": "object"}, system_prompt=reasoning_system_prompt, temperature=decision_temperature, **llm_generation_kwargs)
+
+                if not (decision_data and isinstance(decision_data.get("action"), dict)):
+                    log_event_fn("LLM failed to produce a valid action JSON.", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
+                    current_scratchpad += "\n\n### Step Failure\n- Error: Invalid decision JSON from LLM."
+                    continue
 
-
-
+                action = decision_data.get("action", {})
+                tool_name, requires_code, requires_image = action.get("tool_name"), action.get("requires_code_input", False), action.get("requires_image_input", False)
+                current_scratchpad += f"\n\n### Step {i+1}: Thought\n{decision_data.get('thought', '')}"
+
+                log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP)
+                if tool_name == "local_tools::final_answer": break
                 if tool_name == "local_tools::request_clarification":
-
-
-
-                        "tool_calls": tool_calls_this_turn,
-                        "sources": sources_this_turn,
-                        "clarification_required": True,
-                        "error": None
-                    }
+                    clarification_prompt = f"Based on your thought process, what is the single question you need to ask the user?\n\nSCRATCHPAD:\n{current_scratchpad}\n\nQUESTION:"
+                    question = self.generate_text(clarification_prompt)
+                    return {"final_answer": self.remove_thinking_blocks(question), "clarification_required": True, "final_scratchpad": current_scratchpad, "tool_calls": tool_calls_this_turn, "sources": sources_this_turn, "error": None}
 
-
+                param_assets = {}
                 if requires_code:
-                    code_prompt = f"
-
-
-
-
-
-
-INSTRUCTIONS:
-Generate raw code only, with no explanations. The code must be self-contained and directly address the current next action."""
-                    log_prompt("Code Generation Prompt", code_prompt)
-                    generated_code = self.generate_code(prompt=code_prompt, system_prompt="Generate ONLY raw code.", **llm_generation_kwargs)
-                    code_uuid = str(uuid.uuid4())
-                    asset_store[code_uuid] = {"type": "code", "content": generated_code}
-                    prepared_assets["code_asset_id"] = code_uuid
-                    log_event("Code asset created", MSG_TYPE.MSG_TYPE_STEP, meta={"code_asset_id": code_uuid, "code_len": len(generated_code) if isinstance(generated_code, str) else None})
-
+                    code_prompt = f"Generate only the raw code required for the current step.\n\nSCRATCHPAD:\n{current_scratchpad}\n\nCODE:"
+                    code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
+                    code_uuid = f"code_asset_{uuid.uuid4()}"
+                    asset_store[code_uuid] = {"type": "code", "content": code_content}
+                    param_assets['code_asset_id'] = code_uuid
+                    log_event_fn("Code asset generated.", MSG_TYPE.MSG_TYPE_STEP)
                 if requires_image:
-                    for
-
-
-
-
-
-
-                {
-
-
-code_asset_id: {prepared_assets.get("code_asset_id","<none>")}
-image_asset_ids: {prepared_assets.get("image_asset_ids","<none>")}
-
---- ORIGINAL USER REQUEST ---
-"{original_user_prompt}"
-
---- YOUR INTERNAL SCRATCHPAD ---
-{current_scratchpad}
---- END SCRATCHPAD ---
-
-INSTRUCTIONS:
-Fill the parameters for the selected tool. If code is required, do not paste code; use the code asset ID string exactly. If images are required, use the provided image asset IDs. Output only:
-
-{{
-"tool_params": {{...}}
-}}
-"""
-                log_prompt("Parameter Generation Prompt", param_prompt)
-                param_schema = {"tool_params": "object"}
-                param_data = self.generate_structured_content(
-                    prompt=param_prompt,
-                    schema=param_schema,
-                    system_prompt=reasoning_system_prompt,
-                    temperature=decision_temperature,
-                    **llm_generation_kwargs
-                )
-                tool_params = {}
-                if param_data and isinstance(param_data.get("tool_params"), dict):
-                    tool_params = param_data["tool_params"]
-                else:
-                    log_event("Parameter generation returned empty", MSG_TYPE.MSG_TYPE_WARNING, meta={"param_raw": str(param_data)})
-
+                    image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
+                    if image_assets:
+                        param_assets['image_asset_id'] = image_assets[0]
+
+                param_prompt = f"""Fill the parameters for the tool: '{tool_name}'. Available assets: {json.dumps(param_assets)}.
+SCRATCHPAD:\n{current_scratchpad}\n
+Output only: {{"tool_params": {{...}}}}"""
+                param_data = self.generate_structured_content(prompt=param_prompt, schema={"tool_params": "object"}, temperature=decision_temperature, **llm_generation_kwargs)
+                tool_params = param_data.get("tool_params", {}) if param_data else {}
+
                 def _hydrate(data: Any, store: Dict) -> Any:
-                    if isinstance(data, dict):
-
-                    if isinstance(data,
-                        return [_hydrate(item, store) for item in data]
-                    if isinstance(data, str) and data in store:
-                        return store[data].get("content", data)
+                    if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
+                    if isinstance(data, list): return [_hydrate(item, store) for item in data]
+                    if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
                     return data
-
                 hydrated_params = _hydrate(tool_params, asset_store)
-
-
-                tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed."}
+
+                start_time, tool_result = time.time(), {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
                 try:
-                    if tool_name
-                        prompt_for_img = hydrated_params.get("prompt", "")
-                        log_event("TTI call start", MSG_TYPE.MSG_TYPE_STEP, meta={"tool_name": tool_name})
-                        image_bytes = self.tti.generate_image(prompt=prompt_for_img)
-                        if not image_bytes:
-                            raise Exception("TTI binding returned empty image data.")
-                        b64_image = base64.b64encode(image_bytes).decode("utf-8")
-                        img_uuid = str(uuid.uuid4())
-                        asset_store[img_uuid] = {"type": "image", "content": f"data:image/png;base64,{b64_image}"}
-                        tool_result = {"status": "success", "image_asset": img_uuid, "html_tag": f"<img src='data:image/png;base64,{b64_image}' alt='Generated Image'/>"}
-                        log_event("TTI call success", MSG_TYPE.MSG_TYPE_STEP, meta={"image_asset": img_uuid})
-                    elif tool_name in rag_registry:
+                    if tool_name in rag_registry:
                         query = hydrated_params.get("query", "")
-                        top_k =
-
-
-
-
-
-
-                        except TypeError:
-                            raw_results = rag_fn(query)
-                        docs = []
-                        if isinstance(raw_results, dict) and "results" in raw_results:
-                            raw_iter = raw_results["results"]
-                        else:
-                            raw_iter = raw_results
-                        for d in raw_iter or []:
-                            text = d.get("text") if isinstance(d, dict) else str(d)
-                            score = d.get("score", 0.0) if isinstance(d, dict) else 0.0
-                            meta = d.get("metadata", {}) if isinstance(d, dict) else {}
-                            pct = score * 100.0 if score <= 1.0 else score
-                            docs.append({"text": text, "score": pct, "metadata": meta})
-                        docs.sort(key=lambda x: x.get("score", 0.0), reverse=True)
-                        kept = [x for x in docs if x.get("score", 0.0) >= min_sim][:top_k]
-                        dropped = len(docs) - len(kept)
-                        tool_result = {"status": "success", "results": kept, "dropped": dropped, "min_similarity_percent": min_sim, "top_k": top_k}
-                        sources_this_turn.extend([{"source": tool_name, "metadata": x.get("metadata", {}), "score": x.get("score", 0.0)} for x in kept])
-                        snippet_preview = [{"score": x["score"], "text": (x["text"][:200] + "…") if isinstance(x["text"], str) and len(x["text"]) > 200 else x["text"]} for x in kept]
-                        log_event("RAG call end", MSG_TYPE.MSG_TYPE_STEP_END, meta={"tool_name": tool_name, "kept": len(kept), "dropped": dropped, "preview": snippet_preview})
-                        rag_notes = "\n".join([f"- [{idx+1}] score={x['score']:.1f}% | {x['text'][:500]}" for idx, x in enumerate(kept)])
-                        current_scratchpad += f"\n\n### RAG Notes ({tool_name})\n{rag_notes if rag_notes else '- No results above threshold.'}"
+                        top_k, min_sim = rag_tool_specs[tool_name]["default_top_k"], rag_tool_specs[tool_name]["default_min_sim"]
+                        raw_results = rag_registry[tool_name](query=query, top_k=top_k)
+                        raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
+                        docs = [{"text": d.get("text", str(d)), "score": d.get("score", 0)*100, "metadata": d.get("metadata", {})} for d in raw_iter or []]
+                        kept = [x for x in docs if x['score'] >= min_sim]
+                        tool_result = {"status": "success", "results": kept, "dropped": len(docs) - len(kept)}
+                        sources_this_turn.extend([{"source": tool_name, "metadata": x["metadata"], "score": x["score"]} for x in kept])
                     elif hasattr(self, "mcp"):
-                        log_event("MCP tool call start", MSG_TYPE.MSG_TYPE_STEP, meta={"tool_name": tool_name})
                         tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
-                        log_event("MCP tool call end", MSG_TYPE.MSG_TYPE_STEP_END, meta={"tool_name": tool_name})
-                    else:
-                        tool_result = {"status": "failure", "error": "No MCP instance available and tool is not RAG/TTI."}
                 except Exception as e:
-
-
+                    error_msg = f"Exception during '{tool_name}' execution: {e}"
+                    log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
+                    tool_result = {"status": "failure", "error": error_msg}
 
-
-
-
+                response_time = time.time() - start_time
+                success = tool_result.get("status") == "success"
+                performance_tracker.record_tool_usage(tool_name, success, 0.8, response_time, tool_result.get("error"))
+
+                if success and current_task_index < len(execution_plan.tasks):
+                    execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
+                    current_task_index += 1
+
+                observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
+                tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result, "response_time": response_time})
                 current_scratchpad += f"\n\n### Step {i+1}: Observation\n- Action: `{tool_name}`\n- Result:\n{observation_text}"
-
-
-
+
+                if success:
+                    log_event_fn(f"✅ Step completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
+                else:
+                    error_detail = tool_result.get("error", "No error detail provided.")
+                    log_event_fn(f"Tool reported failure: {error_detail}", MSG_TYPE.MSG_TYPE_WARNING)
+                    log_event_fn(f"⚠️ Step completed with issues", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={"error": error_detail})
+
+                if len(completed_tasks) == len(execution_plan.tasks): break
+
             except Exception as ex:
+                log_event_fn(f"An unexpected error occurred in reasoning loop: {ex}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=reasoning_step_id)
                 trace_exception(ex)
-
-
-        synthesis_id = log_event("Synthesizing final answer...", MSG_TYPE.MSG_TYPE_STEP_START)
-        final_answer_prompt = f"""--- ORIGINAL USER REQUEST ---
-"{original_user_prompt}"
-
---- YOUR INTERNAL SCRATCHPAD ---
-{current_scratchpad}
---- END SCRATCHPAD ---
-
-INSTRUCTIONS:
-Synthesize a clear, comprehensive, and friendly answer for the user based ONLY on your scratchpad. If relevant images were generated, refer to them naturally. Keep the answer concise but complete."""
-        final_synthesis_images = [img for img in (images or [])] + [asset['content'] for asset in asset_store.values() if asset['type'] == 'image']
-        log_prompt("Final Synthesis Prompt", final_answer_prompt)
-        final_answer_text = self.generate_text(
-            prompt=final_answer_prompt,
-            system_prompt=system_prompt,
-            images=final_synthesis_images,
-            stream=streaming_callback is not None,
-            streaming_callback=streaming_callback,
-            temperature=final_answer_temperature,
-            **llm_generation_kwargs
-        )
-        if isinstance(final_answer_text, dict) and "error" in final_answer_text:
-            return {"final_answer": "", "final_scratchpad": current_scratchpad, "tool_calls": tool_calls_this_turn, "sources": sources_this_turn, "clarification_required": False, "error": final_answer_text["error"]}
+                log_event_fn("⚠️ Encountered an issue, adjusting approach...", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
 
-
-
-
-
+        if enable_self_reflection and len(tool_calls_this_turn) > 1:
+            reflection_id = log_event_fn("🤔 Reviewing my work...", MSG_TYPE.MSG_TYPE_STEP_START)
+            try:
+                reflection_prompt = f"""Review the user request and your work. Was the goal achieved effectively?
+REQUEST: "{original_user_prompt}"
+SCRATCHPAD:\n{current_scratchpad}\n
+JSON assessment: {{"goal_achieved": true, "effectiveness_score": 0.8, "summary": "..."}}"""
+                reflection_data = self.generate_structured_content(prompt=reflection_prompt, schema={"goal_achieved": "boolean", "effectiveness_score": "number", "summary": "string"}, temperature=0.3, **llm_generation_kwargs)
+                if reflection_data: current_scratchpad += f"\n\n### Self-Reflection\n- Goal Achieved: {reflection_data.get('goal_achieved')}\n- Effectiveness: {reflection_data.get('effectiveness_score')}"
+                log_event_fn("✅ Quality check completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id)
+            except Exception as e:
+                log_event_fn(f"Self-review failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
 
-
+        synthesis_id = log_event_fn("📝 Preparing your complete answer...", MSG_TYPE.MSG_TYPE_STEP_START)
+        final_answer_prompt = f"""Synthesize a comprehensive, user-friendly final answer based on your complete analysis.
+USER REQUEST: "{original_user_prompt}"
+FULL SCRATCHPAD:\n{current_scratchpad}\n---
+FINAL ANSWER:"""
+
+        final_answer_text = self.generate_text(prompt=final_answer_prompt, system_prompt=system_prompt, stream=streaming_callback is not None, streaming_callback=streaming_callback, temperature=final_answer_temperature, **llm_generation_kwargs)
+        if isinstance(final_answer_text, dict) and "error" in final_answer_text:
+            return {"final_answer": "", "error": final_answer_text["error"], "final_scratchpad": current_scratchpad}
+
+        final_answer = self.remove_thinking_blocks(final_answer_text)
+        log_event_fn("✅ Answer ready!", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id)
 
+        overall_confidence = sum(c.get('confidence', 0.5) for c in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
         return {
-            "final_answer": final_answer,
-            "
-            "
-            "
-            "clarification_required": False,
-            "error": None
+            "final_answer": final_answer, "final_scratchpad": current_scratchpad,
+            "tool_calls": tool_calls_this_turn, "sources": sources_this_turn,
+            "performance_stats": {"total_steps": len(tool_calls_this_turn), "average_confidence": overall_confidence},
+            "clarification_required": False, "overall_confidence": overall_confidence, "error": None
         }
 
+
     def generate_code(
         self,
         prompt:str,