lollms-client 1.3.7__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.
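At the API surface, the headline change in this diff is a new `max_scratchpad_size: int = 20000` parameter on `generate_with_mcp_rag`, threaded through to `_execute_complex_reasoning_loop`, together with richer return payloads (`decision_history`, `performance_stats`, `plan_evolution`). A minimal usage sketch based on the signature and result keys visible in the hunks below; the client construction and the prompt are illustrative assumptions, not part of the release:

```python
# Sketch only: constructor arguments and the prompt are assumptions;
# the parameter names and result keys come from the 1.4.0 diff below.
from lollms_client import LollmsClient

client = LollmsClient()  # hypothetical default construction

result = client.generate_with_mcp_rag(
    prompt="Research topic X and summarize the findings",
    use_mcps=True,               # discover and expose all MCP tools
    max_scratchpad_size=20000,   # new in 1.4.0: cap on the reasoning scratchpad
)

if result.get("clarification_required"):
    print("Model asked:", result["final_answer"])
else:
    print(result["final_answer"])
    print(result.get("performance_stats"))  # e.g. success_rate, average_confidence
    print(result.get("decision_history"))   # per-step decisions on the complex path
```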

@@ -1474,6 +1474,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
  # Clean up the technical tool name for a more readable display
  clean_name = tool_name.replace("_", " ").replace("::", " - ").title()
  return f"🔧 Using the {clean_name} tool"
+
  def generate_with_mcp_rag(
  self,
  prompt: str,
@@ -1494,6 +1495,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
  debug: bool = False,
  enable_parallel_execution: bool = True,
  enable_self_reflection: bool = True,
+ max_scratchpad_size: int = 20000,
  **llm_generation_kwargs
  ) -> Dict[str, Any]:

@@ -1525,13 +1527,23 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
  ASCIIColors.red(f"Prompt size:{prompt_size}/{self.llm.default_ctx_size}")
  ASCIIColors.cyan(f"** DEBUG: DONE **")

- discovery_step_id = log_event("🔧 Setting up capabilities...", MSG_TYPE.MSG_TYPE_STEP_START)
+ # Enhanced discovery phase with more detailed logging
+ discovery_step_id = log_event("🔧 Discovering and configuring available capabilities...", MSG_TYPE.MSG_TYPE_STEP_START)
  all_discovered_tools, visible_tools, rag_registry, rag_tool_specs = [], [], {}, {}
+
  if use_mcps and hasattr(self, 'mcp'):
+ log_event(" 📡 Connecting to MCP services...", MSG_TYPE.MSG_TYPE_INFO)
  mcp_tools = self.mcp.discover_tools(force_refresh=True)
- if isinstance(use_mcps, list): all_discovered_tools.extend([t for t in mcp_tools if t["name"] in use_mcps])
- elif use_mcps is True: all_discovered_tools.extend(mcp_tools)
+ if isinstance(use_mcps, list):
+ filtered_tools = [t for t in mcp_tools if t["name"] in use_mcps]
+ all_discovered_tools.extend(filtered_tools)
+ log_event(f" ✅ Loaded {len(filtered_tools)} specific MCP tools: {', '.join(use_mcps)}", MSG_TYPE.MSG_TYPE_INFO)
+ elif use_mcps is True:
+ all_discovered_tools.extend(mcp_tools)
+ log_event(f" ✅ Loaded {len(mcp_tools)} MCP tools", MSG_TYPE.MSG_TYPE_INFO)
+
  if use_data_store:
+ log_event(f" 📚 Setting up {len(use_data_store)} knowledge bases...", MSG_TYPE.MSG_TYPE_INFO)
  for name, info in use_data_store.items():
  tool_name, description, call_fn = f"research::{name}", f"Queries the '{name}' knowledge base.", None
  if callable(info): call_fn = info
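As the hunk above shows, `use_mcps` accepts two forms: `True` loads every discovered MCP tool, while a list acts as a whitelist matched against each tool's `name`. A hedged sketch of both call forms, reusing the `client` from the earlier sketch (the tool name is hypothetical):

```python
# Both forms are accepted per the isinstance checks in the hunk above.
# "weather::get_forecast" is a hypothetical tool name for illustration.
result_all = client.generate_with_mcp_rag(prompt="...", use_mcps=True)
result_some = client.generate_with_mcp_rag(
    prompt="...",
    use_mcps=["weather::get_forecast"],  # only tools whose name is listed are exposed
)
```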
@@ -1542,75 +1554,138 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
  visible_tools.append({"name": tool_name, "description": description, "input_schema": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}})
  rag_registry[tool_name] = call_fn
  rag_tool_specs[tool_name] = {"default_top_k": rag_top_k, "default_min_sim": rag_min_similarity_percent}
+ log_event(f" 📖 Ready: {name}", MSG_TYPE.MSG_TYPE_INFO)
+
  visible_tools.extend(all_discovered_tools)
- built_in_tools = [{"name": "local_tools::final_answer", "description": "Provide the final answer directly to the user.", "input_schema": {}}]
- if getattr(self, "tti", None): built_in_tools.append({"name": "local_tools::generate_image", "description": "Generate an image from a text description.", "input_schema": {"type": "object", "properties": {"prompt": {"type": "string"}}, "required": ["prompt"]}})
+ built_in_tools = [
+ {"name": "local_tools::final_answer", "description": "Provide the final answer directly to the user.", "input_schema": {}},
+ {"name": "local_tools::request_clarification", "description": "Ask the user for more specific information when the request is ambiguous.", "input_schema": {"type": "object", "properties": {"question": {"type": "string"}}, "required": ["question"]}},
+ {"name": "local_tools::revise_plan", "description": "Update the execution plan based on new discoveries or changing requirements.", "input_schema": {"type": "object", "properties": {"reason": {"type": "string"}, "new_plan": {"type": "array"}}, "required": ["reason", "new_plan"]}}
+ ]
+ if getattr(self, "tti", None):
+ built_in_tools.append({"name": "local_tools::generate_image", "description": "Generate an image from a text description.", "input_schema": {"type": "object", "properties": {"prompt": {"type": "string"}}, "required": ["prompt"]}})
+
  all_visible_tools = visible_tools + built_in_tools
- tool_summary = "\n".join([f"- {t['name']}: {t['description']}" for t in all_visible_tools[:15]])
- log_event(f"✅ Ready with {len(all_visible_tools)} capabilities", MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
+ tool_summary = "\n".join([f"- **{t['name']}**: {t['description']}" for t in all_visible_tools[:20]])
+
+ log_event(f"✅ Ready with {len(all_visible_tools)} total capabilities", MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id, meta={"tool_count": len(all_visible_tools), "mcp_tools": len(all_discovered_tools), "rag_tools": len(rag_registry)})

- triage_step_id = log_event("🤔 Analyzing the best approach...", MSG_TYPE.MSG_TYPE_STEP_START)
+ # Enhanced triage with better prompting
+ triage_step_id = log_event("🤔 Analyzing request complexity and optimal approach...", MSG_TYPE.MSG_TYPE_STEP_START)
  strategy = "COMPLEX_PLAN"
  strategy_data = {}
  try:
- triage_prompt = f"""Analyze the user's request and determine the most efficient strategy.
+ triage_prompt = f"""Analyze this user request to determine the most efficient execution strategy.
+
  USER REQUEST: "{prompt}"
- AVAILABLE TOOLS:\n{tool_summary}
- Choose a strategy:
- - "DIRECT_ANSWER": For greetings or simple questions that need no tools.
- - "REQUEST_CLARIFICATION": If the request is ambiguous and you need more information from the user.
- - "SINGLE_TOOL": If the request can be resolved with one tool call.
- - "COMPLEX_PLAN": For multi-step requests requiring multiple tools or complex reasoning.
-
- Provide your decision as JSON: {{"thought": "...", "strategy": "...", "text_output": "Your direct answer or clarification question.", "required_tool_name": "..."}}"""
+ CONTEXT: {context or "No additional context provided"}
+ IMAGES PROVIDED: {"Yes" if images else "No"}
+
+ AVAILABLE CAPABILITIES:
+ {tool_summary}
+
+ Based on the request complexity and available tools, choose the optimal strategy:
+
+ 1. **DIRECT_ANSWER**: For simple greetings, basic questions, or requests that don't require any tools
+ - Use when: The request can be fully answered with your existing knowledge
+ - Example: "Hello", "What is Python?", "Explain quantum physics"
+
+ 2. **REQUEST_CLARIFICATION**: When the request is too vague or ambiguous
+ - Use when: The request lacks essential details needed to proceed
+ - Example: "Help me with my code" (what code? what issue?)
+
+ 3. **SINGLE_TOOL**: For straightforward requests that need exactly one tool
+ - Use when: The request clearly maps to a single, specific tool operation
+ - Example: "Search for information about X", "Generate an image of Y"
+
+ 4. **COMPLEX_PLAN**: For multi-step requests requiring coordination of multiple tools
+ - Use when: The request involves multiple steps, data analysis, or complex reasoning
+ - Example: "Research X, then create a report comparing it to Y"
+
+ Provide your analysis in JSON format:
+ {{"thought": "Detailed reasoning about the request complexity and requirements", "strategy": "ONE_OF_THE_FOUR_OPTIONS", "confidence": 0.8, "text_output": "Direct answer or clarification question if applicable", "required_tool_name": "specific tool name if SINGLE_TOOL strategy", "estimated_steps": 3}}"""
+
+ log_prompt("Triage Prompt", triage_prompt)

  triage_schema = {
- "thought": "string", "strategy": "string",
- "text_output": "string", "required_tool_name": "string"
+ "thought": "string", "strategy": "string", "confidence": "number",
+ "text_output": "string", "required_tool_name": "string", "estimated_steps": "number"
  }
  strategy_data = self.generate_structured_content(prompt=triage_prompt, schema=triage_schema, temperature=0.1, **llm_generation_kwargs)
  strategy = strategy_data.get("strategy") if strategy_data else "COMPLEX_PLAN"
+
+ log_event(f"Strategy analysis complete", MSG_TYPE.MSG_TYPE_INFO, meta={
+ "strategy": strategy,
+ "confidence": strategy_data.get("confidence", 0.5),
+ "estimated_steps": strategy_data.get("estimated_steps", 1),
+ "reasoning": strategy_data.get("thought", "")
+ })
+
  except Exception as e:
- log_event(f"Triage failed, defaulting to complex plan. Error: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=triage_step_id)
+ log_event(f"Triage analysis failed: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=triage_step_id)
+ log_event("Defaulting to complex planning approach", MSG_TYPE.MSG_TYPE_WARNING)

  if force_mcp_use and strategy == "DIRECT_ANSWER":
  strategy = "COMPLEX_PLAN"
- log_event(f" Approach decided: {strategy.replace('_', ' ').title()}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=triage_step_id)
+ log_event("Forcing tool usage - switching to complex planning", MSG_TYPE.MSG_TYPE_INFO)
+
+ log_event(f"✅ Strategy selected: {strategy.replace('_', ' ').title()}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=triage_step_id, meta={"final_strategy": strategy})

+ # Handle simple strategies
  if strategy == "DIRECT_ANSWER":
  final_answer = strategy_data.get("text_output", "I can help with that.")
+ log_event("Providing direct response", MSG_TYPE.MSG_TYPE_INFO)
  if streaming_callback: streaming_callback(final_answer, MSG_TYPE.MSG_TYPE_CONTENT, {})
- return {"final_answer": final_answer, "tool_calls": [], "sources": [], "error": None, "clarification_required": False, "final_scratchpad": f"Strategy: DIRECT_ANSWER\nThought: {strategy_data.get('thought')}"}
+ return {"final_answer": final_answer, "tool_calls": [], "sources": [], "error": None, "clarification_required": False, "final_scratchpad": f"Strategy: DIRECT_ANSWER\nConfidence: {strategy_data.get('confidence', 0.9)}\nReasoning: {strategy_data.get('thought')}"}

  if strategy == "REQUEST_CLARIFICATION":
- clarification_question = strategy_data.get("text_output", "Could you please provide more details?")
- return {"final_answer": clarification_question, "tool_calls": [], "sources": [], "error": None, "clarification_required": True, "final_scratchpad": f"Strategy: REQUEST_CLARIFICATION\nThought: {strategy_data.get('thought')}"}
+ clarification_question = strategy_data.get("text_output", "Could you please provide more details about what specifically you'd like me to help with?")
+ log_event("Requesting clarification from user", MSG_TYPE.MSG_TYPE_INFO)
+ return {"final_answer": clarification_question, "tool_calls": [], "sources": [], "error": None, "clarification_required": True, "final_scratchpad": f"Strategy: REQUEST_CLARIFICATION\nConfidence: {strategy_data.get('confidence', 0.8)}\nReasoning: {strategy_data.get('thought')}"}

+ # Enhanced single tool execution
  if strategy == "SINGLE_TOOL":
- synthesis_id = log_event("⚡ Taking a direct approach...", MSG_TYPE.MSG_TYPE_STEP_START)
+ synthesis_id = log_event("⚡ Executing single-tool strategy...", MSG_TYPE.MSG_TYPE_STEP_START)
  try:
  tool_name = strategy_data.get("required_tool_name")
  tool_spec = next((t for t in all_visible_tools if t['name'] == tool_name), None)
  if not tool_spec:
- raise ValueError(f"LLM chose an unavailable tool: '{tool_name}'")
+ raise ValueError(f"Strategy analysis selected unavailable tool: '{tool_name}'")
+
+ log_event(f"Selected tool: {tool_name}", MSG_TYPE.MSG_TYPE_INFO)

- param_prompt = f"""Given the user request, generate the correct parameters for the selected tool.
+ # Enhanced parameter generation prompt
+ param_prompt = f"""Generate the optimal parameters for the selected tool to fulfill the user's request.
+
  USER REQUEST: "{prompt}"
  SELECTED TOOL: {json.dumps(tool_spec, indent=2)}
- Output ONLY the JSON for the tool's parameters: {{"tool_params": {{...}}}}"""
+ CONTEXT: {context or "None"}
+
+ Analyze the user's request carefully and provide the most appropriate parameters.
+ If the request has implicit requirements, infer them intelligently.
+
+ Output the parameters as JSON: {{"tool_params": {{...}}}}"""
+
+ log_prompt("Parameter Generation Prompt", param_prompt)
  param_data = self.generate_structured_content(prompt=param_prompt, schema={"tool_params": "object"}, temperature=0.1, **llm_generation_kwargs)
  tool_params = param_data.get("tool_params", {}) if param_data else {}

+ log_event(f"Generated parameters: {json.dumps(tool_params)}", MSG_TYPE.MSG_TYPE_INFO)
+
  start_time, sources, tool_result = time.time(), [], {}
  if tool_name in rag_registry:
  query = tool_params.get("query", prompt)
+ log_event(f"Searching knowledge base with query: '{query}'", MSG_TYPE.MSG_TYPE_INFO)
  rag_fn = rag_registry[tool_name]
  raw_results = rag_fn(query=query, rag_top_k=rag_top_k, rag_min_similarity_percent=rag_min_similarity_percent)
  docs = [d for d in (raw_results.get("results", []) if isinstance(raw_results, dict) else raw_results or [])]
  tool_result = {"status": "success", "results": docs}
  sources = [{"source": tool_name, "metadata": d.get("metadata", {}), "score": d.get("score", 0.0)} for d in docs]
+ log_event(f"Retrieved {len(docs)} relevant documents", MSG_TYPE.MSG_TYPE_INFO)
  elif hasattr(self, "mcp") and "local_tools" not in tool_name:
+ log_event(f"Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={"tool_name": tool_name, "params": tool_params})
  tool_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
+ log_event(f"Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={"result_status": tool_result.get("status", "unknown")})
  else:
  tool_result = {"status": "failure", "error": f"Tool '{tool_name}' could not be executed in single-step mode."}

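The single-tool RAG branch above also pins down the contract for `use_data_store` entries: a callable value is invoked as `call_fn(query=..., rag_top_k=..., rag_min_similarity_percent=...)` and may return either a plain list of documents or a dict with a `results` key, each document carrying `text`, `score` (a 0-1 fraction; the reasoning loop later multiplies it by 100 before filtering against the similarity threshold), and `metadata`. A sketch of a compatible callable; the retrieval body is a placeholder, not library code:

```python
from typing import Any, Dict, List

def notes_store(query: str, rag_top_k: int = 5,
                rag_min_similarity_percent: float = 50.0,
                **kwargs) -> Dict[str, List[Dict[str, Any]]]:
    """Placeholder data store matching the call shape used in this diff.

    Only the signature and the {"results": [{"text", "score", "metadata"}]}
    shape come from the diff; the retrieval itself is an assumption. The
    default on the similarity argument matters because the reasoning loop
    calls the store with query= and rag_top_k= only.
    """
    hits = [{"text": "Example note...", "score": 0.91,
             "metadata": {"path": "notes/example.md"}}]
    return {"results": hits[:rag_top_k]}

# Exposed to the model as the tool "research::notes":
# client.generate_with_mcp_rag(prompt="...", use_data_store={"notes": notes_store})
```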
@@ -1621,19 +1696,34 @@ Output ONLY the JSON for the tool's parameters: {{"tool_params": {{...}}}}"""
  response_time = time.time() - start_time
  tool_calls_this_turn = [{"name": tool_name, "params": tool_params, "result": tool_result, "response_time": response_time}]

- synthesis_prompt = f"""The user asked: "{prompt}"
- I used the tool '{tool_name}' and got this result: {json.dumps(tool_result, indent=2)}
- Synthesize a direct, user-friendly final answer."""
+ # Enhanced synthesis prompt
+ synthesis_prompt = f"""Create a comprehensive and user-friendly response based on the tool execution results.
+
+ USER REQUEST: "{prompt}"
+ TOOL USED: {tool_name}
+ TOOL RESULT: {json.dumps(tool_result, indent=2)}
+
+ Guidelines for your response:
+ 1. Be direct and helpful
+ 2. Synthesize the information clearly
+ 3. Address the user's specific needs
+ 4. If the tool provided data, present it in an organized way
+ 5. If relevant, mention any limitations or additional context
+
+ RESPONSE:"""
+
+ log_event("Synthesizing final response", MSG_TYPE.MSG_TYPE_INFO)
  final_answer = self.generate_text(prompt=synthesis_prompt, system_prompt=system_prompt, stream=streaming_callback is not None, streaming_callback=streaming_callback, temperature=final_answer_temperature, **llm_generation_kwargs)
  final_answer = self.remove_thinking_blocks(final_answer)

- log_event("✅ Direct answer ready!", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id)
- return {"final_answer": final_answer, "tool_calls": tool_calls_this_turn, "sources": sources, "error": None, "clarification_required": False, "final_scratchpad": f"Strategy: SINGLE_TOOL\nTool: {tool_name}\nResult: {json.dumps(tool_result)}"}
+ log_event("✅ Single-tool execution completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id)
+ return {"final_answer": final_answer, "tool_calls": tool_calls_this_turn, "sources": sources, "error": None, "clarification_required": False, "final_scratchpad": f"Strategy: SINGLE_TOOL\nTool: {tool_name}\nResult: Success\nResponse Time: {response_time:.2f}s"}

  except Exception as e:
- log_event(f"Direct approach failed: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=synthesis_id)
- log_event("Escalating to a more detailed plan.", MSG_TYPE.MSG_TYPE_INFO)
+ log_event(f"Single-tool execution failed: {e}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=synthesis_id)
+ log_event("Escalating to complex planning approach", MSG_TYPE.MSG_TYPE_INFO)

+ # Execute complex reasoning with enhanced capabilities
  return self._execute_complex_reasoning_loop(
  prompt=prompt, context=context, system_prompt=system_prompt,
  reasoning_system_prompt=reasoning_system_prompt, images=images,
@@ -1641,7 +1731,7 @@ Synthesize a direct, user-friendly final answer."""
  final_answer_temperature=final_answer_temperature, streaming_callback=streaming_callback,
  debug=debug, enable_self_reflection=enable_self_reflection,
  all_visible_tools=all_visible_tools, rag_registry=rag_registry, rag_tool_specs=rag_tool_specs,
- log_event_fn=log_event, log_prompt_fn=log_prompt,
+ log_event_fn=log_event, log_prompt_fn=log_prompt, max_scratchpad_size=max_scratchpad_size,
  **llm_generation_kwargs
  )

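The large hunk below contains the reworked body of `_execute_complex_reasoning_loop`. One notable addition is `_compress_scratchpad_intelligently`: once the scratchpad exceeds `max_scratchpad_size`, it is compressed to half the cap, preserving the original request, the current plan, the last few decisions, and recent observations. A simplified restatement of that trigger policy; this is not the library's code, and the selective section-parsing lives in the hunk itself:

```python
# Simplified restatement of the compression policy added below; not the
# library's code. The real version parses "### ..." section headings and
# selectively keeps the plan, recent decisions, and recent observations.
def maybe_compress(scratchpad: str, original_request: str,
                   max_scratchpad_size: int = 20000) -> str:
    if len(scratchpad) <= max_scratchpad_size:
        return scratchpad
    target = max_scratchpad_size // 2   # the loop compresses to half the cap
    tail = scratchpad[-target:]         # placeholder for the selective keep
    return f"### Original Request\n{original_request}\n...\n{tail}"
```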
@@ -1649,170 +1739,2170 @@ Synthesize a direct, user-friendly final answer."""
1649
1739
  self, prompt, context, system_prompt, reasoning_system_prompt, images,
1650
1740
  max_reasoning_steps, decision_temperature, final_answer_temperature,
1651
1741
  streaming_callback, debug, enable_self_reflection, all_visible_tools,
1652
- rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, **llm_generation_kwargs
1742
+ rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, max_scratchpad_size, **llm_generation_kwargs
1653
1743
  ) -> Dict[str, Any]:
1654
1744
 
1655
1745
  planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
1656
1746
 
1657
1747
  def _get_friendly_action_description(tool_name, requires_code, requires_image):
1658
- if tool_name == "local_tools::final_answer": return "📋 Ready to provide your answer"
1659
- if tool_name == "local_tools::request_clarification": return " Need to ask for clarification"
1660
- if tool_name == "local_tools::generate_image": return "🎨 Creating an image for you"
1661
- if "research::" in tool_name: return f"🔍 Searching {tool_name.split('::')[-1]} for information"
1662
- if requires_code: return "💻 Working on a coding solution"
1663
- if requires_image: return "🖼️ Analyzing the provided images"
1748
+ descriptions = {
1749
+ "local_tools::final_answer": "📋 Preparing final answer",
1750
+ "local_tools::request_clarification": " Requesting clarification",
1751
+ "local_tools::generate_image": "🎨 Creating image",
1752
+ "local_tools::revise_plan": "📝 Revising execution plan"
1753
+ }
1754
+ if tool_name in descriptions:
1755
+ return descriptions[tool_name]
1756
+ if "research::" in tool_name:
1757
+ return f"🔍 Searching {tool_name.split('::')[-1]} knowledge base"
1758
+ if requires_code:
1759
+ return "💻 Processing code"
1760
+ if requires_image:
1761
+ return "🖼️ Analyzing images"
1664
1762
  return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
1665
1763
 
1764
+ def _compress_scratchpad_intelligently(scratchpad: str, original_request: str, target_size: int) -> str:
1765
+ """Enhanced scratchpad compression that preserves key decisions and recent context"""
1766
+ if len(scratchpad) <= target_size:
1767
+ return scratchpad
1768
+
1769
+ log_event_fn("📝 Compressing scratchpad to maintain focus...", MSG_TYPE.MSG_TYPE_INFO)
1770
+
1771
+ # Extract key components
1772
+ lines = scratchpad.split('\n')
1773
+ plan_section = []
1774
+ decisions = []
1775
+ recent_observations = []
1776
+
1777
+ current_section = None
1778
+ for i, line in enumerate(lines):
1779
+ if "### Execution Plan" in line or "### Updated Plan" in line:
1780
+ current_section = "plan"
1781
+ elif "### Step" in line and ("Thought" in line or "Decision" in line):
1782
+ current_section = "decision"
1783
+ elif "### Step" in line and "Observation" in line:
1784
+ current_section = "observation"
1785
+ elif line.startswith("###"):
1786
+ current_section = None
1787
+
1788
+ if current_section == "plan" and line.strip():
1789
+ plan_section.append(line)
1790
+ elif current_section == "decision" and line.strip():
1791
+ decisions.append((i, line))
1792
+ elif current_section == "observation" and line.strip():
1793
+ recent_observations.append((i, line))
1794
+
1795
+ # Keep most recent items and important decisions
1796
+ recent_decisions = decisions[-3:] if len(decisions) > 3 else decisions
1797
+ recent_obs = recent_observations[-5:] if len(recent_observations) > 5 else recent_observations
1798
+
1799
+ compressed_parts = [
1800
+ f"### Original Request\n{original_request}",
1801
+ f"### Current Plan\n" + '\n'.join(plan_section[-10:]),
1802
+ f"### Recent Key Decisions"
1803
+ ]
1804
+
1805
+ for _, decision in recent_decisions:
1806
+ compressed_parts.append(decision)
1807
+
1808
+ compressed_parts.append("### Recent Observations")
1809
+ for _, obs in recent_obs:
1810
+ compressed_parts.append(obs)
1811
+
1812
+ compressed = '\n'.join(compressed_parts)
1813
+ if len(compressed) > target_size:
1814
+ # Final trim if still too long
1815
+ compressed = compressed[:target_size-100] + "\n...[content compressed for focus]"
1816
+
1817
+ return compressed
1818
+
1666
1819
  original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
1667
1820
  asset_store: Dict[str, Dict] = {}
1821
+ decision_history = [] # Track all decisions made
1668
1822
 
1669
- planning_step_id = log_event_fn("📋 Creating a detailed plan...", MSG_TYPE.MSG_TYPE_STEP_START)
1823
+ # Enhanced planning phase
1824
+ planning_step_id = log_event_fn("📋 Creating adaptive execution plan...", MSG_TYPE.MSG_TYPE_STEP_START)
1670
1825
  execution_plan = planner.decompose_task(original_user_prompt, context or "")
1671
- log_event_fn(f"✅ Plan ready ({len(execution_plan.tasks)} steps)", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
1826
+ current_plan_version = 1
1827
+
1828
+ log_event_fn(f"Initial plan created with {len(execution_plan.tasks)} tasks", MSG_TYPE.MSG_TYPE_INFO, meta={
1829
+ "plan_version": current_plan_version,
1830
+ "total_tasks": len(execution_plan.tasks),
1831
+ "estimated_complexity": "medium" if len(execution_plan.tasks) <= 5 else "high"
1832
+ })
1833
+
1834
+ for i, task in enumerate(execution_plan.tasks):
1835
+ log_event_fn(f"Task {i+1}: {task.description}", MSG_TYPE.MSG_TYPE_INFO)
1836
+
1837
+ log_event_fn("✅ Adaptive plan ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
1838
+
1839
+ # Enhanced initial state
1840
+ initial_state_parts = [
1841
+ f"### Original User Request\n{original_user_prompt}",
1842
+ f"### Context\n{context or 'No additional context provided'}",
1843
+ f"### Execution Plan (Version {current_plan_version})\n- Total tasks: {len(execution_plan.tasks)}",
1844
+ f"- Estimated complexity: {'High' if len(execution_plan.tasks) > 5 else 'Medium'}"
1845
+ ]
1846
+
1847
+ for i, task in enumerate(execution_plan.tasks):
1848
+ initial_state_parts.append(f" {i+1}. {task.description} [Status: {task.status.value}]")
1672
1849
 
1673
- initial_state_parts = [f"### Execution Plan\n- Total tasks: {len(execution_plan.tasks)}"]
1674
- for i, task in enumerate(execution_plan.tasks): initial_state_parts.append(f" {i+1}. {task.description}")
1675
1850
  if images:
1851
+ initial_state_parts.append(f"### Provided Assets")
1676
1852
  for img_b64 in images:
1677
1853
  img_uuid = str(uuid.uuid4())
1678
1854
  asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
1679
- initial_state_parts.append(f"- User provided image, asset ID: {img_uuid}")
1855
+ initial_state_parts.append(f"- Image asset: {img_uuid}")
1856
+
1680
1857
  current_scratchpad = "\n".join(initial_state_parts)
1858
+ log_event_fn("Initial analysis complete", MSG_TYPE.MSG_TYPE_SCRATCHPAD, meta={"scratchpad_size": len(current_scratchpad)})
1681
1859
 
1682
1860
  formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
1683
1861
  completed_tasks, current_task_index = set(), 0
1862
+ plan_revision_count = 0
1684
1863
 
1864
+ # Main reasoning loop with enhanced decision tracking
1685
1865
  for i in range(max_reasoning_steps):
1686
- step_desc = f"🤔 Working on: {execution_plan.tasks[current_task_index].description}" if current_task_index < len(execution_plan.tasks) else f"🤔 Analyzing next steps... ({i+1}/{max_reasoning_steps})"
1866
+ current_task_desc = execution_plan.tasks[current_task_index].description if current_task_index < len(execution_plan.tasks) else "Finalizing analysis"
1867
+ step_desc = f"🤔 Step {i+1}: {current_task_desc}"
1687
1868
  reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
1688
1869
 
1689
1870
  try:
1690
- if len(current_scratchpad) > 12000:
1691
- current_scratchpad = memory_manager.compress_scratchpad(current_scratchpad, original_user_prompt, 8000)
1871
+ # Enhanced scratchpad management
1872
+ if len(current_scratchpad) > max_scratchpad_size:
1873
+ log_event_fn(f"Scratchpad size ({len(current_scratchpad)}) exceeds limit, compressing...", MSG_TYPE.MSG_TYPE_INFO)
1874
+ current_scratchpad = _compress_scratchpad_intelligently(current_scratchpad, original_user_prompt, max_scratchpad_size // 2)
1875
+ log_event_fn(f"Scratchpad compressed to {len(current_scratchpad)} characters", MSG_TYPE.MSG_TYPE_INFO)
1692
1876
 
1693
- reasoning_prompt = f"""--- AVAILABLE ACTIONS ---\n{formatted_tools_list}\n--- YOUR INTERNAL SCRATCHPAD ---\n{current_scratchpad}\n--- END SCRATCHPAD ---\n
1694
- INSTRUCTIONS: Observe, think, and then act. Choose the single best next action to achieve: "{original_user_prompt}".
1695
- Produce ONLY this JSON: {{"thought": "short reasoning", "action": {{"tool_name": "...", "requires_code_input": false, "requires_image_input": false}}}}"""
1696
- decision_data = self.generate_structured_content(prompt=reasoning_prompt, schema={"thought": "string", "action": "object"}, system_prompt=reasoning_system_prompt, temperature=decision_temperature, **llm_generation_kwargs)
1877
+ # Enhanced reasoning prompt with better decision tracking
1878
+ reasoning_prompt = f"""You are working on: "{original_user_prompt}"
1879
+
1880
+ === AVAILABLE ACTIONS ===
1881
+ {formatted_tools_list}
1882
+
1883
+ === YOUR COMPLETE ANALYSIS HISTORY ===
1884
+ {current_scratchpad}
1885
+ === END ANALYSIS HISTORY ===
1886
+
1887
+ === DECISION GUIDELINES ===
1888
+ 1. **Review your progress**: Look at what you've already discovered and accomplished
1889
+ 2. **Consider your current task**: Focus on the next logical step in your plan
1890
+ 3. **Remember your decisions**: If you previously decided to use a tool, follow through unless you have a good reason to change
1891
+ 4. **Be adaptive**: If you discover new information that changes the situation, consider revising your plan
1892
+ 5. **Stay focused**: Each action should clearly advance toward the final goal
1893
+
1894
+ === YOUR NEXT DECISION ===
1895
+ Choose the single most appropriate action to take right now. Consider:
1896
+ - What specific step are you currently working on?
1897
+ - What information do you still need?
1898
+ - What would be most helpful for the user?
1899
+
1900
+ Provide your decision as JSON:
1901
+ {{
1902
+ "reasoning": "Explain your current thinking and why this action makes sense now",
1903
+ "action": {{
1904
+ "tool_name": "exact_tool_name",
1905
+ "requires_code_input": false,
1906
+ "requires_image_input": false,
1907
+ "confidence": 0.8
1908
+ }},
1909
+ "plan_status": "on_track" // or "needs_revision" if you want to change the plan
1910
+ }}"""
1911
+
1912
+ log_prompt_fn(f"Reasoning Prompt Step {i+1}", reasoning_prompt)
1913
+ decision_data = self.generate_structured_content(
1914
+ prompt=reasoning_prompt,
1915
+ schema={
1916
+ "reasoning": "string",
1917
+ "action": "object",
1918
+ "plan_status": "string"
1919
+ },
1920
+ system_prompt=reasoning_system_prompt,
1921
+ temperature=decision_temperature,
1922
+ **llm_generation_kwargs
1923
+ )
1697
1924
 
1698
1925
  if not (decision_data and isinstance(decision_data.get("action"), dict)):
1699
- log_event_fn("LLM failed to produce a valid action JSON.", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
1700
- current_scratchpad += "\n\n### Step Failure\n- Error: Invalid decision JSON from LLM."
1926
+ log_event_fn("⚠️ Invalid decision format from AI", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
1927
+ current_scratchpad += f"\n\n### Step {i+1}: Decision Error\n- Error: AI produced invalid decision JSON\n- Continuing with fallback approach"
1701
1928
  continue
1702
1929
 
1703
1930
  action = decision_data.get("action", {})
1704
- tool_name, requires_code, requires_image = action.get("tool_name"), action.get("requires_code_input", False), action.get("requires_image_input", False)
1705
- current_scratchpad += f"\n\n### Step {i+1}: Thought\n{decision_data.get('thought', '')}"
1931
+ reasoning = decision_data.get("reasoning", "No reasoning provided")
1932
+ plan_status = decision_data.get("plan_status", "on_track")
1933
+ tool_name = action.get("tool_name")
1934
+ requires_code = action.get("requires_code_input", False)
1935
+ requires_image = action.get("requires_image_input", False)
1936
+ confidence = action.get("confidence", 0.5)
1937
+
1938
+ # Track the decision
1939
+ decision_history.append({
1940
+ "step": i+1,
1941
+ "tool_name": tool_name,
1942
+ "reasoning": reasoning,
1943
+ "confidence": confidence,
1944
+ "plan_status": plan_status
1945
+ })
1946
+
1947
+ current_scratchpad += f"\n\n### Step {i+1}: Decision & Reasoning\n**Reasoning**: {reasoning}\n**Chosen Action**: {tool_name}\n**Confidence**: {confidence}\n**Plan Status**: {plan_status}"
1948
+
1949
+ log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP, meta={
1950
+ "tool_name": tool_name,
1951
+ "confidence": confidence,
1952
+ "reasoning": reasoning[:100] + "..." if len(reasoning) > 100 else reasoning
1953
+ })
1954
+
1955
+ # Handle plan revision
1956
+ if plan_status == "needs_revision" and tool_name != "local_tools::revise_plan":
1957
+ log_event_fn("🔄 AI indicates plan needs revision", MSG_TYPE.MSG_TYPE_INFO)
1958
+ tool_name = "local_tools::revise_plan" # Force plan revision
1959
+
1960
+ # Handle final answer
1961
+ if tool_name == "local_tools::final_answer":
1962
+ log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
1963
+ break
1964
+
1965
+ # Handle clarification request
1966
+ if tool_name == "local_tools::request_clarification":
1967
+ clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
1968
+
1969
+ CURRENT ANALYSIS:
1970
+ {current_scratchpad}
1971
+
1972
+ Generate a clear, specific question that will help you proceed effectively:"""
1973
+
1974
+ question = self.generate_text(clarification_prompt, temperature=0.3)
1975
+ question = self.remove_thinking_blocks(question)
1976
+
1977
+ log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
1978
+ return {
1979
+ "final_answer": question,
1980
+ "clarification_required": True,
1981
+ "final_scratchpad": current_scratchpad,
1982
+ "tool_calls": tool_calls_this_turn,
1983
+ "sources": sources_this_turn,
1984
+ "error": None,
1985
+ "decision_history": decision_history
1986
+ }
1706
1987
 
1707
- log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP)
1708
- if tool_name == "local_tools::final_answer": break
1988
+ # Handle final answer
1989
+ if tool_name == "local_tools::final_answer":
1990
+ log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
1991
+ break
1992
+
1993
+ # Handle clarification request
1709
1994
  if tool_name == "local_tools::request_clarification":
1710
- clarification_prompt = f"Based on your thought process, what is the single question you need to ask the user?\n\nSCRATCHPAD:\n{current_scratchpad}\n\nQUESTION:"
1711
- question = self.generate_text(clarification_prompt)
1712
- return {"final_answer": self.remove_thinking_blocks(question), "clarification_required": True, "final_scratchpad": current_scratchpad, "tool_calls": tool_calls_this_turn, "sources": sources_this_turn, "error": None}
1995
+ clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
1996
+
1997
+ CURRENT ANALYSIS:
1998
+ {current_scratchpad}
1999
+
2000
+ Generate a clear, specific question that will help you proceed effectively:"""
2001
+
2002
+ question = self.generate_text(clarification_prompt, temperature=0.3)
2003
+ question = self.remove_thinking_blocks(question)
2004
+
2005
+ log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
2006
+ return {
2007
+ "final_answer": question,
2008
+ "clarification_required": True,
2009
+ "final_scratchpad": current_scratchpad,
2010
+ "tool_calls": tool_calls_this_turn,
2011
+ "sources": sources_this_turn,
2012
+ "error": None,
2013
+ "decision_history": decision_history
2014
+ }
2015
+
2016
+ # Handle plan revision
2017
+ if tool_name == "local_tools::revise_plan":
2018
+ plan_revision_count += 1
2019
+ revision_id = log_event_fn(f"📝 Revising execution plan (revision #{plan_revision_count})", MSG_TYPE.MSG_TYPE_STEP_START)
2020
+
2021
+ try:
2022
+ revision_prompt = f"""Based on your current analysis and discoveries, create an updated execution plan.
2023
+
2024
+ ORIGINAL REQUEST: "{original_user_prompt}"
2025
+ CURRENT ANALYSIS:
2026
+ {current_scratchpad}
2027
+
2028
+ REASON FOR REVISION: {reasoning}
2029
+
2030
+ Create a new plan that reflects your current understanding. Consider:
2031
+ 1. What have you already accomplished?
2032
+ 2. What new information have you discovered?
2033
+ 3. What steps are still needed?
2034
+ 4. How can you be more efficient?
2035
+
2036
+ Provide your revision as JSON:
2037
+ {{
2038
+ "revision_reason": "Clear explanation of why the plan needed to change",
2039
+ "new_plan": [
2040
+ {{"step": 1, "description": "First revised step", "status": "pending"}},
2041
+ {{"step": 2, "description": "Second revised step", "status": "pending"}}
2042
+ ],
2043
+ "confidence": 0.8
2044
+ }}"""
2045
+
2046
+ revision_data = self.generate_structured_content(
2047
+ prompt=revision_prompt,
2048
+ schema={
2049
+ "revision_reason": "string",
2050
+ "new_plan": "array",
2051
+ "confidence": "number"
2052
+ },
2053
+ temperature=0.3,
2054
+ **llm_generation_kwargs
2055
+ )
2056
+
2057
+ if revision_data and revision_data.get("new_plan"):
2058
+ # Update the plan
2059
+ current_plan_version += 1
2060
+ new_tasks = []
2061
+ for task_data in revision_data["new_plan"]:
2062
+ task = TaskDecomposition() # Assuming this class exists
2063
+ task.description = task_data.get("description", "Undefined step")
2064
+ task.status = TaskStatus.PENDING # Reset all to pending
2065
+ new_tasks.append(task)
2066
+
2067
+ execution_plan.tasks = new_tasks
2068
+ current_task_index = 0 # Reset to beginning
2069
+
2070
+ # Update scratchpad with new plan
2071
+ current_scratchpad += f"\n\n### Updated Plan (Version {current_plan_version})\n"
2072
+ current_scratchpad += f"**Revision Reason**: {revision_data.get('revision_reason', 'Plan needed updating')}\n"
2073
+ current_scratchpad += f"**New Tasks**:\n"
2074
+ for i, task in enumerate(execution_plan.tasks):
2075
+ current_scratchpad += f" {i+1}. {task.description}\n"
2076
+
2077
+ log_event_fn(f"✅ Plan revised with {len(execution_plan.tasks)} updated tasks", MSG_TYPE.MSG_TYPE_STEP_END, event_id=revision_id, meta={
2078
+ "plan_version": current_plan_version,
2079
+ "new_task_count": len(execution_plan.tasks),
2080
+ "revision_reason": revision_data.get("revision_reason", "")
2081
+ })
2082
+
2083
+ # Continue with the new plan
2084
+ continue
2085
+ else:
2086
+ raise ValueError("Failed to generate valid plan revision")
2087
+
2088
+ except Exception as e:
2089
+ log_event_fn(f"Plan revision failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=revision_id)
2090
+ current_scratchpad += f"\n**Plan Revision Failed**: {str(e)}\nContinuing with original plan."
1713
2091
 
2092
+ # Prepare parameters for tool execution
1714
2093
  param_assets = {}
1715
2094
  if requires_code:
1716
- code_prompt = f"Generate only the raw code required for the current step.\n\nSCRATCHPAD:\n{current_scratchpad}\n\nCODE:"
2095
+ log_event_fn("💻 Generating code for task", MSG_TYPE.MSG_TYPE_INFO)
2096
+ code_prompt = f"""Generate the specific code needed for the current step.
2097
+
2098
+ CURRENT CONTEXT:
2099
+ {current_scratchpad}
2100
+
2101
+ CURRENT TASK: {tool_name}
2102
+ USER REQUEST: "{original_user_prompt}"
2103
+
2104
+ Generate clean, functional code that addresses the specific requirements. Focus on:
2105
+ 1. Solving the immediate problem
2106
+ 2. Being clear and readable
2107
+ 3. Including necessary imports and dependencies
2108
+ 4. Adding helpful comments where appropriate
2109
+
2110
+ CODE:"""
2111
+
1717
2112
  code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
1718
2113
  code_uuid = f"code_asset_{uuid.uuid4()}"
1719
2114
  asset_store[code_uuid] = {"type": "code", "content": code_content}
1720
2115
  param_assets['code_asset_id'] = code_uuid
1721
- log_event_fn("Code asset generated.", MSG_TYPE.MSG_TYPE_STEP)
2116
+ log_event_fn(f"Code asset created: {code_uuid[:8]}...", MSG_TYPE.MSG_TYPE_INFO)
2117
+
1722
2118
  if requires_image:
1723
2119
  image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
1724
2120
  if image_assets:
1725
2121
  param_assets['image_asset_id'] = image_assets[0]
2122
+ log_event_fn(f"Using image asset: {image_assets[0][:8]}...", MSG_TYPE.MSG_TYPE_INFO)
2123
+ else:
2124
+ log_event_fn("⚠️ Image required but none available", MSG_TYPE.MSG_TYPE_WARNING)
2125
+
2126
+ # Enhanced parameter generation
2127
+ param_prompt = f"""Generate the optimal parameters for this tool execution.
2128
+
2129
+ TOOL: {tool_name}
2130
+ CURRENT CONTEXT: {current_scratchpad}
2131
+ CURRENT REASONING: {reasoning}
2132
+ AVAILABLE ASSETS: {json.dumps(param_assets) if param_assets else "None"}
1726
2133
 
1727
- param_prompt = f"""Fill the parameters for the tool: '{tool_name}'. Available assets: {json.dumps(param_assets)}.
1728
- SCRATCHPAD:\n{current_scratchpad}\n
1729
- Output only: {{"tool_params": {{...}}}}"""
1730
- param_data = self.generate_structured_content(prompt=param_prompt, schema={"tool_params": "object"}, temperature=decision_temperature, **llm_generation_kwargs)
2134
+ Based on your analysis and the current step you're working on, provide the most appropriate parameters.
2135
+ Be specific and purposeful in your parameter choices.
2136
+
2137
+ Output format: {{"tool_params": {{...}}}}"""
2138
+
2139
+ log_prompt_fn(f"Parameter Generation Step {i+1}", param_prompt)
2140
+ param_data = self.generate_structured_content(
2141
+ prompt=param_prompt,
2142
+ schema={"tool_params": "object"},
2143
+ temperature=decision_temperature,
2144
+ **llm_generation_kwargs
2145
+ )
1731
2146
  tool_params = param_data.get("tool_params", {}) if param_data else {}
1732
2147
 
2148
+ current_scratchpad += f"\n**Parameters Generated**: {json.dumps(tool_params, indent=2)}"
2149
+
2150
+ # Hydrate parameters with assets
1733
2151
  def _hydrate(data: Any, store: Dict) -> Any:
1734
2152
  if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
1735
2153
  if isinstance(data, list): return [_hydrate(item, store) for item in data]
1736
2154
  if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
1737
2155
  return data
2156
+
1738
2157
  hydrated_params = _hydrate(tool_params, asset_store)
1739
2158
 
1740
- start_time, tool_result = time.time(), {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
2159
+ # Execute the tool with detailed logging
2160
+ start_time = time.time()
2161
+ tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
2162
+
1741
2163
  try:
1742
2164
  if tool_name in rag_registry:
1743
2165
  query = hydrated_params.get("query", "")
1744
- top_k, min_sim = rag_tool_specs[tool_name]["default_top_k"], rag_tool_specs[tool_name]["default_min_sim"]
2166
+ if not query:
2167
+ # Fall back to using reasoning as query
2168
+ query = reasoning[:200] + "..." if len(reasoning) > 200 else reasoning
2169
+
2170
+ log_event_fn(f"🔍 Searching knowledge base with query: '{query[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
2171
+
2172
+ top_k = rag_tool_specs[tool_name]["default_top_k"]
2173
+ min_sim = rag_tool_specs[tool_name]["default_min_sim"]
2174
+
1745
2175
  raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
1746
2176
  raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
1747
- docs = [{"text": d.get("text", str(d)), "score": d.get("score", 0)*100, "metadata": d.get("metadata", {})} for d in raw_iter or []]
2177
+
2178
+ docs = []
2179
+ for d in raw_iter or []:
2180
+ doc_data = {
2181
+ "text": d.get("text", str(d)),
2182
+ "score": d.get("score", 0) * 100,
2183
+ "metadata": d.get("metadata", {})
2184
+ }
2185
+ docs.append(doc_data)
2186
+
1748
2187
  kept = [x for x in docs if x['score'] >= min_sim]
1749
- tool_result = {"status": "success", "results": kept, "dropped": len(docs) - len(kept)}
1750
- sources_this_turn.extend([{"source": tool_name, "metadata": x["metadata"], "score": x["score"]} for x in kept])
1751
- elif hasattr(self, "mcp"):
2188
+ tool_result = {
2189
+ "status": "success",
2190
+ "results": kept,
2191
+ "total_found": len(docs),
2192
+ "kept_after_filtering": len(kept),
2193
+ "query_used": query
2194
+ }
2195
+
2196
+ sources_this_turn.extend([{
2197
+ "source": tool_name,
2198
+ "metadata": x["metadata"],
2199
+ "score": x["score"]
2200
+ } for x in kept])
2201
+
2202
+ log_event_fn(f"📚 Retrieved {len(kept)} relevant documents (from {len(docs)} total)", MSG_TYPE.MSG_TYPE_INFO)
2203
+
2204
+ elif hasattr(self, "mcp") and "local_tools" not in tool_name:
2205
+ log_event_fn(f"🔧 Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={
2206
+ "tool_name": tool_name,
2207
+ "params": {k: str(v)[:100] for k, v in hydrated_params.items()} # Truncate for logging
2208
+ })
2209
+
1752
2210
  tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
2211
+
2212
+ log_event_fn(f"Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={
2213
+ "result_status": tool_result.get("status", "unknown"),
2214
+ "has_error": "error" in tool_result
2215
+ })
2216
+
2217
+ elif tool_name == "local_tools::generate_image" and hasattr(self, "tti"):
2218
+ image_prompt = hydrated_params.get("prompt", "")
2219
+ log_event_fn(f"🎨 Generating image with prompt: '{image_prompt[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
2220
+
2221
+ # This would call your text-to-image functionality
2222
+ image_result = self.tti.generate_image(image_prompt) # Assuming this method exists
2223
+ if image_result:
2224
+ image_uuid = f"generated_image_{uuid.uuid4()}"
2225
+ asset_store[image_uuid] = {"type": "image", "content": image_result, "source": "generated"}
2226
+ tool_result = {"status": "success", "image_id": image_uuid, "prompt_used": image_prompt}
2227
+ else:
2228
+ tool_result = {"status": "failure", "error": "Image generation failed"}
2229
+
2230
+ else:
2231
+ tool_result = {"status": "failure", "error": f"Tool '{tool_name}' is not available or supported in this context."}
2232
+
1753
2233
  except Exception as e:
1754
- error_msg = f"Exception during '{tool_name}' execution: {e}"
2234
+ error_msg = f"Exception during '{tool_name}' execution: {str(e)}"
1755
2235
  log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
1756
2236
  tool_result = {"status": "failure", "error": error_msg}
1757
2237
 
1758
2238
  response_time = time.time() - start_time
1759
2239
  success = tool_result.get("status") == "success"
1760
- performance_tracker.record_tool_usage(tool_name, success, 0.8, response_time, tool_result.get("error"))
1761
2240
 
2241
+ # Record performance
2242
+ performance_tracker.record_tool_usage(tool_name, success, confidence, response_time, tool_result.get("error"))
2243
+
2244
+ # Update task status
1762
2245
  if success and current_task_index < len(execution_plan.tasks):
1763
2246
  execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
2247
+ completed_tasks.add(current_task_index)
1764
2248
  current_task_index += 1
1765
2249
 
1766
- observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
1767
- tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result, "response_time": response_time})
1768
- current_scratchpad += f"\n\n### Step {i+1}: Observation\n- Action: `{tool_name}`\n- Result:\n{observation_text}"
2250
+ # Enhanced observation logging
2251
+ observation_text = json.dumps(tool_result, indent=2)
2252
+ if len(observation_text) > 1000:
2253
+ # Truncate very long results for scratchpad
2254
+ truncated_result = {k: (str(v)[:200] + "..." if len(str(v)) > 200 else v) for k, v in tool_result.items()}
2255
+ observation_text = json.dumps(truncated_result, indent=2)
2256
+
2257
+ current_scratchpad += f"\n\n### Step {i+1}: Execution & Observation\n"
2258
+ current_scratchpad += f"**Tool Used**: {tool_name}\n"
2259
+ current_scratchpad += f"**Success**: {success}\n"
2260
+ current_scratchpad += f"**Response Time**: {response_time:.2f}s\n"
2261
+ current_scratchpad += f"**Result**:\n```json\n{observation_text}\n```"
2262
+
2263
+ # Track tool call
2264
+ tool_calls_this_turn.append({
2265
+ "name": tool_name,
2266
+ "params": tool_params,
2267
+ "result": tool_result,
2268
+ "response_time": response_time,
2269
+ "confidence": confidence,
2270
+ "reasoning": reasoning
2271
+ })
1769
2272
 
1770
2273
  if success:
1771
- log_event_fn(f"✅ Step completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
2274
+ log_event_fn(f"✅ Step {i+1} completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
2275
+ "tool_name": tool_name,
2276
+ "response_time": response_time,
2277
+ "confidence": confidence
2278
+ })
1772
2279
  else:
1773
2280
  error_detail = tool_result.get("error", "No error detail provided.")
1774
- log_event_fn(f"Tool reported failure: {error_detail}", MSG_TYPE.MSG_TYPE_WARNING)
1775
- log_event_fn(f"⚠️ Step completed with issues", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={"error": error_detail})
2281
+ log_event_fn(f"⚠️ Step {i+1} completed with issues: {error_detail}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
2282
+ "tool_name": tool_name,
2283
+ "error": error_detail,
2284
+ "confidence": confidence
2285
+ })
2286
+
2287
+ # Add failure handling to scratchpad
2288
+ current_scratchpad += f"\n**Failure Analysis**: {error_detail}"
2289
+ current_scratchpad += f"\n**Next Steps**: Consider alternative approaches or tools"
1776
2290
 
1777
- if len(completed_tasks) == len(execution_plan.tasks): break
2291
+ # Log current progress
2292
+ completed_count = len(completed_tasks)
2293
+ total_tasks = len(execution_plan.tasks)
2294
+ if total_tasks > 0:
2295
+ progress = (completed_count / total_tasks) * 100
2296
+ log_event_fn(f"Progress: {completed_count}/{total_tasks} tasks completed ({progress:.1f}%)", MSG_TYPE.MSG_TYPE_STEP_PROGRESS, meta={"progress": progress})
2297
+
2298
+ # Check if all tasks are completed
2299
+ if completed_count >= total_tasks:
2300
+ log_event_fn("🎯 All planned tasks completed", MSG_TYPE.MSG_TYPE_INFO)
2301
+ break
1778
2302
 
1779
2303
  except Exception as ex:
1780
- log_event_fn(f"An unexpected error occurred in reasoning loop: {ex}", MSG_TYPE.MSG_TYPE_EXCEPTION, event_id=reasoning_step_id)
2304
+ log_event_fn(f"💥 Unexpected error in reasoning step {i+1}: {str(ex)}", MSG_TYPE.MSG_TYPE_ERROR, event_id=reasoning_step_id)
1781
2305
  trace_exception(ex)
1782
- log_event_fn("⚠️ Encountered an issue, adjusting approach...", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
2306
+
2307
+ # Add error to scratchpad for context
2308
+ current_scratchpad += f"\n\n### Step {i+1}: Unexpected Error\n**Error**: {str(ex)}\n**Recovery**: Continuing with adjusted approach"
2309
+
2310
+ log_event_fn("🔄 Recovering and continuing with next step", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
1783
2311
 
1784
- if enable_self_reflection and len(tool_calls_this_turn) > 1:
1785
- reflection_id = log_event_fn("🤔 Reviewing my work...", MSG_TYPE.MSG_TYPE_STEP_START)
2312
+ # Enhanced self-reflection
2313
+ if enable_self_reflection and len(tool_calls_this_turn) > 0:
2314
+ reflection_id = log_event_fn("🤔 Conducting comprehensive self-assessment...", MSG_TYPE.MSG_TYPE_STEP_START)
1786
2315
  try:
1787
- reflection_prompt = f"""Review the user request and your work. Was the goal achieved effectively?
1788
- REQUEST: "{original_user_prompt}"
1789
- SCRATCHPAD:\n{current_scratchpad}\n
1790
- JSON assessment: {{"goal_achieved": true, "effectiveness_score": 0.8, "summary": "..."}}"""
1791
- reflection_data = self.generate_structured_content(prompt=reflection_prompt, schema={"goal_achieved": "boolean", "effectiveness_score": "number", "summary": "string"}, temperature=0.3, **llm_generation_kwargs)
1792
- if reflection_data: current_scratchpad += f"\n\n### Self-Reflection\n- Goal Achieved: {reflection_data.get('goal_achieved')}\n- Effectiveness: {reflection_data.get('effectiveness_score')}"
1793
- log_event_fn("✅ Quality check completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id)
2316
+ reflection_prompt = f"""Conduct a thorough review of your work and assess the quality of your response to the user's request.
2317
+
2318
+ ORIGINAL REQUEST: "{original_user_prompt}"
2319
+ TOOLS USED: {len(tool_calls_this_turn)}
2320
+ PLAN REVISIONS: {plan_revision_count}
2321
+
2322
+ COMPLETE ANALYSIS:
2323
+ {current_scratchpad}
2324
+
2325
+ Evaluate your performance on multiple dimensions:
2326
+
2327
+ 1. **Goal Achievement**: Did you fully address the user's request?
2328
+ 2. **Process Efficiency**: Was your approach optimal given the available tools?
2329
+ 3. **Information Quality**: Is the information you gathered accurate and relevant?
2330
+ 4. **Decision Making**: Were your tool choices and parameters appropriate?
2331
+ 5. **Adaptability**: How well did you handle unexpected results or plan changes?
2332
+
2333
+ Provide your assessment as JSON:
2334
+ {{
2335
+ "goal_achieved": true,
2336
+ "effectiveness_score": 0.85,
2337
+ "process_efficiency": 0.8,
2338
+ "information_quality": 0.9,
2339
+ "decision_making": 0.85,
2340
+ "adaptability": 0.7,
2341
+ "overall_confidence": 0.82,
2342
+ "strengths": ["Clear reasoning", "Good tool selection"],
2343
+ "areas_for_improvement": ["Could have been more efficient"],
2344
+ "summary": "Successfully completed the user's request with high quality results",
2345
+ "key_insights": ["Discovered that X was more important than initially thought"]
2346
+ }}"""
2347
+
2348
+ reflection_data = self.generate_structured_content(
2349
+ prompt=reflection_prompt,
2350
+ schema={
2351
+ "goal_achieved": "boolean",
2352
+ "effectiveness_score": "number",
2353
+ "process_efficiency": "number",
2354
+ "information_quality": "number",
2355
+ "decision_making": "number",
2356
+ "adaptability": "number",
2357
+ "overall_confidence": "number",
2358
+ "strengths": "array",
2359
+ "areas_for_improvement": "array",
2360
+ "summary": "string",
2361
+ "key_insights": "array"
2362
+ },
2363
+ temperature=0.3,
2364
+ **llm_generation_kwargs
2365
+ )
2366
+
2367
+ if reflection_data:
2368
+ current_scratchpad += f"\n\n### Comprehensive Self-Assessment\n"
2369
+ current_scratchpad += f"**Goal Achieved**: {reflection_data.get('goal_achieved', False)}\n"
2370
+ current_scratchpad += f"**Overall Confidence**: {reflection_data.get('overall_confidence', 0.5):.2f}\n"
2371
+ current_scratchpad += f"**Effectiveness Score**: {reflection_data.get('effectiveness_score', 0.5):.2f}\n"
2372
+ current_scratchpad += f"**Key Strengths**: {', '.join(reflection_data.get('strengths', []))}\n"
2373
+ current_scratchpad += f"**Improvement Areas**: {', '.join(reflection_data.get('areas_for_improvement', []))}\n"
2374
+ current_scratchpad += f"**Summary**: {reflection_data.get('summary', '')}\n"
2375
+
2376
+ log_event_fn(f"✅ Self-assessment completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id, meta={
2377
+ "overall_confidence": reflection_data.get('overall_confidence', 0.5),
2378
+ "goal_achieved": reflection_data.get('goal_achieved', False),
2379
+ "effectiveness_score": reflection_data.get('effectiveness_score', 0.5)
2380
+ })
2381
+ else:
2382
+ log_event_fn("Self-assessment data generation failed", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
2383
+
1794
2384
  except Exception as e:
1795
- log_event_fn(f"Self-review failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
2385
+ log_event_fn(f"Self-assessment failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
1796
2386
 
1797
- synthesis_id = log_event_fn("📝 Preparing your complete answer...", MSG_TYPE.MSG_TYPE_STEP_START)
1798
- final_answer_prompt = f"""Synthesize a comprehensive, user-friendly final answer based on your complete analysis.
1799
- USER REQUEST: "{original_user_prompt}"
1800
- FULL SCRATCHPAD:\n{current_scratchpad}\n---
1801
- FINAL ANSWER:"""
2387
+ # Enhanced final synthesis
2388
+ synthesis_id = log_event_fn("📝 Synthesizing comprehensive final response...", MSG_TYPE.MSG_TYPE_STEP_START)
2389
+
2390
+ final_answer_prompt = f"""Create a comprehensive, well-structured final response that fully addresses the user's request.
2391
+
2392
+ ORIGINAL REQUEST: "{original_user_prompt}"
2393
+ CONTEXT: {context or "No additional context"}
2394
+
2395
+ COMPLETE ANALYSIS AND WORK:
2396
+ {current_scratchpad}
2397
+
2398
+ GUIDELINES for your response:
2399
+ 1. **Be Complete**: Address all aspects of the user's request
2400
+ 2. **Be Clear**: Organize your response logically and use clear language
2401
+ 3. **Be Helpful**: Provide actionable information and insights
2402
+ 4. **Be Honest**: If there were limitations or uncertainties, mention them appropriately
2403
+ 5. **Be Concise**: While being thorough, avoid unnecessary verbosity
2404
+ 6. **Cite Sources**: If you used research tools, reference the information appropriately
2405
+
2406
+ Your response should feel natural and conversational while being informative and valuable.
2407
+
2408
+ FINAL RESPONSE:"""
2409
+
2410
+ log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)
2411
+
2412
+ final_answer_text = self.generate_text(
2413
+ prompt=final_answer_prompt,
2414
+ system_prompt=system_prompt,
2415
+ stream=streaming_callback is not None,
2416
+ streaming_callback=streaming_callback,
2417
+ temperature=final_answer_temperature,
2418
+ **llm_generation_kwargs
2419
+ )
1802
2420
 
1803
- final_answer_text = self.generate_text(prompt=final_answer_prompt, system_prompt=system_prompt, stream=streaming_callback is not None, streaming_callback=streaming_callback, temperature=final_answer_temperature, **llm_generation_kwargs)
1804
2421
  if isinstance(final_answer_text, dict) and "error" in final_answer_text:
1805
- return {"final_answer": "", "error": final_answer_text["error"], "final_scratchpad": current_scratchpad}
2422
+ log_event_fn(f"Final synthesis failed: {final_answer_text['error']}", MSG_TYPE.MSG_TYPE_ERROR, event_id=synthesis_id)
2423
+ return {
2424
+ "final_answer": "I encountered an issue while preparing my final response. Please let me know if you'd like me to try again.",
2425
+ "error": final_answer_text["error"],
2426
+ "final_scratchpad": current_scratchpad,
2427
+ "tool_calls": tool_calls_this_turn,
2428
+ "sources": sources_this_turn,
2429
+ "decision_history": decision_history
2430
+ }
1806
2431
 
1807
2432
  final_answer = self.remove_thinking_blocks(final_answer_text)
1808
- log_event_fn("✅ Answer ready!", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id)
2433
+
2434
+ # Calculate overall performance metrics
2435
+ overall_confidence = sum(call.get('confidence', 0.5) for call in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
2436
+ successful_calls = sum(1 for call in tool_calls_this_turn if call.get('result', {}).get('status') == 'success')
2437
+ success_rate = successful_calls / max(len(tool_calls_this_turn), 1)
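+ # max(len(...), 1) guards both divisions when no tools were called this turn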
2438
+
2439
+ log_event_fn("✅ Comprehensive response ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id, meta={
2440
+ "final_answer_length": len(final_answer),
2441
+ "total_tools_used": len(tool_calls_this_turn),
2442
+ "success_rate": success_rate,
2443
+ "overall_confidence": overall_confidence
2444
+ })
1809
2445
 
1810
- overall_confidence = sum(c.get('confidence', 0.5) for c in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
1811
2446
  return {
1812
- "final_answer": final_answer, "final_scratchpad": current_scratchpad,
1813
- "tool_calls": tool_calls_this_turn, "sources": sources_this_turn,
1814
- "performance_stats": {"total_steps": len(tool_calls_this_turn), "average_confidence": overall_confidence},
1815
- "clarification_required": False, "overall_confidence": overall_confidence, "error": None
2447
+ "final_answer": final_answer,
2448
+ "final_scratchpad": current_scratchpad,
2449
+ "tool_calls": tool_calls_this_turn,
2450
+ "sources": sources_this_turn,
2451
+ "decision_history": decision_history,
2452
+ "performance_stats": {
2453
+ "total_steps": len(tool_calls_this_turn),
2454
+ "successful_steps": successful_calls,
2455
+ "success_rate": success_rate,
2456
+ "average_confidence": overall_confidence,
2457
+ "plan_revisions": plan_revision_count,
2458
+ "total_reasoning_steps": len(decision_history)
2459
+ },
2460
+ "plan_evolution": {
2461
+ "initial_tasks": len(execution_plan.tasks),
2462
+ "final_version": current_plan_version,
2463
+ "total_revisions": plan_revision_count
2464
+ },
2465
+ "clarification_required": False,
2466
+ "overall_confidence": overall_confidence,
2467
+ "error": None
2468
+ }
2469
+
2470
+
3204
+ def _execute_complex_reasoning_loop(
3205
+ self, prompt, context, system_prompt, reasoning_system_prompt, images,
3206
+ max_reasoning_steps, decision_temperature, final_answer_temperature,
3207
+ streaming_callback, debug, enable_self_reflection, all_visible_tools,
3208
+ rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, max_scratchpad_size, **llm_generation_kwargs
3209
+ ) -> Dict[str, Any]:
3210
+
3211
+ planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
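+ # TaskPlanner, MemoryManager and ToolPerformanceTracker are assumed to be helper
+ # classes defined elsewhere in this module: the planner decomposes the request
+ # into tasks and the tracker records per-tool success/latency statistics.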
3212
+
3213
+ def _get_friendly_action_description(tool_name, requires_code, requires_image):
3214
+ descriptions = {
3215
+ "local_tools::final_answer": "📋 Preparing final answer",
3216
+ "local_tools::request_clarification": "❓ Requesting clarification",
3217
+ "local_tools::generate_image": "🎨 Creating image",
3218
+ "local_tools::revise_plan": "📝 Revising execution plan"
3219
+ }
3220
+ if tool_name in descriptions:
3221
+ return descriptions[tool_name]
3222
+ if "research::" in tool_name:
3223
+ return f"🔍 Searching {tool_name.split('::')[-1]} knowledge base"
3224
+ if requires_code:
3225
+ return "💻 Processing code"
3226
+ if requires_image:
3227
+ return "🖼️ Analyzing images"
3228
+ return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
3229
+
3230
+ def _compress_scratchpad_intelligently(scratchpad: str, original_request: str, target_size: int) -> str:
3231
+ """Enhanced scratchpad compression that preserves key decisions and recent context"""
3232
+ if len(scratchpad) <= target_size:
3233
+ return scratchpad
3234
+
3235
+ log_event_fn("📝 Compressing scratchpad to maintain focus...", MSG_TYPE.MSG_TYPE_INFO)
3236
+
3237
+ # Extract key components
3238
+ lines = scratchpad.split('\n')
3239
+ plan_section = []
3240
+ decisions = []
3241
+ recent_observations = []
3242
+
3243
+ current_section = None
3244
+ for i, line in enumerate(lines):
3245
+ if "### Execution Plan" in line or "### Updated Plan" in line:
3246
+ current_section = "plan"
3247
+ elif "### Step" in line and ("Thought" in line or "Decision" in line):
3248
+ current_section = "decision"
3249
+ elif "### Step" in line and "Observation" in line:
3250
+ current_section = "observation"
3251
+ elif line.startswith("###"):
3252
+ current_section = None
3253
+
3254
+ if current_section == "plan" and line.strip():
3255
+ plan_section.append(line)
3256
+ elif current_section == "decision" and line.strip():
3257
+ decisions.append((i, line))
3258
+ elif current_section == "observation" and line.strip():
3259
+ recent_observations.append((i, line))
3260
+
3261
+ # Keep most recent items and important decisions
3262
+ recent_decisions = decisions[-3:] if len(decisions) > 3 else decisions
3263
+ recent_obs = recent_observations[-5:] if len(recent_observations) > 5 else recent_observations
3264
+
3265
+ compressed_parts = [
3266
+ f"### Original Request\n{original_request}",
3267
+ f"### Current Plan\n" + '\n'.join(plan_section[-10:]),
3268
+ f"### Recent Key Decisions"
3269
+ ]
3270
+
3271
+ for _, decision in recent_decisions:
3272
+ compressed_parts.append(decision)
3273
+
3274
+ compressed_parts.append("### Recent Observations")
3275
+ for _, obs in recent_obs:
3276
+ compressed_parts.append(obs)
3277
+
3278
+ compressed = '\n'.join(compressed_parts)
3279
+ if len(compressed) > target_size:
3280
+ # Final trim if still too long
3281
+ compressed = compressed[:target_size-100] + "\n...[content compressed for focus]"
3282
+
3283
+ return compressed
3284
+
3285
+ original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
3286
+ asset_store: Dict[str, Dict] = {}
3287
+ decision_history = [] # Track all decisions made
3288
+
3289
+ # Enhanced planning phase
3290
+ planning_step_id = log_event_fn("📋 Creating adaptive execution plan...", MSG_TYPE.MSG_TYPE_STEP_START)
3291
+ execution_plan = planner.decompose_task(original_user_prompt, context or "")
3292
+ current_plan_version = 1
3293
+
3294
+ log_event_fn(f"Initial plan created with {len(execution_plan.tasks)} tasks", MSG_TYPE.MSG_TYPE_INFO, meta={
3295
+ "plan_version": current_plan_version,
3296
+ "total_tasks": len(execution_plan.tasks),
3297
+ "estimated_complexity": "medium" if len(execution_plan.tasks) <= 5 else "high"
3298
+ })
3299
+
3300
+ for i, task in enumerate(execution_plan.tasks):
3301
+ log_event_fn(f"Task {i+1}: {task.description}", MSG_TYPE.MSG_TYPE_INFO)
3302
+
3303
+ log_event_fn("✅ Adaptive plan ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
3304
+
3305
+ # Enhanced initial state
3306
+ initial_state_parts = [
3307
+ f"### Original User Request\n{original_user_prompt}",
3308
+ f"### Context\n{context or 'No additional context provided'}",
3309
+ f"### Execution Plan (Version {current_plan_version})\n- Total tasks: {len(execution_plan.tasks)}",
3310
+ f"- Estimated complexity: {'High' if len(execution_plan.tasks) > 5 else 'Medium'}"
3311
+ ]
3312
+
3313
+ for i, task in enumerate(execution_plan.tasks):
3314
+ initial_state_parts.append(f" {i+1}. {task.description} [Status: {task.status.value}]")
3315
+
3316
+ if images:
3317
+ initial_state_parts.append(f"### Provided Assets")
3318
+ for img_b64 in images:
3319
+ img_uuid = str(uuid.uuid4())
3320
+ asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
3321
+ initial_state_parts.append(f"- Image asset: {img_uuid}")
3322
+
3323
+ current_scratchpad = "\n".join(initial_state_parts)
3324
+ log_event_fn("Initial analysis complete", MSG_TYPE.MSG_TYPE_SCRATCHPAD, meta={"scratchpad_size": len(current_scratchpad)})
3325
+
3326
+ formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
3327
+ completed_tasks, current_task_index = set(), 0
3328
+ plan_revision_count = 0
3329
+
3330
+ # Main reasoning loop with enhanced decision tracking
3331
+ for i in range(max_reasoning_steps):
3332
+ current_task_desc = execution_plan.tasks[current_task_index].description if current_task_index < len(execution_plan.tasks) else "Finalizing analysis"
3333
+ step_desc = f"🤔 Step {i+1}: {current_task_desc}"
3334
+ reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
3335
+
3336
+ try:
3337
+ # Enhanced scratchpad management
3338
+ if len(current_scratchpad) > max_scratchpad_size:
3339
+ log_event_fn(f"Scratchpad size ({len(current_scratchpad)}) exceeds limit, compressing...", MSG_TYPE.MSG_TYPE_INFO)
3340
+ current_scratchpad = _compress_scratchpad_intelligently(current_scratchpad, original_user_prompt, max_scratchpad_size // 2)
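+ # Compressing to half the limit leaves headroom for several more steps
+ # before another compression pass is needed.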
3341
+ log_event_fn(f"Scratchpad compressed to {len(current_scratchpad)} characters", MSG_TYPE.MSG_TYPE_INFO)
3342
+
3343
+ # Enhanced reasoning prompt with better decision tracking
3344
+ reasoning_prompt = f"""You are working on: "{original_user_prompt}"
3345
+
3346
+ === AVAILABLE ACTIONS ===
3347
+ {formatted_tools_list}
3348
+
3349
+ === YOUR COMPLETE ANALYSIS HISTORY ===
3350
+ {current_scratchpad}
3351
+ === END ANALYSIS HISTORY ===
3352
+
3353
+ === DECISION GUIDELINES ===
3354
+ 1. **Review your progress**: Look at what you've already discovered and accomplished
3355
+ 2. **Consider your current task**: Focus on the next logical step in your plan
3356
+ 3. **Remember your decisions**: If you previously decided to use a tool, follow through unless you have a good reason to change
3357
+ 4. **Be adaptive**: If you discover new information that changes the situation, consider revising your plan
3358
+ 5. **Stay focused**: Each action should clearly advance toward the final goal
3359
+
3360
+ === YOUR NEXT DECISION ===
3361
+ Choose the single most appropriate action to take right now. Consider:
3362
+ - What specific step are you currently working on?
3363
+ - What information do you still need?
3364
+ - What would be most helpful for the user?
3365
+
3366
+ Provide your decision as JSON:
3367
+ {{
3368
+ "reasoning": "Explain your current thinking and why this action makes sense now",
3369
+ "action": {{
3370
+ "tool_name": "exact_tool_name",
3371
+ "requires_code_input": false,
3372
+ "requires_image_input": false,
3373
+ "confidence": 0.8
3374
+ }},
3375
+ "plan_status": "on_track" // or "needs_revision" if you want to change the plan
3376
+ }}"""
3377
+
3378
+ log_prompt_fn(f"Reasoning Prompt Step {i+1}", reasoning_prompt)
3379
+ decision_data = self.generate_structured_content(
3380
+ prompt=reasoning_prompt,
3381
+ schema={
3382
+ "reasoning": "string",
3383
+ "action": "object",
3384
+ "plan_status": "string"
3385
+ },
3386
+ system_prompt=reasoning_system_prompt,
3387
+ temperature=decision_temperature,
3388
+ **llm_generation_kwargs
3389
+ )
3390
+
3391
+ if not (decision_data and isinstance(decision_data.get("action"), dict)):
3392
+ log_event_fn("⚠️ Invalid decision format from AI", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
3393
+ current_scratchpad += f"\n\n### Step {i+1}: Decision Error\n- Error: AI produced invalid decision JSON\n- Continuing with fallback approach"
3394
+ continue
3395
+
3396
+ action = decision_data.get("action", {})
3397
+ reasoning = decision_data.get("reasoning", "No reasoning provided")
3398
+ plan_status = decision_data.get("plan_status", "on_track")
3399
+ tool_name = action.get("tool_name")
3400
+ requires_code = action.get("requires_code_input", False)
3401
+ requires_image = action.get("requires_image_input", False)
3402
+ confidence = action.get("confidence", 0.5)
3403
+
3404
+ # Track the decision
3405
+ decision_history.append({
3406
+ "step": i+1,
3407
+ "tool_name": tool_name,
3408
+ "reasoning": reasoning,
3409
+ "confidence": confidence,
3410
+ "plan_status": plan_status
3411
+ })
3412
+
3413
+ current_scratchpad += f"\n\n### Step {i+1}: Decision & Reasoning\n**Reasoning**: {reasoning}\n**Chosen Action**: {tool_name}\n**Confidence**: {confidence}\n**Plan Status**: {plan_status}"
3414
+
3415
+ log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP, meta={
3416
+ "tool_name": tool_name,
3417
+ "confidence": confidence,
3418
+ "reasoning": reasoning[:100] + "..." if len(reasoning) > 100 else reasoning
3419
+ })
3420
+
3421
+ # Escalate to a plan revision when the model reports "needs_revision"
3422
+ if plan_status == "needs_revision" and tool_name != "local_tools::revise_plan":
3423
+ log_event_fn("🔄 AI indicates plan needs revision", MSG_TYPE.MSG_TYPE_INFO)
3424
+ tool_name = "local_tools::revise_plan" # Force plan revision
3425
+
3426
+ # Handle final answer
3427
+ if tool_name == "local_tools::final_answer":
3428
+ log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
3429
+ break
3430
+
3431
+ # Handle clarification request
3432
+ if tool_name == "local_tools::request_clarification":
3433
+ clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
3434
+
3435
+ CURRENT ANALYSIS:
3436
+ {current_scratchpad}
3437
+
3438
+ Generate a clear, specific question that will help you proceed effectively:"""
3439
+
3440
+ question = self.generate_text(clarification_prompt, temperature=0.3)
3441
+ question = self.remove_thinking_blocks(question)
3442
+
3443
+ log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
3444
+ return {
3445
+ "final_answer": question,
3446
+ "clarification_required": True,
3447
+ "final_scratchpad": current_scratchpad,
3448
+ "tool_calls": tool_calls_this_turn,
3449
+ "sources": sources_this_turn,
3450
+ "error": None,
3451
+ "decision_history": decision_history
3452
+ }
3453
+
3454
+ # Handle plan revision
3455
+ if tool_name == "local_tools::revise_plan":
3456
+ plan_revision_count += 1
3457
+ revision_id = log_event_fn(f"📝 Revising execution plan (revision #{plan_revision_count})", MSG_TYPE.MSG_TYPE_STEP_START)
3458
+
3459
+ try:
3460
+ revision_prompt = f"""Based on your current analysis and discoveries, create an updated execution plan.
3461
+
3462
+ ORIGINAL REQUEST: "{original_user_prompt}"
3463
+ CURRENT ANALYSIS:
3464
+ {current_scratchpad}
3465
+
3466
+ REASON FOR REVISION: {reasoning}
3467
+
3468
+ Create a new plan that reflects your current understanding. Consider:
3469
+ 1. What have you already accomplished?
3470
+ 2. What new information have you discovered?
3471
+ 3. What steps are still needed?
3472
+ 4. How can you be more efficient?
3473
+
3474
+ Provide your revision as JSON:
3475
+ {{
3476
+ "revision_reason": "Clear explanation of why the plan needed to change",
3477
+ "new_plan": [
3478
+ {{"step": 1, "description": "First revised step", "status": "pending"}},
3479
+ {{"step": 2, "description": "Second revised step", "status": "pending"}}
3480
+ ],
3481
+ "confidence": 0.8
3482
+ }}"""
3483
+
3484
+ revision_data = self.generate_structured_content(
3485
+ prompt=revision_prompt,
3486
+ schema={
3487
+ "revision_reason": "string",
3488
+ "new_plan": "array",
3489
+ "confidence": "number"
3490
+ },
3491
+ temperature=0.3,
3492
+ **llm_generation_kwargs
3493
+ )
3494
+
3495
+ if revision_data and revision_data.get("new_plan"):
3496
+ # Update the plan
3497
+ current_plan_version += 1
3498
+ new_tasks = []
3499
+ for task_data in revision_data["new_plan"]:
3500
+ task = TaskDecomposition() # Assuming this class exists
3501
+ task.description = task_data.get("description", "Undefined step")
3502
+ task.status = TaskStatus.PENDING # Reset all to pending
3503
+ new_tasks.append(task)
3504
+
3505
+ execution_plan.tasks = new_tasks
3506
+ current_task_index = 0 # Reset to beginning
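+ # The revised plan starts fresh: all statuses reset to PENDING and execution
+ # resumes at task 1; prior progress survives in the scratchpad, not the plan.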
3507
+
3508
+ # Update scratchpad with new plan
3509
+ current_scratchpad += f"\n\n### Updated Plan (Version {current_plan_version})\n"
3510
+ current_scratchpad += f"**Revision Reason**: {revision_data.get('revision_reason', 'Plan needed updating')}\n"
3511
+ current_scratchpad += f"**New Tasks**:\n"
3512
+ for task_idx, task in enumerate(execution_plan.tasks):  # task_idx avoids shadowing the step counter i
3513
+ current_scratchpad += f" {task_idx+1}. {task.description}\n"
3514
+
3515
+ log_event_fn(f"✅ Plan revised with {len(execution_plan.tasks)} updated tasks", MSG_TYPE.MSG_TYPE_STEP_END, event_id=revision_id, meta={
3516
+ "plan_version": current_plan_version,
3517
+ "new_task_count": len(execution_plan.tasks),
3518
+ "revision_reason": revision_data.get("revision_reason", "")
3519
+ })
3520
+
3521
+ # Continue with the new plan
3522
+ continue
3523
+ else:
3524
+ raise ValueError("Failed to generate valid plan revision")
3525
+
3526
+ except Exception as e:
3527
+ log_event_fn(f"Plan revision failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=revision_id)
3528
+ current_scratchpad += f"\n**Plan Revision Failed**: {str(e)}\nContinuing with original plan."
3529
+
3530
+ # Prepare parameters for tool execution
3531
+ param_assets = {}
3532
+ if requires_code:
3533
+ log_event_fn("💻 Generating code for task", MSG_TYPE.MSG_TYPE_INFO)
3534
+ code_prompt = f"""Generate the specific code needed for the current step.
3535
+
3536
+ CURRENT CONTEXT:
3537
+ {current_scratchpad}
3538
+
3539
+ CURRENT TASK: {tool_name}
3540
+ USER REQUEST: "{original_user_prompt}"
3541
+
3542
+ Generate clean, functional code that addresses the specific requirements. Focus on:
3543
+ 1. Solving the immediate problem
3544
+ 2. Being clear and readable
3545
+ 3. Including necessary imports and dependencies
3546
+ 4. Adding helpful comments where appropriate
3547
+
3548
+ CODE:"""
3549
+
3550
+ code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
3551
+ code_uuid = f"code_asset_{uuid.uuid4()}"
3552
+ asset_store[code_uuid] = {"type": "code", "content": code_content}
3553
+ param_assets['code_asset_id'] = code_uuid
3554
+ log_event_fn(f"Code asset created: {code_uuid[:8]}...", MSG_TYPE.MSG_TYPE_INFO)
3555
+
3556
+ if requires_image:
3557
+ image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
3558
+ if image_assets:
3559
+ param_assets['image_asset_id'] = image_assets[0]
3560
+ log_event_fn(f"Using image asset: {image_assets[0][:8]}...", MSG_TYPE.MSG_TYPE_INFO)
3561
+ else:
3562
+ log_event_fn("⚠️ Image required but none available", MSG_TYPE.MSG_TYPE_WARNING)
3563
+
3564
+ # Enhanced parameter generation
3565
+ param_prompt = f"""Generate the optimal parameters for this tool execution.
3566
+
3567
+ TOOL: {tool_name}
3568
+ CURRENT CONTEXT: {current_scratchpad}
3569
+ CURRENT REASONING: {reasoning}
3570
+ AVAILABLE ASSETS: {json.dumps(param_assets) if param_assets else "None"}
3571
+
3572
+ Based on your analysis and the current step you're working on, provide the most appropriate parameters.
3573
+ Be specific and purposeful in your parameter choices.
3574
+
3575
+ Output format: {{"tool_params": {{...}}}}"""
3576
+
3577
+ log_prompt_fn(f"Parameter Generation Step {i+1}", param_prompt)
3578
+ param_data = self.generate_structured_content(
3579
+ prompt=param_prompt,
3580
+ schema={"tool_params": "object"},
3581
+ temperature=decision_temperature,
3582
+ **llm_generation_kwargs
3583
+ )
3584
+ tool_params = param_data.get("tool_params", {}) if param_data else {}
3585
+
3586
+ current_scratchpad += f"\n**Parameters Generated**: {json.dumps(tool_params, indent=2)}"
3587
+
3588
+ # Hydrate parameters with assets
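+ # _hydrate recursively walks the generated params; any string that names a
+ # stored asset (contains "asset_" and is a key of asset_store) is swapped for
+ # that asset's raw content (generated code, base64 image) before execution.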
3589
+ def _hydrate(data: Any, store: Dict) -> Any:
3590
+ if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
3591
+ if isinstance(data, list): return [_hydrate(item, store) for item in data]
3592
+ if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
3593
+ return data
3594
+
3595
+ hydrated_params = _hydrate(tool_params, asset_store)
3596
+
3597
+ # Execute the tool with detailed logging
3598
+ start_time = time.time()
3599
+ tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
3600
+
3601
+ try:
3602
+ if tool_name in rag_registry:
3603
+ query = hydrated_params.get("query", "")
3604
+ if not query:
3605
+ # Fall back to using reasoning as query
3606
+ query = reasoning[:200] + "..." if len(reasoning) > 200 else reasoning
3607
+
3608
+ log_event_fn(f"🔍 Searching knowledge base with query: '{query[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
3609
+
3610
+ top_k = rag_tool_specs[tool_name]["default_top_k"]
3611
+ min_sim = rag_tool_specs[tool_name]["default_min_sim"]
3612
+
3613
+ raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
3614
+ raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
3615
+
3616
+ docs = []
3617
+ for d in raw_iter or []:
3618
+ doc_data = {
3619
+ "text": d.get("text", str(d)),
3620
+ "score": d.get("score", 0) * 100,
3621
+ "metadata": d.get("metadata", {})
3622
+ }
3623
+ docs.append(doc_data)
3624
+
3625
+ kept = [x for x in docs if x['score'] >= min_sim]
3626
+ tool_result = {
3627
+ "status": "success",
3628
+ "results": kept,
3629
+ "total_found": len(docs),
3630
+ "kept_after_filtering": len(kept),
3631
+ "query_used": query
3632
+ }
3633
+
3634
+ sources_this_turn.extend([{
3635
+ "source": tool_name,
3636
+ "metadata": x["metadata"],
3637
+ "score": x["score"]
3638
+ } for x in kept])
3639
+
3640
+ log_event_fn(f"📚 Retrieved {len(kept)} relevant documents (from {len(docs)} total)", MSG_TYPE.MSG_TYPE_INFO)
3641
+
3642
+ elif hasattr(self, "mcp") and "local_tools" not in tool_name:
3643
+ log_event_fn(f"🔧 Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={
3644
+ "tool_name": tool_name,
3645
+ "params": {k: str(v)[:100] for k, v in hydrated_params.items()} # Truncate for logging
3646
+ })
3647
+
3648
+ tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
3649
+
3650
+ log_event_fn(f"Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={
3651
+ "result_status": tool_result.get("status", "unknown"),
3652
+ "has_error": "error" in tool_result
3653
+ })
3654
+
3655
+ elif tool_name == "local_tools::generate_image" and hasattr(self, "tti"):
3656
+ image_prompt = hydrated_params.get("prompt", "")
3657
+ log_event_fn(f"🎨 Generating image with prompt: '{image_prompt[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
3658
+
3659
+ # This would call your text-to-image functionality
3660
+ image_result = self.tti.generate_image(image_prompt) # Assuming this method exists
3661
+ if image_result:
3662
+ image_uuid = f"generated_image_{uuid.uuid4()}"
3663
+ asset_store[image_uuid] = {"type": "image", "content": image_result, "source": "generated"}
3664
+ tool_result = {"status": "success", "image_id": image_uuid, "prompt_used": image_prompt}
3665
+ else:
3666
+ tool_result = {"status": "failure", "error": "Image generation failed"}
3667
+
3668
+ else:
3669
+ tool_result = {"status": "failure", "error": f"Tool '{tool_name}' is not available or supported in this context."}
3670
+
3671
+ except Exception as e:
3672
+ error_msg = f"Exception during '{tool_name}' execution: {str(e)}"
3673
+ log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
3674
+ tool_result = {"status": "failure", "error": error_msg}
3675
+
+         response_time = time.time() - start_time
+         success = tool_result.get("status") == "success"
+
+         # Record performance
+         performance_tracker.record_tool_usage(tool_name, success, confidence, response_time, tool_result.get("error"))
+
+         # Update task status
+         if success and current_task_index < len(execution_plan.tasks):
+             execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
+             completed_tasks.add(current_task_index)
+             current_task_index += 1
+
+         # Enhanced observation logging; default=str guards against non-JSON-serializable values
+         observation_text = json.dumps(tool_result, indent=2, default=str)
+         if len(observation_text) > 1000:
+             # Truncate very long results for the scratchpad
+             truncated_result = {k: (str(v)[:200] + "..." if len(str(v)) > 200 else v) for k, v in tool_result.items()}
+             observation_text = json.dumps(truncated_result, indent=2, default=str)
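        # A minimal sketch (hypothetical oversized result) of the truncation above:
        #     tool_result = {"status": "success", "data": "x" * 5000}
        #     # "data" is cut to its first 200 characters plus "...", so the
        #     # scratchpad entry keeps every key while staying bounded in size.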
+
+         current_scratchpad += f"\n\n### Step {i+1}: Execution & Observation\n"
+         current_scratchpad += f"**Tool Used**: {tool_name}\n"
+         current_scratchpad += f"**Success**: {success}\n"
+         current_scratchpad += f"**Response Time**: {response_time:.2f}s\n"
+         current_scratchpad += f"**Result**:\n```json\n{observation_text}\n```"
+
+         # Track tool call
+         tool_calls_this_turn.append({
+             "name": tool_name,
+             "params": tool_params,
+             "result": tool_result,
+             "response_time": response_time,
+             "confidence": confidence,
+             "reasoning": reasoning
+         })
+
+         if success:
+             log_event_fn(f"✅ Step {i+1} completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
+                 "tool_name": tool_name,
+                 "response_time": response_time,
+                 "confidence": confidence
+             })
+         else:
+             error_detail = tool_result.get("error", "No error detail provided.")
+             log_event_fn(f"⚠️ Step {i+1} completed with issues: {error_detail}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
+                 "tool_name": tool_name,
+                 "error": error_detail,
+                 "confidence": confidence
+             })
+
+             # Add failure handling to the scratchpad
+             current_scratchpad += f"\n**Failure Analysis**: {error_detail}"
+             current_scratchpad += "\n**Next Steps**: Consider alternative approaches or tools"
+
+         # Log current progress
+         completed_count = len(completed_tasks)
+         total_tasks = len(execution_plan.tasks)
+         if total_tasks > 0:
+             progress = (completed_count / total_tasks) * 100
+             log_event_fn(f"Progress: {completed_count}/{total_tasks} tasks completed ({progress:.1f}%)", MSG_TYPE.MSG_TYPE_STEP_PROGRESS, meta={"progress": progress})
+
+         # Stop once all planned tasks are completed (guard against empty plans)
+         if total_tasks > 0 and completed_count >= total_tasks:
+             log_event_fn("🎯 All planned tasks completed", MSG_TYPE.MSG_TYPE_INFO)
+             break
+
+     except Exception as ex:
+         log_event_fn(f"💥 Unexpected error in reasoning step {i+1}: {str(ex)}", MSG_TYPE.MSG_TYPE_ERROR, event_id=reasoning_step_id)
+         trace_exception(ex)
+
+         # Add error to scratchpad for context
+         current_scratchpad += f"\n\n### Step {i+1}: Unexpected Error\n**Error**: {str(ex)}\n**Recovery**: Continuing with adjusted approach"
+
+         log_event_fn("🔄 Recovering and continuing with next step", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
+
+ # Enhanced self-reflection
+ if enable_self_reflection and len(tool_calls_this_turn) > 0:
+     reflection_id = log_event_fn("🤔 Conducting comprehensive self-assessment...", MSG_TYPE.MSG_TYPE_STEP_START)
+     try:
+         reflection_prompt = f"""Conduct a thorough review of your work and assess the quality of your response to the user's request.
+
+ ORIGINAL REQUEST: "{original_user_prompt}"
+ TOOLS USED: {len(tool_calls_this_turn)}
+ PLAN REVISIONS: {plan_revision_count}
+
+ COMPLETE ANALYSIS:
+ {current_scratchpad}
+
+ Evaluate your performance on multiple dimensions:
+
+ 1. **Goal Achievement**: Did you fully address the user's request?
+ 2. **Process Efficiency**: Was your approach optimal given the available tools?
+ 3. **Information Quality**: Is the information you gathered accurate and relevant?
+ 4. **Decision Making**: Were your tool choices and parameters appropriate?
+ 5. **Adaptability**: How well did you handle unexpected results or plan changes?
+
+ Provide your assessment as JSON:
+ {{
+     "goal_achieved": true,
+     "effectiveness_score": 0.85,
+     "process_efficiency": 0.8,
+     "information_quality": 0.9,
+     "decision_making": 0.85,
+     "adaptability": 0.7,
+     "overall_confidence": 0.82,
+     "strengths": ["Clear reasoning", "Good tool selection"],
+     "areas_for_improvement": ["Could have been more efficient"],
+     "summary": "Successfully completed the user's request with high quality results",
+     "key_insights": ["Discovered that X was more important than initially thought"]
+ }}"""
+
+         reflection_data = self.generate_structured_content(
+             prompt=reflection_prompt,
+             schema={
+                 "goal_achieved": "boolean",
+                 "effectiveness_score": "number",
+                 "process_efficiency": "number",
+                 "information_quality": "number",
+                 "decision_making": "number",
+                 "adaptability": "number",
+                 "overall_confidence": "number",
+                 "strengths": "array",
+                 "areas_for_improvement": "array",
+                 "summary": "string",
+                 "key_insights": "array"
+             },
+             temperature=0.3,
+             **llm_generation_kwargs
+         )
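        # Hypothetical hardening, not part of the call above: structured generation
        # may return scores outside [0, 1], so they could be clamped before use, e.g.
        #     conf = max(0.0, min(1.0, float(reflection_data.get("overall_confidence", 0.5))))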
+
+         if reflection_data:
+             current_scratchpad += "\n\n### Comprehensive Self-Assessment\n"
+             current_scratchpad += f"**Goal Achieved**: {reflection_data.get('goal_achieved', False)}\n"
+             current_scratchpad += f"**Overall Confidence**: {reflection_data.get('overall_confidence', 0.5):.2f}\n"
+             current_scratchpad += f"**Effectiveness Score**: {reflection_data.get('effectiveness_score', 0.5):.2f}\n"
+             current_scratchpad += f"**Key Strengths**: {', '.join(reflection_data.get('strengths', []))}\n"
+             current_scratchpad += f"**Improvement Areas**: {', '.join(reflection_data.get('areas_for_improvement', []))}\n"
+             current_scratchpad += f"**Summary**: {reflection_data.get('summary', '')}\n"
+
+             log_event_fn("✅ Self-assessment completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id, meta={
+                 "overall_confidence": reflection_data.get('overall_confidence', 0.5),
+                 "goal_achieved": reflection_data.get('goal_achieved', False),
+                 "effectiveness_score": reflection_data.get('effectiveness_score', 0.5)
+             })
+         else:
+             log_event_fn("Self-assessment data generation failed", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
+
+     except Exception as e:
+         log_event_fn(f"Self-assessment failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
+
+ # Enhanced final synthesis
+ synthesis_id = log_event_fn("📝 Synthesizing comprehensive final response...", MSG_TYPE.MSG_TYPE_STEP_START)
+
+ final_answer_prompt = f"""Create a comprehensive, well-structured final response that fully addresses the user's request.
+
+ ORIGINAL REQUEST: "{original_user_prompt}"
+ CONTEXT: {context or "No additional context"}
+
+ COMPLETE ANALYSIS AND WORK:
+ {current_scratchpad}
+
+ GUIDELINES for your response:
+ 1. **Be Complete**: Address all aspects of the user's request
+ 2. **Be Clear**: Organize your response logically and use clear language
+ 3. **Be Helpful**: Provide actionable information and insights
+ 4. **Be Honest**: If there were limitations or uncertainties, mention them appropriately
+ 5. **Be Concise**: While being thorough, avoid unnecessary verbosity
+ 6. **Cite Sources**: If you used research tools, reference the information appropriately
+
+ Your response should feel natural and conversational while being informative and valuable.
+
+ FINAL RESPONSE:"""
+
+ log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)
+
+ final_answer_text = self.generate_text(
+     prompt=final_answer_prompt,
+     system_prompt=system_prompt,
+     stream=streaming_callback is not None,
+     streaming_callback=streaming_callback,
+     temperature=final_answer_temperature,
+     **llm_generation_kwargs
+ )
+
+ if isinstance(final_answer_text, dict) and "error" in final_answer_text:
+     log_event_fn(f"Final synthesis failed: {final_answer_text['error']}", MSG_TYPE.MSG_TYPE_ERROR, event_id=synthesis_id)
+     return {
+         "final_answer": "I encountered an issue while preparing my final response. Please let me know if you'd like me to try again.",
+         "error": final_answer_text["error"],
+         "final_scratchpad": current_scratchpad,
+         "tool_calls": tool_calls_this_turn,
+         "sources": sources_this_turn,
+         "decision_history": decision_history
+     }
+
+ final_answer = self.remove_thinking_blocks(final_answer_text)
+
+ # Calculate overall performance metrics
+ overall_confidence = sum(call.get('confidence', 0.5) for call in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
+ successful_calls = sum(1 for call in tool_calls_this_turn if call.get('result', {}).get('status') == 'success')
+ success_rate = successful_calls / max(len(tool_calls_this_turn), 1)
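# Worked example (hypothetical turn): three tool calls with confidences 0.9, 0.8
# and 0.4, two of which succeeded, give
#     overall_confidence = (0.9 + 0.8 + 0.4) / 3 ≈ 0.70
#     success_rate = 2 / 3 ≈ 0.67
# The max(..., 1) denominators keep both defined when no tools were called.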
+
+ log_event_fn("✅ Comprehensive response ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id, meta={
+     "final_answer_length": len(final_answer),
+     "total_tools_used": len(tool_calls_this_turn),
+     "success_rate": success_rate,
+     "overall_confidence": overall_confidence
+ })
+
+ return {
+     "final_answer": final_answer,
+     "final_scratchpad": current_scratchpad,
+     "tool_calls": tool_calls_this_turn,
+     "sources": sources_this_turn,
+     "decision_history": decision_history,
+     "performance_stats": {
+         "total_steps": len(tool_calls_this_turn),
+         "successful_steps": successful_calls,
+         "success_rate": success_rate,
+         "average_confidence": overall_confidence,
+         "plan_revisions": plan_revision_count,
+         "total_reasoning_steps": len(decision_history)
+     },
+     "plan_evolution": {
+         "initial_tasks": len(execution_plan.tasks),
+         "final_version": current_plan_version,
+         "total_revisions": plan_revision_count
+     },
+     "clarification_required": False,
+     "overall_confidence": overall_confidence,
+     "error": None
  }
 