lollms-client: 1.6.2-py3-none-any.whl → 1.6.5-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of lollms-client might be problematic.

Files changed (41)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
  3. lollms_client/llm_bindings/claude/__init__.py +2 -2
  4. lollms_client/llm_bindings/gemini/__init__.py +2 -2
  5. lollms_client/llm_bindings/grok/__init__.py +2 -2
  6. lollms_client/llm_bindings/groq/__init__.py +2 -2
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
  8. lollms_client/llm_bindings/litellm/__init__.py +1 -1
  9. lollms_client/llm_bindings/llamacpp/__init__.py +2 -2
  10. lollms_client/llm_bindings/lollms/__init__.py +1 -1
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
  12. lollms_client/llm_bindings/mistral/__init__.py +2 -2
  13. lollms_client/llm_bindings/novita_ai/__init__.py +2 -2
  14. lollms_client/llm_bindings/ollama/__init__.py +7 -4
  15. lollms_client/llm_bindings/open_router/__init__.py +2 -2
  16. lollms_client/llm_bindings/openai/__init__.py +1 -1
  17. lollms_client/llm_bindings/openllm/__init__.py +2 -2
  18. lollms_client/llm_bindings/openwebui/__init__.py +1 -1
  19. lollms_client/llm_bindings/perplexity/__init__.py +2 -2
  20. lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
  21. lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
  22. lollms_client/llm_bindings/transformers/__init__.py +4 -4
  23. lollms_client/llm_bindings/vllm/__init__.py +1 -1
  24. lollms_client/lollms_core.py +7 -1443
  25. lollms_client/lollms_llm_binding.py +1 -1
  26. lollms_client/lollms_tti_binding.py +1 -1
  27. lollms_client/tti_bindings/diffusers/__init__.py +320 -853
  28. lollms_client/tti_bindings/diffusers/server/main.py +882 -0
  29. lollms_client/tti_bindings/gemini/__init__.py +179 -239
  30. lollms_client/tti_bindings/leonardo_ai/__init__.py +1 -1
  31. lollms_client/tti_bindings/novita_ai/__init__.py +1 -1
  32. lollms_client/tti_bindings/stability_ai/__init__.py +1 -1
  33. lollms_client/tts_bindings/lollms/__init__.py +6 -1
  34. lollms_client/tts_bindings/piper_tts/__init__.py +1 -1
  35. lollms_client/tts_bindings/xtts/__init__.py +20 -14
  36. lollms_client/tts_bindings/xtts/server/main.py +17 -4
  37. {lollms_client-1.6.2.dist-info → lollms_client-1.6.5.dist-info}/METADATA +2 -2
  38. {lollms_client-1.6.2.dist-info → lollms_client-1.6.5.dist-info}/RECORD +41 -40
  39. {lollms_client-1.6.2.dist-info → lollms_client-1.6.5.dist-info}/WHEEL +0 -0
  40. {lollms_client-1.6.2.dist-info → lollms_client-1.6.5.dist-info}/licenses/LICENSE +0 -0
  41. {lollms_client-1.6.2.dist-info → lollms_client-1.6.5.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,6 @@
 # lollms_client/lollms_core.py
+# author: ParisNeo
+# description: LollmsClient definition file
 import requests
 from ascii_colors import ASCIIColors, trace_exception
 from lollms_client.lollms_types import MSG_TYPE, ELF_COMPLETION_FORMAT
@@ -273,8 +275,8 @@ class LollmsClient():
         raise ValueError(f"Failed to update LLM binding: {binding_name}. Available: {available}")

     def get_ctx_size(self, model_name:str|None=None):
-        if self.llm:
-            ctx_size = self.llm.get_ctx_size(model_name)
+        if self.llm and self.llm.model_name:
+            ctx_size = self.llm.get_ctx_size(model_name or self.llm.model_name)
             return ctx_size if ctx_size else self.llm.default_ctx_size
         else:
             return None
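Note on the get_ctx_size change above: in 1.6.5 the binding is only queried when a model is actually configured, and a call without arguments resolves against the binding's own model_name instead of forwarding None. A minimal usage sketch, assuming a configured client (the constructor arguments and model names below are illustrative, not taken from the diff):

from lollms_client import LollmsClient

lc = LollmsClient(binding_name="ollama", model_name="mistral")  # hypothetical setup

ctx = lc.get_ctx_size()                # 1.6.5: resolves against lc.llm.model_name
ctx_other = lc.get_ctx_size("llama3")  # an explicit model name still takes precedence
if ctx is None:
    ctx = 4096  # no binding or model configured; pick your own fallback

The method still returns None when no binding (or no model) is set, so callers should keep a guard like the one above.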
@@ -519,7 +521,7 @@ class LollmsClient():
             Union[str, dict]: Generated text or error dictionary if failed.
         """
         if self.llm:
-
+            images = [str(image) for image in images] if images else None
             ctx_size = ctx_size if ctx_size is not None else self.llm.default_ctx_size if self.llm.default_ctx_size else None
             if ctx_size is None:
                 ctx_size = self.llm.get_ctx_size()
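The single added line in this hunk coerces every entry of images to str before generation, so callers can pass pathlib.Path objects (or anything with a meaningful __str__) without converting first. A hedged sketch of the calling side; the surrounding docstring suggests this is the text-generation entry point, so generate_text is assumed here, with lc from the previous sketch:

from pathlib import Path

response = lc.generate_text(
    prompt="Describe these images.",
    images=[Path("assets/chart.png"), "assets/photo.jpg"],  # stringified internally in 1.6.5
)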
@@ -679,10 +681,10 @@ class LollmsClient():
             raise RuntimeError("LLM binding not initialized.")


-    def listModels(self):
+    def list_models(self):
         """Lists models available to the current LLM binding."""
         if self.llm:
-            return self.llm.listModels()
+            return self.llm.list_models()
         raise RuntimeError("LLM binding not initialized.")

     # --- Convenience Methods for Lollms LLM Binding Features ---
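listModels is renamed to list_models at the client level and the call is forwarded to the binding's list_models; the one-line changes to lollms_llm_binding.py and the individual bindings in the file list above are consistent with the same rename being applied there. This is a breaking change for code written against 1.6.2. A small compatibility shim (illustrative, not part of the library) that works on either side of the rename:

def list_models_compat(client):
    # Prefer the 1.6.5 snake_case method, fall back to the 1.6.2 camelCase one.
    list_fn = getattr(client, "list_models", None) or getattr(client, "listModels")
    return list_fn()

models = list_models_compat(lc)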
@@ -2410,1444 +2412,6 @@ GUIDELINES for your response:

 Your response should feel natural and conversational while being informative and valuable.

-FINAL RESPONSE:"""
-
-        log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)
-
-        final_answer_text = self.generate_text(
-            prompt=final_answer_prompt,
-            system_prompt=system_prompt,
-            stream=streaming_callback is not None,
-            streaming_callback=streaming_callback,
-            temperature=final_answer_temperature,
-            **llm_generation_kwargs
-        )
-
-        if isinstance(final_answer_text, dict) and "error" in final_answer_text:
-            log_event_fn(f"Final synthesis failed: {final_answer_text['error']}", MSG_TYPE.MSG_TYPE_ERROR, event_id=synthesis_id)
-            return {
-                "final_answer": "I encountered an issue while preparing my final response. Please let me know if you'd like me to try again.",
-                "error": final_answer_text["error"],
-                "final_scratchpad": current_scratchpad,
-                "tool_calls": tool_calls_this_turn,
-                "sources": sources_this_turn,
-                "decision_history": decision_history
-            }
-
-        final_answer = self.remove_thinking_blocks(final_answer_text)
-
-        # Calculate overall performance metrics
-        overall_confidence = sum(call.get('confidence', 0.5) for call in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
-        successful_calls = sum(1 for call in tool_calls_this_turn if call.get('result', {}).get('status') == 'success')
-        success_rate = successful_calls / max(len(tool_calls_this_turn), 1)
-
-        log_event_fn("✅ Comprehensive response ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id, meta={
-            "final_answer_length": len(final_answer),
-            "total_tools_used": len(tool_calls_this_turn),
-            "success_rate": success_rate,
-            "overall_confidence": overall_confidence
-        })
-
-        return {
-            "final_answer": final_answer,
-            "final_scratchpad": current_scratchpad,
-            "tool_calls": tool_calls_this_turn,
-            "sources": sources_this_turn,
-            "decision_history": decision_history,
-            "performance_stats": {
-                "total_steps": len(tool_calls_this_turn),
-                "successful_steps": successful_calls,
-                "success_rate": success_rate,
-                "average_confidence": overall_confidence,
-                "plan_revisions": plan_revision_count,
-                "total_reasoning_steps": len(decision_history)
-            },
-            "plan_evolution": {
-                "initial_tasks": len(execution_plan.tasks),
-                "final_version": current_plan_version,
-                "total_revisions": plan_revision_count
-            },
-            "clarification_required": False,
-            "overall_confidence": overall_confidence,
-            "error": None
-        }
-
-
-    def _execute_complex_reasoning_loop(
-        self, prompt, context, system_prompt, reasoning_system_prompt, images,
-        max_reasoning_steps, decision_temperature, final_answer_temperature,
-        streaming_callback, debug, enable_self_reflection, all_visible_tools,
-        rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, max_scratchpad_size, **llm_generation_kwargs
-    ) -> Dict[str, Any]:
-
-        planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
-
-        def _get_friendly_action_description(tool_name, requires_code, requires_image):
-            descriptions = {
-                "local_tools::final_answer": "📋 Preparing final answer",
-                "local_tools::request_clarification": "❓ Requesting clarification",
-                "local_tools::generate_image": "🎨 Creating image",
-                "local_tools::revise_plan": "📝 Revising execution plan"
-            }
-            if tool_name in descriptions:
-                return descriptions[tool_name]
-            if "research::" in tool_name:
-                return f"🔍 Searching {tool_name.split('::')[-1]} knowledge base"
-            if requires_code:
-                return "💻 Processing code"
-            if requires_image:
-                return "🖼️ Analyzing images"
-            return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
-
-        def _compress_scratchpad_intelligently(scratchpad: str, original_request: str, target_size: int) -> str:
-            """Enhanced scratchpad compression that preserves key decisions and recent context"""
-            if len(scratchpad) <= target_size:
-                return scratchpad
-
-            log_event_fn("📝 Compressing scratchpad to maintain focus...", MSG_TYPE.MSG_TYPE_INFO)
-
-            # Extract key components
-            lines = scratchpad.split('\n')
-            plan_section = []
-            decisions = []
-            recent_observations = []
-
-            current_section = None
-            for i, line in enumerate(lines):
-                if "### Execution Plan" in line or "### Updated Plan" in line:
-                    current_section = "plan"
-                elif "### Step" in line and ("Thought" in line or "Decision" in line):
-                    current_section = "decision"
-                elif "### Step" in line and "Observation" in line:
-                    current_section = "observation"
-                elif line.startswith("###"):
-                    current_section = None
-
-                if current_section == "plan" and line.strip():
-                    plan_section.append(line)
-                elif current_section == "decision" and line.strip():
-                    decisions.append((i, line))
-                elif current_section == "observation" and line.strip():
-                    recent_observations.append((i, line))
-
-            # Keep most recent items and important decisions
-            recent_decisions = decisions[-3:] if len(decisions) > 3 else decisions
-            recent_obs = recent_observations[-5:] if len(recent_observations) > 5 else recent_observations
-
-            compressed_parts = [
-                f"### Original Request\n{original_request}",
-                f"### Current Plan\n" + '\n'.join(plan_section[-10:]),
-                f"### Recent Key Decisions"
-            ]
-
-            for _, decision in recent_decisions:
-                compressed_parts.append(decision)
-
-            compressed_parts.append("### Recent Observations")
-            for _, obs in recent_obs:
-                compressed_parts.append(obs)
-
-            compressed = '\n'.join(compressed_parts)
-            if len(compressed) > target_size:
-                # Final trim if still too long
-                compressed = compressed[:target_size-100] + "\n...[content compressed for focus]"
-
-            return compressed
-
-        original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
-        asset_store: Dict[str, Dict] = {}
-        decision_history = [] # Track all decisions made
-
-        # Enhanced planning phase
-        planning_step_id = log_event_fn("📋 Creating adaptive execution plan...", MSG_TYPE.MSG_TYPE_STEP_START)
-        execution_plan = planner.decompose_task(original_user_prompt, context or "")
-        current_plan_version = 1
-
-        log_event_fn(f"Initial plan created with {len(execution_plan.tasks)} tasks", MSG_TYPE.MSG_TYPE_INFO, meta={
-            "plan_version": current_plan_version,
-            "total_tasks": len(execution_plan.tasks),
-            "estimated_complexity": "medium" if len(execution_plan.tasks) <= 5 else "high"
-        })
-
-        for i, task in enumerate(execution_plan.tasks):
-            log_event_fn(f"Task {i+1}: {task.description}", MSG_TYPE.MSG_TYPE_INFO)
-
-        log_event_fn("✅ Adaptive plan ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
-
-        # Enhanced initial state
-        initial_state_parts = [
-            f"### Original User Request\n{original_user_prompt}",
-            f"### Context\n{context or 'No additional context provided'}",
-            f"### Execution Plan (Version {current_plan_version})\n- Total tasks: {len(execution_plan.tasks)}",
-            f"- Estimated complexity: {'High' if len(execution_plan.tasks) > 5 else 'Medium'}"
-        ]
-
-        for i, task in enumerate(execution_plan.tasks):
-            initial_state_parts.append(f" {i+1}. {task.description} [Status: {task.status.value}]")
-
-        if images:
-            initial_state_parts.append(f"### Provided Assets")
-            for img_b64 in images:
-                img_uuid = str(uuid.uuid4())
-                asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
-                initial_state_parts.append(f"- Image asset: {img_uuid}")
-
-        current_scratchpad = "\n".join(initial_state_parts)
-        log_event_fn("Initial analysis complete", MSG_TYPE.MSG_TYPE_SCRATCHPAD, meta={"scratchpad_size": len(current_scratchpad)})
-
-        formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
-        completed_tasks, current_task_index = set(), 0
-        plan_revision_count = 0
-
-        # Main reasoning loop with enhanced decision tracking
-        for i in range(max_reasoning_steps):
-            current_task_desc = execution_plan.tasks[current_task_index].description if current_task_index < len(execution_plan.tasks) else "Finalizing analysis"
-            step_desc = f"🤔 Step {i+1}: {current_task_desc}"
-            reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
-
-            try:
-                # Enhanced scratchpad management
-                if len(current_scratchpad) > max_scratchpad_size:
-                    log_event_fn(f"Scratchpad size ({len(current_scratchpad)}) exceeds limit, compressing...", MSG_TYPE.MSG_TYPE_INFO)
-                    current_scratchpad = _compress_scratchpad_intelligently(current_scratchpad, original_user_prompt, max_scratchpad_size // 2)
-                    log_event_fn(f"Scratchpad compressed to {len(current_scratchpad)} characters", MSG_TYPE.MSG_TYPE_INFO)
-
-                # Enhanced reasoning prompt with better decision tracking
-                reasoning_prompt = f"""You are working on: "{original_user_prompt}"
-
-=== AVAILABLE ACTIONS ===
-{formatted_tools_list}
-
-=== YOUR COMPLETE ANALYSIS HISTORY ===
-{current_scratchpad}
-=== END ANALYSIS HISTORY ===
-
-=== DECISION GUIDELINES ===
-1. **Review your progress**: Look at what you've already discovered and accomplished
-2. **Consider your current task**: Focus on the next logical step in your plan
-3. **Remember your decisions**: If you previously decided to use a tool, follow through unless you have a good reason to change
-4. **Be adaptive**: If you discover new information that changes the situation, consider revising your plan
-5. **Stay focused**: Each action should clearly advance toward the final goal
-
-=== YOUR NEXT DECISION ===
-Choose the single most appropriate action to take right now. Consider:
-- What specific step are you currently working on?
-- What information do you still need?
-- What would be most helpful for the user?
-
-Provide your decision as JSON:
-{{
-    "reasoning": "Explain your current thinking and why this action makes sense now",
-    "action": {{
-        "tool_name": "exact_tool_name",
-        "requires_code_input": false,
-        "requires_image_input": false,
-        "confidence": 0.8
-    }},
-    "plan_status": "on_track" // or "needs_revision" if you want to change the plan
-}}"""
-
-                log_prompt_fn(f"Reasoning Prompt Step {i+1}", reasoning_prompt)
-                decision_data = self.generate_structured_content(
-                    prompt=reasoning_prompt,
-                    schema={
-                        "reasoning": "string",
-                        "action": "object",
-                        "plan_status": "string"
-                    },
-                    system_prompt=reasoning_system_prompt,
-                    temperature=decision_temperature,
-                    **llm_generation_kwargs
-                )
-
-                if not (decision_data and isinstance(decision_data.get("action"), dict)):
-                    log_event_fn("⚠️ Invalid decision format from AI", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
-                    current_scratchpad += f"\n\n### Step {i+1}: Decision Error\n- Error: AI produced invalid decision JSON\n- Continuing with fallback approach"
-                    continue
-
-                action = decision_data.get("action", {})
-                reasoning = decision_data.get("reasoning", "No reasoning provided")
-                plan_status = decision_data.get("plan_status", "on_track")
-                tool_name = action.get("tool_name")
-                requires_code = action.get("requires_code_input", False)
-                requires_image = action.get("requires_image_input", False)
-                confidence = action.get("confidence", 0.5)
-
-                # Track the decision
-                decision_history.append({
-                    "step": i+1,
-                    "tool_name": tool_name,
-                    "reasoning": reasoning,
-                    "confidence": confidence,
-                    "plan_status": plan_status
-                })
-
-                current_scratchpad += f"\n\n### Step {i+1}: Decision & Reasoning\n**Reasoning**: {reasoning}\n**Chosen Action**: {tool_name}\n**Confidence**: {confidence}\n**Plan Status**: {plan_status}"
-
-                log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP, meta={
-                    "tool_name": tool_name,
-                    "confidence": confidence,
-                    "reasoning": reasoning[:100] + "..." if len(reasoning) > 100 else reasoning
-                })
-
-                # Handle plan revision
-                if plan_status == "needs_revision" and tool_name != "local_tools::revise_plan":
-                    log_event_fn("🔄 AI indicates plan needs revision", MSG_TYPE.MSG_TYPE_INFO)
-                    tool_name = "local_tools::revise_plan" # Force plan revision
-
-                # Handle final answer
-                if tool_name == "local_tools::final_answer":
-                    log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
-                    break
-
-                # Handle clarification request
-                if tool_name == "local_tools::request_clarification":
-                    clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
-
-CURRENT ANALYSIS:
-{current_scratchpad}
-
-Generate a clear, specific question that will help you proceed effectively:"""
-
-                    question = self.generate_text(clarification_prompt, temperature=0.3)
-                    question = self.remove_thinking_blocks(question)
-
-                    log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
-                    return {
-                        "final_answer": question,
-                        "clarification_required": True,
-                        "final_scratchpad": current_scratchpad,
-                        "tool_calls": tool_calls_this_turn,
-                        "sources": sources_this_turn,
-                        "error": None,
-                        "decision_history": decision_history
-                    }
-
-                # Handle final answer
-                if tool_name == "local_tools::final_answer":
-                    log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
-                    break
-
-                # Handle clarification request
-                if tool_name == "local_tools::request_clarification":
-                    clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
-
-CURRENT ANALYSIS:
-{current_scratchpad}
-
-Generate a clear, specific question that will help you proceed effectively:"""
-
-                    question = self.generate_text(clarification_prompt, temperature=0.3)
-                    question = self.remove_thinking_blocks(question)
-
-                    log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
-                    return {
-                        "final_answer": question,
-                        "clarification_required": True,
-                        "final_scratchpad": current_scratchpad,
-                        "tool_calls": tool_calls_this_turn,
-                        "sources": sources_this_turn,
-                        "error": None,
-                        "decision_history": decision_history
-                    }
-
-                # Handle plan revision
-                if tool_name == "local_tools::revise_plan":
-                    plan_revision_count += 1
-                    revision_id = log_event_fn(f"📝 Revising execution plan (revision #{plan_revision_count})", MSG_TYPE.MSG_TYPE_STEP_START)
-
-                    try:
-                        revision_prompt = f"""Based on your current analysis and discoveries, create an updated execution plan.
-
-ORIGINAL REQUEST: "{original_user_prompt}"
-CURRENT ANALYSIS:
-{current_scratchpad}
-
-REASON FOR REVISION: {reasoning}
-
-Create a new plan that reflects your current understanding. Consider:
-1. What have you already accomplished?
-2. What new information have you discovered?
-3. What steps are still needed?
-4. How can you be more efficient?
-
-Provide your revision as JSON:
-{{
-    "revision_reason": "Clear explanation of why the plan needed to change",
-    "new_plan": [
-        {{"step": 1, "description": "First revised step", "status": "pending"}},
-        {{"step": 2, "description": "Second revised step", "status": "pending"}}
-    ],
-    "confidence": 0.8
-}}"""
-
-                        revision_data = self.generate_structured_content(
-                            prompt=revision_prompt,
-                            schema={
-                                "revision_reason": "string",
-                                "new_plan": "array",
-                                "confidence": "number"
-                            },
-                            temperature=0.3,
-                            **llm_generation_kwargs
-                        )
-
-                        if revision_data and revision_data.get("new_plan"):
-                            # Update the plan
-                            current_plan_version += 1
-                            new_tasks = []
-                            for task_data in revision_data["new_plan"]:
-                                task = TaskDecomposition() # Assuming this class exists
-                                task.description = task_data.get("description", "Undefined step")
-                                task.status = TaskStatus.PENDING # Reset all to pending
-                                new_tasks.append(task)
-
-                            execution_plan.tasks = new_tasks
-                            current_task_index = 0 # Reset to beginning
-
-                            # Update scratchpad with new plan
-                            current_scratchpad += f"\n\n### Updated Plan (Version {current_plan_version})\n"
-                            current_scratchpad += f"**Revision Reason**: {revision_data.get('revision_reason', 'Plan needed updating')}\n"
-                            current_scratchpad += f"**New Tasks**:\n"
-                            for i, task in enumerate(execution_plan.tasks):
-                                current_scratchpad += f" {i+1}. {task.description}\n"
-
-                            log_event_fn(f"✅ Plan revised with {len(execution_plan.tasks)} updated tasks", MSG_TYPE.MSG_TYPE_STEP_END, event_id=revision_id, meta={
-                                "plan_version": current_plan_version,
-                                "new_task_count": len(execution_plan.tasks),
-                                "revision_reason": revision_data.get("revision_reason", "")
-                            })
-
-                            # Continue with the new plan
-                            continue
-                        else:
-                            raise ValueError("Failed to generate valid plan revision")
-
-                    except Exception as e:
-                        log_event_fn(f"Plan revision failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=revision_id)
-                        current_scratchpad += f"\n**Plan Revision Failed**: {str(e)}\nContinuing with original plan."
-
-                # Prepare parameters for tool execution
-                param_assets = {}
-                if requires_code:
-                    log_event_fn("💻 Generating code for task", MSG_TYPE.MSG_TYPE_INFO)
-                    code_prompt = f"""Generate the specific code needed for the current step.
-
-CURRENT CONTEXT:
-{current_scratchpad}
-
-CURRENT TASK: {tool_name}
-USER REQUEST: "{original_user_prompt}"
-
-Generate clean, functional code that addresses the specific requirements. Focus on:
-1. Solving the immediate problem
-2. Being clear and readable
-3. Including necessary imports and dependencies
-4. Adding helpful comments where appropriate
-
-CODE:"""
-
-                    code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
-                    code_uuid = f"code_asset_{uuid.uuid4()}"
-                    asset_store[code_uuid] = {"type": "code", "content": code_content}
-                    param_assets['code_asset_id'] = code_uuid
-                    log_event_fn(f"Code asset created: {code_uuid[:8]}...", MSG_TYPE.MSG_TYPE_INFO)
-
-                if requires_image:
-                    image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
-                    if image_assets:
-                        param_assets['image_asset_id'] = image_assets[0]
-                        log_event_fn(f"Using image asset: {image_assets[0][:8]}...", MSG_TYPE.MSG_TYPE_INFO)
-                    else:
-                        log_event_fn("⚠️ Image required but none available", MSG_TYPE.MSG_TYPE_WARNING)
-
-                # Enhanced parameter generation
-                param_prompt = f"""Generate the optimal parameters for this tool execution.
-
-TOOL: {tool_name}
-CURRENT CONTEXT: {current_scratchpad}
-CURRENT REASONING: {reasoning}
-AVAILABLE ASSETS: {json.dumps(param_assets) if param_assets else "None"}
-
-Based on your analysis and the current step you're working on, provide the most appropriate parameters.
-Be specific and purposeful in your parameter choices.
-
-Output format: {{"tool_params": {{...}}}}"""
-
-                log_prompt_fn(f"Parameter Generation Step {i+1}", param_prompt)
-                param_data = self.generate_structured_content(
-                    prompt=param_prompt,
-                    schema={"tool_params": "object"},
-                    temperature=decision_temperature,
-                    **llm_generation_kwargs
-                )
-                tool_params = param_data.get("tool_params", {}) if param_data else {}
-
-                current_scratchpad += f"\n**Parameters Generated**: {json.dumps(tool_params, indent=2)}"
-
-                # Hydrate parameters with assets
-                def _hydrate(data: Any, store: Dict) -> Any:
-                    if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
-                    if isinstance(data, list): return [_hydrate(item, store) for item in data]
-                    if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
-                    return data
-
-                hydrated_params = _hydrate(tool_params, asset_store)
-
-                # Execute the tool with detailed logging
-                start_time = time.time()
-                tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
-
-                try:
-                    if tool_name in rag_registry:
-                        query = hydrated_params.get("query", "")
-                        if not query:
-                            # Fall back to using reasoning as query
-                            query = reasoning[:200] + "..." if len(reasoning) > 200 else reasoning
-
-                        log_event_fn(f"🔍 Searching knowledge base with query: '{query[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
-
-                        top_k = rag_tool_specs[tool_name]["default_top_k"]
-                        min_sim = rag_tool_specs[tool_name]["default_min_sim"]
-
-                        raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
-                        raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
-
-                        docs = []
-                        for d in raw_iter or []:
-                            doc_data = {
-                                "text": d.get("text", str(d)),
-                                "score": d.get("score", 0) * 100,
-                                "metadata": d.get("metadata", {})
-                            }
-                            docs.append(doc_data)
-
-                        kept = [x for x in docs if x['score'] >= min_sim]
-                        tool_result = {
-                            "status": "success",
-                            "results": kept,
-                            "total_found": len(docs),
-                            "kept_after_filtering": len(kept),
-                            "query_used": query
-                        }
-
-                        sources_this_turn.extend([{
-                            "source": tool_name,
-                            "metadata": x["metadata"],
-                            "score": x["score"]
-                        } for x in kept])
-
-                        log_event_fn(f"📚 Retrieved {len(kept)} relevant documents (from {len(docs)} total)", MSG_TYPE.MSG_TYPE_INFO)
-
-                    elif hasattr(self, "mcp") and "local_tools" not in tool_name:
-                        log_event_fn(f"🔧 Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={
-                            "tool_name": tool_name,
-                            "params": {k: str(v)[:100] for k, v in hydrated_params.items()} # Truncate for logging
-                        })
-
-                        tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
-
-                        log_event_fn(f"Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={
-                            "result_status": tool_result.get("status", "unknown"),
-                            "has_error": "error" in tool_result
-                        })
-
-                    elif tool_name == "local_tools::generate_image" and hasattr(self, "tti"):
-                        image_prompt = hydrated_params.get("prompt", "")
-                        log_event_fn(f"🎨 Generating image with prompt: '{image_prompt[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
-
-                        # This would call your text-to-image functionality
-                        image_result = self.tti.generate_image(image_prompt) # Assuming this method exists
-                        if image_result:
-                            image_uuid = f"generated_image_{uuid.uuid4()}"
-                            asset_store[image_uuid] = {"type": "image", "content": image_result, "source": "generated"}
-                            tool_result = {"status": "success", "image_id": image_uuid, "prompt_used": image_prompt}
-                        else:
-                            tool_result = {"status": "failure", "error": "Image generation failed"}
-
-                    else:
-                        tool_result = {"status": "failure", "error": f"Tool '{tool_name}' is not available or supported in this context."}
-
-                except Exception as e:
-                    error_msg = f"Exception during '{tool_name}' execution: {str(e)}"
-                    log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
-                    tool_result = {"status": "failure", "error": error_msg}
-
-                response_time = time.time() - start_time
-                success = tool_result.get("status") == "success"
-
-                # Record performance
-                performance_tracker.record_tool_usage(tool_name, success, confidence, response_time, tool_result.get("error"))
-
-                # Update task status
-                if success and current_task_index < len(execution_plan.tasks):
-                    execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
-                    completed_tasks.add(current_task_index)
-                    current_task_index += 1
-
-                # Enhanced observation logging
-                observation_text = json.dumps(tool_result, indent=2)
-                if len(observation_text) > 1000:
-                    # Truncate very long results for scratchpad
-                    truncated_result = {k: (str(v)[:200] + "..." if len(str(v)) > 200 else v) for k, v in tool_result.items()}
-                    observation_text = json.dumps(truncated_result, indent=2)
-
-                current_scratchpad += f"\n\n### Step {i+1}: Execution & Observation\n"
-                current_scratchpad += f"**Tool Used**: {tool_name}\n"
-                current_scratchpad += f"**Success**: {success}\n"
-                current_scratchpad += f"**Response Time**: {response_time:.2f}s\n"
-                current_scratchpad += f"**Result**:\n```json\n{observation_text}\n```"
-
-                # Track tool call
-                tool_calls_this_turn.append({
-                    "name": tool_name,
-                    "params": tool_params,
-                    "result": tool_result,
-                    "response_time": response_time,
-                    "confidence": confidence,
-                    "reasoning": reasoning
-                })
-
-                if success:
-                    log_event_fn(f"✅ Step {i+1} completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
-                        "tool_name": tool_name,
-                        "response_time": response_time,
-                        "confidence": confidence
-                    })
-                else:
-                    error_detail = tool_result.get("error", "No error detail provided.")
-                    log_event_fn(f"⚠️ Step {i+1} completed with issues: {error_detail}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
-                        "tool_name": tool_name,
-                        "error": error_detail,
-                        "confidence": confidence
-                    })
-
-                    # Add failure handling to scratchpad
-                    current_scratchpad += f"\n**Failure Analysis**: {error_detail}"
-                    current_scratchpad += f"\n**Next Steps**: Consider alternative approaches or tools"
-
-                # Log current progress
-                completed_count = len(completed_tasks)
-                total_tasks = len(execution_plan.tasks)
-                if total_tasks > 0:
-                    progress = (completed_count / total_tasks) * 100
-                    log_event_fn(f"Progress: {completed_count}/{total_tasks} tasks completed ({progress:.1f}%)", MSG_TYPE.MSG_TYPE_STEP_PROGRESS, meta={"progress": progress})
-
-                    # Check if all tasks are completed
-                    if completed_count >= total_tasks:
-                        log_event_fn("🎯 All planned tasks completed", MSG_TYPE.MSG_TYPE_INFO)
-                        break
-
-            except Exception as ex:
-                log_event_fn(f"💥 Unexpected error in reasoning step {i+1}: {str(ex)}", MSG_TYPE.MSG_TYPE_ERROR, event_id=reasoning_step_id)
-                trace_exception(ex)
-
-                # Add error to scratchpad for context
-                current_scratchpad += f"\n\n### Step {i+1}: Unexpected Error\n**Error**: {str(ex)}\n**Recovery**: Continuing with adjusted approach"
-
-                log_event_fn("🔄 Recovering and continuing with next step", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
-
-        # Enhanced self-reflection
-        if enable_self_reflection and len(tool_calls_this_turn) > 0:
-            reflection_id = log_event_fn("🤔 Conducting comprehensive self-assessment...", MSG_TYPE.MSG_TYPE_STEP_START)
-            try:
-                reflection_prompt = f"""Conduct a thorough review of your work and assess the quality of your response to the user's request.
-
-ORIGINAL REQUEST: "{original_user_prompt}"
-TOOLS USED: {len(tool_calls_this_turn)}
-PLAN REVISIONS: {plan_revision_count}
-
-COMPLETE ANALYSIS:
-{current_scratchpad}
-
-Evaluate your performance on multiple dimensions:
-
-1. **Goal Achievement**: Did you fully address the user's request?
-2. **Process Efficiency**: Was your approach optimal given the available tools?
-3. **Information Quality**: Is the information you gathered accurate and relevant?
-4. **Decision Making**: Were your tool choices and parameters appropriate?
-5. **Adaptability**: How well did you handle unexpected results or plan changes?
-
-Provide your assessment as JSON:
-{{
-    "goal_achieved": true,
-    "effectiveness_score": 0.85,
-    "process_efficiency": 0.8,
-    "information_quality": 0.9,
-    "decision_making": 0.85,
-    "adaptability": 0.7,
-    "overall_confidence": 0.82,
-    "strengths": ["Clear reasoning", "Good tool selection"],
-    "areas_for_improvement": ["Could have been more efficient"],
-    "summary": "Successfully completed the user's request with high quality results",
-    "key_insights": ["Discovered that X was more important than initially thought"]
-}}"""
-
-                reflection_data = self.generate_structured_content(
-                    prompt=reflection_prompt,
-                    schema={
-                        "goal_achieved": "boolean",
-                        "effectiveness_score": "number",
-                        "process_efficiency": "number",
-                        "information_quality": "number",
-                        "decision_making": "number",
-                        "adaptability": "number",
-                        "overall_confidence": "number",
-                        "strengths": "array",
-                        "areas_for_improvement": "array",
-                        "summary": "string",
-                        "key_insights": "array"
-                    },
-                    temperature=0.3,
-                    **llm_generation_kwargs
-                )
-
-                if reflection_data:
-                    current_scratchpad += f"\n\n### Comprehensive Self-Assessment\n"
-                    current_scratchpad += f"**Goal Achieved**: {reflection_data.get('goal_achieved', False)}\n"
-                    current_scratchpad += f"**Overall Confidence**: {reflection_data.get('overall_confidence', 0.5):.2f}\n"
-                    current_scratchpad += f"**Effectiveness Score**: {reflection_data.get('effectiveness_score', 0.5):.2f}\n"
-                    current_scratchpad += f"**Key Strengths**: {', '.join(reflection_data.get('strengths', []))}\n"
-                    current_scratchpad += f"**Improvement Areas**: {', '.join(reflection_data.get('areas_for_improvement', []))}\n"
-                    current_scratchpad += f"**Summary**: {reflection_data.get('summary', '')}\n"
-
-                    log_event_fn(f"✅ Self-assessment completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id, meta={
-                        "overall_confidence": reflection_data.get('overall_confidence', 0.5),
-                        "goal_achieved": reflection_data.get('goal_achieved', False),
-                        "effectiveness_score": reflection_data.get('effectiveness_score', 0.5)
-                    })
-                else:
-                    log_event_fn("Self-assessment data generation failed", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
-
-            except Exception as e:
-                log_event_fn(f"Self-assessment failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
-
-        # Enhanced final synthesis
-        synthesis_id = log_event_fn("📝 Synthesizing comprehensive final response...", MSG_TYPE.MSG_TYPE_STEP_START)
-
-        final_answer_prompt = f"""Create a comprehensive, well-structured final response that fully addresses the user's request.
-
-ORIGINAL REQUEST: "{original_user_prompt}"
-CONTEXT: {context or "No additional context"}
-
-COMPLETE ANALYSIS AND WORK:
-{current_scratchpad}
-
-GUIDELINES for your response:
-1. **Be Complete**: Address all aspects of the user's request
-2. **Be Clear**: Organize your response logically and use clear language
-3. **Be Helpful**: Provide actionable information and insights
-4. **Be Honest**: If there were limitations or uncertainties, mention them appropriately
-5. **Be Concise**: While being thorough, avoid unnecessary verbosity
-6. **Cite Sources**: If you used research tools, reference the information appropriately
-
-Your response should feel natural and conversational while being informative and valuable.
-
-FINAL RESPONSE:"""
-
-        log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)
-
-        final_answer_text = self.generate_text(
-            prompt=final_answer_prompt,
-            system_prompt=system_prompt,
-            stream=streaming_callback is not None,
-            streaming_callback=streaming_callback,
-            temperature=final_answer_temperature,
-            **llm_generation_kwargs
-        )
-
-        if isinstance(final_answer_text, dict) and "error" in final_answer_text:
-            log_event_fn(f"Final synthesis failed: {final_answer_text['error']}", MSG_TYPE.MSG_TYPE_ERROR, event_id=synthesis_id)
-            return {
-                "final_answer": "I encountered an issue while preparing my final response. Please let me know if you'd like me to try again.",
-                "error": final_answer_text["error"],
-                "final_scratchpad": current_scratchpad,
-                "tool_calls": tool_calls_this_turn,
-                "sources": sources_this_turn,
-                "decision_history": decision_history
-            }
-
-        final_answer = self.remove_thinking_blocks(final_answer_text)
-
-        # Calculate overall performance metrics
-        overall_confidence = sum(call.get('confidence', 0.5) for call in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
-        successful_calls = sum(1 for call in tool_calls_this_turn if call.get('result', {}).get('status') == 'success')
-        success_rate = successful_calls / max(len(tool_calls_this_turn), 1)
-
-        log_event_fn("✅ Comprehensive response ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id, meta={
-            "final_answer_length": len(final_answer),
-            "total_tools_used": len(tool_calls_this_turn),
-            "success_rate": success_rate,
-            "overall_confidence": overall_confidence
-        })
-
-        return {
-            "final_answer": final_answer,
-            "final_scratchpad": current_scratchpad,
-            "tool_calls": tool_calls_this_turn,
-            "sources": sources_this_turn,
-            "decision_history": decision_history,
-            "performance_stats": {
-                "total_steps": len(tool_calls_this_turn),
-                "successful_steps": successful_calls,
-                "success_rate": success_rate,
-                "average_confidence": overall_confidence,
-                "plan_revisions": plan_revision_count,
-                "total_reasoning_steps": len(decision_history)
-            },
-            "plan_evolution": {
-                "initial_tasks": len(execution_plan.tasks),
-                "final_version": current_plan_version,
-                "total_revisions": plan_revision_count
-            },
-            "clarification_required": False,
-            "overall_confidence": overall_confidence,
-            "error": None
-        }
-
-
3209
- def _execute_complex_reasoning_loop(
3210
- self, prompt, context, system_prompt, reasoning_system_prompt, images,
3211
- max_reasoning_steps, decision_temperature, final_answer_temperature,
3212
- streaming_callback, debug, enable_self_reflection, all_visible_tools,
3213
- rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, max_scratchpad_size, **llm_generation_kwargs
3214
- ) -> Dict[str, Any]:
3215
-
3216
- planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
3217
-
3218
- def _get_friendly_action_description(tool_name, requires_code, requires_image):
3219
- descriptions = {
3220
- "local_tools::final_answer": "📋 Preparing final answer",
3221
- "local_tools::request_clarification": "❓ Requesting clarification",
3222
- "local_tools::generate_image": "🎨 Creating image",
3223
- "local_tools::revise_plan": "📝 Revising execution plan"
3224
- }
3225
- if tool_name in descriptions:
3226
- return descriptions[tool_name]
3227
- if "research::" in tool_name:
3228
- return f"🔍 Searching {tool_name.split('::')[-1]} knowledge base"
3229
- if requires_code:
3230
- return "💻 Processing code"
3231
- if requires_image:
3232
- return "🖼️ Analyzing images"
3233
- return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
3234
-
3235
- def _compress_scratchpad_intelligently(scratchpad: str, original_request: str, target_size: int) -> str:
3236
- """Enhanced scratchpad compression that preserves key decisions and recent context"""
3237
- if len(scratchpad) <= target_size:
3238
- return scratchpad
3239
-
3240
- log_event_fn("📝 Compressing scratchpad to maintain focus...", MSG_TYPE.MSG_TYPE_INFO)
3241
-
3242
- # Extract key components
3243
- lines = scratchpad.split('\n')
3244
- plan_section = []
3245
- decisions = []
3246
- recent_observations = []
3247
-
3248
- current_section = None
3249
- for i, line in enumerate(lines):
3250
- if "### Execution Plan" in line or "### Updated Plan" in line:
3251
- current_section = "plan"
3252
- elif "### Step" in line and ("Thought" in line or "Decision" in line):
3253
- current_section = "decision"
3254
- elif "### Step" in line and "Observation" in line:
3255
- current_section = "observation"
3256
- elif line.startswith("###"):
3257
- current_section = None
3258
-
3259
- if current_section == "plan" and line.strip():
3260
- plan_section.append(line)
3261
- elif current_section == "decision" and line.strip():
3262
- decisions.append((i, line))
3263
- elif current_section == "observation" and line.strip():
3264
- recent_observations.append((i, line))
3265
-
3266
- # Keep most recent items and important decisions
3267
- recent_decisions = decisions[-3:] if len(decisions) > 3 else decisions
3268
- recent_obs = recent_observations[-5:] if len(recent_observations) > 5 else recent_observations
3269
-
3270
- compressed_parts = [
3271
- f"### Original Request\n{original_request}",
3272
- f"### Current Plan\n" + '\n'.join(plan_section[-10:]),
3273
- f"### Recent Key Decisions"
3274
- ]
3275
-
3276
- for _, decision in recent_decisions:
3277
- compressed_parts.append(decision)
3278
-
3279
- compressed_parts.append("### Recent Observations")
3280
- for _, obs in recent_obs:
3281
- compressed_parts.append(obs)
3282
-
3283
- compressed = '\n'.join(compressed_parts)
3284
- if len(compressed) > target_size:
3285
- # Final trim if still too long
3286
- compressed = compressed[:target_size-100] + "\n...[content compressed for focus]"
3287
-
3288
- return compressed
3289
-
3290
- original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
3291
- asset_store: Dict[str, Dict] = {}
3292
- decision_history = [] # Track all decisions made
3293
-
3294
- # Enhanced planning phase
3295
- planning_step_id = log_event_fn("📋 Creating adaptive execution plan...", MSG_TYPE.MSG_TYPE_STEP_START)
3296
- execution_plan = planner.decompose_task(original_user_prompt, context or "")
3297
- current_plan_version = 1
3298
-
3299
- log_event_fn(f"Initial plan created with {len(execution_plan.tasks)} tasks", MSG_TYPE.MSG_TYPE_INFO, meta={
3300
- "plan_version": current_plan_version,
3301
- "total_tasks": len(execution_plan.tasks),
3302
- "estimated_complexity": "medium" if len(execution_plan.tasks) <= 5 else "high"
3303
- })
3304
-
3305
- for i, task in enumerate(execution_plan.tasks):
3306
- log_event_fn(f"Task {i+1}: {task.description}", MSG_TYPE.MSG_TYPE_INFO)
3307
-
3308
- log_event_fn("✅ Adaptive plan ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
3309
-
3310
- # Enhanced initial state
3311
- initial_state_parts = [
3312
- f"### Original User Request\n{original_user_prompt}",
3313
- f"### Context\n{context or 'No additional context provided'}",
3314
- f"### Execution Plan (Version {current_plan_version})\n- Total tasks: {len(execution_plan.tasks)}",
3315
- f"- Estimated complexity: {'High' if len(execution_plan.tasks) > 5 else 'Medium'}"
3316
- ]
3317
-
3318
- for i, task in enumerate(execution_plan.tasks):
3319
- initial_state_parts.append(f" {i+1}. {task.description} [Status: {task.status.value}]")
3320
-
3321
- if images:
3322
- initial_state_parts.append(f"### Provided Assets")
3323
- for img_b64 in images:
3324
- img_uuid = str(uuid.uuid4())
3325
- asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
3326
- initial_state_parts.append(f"- Image asset: {img_uuid}")
3327
-
3328
- current_scratchpad = "\n".join(initial_state_parts)
3329
- log_event_fn("Initial analysis complete", MSG_TYPE.MSG_TYPE_SCRATCHPAD, meta={"scratchpad_size": len(current_scratchpad)})
3330
-
3331
- formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
3332
- completed_tasks, current_task_index = set(), 0
3333
- plan_revision_count = 0
3334
-
3335
- # Main reasoning loop with enhanced decision tracking
3336
- for i in range(max_reasoning_steps):
3337
- current_task_desc = execution_plan.tasks[current_task_index].description if current_task_index < len(execution_plan.tasks) else "Finalizing analysis"
3338
- step_desc = f"🤔 Step {i+1}: {current_task_desc}"
3339
- reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
3340
-
3341
- try:
3342
- # Enhanced scratchpad management
3343
- if len(current_scratchpad) > max_scratchpad_size:
3344
- log_event_fn(f"Scratchpad size ({len(current_scratchpad)}) exceeds limit, compressing...", MSG_TYPE.MSG_TYPE_INFO)
3345
- current_scratchpad = _compress_scratchpad_intelligently(current_scratchpad, original_user_prompt, max_scratchpad_size // 2)
3346
- log_event_fn(f"Scratchpad compressed to {len(current_scratchpad)} characters", MSG_TYPE.MSG_TYPE_INFO)
3347
-
3348
- # Enhanced reasoning prompt with better decision tracking
3349
- reasoning_prompt = f"""You are working on: "{original_user_prompt}"
3350
-
3351
- === AVAILABLE ACTIONS ===
3352
- {formatted_tools_list}
3353
-
3354
- === YOUR COMPLETE ANALYSIS HISTORY ===
3355
- {current_scratchpad}
3356
- === END ANALYSIS HISTORY ===
3357
-
3358
- === DECISION GUIDELINES ===
3359
- 1. **Review your progress**: Look at what you've already discovered and accomplished
3360
- 2. **Consider your current task**: Focus on the next logical step in your plan
3361
- 3. **Remember your decisions**: If you previously decided to use a tool, follow through unless you have a good reason to change
3362
- 4. **Be adaptive**: If you discover new information that changes the situation, consider revising your plan
3363
- 5. **Stay focused**: Each action should clearly advance toward the final goal
3364
-
3365
- === YOUR NEXT DECISION ===
3366
- Choose the single most appropriate action to take right now. Consider:
3367
- - What specific step are you currently working on?
3368
- - What information do you still need?
3369
- - What would be most helpful for the user?
3370
-
3371
- Provide your decision as JSON:
3372
- {{
3373
- "reasoning": "Explain your current thinking and why this action makes sense now",
3374
- "action": {{
3375
- "tool_name": "exact_tool_name",
3376
- "requires_code_input": false,
3377
- "requires_image_input": false,
3378
- "confidence": 0.8
3379
- }},
3380
- "plan_status": "on_track" // or "needs_revision" if you want to change the plan
3381
- }}"""
3382
-
3383
- log_prompt_fn(f"Reasoning Prompt Step {i+1}", reasoning_prompt)
3384
- decision_data = self.generate_structured_content(
3385
- prompt=reasoning_prompt,
3386
- schema={
3387
- "reasoning": "string",
3388
- "action": "object",
3389
- "plan_status": "string"
3390
- },
3391
- system_prompt=reasoning_system_prompt,
3392
- temperature=decision_temperature,
3393
- **llm_generation_kwargs
3394
- )
3395
-
3396
- if not (decision_data and isinstance(decision_data.get("action"), dict)):
3397
- log_event_fn("⚠️ Invalid decision format from AI", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
3398
- current_scratchpad += f"\n\n### Step {i+1}: Decision Error\n- Error: AI produced invalid decision JSON\n- Continuing with fallback approach"
3399
- continue
3400
-
3401
- action = decision_data.get("action", {})
3402
- reasoning = decision_data.get("reasoning", "No reasoning provided")
3403
- plan_status = decision_data.get("plan_status", "on_track")
3404
- tool_name = action.get("tool_name")
3405
- requires_code = action.get("requires_code_input", False)
3406
- requires_image = action.get("requires_image_input", False)
3407
- confidence = action.get("confidence", 0.5)
3408
-
3409
- # Track the decision
3410
- decision_history.append({
3411
- "step": i+1,
3412
- "tool_name": tool_name,
3413
- "reasoning": reasoning,
3414
- "confidence": confidence,
3415
- "plan_status": plan_status
3416
- })
3417
-
3418
- current_scratchpad += f"\n\n### Step {i+1}: Decision & Reasoning\n**Reasoning**: {reasoning}\n**Chosen Action**: {tool_name}\n**Confidence**: {confidence}\n**Plan Status**: {plan_status}"
3419
-
3420
- log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP, meta={
3421
- "tool_name": tool_name,
3422
- "confidence": confidence,
3423
- "reasoning": reasoning[:100] + "..." if len(reasoning) > 100 else reasoning
3424
- })
3425
-
3426
- # Handle plan revision
3427
- if plan_status == "needs_revision" and tool_name != "local_tools::revise_plan":
3428
- log_event_fn("🔄 AI indicates plan needs revision", MSG_TYPE.MSG_TYPE_INFO)
3429
- tool_name = "local_tools::revise_plan" # Force plan revision
3430
-
3431
- # Handle final answer
3432
- if tool_name == "local_tools::final_answer":
3433
- log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
3434
- break
3435
-
3436
- # Handle clarification request
3437
- if tool_name == "local_tools::request_clarification":
3438
- clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
3439
-
3440
- CURRENT ANALYSIS:
3441
- {current_scratchpad}
3442
-
3443
- Generate a clear, specific question that will help you proceed effectively:"""
3444
-
3445
- question = self.generate_text(clarification_prompt, temperature=0.3)
3446
- question = self.remove_thinking_blocks(question)
3447
-
3448
- log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
3449
- return {
3450
- "final_answer": question,
3451
- "clarification_required": True,
3452
- "final_scratchpad": current_scratchpad,
3453
- "tool_calls": tool_calls_this_turn,
3454
- "sources": sources_this_turn,
3455
- "error": None,
3456
- "decision_history": decision_history
3457
- }
3458
-
3459
- # Handle plan revision
3460
- if tool_name == "local_tools::revise_plan":
3461
- plan_revision_count += 1
3462
- revision_id = log_event_fn(f"📝 Revising execution plan (revision #{plan_revision_count})", MSG_TYPE.MSG_TYPE_STEP_START)
3463
-
3464
- try:
3465
- revision_prompt = f"""Based on your current analysis and discoveries, create an updated execution plan.
3466
-
3467
- ORIGINAL REQUEST: "{original_user_prompt}"
3468
- CURRENT ANALYSIS:
3469
- {current_scratchpad}
3470
-
3471
- REASON FOR REVISION: {reasoning}
3472
-
3473
- Create a new plan that reflects your current understanding. Consider:
3474
- 1. What have you already accomplished?
3475
- 2. What new information have you discovered?
3476
- 3. What steps are still needed?
3477
- 4. How can you be more efficient?
3478
-
3479
- Provide your revision as JSON:
3480
- {{
3481
- "revision_reason": "Clear explanation of why the plan needed to change",
3482
- "new_plan": [
3483
- {{"step": 1, "description": "First revised step", "status": "pending"}},
3484
- {{"step": 2, "description": "Second revised step", "status": "pending"}}
3485
- ],
3486
- "confidence": 0.8
3487
- }}"""
3488
-
3489
- revision_data = self.generate_structured_content(
3490
- prompt=revision_prompt,
3491
- schema={
3492
- "revision_reason": "string",
3493
- "new_plan": "array",
3494
- "confidence": "number"
3495
- },
3496
- temperature=0.3,
3497
- **llm_generation_kwargs
3498
- )
3499
-
3500
- if revision_data and revision_data.get("new_plan"):
3501
- # Update the plan
3502
- current_plan_version += 1
3503
- new_tasks = []
3504
- for task_data in revision_data["new_plan"]:
3505
- task = TaskDecomposition() # Assuming this class exists
3506
- task.description = task_data.get("description", "Undefined step")
3507
- task.status = TaskStatus.PENDING # Reset all to pending
3508
- new_tasks.append(task)
3509
-
3510
- execution_plan.tasks = new_tasks
3511
- current_task_index = 0 # Reset to beginning
3512
-
3513
- # Update scratchpad with new plan
3514
- current_scratchpad += f"\n\n### Updated Plan (Version {current_plan_version})\n"
3515
- current_scratchpad += f"**Revision Reason**: {revision_data.get('revision_reason', 'Plan needed updating')}\n"
3516
- current_scratchpad += f"**New Tasks**:\n"
3517
- for i, task in enumerate(execution_plan.tasks):
3518
- current_scratchpad += f" {i+1}. {task.description}\n"
3519
-
3520
- log_event_fn(f"✅ Plan revised with {len(execution_plan.tasks)} updated tasks", MSG_TYPE.MSG_TYPE_STEP_END, event_id=revision_id, meta={
3521
- "plan_version": current_plan_version,
3522
- "new_task_count": len(execution_plan.tasks),
3523
- "revision_reason": revision_data.get("revision_reason", "")
3524
- })
3525
-
3526
- # Continue with the new plan
3527
- continue
3528
- else:
3529
- raise ValueError("Failed to generate valid plan revision")
3530
-
3531
- except Exception as e:
3532
- log_event_fn(f"Plan revision failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=revision_id)
3533
- current_scratchpad += f"\n**Plan Revision Failed**: {str(e)}\nContinuing with original plan."
3534
-
3535
- # Prepare parameters for tool execution
3536
- param_assets = {}
3537
- if requires_code:
3538
- log_event_fn("💻 Generating code for task", MSG_TYPE.MSG_TYPE_INFO)
3539
- code_prompt = f"""Generate the specific code needed for the current step.
3540
-
3541
- CURRENT CONTEXT:
3542
- {current_scratchpad}
3543
-
3544
- CURRENT TASK: {tool_name}
3545
- USER REQUEST: "{original_user_prompt}"
3546
-
3547
- Generate clean, functional code that addresses the specific requirements. Focus on:
3548
- 1. Solving the immediate problem
3549
- 2. Being clear and readable
3550
- 3. Including necessary imports and dependencies
3551
- 4. Adding helpful comments where appropriate
3552
-
3553
- CODE:"""
3554
-
3555
- code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
3556
- code_uuid = f"code_asset_{uuid.uuid4()}"
3557
- asset_store[code_uuid] = {"type": "code", "content": code_content}
3558
- param_assets['code_asset_id'] = code_uuid
3559
- log_event_fn(f"Code asset created: {code_uuid[:8]}...", MSG_TYPE.MSG_TYPE_INFO)
3560
-
3561
-                 if requires_image:
-                     image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
-                     if image_assets:
-                         param_assets['image_asset_id'] = image_assets[0]
-                         log_event_fn(f"Using image asset: {image_assets[0][:8]}...", MSG_TYPE.MSG_TYPE_INFO)
-                     else:
-                         log_event_fn("⚠️ Image required but none available", MSG_TYPE.MSG_TYPE_WARNING)
-
-                 # Enhanced parameter generation
-                 param_prompt = f"""Generate the optimal parameters for this tool execution.
-
- TOOL: {tool_name}
- CURRENT CONTEXT: {current_scratchpad}
- CURRENT REASONING: {reasoning}
- AVAILABLE ASSETS: {json.dumps(param_assets) if param_assets else "None"}
-
- Based on your analysis and the current step you're working on, provide the most appropriate parameters.
- Be specific and purposeful in your parameter choices.
-
- Output format: {{"tool_params": {{...}}}}"""
-
-                 log_prompt_fn(f"Parameter Generation Step {i+1}", param_prompt)
-                 param_data = self.generate_structured_content(
-                     prompt=param_prompt,
-                     schema={"tool_params": "object"},
-                     temperature=decision_temperature,
-                     **llm_generation_kwargs
-                 )
-                 tool_params = param_data.get("tool_params", {}) if param_data else {}
-
-                 current_scratchpad += f"\n**Parameters Generated**: {json.dumps(tool_params, indent=2)}"
-
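The `if param_data else {}` guard above matters because structured generation can fail and return nothing. The same defensive extraction, isolated as a runnable helper (names invented for illustration):

import json

def extract_tool_params(raw) -> dict:
    """Defensively pull {"tool_params": {...}} out of a model reply, defaulting to {}."""
    try:
        data = json.loads(raw)
    except (json.JSONDecodeError, TypeError):
        return {}
    params = data.get("tool_params") if isinstance(data, dict) else None
    return params if isinstance(params, dict) else {}

assert extract_tool_params('{"tool_params": {"query": "cats"}}') == {"query": "cats"}
assert extract_tool_params("not json") == {}
assert extract_tool_params(None) == {}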
-                 # Hydrate parameters with assets
-                 def _hydrate(data: Any, store: Dict) -> Any:
-                     if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
-                     if isinstance(data, list): return [_hydrate(item, store) for item in data]
-                     if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
-                     return data
-
-                 hydrated_params = _hydrate(tool_params, asset_store)
-
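A quick self-contained demonstration of the hydration helper's behavior; the function mirrors the code above and the sample store is made up:

from typing import Any, Dict

def _hydrate(data: Any, store: Dict) -> Any:
    if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
    if isinstance(data, list): return [_hydrate(item, store) for item in data]
    if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
    return data

store = {"code_asset_123": {"type": "code", "content": "print('hi')"}}
params = {"code": "code_asset_123", "nested": ["code_asset_123", "plain text"]}
print(_hydrate(params, store))
# -> {'code': "print('hi')", 'nested': ["print('hi')", 'plain text']}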
-                 # Execute the tool with detailed logging
-                 start_time = time.time()
-                 tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
-
-                 try:
-                     if tool_name in rag_registry:
-                         query = hydrated_params.get("query", "")
-                         if not query:
-                             # Fall back to using reasoning as query
-                             query = (reasoning[:200] + "...") if len(reasoning) > 200 else reasoning
-
-                         log_event_fn(f"🔍 Searching knowledge base with query: '{query[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
-
-                         top_k = rag_tool_specs[tool_name]["default_top_k"]
-                         min_sim = rag_tool_specs[tool_name]["default_min_sim"]
-
-                         raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
-                         raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
-
-                         docs = []
-                         for d in raw_iter or []:
-                             doc_data = {
-                                 "text": d.get("text", str(d)),
-                                 "score": d.get("score", 0) * 100,
-                                 "metadata": d.get("metadata", {})
-                             }
-                             docs.append(doc_data)
-
-                         kept = [x for x in docs if x['score'] >= min_sim]
-                         tool_result = {
-                             "status": "success",
-                             "results": kept,
-                             "total_found": len(docs),
-                             "kept_after_filtering": len(kept),
-                             "query_used": query
-                         }
-
-                         sources_this_turn.extend([{
-                             "source": tool_name,
-                             "metadata": x["metadata"],
-                             "score": x["score"]
-                         } for x in kept])
-
-                         log_event_fn(f"📚 Retrieved {len(kept)} relevant documents (from {len(docs)} total)", MSG_TYPE.MSG_TYPE_INFO)
-
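The score handling above rescales similarity to a 0-100 range before thresholding against `default_min_sim`. A compact, runnable sketch of that filter (the sample documents are invented):

def filter_by_similarity(docs: list, min_sim: float) -> list:
    """Keep documents whose similarity, rescaled to 0-100, meets the threshold."""
    scored = [{**d, "score": d.get("score", 0) * 100} for d in docs]
    return [d for d in scored if d["score"] >= min_sim]

docs = [{"text": "a", "score": 0.82}, {"text": "b", "score": 0.41}]
print(filter_by_similarity(docs, min_sim=50))  # keeps only "a" (score ~82)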
-                     elif hasattr(self, "mcp") and "local_tools" not in tool_name:
-                         log_event_fn(f"🔧 Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={
-                             "tool_name": tool_name,
-                             "params": {k: str(v)[:100] for k, v in hydrated_params.items()}  # Truncate for logging
-                         })
-
-                         tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
-
-                         log_event_fn("Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={
-                             "result_status": tool_result.get("status", "unknown"),
-                             "has_error": "error" in tool_result
-                         })
-
-                     elif tool_name == "local_tools::generate_image" and hasattr(self, "tti"):
-                         image_prompt = hydrated_params.get("prompt", "")
-                         log_event_fn(f"🎨 Generating image with prompt: '{image_prompt[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
-
-                         # This would call your text-to-image functionality
-                         image_result = self.tti.generate_image(image_prompt)  # Assuming this method exists
-                         if image_result:
-                             image_uuid = f"generated_image_{uuid.uuid4()}"
-                             asset_store[image_uuid] = {"type": "image", "content": image_result, "source": "generated"}
-                             tool_result = {"status": "success", "image_id": image_uuid, "prompt_used": image_prompt}
-                         else:
-                             tool_result = {"status": "failure", "error": "Image generation failed"}
-
-                     else:
-                         tool_result = {"status": "failure", "error": f"Tool '{tool_name}' is not available or supported in this context."}
-
-                 except Exception as e:
-                     error_msg = f"Exception during '{tool_name}' execution: {str(e)}"
-                     log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
-                     tool_result = {"status": "failure", "error": error_msg}
-
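The if/elif chain above is a straightforward dispatcher over tool categories (RAG, MCP, local image generation, fallback). The same routing can be expressed table-driven, which keeps each handler testable in isolation; a minimal sketch of that alternative, with invented handler names rather than lollms-client API:

def run_rag(params: dict) -> dict:
    return {"status": "success", "results": []}          # placeholder handler

def run_image(params: dict) -> dict:
    return {"status": "success", "image_id": "stub"}     # placeholder handler

HANDLERS = {
    "knowledge::search": run_rag,
    "local_tools::generate_image": run_image,
}

def dispatch(tool_name: str, params: dict) -> dict:
    """Route a tool call through a lookup table, with the same failure shape as above."""
    handler = HANDLERS.get(tool_name)
    if handler is None:
        return {"status": "failure", "error": f"Tool '{tool_name}' is not available."}
    try:
        return handler(params)
    except Exception as e:
        return {"status": "failure", "error": f"Exception during '{tool_name}' execution: {e}"}

print(dispatch("local_tools::generate_image", {"prompt": "a cat"}))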
-                 response_time = time.time() - start_time
-                 success = tool_result.get("status") == "success"
-
-                 # Record performance
-                 performance_tracker.record_tool_usage(tool_name, success, confidence, response_time, tool_result.get("error"))
-
-                 # Update task status
-                 if success and current_task_index < len(execution_plan.tasks):
-                     execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
-                     completed_tasks.add(current_task_index)
-                     current_task_index += 1
-
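record_tool_usage belongs to a performance tracker defined elsewhere in this module. A plausible minimal implementation of such a tracker, for orientation only (this is a guess at the shape, not the actual class):

from collections import defaultdict

class ToolPerformanceTracker:
    """Per-tool success/latency aggregation (illustrative sketch)."""
    def __init__(self):
        self.stats = defaultdict(lambda: {"calls": 0, "successes": 0, "total_time": 0.0,
                                          "confidences": [], "last_error": None})

    def record_tool_usage(self, tool_name, success, confidence, response_time, error=None):
        s = self.stats[tool_name]
        s["calls"] += 1
        s["successes"] += int(bool(success))
        s["confidences"].append(confidence)
        s["total_time"] += response_time
        if error:
            s["last_error"] = error

    def success_rate(self, tool_name) -> float:
        s = self.stats[tool_name]
        return s["successes"] / s["calls"] if s["calls"] else 0.0

tracker = ToolPerformanceTracker()
tracker.record_tool_usage("knowledge::search", True, 0.9, 0.42)
assert tracker.success_rate("knowledge::search") == 1.0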
-                 # Enhanced observation logging
-                 observation_text = json.dumps(tool_result, indent=2)
-                 if len(observation_text) > 1000:
-                     # Truncate very long results for scratchpad
-                     truncated_result = {k: (str(v)[:200] + "..." if len(str(v)) > 200 else v) for k, v in tool_result.items()}
-                     observation_text = json.dumps(truncated_result, indent=2)
-
-                 current_scratchpad += f"\n\n### Step {i+1}: Execution & Observation\n"
-                 current_scratchpad += f"**Tool Used**: {tool_name}\n"
-                 current_scratchpad += f"**Success**: {success}\n"
-                 current_scratchpad += f"**Response Time**: {response_time:.2f}s\n"
-                 current_scratchpad += f"**Result**:\n```json\n{observation_text}\n```"
-
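The value-level truncation above keeps the scratchpad bounded when tools return large payloads. Here is the same idea isolated as a runnable helper (names are illustrative):

import json

def truncate_values(result: dict, limit: int = 200) -> dict:
    """Clip long values so serialized observations stay readable."""
    return {k: (str(v)[:limit] + "..." if len(str(v)) > limit else v) for k, v in result.items()}

blob = {"status": "success", "results": "x" * 500}
print(json.dumps(truncate_values(blob), indent=2))  # "results" is clipped to 200 chars + "..."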
-
-                 # Track tool call
-                 tool_calls_this_turn.append({
-                     "name": tool_name,
-                     "params": tool_params,
-                     "result": tool_result,
-                     "response_time": response_time,
-                     "confidence": confidence,
-                     "reasoning": reasoning
-                 })
-
-                 if success:
-                     log_event_fn(f"✅ Step {i+1} completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
-                         "tool_name": tool_name,
-                         "response_time": response_time,
-                         "confidence": confidence
-                     })
-                 else:
-                     error_detail = tool_result.get("error", "No error detail provided.")
-                     log_event_fn(f"⚠️ Step {i+1} completed with issues: {error_detail}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
-                         "tool_name": tool_name,
-                         "error": error_detail,
-                         "confidence": confidence
-                     })
-
-                     # Add failure handling to scratchpad
-                     current_scratchpad += f"\n**Failure Analysis**: {error_detail}"
-                     current_scratchpad += "\n**Next Steps**: Consider alternative approaches or tools"
-
-                 # Log current progress
-                 completed_count = len(completed_tasks)
-                 total_tasks = len(execution_plan.tasks)
-                 if total_tasks > 0:
-                     progress = (completed_count / total_tasks) * 100
-                     log_event_fn(f"Progress: {completed_count}/{total_tasks} tasks completed ({progress:.1f}%)", MSG_TYPE.MSG_TYPE_STEP_PROGRESS, meta={"progress": progress})
-
-                     # Check if all tasks are completed
-                     if completed_count >= total_tasks:
-                         log_event_fn("🎯 All planned tasks completed", MSG_TYPE.MSG_TYPE_INFO)
-                         break
-
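The guard on total_tasks above avoids a division by zero on an empty plan; the same calculation isolated:

def progress_pct(completed: int, total: int) -> float:
    """Percentage of the plan completed; guards against an empty plan."""
    return (completed / total) * 100 if total > 0 else 0.0

assert progress_pct(3, 4) == 75.0
assert progress_pct(0, 0) == 0.0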
-             except Exception as ex:
-                 log_event_fn(f"💥 Unexpected error in reasoning step {i+1}: {str(ex)}", MSG_TYPE.MSG_TYPE_ERROR, event_id=reasoning_step_id)
-                 trace_exception(ex)
-
-                 # Add error to scratchpad for context
-                 current_scratchpad += f"\n\n### Step {i+1}: Unexpected Error\n**Error**: {str(ex)}\n**Recovery**: Continuing with adjusted approach"
-
-                 log_event_fn("🔄 Recovering and continuing with next step", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
-
-         # Enhanced self-reflection
-         if enable_self_reflection and len(tool_calls_this_turn) > 0:
-             reflection_id = log_event_fn("🤔 Conducting comprehensive self-assessment...", MSG_TYPE.MSG_TYPE_STEP_START)
-             try:
-                 reflection_prompt = f"""Conduct a thorough review of your work and assess the quality of your response to the user's request.
-
- ORIGINAL REQUEST: "{original_user_prompt}"
- TOOLS USED: {len(tool_calls_this_turn)}
- PLAN REVISIONS: {plan_revision_count}
-
- COMPLETE ANALYSIS:
- {current_scratchpad}
-
- Evaluate your performance on multiple dimensions:
-
- 1. **Goal Achievement**: Did you fully address the user's request?
- 2. **Process Efficiency**: Was your approach optimal given the available tools?
- 3. **Information Quality**: Is the information you gathered accurate and relevant?
- 4. **Decision Making**: Were your tool choices and parameters appropriate?
- 5. **Adaptability**: How well did you handle unexpected results or plan changes?
-
- Provide your assessment as JSON:
- {{
-     "goal_achieved": true,
-     "effectiveness_score": 0.85,
-     "process_efficiency": 0.8,
-     "information_quality": 0.9,
-     "decision_making": 0.85,
-     "adaptability": 0.7,
-     "overall_confidence": 0.82,
-     "strengths": ["Clear reasoning", "Good tool selection"],
-     "areas_for_improvement": ["Could have been more efficient"],
-     "summary": "Successfully completed the user's request with high quality results",
-     "key_insights": ["Discovered that X was more important than initially thought"]
- }}"""
-
-                 reflection_data = self.generate_structured_content(
-                     prompt=reflection_prompt,
-                     schema={
-                         "goal_achieved": "boolean",
-                         "effectiveness_score": "number",
-                         "process_efficiency": "number",
-                         "information_quality": "number",
-                         "decision_making": "number",
-                         "adaptability": "number",
-                         "overall_confidence": "number",
-                         "strengths": "array",
-                         "areas_for_improvement": "array",
-                         "summary": "string",
-                         "key_insights": "array"
-                     },
-                     temperature=0.3,
-                     **llm_generation_kwargs
-                 )
-
-                 if reflection_data:
-                     current_scratchpad += "\n\n### Comprehensive Self-Assessment\n"
-                     current_scratchpad += f"**Goal Achieved**: {reflection_data.get('goal_achieved', False)}\n"
-                     current_scratchpad += f"**Overall Confidence**: {reflection_data.get('overall_confidence', 0.5):.2f}\n"
-                     current_scratchpad += f"**Effectiveness Score**: {reflection_data.get('effectiveness_score', 0.5):.2f}\n"
-                     current_scratchpad += f"**Key Strengths**: {', '.join(reflection_data.get('strengths', []))}\n"
-                     current_scratchpad += f"**Improvement Areas**: {', '.join(reflection_data.get('areas_for_improvement', []))}\n"
-                     current_scratchpad += f"**Summary**: {reflection_data.get('summary', '')}\n"
-
-                     log_event_fn("✅ Self-assessment completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id, meta={
-                         "overall_confidence": reflection_data.get('overall_confidence', 0.5),
-                         "goal_achieved": reflection_data.get('goal_achieved', False),
-                         "effectiveness_score": reflection_data.get('effectiveness_score', 0.5)
-                     })
-                 else:
-                     log_event_fn("Self-assessment data generation failed", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
-
-             except Exception as e:
-                 log_event_fn(f"Self-assessment failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
-
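Scores coming back from the model are not guaranteed to be valid floats in range, which is why the code above falls back to 0.5 defaults. A small sketch of a normalizer in the same spirit (the helper name is invented):

def clamp_score(value, default: float = 0.5) -> float:
    """Coerce a model-reported score to a float in [0.0, 1.0], falling back to a default."""
    try:
        return min(1.0, max(0.0, float(value)))
    except (TypeError, ValueError):
        return default

assert clamp_score(0.85) == 0.85
assert clamp_score(1.7) == 1.0
assert clamp_score("n/a") == 0.5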
-         # Enhanced final synthesis
-         synthesis_id = log_event_fn("📝 Synthesizing comprehensive final response...", MSG_TYPE.MSG_TYPE_STEP_START)
-
-         final_answer_prompt = f"""Create a comprehensive, well-structured final response that fully addresses the user's request.
-
- ORIGINAL REQUEST: "{original_user_prompt}"
- CONTEXT: {context or "No additional context"}
-
- COMPLETE ANALYSIS AND WORK:
- {current_scratchpad}
-
- GUIDELINES for your response:
- 1. **Be Complete**: Address all aspects of the user's request
- 2. **Be Clear**: Organize your response logically and use clear language
- 3. **Be Helpful**: Provide actionable information and insights
- 4. **Be Honest**: If there were limitations or uncertainties, mention them appropriately
- 5. **Be Concise**: While being thorough, avoid unnecessary verbosity
- 6. **Cite Sources**: If you used research tools, reference the information appropriately
-
- Your response should feel natural and conversational while being informative and valuable.
-
  FINAL RESPONSE:"""
 
          log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)