lollms-client 1.6.1__py3-none-any.whl → 1.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (42)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
  3. lollms_client/llm_bindings/claude/__init__.py +2 -2
  4. lollms_client/llm_bindings/gemini/__init__.py +2 -2
  5. lollms_client/llm_bindings/grok/__init__.py +2 -2
  6. lollms_client/llm_bindings/groq/__init__.py +2 -2
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
  8. lollms_client/llm_bindings/litellm/__init__.py +1 -1
  9. lollms_client/llm_bindings/llamacpp/__init__.py +2 -2
  10. lollms_client/llm_bindings/lollms/__init__.py +1 -1
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
  12. lollms_client/llm_bindings/mistral/__init__.py +2 -2
  13. lollms_client/llm_bindings/novita_ai/__init__.py +2 -2
  14. lollms_client/llm_bindings/ollama/__init__.py +7 -4
  15. lollms_client/llm_bindings/open_router/__init__.py +2 -2
  16. lollms_client/llm_bindings/openai/__init__.py +1 -1
  17. lollms_client/llm_bindings/openllm/__init__.py +2 -2
  18. lollms_client/llm_bindings/openwebui/__init__.py +1 -1
  19. lollms_client/llm_bindings/perplexity/__init__.py +2 -2
  20. lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
  21. lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
  22. lollms_client/llm_bindings/transformers/__init__.py +4 -4
  23. lollms_client/llm_bindings/vllm/__init__.py +1 -1
  24. lollms_client/lollms_core.py +19 -1452
  25. lollms_client/lollms_llm_binding.py +1 -1
  26. lollms_client/lollms_tti_binding.py +1 -1
  27. lollms_client/lollms_tts_binding.py +15 -13
  28. lollms_client/tti_bindings/diffusers/__init__.py +276 -856
  29. lollms_client/tti_bindings/diffusers/server/main.py +730 -0
  30. lollms_client/tti_bindings/gemini/__init__.py +1 -1
  31. lollms_client/tti_bindings/leonardo_ai/__init__.py +1 -1
  32. lollms_client/tti_bindings/novita_ai/__init__.py +1 -1
  33. lollms_client/tti_bindings/stability_ai/__init__.py +1 -1
  34. lollms_client/tts_bindings/lollms/__init__.py +6 -1
  35. lollms_client/tts_bindings/piper_tts/__init__.py +1 -1
  36. lollms_client/tts_bindings/xtts/__init__.py +97 -38
  37. lollms_client/tts_bindings/xtts/server/main.py +288 -272
  38. {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/METADATA +6 -3
  39. {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/RECORD +42 -41
  40. {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/WHEEL +0 -0
  41. {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/licenses/LICENSE +0 -0
  42. {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/top_level.txt +0 -0
@@ -143,16 +143,21 @@ class LollmsClient():
             ASCIIColors.warning(f"Failed to create LLM binding: {llm_binding_name}. Available: {available}")

         if tts_binding_name:
-            self.tts = self.tts_binding_manager.create_binding(
-                binding_name=tts_binding_name,
-                **{
-                    k: v
-                    for k, v in (tts_binding_config or {}).items()
-                    if k != "binding_name"
-                }
-            )
-            if self.tts is None:
-                ASCIIColors.warning(f"Failed to create TTS binding: {tts_binding_name}. Available: {self.tts_binding_manager.get_available_bindings()}")
+            try:
+                params = {
+                    k: v
+                    for k, v in (tts_binding_config or {}).items()
+                    if k != "binding_name"
+                }
+                self.tts = self.tts_binding_manager.create_binding(
+                    binding_name=tts_binding_name,
+                    **params
+                )
+                if self.tts is None:
+                    ASCIIColors.warning(f"Failed to create TTS binding: {tts_binding_name}. Available: {self.tts_binding_manager.get_available_bindings()}")
+            except Exception as e:
+                trace_exception(e)
+                ASCIIColors.warning(f"Exception occurred while creating TTS binding: {str(e)}")

         if tti_binding_name:
             if tti_binding_config:
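
Net effect of this hunk: in 1.6.1 an exception raised inside a TTS binding's constructor propagated out of LollmsClient's __init__; in 1.6.4 it is traced and downgraded to a warning, leaving self.tts unset but the client usable. A minimal sketch of code relying on this, assuming the constructor keywords match the local variable names visible in the hunk (the binding name and config values are illustrative):

    from lollms_client import LollmsClient

    # Illustrative config; "binding_name" is stripped by the dict
    # comprehension above before the rest is forwarded as kwargs.
    tts_config = {"binding_name": "xtts", "voice": "main_voice"}

    client = LollmsClient(
        llm_binding_name="ollama",
        tts_binding_name="xtts",
        tts_binding_config=tts_config,
    )

    # As of 1.6.4 a failing TTS binding only logs a warning, so check before use.
    if client.tts is None:
        print("TTS unavailable; continuing text-only.")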
@@ -268,8 +273,8 @@ class LollmsClient():
             raise ValueError(f"Failed to update LLM binding: {binding_name}. Available: {available}")

     def get_ctx_size(self, model_name:str|None=None):
-        if self.llm:
-            ctx_size = self.llm.get_ctx_size(model_name)
+        if self.llm and self.llm.model_name:
+            ctx_size = self.llm.get_ctx_size(model_name or self.llm.model_name)
             return ctx_size if ctx_size else self.llm.default_ctx_size
         else:
             return None
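
The added self.llm.model_name guard matters when no model is configured: 1.6.1 forwarded None straight into the binding's get_ctx_size, whereas 1.6.4 substitutes the binding's configured model first and returns None outright when there is none. A sketch of the resulting behavior, assuming the usual constructor keywords (the model names are placeholders):

    client = LollmsClient(llm_binding_name="ollama", model_name="mistral")

    ctx = client.get_ctx_size()                # now resolves as get_ctx_size("mistral")
    ctx_other = client.get_ctx_size("llama3")  # an explicit name still wins

    # If the binding reports nothing, the method falls back to
    # self.llm.default_ctx_size, exactly as in the context lines above.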
@@ -674,10 +679,10 @@ class LollmsClient():
             raise RuntimeError("LLM binding not initialized.")


-    def listModels(self):
+    def list_models(self):
         """Lists models available to the current LLM binding."""
         if self.llm:
-            return self.llm.listModels()
+            return self.llm.list_models()
         raise RuntimeError("LLM binding not initialized.")

     # --- Convenience Methods for Lollms LLM Binding Features ---
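
listModels → list_models is a breaking rename on LollmsClient and, judging by the uniform +2 -2 edits across the llm_bindings/ tree above, on every binding as well. A hypothetical shim for callers that must straddle 1.6.1 and 1.6.4:

    def list_models_compat(client):
        """Call whichever spelling this lollms-client version exposes."""
        fn = getattr(client, "list_models", None) or getattr(client, "listModels")
        return fn()

    models = list_models_compat(client)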
@@ -2405,1444 +2410,6 @@ GUIDELINES for your response:
2405
2410
 
2406
2411
  Your response should feel natural and conversational while being informative and valuable.
2407
2412
 
2408
- FINAL RESPONSE:"""
2409
-
2410
- log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)
2411
-
2412
- final_answer_text = self.generate_text(
2413
- prompt=final_answer_prompt,
2414
- system_prompt=system_prompt,
2415
- stream=streaming_callback is not None,
2416
- streaming_callback=streaming_callback,
2417
- temperature=final_answer_temperature,
2418
- **llm_generation_kwargs
2419
- )
2420
-
2421
- if isinstance(final_answer_text, dict) and "error" in final_answer_text:
2422
- log_event_fn(f"Final synthesis failed: {final_answer_text['error']}", MSG_TYPE.MSG_TYPE_ERROR, event_id=synthesis_id)
2423
- return {
2424
- "final_answer": "I encountered an issue while preparing my final response. Please let me know if you'd like me to try again.",
2425
- "error": final_answer_text["error"],
2426
- "final_scratchpad": current_scratchpad,
2427
- "tool_calls": tool_calls_this_turn,
2428
- "sources": sources_this_turn,
2429
- "decision_history": decision_history
2430
- }
2431
-
2432
- final_answer = self.remove_thinking_blocks(final_answer_text)
2433
-
2434
- # Calculate overall performance metrics
2435
- overall_confidence = sum(call.get('confidence', 0.5) for call in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
2436
- successful_calls = sum(1 for call in tool_calls_this_turn if call.get('result', {}).get('status') == 'success')
2437
- success_rate = successful_calls / max(len(tool_calls_this_turn), 1)
2438
-
2439
- log_event_fn("✅ Comprehensive response ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id, meta={
2440
- "final_answer_length": len(final_answer),
2441
- "total_tools_used": len(tool_calls_this_turn),
2442
- "success_rate": success_rate,
2443
- "overall_confidence": overall_confidence
2444
- })
2445
-
2446
- return {
2447
- "final_answer": final_answer,
2448
- "final_scratchpad": current_scratchpad,
2449
- "tool_calls": tool_calls_this_turn,
2450
- "sources": sources_this_turn,
2451
- "decision_history": decision_history,
2452
- "performance_stats": {
2453
- "total_steps": len(tool_calls_this_turn),
2454
- "successful_steps": successful_calls,
2455
- "success_rate": success_rate,
2456
- "average_confidence": overall_confidence,
2457
- "plan_revisions": plan_revision_count,
2458
- "total_reasoning_steps": len(decision_history)
2459
- },
2460
- "plan_evolution": {
2461
- "initial_tasks": len(execution_plan.tasks),
2462
- "final_version": current_plan_version,
2463
- "total_revisions": plan_revision_count
2464
- },
2465
- "clarification_required": False,
2466
- "overall_confidence": overall_confidence,
2467
- "error": None
2468
- }
2469
-
2470
-
2471
- def _execute_complex_reasoning_loop(
2472
- self, prompt, context, system_prompt, reasoning_system_prompt, images,
2473
- max_reasoning_steps, decision_temperature, final_answer_temperature,
2474
- streaming_callback, debug, enable_self_reflection, all_visible_tools,
2475
- rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, max_scratchpad_size, **llm_generation_kwargs
2476
- ) -> Dict[str, Any]:
2477
-
2478
- planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
2479
-
2480
- def _get_friendly_action_description(tool_name, requires_code, requires_image):
2481
- descriptions = {
2482
- "local_tools::final_answer": "📋 Preparing final answer",
2483
- "local_tools::request_clarification": "❓ Requesting clarification",
2484
- "local_tools::generate_image": "🎨 Creating image",
2485
- "local_tools::revise_plan": "📝 Revising execution plan"
2486
- }
2487
- if tool_name in descriptions:
2488
- return descriptions[tool_name]
2489
- if "research::" in tool_name:
2490
- return f"🔍 Searching {tool_name.split('::')[-1]} knowledge base"
2491
- if requires_code:
2492
- return "💻 Processing code"
2493
- if requires_image:
2494
- return "🖼️ Analyzing images"
2495
- return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
2496
-
2497
- def _compress_scratchpad_intelligently(scratchpad: str, original_request: str, target_size: int) -> str:
2498
- """Enhanced scratchpad compression that preserves key decisions and recent context"""
2499
- if len(scratchpad) <= target_size:
2500
- return scratchpad
2501
-
2502
- log_event_fn("📝 Compressing scratchpad to maintain focus...", MSG_TYPE.MSG_TYPE_INFO)
2503
-
2504
- # Extract key components
2505
- lines = scratchpad.split('\n')
2506
- plan_section = []
2507
- decisions = []
2508
- recent_observations = []
2509
-
2510
- current_section = None
2511
- for i, line in enumerate(lines):
2512
- if "### Execution Plan" in line or "### Updated Plan" in line:
2513
- current_section = "plan"
2514
- elif "### Step" in line and ("Thought" in line or "Decision" in line):
2515
- current_section = "decision"
2516
- elif "### Step" in line and "Observation" in line:
2517
- current_section = "observation"
2518
- elif line.startswith("###"):
2519
- current_section = None
2520
-
2521
- if current_section == "plan" and line.strip():
2522
- plan_section.append(line)
2523
- elif current_section == "decision" and line.strip():
2524
- decisions.append((i, line))
2525
- elif current_section == "observation" and line.strip():
2526
- recent_observations.append((i, line))
2527
-
2528
- # Keep most recent items and important decisions
2529
- recent_decisions = decisions[-3:] if len(decisions) > 3 else decisions
2530
- recent_obs = recent_observations[-5:] if len(recent_observations) > 5 else recent_observations
2531
-
2532
- compressed_parts = [
2533
- f"### Original Request\n{original_request}",
2534
- f"### Current Plan\n" + '\n'.join(plan_section[-10:]),
2535
- f"### Recent Key Decisions"
2536
- ]
2537
-
2538
- for _, decision in recent_decisions:
2539
- compressed_parts.append(decision)
2540
-
2541
- compressed_parts.append("### Recent Observations")
2542
- for _, obs in recent_obs:
2543
- compressed_parts.append(obs)
2544
-
2545
- compressed = '\n'.join(compressed_parts)
2546
- if len(compressed) > target_size:
2547
- # Final trim if still too long
2548
- compressed = compressed[:target_size-100] + "\n...[content compressed for focus]"
2549
-
2550
- return compressed
2551
-
2552
- original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
2553
- asset_store: Dict[str, Dict] = {}
2554
- decision_history = [] # Track all decisions made
2555
-
2556
- # Enhanced planning phase
2557
- planning_step_id = log_event_fn("📋 Creating adaptive execution plan...", MSG_TYPE.MSG_TYPE_STEP_START)
2558
- execution_plan = planner.decompose_task(original_user_prompt, context or "")
2559
- current_plan_version = 1
2560
-
2561
- log_event_fn(f"Initial plan created with {len(execution_plan.tasks)} tasks", MSG_TYPE.MSG_TYPE_INFO, meta={
2562
- "plan_version": current_plan_version,
2563
- "total_tasks": len(execution_plan.tasks),
2564
- "estimated_complexity": "medium" if len(execution_plan.tasks) <= 5 else "high"
2565
- })
2566
-
2567
- for i, task in enumerate(execution_plan.tasks):
2568
- log_event_fn(f"Task {i+1}: {task.description}", MSG_TYPE.MSG_TYPE_INFO)
2569
-
2570
- log_event_fn("✅ Adaptive plan ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
2571
-
2572
- # Enhanced initial state
2573
- initial_state_parts = [
2574
- f"### Original User Request\n{original_user_prompt}",
2575
- f"### Context\n{context or 'No additional context provided'}",
2576
- f"### Execution Plan (Version {current_plan_version})\n- Total tasks: {len(execution_plan.tasks)}",
2577
- f"- Estimated complexity: {'High' if len(execution_plan.tasks) > 5 else 'Medium'}"
2578
- ]
2579
-
2580
- for i, task in enumerate(execution_plan.tasks):
2581
- initial_state_parts.append(f" {i+1}. {task.description} [Status: {task.status.value}]")
2582
-
2583
- if images:
2584
- initial_state_parts.append(f"### Provided Assets")
2585
- for img_b64 in images:
2586
- img_uuid = str(uuid.uuid4())
2587
- asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
2588
- initial_state_parts.append(f"- Image asset: {img_uuid}")
2589
-
2590
- current_scratchpad = "\n".join(initial_state_parts)
2591
- log_event_fn("Initial analysis complete", MSG_TYPE.MSG_TYPE_SCRATCHPAD, meta={"scratchpad_size": len(current_scratchpad)})
2592
-
2593
- formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
2594
- completed_tasks, current_task_index = set(), 0
2595
- plan_revision_count = 0
2596
-
2597
- # Main reasoning loop with enhanced decision tracking
2598
- for i in range(max_reasoning_steps):
2599
- current_task_desc = execution_plan.tasks[current_task_index].description if current_task_index < len(execution_plan.tasks) else "Finalizing analysis"
2600
- step_desc = f"🤔 Step {i+1}: {current_task_desc}"
2601
- reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
2602
-
2603
- try:
2604
- # Enhanced scratchpad management
2605
- if len(current_scratchpad) > max_scratchpad_size:
2606
- log_event_fn(f"Scratchpad size ({len(current_scratchpad)}) exceeds limit, compressing...", MSG_TYPE.MSG_TYPE_INFO)
2607
- current_scratchpad = _compress_scratchpad_intelligently(current_scratchpad, original_user_prompt, max_scratchpad_size // 2)
2608
- log_event_fn(f"Scratchpad compressed to {len(current_scratchpad)} characters", MSG_TYPE.MSG_TYPE_INFO)
2609
-
2610
- # Enhanced reasoning prompt with better decision tracking
2611
- reasoning_prompt = f"""You are working on: "{original_user_prompt}"
2612
-
2613
- === AVAILABLE ACTIONS ===
2614
- {formatted_tools_list}
2615
-
2616
- === YOUR COMPLETE ANALYSIS HISTORY ===
2617
- {current_scratchpad}
2618
- === END ANALYSIS HISTORY ===
2619
-
2620
- === DECISION GUIDELINES ===
2621
- 1. **Review your progress**: Look at what you've already discovered and accomplished
2622
- 2. **Consider your current task**: Focus on the next logical step in your plan
2623
- 3. **Remember your decisions**: If you previously decided to use a tool, follow through unless you have a good reason to change
2624
- 4. **Be adaptive**: If you discover new information that changes the situation, consider revising your plan
2625
- 5. **Stay focused**: Each action should clearly advance toward the final goal
2626
-
2627
- === YOUR NEXT DECISION ===
2628
- Choose the single most appropriate action to take right now. Consider:
2629
- - What specific step are you currently working on?
2630
- - What information do you still need?
2631
- - What would be most helpful for the user?
2632
-
2633
- Provide your decision as JSON:
2634
- {{
2635
- "reasoning": "Explain your current thinking and why this action makes sense now",
2636
- "action": {{
2637
- "tool_name": "exact_tool_name",
2638
- "requires_code_input": false,
2639
- "requires_image_input": false,
2640
- "confidence": 0.8
2641
- }},
2642
- "plan_status": "on_track" // or "needs_revision" if you want to change the plan
2643
- }}"""
2644
-
2645
- log_prompt_fn(f"Reasoning Prompt Step {i+1}", reasoning_prompt)
2646
- decision_data = self.generate_structured_content(
2647
- prompt=reasoning_prompt,
2648
- schema={
2649
- "reasoning": "string",
2650
- "action": "object",
2651
- "plan_status": "string"
2652
- },
2653
- system_prompt=reasoning_system_prompt,
2654
- temperature=decision_temperature,
2655
- **llm_generation_kwargs
2656
- )
2657
-
2658
- if not (decision_data and isinstance(decision_data.get("action"), dict)):
2659
- log_event_fn("⚠️ Invalid decision format from AI", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
2660
- current_scratchpad += f"\n\n### Step {i+1}: Decision Error\n- Error: AI produced invalid decision JSON\n- Continuing with fallback approach"
2661
- continue
2662
-
2663
- action = decision_data.get("action", {})
2664
- reasoning = decision_data.get("reasoning", "No reasoning provided")
2665
- plan_status = decision_data.get("plan_status", "on_track")
2666
- tool_name = action.get("tool_name")
2667
- requires_code = action.get("requires_code_input", False)
2668
- requires_image = action.get("requires_image_input", False)
2669
- confidence = action.get("confidence", 0.5)
2670
-
2671
- # Track the decision
2672
- decision_history.append({
2673
- "step": i+1,
2674
- "tool_name": tool_name,
2675
- "reasoning": reasoning,
2676
- "confidence": confidence,
2677
- "plan_status": plan_status
2678
- })
2679
-
2680
- current_scratchpad += f"\n\n### Step {i+1}: Decision & Reasoning\n**Reasoning**: {reasoning}\n**Chosen Action**: {tool_name}\n**Confidence**: {confidence}\n**Plan Status**: {plan_status}"
2681
-
2682
- log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP, meta={
2683
- "tool_name": tool_name,
2684
- "confidence": confidence,
2685
- "reasoning": reasoning[:100] + "..." if len(reasoning) > 100 else reasoning
2686
- })
2687
-
2688
- # Handle plan revision
2689
- if plan_status == "needs_revision" and tool_name != "local_tools::revise_plan":
2690
- log_event_fn("🔄 AI indicates plan needs revision", MSG_TYPE.MSG_TYPE_INFO)
2691
- tool_name = "local_tools::revise_plan" # Force plan revision
2692
-
2693
- # Handle final answer
2694
- if tool_name == "local_tools::final_answer":
2695
- log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
2696
- break
2697
-
2698
- # Handle clarification request
2699
- if tool_name == "local_tools::request_clarification":
2700
- clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
2701
-
2702
- CURRENT ANALYSIS:
2703
- {current_scratchpad}
2704
-
2705
- Generate a clear, specific question that will help you proceed effectively:"""
2706
-
2707
- question = self.generate_text(clarification_prompt, temperature=0.3)
2708
- question = self.remove_thinking_blocks(question)
2709
-
2710
- log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
2711
- return {
2712
- "final_answer": question,
2713
- "clarification_required": True,
2714
- "final_scratchpad": current_scratchpad,
2715
- "tool_calls": tool_calls_this_turn,
2716
- "sources": sources_this_turn,
2717
- "error": None,
2718
- "decision_history": decision_history
2719
- }
2720
-
2721
- # Handle final answer
2722
- if tool_name == "local_tools::final_answer":
2723
- log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
2724
- break
2725
-
2726
- # Handle clarification request
2727
- if tool_name == "local_tools::request_clarification":
2728
- clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
2729
-
2730
- CURRENT ANALYSIS:
2731
- {current_scratchpad}
2732
-
2733
- Generate a clear, specific question that will help you proceed effectively:"""
2734
-
2735
- question = self.generate_text(clarification_prompt, temperature=0.3)
2736
- question = self.remove_thinking_blocks(question)
2737
-
2738
- log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
2739
- return {
2740
- "final_answer": question,
2741
- "clarification_required": True,
2742
- "final_scratchpad": current_scratchpad,
2743
- "tool_calls": tool_calls_this_turn,
2744
- "sources": sources_this_turn,
2745
- "error": None,
2746
- "decision_history": decision_history
2747
- }
2748
-
2749
- # Handle plan revision
2750
- if tool_name == "local_tools::revise_plan":
2751
- plan_revision_count += 1
2752
- revision_id = log_event_fn(f"📝 Revising execution plan (revision #{plan_revision_count})", MSG_TYPE.MSG_TYPE_STEP_START)
2753
-
2754
- try:
2755
- revision_prompt = f"""Based on your current analysis and discoveries, create an updated execution plan.
2756
-
2757
- ORIGINAL REQUEST: "{original_user_prompt}"
2758
- CURRENT ANALYSIS:
2759
- {current_scratchpad}
2760
-
2761
- REASON FOR REVISION: {reasoning}
2762
-
2763
- Create a new plan that reflects your current understanding. Consider:
2764
- 1. What have you already accomplished?
2765
- 2. What new information have you discovered?
2766
- 3. What steps are still needed?
2767
- 4. How can you be more efficient?
2768
-
2769
- Provide your revision as JSON:
2770
- {{
2771
- "revision_reason": "Clear explanation of why the plan needed to change",
2772
- "new_plan": [
2773
- {{"step": 1, "description": "First revised step", "status": "pending"}},
2774
- {{"step": 2, "description": "Second revised step", "status": "pending"}}
2775
- ],
2776
- "confidence": 0.8
2777
- }}"""
2778
-
2779
- revision_data = self.generate_structured_content(
2780
- prompt=revision_prompt,
2781
- schema={
2782
- "revision_reason": "string",
2783
- "new_plan": "array",
2784
- "confidence": "number"
2785
- },
2786
- temperature=0.3,
2787
- **llm_generation_kwargs
2788
- )
2789
-
2790
- if revision_data and revision_data.get("new_plan"):
2791
- # Update the plan
2792
- current_plan_version += 1
2793
- new_tasks = []
2794
- for task_data in revision_data["new_plan"]:
2795
- task = TaskDecomposition() # Assuming this class exists
2796
- task.description = task_data.get("description", "Undefined step")
2797
- task.status = TaskStatus.PENDING # Reset all to pending
2798
- new_tasks.append(task)
2799
-
2800
- execution_plan.tasks = new_tasks
2801
- current_task_index = 0 # Reset to beginning
2802
-
2803
- # Update scratchpad with new plan
2804
- current_scratchpad += f"\n\n### Updated Plan (Version {current_plan_version})\n"
2805
- current_scratchpad += f"**Revision Reason**: {revision_data.get('revision_reason', 'Plan needed updating')}\n"
2806
- current_scratchpad += f"**New Tasks**:\n"
2807
- for i, task in enumerate(execution_plan.tasks):
2808
- current_scratchpad += f" {i+1}. {task.description}\n"
2809
-
2810
- log_event_fn(f"✅ Plan revised with {len(execution_plan.tasks)} updated tasks", MSG_TYPE.MSG_TYPE_STEP_END, event_id=revision_id, meta={
2811
- "plan_version": current_plan_version,
2812
- "new_task_count": len(execution_plan.tasks),
2813
- "revision_reason": revision_data.get("revision_reason", "")
2814
- })
2815
-
2816
- # Continue with the new plan
2817
- continue
2818
- else:
2819
- raise ValueError("Failed to generate valid plan revision")
2820
-
2821
- except Exception as e:
2822
- log_event_fn(f"Plan revision failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=revision_id)
2823
- current_scratchpad += f"\n**Plan Revision Failed**: {str(e)}\nContinuing with original plan."
2824
-
2825
- # Prepare parameters for tool execution
2826
- param_assets = {}
2827
- if requires_code:
2828
- log_event_fn("💻 Generating code for task", MSG_TYPE.MSG_TYPE_INFO)
2829
- code_prompt = f"""Generate the specific code needed for the current step.
2830
-
2831
- CURRENT CONTEXT:
2832
- {current_scratchpad}
2833
-
2834
- CURRENT TASK: {tool_name}
2835
- USER REQUEST: "{original_user_prompt}"
2836
-
2837
- Generate clean, functional code that addresses the specific requirements. Focus on:
2838
- 1. Solving the immediate problem
2839
- 2. Being clear and readable
2840
- 3. Including necessary imports and dependencies
2841
- 4. Adding helpful comments where appropriate
2842
-
2843
- CODE:"""
2844
-
2845
- code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
2846
- code_uuid = f"code_asset_{uuid.uuid4()}"
2847
- asset_store[code_uuid] = {"type": "code", "content": code_content}
2848
- param_assets['code_asset_id'] = code_uuid
2849
- log_event_fn(f"Code asset created: {code_uuid[:8]}...", MSG_TYPE.MSG_TYPE_INFO)
2850
-
2851
- if requires_image:
2852
- image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
2853
- if image_assets:
2854
- param_assets['image_asset_id'] = image_assets[0]
2855
- log_event_fn(f"Using image asset: {image_assets[0][:8]}...", MSG_TYPE.MSG_TYPE_INFO)
2856
- else:
2857
- log_event_fn("⚠️ Image required but none available", MSG_TYPE.MSG_TYPE_WARNING)
2858
-
2859
- # Enhanced parameter generation
2860
- param_prompt = f"""Generate the optimal parameters for this tool execution.
2861
-
2862
- TOOL: {tool_name}
2863
- CURRENT CONTEXT: {current_scratchpad}
2864
- CURRENT REASONING: {reasoning}
2865
- AVAILABLE ASSETS: {json.dumps(param_assets) if param_assets else "None"}
2866
-
2867
- Based on your analysis and the current step you're working on, provide the most appropriate parameters.
2868
- Be specific and purposeful in your parameter choices.
2869
-
2870
- Output format: {{"tool_params": {{...}}}}"""
2871
-
2872
- log_prompt_fn(f"Parameter Generation Step {i+1}", param_prompt)
2873
- param_data = self.generate_structured_content(
2874
- prompt=param_prompt,
2875
- schema={"tool_params": "object"},
2876
- temperature=decision_temperature,
2877
- **llm_generation_kwargs
2878
- )
2879
- tool_params = param_data.get("tool_params", {}) if param_data else {}
2880
-
2881
- current_scratchpad += f"\n**Parameters Generated**: {json.dumps(tool_params, indent=2)}"
2882
-
2883
- # Hydrate parameters with assets
2884
- def _hydrate(data: Any, store: Dict) -> Any:
2885
- if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
2886
- if isinstance(data, list): return [_hydrate(item, store) for item in data]
2887
- if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
2888
- return data
2889
-
2890
- hydrated_params = _hydrate(tool_params, asset_store)
2891
-
2892
- # Execute the tool with detailed logging
2893
- start_time = time.time()
2894
- tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
2895
-
2896
- try:
2897
- if tool_name in rag_registry:
2898
- query = hydrated_params.get("query", "")
2899
- if not query:
2900
- # Fall back to using reasoning as query
2901
- query = reasoning[:200] + "..." if len(reasoning) > 200 else reasoning
2902
-
2903
- log_event_fn(f"🔍 Searching knowledge base with query: '{query[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
2904
-
2905
- top_k = rag_tool_specs[tool_name]["default_top_k"]
2906
- min_sim = rag_tool_specs[tool_name]["default_min_sim"]
2907
-
2908
- raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
2909
- raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
2910
-
2911
- docs = []
2912
- for d in raw_iter or []:
2913
- doc_data = {
2914
- "text": d.get("text", str(d)),
2915
- "score": d.get("score", 0) * 100,
2916
- "metadata": d.get("metadata", {})
2917
- }
2918
- docs.append(doc_data)
2919
-
2920
- kept = [x for x in docs if x['score'] >= min_sim]
2921
- tool_result = {
2922
- "status": "success",
2923
- "results": kept,
2924
- "total_found": len(docs),
2925
- "kept_after_filtering": len(kept),
2926
- "query_used": query
2927
- }
2928
-
2929
- sources_this_turn.extend([{
2930
- "source": tool_name,
2931
- "metadata": x["metadata"],
2932
- "score": x["score"]
2933
- } for x in kept])
2934
-
2935
- log_event_fn(f"📚 Retrieved {len(kept)} relevant documents (from {len(docs)} total)", MSG_TYPE.MSG_TYPE_INFO)
2936
-
2937
- elif hasattr(self, "mcp") and "local_tools" not in tool_name:
2938
- log_event_fn(f"🔧 Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={
2939
- "tool_name": tool_name,
2940
- "params": {k: str(v)[:100] for k, v in hydrated_params.items()} # Truncate for logging
2941
- })
2942
-
2943
- tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
2944
-
2945
- log_event_fn(f"Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={
2946
- "result_status": tool_result.get("status", "unknown"),
2947
- "has_error": "error" in tool_result
2948
- })
2949
-
2950
- elif tool_name == "local_tools::generate_image" and hasattr(self, "tti"):
2951
- image_prompt = hydrated_params.get("prompt", "")
2952
- log_event_fn(f"🎨 Generating image with prompt: '{image_prompt[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
2953
-
2954
- # This would call your text-to-image functionality
2955
- image_result = self.tti.generate_image(image_prompt) # Assuming this method exists
2956
- if image_result:
2957
- image_uuid = f"generated_image_{uuid.uuid4()}"
2958
- asset_store[image_uuid] = {"type": "image", "content": image_result, "source": "generated"}
2959
- tool_result = {"status": "success", "image_id": image_uuid, "prompt_used": image_prompt}
2960
- else:
2961
- tool_result = {"status": "failure", "error": "Image generation failed"}
2962
-
2963
- else:
2964
- tool_result = {"status": "failure", "error": f"Tool '{tool_name}' is not available or supported in this context."}
2965
-
2966
- except Exception as e:
2967
- error_msg = f"Exception during '{tool_name}' execution: {str(e)}"
2968
- log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
2969
- tool_result = {"status": "failure", "error": error_msg}
2970
-
2971
- response_time = time.time() - start_time
2972
- success = tool_result.get("status") == "success"
2973
-
2974
- # Record performance
2975
- performance_tracker.record_tool_usage(tool_name, success, confidence, response_time, tool_result.get("error"))
2976
-
2977
- # Update task status
2978
- if success and current_task_index < len(execution_plan.tasks):
2979
- execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
2980
- completed_tasks.add(current_task_index)
2981
- current_task_index += 1
2982
-
2983
- # Enhanced observation logging
2984
- observation_text = json.dumps(tool_result, indent=2)
2985
- if len(observation_text) > 1000:
2986
- # Truncate very long results for scratchpad
2987
- truncated_result = {k: (str(v)[:200] + "..." if len(str(v)) > 200 else v) for k, v in tool_result.items()}
2988
- observation_text = json.dumps(truncated_result, indent=2)
2989
-
2990
- current_scratchpad += f"\n\n### Step {i+1}: Execution & Observation\n"
2991
- current_scratchpad += f"**Tool Used**: {tool_name}\n"
2992
- current_scratchpad += f"**Success**: {success}\n"
2993
- current_scratchpad += f"**Response Time**: {response_time:.2f}s\n"
2994
- current_scratchpad += f"**Result**:\n```json\n{observation_text}\n```"
2995
-
2996
- # Track tool call
2997
- tool_calls_this_turn.append({
2998
- "name": tool_name,
2999
- "params": tool_params,
3000
- "result": tool_result,
3001
- "response_time": response_time,
3002
- "confidence": confidence,
3003
- "reasoning": reasoning
3004
- })
3005
-
3006
- if success:
3007
- log_event_fn(f"✅ Step {i+1} completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
3008
- "tool_name": tool_name,
3009
- "response_time": response_time,
3010
- "confidence": confidence
3011
- })
3012
- else:
3013
- error_detail = tool_result.get("error", "No error detail provided.")
3014
- log_event_fn(f"⚠️ Step {i+1} completed with issues: {error_detail}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
3015
- "tool_name": tool_name,
3016
- "error": error_detail,
3017
- "confidence": confidence
3018
- })
3019
-
3020
- # Add failure handling to scratchpad
3021
- current_scratchpad += f"\n**Failure Analysis**: {error_detail}"
3022
- current_scratchpad += f"\n**Next Steps**: Consider alternative approaches or tools"
3023
-
3024
- # Log current progress
3025
- completed_count = len(completed_tasks)
3026
- total_tasks = len(execution_plan.tasks)
3027
- if total_tasks > 0:
3028
- progress = (completed_count / total_tasks) * 100
3029
- log_event_fn(f"Progress: {completed_count}/{total_tasks} tasks completed ({progress:.1f}%)", MSG_TYPE.MSG_TYPE_STEP_PROGRESS, meta={"progress": progress})
3030
-
3031
- # Check if all tasks are completed
3032
- if completed_count >= total_tasks:
3033
- log_event_fn("🎯 All planned tasks completed", MSG_TYPE.MSG_TYPE_INFO)
3034
- break
3035
-
3036
- except Exception as ex:
3037
- log_event_fn(f"💥 Unexpected error in reasoning step {i+1}: {str(ex)}", MSG_TYPE.MSG_TYPE_ERROR, event_id=reasoning_step_id)
3038
- trace_exception(ex)
3039
-
3040
- # Add error to scratchpad for context
3041
- current_scratchpad += f"\n\n### Step {i+1}: Unexpected Error\n**Error**: {str(ex)}\n**Recovery**: Continuing with adjusted approach"
3042
-
3043
- log_event_fn("🔄 Recovering and continuing with next step", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
3044
-
3045
- # Enhanced self-reflection
3046
- if enable_self_reflection and len(tool_calls_this_turn) > 0:
3047
- reflection_id = log_event_fn("🤔 Conducting comprehensive self-assessment...", MSG_TYPE.MSG_TYPE_STEP_START)
3048
- try:
3049
- reflection_prompt = f"""Conduct a thorough review of your work and assess the quality of your response to the user's request.
3050
-
3051
- ORIGINAL REQUEST: "{original_user_prompt}"
3052
- TOOLS USED: {len(tool_calls_this_turn)}
3053
- PLAN REVISIONS: {plan_revision_count}
3054
-
3055
- COMPLETE ANALYSIS:
3056
- {current_scratchpad}
3057
-
3058
- Evaluate your performance on multiple dimensions:
3059
-
3060
- 1. **Goal Achievement**: Did you fully address the user's request?
3061
- 2. **Process Efficiency**: Was your approach optimal given the available tools?
3062
- 3. **Information Quality**: Is the information you gathered accurate and relevant?
3063
- 4. **Decision Making**: Were your tool choices and parameters appropriate?
3064
- 5. **Adaptability**: How well did you handle unexpected results or plan changes?
3065
-
3066
- Provide your assessment as JSON:
3067
- {{
3068
- "goal_achieved": true,
3069
- "effectiveness_score": 0.85,
3070
- "process_efficiency": 0.8,
3071
- "information_quality": 0.9,
3072
- "decision_making": 0.85,
3073
- "adaptability": 0.7,
3074
- "overall_confidence": 0.82,
3075
- "strengths": ["Clear reasoning", "Good tool selection"],
3076
- "areas_for_improvement": ["Could have been more efficient"],
3077
- "summary": "Successfully completed the user's request with high quality results",
3078
- "key_insights": ["Discovered that X was more important than initially thought"]
3079
- }}"""
3080
-
3081
- reflection_data = self.generate_structured_content(
3082
- prompt=reflection_prompt,
3083
- schema={
3084
- "goal_achieved": "boolean",
3085
- "effectiveness_score": "number",
3086
- "process_efficiency": "number",
3087
- "information_quality": "number",
3088
- "decision_making": "number",
3089
- "adaptability": "number",
3090
- "overall_confidence": "number",
3091
- "strengths": "array",
3092
- "areas_for_improvement": "array",
3093
- "summary": "string",
3094
- "key_insights": "array"
3095
- },
3096
- temperature=0.3,
3097
- **llm_generation_kwargs
3098
- )
3099
-
3100
- if reflection_data:
3101
- current_scratchpad += f"\n\n### Comprehensive Self-Assessment\n"
3102
- current_scratchpad += f"**Goal Achieved**: {reflection_data.get('goal_achieved', False)}\n"
3103
- current_scratchpad += f"**Overall Confidence**: {reflection_data.get('overall_confidence', 0.5):.2f}\n"
3104
- current_scratchpad += f"**Effectiveness Score**: {reflection_data.get('effectiveness_score', 0.5):.2f}\n"
3105
- current_scratchpad += f"**Key Strengths**: {', '.join(reflection_data.get('strengths', []))}\n"
3106
- current_scratchpad += f"**Improvement Areas**: {', '.join(reflection_data.get('areas_for_improvement', []))}\n"
3107
- current_scratchpad += f"**Summary**: {reflection_data.get('summary', '')}\n"
3108
-
3109
- log_event_fn(f"✅ Self-assessment completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id, meta={
3110
- "overall_confidence": reflection_data.get('overall_confidence', 0.5),
3111
- "goal_achieved": reflection_data.get('goal_achieved', False),
3112
- "effectiveness_score": reflection_data.get('effectiveness_score', 0.5)
3113
- })
3114
- else:
3115
- log_event_fn("Self-assessment data generation failed", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
3116
-
3117
- except Exception as e:
3118
- log_event_fn(f"Self-assessment failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
3119
-
3120
- # Enhanced final synthesis
3121
- synthesis_id = log_event_fn("📝 Synthesizing comprehensive final response...", MSG_TYPE.MSG_TYPE_STEP_START)
3122
-
3123
- final_answer_prompt = f"""Create a comprehensive, well-structured final response that fully addresses the user's request.
3124
-
3125
- ORIGINAL REQUEST: "{original_user_prompt}"
3126
- CONTEXT: {context or "No additional context"}
3127
-
3128
- COMPLETE ANALYSIS AND WORK:
3129
- {current_scratchpad}
3130
-
3131
- GUIDELINES for your response:
3132
- 1. **Be Complete**: Address all aspects of the user's request
3133
- 2. **Be Clear**: Organize your response logically and use clear language
3134
- 3. **Be Helpful**: Provide actionable information and insights
3135
- 4. **Be Honest**: If there were limitations or uncertainties, mention them appropriately
3136
- 5. **Be Concise**: While being thorough, avoid unnecessary verbosity
3137
- 6. **Cite Sources**: If you used research tools, reference the information appropriately
3138
-
3139
- Your response should feel natural and conversational while being informative and valuable.
3140
-
3141
- FINAL RESPONSE:"""
3142
-
3143
- log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)
3144
-
3145
- final_answer_text = self.generate_text(
3146
- prompt=final_answer_prompt,
3147
- system_prompt=system_prompt,
3148
- stream=streaming_callback is not None,
3149
- streaming_callback=streaming_callback,
3150
- temperature=final_answer_temperature,
3151
- **llm_generation_kwargs
3152
- )
3153
-
3154
- if isinstance(final_answer_text, dict) and "error" in final_answer_text:
3155
- log_event_fn(f"Final synthesis failed: {final_answer_text['error']}", MSG_TYPE.MSG_TYPE_ERROR, event_id=synthesis_id)
3156
- return {
3157
- "final_answer": "I encountered an issue while preparing my final response. Please let me know if you'd like me to try again.",
3158
- "error": final_answer_text["error"],
3159
- "final_scratchpad": current_scratchpad,
3160
- "tool_calls": tool_calls_this_turn,
3161
- "sources": sources_this_turn,
3162
- "decision_history": decision_history
3163
- }
3164
-
3165
- final_answer = self.remove_thinking_blocks(final_answer_text)
3166
-
3167
- # Calculate overall performance metrics
3168
- overall_confidence = sum(call.get('confidence', 0.5) for call in tool_calls_this_turn) / max(len(tool_calls_this_turn), 1)
3169
- successful_calls = sum(1 for call in tool_calls_this_turn if call.get('result', {}).get('status') == 'success')
3170
- success_rate = successful_calls / max(len(tool_calls_this_turn), 1)
3171
-
3172
- log_event_fn("✅ Comprehensive response ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=synthesis_id, meta={
3173
- "final_answer_length": len(final_answer),
3174
- "total_tools_used": len(tool_calls_this_turn),
3175
- "success_rate": success_rate,
3176
- "overall_confidence": overall_confidence
3177
- })
3178
-
3179
- return {
3180
- "final_answer": final_answer,
3181
- "final_scratchpad": current_scratchpad,
3182
- "tool_calls": tool_calls_this_turn,
3183
- "sources": sources_this_turn,
3184
- "decision_history": decision_history,
3185
- "performance_stats": {
3186
- "total_steps": len(tool_calls_this_turn),
3187
- "successful_steps": successful_calls,
3188
- "success_rate": success_rate,
3189
- "average_confidence": overall_confidence,
3190
- "plan_revisions": plan_revision_count,
3191
- "total_reasoning_steps": len(decision_history)
3192
- },
3193
- "plan_evolution": {
3194
- "initial_tasks": len(execution_plan.tasks),
3195
- "final_version": current_plan_version,
3196
- "total_revisions": plan_revision_count
3197
- },
3198
- "clarification_required": False,
3199
- "overall_confidence": overall_confidence,
3200
- "error": None
3201
- }
3202
-
3203
-
3204
- def _execute_complex_reasoning_loop(
3205
- self, prompt, context, system_prompt, reasoning_system_prompt, images,
3206
- max_reasoning_steps, decision_temperature, final_answer_temperature,
3207
- streaming_callback, debug, enable_self_reflection, all_visible_tools,
3208
- rag_registry, rag_tool_specs, log_event_fn, log_prompt_fn, max_scratchpad_size, **llm_generation_kwargs
3209
- ) -> Dict[str, Any]:
3210
-
3211
- planner, memory_manager, performance_tracker = TaskPlanner(self), MemoryManager(), ToolPerformanceTracker()
3212
-
3213
- def _get_friendly_action_description(tool_name, requires_code, requires_image):
3214
- descriptions = {
3215
- "local_tools::final_answer": "📋 Preparing final answer",
3216
- "local_tools::request_clarification": "❓ Requesting clarification",
3217
- "local_tools::generate_image": "🎨 Creating image",
3218
- "local_tools::revise_plan": "📝 Revising execution plan"
3219
- }
3220
- if tool_name in descriptions:
3221
- return descriptions[tool_name]
3222
- if "research::" in tool_name:
3223
- return f"🔍 Searching {tool_name.split('::')[-1]} knowledge base"
3224
- if requires_code:
3225
- return "💻 Processing code"
3226
- if requires_image:
3227
- return "🖼️ Analyzing images"
3228
- return f"🔧 Using {tool_name.replace('_', ' ').replace('::', ' - ').title()}"
3229
-
3230
- def _compress_scratchpad_intelligently(scratchpad: str, original_request: str, target_size: int) -> str:
3231
- """Enhanced scratchpad compression that preserves key decisions and recent context"""
3232
- if len(scratchpad) <= target_size:
3233
- return scratchpad
3234
-
3235
- log_event_fn("📝 Compressing scratchpad to maintain focus...", MSG_TYPE.MSG_TYPE_INFO)
3236
-
3237
- # Extract key components
3238
- lines = scratchpad.split('\n')
3239
- plan_section = []
3240
- decisions = []
3241
- recent_observations = []
3242
-
3243
- current_section = None
3244
- for i, line in enumerate(lines):
3245
- if "### Execution Plan" in line or "### Updated Plan" in line:
3246
- current_section = "plan"
3247
- elif "### Step" in line and ("Thought" in line or "Decision" in line):
3248
- current_section = "decision"
3249
- elif "### Step" in line and "Observation" in line:
3250
- current_section = "observation"
3251
- elif line.startswith("###"):
3252
- current_section = None
3253
-
3254
- if current_section == "plan" and line.strip():
3255
- plan_section.append(line)
3256
- elif current_section == "decision" and line.strip():
3257
- decisions.append((i, line))
3258
- elif current_section == "observation" and line.strip():
3259
- recent_observations.append((i, line))
3260
-
3261
- # Keep most recent items and important decisions
3262
- recent_decisions = decisions[-3:] if len(decisions) > 3 else decisions
3263
- recent_obs = recent_observations[-5:] if len(recent_observations) > 5 else recent_observations
3264
-
3265
- compressed_parts = [
3266
- f"### Original Request\n{original_request}",
3267
- f"### Current Plan\n" + '\n'.join(plan_section[-10:]),
3268
- f"### Recent Key Decisions"
3269
- ]
3270
-
3271
- for _, decision in recent_decisions:
3272
- compressed_parts.append(decision)
3273
-
3274
- compressed_parts.append("### Recent Observations")
3275
- for _, obs in recent_obs:
3276
- compressed_parts.append(obs)
3277
-
3278
- compressed = '\n'.join(compressed_parts)
3279
- if len(compressed) > target_size:
3280
- # Final trim if still too long
3281
- compressed = compressed[:target_size-100] + "\n...[content compressed for focus]"
3282
-
3283
- return compressed
3284
-
3285
- original_user_prompt, tool_calls_this_turn, sources_this_turn = prompt, [], []
3286
- asset_store: Dict[str, Dict] = {}
3287
- decision_history = [] # Track all decisions made
3288
-
3289
- # Enhanced planning phase
3290
- planning_step_id = log_event_fn("📋 Creating adaptive execution plan...", MSG_TYPE.MSG_TYPE_STEP_START)
3291
- execution_plan = planner.decompose_task(original_user_prompt, context or "")
3292
- current_plan_version = 1
3293
-
3294
- log_event_fn(f"Initial plan created with {len(execution_plan.tasks)} tasks", MSG_TYPE.MSG_TYPE_INFO, meta={
3295
- "plan_version": current_plan_version,
3296
- "total_tasks": len(execution_plan.tasks),
3297
- "estimated_complexity": "medium" if len(execution_plan.tasks) <= 5 else "high"
3298
- })
3299
-
3300
- for i, task in enumerate(execution_plan.tasks):
3301
- log_event_fn(f"Task {i+1}: {task.description}", MSG_TYPE.MSG_TYPE_INFO)
3302
-
3303
- log_event_fn("✅ Adaptive plan ready", MSG_TYPE.MSG_TYPE_STEP_END, event_id=planning_step_id)
3304
-
3305
- # Enhanced initial state
3306
- initial_state_parts = [
3307
- f"### Original User Request\n{original_user_prompt}",
3308
- f"### Context\n{context or 'No additional context provided'}",
3309
- f"### Execution Plan (Version {current_plan_version})\n- Total tasks: {len(execution_plan.tasks)}",
3310
- f"- Estimated complexity: {'High' if len(execution_plan.tasks) > 5 else 'Medium'}"
3311
- ]
3312
-
3313
- for i, task in enumerate(execution_plan.tasks):
3314
- initial_state_parts.append(f" {i+1}. {task.description} [Status: {task.status.value}]")
3315
-
3316
- if images:
3317
- initial_state_parts.append(f"### Provided Assets")
3318
- for img_b64 in images:
3319
- img_uuid = str(uuid.uuid4())
3320
- asset_store[img_uuid] = {"type": "image", "content": img_b64, "source": "user"}
3321
- initial_state_parts.append(f"- Image asset: {img_uuid}")
3322
-
3323
- current_scratchpad = "\n".join(initial_state_parts)
3324
- log_event_fn("Initial analysis complete", MSG_TYPE.MSG_TYPE_SCRATCHPAD, meta={"scratchpad_size": len(current_scratchpad)})
3325
-
3326
- formatted_tools_list = "\n".join([f"**{t['name']}**: {t['description']}" for t in all_visible_tools])
3327
- completed_tasks, current_task_index = set(), 0
3328
- plan_revision_count = 0
3329
-
3330
- # Main reasoning loop with enhanced decision tracking
3331
- for i in range(max_reasoning_steps):
3332
- current_task_desc = execution_plan.tasks[current_task_index].description if current_task_index < len(execution_plan.tasks) else "Finalizing analysis"
3333
- step_desc = f"🤔 Step {i+1}: {current_task_desc}"
3334
- reasoning_step_id = log_event_fn(step_desc, MSG_TYPE.MSG_TYPE_STEP_START)
3335
-
3336
- try:
3337
- # Enhanced scratchpad management
3338
- if len(current_scratchpad) > max_scratchpad_size:
3339
- log_event_fn(f"Scratchpad size ({len(current_scratchpad)}) exceeds limit, compressing...", MSG_TYPE.MSG_TYPE_INFO)
3340
- current_scratchpad = _compress_scratchpad_intelligently(current_scratchpad, original_user_prompt, max_scratchpad_size // 2)
3341
- log_event_fn(f"Scratchpad compressed to {len(current_scratchpad)} characters", MSG_TYPE.MSG_TYPE_INFO)
3342
-
3343
- # Enhanced reasoning prompt with better decision tracking
3344
- reasoning_prompt = f"""You are working on: "{original_user_prompt}"
3345
-
3346
- === AVAILABLE ACTIONS ===
3347
- {formatted_tools_list}
3348
-
3349
- === YOUR COMPLETE ANALYSIS HISTORY ===
3350
- {current_scratchpad}
3351
- === END ANALYSIS HISTORY ===
3352
-
3353
- === DECISION GUIDELINES ===
3354
- 1. **Review your progress**: Look at what you've already discovered and accomplished
3355
- 2. **Consider your current task**: Focus on the next logical step in your plan
3356
- 3. **Remember your decisions**: If you previously decided to use a tool, follow through unless you have a good reason to change
3357
- 4. **Be adaptive**: If you discover new information that changes the situation, consider revising your plan
3358
- 5. **Stay focused**: Each action should clearly advance toward the final goal
3359
-
3360
- === YOUR NEXT DECISION ===
3361
- Choose the single most appropriate action to take right now. Consider:
3362
- - What specific step are you currently working on?
3363
- - What information do you still need?
3364
- - What would be most helpful for the user?
3365
-
3366
- Provide your decision as JSON:
3367
- {{
3368
- "reasoning": "Explain your current thinking and why this action makes sense now",
3369
- "action": {{
3370
- "tool_name": "exact_tool_name",
3371
- "requires_code_input": false,
3372
- "requires_image_input": false,
3373
- "confidence": 0.8
3374
- }},
3375
- "plan_status": "on_track" // or "needs_revision" if you want to change the plan
3376
- }}"""
3377
-
3378
- log_prompt_fn(f"Reasoning Prompt Step {i+1}", reasoning_prompt)
3379
- decision_data = self.generate_structured_content(
3380
- prompt=reasoning_prompt,
3381
- schema={
3382
- "reasoning": "string",
3383
- "action": "object",
3384
- "plan_status": "string"
3385
- },
3386
- system_prompt=reasoning_system_prompt,
3387
- temperature=decision_temperature,
3388
- **llm_generation_kwargs
3389
- )
3390
-
3391
- if not (decision_data and isinstance(decision_data.get("action"), dict)):
3392
- log_event_fn("⚠️ Invalid decision format from AI", MSG_TYPE.MSG_TYPE_WARNING, event_id=reasoning_step_id)
3393
- current_scratchpad += f"\n\n### Step {i+1}: Decision Error\n- Error: AI produced invalid decision JSON\n- Continuing with fallback approach"
3394
- continue
3395
-
3396
- action = decision_data.get("action", {})
3397
- reasoning = decision_data.get("reasoning", "No reasoning provided")
3398
- plan_status = decision_data.get("plan_status", "on_track")
3399
- tool_name = action.get("tool_name")
3400
- requires_code = action.get("requires_code_input", False)
3401
- requires_image = action.get("requires_image_input", False)
3402
- confidence = action.get("confidence", 0.5)
3403
-
3404
- # Track the decision
3405
- decision_history.append({
3406
- "step": i+1,
3407
- "tool_name": tool_name,
3408
- "reasoning": reasoning,
3409
- "confidence": confidence,
3410
- "plan_status": plan_status
3411
- })
3412
-
3413
- current_scratchpad += f"\n\n### Step {i+1}: Decision & Reasoning\n**Reasoning**: {reasoning}\n**Chosen Action**: {tool_name}\n**Confidence**: {confidence}\n**Plan Status**: {plan_status}"
3414
-
3415
- log_event_fn(_get_friendly_action_description(tool_name, requires_code, requires_image), MSG_TYPE.MSG_TYPE_STEP, meta={
3416
- "tool_name": tool_name,
3417
- "confidence": confidence,
3418
- "reasoning": reasoning[:100] + "..." if len(reasoning) > 100 else reasoning
3419
- })
3420
-
3421
- # Handle plan revision
3422
- if plan_status == "needs_revision" and tool_name != "local_tools::revise_plan":
3423
- log_event_fn("🔄 AI indicates plan needs revision", MSG_TYPE.MSG_TYPE_INFO)
3424
- tool_name = "local_tools::revise_plan" # Force plan revision
3425
-
3426
- # Handle final answer
3427
- if tool_name == "local_tools::final_answer":
3428
- log_event_fn("🎯 Ready to provide final answer", MSG_TYPE.MSG_TYPE_INFO)
3429
- break
3430
-
3431
- # Handle clarification request
3432
- if tool_name == "local_tools::request_clarification":
3433
- clarification_prompt = f"""Based on your analysis, what specific information do you need from the user?
3434
-
3435
- CURRENT ANALYSIS:
3436
- {current_scratchpad}
3437
-
3438
- Generate a clear, specific question that will help you proceed effectively:"""
3439
-
3440
- question = self.generate_text(clarification_prompt, temperature=0.3)
3441
- question = self.remove_thinking_blocks(question)
3442
-
3443
- log_event_fn("❓ Clarification needed from user", MSG_TYPE.MSG_TYPE_INFO)
3444
- return {
3445
- "final_answer": question,
3446
- "clarification_required": True,
3447
- "final_scratchpad": current_scratchpad,
3448
- "tool_calls": tool_calls_this_turn,
3449
- "sources": sources_this_turn,
3450
- "error": None,
3451
- "decision_history": decision_history
3452
- }
3453
-
3454
- # Handle plan revision
3455
- if tool_name == "local_tools::revise_plan":
3456
- plan_revision_count += 1
3457
- revision_id = log_event_fn(f"📝 Revising execution plan (revision #{plan_revision_count})", MSG_TYPE.MSG_TYPE_STEP_START)
3458
-
3459
- try:
3460
- revision_prompt = f"""Based on your current analysis and discoveries, create an updated execution plan.
3461
-
3462
- ORIGINAL REQUEST: "{original_user_prompt}"
3463
- CURRENT ANALYSIS:
3464
- {current_scratchpad}
3465
-
3466
- REASON FOR REVISION: {reasoning}
3467
-
3468
- Create a new plan that reflects your current understanding. Consider:
3469
- 1. What have you already accomplished?
3470
- 2. What new information have you discovered?
3471
- 3. What steps are still needed?
3472
- 4. How can you be more efficient?
3473
-
3474
- Provide your revision as JSON:
3475
- {{
3476
- "revision_reason": "Clear explanation of why the plan needed to change",
3477
- "new_plan": [
3478
- {{"step": 1, "description": "First revised step", "status": "pending"}},
3479
- {{"step": 2, "description": "Second revised step", "status": "pending"}}
3480
- ],
3481
- "confidence": 0.8
3482
- }}"""
3483
-
3484
- revision_data = self.generate_structured_content(
3485
- prompt=revision_prompt,
3486
- schema={
3487
- "revision_reason": "string",
3488
- "new_plan": "array",
3489
- "confidence": "number"
3490
- },
3491
- temperature=0.3,
3492
- **llm_generation_kwargs
3493
- )
3494
-
3495
- if revision_data and revision_data.get("new_plan"):
3496
- # Update the plan
3497
- current_plan_version += 1
3498
- new_tasks = []
3499
- for task_data in revision_data["new_plan"]:
3500
- task = TaskDecomposition() # Assuming this class exists
3501
- task.description = task_data.get("description", "Undefined step")
3502
- task.status = TaskStatus.PENDING # Reset all to pending
3503
- new_tasks.append(task)
3504
-
3505
- execution_plan.tasks = new_tasks
3506
- current_task_index = 0 # Reset to beginning
3507
-
3508
- # Update scratchpad with new plan
3509
- current_scratchpad += f"\n\n### Updated Plan (Version {current_plan_version})\n"
3510
- current_scratchpad += f"**Revision Reason**: {revision_data.get('revision_reason', 'Plan needed updating')}\n"
3511
- current_scratchpad += f"**New Tasks**:\n"
3512
- for i, task in enumerate(execution_plan.tasks):
3513
- current_scratchpad += f" {i+1}. {task.description}\n"
3514
-
3515
- log_event_fn(f"✅ Plan revised with {len(execution_plan.tasks)} updated tasks", MSG_TYPE.MSG_TYPE_STEP_END, event_id=revision_id, meta={
3516
- "plan_version": current_plan_version,
3517
- "new_task_count": len(execution_plan.tasks),
3518
- "revision_reason": revision_data.get("revision_reason", "")
3519
- })
3520
-
3521
- # Continue with the new plan
3522
- continue
3523
- else:
3524
- raise ValueError("Failed to generate valid plan revision")
3525
-
3526
- except Exception as e:
3527
- log_event_fn(f"Plan revision failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=revision_id)
3528
- current_scratchpad += f"\n**Plan Revision Failed**: {str(e)}\nContinuing with original plan."
3529
-
3530
-         # Prepare parameters for tool execution
-         param_assets = {}
-         if requires_code:
-             log_event_fn("💻 Generating code for task", MSG_TYPE.MSG_TYPE_INFO)
-             code_prompt = f"""Generate the specific code needed for the current step.
-
- CURRENT CONTEXT:
- {current_scratchpad}
-
- CURRENT TASK: {tool_name}
- USER REQUEST: "{original_user_prompt}"
-
- Generate clean, functional code that addresses the specific requirements. Focus on:
- 1. Solving the immediate problem
- 2. Being clear and readable
- 3. Including necessary imports and dependencies
- 4. Adding helpful comments where appropriate
-
- CODE:"""
-
-             code_content = self.generate_code(prompt=code_prompt, **llm_generation_kwargs)
-             code_uuid = f"code_asset_{uuid.uuid4()}"
-             asset_store[code_uuid] = {"type": "code", "content": code_content}
-             param_assets['code_asset_id'] = code_uuid
-             log_event_fn(f"Code asset created: {code_uuid[:8]}...", MSG_TYPE.MSG_TYPE_INFO)
-
-         if requires_image:
-             image_assets = [asset_id for asset_id, asset in asset_store.items() if asset['type'] == 'image' and asset.get('source') == 'user']
-             if image_assets:
-                 param_assets['image_asset_id'] = image_assets[0]
-                 log_event_fn(f"Using image asset: {image_assets[0][:8]}...", MSG_TYPE.MSG_TYPE_INFO)
-             else:
-                 log_event_fn("⚠️ Image required but none available", MSG_TYPE.MSG_TYPE_WARNING)
-
-         # Enhanced parameter generation
-         param_prompt = f"""Generate the optimal parameters for this tool execution.
-
- TOOL: {tool_name}
- CURRENT CONTEXT: {current_scratchpad}
- CURRENT REASONING: {reasoning}
- AVAILABLE ASSETS: {json.dumps(param_assets) if param_assets else "None"}
-
- Based on your analysis and the current step you're working on, provide the most appropriate parameters.
- Be specific and purposeful in your parameter choices.
-
- Output format: {{"tool_params": {{...}}}}"""
-
-         log_prompt_fn(f"Parameter Generation Step {i+1}", param_prompt)
-         param_data = self.generate_structured_content(
-             prompt=param_prompt,
-             schema={"tool_params": "object"},
-             temperature=decision_temperature,
-             **llm_generation_kwargs
-         )
-         tool_params = param_data.get("tool_params", {}) if param_data else {}
-
-         current_scratchpad += f"\n**Parameters Generated**: {json.dumps(tool_params, indent=2)}"
-
-         # Hydrate parameters with assets
-         def _hydrate(data: Any, store: Dict) -> Any:
-             if isinstance(data, dict): return {k: _hydrate(v, store) for k, v in data.items()}
-             if isinstance(data, list): return [_hydrate(item, store) for item in data]
-             if isinstance(data, str) and "asset_" in data and data in store: return store[data].get("content", data)
-             return data
-
-         hydrated_params = _hydrate(tool_params, asset_store)
-
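_hydrate recursively walks the generated parameters and replaces any string that matches a stored asset ID with the stored content, letting the model pass large payloads by reference. A quick usage sketch with invented store contents:

    asset_store = {
        "code_asset_1234": {"type": "code", "content": "print('hello')"}
    }
    params = {"script": "code_asset_1234", "timeout": 30}
    hydrated = _hydrate(params, asset_store)
    # {'script': "print('hello')", 'timeout': 30}; non-asset values pass through unchanged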
-         # Execute the tool with detailed logging
-         start_time = time.time()
-         tool_result = {"status": "failure", "error": f"Tool '{tool_name}' failed to execute."}
-
-         try:
-             if tool_name in rag_registry:
-                 query = hydrated_params.get("query", "")
-                 if not query:
-                     # Fall back to using reasoning as query
-                     query = reasoning[:200] + "..." if len(reasoning) > 200 else reasoning
-
-                 log_event_fn(f"🔍 Searching knowledge base with query: '{query[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
-
-                 top_k = rag_tool_specs[tool_name]["default_top_k"]
-                 min_sim = rag_tool_specs[tool_name]["default_min_sim"]
-
-                 raw_results = rag_registry[tool_name](query=query, rag_top_k=top_k)
-                 raw_iter = raw_results["results"] if isinstance(raw_results, dict) and "results" in raw_results else raw_results
-
-                 docs = []
-                 for d in raw_iter or []:
-                     doc_data = {
-                         "text": d.get("text", str(d)),
-                         "score": d.get("score", 0) * 100,
-                         "metadata": d.get("metadata", {})
-                     }
-                     docs.append(doc_data)
-
-                 kept = [x for x in docs if x['score'] >= min_sim]
-                 tool_result = {
-                     "status": "success",
-                     "results": kept,
-                     "total_found": len(docs),
-                     "kept_after_filtering": len(kept),
-                     "query_used": query
-                 }
-
-                 sources_this_turn.extend([{
-                     "source": tool_name,
-                     "metadata": x["metadata"],
-                     "score": x["score"]
-                 } for x in kept])
-
-                 log_event_fn(f"📚 Retrieved {len(kept)} relevant documents (from {len(docs)} total)", MSG_TYPE.MSG_TYPE_INFO)
-
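Note the unit convention in this filter: raw retrieval scores, presumably in [0, 1], are scaled by 100, so default_min_sim acts as a percentage threshold. A quick check with invented numbers:

    docs = [{"score": 0.92 * 100}, {"score": 0.40 * 100}]
    min_sim = 50  # interpreted as percent
    kept = [d for d in docs if d["score"] >= min_sim]
    # keeps only the 92.0 entry; a min_sim configured as 0.5 would keep both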
- elif hasattr(self, "mcp") and "local_tools" not in tool_name:
3643
- log_event_fn(f"🔧 Executing MCP tool: {tool_name}", MSG_TYPE.MSG_TYPE_TOOL_CALL, meta={
3644
- "tool_name": tool_name,
3645
- "params": {k: str(v)[:100] for k, v in hydrated_params.items()} # Truncate for logging
3646
- })
3647
-
3648
- tool_result = self.mcp.execute_tool(tool_name, hydrated_params, lollms_client_instance=self)
3649
-
3650
- log_event_fn(f"Tool execution completed", MSG_TYPE.MSG_TYPE_TOOL_OUTPUT, meta={
3651
- "result_status": tool_result.get("status", "unknown"),
3652
- "has_error": "error" in tool_result
3653
- })
3654
-
3655
- elif tool_name == "local_tools::generate_image" and hasattr(self, "tti"):
3656
- image_prompt = hydrated_params.get("prompt", "")
3657
- log_event_fn(f"🎨 Generating image with prompt: '{image_prompt[:50]}...'", MSG_TYPE.MSG_TYPE_INFO)
3658
-
3659
- # This would call your text-to-image functionality
3660
- image_result = self.tti.generate_image(image_prompt) # Assuming this method exists
3661
- if image_result:
3662
- image_uuid = f"generated_image_{uuid.uuid4()}"
3663
- asset_store[image_uuid] = {"type": "image", "content": image_result, "source": "generated"}
3664
- tool_result = {"status": "success", "image_id": image_uuid, "prompt_used": image_prompt}
3665
- else:
3666
- tool_result = {"status": "failure", "error": "Image generation failed"}
3667
-
3668
- else:
3669
- tool_result = {"status": "failure", "error": f"Tool '{tool_name}' is not available or supported in this context."}
3670
-
3671
- except Exception as e:
3672
- error_msg = f"Exception during '{tool_name}' execution: {str(e)}"
3673
- log_event_fn(error_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
3674
- tool_result = {"status": "failure", "error": error_msg}
3675
-
3676
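Every branch above, including the exception handler, normalizes its outcome into one result shape, and the bookkeeping below only inspects the "status" discriminator. The implied contract, sketched with field names taken from the branches (extra keys vary by tool):

    ok = {"status": "success", "results": [], "query_used": "..."}
    bad = {"status": "failure", "error": "Tool 'x' is not available."}
    assert ok.get("status") == "success" and bad.get("status") != "success"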
-         response_time = time.time() - start_time
-         success = tool_result.get("status") == "success"
-
-         # Record performance
-         performance_tracker.record_tool_usage(tool_name, success, confidence, response_time, tool_result.get("error"))
-
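record_tool_usage is called with (tool_name, success, confidence, response_time, error), but the tracker class is not part of this hunk. A minimal sketch that would satisfy this call site, an assumption rather than the actual implementation:

    from collections import defaultdict

    class PerformanceTracker:
        def __init__(self):
            self.usage = defaultdict(list)

        def record_tool_usage(self, tool_name, success, confidence, response_time, error=None):
            # One record per call; aggregate lazily when reporting
            self.usage[tool_name].append({
                "success": success,
                "confidence": confidence,
                "response_time": response_time,
                "error": error,
            })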
-         # Update task status
-         if success and current_task_index < len(execution_plan.tasks):
-             execution_plan.tasks[current_task_index].status = TaskStatus.COMPLETED
-             completed_tasks.add(current_task_index)
-             current_task_index += 1
-
-         # Enhanced observation logging
-         observation_text = json.dumps(tool_result, indent=2)
-         if len(observation_text) > 1000:
-             # Truncate very long results for scratchpad
-             truncated_result = {k: (str(v)[:200] + "..." if len(str(v)) > 200 else v) for k, v in tool_result.items()}
-             observation_text = json.dumps(truncated_result, indent=2)
-
- current_scratchpad += f"\n\n### Step {i+1}: Execution & Observation\n"
3696
- current_scratchpad += f"**Tool Used**: {tool_name}\n"
3697
- current_scratchpad += f"**Success**: {success}\n"
3698
- current_scratchpad += f"**Response Time**: {response_time:.2f}s\n"
3699
- current_scratchpad += f"**Result**:\n```json\n{observation_text}\n```"
3700
-
3701
- # Track tool call
3702
- tool_calls_this_turn.append({
3703
- "name": tool_name,
3704
- "params": tool_params,
3705
- "result": tool_result,
3706
- "response_time": response_time,
3707
- "confidence": confidence,
3708
- "reasoning": reasoning
3709
- })
3710
-
3711
- if success:
3712
- log_event_fn(f"✅ Step {i+1} completed successfully", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
3713
- "tool_name": tool_name,
3714
- "response_time": response_time,
3715
- "confidence": confidence
3716
- })
3717
- else:
3718
- error_detail = tool_result.get("error", "No error detail provided.")
3719
- log_event_fn(f"⚠️ Step {i+1} completed with issues: {error_detail}", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id, meta={
3720
- "tool_name": tool_name,
3721
- "error": error_detail,
3722
- "confidence": confidence
3723
- })
3724
-
3725
- # Add failure handling to scratchpad
3726
- current_scratchpad += f"\n**Failure Analysis**: {error_detail}"
3727
- current_scratchpad += f"\n**Next Steps**: Consider alternative approaches or tools"
3728
-
3729
- # Log current progress
3730
- completed_count = len(completed_tasks)
3731
- total_tasks = len(execution_plan.tasks)
3732
- if total_tasks > 0:
3733
- progress = (completed_count / total_tasks) * 100
3734
- log_event_fn(f"Progress: {completed_count}/{total_tasks} tasks completed ({progress:.1f}%)", MSG_TYPE.MSG_TYPE_STEP_PROGRESS, meta={"progress": progress})
3735
-
3736
- # Check if all tasks are completed
3737
- if completed_count >= total_tasks:
3738
- log_event_fn("🎯 All planned tasks completed", MSG_TYPE.MSG_TYPE_INFO)
3739
- break
3740
-
3741
- except Exception as ex:
3742
- log_event_fn(f"💥 Unexpected error in reasoning step {i+1}: {str(ex)}", MSG_TYPE.MSG_TYPE_ERROR, event_id=reasoning_step_id)
3743
- trace_exception(ex)
3744
-
3745
- # Add error to scratchpad for context
3746
- current_scratchpad += f"\n\n### Step {i+1}: Unexpected Error\n**Error**: {str(ex)}\n**Recovery**: Continuing with adjusted approach"
3747
-
3748
- log_event_fn("🔄 Recovering and continuing with next step", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
3749
-
3750
- # Enhanced self-reflection
3751
- if enable_self_reflection and len(tool_calls_this_turn) > 0:
3752
- reflection_id = log_event_fn("🤔 Conducting comprehensive self-assessment...", MSG_TYPE.MSG_TYPE_STEP_START)
3753
- try:
3754
- reflection_prompt = f"""Conduct a thorough review of your work and assess the quality of your response to the user's request.
3755
-
3756
- ORIGINAL REQUEST: "{original_user_prompt}"
3757
- TOOLS USED: {len(tool_calls_this_turn)}
3758
- PLAN REVISIONS: {plan_revision_count}
3759
-
3760
- COMPLETE ANALYSIS:
3761
- {current_scratchpad}
3762
-
3763
- Evaluate your performance on multiple dimensions:
3764
-
3765
- 1. **Goal Achievement**: Did you fully address the user's request?
3766
- 2. **Process Efficiency**: Was your approach optimal given the available tools?
3767
- 3. **Information Quality**: Is the information you gathered accurate and relevant?
3768
- 4. **Decision Making**: Were your tool choices and parameters appropriate?
3769
- 5. **Adaptability**: How well did you handle unexpected results or plan changes?
3770
-
3771
- Provide your assessment as JSON:
3772
- {{
3773
- "goal_achieved": true,
3774
- "effectiveness_score": 0.85,
3775
- "process_efficiency": 0.8,
3776
- "information_quality": 0.9,
3777
- "decision_making": 0.85,
3778
- "adaptability": 0.7,
3779
- "overall_confidence": 0.82,
3780
- "strengths": ["Clear reasoning", "Good tool selection"],
3781
- "areas_for_improvement": ["Could have been more efficient"],
3782
- "summary": "Successfully completed the user's request with high quality results",
3783
- "key_insights": ["Discovered that X was more important than initially thought"]
3784
- }}"""
3785
-
-         reflection_data = self.generate_structured_content(
-             prompt=reflection_prompt,
-             schema={
-                 "goal_achieved": "boolean",
-                 "effectiveness_score": "number",
-                 "process_efficiency": "number",
-                 "information_quality": "number",
-                 "decision_making": "number",
-                 "adaptability": "number",
-                 "overall_confidence": "number",
-                 "strengths": "array",
-                 "areas_for_improvement": "array",
-                 "summary": "string",
-                 "key_insights": "array"
-             },
-             temperature=0.3,
-             **llm_generation_kwargs
-         )
-
-         if reflection_data:
-             current_scratchpad += "\n\n### Comprehensive Self-Assessment\n"
-             current_scratchpad += f"**Goal Achieved**: {reflection_data.get('goal_achieved', False)}\n"
-             current_scratchpad += f"**Overall Confidence**: {reflection_data.get('overall_confidence', 0.5):.2f}\n"
-             current_scratchpad += f"**Effectiveness Score**: {reflection_data.get('effectiveness_score', 0.5):.2f}\n"
-             current_scratchpad += f"**Key Strengths**: {', '.join(reflection_data.get('strengths', []))}\n"
-             current_scratchpad += f"**Improvement Areas**: {', '.join(reflection_data.get('areas_for_improvement', []))}\n"
-             current_scratchpad += f"**Summary**: {reflection_data.get('summary', '')}\n"
-
-             log_event_fn("✅ Self-assessment completed", MSG_TYPE.MSG_TYPE_STEP_END, event_id=reflection_id, meta={
-                 "overall_confidence": reflection_data.get('overall_confidence', 0.5),
-                 "goal_achieved": reflection_data.get('goal_achieved', False),
-                 "effectiveness_score": reflection_data.get('effectiveness_score', 0.5)
-             })
-         else:
-             log_event_fn("Self-assessment data generation failed", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
-
-     except Exception as e:
-         log_event_fn(f"Self-assessment failed: {e}", MSG_TYPE.MSG_TYPE_WARNING, event_id=reflection_id)
-
- # Enhanced final synthesis
- synthesis_id = log_event_fn("📝 Synthesizing comprehensive final response...", MSG_TYPE.MSG_TYPE_STEP_START)
-
- final_answer_prompt = f"""Create a comprehensive, well-structured final response that fully addresses the user's request.
-
- ORIGINAL REQUEST: "{original_user_prompt}"
- CONTEXT: {context or "No additional context"}
-
- COMPLETE ANALYSIS AND WORK:
- {current_scratchpad}
-
- GUIDELINES for your response:
- 1. **Be Complete**: Address all aspects of the user's request
- 2. **Be Clear**: Organize your response logically and use clear language
- 3. **Be Helpful**: Provide actionable information and insights
- 4. **Be Honest**: If there were limitations or uncertainties, mention them appropriately
- 5. **Be Concise**: While being thorough, avoid unnecessary verbosity
- 6. **Cite Sources**: If you used research tools, reference the information appropriately
-
- Your response should feel natural and conversational while being informative and valuable.
-
  FINAL RESPONSE:"""
 
  log_prompt_fn("Final Synthesis Prompt", final_answer_prompt)