tunacode-cli 0.0.44__py3-none-any.whl → 0.0.46__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of tunacode-cli might be problematic.

@@ -30,12 +30,11 @@ except ImportError:
  STREAMING_AVAILABLE = False

  from tunacode.constants import READ_ONLY_TOOLS
- from tunacode.core.agents.dspy_integration import DSPyIntegration
  from tunacode.core.recursive import RecursiveTaskExecutor
  from tunacode.core.state import StateManager
  from tunacode.core.token_usage.api_response_parser import ApiResponseParser
  from tunacode.core.token_usage.cost_calculator import CostCalculator
- from tunacode.exceptions import ToolBatchingJSONError
+ from tunacode.exceptions import ToolBatchingJSONError, UserAbortError
  from tunacode.services.mcp import get_mcp_servers
  from tunacode.tools.bash import bash
  from tunacode.tools.glob import glob
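
The only import changes in this release are the removal of DSPyIntegration and the addition of UserAbortError, which pairs with the new cancellation handling at the end of process_request (last hunk below). A minimal caller-side sketch, assuming only that tunacode.exceptions.UserAbortError is importable as shown above; the wrapper function and its arguments are illustrative placeholders, not tunacode APIs:

```python
# Hypothetical call site: with 0.0.46 a cancelled request surfaces as
# UserAbortError rather than a bare asyncio.CancelledError.
from tunacode.exceptions import UserAbortError


async def run_once(process_request, *args, **kwargs):
    try:
        return await process_request(*args, **kwargs)
    except UserAbortError:
        # Treat an abort as a clean, user-initiated stop.
        return None
```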
@@ -519,18 +518,6 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
  # Use a default system prompt if neither file exists
  system_prompt = "You are a helpful AI assistant for software development tasks."

- # Enhance with DSPy optimization if enabled
- use_dspy = state_manager.session.user_config.get("settings", {}).get(
- "use_dspy_optimization", True
- )
- if use_dspy:
- try:
- dspy_integration = DSPyIntegration(state_manager)
- system_prompt = dspy_integration.enhance_system_prompt(system_prompt)
- logger.info("Enhanced system prompt with DSPy optimizations")
- except Exception as e:
- logger.warning(f"Failed to enhance prompt with DSPy: {e}")
-
  # Load TUNACODE.md context
  # Use sync version of get_code_style to avoid nested event loop issues
  try:
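
This hunk removes the DSPy-based system prompt enhancement from get_or_create_agent, so 0.0.46 always builds the agent with the prompt loaded from file or with the default string above. The nested settings lookup used by the removed toggle is the same pattern the rest of the module keeps using; a small sketch, where get_setting is a hypothetical helper and the dict shape is taken from the calls visible in this diff:

```python
# Hypothetical helper mirroring the repeated pattern
# user_config.get("settings", {}).get(key, default) seen throughout the diff.
def get_setting(user_config: dict, key: str, default):
    return user_config.get("settings", {}).get(key, default)


# The removed toggle read, for reference:
# use_dspy = get_setting(state_manager.session.user_config, "use_dspy_optimization", True)
```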
@@ -765,356 +752,335 @@ async def process_request(
  tool_callback: Optional[ToolCallback] = None,
  streaming_callback: Optional[callable] = None,
  ) -> AgentRun:
- agent = get_or_create_agent(model, state_manager)
- mh = state_manager.session.messages.copy()
- # Get max iterations from config (default: 40)
- max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 40)
- fallback_enabled = state_manager.session.user_config.get("settings", {}).get(
- "fallback_response", True
- )
-
- # Check if DSPy optimization is enabled and if this is a complex task
- use_dspy = state_manager.session.user_config.get("settings", {}).get(
- "use_dspy_optimization", True
- )
- dspy_integration = None
- task_breakdown = None
-
- # Check if recursive execution is enabled
- use_recursive = state_manager.session.user_config.get("settings", {}).get(
- "use_recursive_execution", True
- )
- recursive_threshold = state_manager.session.user_config.get("settings", {}).get(
- "recursive_complexity_threshold", 0.7
- )
+ try:
+ agent = get_or_create_agent(model, state_manager)
+ mh = state_manager.session.messages.copy()
+ # Get max iterations from config (default: 40)
+ max_iterations = state_manager.session.user_config.get("settings", {}).get(
+ "max_iterations", 40
+ )
+ fallback_enabled = state_manager.session.user_config.get("settings", {}).get(
+ "fallback_response", True
+ )

- if use_dspy:
- try:
- dspy_integration = DSPyIntegration(state_manager)
+ # Check if recursive execution is enabled
+ use_recursive = state_manager.session.user_config.get("settings", {}).get(
+ "use_recursive_execution", True
+ )
+ recursive_threshold = state_manager.session.user_config.get("settings", {}).get(
+ "recursive_complexity_threshold", 0.7
+ )

- # Check if this is a complex task that needs planning
- if dspy_integration.should_use_task_planner(message):
- task_breakdown = dspy_integration.get_task_breakdown(message)
- if task_breakdown and task_breakdown.get("requires_todo"):
- # Auto-create todos for complex tasks
- from tunacode.tools.todo import TodoTool
+ # Check if recursive execution should be used
+ if use_recursive and state_manager.session.current_recursion_depth == 0:
+ try:
+ # Initialize recursive executor
+ recursive_executor = RecursiveTaskExecutor(
+ state_manager=state_manager,
+ max_depth=state_manager.session.max_recursion_depth,
+ min_complexity_threshold=recursive_threshold,
+ default_iteration_budget=max_iterations,
+ )

- todo_tool = TodoTool(state_manager=state_manager)
+ # Analyze task complexity
+ complexity_result = await recursive_executor.decomposer.analyze_and_decompose(
+ message
+ )

+ if (
+ complexity_result.should_decompose
+ and complexity_result.total_complexity >= recursive_threshold
+ ):
  if state_manager.session.show_thoughts:
  from tunacode.ui import console as ui

- await ui.muted("DSPy: Detected complex task - creating todo list")
-
- # Create todos from subtasks
- todos = []
- for subtask in task_breakdown["subtasks"][:5]: # Limit to first 5
- todos.append(
- {
- "content": subtask["task"],
- "priority": subtask.get("priority", "medium"),
- }
+ await ui.muted(
+ f"\n🔄 RECURSIVE EXECUTION: Task complexity {complexity_result.total_complexity:.2f} >= {recursive_threshold}"
  )
+ await ui.muted(f"Reasoning: {complexity_result.reasoning}")
+ await ui.muted(f"Subtasks: {len(complexity_result.subtasks)}")

- if todos:
- await todo_tool._execute(action="add_multiple", todos=todos)
- except Exception as e:
- logger.warning(f"DSPy task planning failed: {e}")
-
- # Check if recursive execution should be used
- if use_recursive and state_manager.session.current_recursion_depth == 0:
- try:
- # Initialize recursive executor
- recursive_executor = RecursiveTaskExecutor(
- state_manager=state_manager,
- max_depth=state_manager.session.max_recursion_depth,
- min_complexity_threshold=recursive_threshold,
- default_iteration_budget=max_iterations,
- )
+ # Execute recursively
+ success, result, error = await recursive_executor.execute_task(
+ request=message, parent_task_id=None, depth=0
+ )

- # Analyze task complexity
- complexity_result = await recursive_executor.decomposer.analyze_and_decompose(message)
+ # For now, fall back to normal execution
+ # TODO: Properly integrate recursive execution results
+ pass
+ except Exception as e:
+ logger.warning(f"Recursive execution failed, falling back to normal: {e}")
+ # Continue with normal execution

- if (
- complexity_result.should_decompose
- and complexity_result.total_complexity >= recursive_threshold
- ):
- if state_manager.session.show_thoughts:
- from tunacode.ui import console as ui
+ from tunacode.configuration.models import ModelRegistry
+ from tunacode.core.token_usage.usage_tracker import UsageTracker

- await ui.muted(
- f"\n🔄 RECURSIVE EXECUTION: Task complexity {complexity_result.total_complexity:.2f} >= {recursive_threshold}"
- )
- await ui.muted(f"Reasoning: {complexity_result.reasoning}")
- await ui.muted(f"Subtasks: {len(complexity_result.subtasks)}")
+ parser = ApiResponseParser()
+ registry = ModelRegistry()
+ calculator = CostCalculator(registry)
+ usage_tracker = UsageTracker(parser, calculator, state_manager)
+ response_state = ResponseState()

- # Execute recursively
- success, result, error = await recursive_executor.execute_task(
- request=message, parent_task_id=None, depth=0
- )
+ # Reset iteration tracking for this request
+ state_manager.session.iteration_count = 0

- # Create AgentRun response
- from datetime import datetime
+ # Create a request-level buffer for batching read-only tools across nodes
+ tool_buffer = ToolBuffer()

- if success:
- return AgentRun(
- messages=[{"role": "assistant", "content": str(result)}],
- timestamp=datetime.now(),
- model=model,
- iterations=1,
- status="success",
- )
- else:
- return AgentRun(
- messages=[{"role": "assistant", "content": f"Task failed: {error}"}],
- timestamp=datetime.now(),
- model=model,
- iterations=1,
- status="error",
- )
- except Exception as e:
- logger.warning(f"Recursive execution failed, falling back to normal: {e}")
- # Continue with normal execution
-
- from tunacode.configuration.models import ModelRegistry
- from tunacode.core.token_usage.usage_tracker import UsageTracker
-
- parser = ApiResponseParser()
- registry = ModelRegistry()
- calculator = CostCalculator(registry)
- usage_tracker = UsageTracker(parser, calculator, state_manager)
- response_state = ResponseState()
-
- # Reset iteration tracking for this request
- state_manager.session.iteration_count = 0
-
- # Create a request-level buffer for batching read-only tools across nodes
- tool_buffer = ToolBuffer()
-
- # Show TUNACODE.md preview if it was loaded and thoughts are enabled
- if state_manager.session.show_thoughts and hasattr(state_manager, "tunacode_preview"):
- from tunacode.ui import console as ui
-
- await ui.muted(state_manager.tunacode_preview)
- # Clear the preview after displaying it once
- delattr(state_manager, "tunacode_preview")
-
- # Show what we're sending to the API when thoughts are enabled
- if state_manager.session.show_thoughts:
- from tunacode.ui import console as ui
-
- await ui.muted("\n" + "=" * 60)
- await ui.muted("📤 SENDING TO API:")
- await ui.muted(f"Message: {message}")
- await ui.muted(f"Model: {model}")
- await ui.muted(f"Message History Length: {len(mh)}")
- await ui.muted("=" * 60)
-
- async with agent.iter(message, message_history=mh) as agent_run:
- i = 0
- async for node in agent_run:
- state_manager.session.current_iteration = i + 1
-
- # Handle token-level streaming for model request nodes
- if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
- async with node.stream(agent_run.ctx) as request_stream:
- async for event in request_stream:
- if isinstance(event, PartDeltaEvent) and isinstance(
- event.delta, TextPartDelta
- ):
- # Stream individual token deltas
- if event.delta.content_delta:
- await streaming_callback(event.delta.content_delta)
-
- await _process_node(
- node,
- tool_callback,
- state_manager,
- tool_buffer,
- streaming_callback,
- usage_tracker,
- )
- if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
- if node.result.output:
- response_state.has_user_response = True
- i += 1
- state_manager.session.iteration_count = i
+ # Show TUNACODE.md preview if it was loaded and thoughts are enabled
+ if state_manager.session.show_thoughts and hasattr(state_manager, "tunacode_preview"):
+ from tunacode.ui import console as ui

- # Display iteration progress if thoughts are enabled
- if state_manager.session.show_thoughts:
- from tunacode.ui import console as ui
+ await ui.muted(state_manager.tunacode_preview)
+ # Clear the preview after displaying it once
+ delattr(state_manager, "tunacode_preview")

- await ui.muted(f"\nITERATION: {i}/{max_iterations}")
+ # Show what we're sending to the API when thoughts are enabled
+ if state_manager.session.show_thoughts:
+ from tunacode.ui import console as ui

- # Show summary of tools used so far
- if state_manager.session.tool_calls:
- tool_summary = {}
- for tc in state_manager.session.tool_calls:
- tool_name = tc.get("tool", "unknown")
- tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1
+ await ui.muted("\n" + "=" * 60)
+ await ui.muted("📤 SENDING TO API:")
+ await ui.muted(f"Message: {message}")
+ await ui.muted(f"Model: {model}")
+ await ui.muted(f"Message History Length: {len(mh)}")
+ await ui.muted("=" * 60)

- summary_str = ", ".join(
- [f"{name}: {count}" for name, count in tool_summary.items()]
- )
- await ui.muted(f"TOOLS USED: {summary_str}")
+ async with agent.iter(message, message_history=mh) as agent_run:
+ i = 0
+ async for node in agent_run:
+ state_manager.session.current_iteration = i + 1
+
+ # Handle token-level streaming for model request nodes
+ if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
+ async with node.stream(agent_run.ctx) as request_stream:
+ async for event in request_stream:
+ if isinstance(event, PartDeltaEvent) and isinstance(
+ event.delta, TextPartDelta
+ ):
+ # Stream individual token deltas
+ if event.delta.content_delta:
+ await streaming_callback(event.delta.content_delta)
+
+ await _process_node(
+ node,
+ tool_callback,
+ state_manager,
+ tool_buffer,
+ streaming_callback,
+ usage_tracker,
+ )
+ if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
+ if node.result.output:
+ response_state.has_user_response = True
+ i += 1
+ state_manager.session.iteration_count = i

- if i >= max_iterations:
+ # Display iteration progress if thoughts are enabled
  if state_manager.session.show_thoughts:
  from tunacode.ui import console as ui

- await ui.warning(f"Reached maximum iterations ({max_iterations})")
- break
+ await ui.muted(f"\nITERATION: {i}/{max_iterations}")

- # Final flush: execute any remaining buffered read-only tools
- if tool_callback and tool_buffer.has_tasks():
- import time
+ # Show summary of tools used so far
+ if state_manager.session.tool_calls:
+ tool_summary = {}
+ for tc in state_manager.session.tool_calls:
+ tool_name = tc.get("tool", "unknown")
+ tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1

- from tunacode.ui import console as ui
-
- buffered_tasks = tool_buffer.flush()
- start_time = time.time()
+ summary_str = ", ".join(
+ [f"{name}: {count}" for name, count in tool_summary.items()]
+ )
+ await ui.muted(f"TOOLS USED: {summary_str}")

- if state_manager.session.show_thoughts:
- await ui.muted("\n" + "=" * 60)
- await ui.muted(
- f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
- )
- await ui.muted("=" * 60)
+ if i >= max_iterations:
+ if state_manager.session.show_thoughts:
+ from tunacode.ui import console as ui

- for idx, (part, node) in enumerate(buffered_tasks, 1):
- tool_desc = f" [{idx}] {part.tool_name}"
- if hasattr(part, "args") and isinstance(part.args, dict):
- if part.tool_name == "read_file" and "file_path" in part.args:
- tool_desc += f" → {part.args['file_path']}"
- elif part.tool_name == "grep" and "pattern" in part.args:
- tool_desc += f" → pattern: '{part.args['pattern']}'"
- if "include_files" in part.args:
- tool_desc += f", files: '{part.args['include_files']}'"
- elif part.tool_name == "list_dir" and "directory" in part.args:
- tool_desc += f" → {part.args['directory']}"
- elif part.tool_name == "glob" and "pattern" in part.args:
- tool_desc += f" → pattern: '{part.args['pattern']}'"
- await ui.muted(tool_desc)
- await ui.muted("=" * 60)
+ await ui.warning(f"Reached maximum iterations ({max_iterations})")
+ break

- await execute_tools_parallel(buffered_tasks, tool_callback)
+ # Final flush: execute any remaining buffered read-only tools
+ if tool_callback and tool_buffer.has_tasks():
+ import time

- elapsed_time = (time.time() - start_time) * 1000
- sequential_estimate = len(buffered_tasks) * 100
- speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
+ from tunacode.ui import console as ui

- if state_manager.session.show_thoughts:
- await ui.muted(
- f"✅ Final batch completed in {elapsed_time:.0f}ms "
- f"(~{speedup:.1f}x faster than sequential)\n"
- )
+ buffered_tasks = tool_buffer.flush()
+ start_time = time.time()

- # If we need to add a fallback response, create a wrapper
- if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
- patch_tool_messages("Task incomplete", state_manager=state_manager)
- response_state.has_final_synthesis = True
-
- # Extract context from the agent run
- tool_calls_summary = []
- files_modified = set()
- commands_run = []
-
- # Analyze message history for context
- for msg in state_manager.session.messages:
- if hasattr(msg, "parts"):
- for part in msg.parts:
- if hasattr(part, "part_kind") and part.part_kind == "tool-call":
- tool_name = getattr(part, "tool_name", "unknown")
- tool_calls_summary.append(tool_name)
-
- # Track specific operations
- if tool_name in ["write_file", "update_file"] and hasattr(part, "args"):
- if isinstance(part.args, dict) and "file_path" in part.args:
- files_modified.add(part.args["file_path"])
- elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
- if isinstance(part.args, dict) and "command" in part.args:
- commands_run.append(part.args["command"])
-
- # Build fallback response with context
- fallback = FallbackResponse(
- summary="Reached maximum iterations without producing a final response.",
- progress=f"Completed {i} iterations (limit: {max_iterations})",
- )
+ if state_manager.session.show_thoughts:
+ await ui.muted("\n" + "=" * 60)
+ await ui.muted(
+ f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
+ )
+ await ui.muted("=" * 60)

- # Get verbosity setting
- verbosity = state_manager.session.user_config.get("settings", {}).get(
- "fallback_verbosity", "normal"
- )
+ for idx, (part, node) in enumerate(buffered_tasks, 1):
+ tool_desc = f" [{idx}] {part.tool_name}"
+ if hasattr(part, "args") and isinstance(part.args, dict):
+ if part.tool_name == "read_file" and "file_path" in part.args:
+ tool_desc += f" → {part.args['file_path']}"
+ elif part.tool_name == "grep" and "pattern" in part.args:
+ tool_desc += f" → pattern: '{part.args['pattern']}'"
+ if "include_files" in part.args:
+ tool_desc += f", files: '{part.args['include_files']}'"
+ elif part.tool_name == "list_dir" and "directory" in part.args:
+ tool_desc += f" → {part.args['directory']}"
+ elif part.tool_name == "glob" and "pattern" in part.args:
+ tool_desc += f" → pattern: '{part.args['pattern']}'"
+ await ui.muted(tool_desc)
+ await ui.muted("=" * 60)

- if verbosity in ["normal", "detailed"]:
- # Add what was attempted
- if tool_calls_summary:
- tool_counts = {}
- for tool in tool_calls_summary:
- tool_counts[tool] = tool_counts.get(tool, 0) + 1
-
- fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
- for tool, count in sorted(tool_counts.items()):
- fallback.issues.append(f" • {tool}: {count}x")
-
- if verbosity == "detailed":
- if files_modified:
- fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
- for f in sorted(files_modified)[:5]: # Limit to 5 files
- fallback.issues.append(f" • {f}")
- if len(files_modified) > 5:
- fallback.issues.append(f" • ... and {len(files_modified) - 5} more")
-
- if commands_run:
- fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
- for cmd in commands_run[:3]: # Limit to 3 commands
- # Truncate long commands
- display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
- fallback.issues.append(f" • {display_cmd}")
- if len(commands_run) > 3:
- fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
-
- # Add helpful next steps
- fallback.next_steps.append(
- "The task may be too complex - try breaking it into smaller steps"
- )
- fallback.next_steps.append("Check the output above for any errors or partial progress")
- if files_modified:
- fallback.next_steps.append("Review modified files to see what changes were made")
+ await execute_tools_parallel(buffered_tasks, tool_callback)

- # Create comprehensive output
- output_parts = [fallback.summary, ""]
+ elapsed_time = (time.time() - start_time) * 1000
+ sequential_estimate = len(buffered_tasks) * 100
+ speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0

- if fallback.progress:
- output_parts.append(f"Progress: {fallback.progress}")
+ if state_manager.session.show_thoughts:
+ await ui.muted(
+ f"✅ Final batch completed in {elapsed_time:.0f}ms "
+ f"(~{speedup:.1f}x faster than sequential)\n"
+ )

- if fallback.issues:
- output_parts.append("\nWhat happened:")
- output_parts.extend(fallback.issues)
+ # If we need to add a fallback response, create a wrapper
+ if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
+ patch_tool_messages("Task incomplete", state_manager=state_manager)
+ response_state.has_final_synthesis = True
+
+ # Extract context from the agent run
+ tool_calls_summary = []
+ files_modified = set()
+ commands_run = []
+
+ # Analyze message history for context
+ for msg in state_manager.session.messages:
+ if hasattr(msg, "parts"):
+ for part in msg.parts:
+ if hasattr(part, "part_kind") and part.part_kind == "tool-call":
+ tool_name = getattr(part, "tool_name", "unknown")
+ tool_calls_summary.append(tool_name)
+
+ # Track specific operations
+ if tool_name in ["write_file", "update_file"] and hasattr(
+ part, "args"
+ ):
+ if isinstance(part.args, dict) and "file_path" in part.args:
+ files_modified.add(part.args["file_path"])
+ elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
+ if isinstance(part.args, dict) and "command" in part.args:
+ commands_run.append(part.args["command"])
+
+ # Build fallback response with context
+ fallback = FallbackResponse(
+ summary="Reached maximum iterations without producing a final response.",
+ progress=f"Completed {i} iterations (limit: {max_iterations})",
+ )

- if fallback.next_steps:
- output_parts.append("\nSuggested next steps:")
- for step in fallback.next_steps:
- output_parts.append(f" • {step}")
+ # Get verbosity setting
+ verbosity = state_manager.session.user_config.get("settings", {}).get(
+ "fallback_verbosity", "normal"
+ )

- comprehensive_output = "\n".join(output_parts)
+ if verbosity in ["normal", "detailed"]:
+ # Add what was attempted
+ if tool_calls_summary:
+ tool_counts = {}
+ for tool in tool_calls_summary:
+ tool_counts[tool] = tool_counts.get(tool, 0) + 1
+
+ fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
+ for tool, count in sorted(tool_counts.items()):
+ fallback.issues.append(f" • {tool}: {count}x")
+
+ if verbosity == "detailed":
+ if files_modified:
+ fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
+ for f in sorted(files_modified)[:5]: # Limit to 5 files
+ fallback.issues.append(f" • {f}")
+ if len(files_modified) > 5:
+ fallback.issues.append(
+ f" • ... and {len(files_modified) - 5} more"
+ )
+
+ if commands_run:
+ fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
+ for cmd in commands_run[:3]: # Limit to 3 commands
+ # Truncate long commands
+ display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
+ fallback.issues.append(f" • {display_cmd}")
+ if len(commands_run) > 3:
+ fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
+
+ # Add helpful next steps
+ fallback.next_steps.append(
+ "The task may be too complex - try breaking it into smaller steps"
+ )
+ fallback.next_steps.append(
+ "Check the output above for any errors or partial progress"
+ )
+ if files_modified:
+ fallback.next_steps.append(
+ "Review modified files to see what changes were made"
+ )

- # Create a wrapper object that mimics AgentRun with the required attributes
- class AgentRunWrapper:
- def __init__(self, wrapped_run, fallback_result):
+ # Create comprehensive output
+ output_parts = [fallback.summary, ""]
+
+ if fallback.progress:
+ output_parts.append(f"Progress: {fallback.progress}")
+
+ if fallback.issues:
+ output_parts.append("\nWhat happened:")
+ output_parts.extend(fallback.issues)
+
+ if fallback.next_steps:
+ output_parts.append("\nSuggested next steps:")
+ for step in fallback.next_steps:
+ output_parts.append(f" • {step}")
+
+ comprehensive_output = "\n".join(output_parts)
+
+ # Create a wrapper object that mimics AgentRun with the required attributes
+ class AgentRunWrapper:
+ def __init__(self, wrapped_run, fallback_result):
+ self._wrapped = wrapped_run
+ self._result = fallback_result
+ self.response_state = response_state
+
+ def __getattribute__(self, name):
+ # Handle special attributes first to avoid conflicts
+ if name in ["_wrapped", "_result", "response_state"]:
+ return object.__getattribute__(self, name)
+
+ # Explicitly handle 'result' to return our fallback result
+ if name == "result":
+ return object.__getattribute__(self, "_result")
+
+ # Delegate all other attributes to the wrapped object
+ try:
+ return getattr(object.__getattribute__(self, "_wrapped"), name)
+ except AttributeError:
+ raise AttributeError(
+ f"'{type(self).__name__}' object has no attribute '{name}'"
+ )
+
+ return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))
+
+ # For non-fallback cases, we still need to handle the response_state
+ # Create a minimal wrapper just to add response_state
+ class AgentRunWithState:
+ def __init__(self, wrapped_run):
  self._wrapped = wrapped_run
- self._result = fallback_result
  self.response_state = response_state

  def __getattribute__(self, name):
- # Handle special attributes first to avoid conflicts
- if name in ["_wrapped", "_result", "response_state"]:
+ # Handle special attributes first
+ if name in ["_wrapped", "response_state"]:
  return object.__getattribute__(self, name)

- # Explicitly handle 'result' to return our fallback result
- if name == "result":
- return object.__getattribute__(self, "_result")
-
  # Delegate all other attributes to the wrapped object
  try:
  return getattr(object.__getattribute__(self, "_wrapped"), name)
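
Most of this hunk is re-indentation: the whole body of process_request is now wrapped in a try block. The substantive changes are the removal of the DSPy task-planning path, and the recursive-execution branch no longer returning a synthetic AgentRun (it analyzes, optionally runs the decomposed task, then falls through to the normal agent.iter() loop, per the TODO in the added lines). The AgentRunWrapper and AgentRunWithState classes keep the same attribute-delegation pattern; a condensed, self-contained sketch of that pattern (RunProxy is an illustrative name, not a class in the package):

```python
# Condensed sketch of the delegation pattern used by AgentRunWrapper and
# AgentRunWithState: expose a couple of local attributes, optionally override
# `result`, and forward everything else to the wrapped run object.
class RunProxy:
    def __init__(self, wrapped_run, result=None):
        self._wrapped = wrapped_run
        self._result = result

    def __getattribute__(self, name):
        if name in ("_wrapped", "_result"):
            return object.__getattribute__(self, name)
        if name == "result" and object.__getattribute__(self, "_result") is not None:
            return object.__getattribute__(self, "_result")
        # Everything else (messages, usage, ctx, ...) comes from the wrapped run.
        return getattr(object.__getattribute__(self, "_wrapped"), name)
```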
@@ -1123,26 +1089,6 @@ async def process_request(
  f"'{type(self).__name__}' object has no attribute '{name}'"
  )

- return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))
-
- # For non-fallback cases, we still need to handle the response_state
- # Create a minimal wrapper just to add response_state
- class AgentRunWithState:
- def __init__(self, wrapped_run):
- self._wrapped = wrapped_run
- self.response_state = response_state
-
- def __getattribute__(self, name):
- # Handle special attributes first
- if name in ["_wrapped", "response_state"]:
- return object.__getattribute__(self, name)
-
- # Delegate all other attributes to the wrapped object
- try:
- return getattr(object.__getattribute__(self, "_wrapped"), name)
- except AttributeError:
- raise AttributeError(
- f"'{type(self).__name__}' object has no attribute '{name}'"
- )
-
- return AgentRunWithState(agent_run)
+ return AgentRunWithState(agent_run)
+ except asyncio.CancelledError:
+ raise UserAbortError("User aborted the request.")
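
The closing lines above are the behavioural change this release centres on: the entire request loop now runs inside a try block, and task cancellation is translated into the package's own UserAbortError. A minimal sketch of just that pattern, with guarded() as an illustrative stand-in rather than a real tunacode function:

```python
# Minimal sketch of the new cancellation handling, assuming only that
# tunacode.exceptions.UserAbortError exists (it is imported in the first hunk).
import asyncio

from tunacode.exceptions import UserAbortError


async def guarded(coro):
    try:
        return await coro
    except asyncio.CancelledError:
        # Mirror the new `except asyncio.CancelledError` at the end of process_request.
        raise UserAbortError("User aborted the request.")
```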