tunacode-cli 0.0.48__py3-none-any.whl → 0.0.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic; consult the registry's advisory page for more details.

Files changed (45)
  1. api/auth.py +13 -0
  2. api/users.py +8 -0
  3. tunacode/__init__.py +4 -0
  4. tunacode/cli/main.py +4 -0
  5. tunacode/cli/repl.py +39 -6
  6. tunacode/configuration/defaults.py +0 -1
  7. tunacode/constants.py +7 -1
  8. tunacode/core/agents/main.py +268 -245
  9. tunacode/core/agents/utils.py +54 -6
  10. tunacode/core/logging/__init__.py +29 -0
  11. tunacode/core/logging/config.py +57 -0
  12. tunacode/core/logging/formatters.py +48 -0
  13. tunacode/core/logging/handlers.py +83 -0
  14. tunacode/core/logging/logger.py +8 -0
  15. tunacode/core/recursive/__init__.py +18 -0
  16. tunacode/core/recursive/aggregator.py +467 -0
  17. tunacode/core/recursive/budget.py +414 -0
  18. tunacode/core/recursive/decomposer.py +398 -0
  19. tunacode/core/recursive/executor.py +470 -0
  20. tunacode/core/recursive/hierarchy.py +488 -0
  21. tunacode/core/state.py +45 -0
  22. tunacode/exceptions.py +23 -0
  23. tunacode/tools/base.py +7 -1
  24. tunacode/types.py +5 -1
  25. tunacode/ui/completers.py +2 -2
  26. tunacode/ui/console.py +30 -9
  27. tunacode/ui/input.py +2 -1
  28. tunacode/ui/keybindings.py +58 -1
  29. tunacode/ui/logging_compat.py +44 -0
  30. tunacode/ui/output.py +7 -6
  31. tunacode/ui/panels.py +30 -5
  32. tunacode/ui/recursive_progress.py +380 -0
  33. tunacode/utils/retry.py +163 -0
  34. tunacode/utils/security.py +3 -2
  35. tunacode/utils/token_counter.py +1 -2
  36. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/METADATA +2 -2
  37. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/RECORD +41 -29
  38. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/top_level.txt +1 -0
  39. tunacode/core/agents/dspy_integration.py +0 -223
  40. tunacode/core/agents/dspy_tunacode.py +0 -458
  41. tunacode/prompts/dspy_task_planning.md +0 -45
  42. tunacode/prompts/dspy_tool_selection.md +0 -58
  43. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/WHEEL +0 -0
  44. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/entry_points.txt +0 -0
  45. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/licenses/LICENSE +0 -0
@@ -6,7 +6,6 @@ Handles agent creation, configuration, and request processing.
6
6
 
7
7
  import asyncio
8
8
  import json
9
- import logging
10
9
  import os
11
10
  import re
12
11
  from datetime import datetime, timezone
@@ -15,6 +14,8 @@ from typing import Any, Iterator, List, Optional, Tuple
15
14
 
16
15
  from pydantic_ai import Agent
17
16
 
17
+ from tunacode.core.logging.logger import get_logger
18
+
18
19
  # Import streaming types with fallback for older versions
19
20
  try:
20
21
  from pydantic_ai.messages import (
@@ -33,6 +34,7 @@ from tunacode.constants import READ_ONLY_TOOLS
33
34
  from tunacode.core.state import StateManager
34
35
  from tunacode.core.token_usage.api_response_parser import ApiResponseParser
35
36
  from tunacode.core.token_usage.cost_calculator import CostCalculator
37
+ from tunacode.exceptions import ToolBatchingJSONError, UserAbortError
36
38
  from tunacode.services.mcp import get_mcp_servers
37
39
  from tunacode.tools.bash import bash
38
40
  from tunacode.tools.glob import glob
@@ -58,7 +60,7 @@ from tunacode.types import (
58
60
  )
59
61
 
60
62
  # Configure logging
61
- logger = logging.getLogger(__name__)
63
+ logger = get_logger(__name__)
62
64
 
63
65
 
64
66
  class ToolBuffer:
@@ -471,9 +473,17 @@ async def _process_node(
471
473
  if not has_tool_calls and buffering_callback:
472
474
  for part in node.model_response.parts:
473
475
  if hasattr(part, "content") and isinstance(part.content, str):
474
- await extract_and_execute_tool_calls(
475
- part.content, buffering_callback, state_manager
476
- )
476
+ try:
477
+ await extract_and_execute_tool_calls(
478
+ part.content, buffering_callback, state_manager
479
+ )
480
+ except ToolBatchingJSONError as e:
481
+ # Handle JSON parsing failure after retries
482
+ logger.error(f"Tool batching JSON error: {e}")
483
+ if state_manager.session.show_thoughts:
484
+ await ui.error(str(e))
485
+ # Continue processing other parts instead of failing completely
486
+ continue
477
487
 
478
488
  # Final flush: disabled temporarily while fixing the parallel execution design
479
489
  # The buffer is not being used in the current implementation
@@ -518,12 +528,12 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
518
528
  tunacode_content = tunacode_path.read_text(encoding="utf-8")
519
529
  if tunacode_content.strip():
520
530
  # Log that we found TUNACODE.md
521
- print("📄 TUNACODE.md located: Loading context...")
531
+ logger.info("📄 TUNACODE.md located: Loading context...")
522
532
 
523
533
  system_prompt += "\n\n# Project Context from TUNACODE.md\n" + tunacode_content
524
534
  else:
525
535
  # Log that TUNACODE.md was not found
526
- print("📄 TUNACODE.md not found: Using default context")
536
+ logger.info("📄 TUNACODE.md not found: Using default context")
527
537
  except Exception as e:
528
538
  # Log errors loading TUNACODE.md at debug level
529
539
  logger.debug(f"Error loading TUNACODE.md: {e}")
@@ -537,9 +547,8 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
537
547
  system_prompt += f'\n\n# Current Todo List\n\nYou have existing todos that need attention:\n\n{current_todos}\n\nRemember to check progress on these todos and update them as you work. Use todo("list") to see current status anytime.'
538
548
  except Exception as e:
539
549
  # Log error but don't fail agent creation
540
- import sys
541
550
 
542
- print(f"Warning: Failed to load todos: {e}", file=sys.stderr)
551
+ logger.warning(f"Warning: Failed to load todos: {e}")
543
552
 
544
553
  state_manager.session.agents[model] = Agent(
545
554
  model=model,
@@ -742,251 +751,284 @@ async def process_request(
742
751
  tool_callback: Optional[ToolCallback] = None,
743
752
  streaming_callback: Optional[callable] = None,
744
753
  ) -> AgentRun:
745
- agent = get_or_create_agent(model, state_manager)
746
- mh = state_manager.session.messages.copy()
747
- # Get max iterations from config (default: 40)
748
- max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 40)
749
- fallback_enabled = state_manager.session.user_config.get("settings", {}).get(
750
- "fallback_response", True
751
- )
754
+ try:
755
+ agent = get_or_create_agent(model, state_manager)
756
+ mh = state_manager.session.messages.copy()
757
+ # Get max iterations from config (default: 40)
758
+ max_iterations = state_manager.session.user_config.get("settings", {}).get(
759
+ "max_iterations", 40
760
+ )
761
+ fallback_enabled = state_manager.session.user_config.get("settings", {}).get(
762
+ "fallback_response", True
763
+ )
752
764
 
753
- from tunacode.configuration.models import ModelRegistry
754
- from tunacode.core.token_usage.usage_tracker import UsageTracker
755
-
756
- parser = ApiResponseParser()
757
- registry = ModelRegistry()
758
- calculator = CostCalculator(registry)
759
- usage_tracker = UsageTracker(parser, calculator, state_manager)
760
- response_state = ResponseState()
761
-
762
- # Reset iteration tracking for this request
763
- state_manager.session.iteration_count = 0
764
-
765
- # Create a request-level buffer for batching read-only tools across nodes
766
- tool_buffer = ToolBuffer()
767
-
768
- # Show TUNACODE.md preview if it was loaded and thoughts are enabled
769
- if state_manager.session.show_thoughts and hasattr(state_manager, "tunacode_preview"):
770
- from tunacode.ui import console as ui
771
-
772
- await ui.muted(state_manager.tunacode_preview)
773
- # Clear the preview after displaying it once
774
- delattr(state_manager, "tunacode_preview")
775
-
776
- # Show what we're sending to the API when thoughts are enabled
777
- if state_manager.session.show_thoughts:
778
- from tunacode.ui import console as ui
779
-
780
- await ui.muted("\n" + "=" * 60)
781
- await ui.muted("📤 SENDING TO API:")
782
- await ui.muted(f"Message: {message}")
783
- await ui.muted(f"Model: {model}")
784
- await ui.muted(f"Message History Length: {len(mh)}")
785
- await ui.muted("=" * 60)
786
-
787
- async with agent.iter(message, message_history=mh) as agent_run:
788
- i = 0
789
- async for node in agent_run:
790
- state_manager.session.current_iteration = i + 1
791
-
792
- # Handle token-level streaming for model request nodes
793
- if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
794
- async with node.stream(agent_run.ctx) as request_stream:
795
- async for event in request_stream:
796
- if isinstance(event, PartDeltaEvent) and isinstance(
797
- event.delta, TextPartDelta
798
- ):
799
- # Stream individual token deltas
800
- if event.delta.content_delta:
801
- await streaming_callback(event.delta.content_delta)
802
-
803
- await _process_node(
804
- node,
805
- tool_callback,
806
- state_manager,
807
- tool_buffer,
808
- streaming_callback,
809
- usage_tracker,
810
- )
811
- if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
812
- if node.result.output:
813
- response_state.has_user_response = True
814
- i += 1
815
- state_manager.session.iteration_count = i
765
+ from tunacode.configuration.models import ModelRegistry
766
+ from tunacode.core.token_usage.usage_tracker import UsageTracker
816
767
 
817
- # Display iteration progress if thoughts are enabled
818
- if state_manager.session.show_thoughts:
819
- from tunacode.ui import console as ui
768
+ parser = ApiResponseParser()
769
+ registry = ModelRegistry()
770
+ calculator = CostCalculator(registry)
771
+ usage_tracker = UsageTracker(parser, calculator, state_manager)
772
+ response_state = ResponseState()
820
773
 
821
- await ui.muted(f"\nITERATION: {i}/{max_iterations}")
774
+ # Reset iteration tracking for this request
775
+ state_manager.session.iteration_count = 0
822
776
 
823
- # Show summary of tools used so far
824
- if state_manager.session.tool_calls:
825
- tool_summary = {}
826
- for tc in state_manager.session.tool_calls:
827
- tool_name = tc.get("tool", "unknown")
828
- tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1
777
+ # Create a request-level buffer for batching read-only tools across nodes
778
+ tool_buffer = ToolBuffer()
829
779
 
830
- summary_str = ", ".join(
831
- [f"{name}: {count}" for name, count in tool_summary.items()]
832
- )
833
- await ui.muted(f"TOOLS USED: {summary_str}")
780
+ # Show TUNACODE.md preview if it was loaded and thoughts are enabled
781
+ if state_manager.session.show_thoughts and hasattr(state_manager, "tunacode_preview"):
782
+ from tunacode.ui import console as ui
783
+
784
+ await ui.muted(state_manager.tunacode_preview)
785
+ # Clear the preview after displaying it once
786
+ delattr(state_manager, "tunacode_preview")
834
787
 
835
- if i >= max_iterations:
788
+ # Show what we're sending to the API when thoughts are enabled
789
+ if state_manager.session.show_thoughts:
790
+ from tunacode.ui import console as ui
791
+
792
+ await ui.muted("\n" + "=" * 60)
793
+ await ui.muted("📤 SENDING TO API:")
794
+ await ui.muted(f"Message: {message}")
795
+ await ui.muted(f"Model: {model}")
796
+ await ui.muted(f"Message History Length: {len(mh)}")
797
+ await ui.muted("=" * 60)
798
+
799
+ async with agent.iter(message, message_history=mh) as agent_run:
800
+ i = 0
801
+ async for node in agent_run:
802
+ state_manager.session.current_iteration = i + 1
803
+
804
+ # Handle token-level streaming for model request nodes
805
+ if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
806
+ async with node.stream(agent_run.ctx) as request_stream:
807
+ async for event in request_stream:
808
+ if isinstance(event, PartDeltaEvent) and isinstance(
809
+ event.delta, TextPartDelta
810
+ ):
811
+ # Stream individual token deltas
812
+ if event.delta.content_delta:
813
+ await streaming_callback(event.delta.content_delta)
814
+
815
+ await _process_node(
816
+ node,
817
+ tool_callback,
818
+ state_manager,
819
+ tool_buffer,
820
+ streaming_callback,
821
+ usage_tracker,
822
+ )
823
+ if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
824
+ if node.result.output:
825
+ response_state.has_user_response = True
826
+ i += 1
827
+ state_manager.session.iteration_count = i
828
+
829
+ # Display iteration progress if thoughts are enabled
836
830
  if state_manager.session.show_thoughts:
837
831
  from tunacode.ui import console as ui
838
832
 
839
- await ui.warning(f"Reached maximum iterations ({max_iterations})")
840
- break
833
+ await ui.muted(f"\nITERATION: {i}/{max_iterations}")
841
834
 
842
- # Final flush: execute any remaining buffered read-only tools
843
- if tool_callback and tool_buffer.has_tasks():
844
- import time
845
-
846
- from tunacode.ui import console as ui
835
+ # Show summary of tools used so far
836
+ if state_manager.session.tool_calls:
837
+ tool_summary = {}
838
+ for tc in state_manager.session.tool_calls:
839
+ tool_name = tc.get("tool", "unknown")
840
+ tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1
847
841
 
848
- buffered_tasks = tool_buffer.flush()
849
- start_time = time.time()
842
+ summary_str = ", ".join(
843
+ [f"{name}: {count}" for name, count in tool_summary.items()]
844
+ )
845
+ await ui.muted(f"TOOLS USED: {summary_str}")
850
846
 
851
- await ui.muted("\n" + "=" * 60)
852
- await ui.muted(
853
- f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
854
- )
855
- await ui.muted("=" * 60)
847
+ if i >= max_iterations:
848
+ if state_manager.session.show_thoughts:
849
+ from tunacode.ui import console as ui
856
850
 
857
- for idx, (part, node) in enumerate(buffered_tasks, 1):
858
- tool_desc = f" [{idx}] {part.tool_name}"
859
- if hasattr(part, "args") and isinstance(part.args, dict):
860
- if part.tool_name == "read_file" and "file_path" in part.args:
861
- tool_desc += f" → {part.args['file_path']}"
862
- elif part.tool_name == "grep" and "pattern" in part.args:
863
- tool_desc += f" → pattern: '{part.args['pattern']}'"
864
- if "include_files" in part.args:
865
- tool_desc += f", files: '{part.args['include_files']}'"
866
- elif part.tool_name == "list_dir" and "directory" in part.args:
867
- tool_desc += f" → {part.args['directory']}"
868
- elif part.tool_name == "glob" and "pattern" in part.args:
869
- tool_desc += f" → pattern: '{part.args['pattern']}'"
870
- await ui.muted(tool_desc)
871
- await ui.muted("=" * 60)
851
+ await ui.warning(f"Reached maximum iterations ({max_iterations})")
852
+ break
872
853
 
873
- await execute_tools_parallel(buffered_tasks, tool_callback)
854
+ # Final flush: execute any remaining buffered read-only tools
855
+ if tool_callback and tool_buffer.has_tasks():
856
+ import time
874
857
 
875
- elapsed_time = (time.time() - start_time) * 1000
876
- sequential_estimate = len(buffered_tasks) * 100
877
- speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
858
+ from tunacode.ui import console as ui
878
859
 
879
- await ui.muted(
880
- f"✅ Final batch completed in {elapsed_time:.0f}ms "
881
- f"(~{speedup:.1f}x faster than sequential)\n"
882
- )
860
+ buffered_tasks = tool_buffer.flush()
861
+ start_time = time.time()
883
862
 
884
- # If we need to add a fallback response, create a wrapper
885
- if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
886
- patch_tool_messages("Task incomplete", state_manager=state_manager)
887
- response_state.has_final_synthesis = True
888
-
889
- # Extract context from the agent run
890
- tool_calls_summary = []
891
- files_modified = set()
892
- commands_run = []
893
-
894
- # Analyze message history for context
895
- for msg in state_manager.session.messages:
896
- if hasattr(msg, "parts"):
897
- for part in msg.parts:
898
- if hasattr(part, "part_kind") and part.part_kind == "tool-call":
899
- tool_name = getattr(part, "tool_name", "unknown")
900
- tool_calls_summary.append(tool_name)
901
-
902
- # Track specific operations
903
- if tool_name in ["write_file", "update_file"] and hasattr(part, "args"):
904
- if isinstance(part.args, dict) and "file_path" in part.args:
905
- files_modified.add(part.args["file_path"])
906
- elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
907
- if isinstance(part.args, dict) and "command" in part.args:
908
- commands_run.append(part.args["command"])
909
-
910
- # Build fallback response with context
911
- fallback = FallbackResponse(
912
- summary="Reached maximum iterations without producing a final response.",
913
- progress=f"Completed {i} iterations (limit: {max_iterations})",
914
- )
863
+ await ui.muted("\n" + "=" * 60)
864
+ await ui.muted(
865
+ f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
866
+ )
867
+ await ui.muted("=" * 60)
915
868
 
916
- # Get verbosity setting
917
- verbosity = state_manager.session.user_config.get("settings", {}).get(
918
- "fallback_verbosity", "normal"
919
- )
869
+ for idx, (part, node) in enumerate(buffered_tasks, 1):
870
+ tool_desc = f" [{idx}] {part.tool_name}"
871
+ if hasattr(part, "args") and isinstance(part.args, dict):
872
+ if part.tool_name == "read_file" and "file_path" in part.args:
873
+ tool_desc += f" → {part.args['file_path']}"
874
+ elif part.tool_name == "grep" and "pattern" in part.args:
875
+ tool_desc += f" → pattern: '{part.args['pattern']}'"
876
+ if "include_files" in part.args:
877
+ tool_desc += f", files: '{part.args['include_files']}'"
878
+ elif part.tool_name == "list_dir" and "directory" in part.args:
879
+ tool_desc += f" → {part.args['directory']}"
880
+ elif part.tool_name == "glob" and "pattern" in part.args:
881
+ tool_desc += f" → pattern: '{part.args['pattern']}'"
882
+ await ui.muted(tool_desc)
883
+ await ui.muted("=" * 60)
920
884
 
921
- if verbosity in ["normal", "detailed"]:
922
- # Add what was attempted
923
- if tool_calls_summary:
924
- tool_counts = {}
925
- for tool in tool_calls_summary:
926
- tool_counts[tool] = tool_counts.get(tool, 0) + 1
927
-
928
- fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
929
- for tool, count in sorted(tool_counts.items()):
930
- fallback.issues.append(f" • {tool}: {count}x")
931
-
932
- if verbosity == "detailed":
933
- if files_modified:
934
- fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
935
- for f in sorted(files_modified)[:5]: # Limit to 5 files
936
- fallback.issues.append(f" • {f}")
937
- if len(files_modified) > 5:
938
- fallback.issues.append(f" • ... and {len(files_modified) - 5} more")
939
-
940
- if commands_run:
941
- fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
942
- for cmd in commands_run[:3]: # Limit to 3 commands
943
- # Truncate long commands
944
- display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
945
- fallback.issues.append(f" • {display_cmd}")
946
- if len(commands_run) > 3:
947
- fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
948
-
949
- # Add helpful next steps
950
- fallback.next_steps.append(
951
- "The task may be too complex - try breaking it into smaller steps"
952
- )
953
- fallback.next_steps.append("Check the output above for any errors or partial progress")
954
- if files_modified:
955
- fallback.next_steps.append("Review modified files to see what changes were made")
885
+ await execute_tools_parallel(buffered_tasks, tool_callback)
956
886
 
957
- # Create comprehensive output
958
- output_parts = [fallback.summary, ""]
887
+ elapsed_time = (time.time() - start_time) * 1000
888
+ sequential_estimate = len(buffered_tasks) * 100
889
+ speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
959
890
 
960
- if fallback.progress:
961
- output_parts.append(f"Progress: {fallback.progress}")
891
+ await ui.muted(
892
+ f" Final batch completed in {elapsed_time:.0f}ms "
893
+ f"(~{speedup:.1f}x faster than sequential)\n"
894
+ )
962
895
 
963
- if fallback.issues:
964
- output_parts.append("\nWhat happened:")
965
- output_parts.extend(fallback.issues)
896
+ # If we need to add a fallback response, create a wrapper
897
+ if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
898
+ patch_tool_messages("Task incomplete", state_manager=state_manager)
899
+ response_state.has_final_synthesis = True
900
+
901
+ # Extract context from the agent run
902
+ tool_calls_summary = []
903
+ files_modified = set()
904
+ commands_run = []
905
+
906
+ # Analyze message history for context
907
+ for msg in state_manager.session.messages:
908
+ if hasattr(msg, "parts"):
909
+ for part in msg.parts:
910
+ if hasattr(part, "part_kind") and part.part_kind == "tool-call":
911
+ tool_name = getattr(part, "tool_name", "unknown")
912
+ tool_calls_summary.append(tool_name)
913
+
914
+ # Track specific operations
915
+ if tool_name in ["write_file", "update_file"] and hasattr(
916
+ part, "args"
917
+ ):
918
+ if isinstance(part.args, dict) and "file_path" in part.args:
919
+ files_modified.add(part.args["file_path"])
920
+ elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
921
+ if isinstance(part.args, dict) and "command" in part.args:
922
+ commands_run.append(part.args["command"])
923
+
924
+ # Build fallback response with context
925
+ fallback = FallbackResponse(
926
+ summary="Reached maximum iterations without producing a final response.",
927
+ progress=f"Completed {i} iterations (limit: {max_iterations})",
928
+ )
966
929
 
967
- if fallback.next_steps:
968
- output_parts.append("\nSuggested next steps:")
969
- for step in fallback.next_steps:
970
- output_parts.append(f" • {step}")
930
+ # Get verbosity setting
931
+ verbosity = state_manager.session.user_config.get("settings", {}).get(
932
+ "fallback_verbosity", "normal"
933
+ )
971
934
 
972
- comprehensive_output = "\n".join(output_parts)
935
+ if verbosity in ["normal", "detailed"]:
936
+ # Add what was attempted
937
+ if tool_calls_summary:
938
+ tool_counts = {}
939
+ for tool in tool_calls_summary:
940
+ tool_counts[tool] = tool_counts.get(tool, 0) + 1
941
+
942
+ fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
943
+ for tool, count in sorted(tool_counts.items()):
944
+ fallback.issues.append(f" • {tool}: {count}x")
945
+
946
+ if verbosity == "detailed":
947
+ if files_modified:
948
+ fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
949
+ for f in sorted(files_modified)[:5]: # Limit to 5 files
950
+ fallback.issues.append(f" • {f}")
951
+ if len(files_modified) > 5:
952
+ fallback.issues.append(
953
+ f" • ... and {len(files_modified) - 5} more"
954
+ )
955
+
956
+ if commands_run:
957
+ fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
958
+ for cmd in commands_run[:3]: # Limit to 3 commands
959
+ # Truncate long commands
960
+ display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
961
+ fallback.issues.append(f" • {display_cmd}")
962
+ if len(commands_run) > 3:
963
+ fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
964
+
965
+ # Add helpful next steps
966
+ fallback.next_steps.append(
967
+ "The task may be too complex - try breaking it into smaller steps"
968
+ )
969
+ fallback.next_steps.append(
970
+ "Check the output above for any errors or partial progress"
971
+ )
972
+ if files_modified:
973
+ fallback.next_steps.append(
974
+ "Review modified files to see what changes were made"
975
+ )
973
976
 
974
- # Create a wrapper object that mimics AgentRun with the required attributes
975
- class AgentRunWrapper:
976
- def __init__(self, wrapped_run, fallback_result):
977
+ # Create comprehensive output
978
+ output_parts = [fallback.summary, ""]
979
+
980
+ if fallback.progress:
981
+ output_parts.append(f"Progress: {fallback.progress}")
982
+
983
+ if fallback.issues:
984
+ output_parts.append("\nWhat happened:")
985
+ output_parts.extend(fallback.issues)
986
+
987
+ if fallback.next_steps:
988
+ output_parts.append("\nSuggested next steps:")
989
+ for step in fallback.next_steps:
990
+ output_parts.append(f" • {step}")
991
+
992
+ comprehensive_output = "\n".join(output_parts)
993
+
994
+ # Create a wrapper object that mimics AgentRun with the required attributes
995
+ class AgentRunWrapper:
996
+ def __init__(self, wrapped_run, fallback_result):
997
+ self._wrapped = wrapped_run
998
+ self._result = fallback_result
999
+ self.response_state = response_state
1000
+
1001
+ def __getattribute__(self, name):
1002
+ # Handle special attributes first to avoid conflicts
1003
+ if name in ["_wrapped", "_result", "response_state"]:
1004
+ return object.__getattribute__(self, name)
1005
+
1006
+ # Explicitly handle 'result' to return our fallback result
1007
+ if name == "result":
1008
+ return object.__getattribute__(self, "_result")
1009
+
1010
+ # Delegate all other attributes to the wrapped object
1011
+ try:
1012
+ return getattr(object.__getattribute__(self, "_wrapped"), name)
1013
+ except AttributeError:
1014
+ raise AttributeError(
1015
+ f"'{type(self).__name__}' object has no attribute '{name}'"
1016
+ )
1017
+
1018
+ return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))
1019
+
1020
+ # For non-fallback cases, we still need to handle the response_state
1021
+ # Create a minimal wrapper just to add response_state
1022
+ class AgentRunWithState:
1023
+ def __init__(self, wrapped_run):
977
1024
  self._wrapped = wrapped_run
978
- self._result = fallback_result
979
1025
  self.response_state = response_state
980
1026
 
981
1027
  def __getattribute__(self, name):
982
- # Handle special attributes first to avoid conflicts
983
- if name in ["_wrapped", "_result", "response_state"]:
1028
+ # Handle special attributes first
1029
+ if name in ["_wrapped", "response_state"]:
984
1030
  return object.__getattribute__(self, name)
985
1031
 
986
- # Explicitly handle 'result' to return our fallback result
987
- if name == "result":
988
- return object.__getattribute__(self, "_result")
989
-
990
1032
  # Delegate all other attributes to the wrapped object
991
1033
  try:
992
1034
  return getattr(object.__getattribute__(self, "_wrapped"), name)
@@ -995,26 +1037,7 @@ async def process_request(
995
1037
  f"'{type(self).__name__}' object has no attribute '{name}'"
996
1038
  )
997
1039
 
998
- return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))
999
-
1000
- # For non-fallback cases, we still need to handle the response_state
1001
- # Create a minimal wrapper just to add response_state
1002
- class AgentRunWithState:
1003
- def __init__(self, wrapped_run):
1004
- self._wrapped = wrapped_run
1005
- self.response_state = response_state
1006
-
1007
- def __getattribute__(self, name):
1008
- # Handle special attributes first
1009
- if name in ["_wrapped", "response_state"]:
1010
- return object.__getattribute__(self, name)
1011
-
1012
- # Delegate all other attributes to the wrapped object
1013
- try:
1014
- return getattr(object.__getattribute__(self, "_wrapped"), name)
1015
- except AttributeError:
1016
- raise AttributeError(
1017
- f"'{type(self).__name__}' object has no attribute '{name}'"
1018
- )
1019
-
1020
1040
  return AgentRunWithState(agent_run)
1041
+ except asyncio.CancelledError:
1042
+ # When task is cancelled, raise UserAbortError instead
1043
+ raise UserAbortError("Operation was cancelled by user")