tunacode-cli 0.0.47__py3-none-any.whl → 0.0.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- tunacode/cli/main.py +0 -4
- tunacode/cli/repl.py +7 -14
- tunacode/configuration/defaults.py +1 -0
- tunacode/constants.py +1 -6
- tunacode/core/agents/dspy_integration.py +223 -0
- tunacode/core/agents/dspy_tunacode.py +458 -0
- tunacode/core/agents/main.py +237 -311
- tunacode/core/agents/utils.py +6 -54
- tunacode/core/state.py +0 -41
- tunacode/exceptions.py +0 -23
- tunacode/prompts/dspy_task_planning.md +45 -0
- tunacode/prompts/dspy_tool_selection.md +58 -0
- tunacode/ui/input.py +1 -2
- tunacode/ui/keybindings.py +1 -17
- tunacode/ui/panels.py +2 -9
- tunacode/utils/token_counter.py +2 -1
- {tunacode_cli-0.0.47.dist-info → tunacode_cli-0.0.48.dist-info}/METADATA +3 -3
- {tunacode_cli-0.0.47.dist-info → tunacode_cli-0.0.48.dist-info}/RECORD +22 -26
- tunacode/core/recursive/__init__.py +0 -18
- tunacode/core/recursive/aggregator.py +0 -467
- tunacode/core/recursive/budget.py +0 -414
- tunacode/core/recursive/decomposer.py +0 -398
- tunacode/core/recursive/executor.py +0 -470
- tunacode/core/recursive/hierarchy.py +0 -487
- tunacode/ui/recursive_progress.py +0 -380
- tunacode/utils/retry.py +0 -163
- {tunacode_cli-0.0.47.dist-info → tunacode_cli-0.0.48.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.47.dist-info → tunacode_cli-0.0.48.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.47.dist-info → tunacode_cli-0.0.48.dist-info}/licenses/LICENSE +0 -0
- {tunacode_cli-0.0.47.dist-info → tunacode_cli-0.0.48.dist-info}/top_level.txt +0 -0
tunacode/core/agents/main.py
CHANGED
@@ -30,11 +30,9 @@ except ImportError:
 STREAMING_AVAILABLE = False

 from tunacode.constants import READ_ONLY_TOOLS
-from tunacode.core.recursive import RecursiveTaskExecutor
 from tunacode.core.state import StateManager
 from tunacode.core.token_usage.api_response_parser import ApiResponseParser
 from tunacode.core.token_usage.cost_calculator import CostCalculator
-from tunacode.exceptions import ToolBatchingJSONError, UserAbortError
 from tunacode.services.mcp import get_mcp_servers
 from tunacode.tools.bash import bash
 from tunacode.tools.glob import glob
@@ -473,17 +471,9 @@ async def _process_node(
 if not has_tool_calls and buffering_callback:
 for part in node.model_response.parts:
 if hasattr(part, "content") and isinstance(part.content, str):
-try:
-await extract_and_execute_tool_calls(
-part.content, buffering_callback, state_manager
-)
-except ToolBatchingJSONError as e:
-# Handle JSON parsing failure after retries
-logger.error(f"Tool batching JSON error: {e}")
-if state_manager.session.show_thoughts:
-await ui.error(str(e))
-# Continue processing other parts instead of failing completely
-continue
+await extract_and_execute_tool_calls(
+part.content, buffering_callback, state_manager
+)

 # Final flush: disabled temporarily while fixing the parallel execution design
 # The buffer is not being used in the current implementation
@@ -752,335 +742,251 @@ async def process_request(
 tool_callback: Optional[ToolCallback] = None,
 streaming_callback: Optional[callable] = None,
 ) -> AgentRun:
-
-
-
-
-
-
-
-fallback_enabled = state_manager.session.user_config.get("settings", {}).get(
-"fallback_response", True
-)
-
-# Check if recursive execution is enabled
-use_recursive = state_manager.session.user_config.get("settings", {}).get(
-"use_recursive_execution", True
-)
-recursive_threshold = state_manager.session.user_config.get("settings", {}).get(
-"recursive_complexity_threshold", 0.7
-)
+agent = get_or_create_agent(model, state_manager)
+mh = state_manager.session.messages.copy()
+# Get max iterations from config (default: 40)
+max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 40)
+fallback_enabled = state_manager.session.user_config.get("settings", {}).get(
+"fallback_response", True
+)

-
-
-
-
-
-
-
-
-
-
+from tunacode.configuration.models import ModelRegistry
+from tunacode.core.token_usage.usage_tracker import UsageTracker
+
+parser = ApiResponseParser()
+registry = ModelRegistry()
+calculator = CostCalculator(registry)
+usage_tracker = UsageTracker(parser, calculator, state_manager)
+response_state = ResponseState()
+
+# Reset iteration tracking for this request
+state_manager.session.iteration_count = 0
+
+# Create a request-level buffer for batching read-only tools across nodes
+tool_buffer = ToolBuffer()
+
+# Show TUNACODE.md preview if it was loaded and thoughts are enabled
+if state_manager.session.show_thoughts and hasattr(state_manager, "tunacode_preview"):
+from tunacode.ui import console as ui
+
+await ui.muted(state_manager.tunacode_preview)
+# Clear the preview after displaying it once
+delattr(state_manager, "tunacode_preview")
+
+# Show what we're sending to the API when thoughts are enabled
+if state_manager.session.show_thoughts:
+from tunacode.ui import console as ui
+
+await ui.muted("\n" + "=" * 60)
+await ui.muted("📤 SENDING TO API:")
+await ui.muted(f"Message: {message}")
+await ui.muted(f"Model: {model}")
+await ui.muted(f"Message History Length: {len(mh)}")
+await ui.muted("=" * 60)
+
+async with agent.iter(message, message_history=mh) as agent_run:
+i = 0
+async for node in agent_run:
+state_manager.session.current_iteration = i + 1
+
+# Handle token-level streaming for model request nodes
+if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
+async with node.stream(agent_run.ctx) as request_stream:
+async for event in request_stream:
+if isinstance(event, PartDeltaEvent) and isinstance(
+event.delta, TextPartDelta
+):
+# Stream individual token deltas
+if event.delta.content_delta:
+await streaming_callback(event.delta.content_delta)
+
+await _process_node(
+node,
+tool_callback,
+state_manager,
+tool_buffer,
+streaming_callback,
+usage_tracker,
+)
+if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
+if node.result.output:
+response_state.has_user_response = True
+i += 1
+state_manager.session.iteration_count = i

-
-
-
-)
+# Display iteration progress if thoughts are enabled
+if state_manager.session.show_thoughts:
+from tunacode.ui import console as ui

-
-complexity_result.should_decompose
-and complexity_result.total_complexity >= recursive_threshold
-):
-if state_manager.session.show_thoughts:
-from tunacode.ui import console as ui
+await ui.muted(f"\nITERATION: {i}/{max_iterations}")

-
-
-
-
-
+# Show summary of tools used so far
+if state_manager.session.tool_calls:
+tool_summary = {}
+for tc in state_manager.session.tool_calls:
+tool_name = tc.get("tool", "unknown")
+tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1

-
-
-request=message, parent_task_id=None, depth=0
+summary_str = ", ".join(
+[f"{name}: {count}" for name, count in tool_summary.items()]
 )
+await ui.muted(f"TOOLS USED: {summary_str}")

-
-
-
-except Exception as e:
-logger.warning(f"Recursive execution failed, falling back to normal: {e}")
-# Continue with normal execution
-
-from tunacode.configuration.models import ModelRegistry
-from tunacode.core.token_usage.usage_tracker import UsageTracker
-
-parser = ApiResponseParser()
-registry = ModelRegistry()
-calculator = CostCalculator(registry)
-usage_tracker = UsageTracker(parser, calculator, state_manager)
-response_state = ResponseState()
+if i >= max_iterations:
+if state_manager.session.show_thoughts:
+from tunacode.ui import console as ui

-
-
+await ui.warning(f"Reached maximum iterations ({max_iterations})")
+break

-#
-
+# Final flush: execute any remaining buffered read-only tools
+if tool_callback and tool_buffer.has_tasks():
+import time

-# Show TUNACODE.md preview if it was loaded and thoughts are enabled
-if state_manager.session.show_thoughts and hasattr(state_manager, "tunacode_preview"):
 from tunacode.ui import console as ui

-
-
-delattr(state_manager, "tunacode_preview")
-
-# Show what we're sending to the API when thoughts are enabled
-if state_manager.session.show_thoughts:
-from tunacode.ui import console as ui
+buffered_tasks = tool_buffer.flush()
+start_time = time.time()

 await ui.muted("\n" + "=" * 60)
-await ui.muted(
-
-
-await ui.muted(f"Message History Length: {len(mh)}")
+await ui.muted(
+f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
+)
 await ui.muted("=" * 60)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-await _process_node(
-node,
-tool_callback,
-state_manager,
-tool_buffer,
-streaming_callback,
-usage_tracker,
-)
-if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
-if node.result.output:
-response_state.has_user_response = True
-i += 1
-state_manager.session.iteration_count = i
-
-# Display iteration progress if thoughts are enabled
-if state_manager.session.show_thoughts:
-from tunacode.ui import console as ui
-
-await ui.muted(f"\nITERATION: {i}/{max_iterations}")
-
-# Show summary of tools used so far
-if state_manager.session.tool_calls:
-tool_summary = {}
-for tc in state_manager.session.tool_calls:
-tool_name = tc.get("tool", "unknown")
-tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1
-
-summary_str = ", ".join(
-[f"{name}: {count}" for name, count in tool_summary.items()]
-)
-await ui.muted(f"TOOLS USED: {summary_str}")
-
-if i >= max_iterations:
-if state_manager.session.show_thoughts:
-from tunacode.ui import console as ui
-
-await ui.warning(f"Reached maximum iterations ({max_iterations})")
-break
+for idx, (part, node) in enumerate(buffered_tasks, 1):
+tool_desc = f" [{idx}] {part.tool_name}"
+if hasattr(part, "args") and isinstance(part.args, dict):
+if part.tool_name == "read_file" and "file_path" in part.args:
+tool_desc += f" → {part.args['file_path']}"
+elif part.tool_name == "grep" and "pattern" in part.args:
+tool_desc += f" → pattern: '{part.args['pattern']}'"
+if "include_files" in part.args:
+tool_desc += f", files: '{part.args['include_files']}'"
+elif part.tool_name == "list_dir" and "directory" in part.args:
+tool_desc += f" → {part.args['directory']}"
+elif part.tool_name == "glob" and "pattern" in part.args:
+tool_desc += f" → pattern: '{part.args['pattern']}'"
+await ui.muted(tool_desc)
+await ui.muted("=" * 60)

-
-if tool_callback and tool_buffer.has_tasks():
-import time
+await execute_tools_parallel(buffered_tasks, tool_callback)

-
+elapsed_time = (time.time() - start_time) * 1000
+sequential_estimate = len(buffered_tasks) * 100
+speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0

-
-
+await ui.muted(
+f"✅ Final batch completed in {elapsed_time:.0f}ms "
+f"(~{speedup:.1f}x faster than sequential)\n"
+)

-
-
-
-
-
-
+# If we need to add a fallback response, create a wrapper
+if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
+patch_tool_messages("Task incomplete", state_manager=state_manager)
+response_state.has_final_synthesis = True
+
+# Extract context from the agent run
+tool_calls_summary = []
+files_modified = set()
+commands_run = []
+
+# Analyze message history for context
+for msg in state_manager.session.messages:
+if hasattr(msg, "parts"):
+for part in msg.parts:
+if hasattr(part, "part_kind") and part.part_kind == "tool-call":
+tool_name = getattr(part, "tool_name", "unknown")
+tool_calls_summary.append(tool_name)
+
+# Track specific operations
+if tool_name in ["write_file", "update_file"] and hasattr(part, "args"):
+if isinstance(part.args, dict) and "file_path" in part.args:
+files_modified.add(part.args["file_path"])
+elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
+if isinstance(part.args, dict) and "command" in part.args:
+commands_run.append(part.args["command"])
+
+# Build fallback response with context
+fallback = FallbackResponse(
+summary="Reached maximum iterations without producing a final response.",
+progress=f"Completed {i} iterations (limit: {max_iterations})",
+)

-
-
-
-
-tool_desc += f" → {part.args['file_path']}"
-elif part.tool_name == "grep" and "pattern" in part.args:
-tool_desc += f" → pattern: '{part.args['pattern']}'"
-if "include_files" in part.args:
-tool_desc += f", files: '{part.args['include_files']}'"
-elif part.tool_name == "list_dir" and "directory" in part.args:
-tool_desc += f" → {part.args['directory']}"
-elif part.tool_name == "glob" and "pattern" in part.args:
-tool_desc += f" → pattern: '{part.args['pattern']}'"
-await ui.muted(tool_desc)
-await ui.muted("=" * 60)
+# Get verbosity setting
+verbosity = state_manager.session.user_config.get("settings", {}).get(
+"fallback_verbosity", "normal"
+)

-
+if verbosity in ["normal", "detailed"]:
+# Add what was attempted
+if tool_calls_summary:
+tool_counts = {}
+for tool in tool_calls_summary:
+tool_counts[tool] = tool_counts.get(tool, 0) + 1
+
+fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
+for tool, count in sorted(tool_counts.items()):
+fallback.issues.append(f" • {tool}: {count}x")
+
+if verbosity == "detailed":
+if files_modified:
+fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
+for f in sorted(files_modified)[:5]: # Limit to 5 files
+fallback.issues.append(f" • {f}")
+if len(files_modified) > 5:
+fallback.issues.append(f" • ... and {len(files_modified) - 5} more")
+
+if commands_run:
+fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
+for cmd in commands_run[:3]: # Limit to 3 commands
+# Truncate long commands
+display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
+fallback.issues.append(f" • {display_cmd}")
+if len(commands_run) > 3:
+fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
+
+# Add helpful next steps
+fallback.next_steps.append(
+"The task may be too complex - try breaking it into smaller steps"
+)
+fallback.next_steps.append("Check the output above for any errors or partial progress")
+if files_modified:
+fallback.next_steps.append("Review modified files to see what changes were made")

-
-
-speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
+# Create comprehensive output
+output_parts = [fallback.summary, ""]

-
-
-f"✅ Final batch completed in {elapsed_time:.0f}ms "
-f"(~{speedup:.1f}x faster than sequential)\n"
-)
+if fallback.progress:
+output_parts.append(f"Progress: {fallback.progress}")

-
-
-
-response_state.has_final_synthesis = True
-
-# Extract context from the agent run
-tool_calls_summary = []
-files_modified = set()
-commands_run = []
-
-# Analyze message history for context
-for msg in state_manager.session.messages:
-if hasattr(msg, "parts"):
-for part in msg.parts:
-if hasattr(part, "part_kind") and part.part_kind == "tool-call":
-tool_name = getattr(part, "tool_name", "unknown")
-tool_calls_summary.append(tool_name)
-
-# Track specific operations
-if tool_name in ["write_file", "update_file"] and hasattr(
-part, "args"
-):
-if isinstance(part.args, dict) and "file_path" in part.args:
-files_modified.add(part.args["file_path"])
-elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
-if isinstance(part.args, dict) and "command" in part.args:
-commands_run.append(part.args["command"])
-
-# Build fallback response with context
-fallback = FallbackResponse(
-summary="Reached maximum iterations without producing a final response.",
-progress=f"Completed {i} iterations (limit: {max_iterations})",
-)
+if fallback.issues:
+output_parts.append("\nWhat happened:")
+output_parts.extend(fallback.issues)

-
-
-
-
+if fallback.next_steps:
+output_parts.append("\nSuggested next steps:")
+for step in fallback.next_steps:
+output_parts.append(f" • {step}")

-
-# Add what was attempted
-if tool_calls_summary:
-tool_counts = {}
-for tool in tool_calls_summary:
-tool_counts[tool] = tool_counts.get(tool, 0) + 1
-
-fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
-for tool, count in sorted(tool_counts.items()):
-fallback.issues.append(f" • {tool}: {count}x")
-
-if verbosity == "detailed":
-if files_modified:
-fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
-for f in sorted(files_modified)[:5]: # Limit to 5 files
-fallback.issues.append(f" • {f}")
-if len(files_modified) > 5:
-fallback.issues.append(
-f" • ... and {len(files_modified) - 5} more"
-)
-
-if commands_run:
-fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
-for cmd in commands_run[:3]: # Limit to 3 commands
-# Truncate long commands
-display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
-fallback.issues.append(f" • {display_cmd}")
-if len(commands_run) > 3:
-fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
-
-# Add helpful next steps
-fallback.next_steps.append(
-"The task may be too complex - try breaking it into smaller steps"
-)
-fallback.next_steps.append(
-"Check the output above for any errors or partial progress"
-)
-if files_modified:
-fallback.next_steps.append(
-"Review modified files to see what changes were made"
-)
+comprehensive_output = "\n".join(output_parts)

-
-
-
-if fallback.progress:
-output_parts.append(f"Progress: {fallback.progress}")
-
-if fallback.issues:
-output_parts.append("\nWhat happened:")
-output_parts.extend(fallback.issues)
-
-if fallback.next_steps:
-output_parts.append("\nSuggested next steps:")
-for step in fallback.next_steps:
-output_parts.append(f" • {step}")
-
-comprehensive_output = "\n".join(output_parts)
-
-# Create a wrapper object that mimics AgentRun with the required attributes
-class AgentRunWrapper:
-def __init__(self, wrapped_run, fallback_result):
-self._wrapped = wrapped_run
-self._result = fallback_result
-self.response_state = response_state
-
-def __getattribute__(self, name):
-# Handle special attributes first to avoid conflicts
-if name in ["_wrapped", "_result", "response_state"]:
-return object.__getattribute__(self, name)
-
-# Explicitly handle 'result' to return our fallback result
-if name == "result":
-return object.__getattribute__(self, "_result")
-
-# Delegate all other attributes to the wrapped object
-try:
-return getattr(object.__getattribute__(self, "_wrapped"), name)
-except AttributeError:
-raise AttributeError(
-f"'{type(self).__name__}' object has no attribute '{name}'"
-)
-
-return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))
-
-# For non-fallback cases, we still need to handle the response_state
-# Create a minimal wrapper just to add response_state
-class AgentRunWithState:
-def __init__(self, wrapped_run):
+# Create a wrapper object that mimics AgentRun with the required attributes
+class AgentRunWrapper:
+def __init__(self, wrapped_run, fallback_result):
 self._wrapped = wrapped_run
+self._result = fallback_result
 self.response_state = response_state

 def __getattribute__(self, name):
-# Handle special attributes first
-if name in ["_wrapped", "response_state"]:
+# Handle special attributes first to avoid conflicts
+if name in ["_wrapped", "_result", "response_state"]:
 return object.__getattribute__(self, name)

+# Explicitly handle 'result' to return our fallback result
+if name == "result":
+return object.__getattribute__(self, "_result")
+
 # Delegate all other attributes to the wrapped object
 try:
 return getattr(object.__getattribute__(self, "_wrapped"), name)
@@ -1089,6 +995,26 @@ async def process_request(
 f"'{type(self).__name__}' object has no attribute '{name}'"
 )

-return
-
-
+return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))
+
+# For non-fallback cases, we still need to handle the response_state
+# Create a minimal wrapper just to add response_state
+class AgentRunWithState:
+def __init__(self, wrapped_run):
+self._wrapped = wrapped_run
+self.response_state = response_state
+
+def __getattribute__(self, name):
+# Handle special attributes first
+if name in ["_wrapped", "response_state"]:
+return object.__getattribute__(self, name)
+
+# Delegate all other attributes to the wrapped object
+try:
+return getattr(object.__getattribute__(self, "_wrapped"), name)
+except AttributeError:
+raise AttributeError(
+f"'{type(self).__name__}' object has no attribute '{name}'"
+)
+
+return AgentRunWithState(agent_run)