tunacode_cli-0.1.21-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- tunacode/__init__.py +0 -0
- tunacode/cli/textual_repl.tcss +283 -0
- tunacode/configuration/__init__.py +1 -0
- tunacode/configuration/defaults.py +45 -0
- tunacode/configuration/models.py +147 -0
- tunacode/configuration/models_registry.json +1 -0
- tunacode/configuration/pricing.py +74 -0
- tunacode/configuration/settings.py +35 -0
- tunacode/constants.py +227 -0
- tunacode/core/__init__.py +6 -0
- tunacode/core/agents/__init__.py +39 -0
- tunacode/core/agents/agent_components/__init__.py +48 -0
- tunacode/core/agents/agent_components/agent_config.py +441 -0
- tunacode/core/agents/agent_components/agent_helpers.py +290 -0
- tunacode/core/agents/agent_components/message_handler.py +99 -0
- tunacode/core/agents/agent_components/node_processor.py +477 -0
- tunacode/core/agents/agent_components/response_state.py +129 -0
- tunacode/core/agents/agent_components/result_wrapper.py +51 -0
- tunacode/core/agents/agent_components/state_transition.py +112 -0
- tunacode/core/agents/agent_components/streaming.py +271 -0
- tunacode/core/agents/agent_components/task_completion.py +40 -0
- tunacode/core/agents/agent_components/tool_buffer.py +44 -0
- tunacode/core/agents/agent_components/tool_executor.py +101 -0
- tunacode/core/agents/agent_components/truncation_checker.py +37 -0
- tunacode/core/agents/delegation_tools.py +109 -0
- tunacode/core/agents/main.py +545 -0
- tunacode/core/agents/prompts.py +66 -0
- tunacode/core/agents/research_agent.py +231 -0
- tunacode/core/compaction.py +218 -0
- tunacode/core/prompting/__init__.py +27 -0
- tunacode/core/prompting/loader.py +66 -0
- tunacode/core/prompting/prompting_engine.py +98 -0
- tunacode/core/prompting/sections.py +50 -0
- tunacode/core/prompting/templates.py +69 -0
- tunacode/core/state.py +409 -0
- tunacode/exceptions.py +313 -0
- tunacode/indexing/__init__.py +5 -0
- tunacode/indexing/code_index.py +432 -0
- tunacode/indexing/constants.py +86 -0
- tunacode/lsp/__init__.py +112 -0
- tunacode/lsp/client.py +351 -0
- tunacode/lsp/diagnostics.py +19 -0
- tunacode/lsp/servers.py +101 -0
- tunacode/prompts/default_prompt.md +952 -0
- tunacode/prompts/research/sections/agent_role.xml +5 -0
- tunacode/prompts/research/sections/constraints.xml +14 -0
- tunacode/prompts/research/sections/output_format.xml +57 -0
- tunacode/prompts/research/sections/tool_use.xml +23 -0
- tunacode/prompts/sections/advanced_patterns.xml +255 -0
- tunacode/prompts/sections/agent_role.xml +8 -0
- tunacode/prompts/sections/completion.xml +10 -0
- tunacode/prompts/sections/critical_rules.xml +37 -0
- tunacode/prompts/sections/examples.xml +220 -0
- tunacode/prompts/sections/output_style.xml +94 -0
- tunacode/prompts/sections/parallel_exec.xml +105 -0
- tunacode/prompts/sections/search_pattern.xml +100 -0
- tunacode/prompts/sections/system_info.xml +6 -0
- tunacode/prompts/sections/tool_use.xml +84 -0
- tunacode/prompts/sections/user_instructions.xml +3 -0
- tunacode/py.typed +0 -0
- tunacode/templates/__init__.py +5 -0
- tunacode/templates/loader.py +15 -0
- tunacode/tools/__init__.py +10 -0
- tunacode/tools/authorization/__init__.py +29 -0
- tunacode/tools/authorization/context.py +32 -0
- tunacode/tools/authorization/factory.py +20 -0
- tunacode/tools/authorization/handler.py +58 -0
- tunacode/tools/authorization/notifier.py +35 -0
- tunacode/tools/authorization/policy.py +19 -0
- tunacode/tools/authorization/requests.py +119 -0
- tunacode/tools/authorization/rules.py +72 -0
- tunacode/tools/bash.py +222 -0
- tunacode/tools/decorators.py +213 -0
- tunacode/tools/glob.py +353 -0
- tunacode/tools/grep.py +468 -0
- tunacode/tools/grep_components/__init__.py +9 -0
- tunacode/tools/grep_components/file_filter.py +93 -0
- tunacode/tools/grep_components/pattern_matcher.py +158 -0
- tunacode/tools/grep_components/result_formatter.py +87 -0
- tunacode/tools/grep_components/search_result.py +34 -0
- tunacode/tools/list_dir.py +205 -0
- tunacode/tools/prompts/bash_prompt.xml +10 -0
- tunacode/tools/prompts/glob_prompt.xml +7 -0
- tunacode/tools/prompts/grep_prompt.xml +10 -0
- tunacode/tools/prompts/list_dir_prompt.xml +7 -0
- tunacode/tools/prompts/read_file_prompt.xml +9 -0
- tunacode/tools/prompts/todoclear_prompt.xml +12 -0
- tunacode/tools/prompts/todoread_prompt.xml +16 -0
- tunacode/tools/prompts/todowrite_prompt.xml +28 -0
- tunacode/tools/prompts/update_file_prompt.xml +9 -0
- tunacode/tools/prompts/web_fetch_prompt.xml +11 -0
- tunacode/tools/prompts/write_file_prompt.xml +7 -0
- tunacode/tools/react.py +111 -0
- tunacode/tools/read_file.py +68 -0
- tunacode/tools/todo.py +222 -0
- tunacode/tools/update_file.py +62 -0
- tunacode/tools/utils/__init__.py +1 -0
- tunacode/tools/utils/ripgrep.py +311 -0
- tunacode/tools/utils/text_match.py +352 -0
- tunacode/tools/web_fetch.py +245 -0
- tunacode/tools/write_file.py +34 -0
- tunacode/tools/xml_helper.py +34 -0
- tunacode/types/__init__.py +166 -0
- tunacode/types/base.py +94 -0
- tunacode/types/callbacks.py +53 -0
- tunacode/types/dataclasses.py +121 -0
- tunacode/types/pydantic_ai.py +31 -0
- tunacode/types/state.py +122 -0
- tunacode/ui/__init__.py +6 -0
- tunacode/ui/app.py +542 -0
- tunacode/ui/commands/__init__.py +430 -0
- tunacode/ui/components/__init__.py +1 -0
- tunacode/ui/headless/__init__.py +5 -0
- tunacode/ui/headless/output.py +72 -0
- tunacode/ui/main.py +252 -0
- tunacode/ui/renderers/__init__.py +41 -0
- tunacode/ui/renderers/errors.py +197 -0
- tunacode/ui/renderers/panels.py +550 -0
- tunacode/ui/renderers/search.py +314 -0
- tunacode/ui/renderers/tools/__init__.py +21 -0
- tunacode/ui/renderers/tools/bash.py +247 -0
- tunacode/ui/renderers/tools/diagnostics.py +186 -0
- tunacode/ui/renderers/tools/glob.py +226 -0
- tunacode/ui/renderers/tools/grep.py +228 -0
- tunacode/ui/renderers/tools/list_dir.py +198 -0
- tunacode/ui/renderers/tools/read_file.py +226 -0
- tunacode/ui/renderers/tools/research.py +294 -0
- tunacode/ui/renderers/tools/update_file.py +237 -0
- tunacode/ui/renderers/tools/web_fetch.py +182 -0
- tunacode/ui/repl_support.py +226 -0
- tunacode/ui/screens/__init__.py +16 -0
- tunacode/ui/screens/model_picker.py +303 -0
- tunacode/ui/screens/session_picker.py +181 -0
- tunacode/ui/screens/setup.py +218 -0
- tunacode/ui/screens/theme_picker.py +90 -0
- tunacode/ui/screens/update_confirm.py +69 -0
- tunacode/ui/shell_runner.py +129 -0
- tunacode/ui/styles/layout.tcss +98 -0
- tunacode/ui/styles/modals.tcss +38 -0
- tunacode/ui/styles/panels.tcss +81 -0
- tunacode/ui/styles/theme-nextstep.tcss +303 -0
- tunacode/ui/styles/widgets.tcss +33 -0
- tunacode/ui/styles.py +18 -0
- tunacode/ui/widgets/__init__.py +23 -0
- tunacode/ui/widgets/command_autocomplete.py +62 -0
- tunacode/ui/widgets/editor.py +402 -0
- tunacode/ui/widgets/file_autocomplete.py +47 -0
- tunacode/ui/widgets/messages.py +46 -0
- tunacode/ui/widgets/resource_bar.py +182 -0
- tunacode/ui/widgets/status_bar.py +98 -0
- tunacode/utils/__init__.py +0 -0
- tunacode/utils/config/__init__.py +13 -0
- tunacode/utils/config/user_configuration.py +91 -0
- tunacode/utils/messaging/__init__.py +10 -0
- tunacode/utils/messaging/message_utils.py +34 -0
- tunacode/utils/messaging/token_counter.py +77 -0
- tunacode/utils/parsing/__init__.py +13 -0
- tunacode/utils/parsing/command_parser.py +55 -0
- tunacode/utils/parsing/json_utils.py +188 -0
- tunacode/utils/parsing/retry.py +146 -0
- tunacode/utils/parsing/tool_parser.py +267 -0
- tunacode/utils/security/__init__.py +15 -0
- tunacode/utils/security/command.py +106 -0
- tunacode/utils/system/__init__.py +25 -0
- tunacode/utils/system/gitignore.py +155 -0
- tunacode/utils/system/paths.py +190 -0
- tunacode/utils/ui/__init__.py +9 -0
- tunacode/utils/ui/file_filter.py +135 -0
- tunacode/utils/ui/helpers.py +24 -0
- tunacode_cli-0.1.21.dist-info/METADATA +170 -0
- tunacode_cli-0.1.21.dist-info/RECORD +174 -0
- tunacode_cli-0.1.21.dist-info/WHEEL +4 -0
- tunacode_cli-0.1.21.dist-info/entry_points.txt +2 -0
- tunacode_cli-0.1.21.dist-info/licenses/LICENSE +21 -0
tunacode/core/agents/agent_components/node_processor.py
@@ -0,0 +1,477 @@
"""Node processing functionality for agent responses."""

from collections.abc import Awaitable, Callable
from typing import Any, cast

from tunacode.constants import (
    ERROR_TOOL_ARGS_MISSING,
    ERROR_TOOL_CALL_ID_MISSING,
    UI_COLORS,
)
from tunacode.core.state import StateManager
from tunacode.exceptions import StateError, UserAbortError
from tunacode.types import AgentState, ToolArgs, ToolCallId
from tunacode.utils.ui import DotDict

from .response_state import ResponseState
from .task_completion import check_task_completion
from .tool_buffer import ToolBuffer
from .truncation_checker import check_for_truncation

colors = DotDict(UI_COLORS)

PART_KIND_TOOL_CALL = "tool-call"
PART_KIND_TOOL_RETURN = "tool-return"
UNKNOWN_TOOL_NAME = "unknown"


def _normalize_tool_args(raw_args: Any) -> ToolArgs:
    from tunacode.utils.parsing.command_parser import parse_args

    parsed_args = parse_args(raw_args)
    return cast(ToolArgs, parsed_args)


def _record_tool_call_args(part: Any, state_manager: StateManager) -> ToolArgs:
    raw_args = getattr(part, "args", {})
    parsed_args = _normalize_tool_args(raw_args)
    tool_call_id: ToolCallId | None = getattr(part, "tool_call_id", None)
    if tool_call_id:
        state_manager.session.tool_call_args_by_id[tool_call_id] = parsed_args
    return parsed_args


def _consume_tool_call_args(part: Any, state_manager: StateManager) -> ToolArgs:
    tool_call_id: ToolCallId | None = getattr(part, "tool_call_id", None)
    if not tool_call_id:
        raise StateError(ERROR_TOOL_CALL_ID_MISSING)
    tool_call_args = state_manager.session.tool_call_args_by_id.pop(tool_call_id, None)
    if tool_call_args is None:
        raise StateError(ERROR_TOOL_ARGS_MISSING.format(tool_call_id=tool_call_id))
    return tool_call_args


def _has_tool_calls(parts: list[Any]) -> bool:
    return any(getattr(part, "part_kind", None) == PART_KIND_TOOL_CALL for part in parts)


PART_KIND_TEXT = "text"


def _extract_fallback_tool_calls(
    parts: list[Any],
    state_manager: StateManager,
    response_state: "ResponseState | None",
) -> list[tuple[Any, ToolArgs]]:
    """Extract tool calls from text parts using fallback parsing.

    Called when no structured tool calls (part_kind == "tool-call") are found.
    Attempts to parse embedded tool calls from text content.

    Args:
        parts: Model response parts to scan
        state_manager: For recording tool call args
        response_state: For state transitions

    Returns:
        List of (part, args) tuples for found tool calls
    """
    from pydantic_ai.messages import ToolCallPart

    from tunacode.utils.parsing.tool_parser import (
        has_potential_tool_call,
        parse_tool_calls_from_text,
    )

    results: list[tuple[Any, ToolArgs]] = []

    # Collect text content from text parts
    text_content = ""
    for part in parts:
        part_kind = getattr(part, "part_kind", None)
        if part_kind == PART_KIND_TEXT:
            content = getattr(part, "content", "")
            if content:
                text_content += content + "\n"

    # Quick check before expensive parsing
    if not has_potential_tool_call(text_content):
        return results

    # Parse tool calls from combined text
    parsed_calls = parse_tool_calls_from_text(text_content)

    if not parsed_calls:
        return results

    # Transition to TOOL_EXECUTION on finding fallback tools
    if response_state and response_state.can_transition_to(AgentState.TOOL_EXECUTION):
        response_state.transition_to(AgentState.TOOL_EXECUTION)

    # Convert ParsedToolCall to ToolCallPart objects
    for parsed in parsed_calls:
        # Create a ToolCallPart compatible with existing infrastructure
        part = ToolCallPart(
            tool_name=parsed.tool_name,
            args=parsed.args,
            tool_call_id=parsed.tool_call_id,
        )

        # Record args for later retrieval
        tool_args = _normalize_tool_args(parsed.args)
        state_manager.session.tool_call_args_by_id[parsed.tool_call_id] = tool_args

        results.append((part, tool_args))

    return results


def _update_token_usage(model_response: Any, state_manager: StateManager) -> None:
    usage = getattr(model_response, "usage", None)
    if not usage:
        return

    prompt_tokens = getattr(usage, "request_tokens", 0) or 0
    completion_tokens = getattr(usage, "response_tokens", 0) or 0
    cached_tokens = getattr(usage, "cached_tokens", 0) or 0

    session = state_manager.session
    session.last_call_usage["prompt_tokens"] = prompt_tokens
    session.last_call_usage["completion_tokens"] = completion_tokens

    from tunacode.configuration.pricing import calculate_cost, get_model_pricing

    pricing = get_model_pricing(session.current_model)
    if pricing is not None:
        non_cached_input = max(0, prompt_tokens - cached_tokens)
        cost = calculate_cost(pricing, non_cached_input, cached_tokens, completion_tokens)
        session.last_call_usage["cost"] = cost
        session.session_total_usage["cost"] += cost
    else:
        session.last_call_usage["cost"] = 0.0

    session.session_total_usage["prompt_tokens"] += prompt_tokens
    session.session_total_usage["completion_tokens"] += completion_tokens


async def _process_node(
    node,
    tool_callback: Callable | None,
    state_manager: StateManager,
    tool_buffer: ToolBuffer | None = None,
    streaming_callback: Callable[[str], Awaitable[None]] | None = None,
    response_state: ResponseState | None = None,
    tool_result_callback: Callable[..., None] | None = None,
    tool_start_callback: Callable[[str], None] | None = None,
) -> tuple[bool, str | None]:
    """Process a single node from the agent response.

    Returns:
        tuple: (is_empty: bool, reason: Optional[str]) - True if empty/problematic
        response detected, with reason being one of: "empty", "truncated",
        "intention_without_action"
    """
    # Use the original callback directly - parallel execution will be handled differently
    buffering_callback = tool_callback
    empty_response_detected = False
    has_non_empty_content = False
    appears_truncated = False
    has_intention = False
    has_tool_calls = False

    # Transition to ASSISTANT at the start of node processing
    if response_state and response_state.can_transition_to(AgentState.ASSISTANT):
        response_state.transition_to(AgentState.ASSISTANT)

    if hasattr(node, "request"):
        state_manager.session.messages.append(node.request)

        # Display tool returns from previous iteration (they're in node.request)
        if tool_result_callback and hasattr(node.request, "parts"):
            for part in node.request.parts:
                part_kind = getattr(part, "part_kind", None)
                if part_kind != PART_KIND_TOOL_RETURN:
                    continue
                tool_name = getattr(part, "tool_name", UNKNOWN_TOOL_NAME)
                tool_args = _consume_tool_call_args(part, state_manager)
                content = getattr(part, "content", None)
                result_str = str(content) if content is not None else None
                tool_result_callback(
                    tool_name=tool_name,
                    status="completed",
                    args=tool_args,
                    result=result_str,
                )

    if hasattr(node, "thought") and node.thought:
        state_manager.session.messages.append({"thought": node.thought})

    if hasattr(node, "model_response"):
        state_manager.session.messages.append(node.model_response)

        _update_token_usage(node.model_response, state_manager)
        # Update context window token count
        state_manager.session.update_token_count()

        # Check for task completion marker in response content
        if response_state:
            has_non_empty_content = False
            appears_truncated = False
            all_content_parts = []

            # First, check if there are any tool calls in this response
            response_parts = node.model_response.parts
            has_queued_tools = _has_tool_calls(response_parts)

            for part in response_parts:
                if hasattr(part, "content") and isinstance(part.content, str):
                    # Check if we have any non-empty content
                    if part.content.strip():
                        has_non_empty_content = True
                        all_content_parts.append(part.content)

                    is_complete, cleaned_content = check_task_completion(part.content)
                    if is_complete:
                        # Validate completion - check for premature completion
                        if has_queued_tools:
                            # Agent is trying to complete with pending tools!
                            # Don't mark as complete - let the tools run first
                            # Update the content to remove the marker but don't set task_completed
                            part.content = cleaned_content
                        else:
                            # Check if content suggests pending actions
                            combined_text = " ".join(all_content_parts).lower()
                            pending_phrases = [
                                "let me",
                                "i'll check",
                                "i will",
                                "going to",
                                "about to",
                                "need to check",
                                "let's check",
                                "i should",
                                "need to find",
                                "let me see",
                                "i'll look",
                                "let me search",
                                "let me find",
                            ]
                            has_pending_intention = any(
                                phrase in combined_text for phrase in pending_phrases
                            )

                            # Also check for action verbs at end of content
                            # suggesting incomplete action
                            action_endings = [
                                "checking",
                                "searching",
                                "looking",
                                "finding",
                                "reading",
                                "analyzing",
                            ]
                            ends_with_action = any(
                                combined_text.rstrip().endswith(ending) for ending in action_endings
                            )

                            early_with_pending = (
                                has_pending_intention or ends_with_action
                            ) and state_manager.session.iteration_count <= 1

                            # Always strip the marker from content
                            part.content = cleaned_content

                            if not early_with_pending:
                                response_state.transition_to(AgentState.RESPONSE)
                                response_state.set_completion_detected(True)
                                response_state.has_user_response = True
                                break

            # Check for truncation patterns
            if all_content_parts:
                combined_content = " ".join(all_content_parts).strip()
                appears_truncated = check_for_truncation(combined_content)

            # If we only got empty content and no tool calls, we should NOT consider this
            # a valid response
            # This prevents the agent from stopping when it gets empty responses
            if not has_non_empty_content and not has_queued_tools:
                # Empty response with no tools - keep going
                empty_response_detected = True

            # Check if response appears truncated
            elif appears_truncated and not has_queued_tools:
                # Truncated response detected
                empty_response_detected = True

        # Process tool calls
        await _process_tool_calls(
            node,
            buffering_callback,
            state_manager,
            tool_buffer,
            response_state,
            tool_result_callback,
            tool_start_callback,
        )

        # If there were no tools and we processed a model response, transition to RESPONSE
        # Only transition if not already completed (set by completion marker path)
        if (
            response_state
            and response_state.can_transition_to(AgentState.RESPONSE)
            and not response_state.is_completed()
        ):
            response_state.transition_to(AgentState.RESPONSE)

    # Determine empty response reason
    if empty_response_detected:
        if appears_truncated:
            return True, "truncated"
        else:
            return True, "empty"

    # Check for intention without action
    if has_intention and not has_tool_calls and not has_non_empty_content:
        return True, "intention_without_action"

    return False, None


async def _process_tool_calls(
    node: Any,
    tool_callback: Callable | None,
    state_manager: StateManager,
    tool_buffer: ToolBuffer | None,
    response_state: ResponseState | None,
    tool_result_callback: Callable[..., None] | None = None,
    tool_start_callback: Callable[[str], None] | None = None,
) -> None:
    """
    Process tool calls from the node using smart batching strategy.

    Smart batching optimization:
    - Collect all read-only tools into a single batch (regardless of write tools in between)
    - Execute all read-only tools in one parallel batch
    - Execute write/execute tools sequentially in their original order

    This maximizes parallel execution efficiency by avoiding premature buffer flushes.
    """
    from tunacode.constants import READ_ONLY_TOOLS

    # Track if we're processing tool calls
    is_processing_tools = False

    # Phase 1: Collect and categorize all tools
    read_only_tasks = []
    research_agent_tasks = []
    write_execute_tasks = []
    tool_call_records: list[tuple[Any, ToolArgs]] = []

    for part in node.model_response.parts:
        part_kind = getattr(part, "part_kind", None)
        if part_kind != PART_KIND_TOOL_CALL:
            continue
        is_processing_tools = True
        # Transition to TOOL_EXECUTION on first tool call
        if response_state and response_state.can_transition_to(AgentState.TOOL_EXECUTION):
            response_state.transition_to(AgentState.TOOL_EXECUTION)

        tool_args = _record_tool_call_args(part, state_manager)
        tool_call_records.append((part, tool_args))

        if tool_callback:
            # Categorize: research agent vs read-only vs write/execute
            if part.tool_name == "research_codebase":
                research_agent_tasks.append((part, node))
            elif part.tool_name in READ_ONLY_TOOLS:
                read_only_tasks.append((part, node))
            else:
                write_execute_tasks.append((part, node))

    # Phase 1.5: FALLBACK - Parse text parts if no structured tool calls found
    # Handles non-standard formats like Qwen2-style XML, Hermes-style, etc.
    if not tool_call_records and tool_callback:
        fallback_tool_calls = _extract_fallback_tool_calls(
            node.model_response.parts,
            state_manager,
            response_state,
        )

        if fallback_tool_calls:
            is_processing_tools = True
            for part, tool_args in fallback_tool_calls:
                tool_call_records.append((part, tool_args))
                # Categorize fallback tools same as structured ones
                if part.tool_name == "research_codebase":
                    research_agent_tasks.append((part, node))
                elif part.tool_name in READ_ONLY_TOOLS:
                    read_only_tasks.append((part, node))
                else:
                    write_execute_tasks.append((part, node))

    # Phase 2: Execute research agent
    if research_agent_tasks and tool_callback:
        from .tool_executor import execute_tools_parallel

        if tool_start_callback:
            tool_start_callback("research")

        await execute_tools_parallel(research_agent_tasks, tool_callback)
        # Note: tool_result_callback is called when we see tool-return parts in node.request

    # Phase 3: Execute read-only tools in ONE parallel batch
    if read_only_tasks and tool_callback:
        from .tool_executor import execute_tools_parallel

        batch_id = getattr(state_manager.session, "batch_counter", 0) + 1
        state_manager.session.batch_counter = batch_id

        if tool_start_callback:
            names = [p.tool_name for p, _ in read_only_tasks[:3]]
            suffix = "..." if len(read_only_tasks) > 3 else ""
            tool_start_callback(", ".join(names) + suffix)

        await execute_tools_parallel(read_only_tasks, tool_callback)
        # Note: tool_result_callback is called when we see tool-return parts in node.request

    # Phase 4: Execute write/execute tools sequentially
    for part, node in write_execute_tasks:
        if tool_start_callback:
            tool_start_callback(part.tool_name)

        try:
            await tool_callback(part, node)
        except UserAbortError:
            raise

    # Track tool calls in session
    if tool_call_records:
        # Extract tool information for tracking
        for part, tool_args in tool_call_records:
            tool_call_id = getattr(part, "tool_call_id", None)
            tool_info = {
                "tool": part.tool_name,
                "args": tool_args,
                "timestamp": getattr(part, "timestamp", None),
                "tool_call_id": tool_call_id,
            }
            state_manager.session.tool_calls.append(tool_info)

    # After tools are processed, transition back to RESPONSE
    if (
        is_processing_tools
        and response_state
        and response_state.can_transition_to(AgentState.RESPONSE)
    ):
        response_state.transition_to(AgentState.RESPONSE)

    # Update has_user_response based on presence of actual response content
    if (
        response_state
        and hasattr(node, "result")
        and node.result
        and hasattr(node.result, "output")
        and node.result.output
    ):
        response_state.has_user_response = True
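To make the smart-batching strategy described in the _process_tool_calls docstring easier to follow, here is a minimal standalone sketch of the same idea: read-only tool calls are collected into one parallel batch while write/execute calls keep their original sequential order. The READ_ONLY_TOOLS set, the run_tool coroutine, and the asyncio.gather-based runner below are illustrative assumptions for the demo, not tunacode's actual execute_tools_parallel implementation.

import asyncio

# Assumed set of read-only tool names for this demo; tunacode keeps its own
# READ_ONLY_TOOLS constant in tunacode.constants.
READ_ONLY_TOOLS = {"read_file", "grep", "glob", "list_dir"}


async def run_tool(name: str, args: dict) -> str:
    # Stand-in for real tool work.
    await asyncio.sleep(0.01)
    return f"{name}({args})"


async def process_tool_calls(calls: list[tuple[str, dict]]) -> list[str]:
    # Phase 1: categorize every call, regardless of interleaving.
    read_only = [c for c in calls if c[0] in READ_ONLY_TOOLS]
    write_execute = [c for c in calls if c[0] not in READ_ONLY_TOOLS]

    results: list[str] = []
    # Phase 2: one parallel batch for all read-only calls.
    if read_only:
        results += await asyncio.gather(*(run_tool(n, a) for n, a in read_only))
    # Phase 3: write/execute calls run sequentially, in their original order.
    for name, args in write_execute:
        results.append(await run_tool(name, args))
    return results


if __name__ == "__main__":
    calls = [
        ("read_file", {"path": "a.py"}),
        ("bash", {"cmd": "ls"}),
        ("grep", {"pattern": "TODO"}),
    ]
    print(asyncio.run(process_tool_calls(calls)))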
tunacode/core/agents/agent_components/response_state.py
@@ -0,0 +1,129 @@
"""Response state management for tracking agent processing state."""

import threading
from dataclasses import dataclass, field

from tunacode.types import AgentState

from .state_transition import AGENT_TRANSITION_RULES, AgentStateMachine


@dataclass
class ResponseState:
    """Enhanced response state using enum-based state machine."""

    # Internal state machine
    _state_machine: AgentStateMachine = field(
        default_factory=lambda: AgentStateMachine(AgentState.USER_INPUT, AGENT_TRANSITION_RULES)
    )

    # Backward compatibility boolean flags (derived from enum state)
    _has_user_response: bool = False
    _task_completed: bool = False
    _awaiting_user_guidance: bool = False
    _has_final_synthesis: bool = False
    # Thread-safe lock for boolean flag access
    _lock: threading.RLock = field(default_factory=threading.RLock, init=False, repr=False)

    def __post_init__(self):
        """Initialize the state machine."""
        if not hasattr(self, "_state_machine"):
            self._state_machine = AgentStateMachine(AgentState.USER_INPUT, AGENT_TRANSITION_RULES)
        if not hasattr(self, "_lock"):
            self._lock = threading.RLock()

    @property
    def current_state(self) -> AgentState:
        """Get the current enum state."""
        return self._state_machine.current_state

    def transition_to(self, new_state: AgentState) -> None:
        """Transition to a new state."""
        self._state_machine.transition_to(new_state)

    def can_transition_to(self, target_state: AgentState) -> bool:
        """Check if a transition to the target state is allowed."""
        return self._state_machine.can_transition_to(target_state)

    # Backward compatibility properties
    @property
    def has_user_response(self) -> bool:
        """Legacy boolean flag for user response detection."""
        with self._lock:
            return self._has_user_response

    @has_user_response.setter
    def has_user_response(self, value: bool) -> None:
        """Set the legacy has_user_response flag."""
        with self._lock:
            self._has_user_response = value

    @property
    def task_completed(self) -> bool:
        """Legacy boolean flag for task completion (derived from state machine)."""
        with self._lock:
            # If explicitly set true, honor it; otherwise derive from state machine
            return bool(self._task_completed or self._state_machine.is_completed())

    @task_completed.setter
    def task_completed(self, value: bool) -> None:
        """Set the legacy task_completed flag and sync with state machine."""
        with self._lock:
            self._task_completed = bool(value)
            if value:
                # Ensure state reflects completion in RESPONSE
                try:
                    if (
                        self._state_machine.current_state != AgentState.RESPONSE
                        and self._state_machine.can_transition_to(AgentState.RESPONSE)
                    ):
                        self._state_machine.transition_to(AgentState.RESPONSE)
                except Exception:
                    # Best-effort: ignore invalid transition in legacy paths
                    pass
                self._state_machine.set_completion_detected(True)
            else:
                self._state_machine.set_completion_detected(False)

    @property
    def awaiting_user_guidance(self) -> bool:
        """Legacy boolean flag for awaiting user guidance."""
        with self._lock:
            return self._awaiting_user_guidance

    @awaiting_user_guidance.setter
    def awaiting_user_guidance(self, value: bool) -> None:
        """Set the legacy awaiting_user_guidance flag."""
        with self._lock:
            self._awaiting_user_guidance = value

    @property
    def has_final_synthesis(self) -> bool:
        """Legacy boolean flag for final synthesis."""
        with self._lock:
            return self._has_final_synthesis

    @has_final_synthesis.setter
    def has_final_synthesis(self, value: bool) -> None:
        """Set the legacy has_final_synthesis flag."""
        with self._lock:
            self._has_final_synthesis = value

    # Enhanced state management methods
    def set_completion_detected(self, detected: bool = True) -> None:
        """Mark that completion has been detected in the RESPONSE state."""
        self._state_machine.set_completion_detected(detected)

    def is_completed(self) -> bool:
        """Check if the task is completed according to the state machine."""
        return self._state_machine.is_completed()

    def reset_state(self, initial_state: AgentState | None = None) -> None:
        """Reset the state machine to initial state."""
        with self._lock:
            self._state_machine.reset(initial_state)
            # Reset legacy flags
            self._has_user_response = False
            self._task_completed = False
            self._awaiting_user_guidance = False
            self._has_final_synthesis = False
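As a hedged usage sketch of the ResponseState wrapper above (assuming tunacode-cli 0.1.21 is installed): transitions are guarded with can_transition_to, the same way node_processor.py drives the machine, because the concrete AGENT_TRANSITION_RULES table lives in state_transition.py and is not shown in this diff.

from tunacode.core.agents.agent_components.response_state import ResponseState
from tunacode.types import AgentState

state = ResponseState()              # state machine starts in AgentState.USER_INPUT
print(state.current_state)

# Guarded transition, mirroring how the node processor advances the machine.
if state.can_transition_to(AgentState.ASSISTANT):
    state.transition_to(AgentState.ASSISTANT)

# The legacy task_completed setter also nudges the machine toward RESPONSE and
# flags completion, so old call sites keep working.
state.task_completed = True
print(state.task_completed, state.is_completed())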
tunacode/core/agents/agent_components/result_wrapper.py
@@ -0,0 +1,51 @@
"""Result wrapper classes for agent responses."""

from typing import Any


class SimpleResult:
    """Simple result wrapper for fallback responses."""

    def __init__(self, output: str):
        self.output = output


class AgentRunWrapper:
    """Wrapper that adds response_state to agent run results."""

    def __init__(self, wrapped_run: Any, fallback_result: Any, response_state: Any = None):
        self._wrapped = wrapped_run
        self._result = fallback_result
        self.response_state = response_state

    def __getattribute__(self, name: str) -> Any:
        # Handle special attributes first to avoid conflicts
        if name in ["_wrapped", "_result", "response_state"]:
            return object.__getattribute__(self, name)

        # Explicitly handle 'result' to return our fallback result
        if name == "result":
            return object.__getattribute__(self, "_result")

        # Delegate all other attributes to the wrapped object
        try:
            return getattr(object.__getattribute__(self, "_wrapped"), name)
        except AttributeError:
            msg = f"'{type(self).__name__}' object has no attribute '{name}'"
            raise AttributeError(msg) from None


class AgentRunWithState:
    """Minimal wrapper to add response_state to agent runs."""

    def __init__(self, wrapped_run: Any, response_state: Any = None):
        self._wrapped = wrapped_run
        self.response_state = response_state

    def __getattribute__(self, name: str) -> Any:
        # Handle special attributes first
        if name in ["_wrapped", "response_state"]:
            return object.__getattribute__(self, name)

        # Delegate all other attributes to the wrapped object
        return getattr(object.__getattribute__(self, "_wrapped"), name)
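A hypothetical usage sketch of the wrappers above (again assuming tunacode-cli 0.1.21 is installed; the SimpleNamespace stand-in for an agent run is made up for the demo). It shows the __getattribute__ delegation: result and response_state come from the wrapper itself, while unknown attributes fall through to the wrapped run.

from types import SimpleNamespace

from tunacode.core.agents.agent_components.result_wrapper import (
    AgentRunWrapper,
    SimpleResult,
)

# Any object with attributes works as the wrapped run, because unknown
# attribute lookups are forwarded to it.
run = SimpleNamespace(result="original result", usage={"prompt_tokens": 42})

wrapped = AgentRunWrapper(run, SimpleResult("fallback output"), response_state=None)

print(wrapped.result.output)   # "fallback output" -- the wrapper's fallback result wins
print(wrapped.usage)           # {'prompt_tokens': 42} -- delegated to the wrapped run
print(wrapped.response_state)  # None -- stored on the wrapper itself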