tunacode-cli 0.0.70__py3-none-any.whl → 0.0.78.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic. Click here for more details.
- tunacode/cli/commands/__init__.py +0 -2
- tunacode/cli/commands/implementations/__init__.py +0 -3
- tunacode/cli/commands/implementations/debug.py +2 -2
- tunacode/cli/commands/implementations/development.py +10 -8
- tunacode/cli/commands/implementations/model.py +357 -29
- tunacode/cli/commands/implementations/system.py +3 -2
- tunacode/cli/commands/implementations/template.py +0 -2
- tunacode/cli/commands/registry.py +8 -7
- tunacode/cli/commands/slash/loader.py +2 -1
- tunacode/cli/commands/slash/validator.py +2 -1
- tunacode/cli/main.py +19 -1
- tunacode/cli/repl.py +90 -229
- tunacode/cli/repl_components/command_parser.py +2 -1
- tunacode/cli/repl_components/error_recovery.py +8 -5
- tunacode/cli/repl_components/output_display.py +1 -10
- tunacode/cli/repl_components/tool_executor.py +1 -13
- tunacode/configuration/defaults.py +2 -2
- tunacode/configuration/key_descriptions.py +284 -0
- tunacode/configuration/settings.py +0 -1
- tunacode/constants.py +6 -42
- tunacode/core/agents/__init__.py +43 -2
- tunacode/core/agents/agent_components/__init__.py +7 -0
- tunacode/core/agents/agent_components/agent_config.py +162 -158
- tunacode/core/agents/agent_components/agent_helpers.py +31 -2
- tunacode/core/agents/agent_components/node_processor.py +180 -146
- tunacode/core/agents/agent_components/response_state.py +123 -6
- tunacode/core/agents/agent_components/state_transition.py +116 -0
- tunacode/core/agents/agent_components/streaming.py +296 -0
- tunacode/core/agents/agent_components/task_completion.py +19 -6
- tunacode/core/agents/agent_components/tool_buffer.py +21 -1
- tunacode/core/agents/agent_components/tool_executor.py +10 -0
- tunacode/core/agents/main.py +522 -370
- tunacode/core/agents/main_legact.py +538 -0
- tunacode/core/agents/prompts.py +66 -0
- tunacode/core/agents/utils.py +29 -122
- tunacode/core/setup/__init__.py +0 -2
- tunacode/core/setup/config_setup.py +88 -227
- tunacode/core/setup/config_wizard.py +230 -0
- tunacode/core/setup/coordinator.py +2 -1
- tunacode/core/state.py +16 -64
- tunacode/core/token_usage/usage_tracker.py +3 -1
- tunacode/core/tool_authorization.py +352 -0
- tunacode/core/tool_handler.py +67 -60
- tunacode/prompts/system.xml +751 -0
- tunacode/services/mcp.py +97 -1
- tunacode/setup.py +0 -23
- tunacode/tools/base.py +54 -1
- tunacode/tools/bash.py +14 -0
- tunacode/tools/glob.py +4 -2
- tunacode/tools/grep.py +7 -17
- tunacode/tools/prompts/glob_prompt.xml +1 -1
- tunacode/tools/prompts/grep_prompt.xml +1 -0
- tunacode/tools/prompts/list_dir_prompt.xml +1 -1
- tunacode/tools/prompts/react_prompt.xml +23 -0
- tunacode/tools/prompts/read_file_prompt.xml +1 -1
- tunacode/tools/react.py +153 -0
- tunacode/tools/run_command.py +15 -0
- tunacode/types.py +14 -79
- tunacode/ui/completers.py +434 -50
- tunacode/ui/config_dashboard.py +585 -0
- tunacode/ui/console.py +63 -11
- tunacode/ui/input.py +8 -3
- tunacode/ui/keybindings.py +0 -18
- tunacode/ui/model_selector.py +395 -0
- tunacode/ui/output.py +40 -19
- tunacode/ui/panels.py +173 -49
- tunacode/ui/path_heuristics.py +91 -0
- tunacode/ui/prompt_manager.py +1 -20
- tunacode/ui/tool_ui.py +30 -8
- tunacode/utils/api_key_validation.py +93 -0
- tunacode/utils/config_comparator.py +340 -0
- tunacode/utils/models_registry.py +593 -0
- tunacode/utils/text_utils.py +18 -1
- {tunacode_cli-0.0.70.dist-info → tunacode_cli-0.0.78.6.dist-info}/METADATA +80 -12
- {tunacode_cli-0.0.70.dist-info → tunacode_cli-0.0.78.6.dist-info}/RECORD +78 -74
- tunacode/cli/commands/implementations/plan.py +0 -50
- tunacode/cli/commands/implementations/todo.py +0 -217
- tunacode/context.py +0 -71
- tunacode/core/setup/git_safety_setup.py +0 -186
- tunacode/prompts/system.md +0 -359
- tunacode/prompts/system.md.bak +0 -487
- tunacode/tools/exit_plan_mode.py +0 -273
- tunacode/tools/present_plan.py +0 -288
- tunacode/tools/prompts/exit_plan_mode_prompt.xml +0 -25
- tunacode/tools/prompts/present_plan_prompt.xml +0 -20
- tunacode/tools/prompts/todo_prompt.xml +0 -96
- tunacode/tools/todo.py +0 -456
- {tunacode_cli-0.0.70.dist-info → tunacode_cli-0.0.78.6.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.70.dist-info → tunacode_cli-0.0.78.6.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.70.dist-info → tunacode_cli-0.0.78.6.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,538 @@
|
|
|
1
|
+
"""Module: tunacode.core.agents.main
|
|
2
|
+
|
|
3
|
+
Main agent functionality and coordination for the TunaCode CLI.
|
|
4
|
+
Handles agent creation, configuration, and request processing.
|
|
5
|
+
|
|
6
|
+
CLAUDE_ANCHOR[main-agent-module]: Primary agent orchestration and lifecycle management
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import uuid
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional
|
|
14
|
+
|
|
15
|
+
from pydantic_ai import Agent
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from pydantic_ai import Tool # noqa: F401
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
from tunacode.core.logging.logger import get_logger
|
|
22
|
+
from tunacode.core.state import StateManager
|
|
23
|
+
from tunacode.exceptions import ToolBatchingJSONError, UserAbortError
|
|
24
|
+
from tunacode.services.mcp import ( # re-exported by design
|
|
25
|
+
cleanup_mcp_servers,
|
|
26
|
+
get_mcp_servers,
|
|
27
|
+
register_mcp_agent,
|
|
28
|
+
)
|
|
29
|
+
from tunacode.tools.react import ReactTool
|
|
30
|
+
from tunacode.types import (
|
|
31
|
+
AgentRun,
|
|
32
|
+
ModelName,
|
|
33
|
+
ToolCallback,
|
|
34
|
+
UsageTrackerProtocol,
|
|
35
|
+
)
|
|
36
|
+
from tunacode.ui import console as ui
|
|
37
|
+
from tunacode.ui.tool_descriptions import get_batch_description
|
|
38
|
+
|
|
39
|
+
from . import agent_components as ac
|
|
40
|
+
|
|
41
|
+
logger = get_logger(__name__)
|
|
42
|
+
|
|
43
|
+
__all__ = [
    "process_request",
    "get_mcp_servers",
    "cleanup_mcp_servers",
    "register_mcp_agent",
    "get_agent_tool",
    "check_query_satisfaction",
]

# Tuning knobs for the request-processing loop below.
DEFAULT_MAX_ITERATIONS = 15  # fallback when settings.max_iterations is absent
UNPRODUCTIVE_LIMIT = 3  # iterations without tool use before the agent is nudged
DEBUG_METRICS_DEFAULT = False  # fallback when settings.debug_metrics is absent
FORCED_REACT_INTERVAL = 2  # a react snapshot is forced every N iterations
FORCED_REACT_LIMIT = 5  # maximum forced react snapshots per request
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@dataclass(slots=True)
class RequestContext:
    """Per-request settings resolved once by _init_context()."""

    # Short (8-char) UUID prefix identifying this request in logs/UI.
    request_id: str
    # Iteration cap for the agent loop (from settings.max_iterations).
    max_iterations: int
    # Whether debug metrics were enabled (from settings.debug_metrics).
    debug_metrics: bool
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class StateFacade:
    """Thin facade over StateManager that funnels all session reads and writes.

    Centralizing every session mutation here avoids scattered setattr calls
    across the codebase.
    """

    def __init__(self, state_manager: StateManager) -> None:
        self.sm = state_manager

    def get_setting(self, dotted: str, default: Any) -> Any:
        """Walk a dotted path through the user config, falling back to *default*."""
        current: Dict[str, Any] = getattr(self.sm.session, "user_config", {}) or {}
        for segment in dotted.split("."):
            if not isinstance(current, dict) or segment not in current:
                return default
            current = current[segment]
        return current

    @property
    def show_thoughts(self) -> bool:
        """Whether debug 'thoughts' output is enabled for this session."""
        return bool(getattr(self.sm.session, "show_thoughts", False))

    @property
    def messages(self) -> list:
        """A shallow copy of the session's message history."""
        return list(getattr(self.sm.session, "messages", []))

    def set_request_id(self, req_id: str) -> None:
        """Record the request id on the session; log (never raise) if unsupported."""
        try:
            self.sm.session.request_id = req_id
        except AttributeError:
            logger.warning("Session missing 'request_id' attribute; unable to set (req=%s)", req_id)

    def reset_for_new_request(self) -> None:
        """Reset/initialize all per-request session fields in one place."""
        session = self.sm.session
        session.current_iteration = 0
        session.iteration_count = 0
        session.tool_calls = []
        session.react_forced_calls = 0
        session.react_guidance = []
        # Counter used by other subsystems; initialize only if absent.
        if not hasattr(session, "batch_counter"):
            session.batch_counter = 0
        # Track empty-response streaks.
        session.consecutive_empty_responses = 0
        # Always reset the original query so subsequent requests don't leak prompts.
        session.original_query = ""

    def set_original_query_once(self, q: str) -> None:
        """Store the query only if none is set yet; later calls are no-ops."""
        if not getattr(self.sm.session, "original_query", None):
            self.sm.session.original_query = q

    def set_iteration(self, i: int) -> None:
        """Mirror the current loop index into both iteration counters."""
        session = self.sm.session
        session.current_iteration = i
        session.iteration_count = i

    def increment_empty_response(self) -> int:
        """Bump and return the consecutive-empty-response counter."""
        streak = int(getattr(self.sm.session, "consecutive_empty_responses", 0)) + 1
        self.sm.session.consecutive_empty_responses = streak
        return streak

    def clear_empty_response(self) -> None:
        """Reset the consecutive-empty-response counter to zero."""
        self.sm.session.consecutive_empty_responses = 0
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _init_context(state: StateFacade) -> RequestContext:
    """Build the per-request context: a fresh request id plus resolved settings."""
    request_id = str(uuid.uuid4())[:8]
    state.set_request_id(request_id)

    return RequestContext(
        request_id=request_id,
        max_iterations=int(state.get_setting("settings.max_iterations", DEFAULT_MAX_ITERATIONS)),
        debug_metrics=bool(state.get_setting("settings.debug_metrics", DEBUG_METRICS_DEFAULT)),
    )
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def _prepare_message_history(state: StateFacade) -> list:
|
|
143
|
+
return state.messages
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
async def _maybe_stream_node_tokens(
    node: Any,
    agent_run_ctx: Any,
    state_manager: StateManager,
    streaming_cb: Optional[Callable[[str], Awaitable[None]]],
    request_id: str,
    iteration_index: int,
) -> None:
    """Stream token deltas for *node* via *streaming_cb*, when streaming is on.

    No-op when no callback was supplied or the node is not a model-request
    node; otherwise delegates to the component-level streaming helper.
    """
    if not streaming_cb:
        return

    # Delegate to component streaming helper (already optimized)
    if Agent.is_model_request_node(node):  # type: ignore[attr-defined]
        await ac.stream_model_request_node(
            node, agent_run_ctx, state_manager, streaming_cb, request_id, iteration_index
        )
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def _iteration_had_tool_use(node: Any) -> bool:
|
|
165
|
+
"""Inspect the node to see if model responded with any tool-call parts."""
|
|
166
|
+
if hasattr(node, "model_response"):
|
|
167
|
+
for part in getattr(node.model_response, "parts", []):
|
|
168
|
+
# pydantic-ai annotates tool calls; be resilient to attr differences
|
|
169
|
+
if getattr(part, "part_kind", None) == "tool-call":
|
|
170
|
+
return True
|
|
171
|
+
return False
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
async def _maybe_force_react_snapshot(
    iteration: int,
    state_manager: StateManager,
    react_tool: ReactTool,
    show_debug: bool,
    agent_run_ctx: Any | None = None,
) -> None:
    """CLAUDE_ANCHOR[react-forced-call]: Auto-log reasoning every two turns.

    Every FORCED_REACT_INTERVAL iterations (at most FORCED_REACT_LIMIT times
    per request) this forces a react "think" call, derives a short guidance
    string from the latest scratchpad entry and the most recent tool call,
    appends it to session.react_guidance, and — when *agent_run_ctx* is
    given — injects it as a synthetic system message so the model sees it on
    the next turn. Best-effort: any failure is logged and swallowed.
    """

    # Only fire on every FORCED_REACT_INTERVAL-th iteration.
    if iteration < FORCED_REACT_INTERVAL or iteration % FORCED_REACT_INTERVAL != 0:
        return

    # Respect the per-request cap on forced snapshots.
    forced_calls = getattr(state_manager.session, "react_forced_calls", 0)
    if forced_calls >= FORCED_REACT_LIMIT:
        return

    try:
        await react_tool.execute(
            action="think",
            thoughts=f"Auto snapshot after iteration {iteration}",
            next_action="continue",
        )
        state_manager.session.react_forced_calls = forced_calls + 1
        # Read the newest scratchpad entry (presumably appended by the react
        # tool call above — TODO confirm against ReactTool.execute).
        timeline = state_manager.session.react_scratchpad.get("timeline", [])
        latest = timeline[-1] if timeline else {"thoughts": "?", "next_action": "?"}
        summary = latest.get("thoughts", "")
        tool_calls = getattr(state_manager.session, "tool_calls", [])
        if tool_calls:
            last_tool = tool_calls[-1]
            tool_name = last_tool.get("tool", "tool")
            args = last_tool.get("args", {})
            # Args may arrive as a JSON string; decode best-effort.
            if isinstance(args, str):
                try:
                    import json

                    args = json.loads(args)
                except (ValueError, TypeError):
                    args = {}
            detail = ""
            # Tailor the "next step" hint to the most recent tool invocation.
            if tool_name == "grep" and isinstance(args, dict):
                pattern = args.get("pattern")
                detail = (
                    f"Review grep results for pattern '{pattern}'"
                    if pattern
                    else "Review grep results"
                )
            elif tool_name == "read_file" and isinstance(args, dict):
                path = args.get("filepath") or args.get("file_path")
                detail = f"Extract key notes from {path}" if path else "Summarize read_file output"
            else:
                detail = f"Act on {tool_name} findings"
        else:
            detail = "Plan your first lookup"
        guidance_entry = (
            f"React snapshot {forced_calls + 1}/{FORCED_REACT_LIMIT} at iteration {iteration}:"
            f" {summary}. Next: {detail}"
        )
        state_manager.session.react_guidance.append(guidance_entry)
        # Keep only the most recent FORCED_REACT_LIMIT guidance entries.
        if len(state_manager.session.react_guidance) > FORCED_REACT_LIMIT:
            state_manager.session.react_guidance = state_manager.session.react_guidance[
                -FORCED_REACT_LIMIT:
            ]

        if agent_run_ctx is not None:
            ctx_messages = getattr(agent_run_ctx, "messages", None)
            if isinstance(ctx_messages, list):
                ModelRequest, _, SystemPromptPart = ac.get_model_messages()
                system_part = SystemPromptPart(
                    content=f"[React Guidance] {guidance_entry}",
                    part_kind="system-prompt",
                )
                # CLAUDE_ANCHOR[react-system-injection]
                # Append synthetic system message so LLM receives react guidance next turn.
                # This mutates the active run context so the very next model prompt
                # includes the guidance.
                ctx_messages.append(ModelRequest(parts=[system_part], kind="request"))

        if show_debug:
            await ui.muted("\n[react → LLM] BEGIN\n" + guidance_entry + "\n[react → LLM] END\n")
    except Exception:
        # Best-effort: a failed snapshot must never break the main request loop.
        logger.debug("Forced react snapshot failed", exc_info=True)
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
async def _force_action_if_unproductive(
    message: str,
    unproductive_count: int,
    last_productive: int,
    i: int,
    max_iterations: int,
    state: StateFacade,
) -> None:
    """Inject a forcing user message after several iterations without tool use."""
    alert = (
        f"ALERT: No tools executed for {unproductive_count} iterations.\n\n"
        f"Last productive iteration: {last_productive}\n"
        f"Current iteration: {i}/{max_iterations}\n"
        f"Task: {message[:200]}...\n\n"
    )
    demands = (
        "You're describing actions but not executing them. You MUST:\n\n"
        "1. If task is COMPLETE: Start response with TUNACODE DONE:\n"
        "2. If task needs work: Execute a tool RIGHT NOW (grep, read_file, bash, etc.)\n"
        "3. If stuck: Explain the specific blocker\n\n"
        "NO MORE DESCRIPTIONS. Take ACTION or mark COMPLETE."
    )
    ac.create_user_message(alert + demands, state.sm)
    if state.show_thoughts:
        await ui.warning(f"NO PROGRESS: {unproductive_count} iterations without tool usage")
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
async def _ask_for_clarification(i: int, state: StateFacade) -> None:
    """Post a user-role message asking for guidance, summarizing progress so far."""
    _, tools_used_str = ac.create_progress_summary(getattr(state.sm.session, "tool_calls", []))

    original = getattr(state.sm.session, "original_query", "your request")
    clarification_content = "".join(
        [
            "I need clarification to continue.\n\n",
            f"Original request: {original}\n\n",
            "Progress so far:\n",
            f"- Iterations: {i}\n",
            f"- Tools used: {tools_used_str}\n\n",
            "If the task is complete, I should respond with TUNACODE DONE:\n",
            "Otherwise, please provide specific guidance on what to do next.",
        ]
    )

    ac.create_user_message(clarification_content, state.sm)
    if state.show_thoughts:
        await ui.muted("\nSEEKING CLARIFICATION: Asking user for guidance on task progress")
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
async def _finalize_buffered_tasks(
    tool_buffer: ac.ToolBuffer,
    tool_callback: Optional[ToolCallback],
    state: StateFacade,
) -> None:
    """Flush and execute any read-only tools still buffered at end of the run.

    The spinner/panel UI around the batch is best-effort: display failures are
    logged and never prevent the buffered tools from executing.
    """
    if not tool_callback or not tool_buffer.has_tasks():
        return

    buffered_tasks = tool_buffer.flush()

    # Cosmetic UI around batch (kept but isolated here)
    try:
        tool_names = [part.tool_name for part, _ in buffered_tasks]
        batch_msg = get_batch_description(len(buffered_tasks), tool_names)
        await ui.update_spinner_message(f"[bold #00d7ff]{batch_msg}...[/bold #00d7ff]", state.sm)

        # Build batch content as markdown for Rich panel
        batch_content = (
            f"**FINAL BATCH**: Executing {len(buffered_tasks)} buffered read-only tools\n\n"
        )
        for idx, (part, _node) in enumerate(buffered_tasks, 1):
            tool_desc = f" **[{idx}]** `{getattr(part, 'tool_name', 'tool')}`"
            args = getattr(part, "args", {})
            # Per-tool argument highlights (path / pattern / directory).
            if isinstance(args, dict):
                if part.tool_name == "read_file" and "file_path" in args:
                    tool_desc += f" → `{args['file_path']}`"
                elif part.tool_name == "grep" and "pattern" in args:
                    tool_desc += f" → pattern: `{args['pattern']}`"
                    if "include_files" in args:
                        tool_desc += f", files: `{args['include_files']}`"
                elif part.tool_name == "list_dir" and "directory" in args:
                    tool_desc += f" → `{args['directory']}`"
                elif part.tool_name == "glob" and "pattern" in args:
                    tool_desc += f" → pattern: `{args['pattern']}`"
            batch_content += f"{tool_desc}\n"
    except Exception:
        # UI is best-effort; never fail request because of display
        logger.debug("UI batch prelude failed (non-fatal)", exc_info=True)
        batch_content = None

    # Execute the flushed tools in parallel via the provided callback.
    await ac.execute_tools_parallel(buffered_tasks, tool_callback)

    # Post metrics and display (best-effort)
    try:
        if batch_content:
            await ui.batch(batch_content)

        from tunacode.constants import UI_THINKING_MESSAGE  # local import OK (rare path)

        await ui.update_spinner_message(UI_THINKING_MESSAGE, state.sm)
    except Exception:
        logger.debug("UI batch epilogue failed (non-fatal)", exc_info=True)
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
def get_agent_tool() -> tuple[type[Agent], type["Tool"]]:
    """Return the Agent and Tool classes, importing lazily at call time."""
    import pydantic_ai

    return pydantic_ai.Agent, pydantic_ai.Tool
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
async def check_query_satisfaction(
    agent: Agent,
    original_query: str,
    response: str,
    state_manager: StateManager,
) -> bool:
    """Legacy hook for compatibility; completion still signaled via DONE marker.

    All parameters are accepted but unused; callers always receive True.
    """
    return True
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
async def process_request(
    message: str,
    model: ModelName,
    state_manager: StateManager,
    tool_callback: Optional[ToolCallback] = None,
    streaming_callback: Optional[Callable[[str], Awaitable[None]]] = None,
    usage_tracker: Optional[
        UsageTrackerProtocol
    ] = None,  # currently passed through to _process_node
) -> AgentRun:
    """
    Process a single request to the agent.

    CLAUDE_ANCHOR[process-request-entry]: Main entry point for all agent requests

    Runs the agent loop up to the configured iteration cap, streaming tokens
    when a callback is given, nudging the model when it stops using tools,
    forcing periodic react snapshots, and flushing any buffered read-only
    tools at the end. Raises UserAbortError / ToolBatchingJSONError upward
    after patching tool messages for observability.
    """
    state = StateFacade(state_manager)
    ctx = _init_context(state)
    state.reset_for_new_request()
    state.set_original_query_once(message)

    # Acquire agent (no local caching here; rely on upstream policies)
    agent = ac.get_or_create_agent(model, state_manager)

    # Prepare history snapshot
    message_history = _prepare_message_history(state)

    # Per-request trackers
    tool_buffer = ac.ToolBuffer()
    response_state = ac.ResponseState()
    unproductive_iterations = 0
    last_productive_iteration = 0
    react_tool = ReactTool(state_manager=state_manager)

    try:
        async with agent.iter(message, message_history=message_history) as agent_run:
            i = 1
            async for node in agent_run:
                state.set_iteration(i)

                # Optional token streaming
                await _maybe_stream_node_tokens(
                    node, agent_run.ctx, state_manager, streaming_callback, ctx.request_id, i
                )

                # Core node processing (delegated to components)
                empty_response, empty_reason = await ac._process_node(  # noqa: SLF001 (private but stable in repo)
                    node,
                    tool_callback,
                    state_manager,
                    tool_buffer,
                    streaming_callback,
                    usage_tracker,
                    response_state,
                )

                # Handle empty response (aggressive retry prompt)
                if empty_response:
                    # NOTE(review): a threshold of >= 1 fires on the very first
                    # empty response, so the streak counter is effectively
                    # unused — confirm whether a higher threshold was intended.
                    if state.increment_empty_response() >= 1:
                        await ac.handle_empty_response(message, empty_reason, i, state)
                        state.clear_empty_response()
                else:
                    state.clear_empty_response()
                    # Track whether we produced visible user output this iteration
                    if getattr(getattr(node, "result", None), "output", None):
                        response_state.has_user_response = True

                # Productivity tracking (tool usage signal)
                if _iteration_had_tool_use(node):
                    unproductive_iterations = 0
                    last_productive_iteration = i
                else:
                    unproductive_iterations += 1
                    # Force action if no tool usage for several iterations
                    if (
                        unproductive_iterations >= UNPRODUCTIVE_LIMIT
                        and not response_state.task_completed
                    ):
                        await _force_action_if_unproductive(
                            message,
                            unproductive_iterations,
                            last_productive_iteration,
                            i,
                            ctx.max_iterations,
                            state,
                        )
                        unproductive_iterations = 0  # reset after nudge

                await _maybe_force_react_snapshot(
                    i,
                    state_manager,
                    react_tool,
                    state.show_thoughts,
                    agent_run.ctx,
                )

                # Optional debug progress
                if state.show_thoughts:
                    await ui.muted(
                        f"\nITERATION: {i}/{ctx.max_iterations} (Request ID: {ctx.request_id})"
                    )
                    tool_summary = ac.get_tool_summary(getattr(state.sm.session, "tool_calls", []))
                    if tool_summary:
                        summary_str = ", ".join(
                            f"{name}: {count}" for name, count in tool_summary.items()
                        )
                        await ui.muted(f"TOOLS USED: {summary_str}")

                # Ask for clarification if agent requested it
                if response_state.awaiting_user_guidance:
                    await _ask_for_clarification(i, state)
                    # Keep the flag set; downstream logic can react to new user input

                # Early completion
                if response_state.task_completed:
                    if state.show_thoughts:
                        await ui.success("Task completed successfully")
                    break

                # Reaching iteration cap → ask what to do next (no auto-extend by default)
                if i >= ctx.max_iterations and not response_state.task_completed:
                    _, tools_str = ac.create_progress_summary(
                        getattr(state.sm.session, "tool_calls", [])
                    )
                    if tools_str == "No tools used yet":
                        tools_str = "No tools used"

                    extend_content = (
                        f"I've reached the iteration limit ({ctx.max_iterations}).\n\n"
                        "Progress summary:\n"
                        f"- Tools used: {tools_str}\n"
                        f"- Iterations completed: {i}\n\n"
                        # Fixed typo ("Plese") to match prompts.format_iteration_limit.
                        "Please add more context to the task."
                    )
                    ac.create_user_message(extend_content, state.sm)
                    if state.show_thoughts:
                        await ui.muted(
                            "\nITERATION LIMIT: Awaiting user guidance at "
                            f"{ctx.max_iterations} iterations"
                        )
                    response_state.awaiting_user_guidance = True
                    # Do not auto-increase max_iterations here (avoid infinite loops)
                i += 1

            await _finalize_buffered_tasks(tool_buffer, tool_callback, state)

            # Normal path: return a wrapper that carries response_state
            return ac.AgentRunWithState(agent_run, response_state)

    except UserAbortError:
        raise
    except ToolBatchingJSONError as e:
        logger.error("Tool batching JSON error [req=%s]: %s", ctx.request_id, e, exc_info=True)
        ac.patch_tool_messages(
            f"Tool batching failed: {str(e)[:100]}...", state_manager=state_manager
        )
        raise
    except Exception as e:
        # Attach request/iteration context for observability
        safe_iter = getattr(state_manager.session, "current_iteration", "?")
        logger.error(
            "Error in process_request [req=%s iter=%s]: %s",
            ctx.request_id,
            safe_iter,
            e,
            exc_info=True,
        )
        ac.patch_tool_messages(
            f"Request processing failed: {str(e)[:100]}...", state_manager=state_manager
        )
        raise
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
"""Prompt templates for agent intervention mechanisms.
|
|
2
|
+
|
|
3
|
+
Extracted from main.py to centralize all prompt strings and formatting logic.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def format_no_progress(
    message: str,
    unproductive_count: int,
    last_productive: int,
    current: int,
    max_iterations: int,
) -> str:
    """Build the no-progress alert shown when the agent stops using tools.

    The task text is truncated to its first 200 characters.
    """
    status = (
        f"ALERT: No tools executed for {unproductive_count} iterations.\n\n"
        f"Last productive iteration: {last_productive}\n"
        f"Current iteration: {current}/{max_iterations}\n"
        f"Task: {message[:200]}...\n\n"
    )
    directive = (
        "You're describing actions but not executing them. You MUST:\n\n"
        "1. If task is COMPLETE: Start response with TUNACODE DONE:\n"
        "2. If task needs work: Execute a tool RIGHT NOW (grep, read_file, bash, etc.)\n"
        "3. If stuck: Explain the specific blocker\n\n"
        "NO MORE DESCRIPTIONS. Take ACTION or mark COMPLETE."
    )
    return status + directive
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def format_clarification(original_query: str, iteration: int, tools_used: str) -> str:
    """Build the clarification-request message with a short progress summary."""
    segments = [
        "I need clarification to continue.\n\n",
        f"Original request: {original_query}\n\n",
        "Progress so far:\n",
        f"- Iterations: {iteration}\n",
        f"- Tools used: {tools_used}\n\n",
        "If the task is complete, I should respond with TUNACODE DONE:\n",
        "Otherwise, please provide specific guidance on what to do next.",
    ]
    return "".join(segments)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def format_iteration_limit(max_iterations: int, iteration: int, tools_used: str) -> str:
    """Build the message shown when the agent hits its iteration cap.

    The placeholder "No tools used yet" is normalized to "No tools used".
    """
    label = "No tools used" if tools_used == "No tools used yet" else tools_used
    return (
        f"I've reached the iteration limit ({max_iterations}).\n\n"
        "Progress summary:\n"
        f"- Tools used: {label}\n"
        f"- Iterations completed: {iteration}\n\n"
        "Please add more context to the task."
    )
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# Note: Empty response handling is delegated to agent_components.handle_empty_response()
|
|
65
|
+
# which uses create_empty_response_message() from agent_helpers.py
|
|
66
|
+
# No template needed here as it's already modularized.
|