tunacode-cli 0.0.76.1__py3-none-any.whl → 0.0.76.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic. Click here for more details.

@@ -6,17 +6,25 @@ Handles agent creation, configuration, and request processing.
6
6
  CLAUDE_ANCHOR[main-agent-module]: Primary agent orchestration and lifecycle management
7
7
  """
8
8
 
9
- from typing import TYPE_CHECKING, Awaitable, Callable, Optional
9
+ from __future__ import annotations
10
+
11
+ import time
12
+ import uuid
13
+ from dataclasses import dataclass
14
+ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional
10
15
 
11
16
  from pydantic_ai import Agent
12
17
 
13
18
  if TYPE_CHECKING:
14
19
  from pydantic_ai import Tool # noqa: F401
15
20
 
21
+ from tunacode.core.agents.agent_components import ResponseState, ToolBuffer
22
+
16
23
  from tunacode.core.logging.logger import get_logger
17
24
  from tunacode.core.state import StateManager
18
25
  from tunacode.exceptions import ToolBatchingJSONError, UserAbortError
19
- from tunacode.services.mcp import get_mcp_servers
26
+ from tunacode.services.mcp import get_mcp_servers # re-exported by design
27
+ from tunacode.tools.react import ReactTool
20
28
  from tunacode.types import (
21
29
  AgentRun,
22
30
  ModelName,
@@ -25,70 +33,427 @@ from tunacode.types import (
25
33
  )
26
34
  from tunacode.ui.tool_descriptions import get_batch_description
27
35
 
28
- # Import agent components
29
- from .agent_components import (
30
- AgentRunWithState,
31
- AgentRunWrapper,
32
- ResponseState,
33
- SimpleResult,
34
- ToolBuffer,
35
- _process_node,
36
- check_task_completion,
37
- create_empty_response_message,
38
- create_fallback_response,
39
- create_progress_summary,
40
- create_user_message,
41
- execute_tools_parallel,
42
- extract_and_execute_tool_calls,
43
- format_fallback_output,
44
- get_model_messages,
45
- get_or_create_agent,
46
- get_recent_tools_context,
47
- get_tool_summary,
48
- parse_json_tool_calls,
49
- patch_tool_messages,
50
- )
51
- from .agent_components.streaming import stream_model_request_node
36
+ # Optional UI console (avoid nested imports in hot paths)
37
+ try:
38
+ from tunacode.ui import console as ui # rich-style helpers with async methods
39
+ except Exception: # pragma: no cover - UI is optional
52
40
 
53
- # Import streaming types with fallback for older versions
41
+ class _NoopUI: # minimal no-op shim
42
+ async def muted(self, *_: Any, **__: Any) -> None: ...
43
+ async def warning(self, *_: Any, **__: Any) -> None: ...
44
+ async def success(self, *_: Any, **__: Any) -> None: ...
45
+ async def update_spinner_message(self, *_: Any, **__: Any) -> None: ...
46
+
47
+ ui = _NoopUI() # type: ignore
48
+
49
+ # Streaming parts (keep guarded import but avoid per-iteration imports)
54
50
  try:
55
- from pydantic_ai.messages import PartDeltaEvent, TextPartDelta
51
+ from pydantic_ai.messages import PartDeltaEvent, TextPartDelta # type: ignore
56
52
 
57
53
  STREAMING_AVAILABLE = True
58
- except ImportError:
59
- PartDeltaEvent = None
60
- TextPartDelta = None
54
+ except Exception: # pragma: no cover
55
+ PartDeltaEvent = None # type: ignore
56
+ TextPartDelta = None # type: ignore
61
57
  STREAMING_AVAILABLE = False
62
58
 
59
+ # Agent components (flattned to a single module import to reduce coupling)
60
+ from . import agent_components as ac
61
+
63
62
  # Configure logging
64
63
  logger = get_logger(__name__)
65
64
 
65
+
66
+ # -----------------------
67
+ # Module exports
68
+ # -----------------------
66
69
  __all__ = [
67
- "ToolBuffer",
68
- "check_task_completion",
69
- "extract_and_execute_tool_calls",
70
- "get_model_messages",
71
- "parse_json_tool_calls",
72
- "patch_tool_messages",
73
- "get_mcp_servers",
74
- "check_query_satisfaction",
75
70
  "process_request",
76
- "get_or_create_agent",
77
- "_process_node",
78
- "ResponseState",
79
- "SimpleResult",
80
- "AgentRunWrapper",
81
- "AgentRunWithState",
82
- "execute_tools_parallel",
71
+ "get_mcp_servers",
83
72
  "get_agent_tool",
73
+ "check_query_satisfaction",
84
74
  ]
85
75
 
76
+ # -----------------------
77
+ # Constants & Defaults
78
+ # -----------------------
79
+ DEFAULT_MAX_ITERATIONS = 15
80
+ UNPRODUCTIVE_LIMIT = 3 # iterations without tool use before forcing action
81
+ FALLBACK_VERBOSITY_DEFAULT = "normal"
82
+ DEBUG_METRICS_DEFAULT = False
83
+ FORCED_REACT_INTERVAL = 2
84
+ FORCED_REACT_LIMIT = 5
85
+
86
+
87
+ # -----------------------
88
+ # Data structures
89
+ # -----------------------
90
+ @dataclass(slots=True)
91
+ class RequestContext:
92
+ request_id: str
93
+ max_iterations: int
94
+ debug_metrics: bool
95
+ fallback_enabled: bool
96
+
97
+
98
+ class StateFacade:
99
+ """Thin wrapper to centralize session mutations and reads."""
100
+
101
+ def __init__(self, state_manager: StateManager) -> None:
102
+ self.sm = state_manager
103
+
104
+ # ---- safe getters ----
105
+ def get_setting(self, dotted: str, default: Any) -> Any:
106
+ cfg: Dict[str, Any] = getattr(self.sm.session, "user_config", {}) or {}
107
+ node = cfg
108
+ for key in dotted.split("."):
109
+ if not isinstance(node, dict) or key not in node:
110
+ return default
111
+ node = node[key]
112
+ return node
113
+
114
+ @property
115
+ def show_thoughts(self) -> bool:
116
+ return bool(getattr(self.sm.session, "show_thoughts", False))
117
+
118
+ @property
119
+ def messages(self) -> list:
120
+ return list(getattr(self.sm.session, "messages", []))
121
+
122
+ # ---- safe setters ----
123
+ def set_request_id(self, req_id: str) -> None:
124
+ try:
125
+ self.sm.session.request_id = req_id
126
+ except AttributeError:
127
+ logger.warning("Session missing 'request_id' attribute; unable to set (req=%s)", req_id)
128
+
129
+ def reset_for_new_request(self) -> None:
130
+ """Reset/initialize fields needed for a new run."""
131
+ # Keep all assignments here to avoid scattered mutations across the codebase.
132
+ setattr(self.sm.session, "current_iteration", 0)
133
+ setattr(self.sm.session, "iteration_count", 0)
134
+ setattr(self.sm.session, "tool_calls", [])
135
+ setattr(self.sm.session, "react_forced_calls", 0)
136
+ setattr(self.sm.session, "react_guidance", [])
137
+ # Counter used by other subsystems; initialize if absent
138
+ if not hasattr(self.sm.session, "batch_counter"):
139
+ setattr(self.sm.session, "batch_counter", 0)
140
+ # Track empty response streaks
141
+ setattr(self.sm.session, "consecutive_empty_responses", 0)
142
+ # Always reset original query so subsequent requests don't leak prompts
143
+ setattr(self.sm.session, "original_query", "")
144
+
145
+ def set_original_query_once(self, q: str) -> None:
146
+ if not getattr(self.sm.session, "original_query", None):
147
+ setattr(self.sm.session, "original_query", q)
148
+
149
+ # ---- progress helpers ----
150
+ def set_iteration(self, i: int) -> None:
151
+ setattr(self.sm.session, "current_iteration", i)
152
+ setattr(self.sm.session, "iteration_count", i)
153
+
154
+ def increment_empty_response(self) -> int:
155
+ v = int(getattr(self.sm.session, "consecutive_empty_responses", 0)) + 1
156
+ setattr(self.sm.session, "consecutive_empty_responses", v)
157
+ return v
158
+
159
+ def clear_empty_response(self) -> None:
160
+ setattr(self.sm.session, "consecutive_empty_responses", 0)
161
+
162
+
163
+ # -----------------------
164
+ # Helper functions
165
+ # -----------------------
166
+ def _init_context(state: StateFacade, fallback_enabled: bool) -> RequestContext:
167
+ req_id = str(uuid.uuid4())[:8]
168
+ state.set_request_id(req_id)
169
+
170
+ max_iters = int(state.get_setting("settings.max_iterations", DEFAULT_MAX_ITERATIONS))
171
+ debug_metrics = bool(state.get_setting("settings.debug_metrics", DEBUG_METRICS_DEFAULT))
172
+
173
+ return RequestContext(
174
+ request_id=req_id,
175
+ max_iterations=max_iters,
176
+ debug_metrics=debug_metrics,
177
+ fallback_enabled=fallback_enabled,
178
+ )
179
+
180
+
181
+ def _prepare_message_history(state: StateFacade) -> list:
182
+ return state.messages
183
+
184
+
185
+ async def _maybe_stream_node_tokens(
186
+ node: Any,
187
+ agent_run_ctx: Any,
188
+ state_manager: StateManager,
189
+ streaming_cb: Optional[Callable[[str], Awaitable[None]]],
190
+ request_id: str,
191
+ iteration_index: int,
192
+ ) -> None:
193
+ if not streaming_cb or not STREAMING_AVAILABLE:
194
+ return
195
+
196
+ # Delegate to component streaming helper (already optimized)
197
+ if Agent.is_model_request_node(node): # type: ignore[attr-defined]
198
+ await ac.stream_model_request_node(
199
+ node, agent_run_ctx, state_manager, streaming_cb, request_id, iteration_index
200
+ )
201
+
202
+
203
+ def _iteration_had_tool_use(node: Any) -> bool:
204
+ """Inspect the node to see if model responded with any tool-call parts."""
205
+ if hasattr(node, "model_response"):
206
+ for part in getattr(node.model_response, "parts", []):
207
+ # pydantic-ai annotates tool calls; be resilient to attr differences
208
+ if getattr(part, "part_kind", None) == "tool-call":
209
+ return True
210
+ return False
211
+
212
+
213
+ async def _maybe_force_react_snapshot(
214
+ iteration: int,
215
+ state_manager: StateManager,
216
+ react_tool: ReactTool,
217
+ show_debug: bool,
218
+ agent_run_ctx: Any | None = None,
219
+ ) -> None:
220
+ """CLAUDE_ANCHOR[react-forced-call]: Auto-log reasoning every two turns."""
221
+
222
+ if iteration < FORCED_REACT_INTERVAL or iteration % FORCED_REACT_INTERVAL != 0:
223
+ return
224
+
225
+ forced_calls = getattr(state_manager.session, "react_forced_calls", 0)
226
+ if forced_calls >= FORCED_REACT_LIMIT:
227
+ return
228
+
229
+ try:
230
+ await react_tool.execute(
231
+ action="think",
232
+ thoughts=f"Auto snapshot after iteration {iteration}",
233
+ next_action="continue",
234
+ )
235
+ state_manager.session.react_forced_calls = forced_calls + 1
236
+ timeline = state_manager.session.react_scratchpad.get("timeline", [])
237
+ latest = timeline[-1] if timeline else {"thoughts": "?", "next_action": "?"}
238
+ summary = latest.get("thoughts", "")
239
+ tool_calls = getattr(state_manager.session, "tool_calls", [])
240
+ if tool_calls:
241
+ last_tool = tool_calls[-1]
242
+ tool_name = last_tool.get("tool", "tool")
243
+ args = last_tool.get("args", {})
244
+ if isinstance(args, str):
245
+ try:
246
+ import json
247
+
248
+ args = json.loads(args)
249
+ except (ValueError, TypeError):
250
+ args = {}
251
+ detail = ""
252
+ if tool_name == "grep" and isinstance(args, dict):
253
+ pattern = args.get("pattern")
254
+ detail = (
255
+ f"Review grep results for pattern '{pattern}'"
256
+ if pattern
257
+ else "Review grep results"
258
+ )
259
+ elif tool_name == "read_file" and isinstance(args, dict):
260
+ path = args.get("filepath") or args.get("file_path")
261
+ detail = f"Extract key notes from {path}" if path else "Summarize read_file output"
262
+ else:
263
+ detail = f"Act on {tool_name} findings"
264
+ else:
265
+ detail = "Plan your first lookup"
266
+ guidance_entry = (
267
+ f"React snapshot {forced_calls + 1}/{FORCED_REACT_LIMIT} at iteration {iteration}:"
268
+ f" {summary}. Next: {detail}"
269
+ )
270
+ state_manager.session.react_guidance.append(guidance_entry)
271
+ if len(state_manager.session.react_guidance) > FORCED_REACT_LIMIT:
272
+ state_manager.session.react_guidance = state_manager.session.react_guidance[
273
+ -FORCED_REACT_LIMIT:
274
+ ]
275
+
276
+ if agent_run_ctx is not None:
277
+ ctx_messages = getattr(agent_run_ctx, "messages", None)
278
+ if isinstance(ctx_messages, list):
279
+ ModelRequest, _, SystemPromptPart = ac.get_model_messages()
280
+ system_part = SystemPromptPart(
281
+ content=f"[React Guidance] {guidance_entry}",
282
+ part_kind="system-prompt",
283
+ )
284
+ # CLAUDE_ANCHOR[react-system-injection]
285
+ # Append synthetic system message so LLM receives react guidance next turn
286
+ # This mutates the active run context so the very next model prompt includes the guidance
287
+ ctx_messages.append(ModelRequest(parts=[system_part], kind="request"))
288
+
289
+ if show_debug:
290
+ await ui.muted("\n[react → LLM] BEGIN\n" + guidance_entry + "\n[react → LLM] END\n")
291
+ except Exception:
292
+ logger.debug("Forced react snapshot failed", exc_info=True)
293
+
86
294
 
295
+ async def _handle_empty_response(
296
+ message: str,
297
+ reason: str,
298
+ iter_index: int,
299
+ state: StateFacade,
300
+ ) -> None:
301
+ force_action_content = ac.create_empty_response_message(
302
+ message,
303
+ reason,
304
+ getattr(state.sm.session, "tool_calls", []),
305
+ iter_index,
306
+ state.sm,
307
+ )
308
+ ac.create_user_message(force_action_content, state.sm)
309
+
310
+ if state.show_thoughts:
311
+ await ui.warning("\nEMPTY RESPONSE FAILURE - AGGRESSIVE RETRY TRIGGERED")
312
+ await ui.muted(f" Reason: {reason}")
313
+ await ui.muted(
314
+ f" Recent tools: {ac.get_recent_tools_context(getattr(state.sm.session, 'tool_calls', []))}"
315
+ )
316
+ await ui.muted(" Injecting retry guidance prompt")
317
+
318
+
319
+ async def _force_action_if_unproductive(
320
+ message: str,
321
+ unproductive_count: int,
322
+ last_productive: int,
323
+ i: int,
324
+ max_iterations: int,
325
+ state: StateFacade,
326
+ ) -> None:
327
+ no_progress_content = (
328
+ f"ALERT: No tools executed for {unproductive_count} iterations.\n\n"
329
+ f"Last productive iteration: {last_productive}\n"
330
+ f"Current iteration: {i}/{max_iterations}\n"
331
+ f"Task: {message[:200]}...\n\n"
332
+ "You're describing actions but not executing them. You MUST:\n\n"
333
+ "1. If task is COMPLETE: Start response with TUNACODE DONE:\n"
334
+ "2. If task needs work: Execute a tool RIGHT NOW (grep, read_file, bash, etc.)\n"
335
+ "3. If stuck: Explain the specific blocker\n\n"
336
+ "NO MORE DESCRIPTIONS. Take ACTION or mark COMPLETE."
337
+ )
338
+ ac.create_user_message(no_progress_content, state.sm)
339
+ if state.show_thoughts:
340
+ await ui.warning(f"NO PROGRESS: {unproductive_count} iterations without tool usage")
341
+
342
+
343
+ async def _ask_for_clarification(i: int, state: StateFacade) -> None:
344
+ _, tools_used_str = ac.create_progress_summary(getattr(state.sm.session, "tool_calls", []))
345
+
346
+ clarification_content = (
347
+ "I need clarification to continue.\n\n"
348
+ f"Original request: {getattr(state.sm.session, 'original_query', 'your request')}\n\n"
349
+ "Progress so far:\n"
350
+ f"- Iterations: {i}\n"
351
+ f"- Tools used: {tools_used_str}\n\n"
352
+ "If the task is complete, I should respond with TUNACODE DONE:\n"
353
+ "Otherwise, please provide specific guidance on what to do next."
354
+ )
355
+
356
+ ac.create_user_message(clarification_content, state.sm)
357
+ if state.show_thoughts:
358
+ await ui.muted("\nSEEKING CLARIFICATION: Asking user for guidance on task progress")
359
+
360
+
361
+ async def _finalize_buffered_tasks(
362
+ tool_buffer: ToolBuffer,
363
+ tool_callback: Optional[ToolCallback],
364
+ state: StateFacade,
365
+ ) -> None:
366
+ if not tool_callback or not tool_buffer.has_tasks():
367
+ return
368
+
369
+ buffered_tasks = tool_buffer.flush()
370
+
371
+ # Cosmetic UI around batch (kept but isolated here)
372
+ try:
373
+ tool_names = [part.tool_name for part, _ in buffered_tasks]
374
+ batch_msg = get_batch_description(len(buffered_tasks), tool_names)
375
+ await ui.update_spinner_message(f"[bold #00d7ff]{batch_msg}...[/bold #00d7ff]", state.sm)
376
+ await ui.muted("\n" + "=" * 60)
377
+ await ui.muted(f"FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools")
378
+ await ui.muted("=" * 60)
379
+ for idx, (part, _node) in enumerate(buffered_tasks, 1):
380
+ tool_desc = f" [{idx}] {getattr(part, 'tool_name', 'tool')}"
381
+ args = getattr(part, "args", {})
382
+ if isinstance(args, dict):
383
+ if part.tool_name == "read_file" and "file_path" in args:
384
+ tool_desc += f" → {args['file_path']}"
385
+ elif part.tool_name == "grep" and "pattern" in args:
386
+ tool_desc += f" → pattern: '{args['pattern']}'"
387
+ if "include_files" in args:
388
+ tool_desc += f", files: '{args['include_files']}'"
389
+ elif part.tool_name == "list_dir" and "directory" in args:
390
+ tool_desc += f" → {args['directory']}"
391
+ elif part.tool_name == "glob" and "pattern" in args:
392
+ tool_desc += f" → pattern: '{args['pattern']}'"
393
+ await ui.muted(tool_desc)
394
+ await ui.muted("=" * 60)
395
+ except Exception:
396
+ # UI is best-effort; never fail request because of display
397
+ logger.debug("UI batch prelude failed (non-fatal)", exc_info=True)
398
+
399
+ # Execute
400
+ start = time.time()
401
+ await ac.execute_tools_parallel(buffered_tasks, tool_callback)
402
+ elapsed_ms = (time.time() - start) * 1000
403
+
404
+ # Post metrics (best-effort)
405
+ try:
406
+ sequential_estimate = len(buffered_tasks) * 100.0
407
+ speedup = (sequential_estimate / elapsed_ms) if elapsed_ms > 0 else 1.0
408
+ await ui.muted(
409
+ f"Final batch completed in {elapsed_ms:.0f}ms (~{speedup:.1f}x faster than sequential)\n"
410
+ )
411
+ from tunacode.constants import UI_THINKING_MESSAGE # local import OK (rare path)
412
+
413
+ await ui.update_spinner_message(UI_THINKING_MESSAGE, state.sm)
414
+ except Exception:
415
+ logger.debug("UI batch epilogue failed (non-fatal)", exc_info=True)
416
+
417
+
418
+ def _should_build_fallback(
419
+ response_state: ResponseState,
420
+ iter_idx: int,
421
+ max_iterations: int,
422
+ fallback_enabled: bool,
423
+ ) -> bool:
424
+ return (
425
+ fallback_enabled
426
+ and not response_state.has_user_response
427
+ and not response_state.task_completed
428
+ and iter_idx >= max_iterations
429
+ )
430
+
431
+
432
+ def _build_fallback_output(
433
+ iter_idx: int,
434
+ max_iterations: int,
435
+ state: StateFacade,
436
+ ) -> str:
437
+ verbosity = state.get_setting("settings.fallback_verbosity", FALLBACK_VERBOSITY_DEFAULT)
438
+ fallback = ac.create_fallback_response(
439
+ iter_idx,
440
+ max_iterations,
441
+ getattr(state.sm.session, "tool_calls", []),
442
+ getattr(state.sm.session, "messages", []),
443
+ verbosity,
444
+ )
445
+ return ac.format_fallback_output(fallback)
446
+
447
+
448
+ # -----------------------
449
+ # Public API
450
+ # -----------------------
87
451
  def get_agent_tool() -> tuple[type[Agent], type["Tool"]]:
88
- """Lazy import for Agent and Tool to avoid circular imports."""
89
- from pydantic_ai import Agent, Tool
452
+ """Return Agent and Tool classes without importing at module load time."""
453
+ from pydantic_ai import Agent as AgentCls
454
+ from pydantic_ai import Tool as ToolCls
90
455
 
91
- return Agent, Tool
456
+ return AgentCls, ToolCls
92
457
 
93
458
 
94
459
  async def check_query_satisfaction(
@@ -97,8 +462,8 @@ async def check_query_satisfaction(
97
462
  response: str,
98
463
  state_manager: StateManager,
99
464
  ) -> bool:
100
- """Check if the response satisfies the original query."""
101
- return True # Completion decided via DONE marker in RESPONSE
465
+ """Legacy hook for compatibility; completion still signaled via DONE marker."""
466
+ return True
102
467
 
103
468
 
104
469
  async def process_request(
@@ -107,84 +472,48 @@ async def process_request(
107
472
  state_manager: StateManager,
108
473
  tool_callback: Optional[ToolCallback] = None,
109
474
  streaming_callback: Optional[Callable[[str], Awaitable[None]]] = None,
110
- usage_tracker: Optional[UsageTrackerProtocol] = None,
475
+ usage_tracker: Optional[
476
+ UsageTrackerProtocol
477
+ ] = None, # currently passed through to _process_node
111
478
  fallback_enabled: bool = True,
112
479
  ) -> AgentRun:
113
480
  """
114
481
  Process a single request to the agent.
115
482
 
116
483
  CLAUDE_ANCHOR[process-request-entry]: Main entry point for all agent requests
117
-
118
- Args:
119
- message: The user's request
120
- model: The model to use
121
- state_manager: State manager instance
122
- tool_callback: Optional callback for tool execution
123
- streaming_callback: Optional callback for streaming responses
124
- usage_tracker: Optional usage tracker
125
- fallback_enabled: Whether to enable fallback responses
126
-
127
- Returns:
128
- AgentRun or wrapper with result
129
484
  """
130
- # Get or create agent for the model
131
- agent = get_or_create_agent(model, state_manager)
132
-
133
- # Create a unique request ID for debugging
134
- import uuid
135
-
136
- request_id = str(uuid.uuid4())[:8]
137
- # Attach request_id to session for downstream logging/context
138
- try:
139
- state_manager.session.request_id = request_id
140
- except Exception:
141
- pass
485
+ state = StateFacade(state_manager)
486
+ fallback_config_enabled = bool(state.get_setting("settings.fallback_response", True))
487
+ ctx = _init_context(state, fallback_enabled=fallback_enabled and fallback_config_enabled)
488
+ state.reset_for_new_request()
489
+ state.set_original_query_once(message)
142
490
 
143
- # Reset state for new request
144
- state_manager.session.current_iteration = 0
145
- state_manager.session.iteration_count = 0
146
- state_manager.session.tool_calls = []
491
+ # Acquire agent (no local caching here; rely on upstream policies)
492
+ agent = ac.get_or_create_agent(model, state_manager)
147
493
 
148
- # Initialize batch counter if not exists
149
- if not hasattr(state_manager.session, "batch_counter"):
150
- state_manager.session.batch_counter = 0
494
+ # Prepare history snapshot
495
+ message_history = _prepare_message_history(state)
151
496
 
152
- # Create tool buffer for parallel execution
153
- tool_buffer = ToolBuffer()
154
-
155
- # Track iterations and productivity
156
- max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 15)
497
+ # Per-request trackers
498
+ tool_buffer = ac.ToolBuffer()
499
+ response_state = ac.ResponseState()
157
500
  unproductive_iterations = 0
158
501
  last_productive_iteration = 0
159
-
160
- # Track response state
161
- response_state = ResponseState()
502
+ react_tool = ReactTool(state_manager=state_manager)
162
503
 
163
504
  try:
164
- # Get message history from session messages
165
- # Create a copy of the message history to avoid modifying the original
166
- message_history = list(state_manager.session.messages)
167
-
168
505
  async with agent.iter(message, message_history=message_history) as agent_run:
169
- # Process nodes iteratively
170
506
  i = 1
171
507
  async for node in agent_run:
172
- state_manager.session.current_iteration = i
173
- state_manager.session.iteration_count = i
174
-
175
- # Handle token-level streaming for model request nodes
176
- Agent, _ = get_agent_tool()
177
- if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
178
- await stream_model_request_node(
179
- node,
180
- agent_run.ctx,
181
- state_manager,
182
- streaming_callback,
183
- request_id,
184
- i,
185
- )
508
+ state.set_iteration(i)
186
509
 
187
- empty_response, empty_reason = await _process_node(
510
+ # Optional token streaming
511
+ await _maybe_stream_node_tokens(
512
+ node, agent_run.ctx, state_manager, streaming_callback, ctx.request_id, i
513
+ )
514
+
515
+ # Core node processing (delegated to components)
516
+ empty_response, empty_reason = await ac._process_node( # noqa: SLF001 (private but stable in repo)
188
517
  node,
189
518
  tool_callback,
190
519
  state_manager,
@@ -194,282 +523,135 @@ async def process_request(
194
523
  response_state,
195
524
  )
196
525
 
197
- # Handle empty response
526
+ # Handle empty response (aggressive retry prompt)
198
527
  if empty_response:
199
- if not hasattr(state_manager.session, "consecutive_empty_responses"):
200
- state_manager.session.consecutive_empty_responses = 0
201
- state_manager.session.consecutive_empty_responses += 1
202
-
203
- if state_manager.session.consecutive_empty_responses >= 1:
204
- force_action_content = create_empty_response_message(
205
- message,
206
- empty_reason,
207
- state_manager.session.tool_calls,
208
- i,
209
- state_manager,
210
- )
211
- create_user_message(force_action_content, state_manager)
212
-
213
- if state_manager.session.show_thoughts:
214
- from tunacode.ui import console as ui
528
+ if state.increment_empty_response() >= 1:
529
+ await _handle_empty_response(message, empty_reason, i, state)
530
+ state.clear_empty_response()
531
+ else:
532
+ state.clear_empty_response()
215
533
 
216
- await ui.warning(
217
- "\nEMPTY RESPONSE FAILURE - AGGRESSIVE RETRY TRIGGERED"
218
- )
219
- await ui.muted(f" Reason: {empty_reason}")
220
- await ui.muted(
221
- f" Recent tools: {get_recent_tools_context(state_manager.session.tool_calls)}"
222
- )
223
- await ui.muted(" Injecting retry guidance prompt")
534
+ # Track whether we produced visible user output this iteration
535
+ if getattr(getattr(node, "result", None), "output", None):
536
+ response_state.has_user_response = True
224
537
 
225
- state_manager.session.consecutive_empty_responses = 0
226
- else:
227
- if hasattr(state_manager.session, "consecutive_empty_responses"):
228
- state_manager.session.consecutive_empty_responses = 0
229
-
230
- if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
231
- if node.result.output:
232
- response_state.has_user_response = True
233
-
234
- # Track productivity - check if any tools were used in this iteration
235
- iteration_had_tools = False
236
- if hasattr(node, "model_response"):
237
- for part in node.model_response.parts:
238
- if hasattr(part, "part_kind") and part.part_kind == "tool-call":
239
- iteration_had_tools = True
240
- break
241
-
242
- if iteration_had_tools:
243
- # Reset unproductive counter
538
+ # Productivity tracking (tool usage signal)
539
+ if _iteration_had_tool_use(node):
244
540
  unproductive_iterations = 0
245
541
  last_productive_iteration = i
246
542
  else:
247
- # Increment unproductive counter
248
543
  unproductive_iterations += 1
249
544
 
250
- # After 3 unproductive iterations, force action
251
- if unproductive_iterations >= 3 and not response_state.task_completed:
252
- no_progress_content = f"""ALERT: No tools executed for {unproductive_iterations} iterations.
253
-
254
- Last productive iteration: {last_productive_iteration}
255
- Current iteration: {i}/{max_iterations}
256
- Task: {message[:200]}...
257
-
258
- You're describing actions but not executing them. You MUST:
259
-
260
- 1. If task is COMPLETE: Start response with TUNACODE DONE:
261
- 2. If task needs work: Execute a tool RIGHT NOW (grep, read_file, bash, etc.)
262
- 3. If stuck: Explain the specific blocker
263
-
264
- NO MORE DESCRIPTIONS. Take ACTION or mark COMPLETE."""
265
-
266
- create_user_message(no_progress_content, state_manager)
267
-
268
- if state_manager.session.show_thoughts:
269
- from tunacode.ui import console as ui
270
-
271
- await ui.warning(
272
- f"NO PROGRESS: {unproductive_iterations} iterations without tool usage"
273
- )
274
-
275
- unproductive_iterations = 0
276
-
277
- # REMOVED: Recursive satisfaction check that caused empty responses
278
- # The agent now decides completion using a DONE marker
279
- # This eliminates recursive agent calls and gives control back to the agent
280
-
281
- # Store original query for reference
282
- if not hasattr(state_manager.session, "original_query"):
283
- state_manager.session.original_query = message
284
-
285
- # Display iteration progress if thoughts are enabled
286
- if state_manager.session.show_thoughts:
287
- from tunacode.ui import console as ui
545
+ # Force action if no tool usage for several iterations
546
+ if (
547
+ unproductive_iterations >= UNPRODUCTIVE_LIMIT
548
+ and not response_state.task_completed
549
+ ):
550
+ await _force_action_if_unproductive(
551
+ message,
552
+ unproductive_iterations,
553
+ last_productive_iteration,
554
+ i,
555
+ ctx.max_iterations,
556
+ state,
557
+ )
558
+ unproductive_iterations = 0 # reset after nudge
288
559
 
289
- await ui.muted(f"\nITERATION: {i}/{max_iterations} (Request ID: {request_id})")
560
+ await _maybe_force_react_snapshot(
561
+ i,
562
+ state_manager,
563
+ react_tool,
564
+ state.show_thoughts,
565
+ agent_run.ctx,
566
+ )
290
567
 
291
- # Show summary of tools used so far
292
- if state_manager.session.tool_calls:
293
- tool_summary = get_tool_summary(state_manager.session.tool_calls)
568
+ # Optional debug progress
569
+ if state.show_thoughts:
570
+ await ui.muted(
571
+ f"\nITERATION: {i}/{ctx.max_iterations} (Request ID: {ctx.request_id})"
572
+ )
573
+ tool_summary = ac.get_tool_summary(getattr(state.sm.session, "tool_calls", []))
574
+ if tool_summary:
294
575
  summary_str = ", ".join(
295
- [f"{name}: {count}" for name, count in tool_summary.items()]
576
+ f"{name}: {count}" for name, count in tool_summary.items()
296
577
  )
297
578
  await ui.muted(f"TOOLS USED: {summary_str}")
298
579
 
299
- # User clarification: Ask user for guidance when explicitly awaiting
580
+ # Ask for clarification if agent requested it
300
581
  if response_state.awaiting_user_guidance:
301
- _, tools_used_str = create_progress_summary(state_manager.session.tool_calls)
302
-
303
- clarification_content = f"""I need clarification to continue.
304
-
305
- Original request: {getattr(state_manager.session, "original_query", "your request")}
306
-
307
- Progress so far:
308
- - Iterations: {i}
309
- - Tools used: {tools_used_str}
310
-
311
- If the task is complete, I should respond with TUNACODE DONE:
312
- Otherwise, please provide specific guidance on what to do next."""
313
-
314
- create_user_message(clarification_content, state_manager)
315
-
316
- if state_manager.session.show_thoughts:
317
- from tunacode.ui import console as ui
318
-
319
- await ui.muted(
320
- "\nSEEKING CLARIFICATION: Asking user for guidance on task progress"
321
- )
582
+ await _ask_for_clarification(i, state)
583
+ # Keep the flag set; downstream logic can react to new user input
322
584
 
323
- response_state.awaiting_user_guidance = True
324
-
325
- # Check if task is explicitly completed
585
+ # Early completion
326
586
  if response_state.task_completed:
327
- if state_manager.session.show_thoughts:
328
- from tunacode.ui import console as ui
329
-
587
+ if state.show_thoughts:
330
588
  await ui.success("Task completed successfully")
331
589
  break
332
590
 
333
- if i >= max_iterations and not response_state.task_completed:
334
- _, tools_str = create_progress_summary(state_manager.session.tool_calls)
335
- tools_str = tools_str if tools_str != "No tools used yet" else "No tools used"
336
-
337
- extend_content = f"""I've reached the iteration limit ({max_iterations}).
338
-
339
- Progress summary:
340
- - Tools used: {tools_str}
341
- - Iterations completed: {i}
342
-
343
- The task appears incomplete. Would you like me to:
344
- 1. Continue working (I can extend the limit)
345
- 2. Summarize what I've done and stop
346
- 3. Try a different approach
347
-
348
- Please let me know how to proceed."""
349
-
350
- create_user_message(extend_content, state_manager)
351
-
352
- if state_manager.session.show_thoughts:
353
- from tunacode.ui import console as ui
354
-
591
+ # Reaching iteration cap: ask the user what to do next (no auto-extend by default)
592
+ if i >= ctx.max_iterations and not response_state.task_completed:
593
+ _, tools_str = ac.create_progress_summary(
594
+ getattr(state.sm.session, "tool_calls", [])
595
+ )
596
+ if tools_str == "No tools used yet":
597
+ tools_str = "No tools used"
598
+
599
+ extend_content = (
600
+ f"I've reached the iteration limit ({ctx.max_iterations}).\n\n"
601
+ "Progress summary:\n"
602
+ f"- Tools used: {tools_str}\n"
603
+ f"- Iterations completed: {i}\n\n"
604
+ "The task appears incomplete. Would you like me to:\n"
605
+ "1. Continue working (extend limit)\n"
606
+ "2. Summarize what I've done and stop\n"
607
+ "3. Try a different approach\n\n"
608
+ "Please let me know how to proceed."
609
+ )
610
+ ac.create_user_message(extend_content, state.sm)
611
+ if state.show_thoughts:
355
612
  await ui.muted(
356
- f"\nITERATION LIMIT: Asking user for guidance at {max_iterations} iterations"
613
+ f"\nITERATION LIMIT: Awaiting user guidance at {ctx.max_iterations} iterations"
357
614
  )
358
-
359
- max_iterations += 5
360
615
  response_state.awaiting_user_guidance = True
616
+ # Do not auto-increase max_iterations here (avoid infinite loops)
361
617
 
362
- # Increment iteration counter
363
618
  i += 1
364
619
 
365
- # Final flush: execute any remaining buffered read-only tools
366
- if tool_callback and tool_buffer.has_tasks():
367
- import time
368
-
369
- from tunacode.ui import console as ui
370
-
371
- buffered_tasks = tool_buffer.flush()
372
- start_time = time.time()
620
+ # Final buffered read-only tasks (batch)
621
+ await _finalize_buffered_tasks(tool_buffer, tool_callback, state)
373
622
 
374
- # Update spinner message for final batch execution
375
- tool_names = [part.tool_name for part, _ in buffered_tasks]
376
- batch_msg = get_batch_description(len(buffered_tasks), tool_names)
377
- await ui.update_spinner_message(
378
- f"[bold #00d7ff]{batch_msg}...[/bold #00d7ff]", state_manager
379
- )
380
-
381
- await ui.muted("\n" + "=" * 60)
382
- await ui.muted(
383
- f"FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
384
- )
385
- await ui.muted("=" * 60)
386
-
387
- for idx, (part, node) in enumerate(buffered_tasks, 1):
388
- tool_desc = f" [{idx}] {part.tool_name}"
389
- if hasattr(part, "args") and isinstance(part.args, dict):
390
- if part.tool_name == "read_file" and "file_path" in part.args:
391
- tool_desc += f" → {part.args['file_path']}"
392
- elif part.tool_name == "grep" and "pattern" in part.args:
393
- tool_desc += f" → pattern: '{part.args['pattern']}'"
394
- if "include_files" in part.args:
395
- tool_desc += f", files: '{part.args['include_files']}'"
396
- elif part.tool_name == "list_dir" and "directory" in part.args:
397
- tool_desc += f" → {part.args['directory']}"
398
- elif part.tool_name == "glob" and "pattern" in part.args:
399
- tool_desc += f" → pattern: '{part.args['pattern']}'"
400
- await ui.muted(tool_desc)
401
- await ui.muted("=" * 60)
402
-
403
- await execute_tools_parallel(buffered_tasks, tool_callback)
404
-
405
- elapsed_time = (time.time() - start_time) * 1000
406
- sequential_estimate = len(buffered_tasks) * 100
407
- speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
408
-
409
- await ui.muted(
410
- f"Final batch completed in {elapsed_time:.0f}ms "
411
- f"(~{speedup:.1f}x faster than sequential)\n"
412
- )
413
-
414
- # Reset spinner back to thinking
415
- from tunacode.constants import UI_THINKING_MESSAGE
416
-
417
- await ui.update_spinner_message(UI_THINKING_MESSAGE, state_manager)
418
-
419
- # If we need to add a fallback response, create a wrapper
420
- if (
421
- not response_state.has_user_response
422
- and not response_state.task_completed
423
- and i >= max_iterations
424
- and fallback_enabled
425
- ):
426
- patch_tool_messages("Task incomplete", state_manager=state_manager)
623
+ # Build fallback synthesis if needed
624
+ if _should_build_fallback(response_state, i, ctx.max_iterations, ctx.fallback_enabled):
625
+ ac.patch_tool_messages("Task incomplete", state_manager=state_manager)
427
626
  response_state.has_final_synthesis = True
428
-
429
- verbosity = state_manager.session.user_config.get("settings", {}).get(
430
- "fallback_verbosity", "normal"
431
- )
432
- fallback = create_fallback_response(
433
- i,
434
- max_iterations,
435
- state_manager.session.tool_calls,
436
- state_manager.session.messages,
437
- verbosity,
438
- )
439
- comprehensive_output = format_fallback_output(fallback)
440
-
441
- wrapper = AgentRunWrapper(
442
- agent_run, SimpleResult(comprehensive_output), response_state
627
+ comprehensive_output = _build_fallback_output(i, ctx.max_iterations, state)
628
+ wrapper = ac.AgentRunWrapper(
629
+ agent_run, ac.SimpleResult(comprehensive_output), response_state
443
630
  )
444
631
  return wrapper
445
632
 
446
- # For non-fallback cases, we still need to handle the response_state
447
- # Create a minimal wrapper just to add response_state
448
- state_wrapper = AgentRunWithState(agent_run, response_state)
449
- return state_wrapper
633
+ # Normal path: return a wrapper that carries response_state
634
+ return ac.AgentRunWithState(agent_run, response_state)
450
635
 
451
636
  except UserAbortError:
452
637
  raise
453
638
  except ToolBatchingJSONError as e:
454
- logger.error(f"Tool batching JSON error: {e}", exc_info=True)
455
- # Patch orphaned tool messages with error
456
- patch_tool_messages(f"Tool batching failed: {str(e)[:100]}...", state_manager=state_manager)
457
- # Re-raise to be handled by caller
639
+ logger.error("Tool batching JSON error [req=%s]: %s", ctx.request_id, e, exc_info=True)
640
+ ac.patch_tool_messages(
641
+ f"Tool batching failed: {str(e)[:100]}...", state_manager=state_manager
642
+ )
458
643
  raise
459
644
  except Exception as e:
460
- # Include request context to aid debugging
461
- safe_iter = (
462
- state_manager.session.current_iteration
463
- if hasattr(state_manager.session, "current_iteration")
464
- else "?"
465
- )
645
+ # Attach request/iteration context for observability
646
+ safe_iter = getattr(state_manager.session, "current_iteration", "?")
466
647
  logger.error(
467
- f"Error in process_request [req={request_id} iter={safe_iter}]: {e}",
648
+ "Error in process_request [req=%s iter=%s]: %s",
649
+ ctx.request_id,
650
+ safe_iter,
651
+ e,
468
652
  exc_info=True,
469
653
  )
470
- # Patch orphaned tool messages with generic error
471
- patch_tool_messages(
654
+ ac.patch_tool_messages(
472
655
  f"Request processing failed: {str(e)[:100]}...", state_manager=state_manager
473
656
  )
474
- # Re-raise to be handled by caller
475
657
  raise