tunacode-cli 0.0.29__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -4,16 +4,21 @@ Main agent functionality and coordination for the TunaCode CLI.
 Handles agent creation, configuration, and request processing.
 """

+import asyncio
 import json
+import os
 import re
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Optional
+from typing import Any, Iterator, List, Optional, Tuple

+from tunacode.constants import READ_ONLY_TOOLS
 from tunacode.core.state import StateManager
 from tunacode.services.mcp import get_mcp_servers
 from tunacode.tools.bash import bash
+from tunacode.tools.glob import glob
 from tunacode.tools.grep import grep
+from tunacode.tools.list_dir import list_dir
 from tunacode.tools.read_file import read_file
 from tunacode.tools.run_command import run_command
 from tunacode.tools.update_file import update_file
@@ -22,6 +27,27 @@ from tunacode.types import (AgentRun, ErrorMessage, FallbackResponse, ModelName,
     ResponseState, SimpleResult, ToolCallback, ToolCallId, ToolName)


+class ToolBuffer:
+    """Buffer for collecting read-only tool calls to execute in parallel."""
+
+    def __init__(self):
+        self.read_only_tasks: List[Tuple[Any, Any]] = []
+
+    def add(self, part: Any, node: Any) -> None:
+        """Add a read-only tool call to the buffer."""
+        self.read_only_tasks.append((part, node))
+
+    def flush(self) -> List[Tuple[Any, Any]]:
+        """Return buffered tasks and clear the buffer."""
+        tasks = self.read_only_tasks
+        self.read_only_tasks = []
+        return tasks
+
+    def has_tasks(self) -> bool:
+        """Check if there are buffered tasks."""
+        return len(self.read_only_tasks) > 0
+
+
 # Lazy import for Agent and Tool
 def get_agent_tool():
     import importlib
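Note: ToolBuffer is a plain collect-then-flush container; everything interesting happens in the code that drains it. A minimal sketch of the intended flow, with stand-in strings in place of real pydantic-ai part/node objects:

    from typing import Any, List, Tuple

    class ToolBuffer:
        """Collect (part, node) pairs, then hand them off as one batch."""

        def __init__(self) -> None:
            self.read_only_tasks: List[Tuple[Any, Any]] = []

        def add(self, part: Any, node: Any) -> None:
            self.read_only_tasks.append((part, node))

        def flush(self) -> List[Tuple[Any, Any]]:
            tasks, self.read_only_tasks = self.read_only_tasks, []
            return tasks

        def has_tasks(self) -> bool:
            return len(self.read_only_tasks) > 0

    buffer = ToolBuffer()
    buffer.add("read_file part", "node-1")  # stand-in values, not real parts
    buffer.add("grep part", "node-1")
    batch = buffer.flush()                  # returns both entries, empties the buffer
    assert len(batch) == 2 and not buffer.has_tasks()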
@@ -37,7 +63,137 @@ def get_model_messages():
     return messages.ModelRequest, messages.ToolReturnPart


-async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+async def execute_tools_parallel(
+    tool_calls: List[Tuple[Any, Any]], callback: ToolCallback, return_exceptions: bool = True
+) -> List[Any]:
+    """
+    Execute multiple tool calls in parallel using asyncio.
+
+    Args:
+        tool_calls: List of (part, node) tuples
+        callback: The tool callback function to execute
+        return_exceptions: Whether to return exceptions or raise them
+
+    Returns:
+        List of results in the same order as input, with exceptions for failed calls
+    """
+    # Get max parallel from environment or default to CPU count
+    max_parallel = int(os.environ.get("TUNACODE_MAX_PARALLEL", os.cpu_count() or 4))
+
+    async def execute_with_error_handling(part, node):
+        try:
+            return await callback(part, node)
+        except Exception as e:
+            return e
+
+    # If we have more tools than max_parallel, execute in batches
+    if len(tool_calls) > max_parallel:
+        results = []
+        for i in range(0, len(tool_calls), max_parallel):
+            batch = tool_calls[i : i + max_parallel]
+            batch_tasks = [execute_with_error_handling(part, node) for part, node in batch]
+            batch_results = await asyncio.gather(*batch_tasks, return_exceptions=return_exceptions)
+            results.extend(batch_results)
+        return results
+    else:
+        tasks = [execute_with_error_handling(part, node) for part, node in tool_calls]
+        return await asyncio.gather(*tasks, return_exceptions=return_exceptions)
+
+
+def batch_read_only_tools(tool_calls: List[Any]) -> Iterator[List[Any]]:
+    """
+    Batch tool calls so read-only tools can be executed in parallel.
+
+    Yields batches where:
+    - Read-only tools are grouped together
+    - Write/execute tools are in their own batch (single item)
+    - Order within each batch is preserved
+
+    Args:
+        tool_calls: List of tool call objects with 'tool' attribute
+
+    Yields:
+        Batches of tool calls
+    """
+    if not tool_calls:
+        return
+
+    current_batch = []
+
+    for tool_call in tool_calls:
+        tool_name = tool_call.tool_name if hasattr(tool_call, "tool_name") else None
+
+        if tool_name in READ_ONLY_TOOLS:
+            # Add to current batch
+            current_batch.append(tool_call)
+        else:
+            # Yield any pending read-only batch
+            if current_batch:
+                yield current_batch
+                current_batch = []
+
+            # Yield write/execute tool as single-item batch
+            yield [tool_call]
+
+    # Yield any remaining read-only tools
+    if current_batch:
+        yield current_batch
+
+
+async def create_buffering_callback(
+    original_callback: ToolCallback, buffer: ToolBuffer, state_manager: StateManager
+) -> ToolCallback:
+    """
+    Create a callback wrapper that buffers read-only tools for parallel execution.
+
+    Args:
+        original_callback: The original tool callback
+        buffer: ToolBuffer instance to store read-only tools
+        state_manager: StateManager for UI access
+
+    Returns:
+        A wrapped callback function
+    """
+
+    async def buffering_callback(part, node):
+        tool_name = getattr(part, "tool_name", None)
+
+        if tool_name in READ_ONLY_TOOLS:
+            # Buffer read-only tools
+            buffer.add(part, node)
+            # Don't execute yet - will be executed in parallel batch
+            return None
+
+        # Non-read-only tool encountered - flush buffer first
+        if buffer.has_tasks():
+            buffered_tasks = buffer.flush()
+
+            # Execute buffered read-only tools in parallel
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+
+                await ui.muted(f"Executing {len(buffered_tasks)} read-only tools in parallel")
+
+            await execute_tools_parallel(buffered_tasks, original_callback)
+
+        # Execute the non-read-only tool
+        return await original_callback(part, node)
+
+    return buffering_callback
+
+
+async def _process_node(
+    node,
+    tool_callback: Optional[ToolCallback],
+    state_manager: StateManager,
+    tool_buffer: Optional[ToolBuffer] = None,
+):
+    from tunacode.ui import console as ui
+    from tunacode.utils.token_counter import estimate_tokens
+
+    # Use the original callback directly - parallel execution will be handled differently
+    buffering_callback = tool_callback
+
     if hasattr(node, "request"):
         state_manager.session.messages.append(node.request)

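Note: execute_tools_parallel caps fan-out at TUNACODE_MAX_PARALLEL (defaulting to the CPU count) and, because execute_with_error_handling returns exceptions instead of raising, one failed tool cannot cancel its batch. Also note that create_buffering_callback is itself declared async, so callers must await it to obtain the wrapped callback. A self-contained sketch of the gather-based batching, with a fake coroutine standing in for the real tool callback:

    import asyncio

    async def fake_tool(name: str) -> str:
        await asyncio.sleep(0.05)  # pretend network/disk I/O
        if name == "bad":
            raise RuntimeError("boom")
        return name

    async def run_batched(names, max_parallel=2):
        results = []
        for i in range(0, len(names), max_parallel):
            batch = names[i : i + max_parallel]
            # Exceptions come back as values, mirroring return_exceptions=True above.
            results.extend(
                await asyncio.gather(*(fake_tool(n) for n in batch), return_exceptions=True)
            )
        return results

    print(asyncio.run(run_batched(["read_file", "grep", "bad", "glob"])))
    # -> ['read_file', 'grep', RuntimeError('boom'), 'glob']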
@@ -45,36 +201,81 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manag
         state_manager.session.messages.append({"thought": node.thought})
         # Display thought immediately if show_thoughts is enabled
         if state_manager.session.show_thoughts:
-            from tunacode.ui import console as ui
-
-            await ui.muted(f"💭 THOUGHT: {node.thought}")
+            await ui.muted(f"THOUGHT: {node.thought}")

     if hasattr(node, "model_response"):
         state_manager.session.messages.append(node.model_response)

-        # Enhanced ReAct thought processing
+        # Enhanced display when thoughts are enabled
         if state_manager.session.show_thoughts:
+            # Show raw API response data
             import json
             import re

-            from tunacode.ui import console as ui
+            # Display the raw model response parts
+            await ui.muted("\n" + "=" * 60)
+            await ui.muted(" RAW API RESPONSE DATA:")
+            await ui.muted("=" * 60)
+
+            for idx, part in enumerate(node.model_response.parts):
+                part_data = {"part_index": idx, "part_kind": getattr(part, "part_kind", "unknown")}
+
+                # Add part-specific data
+                if hasattr(part, "content"):
+                    part_data["content"] = (
+                        part.content[:200] + "..." if len(str(part.content)) > 200 else part.content
+                    )
+                if hasattr(part, "tool_name"):
+                    part_data["tool_name"] = part.tool_name
+                if hasattr(part, "args"):
+                    part_data["args"] = part.args
+                if hasattr(part, "tool_call_id"):
+                    part_data["tool_call_id"] = part.tool_call_id
+
+                await ui.muted(json.dumps(part_data, indent=2))
+
+            await ui.muted("=" * 60)
+
+            # Count how many tool calls are in this response
+            tool_count = sum(
+                1
+                for part in node.model_response.parts
+                if hasattr(part, "part_kind") and part.part_kind == "tool-call"
+            )
+            if tool_count > 0:
+                await ui.muted(f"\n MODEL RESPONSE: Contains {tool_count} tool call(s)")

+            # Display LLM response content
             for part in node.model_response.parts:
                 if hasattr(part, "content") and isinstance(part.content, str):
                     content = part.content.strip()

+                    # Skip empty content
+                    if not content:
+                        continue
+
+                    # Estimate tokens in this response
+                    token_count = estimate_tokens(content)
+
+                    # Display non-JSON content as LLM response
+                    if not content.startswith('{"thought"'):
+                        # Truncate very long responses for display
+                        display_content = content[:500] + "..." if len(content) > 500 else content
+                        await ui.muted(f"\nRESPONSE: {display_content}")
+                        await ui.muted(f"TOKENS: ~{token_count}")
+
                     # Pattern 1: Inline JSON thoughts {"thought": "..."}
                     thought_pattern = r'\{"thought":\s*"([^"]+)"\}'
                     matches = re.findall(thought_pattern, content)
                     for thought in matches:
-                        await ui.muted(f"💭 REASONING: {thought}")
+                        await ui.muted(f"REASONING: {thought}")

                     # Pattern 2: Standalone thought JSON objects
                     try:
                         if content.startswith('{"thought"'):
                             thought_obj = json.loads(content)
                             if "thought" in thought_obj:
-                                await ui.muted(f"💭 REASONING: {thought_obj['thought']}")
+                                await ui.muted(f"REASONING: {thought_obj['thought']}")
                     except (json.JSONDecodeError, KeyError):
                         pass

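Note: the two JSON-thought patterns above are plain text heuristics. Pattern 1's [^"]+ group stops at the first quote, so thoughts containing escaped quotes are only recovered by the cleanup pass in the next hunk (Pattern 3). A quick illustration:

    import json
    import re

    thought_pattern = r'\{"thought":\s*"([^"]+)"\}'  # Pattern 1: inline fragments

    content = 'Scanning the repo. {"thought": "read main.py first"} Then edit.'
    for thought in re.findall(thought_pattern, content):
        print(f"REASONING: {thought}")               # -> REASONING: read main.py first

    standalone = '{"thought": "the tests cover the parser"}'
    if standalone.startswith('{"thought"'):           # Pattern 2: whole-message thought
        print(f"REASONING: {json.loads(standalone)['thought']}")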
@@ -85,41 +286,148 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manag
                             if thought not in [m for m in matches]:  # Avoid duplicates
                                 # Clean up escaped characters
                                 cleaned_thought = thought.replace('\\"', '"').replace("\\n", " ")
-                                await ui.muted(f"💭 REASONING: {cleaned_thought}")
-
-                    # Pattern 4: Text-based reasoning indicators
-                    reasoning_indicators = [
-                        (r"I need to (.+?)\.", "PLANNING"),
-                        (r"Let me (.+?)\.", "ACTION"),
-                        (r"The output shows (.+?)\.", "OBSERVATION"),
-                        (r"Based on (.+?), I should (.+?)\.", "DECISION"),
-                    ]
-
-                    for pattern, label in reasoning_indicators:
-                        indicator_matches = re.findall(pattern, content, re.IGNORECASE)
-                        for match in indicator_matches:
-                            if isinstance(match, tuple):
-                                match_text = " ".join(match)
-                            else:
-                                match_text = match
-                            await ui.muted(f"🎯 {label}: {match_text}")
-                        break  # Only show first match per pattern
-
-        # Check for tool calls and fallback to JSON parsing if needed
+                                await ui.muted(f"REASONING: {cleaned_thought}")
+
+        # Check for tool calls and collect them for potential parallel execution
         has_tool_calls = False
+        tool_parts = []  # Collect all tool calls from this node
+
         for part in node.model_response.parts:
             if part.part_kind == "tool-call" and tool_callback:
                 has_tool_calls = True
-                await tool_callback(part, node)
-            elif part.part_kind == "tool-return":
+                tool_parts.append(part)
+
+                # Display tool call details when thoughts are enabled
+                if state_manager.session.show_thoughts:
+                    # Show each tool as it's collected
+                    tool_desc = f" COLLECTED: {part.tool_name}"
+                    if hasattr(part, "args") and isinstance(part.args, dict):
+                        if part.tool_name == "read_file" and "file_path" in part.args:
+                            tool_desc += f" → {part.args['file_path']}"
+                        elif part.tool_name == "grep" and "pattern" in part.args:
+                            tool_desc += f" → pattern: '{part.args['pattern']}'"
+                        elif part.tool_name == "list_dir" and "directory" in part.args:
+                            tool_desc += f" → {part.args['directory']}"
+                        elif part.tool_name == "run_command" and "command" in part.args:
+                            tool_desc += f" → {part.args['command']}"
+                    await ui.muted(tool_desc)
+
+                # Track this tool call (moved outside thoughts block)
+                state_manager.session.tool_calls.append(
+                    {
+                        "tool": part.tool_name,
+                        "args": part.args if hasattr(part, "args") else {},
+                        "iteration": state_manager.session.current_iteration,
+                    }
+                )
+
+                # Track files if this is read_file (moved outside thoughts block)
+                if (
+                    part.tool_name == "read_file"
+                    and hasattr(part, "args")
+                    and isinstance(part.args, dict)
+                    and "file_path" in part.args
+                ):
+                    state_manager.session.files_in_context.add(part.args["file_path"])
+                    # Show files in context when thoughts are enabled
+                    if state_manager.session.show_thoughts:
+                        await ui.muted(
+                            f"\nFILES IN CONTEXT: {list(state_manager.session.files_in_context)}"
+                        )
+
+        # Execute tool calls - with ACTUAL parallel execution for read-only batches
+        if tool_parts:
+            if state_manager.session.show_thoughts:
+                await ui.muted(
+                    f"\n NODE SUMMARY: {len(tool_parts)} tool(s) collected in this response"
+                )
+
+            # Check if ALL tools in this node are read-only
+            all_read_only = all(part.tool_name in READ_ONLY_TOOLS for part in tool_parts)
+
+            if all_read_only and len(tool_parts) > 1 and buffering_callback:
+                # Execute read-only tools in parallel!
+                import time
+
+                start_time = time.time()
+
+                if state_manager.session.show_thoughts:
+                    await ui.muted("\n" + "=" * 60)
+                    await ui.muted(
+                        f" PARALLEL BATCH: Executing {len(tool_parts)} read-only tools concurrently"
+                    )
+                    await ui.muted("=" * 60)
+
+                    for idx, part in enumerate(tool_parts, 1):
+                        tool_desc = f" [{idx}] {part.tool_name}"
+                        if hasattr(part, "args") and isinstance(part.args, dict):
+                            if part.tool_name == "read_file" and "file_path" in part.args:
+                                tool_desc += f" → {part.args['file_path']}"
+                            elif part.tool_name == "grep" and "pattern" in part.args:
+                                tool_desc += f" → pattern: '{part.args['pattern']}'"
+                            elif part.tool_name == "list_dir" and "directory" in part.args:
+                                tool_desc += f" → {part.args['directory']}"
+                            elif part.tool_name == "glob" and "pattern" in part.args:
+                                tool_desc += f" → pattern: '{part.args['pattern']}'"
+                        await ui.muted(tool_desc)
+                    await ui.muted("=" * 60)
+
+                # Execute in parallel
+                tool_tuples = [(part, node) for part in tool_parts]
+                await execute_tools_parallel(tool_tuples, buffering_callback)
+
+                if state_manager.session.show_thoughts:
+                    elapsed_time = (time.time() - start_time) * 1000
+                    sequential_estimate = len(tool_parts) * 100
+                    speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
+                    await ui.muted(
+                        f" Parallel batch completed in {elapsed_time:.0f}ms ({speedup:.1f}x faster than sequential)"
+                    )
+
+            else:
+                # Sequential execution for mixed or write/execute tools
+                for part in tool_parts:
+                    if (
+                        state_manager.session.show_thoughts
+                        and part.tool_name not in READ_ONLY_TOOLS
+                    ):
+                        await ui.muted(f"\n SEQUENTIAL: {part.tool_name} (write/execute tool)")
+
+                    # Execute the tool
+                    if buffering_callback:
+                        await buffering_callback(part, node)
+
+        # Handle tool returns
+        for part in node.model_response.parts:
+            if part.part_kind == "tool-return":
                 obs_msg = f"OBSERVATION[{part.tool_name}]: {part.content[:2_000]}"
                 state_manager.session.messages.append(obs_msg)

+                # Display tool return when thoughts are enabled
+                if state_manager.session.show_thoughts:
+                    # Truncate for display
+                    display_content = (
+                        part.content[:200] + "..." if len(part.content) > 200 else part.content
+                    )
+                    await ui.muted(f"TOOL RESULT: {display_content}")
+
         # If no structured tool calls found, try parsing JSON from text content
-        if not has_tool_calls and tool_callback:
+        if not has_tool_calls and buffering_callback:
             for part in node.model_response.parts:
                 if hasattr(part, "content") and isinstance(part.content, str):
-                    await extract_and_execute_tool_calls(part.content, tool_callback, state_manager)
+                    await extract_and_execute_tool_calls(
+                        part.content, buffering_callback, state_manager
+                    )
+
+    # Final flush: disabled temporarily while fixing the parallel execution design
+    # The buffer is not being used in the current implementation
+    # if tool_callback and buffer.has_tasks():
+    #     buffered_tasks = buffer.flush()
+    #     if state_manager.session.show_thoughts:
+    #         await ui.muted(
+    #             f"Final flush: Executing {len(buffered_tasks)} remaining read-only tools in parallel"
+    #         )
+    #     await execute_tools_parallel(buffered_tasks, tool_callback)


 def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
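Note: the "(N.Nx faster than sequential)" figure printed above is a heuristic, not a measurement; it divides an assumed 100ms-per-tool sequential cost by the observed wall time. A runnable sketch of the same all-read-only check, dispatch, and estimate:

    import asyncio
    import time

    READ_ONLY_TOOLS = {"read_file", "grep", "list_dir", "glob"}  # stand-in for the constant

    async def fake_tool(name: str) -> str:
        await asyncio.sleep(0.1)   # pretend each tool costs ~100ms of I/O
        return name

    async def main() -> None:
        tool_parts = ["read_file", "grep", "glob"]
        start = time.time()
        if all(n in READ_ONLY_TOOLS for n in tool_parts) and len(tool_parts) > 1:
            await asyncio.gather(*(fake_tool(n) for n in tool_parts))   # parallel path
        else:
            for n in tool_parts:                                        # sequential path
                await fake_tool(n)
        elapsed_ms = (time.time() - start) * 1000
        speedup = (len(tool_parts) * 100) / elapsed_ms if elapsed_ms > 0 else 1.0
        print(f"{elapsed_ms:.0f}ms (~{speedup:.1f}x vs. assumed sequential)")  # ~100ms, ~3.0x

    asyncio.run(main())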
@@ -149,7 +457,9 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
             system_prompt=system_prompt,
             tools=[
                 Tool(bash, max_retries=max_retries),
+                Tool(glob, max_retries=max_retries),
                 Tool(grep, max_retries=max_retries),
+                Tool(list_dir, max_retries=max_retries),
                 Tool(read_file, max_retries=max_retries),
                 Tool(run_command, max_retries=max_retries),
                 Tool(update_file, max_retries=max_retries),
@@ -276,13 +586,13 @@ async def parse_json_tool_calls(
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui

-                await ui.muted(f"🔧 FALLBACK: Executed {tool_name} via JSON parsing")
+                await ui.muted(f"FALLBACK: Executed {tool_name} via JSON parsing")

         except Exception as e:
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui

-                await ui.error(f"Error executing fallback tool {tool_name}: {str(e)}")
+                await ui.error(f"Error executing fallback tool {tool_name}: {str(e)}")


 async def extract_and_execute_tool_calls(
@@ -324,13 +634,13 @@ async def extract_and_execute_tool_calls(
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui

-                await ui.muted(f"🔧 FALLBACK: Executed {tool_data['tool']} from code block")
+                await ui.muted(f"FALLBACK: Executed {tool_data['tool']} from code block")

         except (json.JSONDecodeError, KeyError, Exception) as e:
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui

-                await ui.error(f"Error parsing code block tool call: {str(e)}")
+                await ui.error(f"Error parsing code block tool call: {str(e)}")


 async def process_request(
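Note: this fallback path only runs when the model emitted no structured tool-call parts. The extraction regex itself is outside this hunk; the sketch below assumes fenced json code blocks shaped like {"tool": ..., "args": ...}, consistent with tool_data['tool'] above:

    import json
    import re

    CODE_BLOCK = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)  # assumed format

    text = (
        "I will inspect the file first.\n"
        '```json\n{"tool": "read_file", "args": {"file_path": "main.py"}}\n```'
    )

    for block in CODE_BLOCK.findall(text):
        try:
            tool_data = json.loads(block)
            print(f"FALLBACK: Executed {tool_data['tool']} from code block")
        except (json.JSONDecodeError, KeyError) as e:
            print(f"Error parsing code block tool call: {e}")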
@@ -349,28 +659,101 @@ async def process_request(

     response_state = ResponseState()

+    # Reset iteration tracking for this request
+    state_manager.session.iteration_count = 0
+
+    # Create a request-level buffer for batching read-only tools across nodes
+    tool_buffer = ToolBuffer()
+
+    # Show what we're sending to the API when thoughts are enabled
+    if state_manager.session.show_thoughts:
+        from tunacode.ui import console as ui
+
+        await ui.muted("\n" + "=" * 60)
+        await ui.muted("📤 SENDING TO API:")
+        await ui.muted(f"Message: {message}")
+        await ui.muted(f"Model: {model}")
+        await ui.muted(f"Message History Length: {len(mh)}")
+        await ui.muted("=" * 60)
+
     async with agent.iter(message, message_history=mh) as agent_run:
         i = 0
         async for node in agent_run:
-            await _process_node(node, tool_callback, state_manager)
+            state_manager.session.current_iteration = i + 1
+            await _process_node(node, tool_callback, state_manager, tool_buffer)
             if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
                 if node.result.output:
                     response_state.has_user_response = True
             i += 1
+            state_manager.session.iteration_count = i

             # Display iteration progress if thoughts are enabled
-            if state_manager.session.show_thoughts and i > 1:
+            if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui

-                await ui.muted(f"🔄 Iteration {i}/{max_iterations}")
+                await ui.muted(f"\nITERATION: {i}/{max_iterations}")
+
+                # Show summary of tools used so far
+                if state_manager.session.tool_calls:
+                    tool_summary = {}
+                    for tc in state_manager.session.tool_calls:
+                        tool_name = tc.get("tool", "unknown")
+                        tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1
+
+                    summary_str = ", ".join(
+                        [f"{name}: {count}" for name, count in tool_summary.items()]
+                    )
+                    await ui.muted(f"TOOLS USED: {summary_str}")

             if i >= max_iterations:
                 if state_manager.session.show_thoughts:
                     from tunacode.ui import console as ui

-                    await ui.warning(f"⚠️ Reached maximum iterations ({max_iterations})")
+                    await ui.warning(f"Reached maximum iterations ({max_iterations})")
                 break

+        # Final flush: execute any remaining buffered read-only tools
+        if tool_callback and tool_buffer.has_tasks():
+            import time
+
+            from tunacode.ui import console as ui
+
+            buffered_tasks = tool_buffer.flush()
+            start_time = time.time()
+
+            await ui.muted("\n" + "=" * 60)
+            await ui.muted(
+                f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
+            )
+            await ui.muted("=" * 60)
+
+            for idx, (part, node) in enumerate(buffered_tasks, 1):
+                tool_desc = f" [{idx}] {part.tool_name}"
+                if hasattr(part, "args") and isinstance(part.args, dict):
+                    if part.tool_name == "read_file" and "file_path" in part.args:
+                        tool_desc += f" → {part.args['file_path']}"
+                    elif part.tool_name == "grep" and "pattern" in part.args:
+                        tool_desc += f" → pattern: '{part.args['pattern']}'"
+                        if "include_files" in part.args:
+                            tool_desc += f", files: '{part.args['include_files']}'"
+                    elif part.tool_name == "list_dir" and "directory" in part.args:
+                        tool_desc += f" → {part.args['directory']}"
+                    elif part.tool_name == "glob" and "pattern" in part.args:
+                        tool_desc += f" → pattern: '{part.args['pattern']}'"
+                await ui.muted(tool_desc)
+            await ui.muted("=" * 60)
+
+            await execute_tools_parallel(buffered_tasks, tool_callback)
+
+            elapsed_time = (time.time() - start_time) * 1000
+            sequential_estimate = len(buffered_tasks) * 100
+            speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
+
+            await ui.muted(
+                f"✅ Final batch completed in {elapsed_time:.0f}ms "
+                f"(~{speedup:.1f}x faster than sequential)\n"
+            )
+
         # If we need to add a fallback response, create a wrapper
         if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
             patch_tool_messages("Task incomplete", state_manager=state_manager)
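Note: the per-node parallel batches and this final flush both funnel through execute_tools_parallel, so a single environment variable bounds concurrency everywhere:

    import os

    # Same lookup as execute_tools_parallel; a non-numeric value raises ValueError.
    max_parallel = int(os.environ.get("TUNACODE_MAX_PARALLEL", os.cpu_count() or 4))
    print(max_parallel)  # e.g. 8 on an 8-core machine, or 2 if TUNACODE_MAX_PARALLEL=2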
@@ -391,10 +774,10 @@ async def process_request(

                 # Track specific operations
                 if tool_name in ["write_file", "update_file"] and hasattr(part, "args"):
-                    if "file_path" in part.args:
+                    if isinstance(part.args, dict) and "file_path" in part.args:
                         files_modified.add(part.args["file_path"])
                 elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
-                    if "command" in part.args:
+                    if isinstance(part.args, dict) and "command" in part.args:
                         commands_run.append(part.args["command"])

             # Build fallback response with context
@@ -465,12 +848,25 @@ async def process_request(
             class AgentRunWrapper:
                 def __init__(self, wrapped_run, fallback_result):
                     self._wrapped = wrapped_run
-                    self.result = fallback_result
+                    self._result = fallback_result
                     self.response_state = response_state

-                def __getattr__(self, name):
+                def __getattribute__(self, name):
+                    # Handle special attributes first to avoid conflicts
+                    if name in ["_wrapped", "_result", "response_state"]:
+                        return object.__getattribute__(self, name)
+
+                    # Explicitly handle 'result' to return our fallback result
+                    if name == "result":
+                        return object.__getattribute__(self, "_result")
+
                     # Delegate all other attributes to the wrapped object
-                    return getattr(self._wrapped, name)
+                    try:
+                        return getattr(object.__getattribute__(self, "_wrapped"), name)
+                    except AttributeError:
+                        raise AttributeError(
+                            f"'{type(self).__name__}' object has no attribute '{name}'"
+                        )

             return AgentRunWrapper(agent_run, SimpleResult(comprehensive_output))

@@ -481,8 +877,17 @@ async def process_request(
                 self._wrapped = wrapped_run
                 self.response_state = response_state

-            def __getattr__(self, name):
+            def __getattribute__(self, name):
+                # Handle special attributes first
+                if name in ["_wrapped", "response_state"]:
+                    return object.__getattribute__(self, name)
+
                 # Delegate all other attributes to the wrapped object
-                return getattr(self._wrapped, name)
+                try:
+                    return getattr(object.__getattribute__(self, "_wrapped"), name)
+                except AttributeError:
+                    raise AttributeError(
+                        f"'{type(self).__name__}' object has no attribute '{name}'"
+                    )

         return AgentRunWithState(agent_run)
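Note: both wrappers switched from __getattr__, which Python only calls when normal lookup fails, to __getattribute__, which intercepts every attribute access. That lets the wrapper resolve result (and its own private fields) explicitly before delegating anything to the wrapped run, and the re-raised AttributeError names the wrapper type instead of leaking the wrapped object's message. A stripped-down version of the pattern:

    class Wrapped:
        result = "wrapped result"
        status = "ok"

    class Wrapper:
        def __init__(self, wrapped, fallback_result):
            self._wrapped = wrapped
            self._result = fallback_result

        def __getattribute__(self, name):
            if name in ["_wrapped", "_result"]:
                return object.__getattribute__(self, name)
            if name == "result":  # always serve the fallback, never the wrapped value
                return object.__getattribute__(self, "_result")
            try:
                return getattr(object.__getattribute__(self, "_wrapped"), name)
            except AttributeError:
                raise AttributeError(
                    f"'{type(self).__name__}' object has no attribute '{name}'"
                )

    w = Wrapper(Wrapped(), "fallback result")
    print(w.result)  # fallback result -- intercepted before any delegation
    print(w.status)  # ok -- everything else comes from the wrapped object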