tunacode-cli 0.0.54__py3-none-any.whl → 0.0.56__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of tunacode-cli might be problematic.

Files changed (35)
  1. tunacode/cli/commands/__init__.py +2 -0
  2. tunacode/cli/commands/implementations/plan.py +50 -0
  3. tunacode/cli/commands/registry.py +7 -1
  4. tunacode/cli/repl.py +358 -8
  5. tunacode/cli/repl_components/output_display.py +18 -1
  6. tunacode/cli/repl_components/tool_executor.py +15 -4
  7. tunacode/constants.py +4 -2
  8. tunacode/core/agents/agent_components/__init__.py +20 -0
  9. tunacode/core/agents/agent_components/agent_config.py +134 -7
  10. tunacode/core/agents/agent_components/agent_helpers.py +219 -0
  11. tunacode/core/agents/agent_components/node_processor.py +82 -115
  12. tunacode/core/agents/agent_components/truncation_checker.py +81 -0
  13. tunacode/core/agents/main.py +86 -312
  14. tunacode/core/state.py +51 -3
  15. tunacode/core/tool_handler.py +20 -0
  16. tunacode/prompts/system.md +5 -4
  17. tunacode/tools/exit_plan_mode.py +191 -0
  18. tunacode/tools/grep.py +12 -1
  19. tunacode/tools/present_plan.py +208 -0
  20. tunacode/types.py +57 -0
  21. tunacode/ui/console.py +2 -0
  22. tunacode/ui/input.py +13 -2
  23. tunacode/ui/keybindings.py +26 -38
  24. tunacode/ui/output.py +39 -4
  25. tunacode/ui/panels.py +79 -2
  26. tunacode/ui/prompt_manager.py +19 -2
  27. tunacode/ui/tool_descriptions.py +115 -0
  28. tunacode/ui/tool_ui.py +3 -2
  29. tunacode/utils/message_utils.py +14 -4
  30. {tunacode_cli-0.0.54.dist-info → tunacode_cli-0.0.56.dist-info}/METADATA +4 -3
  31. {tunacode_cli-0.0.54.dist-info → tunacode_cli-0.0.56.dist-info}/RECORD +35 -29
  32. {tunacode_cli-0.0.54.dist-info → tunacode_cli-0.0.56.dist-info}/WHEEL +0 -0
  33. {tunacode_cli-0.0.54.dist-info → tunacode_cli-0.0.56.dist-info}/entry_points.txt +0 -0
  34. {tunacode_cli-0.0.54.dist-info → tunacode_cli-0.0.56.dist-info}/licenses/LICENSE +0 -0
  35. {tunacode_cli-0.0.54.dist-info → tunacode_cli-0.0.56.dist-info}/top_level.txt +0 -0
tunacode/core/agents/agent_components/agent_config.py

@@ -8,6 +8,7 @@ from tunacode.core.logging.logger import get_logger
  from tunacode.core.state import StateManager
  from tunacode.services.mcp import get_mcp_servers
  from tunacode.tools.bash import bash
+ from tunacode.tools.present_plan import create_present_plan_tool
  from tunacode.tools.glob import glob
  from tunacode.tools.grep import grep
  from tunacode.tools.list_dir import list_dir
@@ -65,7 +66,11 @@ def load_tunacode_context() -> str:

  def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
  """Get existing agent or create new one for the specified model."""
+ import logging
+ logger = logging.getLogger(__name__)
+
  if model not in state_manager.session.agents:
+ logger.debug(f"Creating new agent for model {model}, plan_mode={state_manager.is_plan_mode()}")
  max_retries = state_manager.session.user_config.get("settings", {}).get("max_retries", 3)

  # Lazy import Agent and Tool
@@ -78,8 +83,105 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
  # Load TUNACODE.md context
  system_prompt += load_tunacode_context()

- # Initialize todo tool
+ # Add plan mode context if in plan mode
+ if state_manager.is_plan_mode():
+ # REMOVE all TUNACODE_TASK_COMPLETE instructions from the system prompt
+ system_prompt = system_prompt.replace("TUNACODE_TASK_COMPLETE", "PLAN_MODE_TASK_PLACEHOLDER")
+ # Remove the completion guidance that conflicts with plan mode
+ lines_to_remove = [
+ "When a task is COMPLETE, start your response with: TUNACODE_TASK_COMPLETE",
+ "4. When a task is COMPLETE, start your response with: TUNACODE_TASK_COMPLETE",
+ "**How to signal completion:**",
+ "TUNACODE_TASK_COMPLETE",
+ "[Your summary of what was accomplished]",
+ "**IMPORTANT**: Always evaluate if you've completed the task. If yes, use TUNACODE_TASK_COMPLETE.",
+ "This prevents wasting iterations and API calls."
+ ]
+ for line in lines_to_remove:
+ system_prompt = system_prompt.replace(line, "")
+ plan_mode_override = """
+ 🔍 PLAN MODE - YOU MUST USE THE present_plan TOOL 🔍
+
+ CRITICAL: You are in Plan Mode. You MUST execute the present_plan TOOL, not show it as text.
+
+ ❌ WRONG - DO NOT SHOW THE FUNCTION AS TEXT:
+ ```
+ present_plan(title="...", ...) # THIS IS WRONG - DON'T SHOW AS CODE
+ ```
+
+ ✅ CORRECT - ACTUALLY EXECUTE THE TOOL:
+ You must EXECUTE present_plan as a tool call, just like you execute read_file or grep.
+
+ CRITICAL RULES:
+ 1. DO NOT show present_plan() as code or text
+ 2. DO NOT write "Here's the plan" or any text description
+ 3. DO NOT use TUNACODE_TASK_COMPLETE
+ 4. DO NOT use markdown code blocks for present_plan
+
+ YOU MUST EXECUTE THE TOOL:
+ When the user asks you to "plan" something, you must:
+ 1. Research using read_only tools (optional)
+ 2. EXECUTE present_plan tool with the plan data
+ 3. The tool will handle displaying the plan
+
+ Example of CORRECT behavior:
+ User: "plan a markdown file"
+ You: [Execute read_file/grep if needed for research]
+ [Then EXECUTE present_plan tool - not as text but as an actual tool call]
+
+ Remember: present_plan is a TOOL like read_file or grep. You must EXECUTE it, not SHOW it.
+
+ Available tools:
+ - read_file, grep, list_dir, glob: For research
+ - present_plan: EXECUTE this tool to present the plan (DO NOT show as text)
+
+ """
+ # COMPLETELY REPLACE system prompt in plan mode - nuclear option
+ system_prompt = """
+ 🔧 PLAN MODE - TOOL EXECUTION ONLY 🔧
+
+ You are a planning assistant that ONLY communicates through tool execution.
+
+ CRITICAL: You cannot respond with text. You MUST use tools for everything.
+
+ AVAILABLE TOOLS:
+ - read_file(filepath): Read file contents
+ - grep(pattern): Search for text patterns
+ - list_dir(directory): List directory contents
+ - glob(pattern): Find files matching patterns
+ - present_plan(title, overview, steps, files_to_create, success_criteria): Present structured plan
+
+ MANDATORY WORKFLOW:
+ 1. User asks you to plan something
+ 2. You research using read-only tools (if needed)
+ 3. You EXECUTE present_plan tool with structured data
+ 4. DONE
+
+ FORBIDDEN:
+ - Text responses
+ - Showing function calls as code
+ - Saying "here is the plan"
+ - Any text completion
+
+ EXAMPLE:
+ User: "plan a markdown file"
+ You: [Call read_file or grep for research if needed]
+ [Call present_plan tool with actual parameters - NOT as text]
+
+ The present_plan tool takes these parameters:
+ - title: Brief title string
+ - overview: What the plan accomplishes
+ - steps: List of implementation steps
+ - files_to_create: List of files to create
+ - success_criteria: List of success criteria
+
+ YOU MUST EXECUTE present_plan TOOL TO COMPLETE ANY PLANNING TASK.
+ """
+
+ # Initialize tools that need state manager
  todo_tool = TodoTool(state_manager=state_manager)
+ present_plan = create_present_plan_tool(state_manager)
+ logger.debug(f"Tools initialized, present_plan available: {present_plan is not None}")

  # Add todo context if available
  try:
@@ -89,12 +191,21 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
  except Exception as e:
  logger.warning(f"Warning: Failed to load todos: {e}")

- # Create agent with all tools
- state_manager.session.agents[model] = Agent(
- model=model,
- system_prompt=system_prompt,
- tools=[
+ # Create tool list based on mode
+ if state_manager.is_plan_mode():
+ # Plan mode: Only read-only tools + present_plan
+ tools_list = [
+ Tool(present_plan, max_retries=max_retries),
+ Tool(glob, max_retries=max_retries),
+ Tool(grep, max_retries=max_retries),
+ Tool(list_dir, max_retries=max_retries),
+ Tool(read_file, max_retries=max_retries),
+ ]
+ else:
+ # Normal mode: All tools
+ tools_list = [
  Tool(bash, max_retries=max_retries),
+ Tool(present_plan, max_retries=max_retries),
  Tool(glob, max_retries=max_retries),
  Tool(grep, max_retries=max_retries),
  Tool(list_dir, max_retries=max_retries),
@@ -103,7 +214,23 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
  Tool(todo_tool._execute, max_retries=max_retries),
  Tool(update_file, max_retries=max_retries),
  Tool(write_file, max_retries=max_retries),
- ],
+ ]
+
+ # Log which tools are being registered
+ logger.debug(f"Creating agent: plan_mode={state_manager.is_plan_mode()}, tools={len(tools_list)}")
+ if state_manager.is_plan_mode():
+ logger.debug(f"PLAN MODE TOOLS: {[str(tool) for tool in tools_list]}")
+ logger.debug(f"present_plan tool type: {type(present_plan)}")
+
+ if "PLAN MODE - YOU MUST USE THE present_plan TOOL" in system_prompt:
+ logger.debug("✅ Plan mode instructions ARE in system prompt")
+ else:
+ logger.debug("❌ Plan mode instructions NOT in system prompt")
+
+ state_manager.session.agents[model] = Agent(
+ model=model,
+ system_prompt=system_prompt,
+ tools=tools_list,
  mcp_servers=get_mcp_servers(state_manager),
  )
  return state_manager.session.agents[model]
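Note: the net effect of the plan-mode branch above is that the agent is created with only read-only tools plus present_plan, and its prompt tells it to finish by executing that tool. A minimal sketch of the expected call shape follows; the parameter names come from the prompt text added in this diff, while the surrounding dict and the values are invented for illustration and do not claim to match the tool's exact runtime signature.

```
# Hypothetical plan-mode tool invocation, for illustration only.
# Parameter names are taken from the new prompt; the values are made up.
example_plan_call = {
    "tool": "present_plan",
    "args": {
        "title": "Add a project README",
        "overview": "Create a top-level README covering installation and usage.",
        "steps": ["Draft an outline", "Write install and usage sections", "Proofread"],
        "files_to_create": ["README.md"],
        "success_criteria": ["README.md exists", "Install and usage are documented"],
    },
}
```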
tunacode/core/agents/agent_components/agent_helpers.py (new file)

@@ -0,0 +1,219 @@
+ """Helper functions for agent operations to reduce code duplication."""
+
+ from typing import Any
+
+ from tunacode.core.state import StateManager
+ from tunacode.types import FallbackResponse
+
+
+ class UserPromptPartFallback:
+ """Fallback class for UserPromptPart when pydantic_ai is not available."""
+
+ def __init__(self, content: str, part_kind: str):
+ self.content = content
+ self.part_kind = part_kind
+
+
+ # Cache for UserPromptPart class
+ _USER_PROMPT_PART_CLASS = None
+
+
+ def get_user_prompt_part_class():
+ """Get UserPromptPart class with caching and fallback for test environment."""
+ global _USER_PROMPT_PART_CLASS
+
+ if _USER_PROMPT_PART_CLASS is not None:
+ return _USER_PROMPT_PART_CLASS
+
+ try:
+ import importlib
+
+ messages = importlib.import_module("pydantic_ai.messages")
+ _USER_PROMPT_PART_CLASS = getattr(messages, "UserPromptPart", None)
+
+ if _USER_PROMPT_PART_CLASS is None:
+ _USER_PROMPT_PART_CLASS = UserPromptPartFallback
+ except Exception:
+ _USER_PROMPT_PART_CLASS = UserPromptPartFallback
+
+ return _USER_PROMPT_PART_CLASS
+
+
+ def create_user_message(content: str, state_manager: StateManager):
+ """Create a user message and add it to the session messages."""
+ from .message_handler import get_model_messages
+
+ model_request_cls = get_model_messages()[0]
+ UserPromptPart = get_user_prompt_part_class()
+ user_prompt_part = UserPromptPart(content=content, part_kind="user-prompt")
+ message = model_request_cls(parts=[user_prompt_part], kind="request")
+ state_manager.session.messages.append(message)
+ return message
+
+
+ def get_tool_summary(tool_calls: list[dict[str, Any]]) -> dict[str, int]:
+ """Generate a summary of tool usage from tool calls."""
+ tool_summary: dict[str, int] = {}
+ for tc in tool_calls:
+ tool_name = tc.get("tool", "unknown")
+ tool_summary[tool_name] = tool_summary.get(tool_name, 0) + 1
+ return tool_summary
+
+
+ def get_tool_description(tool_name: str, tool_args: dict[str, Any]) -> str:
+ """Get a descriptive string for a tool call."""
+ tool_desc = tool_name
+ if tool_name in ["grep", "glob"] and isinstance(tool_args, dict):
+ pattern = tool_args.get("pattern", "")
+ tool_desc = f"{tool_name}('{pattern}')"
+ elif tool_name == "read_file" and isinstance(tool_args, dict):
+ path = tool_args.get("file_path", tool_args.get("filepath", ""))
+ tool_desc = f"{tool_name}('{path}')"
+ return tool_desc
+
+
+ def get_recent_tools_context(tool_calls: list[dict[str, Any]], limit: int = 3) -> str:
+ """Get a context string describing recent tool usage."""
+ if not tool_calls:
+ return "No tools used yet"
+
+ last_tools = []
+ for tc in tool_calls[-limit:]:
+ tool_name = tc.get("tool", "unknown")
+ tool_args = tc.get("args", {})
+ tool_desc = get_tool_description(tool_name, tool_args)
+ last_tools.append(tool_desc)
+
+ return f"Recent tools: {', '.join(last_tools)}"
+
+
+ def create_empty_response_message(
+ message: str,
+ empty_reason: str,
+ tool_calls: list[dict[str, Any]],
+ iteration: int,
+ state_manager: StateManager,
+ ) -> str:
+ """Create an aggressive message for handling empty responses."""
+ tools_context = get_recent_tools_context(tool_calls)
+
+ content = f"""FAILURE DETECTED: You returned {("an " + empty_reason if empty_reason != "empty" else "an empty")} response.
+
+ This is UNACCEPTABLE. You FAILED to produce output.
+
+ Task: {message[:200]}...
+ {tools_context}
+ Current iteration: {iteration}
+
+ TRY AGAIN RIGHT NOW:
+
+ 1. If your search returned no results → Try a DIFFERENT search pattern
+ 2. If you found what you need → Use TUNACODE_TASK_COMPLETE
+ 3. If you're stuck → EXPLAIN SPECIFICALLY what's blocking you
+ 4. If you need to explore → Use list_dir or broader searches
+
+ YOU MUST PRODUCE REAL OUTPUT IN THIS RESPONSE. NO EXCUSES.
+ EXECUTE A TOOL OR PROVIDE SUBSTANTIAL CONTENT.
+ DO NOT RETURN ANOTHER EMPTY RESPONSE."""
+
+ return content
+
+
+ def create_progress_summary(tool_calls: list[dict[str, Any]]) -> tuple[dict[str, int], str]:
+ """Create a progress summary from tool calls."""
+ tool_summary = get_tool_summary(tool_calls)
+
+ if tool_summary:
+ summary_str = ", ".join([f"{name}: {count}" for name, count in tool_summary.items()])
+ else:
+ summary_str = "No tools used yet"
+
+ return tool_summary, summary_str
+
+
+ def create_fallback_response(
+ iterations: int,
+ max_iterations: int,
+ tool_calls: list[dict[str, Any]],
+ messages: list[Any],
+ verbosity: str = "normal",
+ ) -> FallbackResponse:
+ """Create a comprehensive fallback response when iteration limit is reached."""
+ fallback = FallbackResponse(
+ summary="Reached maximum iterations without producing a final response.",
+ progress=f"Completed {iterations} iterations (limit: {max_iterations})",
+ )
+
+ # Extract context from messages
+ tool_calls_summary = []
+ files_modified = set()
+ commands_run = []
+
+ for msg in messages:
+ if hasattr(msg, "parts"):
+ for part in msg.parts:
+ if hasattr(part, "part_kind") and part.part_kind == "tool-call":
+ tool_name = getattr(part, "tool_name", "unknown")
+ tool_calls_summary.append(tool_name)
+
+ # Track specific operations
+ if tool_name in ["write_file", "update_file"] and hasattr(part, "args"):
+ if isinstance(part.args, dict) and "file_path" in part.args:
+ files_modified.add(part.args["file_path"])
+ elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
+ if isinstance(part.args, dict) and "command" in part.args:
+ commands_run.append(part.args["command"])
+
+ if verbosity in ["normal", "detailed"]:
+ # Add what was attempted
+ if tool_calls_summary:
+ tool_counts: dict[str, int] = {}
+ for tool in tool_calls_summary:
+ tool_counts[tool] = tool_counts.get(tool, 0) + 1
+
+ fallback.issues.append(f"Executed {len(tool_calls_summary)} tool calls:")
+ for tool, count in sorted(tool_counts.items()):
+ fallback.issues.append(f" • {tool}: {count}x")
+
+ if verbosity == "detailed":
+ if files_modified:
+ fallback.issues.append(f"\nFiles modified ({len(files_modified)}):")
+ for f in sorted(files_modified)[:5]:
+ fallback.issues.append(f" • {f}")
+ if len(files_modified) > 5:
+ fallback.issues.append(f" • ... and {len(files_modified) - 5} more")
+
+ if commands_run:
+ fallback.issues.append(f"\nCommands executed ({len(commands_run)}):")
+ for cmd in commands_run[:3]:
+ display_cmd = cmd if len(cmd) <= 60 else cmd[:57] + "..."
+ fallback.issues.append(f" • {display_cmd}")
+ if len(commands_run) > 3:
+ fallback.issues.append(f" • ... and {len(commands_run) - 3} more")
+
+ # Add helpful next steps
+ fallback.next_steps.append("The task may be too complex - try breaking it into smaller steps")
+ fallback.next_steps.append("Check the output above for any errors or partial progress")
+ if files_modified:
+ fallback.next_steps.append("Review modified files to see what changes were made")
+
+ return fallback
+
+
+ def format_fallback_output(fallback: FallbackResponse) -> str:
+ """Format a fallback response into a comprehensive output string."""
+ output_parts = [fallback.summary, ""]
+
+ if fallback.progress:
+ output_parts.append(f"Progress: {fallback.progress}")
+
+ if fallback.issues:
+ output_parts.append("\nWhat happened:")
+ output_parts.extend(fallback.issues)
+
+ if fallback.next_steps:
+ output_parts.append("\nSuggested next steps:")
+ for step in fallback.next_steps:
+ output_parts.append(f" • {step}")
+
+ return "\n".join(output_parts)
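A short usage sketch may help tie the new helpers together. It assumes the module path shown in the file list above and a tool_calls list of {"tool": ..., "args": ...} dicts, which is the shape get_tool_summary and get_recent_tools_context read; the wrapper function and variable names are invented for the example.

```
# Sketch: turning an exhausted iteration loop into user-facing output with the
# new helpers (module path assumed from the file list above).
from tunacode.core.agents.agent_components.agent_helpers import (
    create_fallback_response,
    format_fallback_output,
)

def summarize_exhausted_run(iterations, max_iterations, tool_calls, messages):
    """Return a formatted fallback summary once the iteration limit is hit, else None."""
    if iterations < max_iterations:
        return None
    fallback = create_fallback_response(
        iterations, max_iterations, tool_calls, messages, verbosity="detailed"
    )
    return format_fallback_output(fallback)
```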
tunacode/core/agents/agent_components/node_processor.py

@@ -6,10 +6,12 @@ from typing import Any, Awaitable, Callable, Optional, Tuple
  from tunacode.core.logging.logger import get_logger
  from tunacode.core.state import StateManager
  from tunacode.types import UsageTrackerProtocol
+ from tunacode.ui.tool_descriptions import get_batch_description, get_tool_description

  from .response_state import ResponseState
  from .task_completion import check_task_completion
  from .tool_buffer import ToolBuffer
+ from .truncation_checker import check_for_truncation

  logger = get_logger(__name__)

@@ -171,7 +173,7 @@ async def _process_node(
  # Check for truncation patterns
  if all_content_parts:
  combined_content = " ".join(all_content_parts).strip()
- appears_truncated = _check_for_truncation(combined_content)
+ appears_truncated = check_for_truncation(combined_content)

  # If we only got empty content and no tool calls, we should NOT consider this a valid response
  # This prevents the agent from stopping when it gets empty responses
@@ -229,79 +231,6 @@ async def _process_node(
  return False, None


- def _check_for_truncation(combined_content: str) -> bool:
- """Check if content appears to be truncated."""
- if not combined_content:
- return False
-
- # Truncation indicators:
- # 1. Ends with "..." or "…" (but not part of a complete sentence)
- # 2. Ends mid-word (no punctuation, space, or complete word)
- # 3. Contains incomplete markdown/code blocks
- # 4. Ends with incomplete parentheses/brackets
-
- # Check for ellipsis at end suggesting truncation
- if combined_content.endswith(("...", "…")) and not combined_content.endswith(("....", "….")):
- return True
-
- # Check for mid-word truncation (ends with letters but no punctuation)
- if combined_content and combined_content[-1].isalpha():
- # Look for incomplete words by checking if last "word" seems cut off
- words = combined_content.split()
- if words:
- last_word = words[-1]
- # Common complete word endings vs likely truncations
- complete_endings = (
- "ing",
- "ed",
- "ly",
- "er",
- "est",
- "tion",
- "ment",
- "ness",
- "ity",
- "ous",
- "ive",
- "able",
- "ible",
- )
- incomplete_patterns = (
- "referen",
- "inte",
- "proces",
- "analy",
- "deve",
- "imple",
- "execu",
- )
-
- if any(last_word.lower().endswith(pattern) for pattern in incomplete_patterns):
- return True
- elif len(last_word) > 2 and not any(
- last_word.lower().endswith(end) for end in complete_endings
- ):
- # Likely truncated if doesn't end with common suffix
- return True
-
- # Check for unclosed markdown code blocks
- code_block_count = combined_content.count("```")
- if code_block_count % 2 != 0:
- return True
-
- # Check for unclosed brackets/parentheses (more opens than closes)
- open_brackets = (
- combined_content.count("[") + combined_content.count("(") + combined_content.count("{")
- )
- close_brackets = (
- combined_content.count("]") + combined_content.count(")") + combined_content.count("}")
- )
- if open_brackets > close_brackets:
- return True
-
- return False
-
-
  async def _display_raw_api_response(node: Any, ui: Any) -> None:
  """Display raw API response data when thoughts are enabled."""

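The deleted body above documents the heuristic that now lives in truncation_checker.py: a trailing ellipsis, an apparent mid-word cut-off, an odd number of ``` fences, or more opening than closing brackets all count as truncation. A few input/output pairs make that concrete, assuming the relocated check_for_truncation keeps the behavior of the removed implementation.

```
from tunacode.core.agents.agent_components.truncation_checker import check_for_truncation

# Trailing ellipsis reads as a cut-off response.
assert check_for_truncation("I will now update the config...") is True
# An odd number of ``` fences means an unclosed code block.
assert check_for_truncation("Here is the patch:\n```python\nprint('hi')") is True
# More opening than closing brackets also counts as truncation.
assert check_for_truncation("result = foo(bar[0]") is True
# A short, complete sentence passes.
assert check_for_truncation("Done.") is False
```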
@@ -382,6 +311,14 @@ async def _process_tool_calls(
  if tool_buffer is not None and part.tool_name in READ_ONLY_TOOLS:
  # Add to buffer instead of executing immediately
  tool_buffer.add(part, node)
+
+ # Update spinner to show we're collecting tools
+ buffered_count = len(tool_buffer.read_only_tasks)
+ await ui.update_spinner_message(
+ f"[bold #00d7ff]Collecting tools ({buffered_count} buffered)...[/bold #00d7ff]",
+ state_manager,
+ )
+
  if state_manager.session.show_thoughts:
  await ui.muted(
  f"⏸️ BUFFERED: {part.tool_name} (will execute in parallel batch)"
@@ -399,45 +336,53 @@

  start_time = time.time()

- # Enhanced visual feedback for parallel execution
- await ui.muted("\n" + "=" * 60)
- await ui.muted(
- f"🚀 PARALLEL BATCH #{batch_id}: Executing {len(buffered_tasks)} read-only tools concurrently"
+ # Update spinner message for batch execution
+ tool_names = [part.tool_name for part, _ in buffered_tasks]
+ batch_msg = get_batch_description(len(buffered_tasks), tool_names)
+ await ui.update_spinner_message(
+ f"[bold #00d7ff]{batch_msg}...[/bold #00d7ff]", state_manager
  )
- await ui.muted("=" * 60)
-
- # Display details of what's being executed
- for idx, (buffered_part, _) in enumerate(buffered_tasks, 1):
- tool_desc = f" [{idx}] {buffered_part.tool_name}"
- if hasattr(buffered_part, "args") and isinstance(
- buffered_part.args, dict
- ):
- if (
- buffered_part.tool_name == "read_file"
- and "file_path" in buffered_part.args
- ):
- tool_desc += f" {buffered_part.args['file_path']}"
- elif (
- buffered_part.tool_name == "grep"
- and "pattern" in buffered_part.args
- ):
- tool_desc += f" → pattern: '{buffered_part.args['pattern']}'"
- if "include_files" in buffered_part.args:
- tool_desc += (
- f", files: '{buffered_part.args['include_files']}'"
- )
- elif (
- buffered_part.tool_name == "list_dir"
- and "directory" in buffered_part.args
- ):
- tool_desc += f" → {buffered_part.args['directory']}"
- elif (
- buffered_part.tool_name == "glob"
- and "pattern" in buffered_part.args
+
+ # Enhanced visual feedback for parallel execution (suppress in plan mode)
+ if not state_manager.is_plan_mode():
+ await ui.muted("\n" + "=" * 60)
+ await ui.muted(
+ f"🚀 PARALLEL BATCH #{batch_id}: Executing {len(buffered_tasks)} read-only tools concurrently"
+ )
+ await ui.muted("=" * 60)
+
+ # Display details of what's being executed
+ for idx, (buffered_part, _) in enumerate(buffered_tasks, 1):
+ tool_desc = f" [{idx}] {buffered_part.tool_name}"
+ if hasattr(buffered_part, "args") and isinstance(
+ buffered_part.args, dict
  ):
- tool_desc += f" → pattern: '{buffered_part.args['pattern']}'"
- await ui.muted(tool_desc)
- await ui.muted("=" * 60)
+ if (
+ buffered_part.tool_name == "read_file"
+ and "file_path" in buffered_part.args
+ ):
+ tool_desc += f" → {buffered_part.args['file_path']}"
+ elif (
+ buffered_part.tool_name == "grep"
+ and "pattern" in buffered_part.args
+ ):
+ tool_desc += f" → pattern: '{buffered_part.args['pattern']}'"
+ if "include_files" in buffered_part.args:
+ tool_desc += (
+ f", files: '{buffered_part.args['include_files']}'"
+ )
+ elif (
+ buffered_part.tool_name == "list_dir"
+ and "directory" in buffered_part.args
+ ):
+ tool_desc += f" → {buffered_part.args['directory']}"
+ elif (
+ buffered_part.tool_name == "glob"
+ and "pattern" in buffered_part.args
+ ):
+ tool_desc += f" → pattern: '{buffered_part.args['pattern']}'"
+ await ui.muted(tool_desc)
+ await ui.muted("=" * 60)

  await execute_tools_parallel(buffered_tasks, tool_callback)

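The spinner handling added in this hunk (and reset in the next one) reduces to a small pattern: derive a label from the buffered tool names, show it while the batch runs, then restore the standard thinking message. The sketch below assumes the call shapes used in the diff; the wrapper function and the run_batch callable are invented for illustration.

```
from tunacode.constants import UI_THINKING_MESSAGE
from tunacode.ui.tool_descriptions import get_batch_description

async def show_batch_spinner(ui, state_manager, buffered_tasks, run_batch):
    """Illustrative wrapper: spinner label during a parallel batch, then reset."""
    tool_names = [part.tool_name for part, _ in buffered_tasks]
    batch_msg = get_batch_description(len(buffered_tasks), tool_names)
    await ui.update_spinner_message(f"[bold #00d7ff]{batch_msg}...[/bold #00d7ff]", state_manager)
    await run_batch(buffered_tasks)
    await ui.update_spinner_message(UI_THINKING_MESSAGE, state_manager)
```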
@@ -447,14 +392,36 @@
  ) # Assume 100ms per tool average
  speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0

- await ui.muted(
- f"✅ Parallel batch completed in {elapsed_time:.0f}ms "
- f"(~{speedup:.1f}x faster than sequential)\n"
- )
+ if not state_manager.is_plan_mode():
+ await ui.muted(
+ f"✅ Parallel batch completed in {elapsed_time:.0f}ms "
+ f"(~{speedup:.1f}x faster than sequential)\n"
+ )
+
+ # Reset spinner message back to thinking
+ from tunacode.constants import UI_THINKING_MESSAGE
+
+ await ui.update_spinner_message(UI_THINKING_MESSAGE, state_manager)

  # Now execute the write/execute tool
  if state_manager.session.show_thoughts:
  await ui.warning(f"⚠️ SEQUENTIAL: {part.tool_name} (write/execute tool)")
+
+ # Update spinner for sequential tool
+ tool_args = getattr(part, "args", {}) if hasattr(part, "args") else {}
+ # Parse args if they're a JSON string
+ if isinstance(tool_args, str):
+ import json
+
+ try:
+ tool_args = json.loads(tool_args)
+ except (json.JSONDecodeError, TypeError):
+ tool_args = {}
+ tool_desc = get_tool_description(part.tool_name, tool_args)
+ await ui.update_spinner_message(
+ f"[bold #00d7ff]{tool_desc}...[/bold #00d7ff]", state_manager
+ )
+
  await tool_callback(part, node)

  # Track tool calls in session