tunacode-cli 0.0.51__py3-none-any.whl → 0.0.53__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic. Click here for more details.

Files changed (87)
  1. tunacode/cli/commands/base.py +2 -2
  2. tunacode/cli/commands/implementations/__init__.py +7 -1
  3. tunacode/cli/commands/implementations/conversation.py +1 -1
  4. tunacode/cli/commands/implementations/debug.py +1 -1
  5. tunacode/cli/commands/implementations/development.py +4 -1
  6. tunacode/cli/commands/implementations/template.py +132 -0
  7. tunacode/cli/commands/registry.py +28 -1
  8. tunacode/cli/commands/template_shortcut.py +93 -0
  9. tunacode/cli/main.py +6 -0
  10. tunacode/cli/repl.py +29 -174
  11. tunacode/cli/repl_components/__init__.py +10 -0
  12. tunacode/cli/repl_components/command_parser.py +34 -0
  13. tunacode/cli/repl_components/error_recovery.py +88 -0
  14. tunacode/cli/repl_components/output_display.py +33 -0
  15. tunacode/cli/repl_components/tool_executor.py +84 -0
  16. tunacode/configuration/defaults.py +2 -2
  17. tunacode/configuration/settings.py +11 -14
  18. tunacode/constants.py +57 -23
  19. tunacode/context.py +0 -14
  20. tunacode/core/agents/agent_components/__init__.py +27 -0
  21. tunacode/core/agents/agent_components/agent_config.py +109 -0
  22. tunacode/core/agents/agent_components/json_tool_parser.py +109 -0
  23. tunacode/core/agents/agent_components/message_handler.py +100 -0
  24. tunacode/core/agents/agent_components/node_processor.py +480 -0
  25. tunacode/core/agents/agent_components/response_state.py +13 -0
  26. tunacode/core/agents/agent_components/result_wrapper.py +50 -0
  27. tunacode/core/agents/agent_components/task_completion.py +28 -0
  28. tunacode/core/agents/agent_components/tool_buffer.py +24 -0
  29. tunacode/core/agents/agent_components/tool_executor.py +49 -0
  30. tunacode/core/agents/main.py +421 -778
  31. tunacode/core/agents/utils.py +42 -2
  32. tunacode/core/background/manager.py +3 -3
  33. tunacode/core/logging/__init__.py +4 -3
  34. tunacode/core/logging/config.py +1 -1
  35. tunacode/core/logging/formatters.py +1 -1
  36. tunacode/core/logging/handlers.py +41 -7
  37. tunacode/core/setup/__init__.py +2 -0
  38. tunacode/core/setup/agent_setup.py +2 -2
  39. tunacode/core/setup/base.py +2 -2
  40. tunacode/core/setup/config_setup.py +10 -6
  41. tunacode/core/setup/git_safety_setup.py +13 -2
  42. tunacode/core/setup/template_setup.py +75 -0
  43. tunacode/core/state.py +13 -2
  44. tunacode/core/token_usage/api_response_parser.py +6 -2
  45. tunacode/core/token_usage/usage_tracker.py +37 -7
  46. tunacode/core/tool_handler.py +24 -1
  47. tunacode/prompts/system.md +289 -4
  48. tunacode/setup.py +2 -0
  49. tunacode/templates/__init__.py +9 -0
  50. tunacode/templates/loader.py +210 -0
  51. tunacode/tools/glob.py +3 -3
  52. tunacode/tools/grep.py +26 -276
  53. tunacode/tools/grep_components/__init__.py +9 -0
  54. tunacode/tools/grep_components/file_filter.py +93 -0
  55. tunacode/tools/grep_components/pattern_matcher.py +152 -0
  56. tunacode/tools/grep_components/result_formatter.py +45 -0
  57. tunacode/tools/grep_components/search_result.py +35 -0
  58. tunacode/tools/todo.py +27 -21
  59. tunacode/types.py +19 -4
  60. tunacode/ui/completers.py +6 -1
  61. tunacode/ui/decorators.py +2 -2
  62. tunacode/ui/keybindings.py +1 -1
  63. tunacode/ui/panels.py +13 -5
  64. tunacode/ui/prompt_manager.py +1 -1
  65. tunacode/ui/tool_ui.py +8 -2
  66. tunacode/utils/bm25.py +4 -4
  67. tunacode/utils/file_utils.py +2 -2
  68. tunacode/utils/message_utils.py +3 -1
  69. tunacode/utils/system.py +0 -4
  70. tunacode/utils/text_utils.py +1 -1
  71. tunacode/utils/token_counter.py +2 -2
  72. {tunacode_cli-0.0.51.dist-info → tunacode_cli-0.0.53.dist-info}/METADATA +146 -1
  73. tunacode_cli-0.0.53.dist-info/RECORD +123 -0
  74. {tunacode_cli-0.0.51.dist-info → tunacode_cli-0.0.53.dist-info}/top_level.txt +0 -1
  75. api/auth.py +0 -13
  76. api/users.py +0 -8
  77. tunacode/core/recursive/__init__.py +0 -18
  78. tunacode/core/recursive/aggregator.py +0 -467
  79. tunacode/core/recursive/budget.py +0 -414
  80. tunacode/core/recursive/decomposer.py +0 -398
  81. tunacode/core/recursive/executor.py +0 -470
  82. tunacode/core/recursive/hierarchy.py +0 -488
  83. tunacode/ui/recursive_progress.py +0 -380
  84. tunacode_cli-0.0.51.dist-info/RECORD +0 -107
  85. {tunacode_cli-0.0.51.dist-info → tunacode_cli-0.0.53.dist-info}/WHEEL +0 -0
  86. {tunacode_cli-0.0.51.dist-info → tunacode_cli-0.0.53.dist-info}/entry_points.txt +0 -0
  87. {tunacode_cli-0.0.51.dist-info → tunacode_cli-0.0.53.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,100 @@
1
+ """Message handling utilities for agent communication."""
2
+
3
+ from datetime import datetime, timezone
4
+ from typing import Dict, Set, Union
5
+
6
+ from tunacode.core.state import StateManager
7
+
8
# Type aliases used for tool-call bookkeeping throughout this module.
ToolCallId = str
ToolName = str
# An error message is plain text, or None when no error applies.
ErrorMessage = Union[str, None]
11
+
12
+
13
def get_model_messages():
    """
    Safely retrieve message-related classes from pydantic_ai.

    If the running environment (e.g. our test stubs) does not define
    SystemPromptPart we create a minimal placeholder so that the rest of the
    code can continue to work without depending on the real implementation.

    Returns:
        tuple: (ModelRequest, ToolReturnPart, SystemPromptPart)
    """
    # Imported lazily so merely importing this module does not require
    # pydantic_ai to be installed (test stubs may substitute it).
    import importlib

    messages = importlib.import_module("pydantic_ai.messages")

    # These two classes are required: direct attribute access (instead of
    # getattr with a constant name) lets a missing class fail loudly with a
    # clear AttributeError naming the module.
    ModelRequest = messages.ModelRequest
    ToolReturnPart = messages.ToolReturnPart

    # Create minimal fallback for SystemPromptPart if it doesn't exist
    if not hasattr(messages, "SystemPromptPart"):

        class SystemPromptPart:  # type: ignore
            """Stand-in exposing only the attributes our code touches."""

            def __init__(self, content: str = "", role: str = "system", part_kind: str = ""):
                self.content = content
                self.role = role
                self.part_kind = part_kind
    else:
        SystemPromptPart = messages.SystemPromptPart

    return ModelRequest, ToolReturnPart, SystemPromptPart
41
+
42
+
43
def patch_tool_messages(
    error_message: ErrorMessage = "Tool operation failed",
    state_manager: StateManager = None,
):
    """
    Find any tool calls without responses and add synthetic error responses for them.

    Takes an error message to use in the synthesized tool response.
    Ignores tools that have corresponding retry prompts as the model is already
    addressing them.

    Args:
        error_message: Content placed in each synthesized tool-return part.
        state_manager: Required session holder; its ``session.messages`` list
            is scanned and patched in place.

    Raises:
        ValueError: If ``state_manager`` is not provided.
    """
    if state_manager is None:
        raise ValueError("state_manager is required for patch_tool_messages")

    messages = state_manager.session.messages

    if not messages:
        return

    # Map tool calls to their tool returns
    tool_calls: Dict[ToolCallId, ToolName] = {}  # tool_call_id -> tool_name
    tool_returns: Set[ToolCallId] = set()  # set of tool_call_ids with returns
    retry_prompts: Set[ToolCallId] = set()  # set of tool_call_ids with retry prompts

    for message in messages:
        if hasattr(message, "parts"):
            for part in message.parts:
                if (
                    hasattr(part, "part_kind")
                    and hasattr(part, "tool_call_id")
                    and part.tool_call_id
                ):
                    if part.part_kind == "tool-call":
                        tool_calls[part.tool_call_id] = part.tool_name
                    elif part.part_kind == "tool-return":
                        tool_returns.add(part.tool_call_id)
                    elif part.part_kind == "retry-prompt":
                        retry_prompts.add(part.tool_call_id)

    # Resolve the message classes lazily but only ONCE for all orphans.
    # (Previously get_model_messages() was called inside the loop, re-running
    # the import machinery for every orphaned tool call.)
    message_classes = None

    # Identify orphaned tools (those without responses and not being retried)
    for tool_call_id, tool_name in list(tool_calls.items()):
        if tool_call_id not in tool_returns and tool_call_id not in retry_prompts:
            if message_classes is None:
                message_classes = get_model_messages()
            ModelRequest, ToolReturnPart, _ = message_classes
            messages.append(
                ModelRequest(
                    parts=[
                        ToolReturnPart(
                            tool_name=tool_name,
                            content=error_message,
                            tool_call_id=tool_call_id,
                            timestamp=datetime.now(timezone.utc),
                            part_kind="tool-return",
                        )
                    ],
                    kind="request",
                )
            )
@@ -0,0 +1,480 @@
1
+ """Node processing functionality for agent responses."""
2
+
3
+ import json
4
+ from typing import Any, Awaitable, Callable, Optional, Tuple
5
+
6
+ from tunacode.core.logging.logger import get_logger
7
+ from tunacode.core.state import StateManager
8
+ from tunacode.types import UsageTrackerProtocol
9
+
10
+ from .response_state import ResponseState
11
+ from .task_completion import check_task_completion
12
+ from .tool_buffer import ToolBuffer
13
+
14
logger = get_logger(__name__)

# Import streaming types with fallback for older versions.
# NOTE(review): assumes PartDeltaEvent/TextPartDelta exist only in newer
# pydantic-ai releases — the names stay defined (as None) on older versions
# so module-level references never NameError, and STREAMING_AVAILABLE routes
# callers down the non-streaming fallback path.
try:
    from pydantic_ai.messages import PartDeltaEvent, TextPartDelta

    STREAMING_AVAILABLE = True
except ImportError:
    # Fallback for older pydantic-ai versions
    PartDeltaEvent = None
    TextPartDelta = None
    STREAMING_AVAILABLE = False
26
+
27
+
28
async def _process_node(
    node,
    tool_callback: Optional[Callable],
    state_manager: StateManager,
    tool_buffer: Optional[ToolBuffer] = None,
    streaming_callback: Optional[Callable[[str], Awaitable[None]]] = None,
    usage_tracker: Optional[UsageTrackerProtocol] = None,
    response_state: Optional[ResponseState] = None,
) -> Tuple[bool, Optional[str]]:
    """Process a single node from the agent response.

    Appends the node's request/thought/model_response to the session message
    history, validates TUNACODE_TASK_COMPLETE markers (rejecting premature
    completions), detects empty or truncated model output, optionally streams
    text content as a fallback, and dispatches tool calls.

    Args:
        node: Agent graph node; may expose ``request``, ``thought`` and
            ``model_response`` attributes (all optional).
        tool_callback: Awaitable invoked for each tool-call part.
        state_manager: Holds the active session (messages, show_thoughts flag,
            iteration counter).
        tool_buffer: Optional buffer used to batch read-only tools.
        streaming_callback: Fallback text streamer, used only when pydantic-ai
            delta streaming (STREAMING_AVAILABLE) is not present.
        usage_tracker: Optional token-usage tracker.
        response_state: Mutable flags describing the overall response.

    Returns:
        tuple: (is_empty: bool, reason: Optional[str]) - True if empty/problematic response detected,
        with reason being one of: "empty", "truncated", "intention_without_action"
    """
    from tunacode.ui import console as ui

    # Use the original callback directly - parallel execution will be handled differently
    buffering_callback = tool_callback
    empty_response_detected = False
    has_non_empty_content = False
    appears_truncated = False
    # NOTE(review): has_intention and has_tool_calls are initialized but never
    # assigned anywhere below, so the "intention_without_action" return path
    # at the bottom is currently unreachable — confirm whether wiring these
    # flags up was intended.
    has_intention = False
    has_tool_calls = False

    if hasattr(node, "request"):
        state_manager.session.messages.append(node.request)

    if hasattr(node, "thought") and node.thought:
        state_manager.session.messages.append({"thought": node.thought})
        # Display thought immediately if show_thoughts is enabled
        if state_manager.session.show_thoughts:
            await ui.muted(f"THOUGHT: {node.thought}")

    if hasattr(node, "model_response"):
        state_manager.session.messages.append(node.model_response)

        if usage_tracker:
            await usage_tracker.track_and_display(node.model_response)

        # Check for task completion marker in response content
        if response_state:
            has_non_empty_content = False
            appears_truncated = False
            all_content_parts = []

            # First, check if there are any tool calls in this response
            has_queued_tools = any(
                hasattr(part, "part_kind") and part.part_kind == "tool-call"
                for part in node.model_response.parts
            )

            for part in node.model_response.parts:
                if hasattr(part, "content") and isinstance(part.content, str):
                    # Check if we have any non-empty content
                    if part.content.strip():
                        has_non_empty_content = True
                        all_content_parts.append(part.content)

                    is_complete, cleaned_content = check_task_completion(part.content)
                    if is_complete:
                        # Validate completion - check for premature completion
                        if has_queued_tools:
                            # Agent is trying to complete with pending tools!
                            if state_manager.session.show_thoughts:
                                await ui.warning(
                                    "⚠️ PREMATURE COMPLETION DETECTED - Agent queued tools but marked complete"
                                )
                                await ui.muted(" Overriding completion to allow tool execution")
                            # Don't mark as complete - let the tools run first
                            # Update the content to remove the marker but don't set task_completed
                            part.content = cleaned_content
                            # Log this as an issue
                            logger.warning(
                                f"Agent attempted premature completion with {sum(1 for p in node.model_response.parts if getattr(p, 'part_kind', '') == 'tool-call')} pending tools"
                            )
                        else:
                            # Check if content suggests pending actions
                            combined_text = " ".join(all_content_parts).lower()
                            pending_phrases = [
                                "let me",
                                "i'll check",
                                "i will",
                                "going to",
                                "about to",
                                "need to check",
                                "let's check",
                                "i should",
                                "need to find",
                                "let me see",
                                "i'll look",
                                "let me search",
                                "let me find",
                            ]
                            has_pending_intention = any(
                                phrase in combined_text for phrase in pending_phrases
                            )

                            # Also check for action verbs at end of content suggesting incomplete action
                            action_endings = [
                                "checking",
                                "searching",
                                "looking",
                                "finding",
                                "reading",
                                "analyzing",
                            ]
                            ends_with_action = any(
                                combined_text.rstrip().endswith(ending) for ending in action_endings
                            )

                            if (
                                has_pending_intention or ends_with_action
                            ) and state_manager.session.iteration_count <= 1:
                                # Too early to complete with pending intentions
                                if state_manager.session.show_thoughts:
                                    await ui.warning(
                                        "⚠️ SUSPICIOUS COMPLETION - Stated intentions but completing early"
                                    )
                                    found_phrases = [
                                        p for p in pending_phrases if p in combined_text
                                    ]
                                    await ui.muted(
                                        f" Iteration {state_manager.session.iteration_count} with pending: {found_phrases}"
                                    )
                                    if ends_with_action:
                                        await ui.muted(
                                            f" Content ends with action verb: '{combined_text.split()[-1] if combined_text.split() else ''}'"
                                        )
                                    # Still allow it but log warning
                                    # NOTE(review): found_phrases is only bound
                                    # inside the show_thoughts branch, so this
                                    # warning can only be emitted here — confirm
                                    # whether it was meant to fire unconditionally.
                                    logger.warning(
                                        f"Task completion with pending intentions detected: {found_phrases}"
                                    )

                            # Normal completion
                            response_state.task_completed = True
                            response_state.has_user_response = True
                            # Update the part content to remove the marker
                            part.content = cleaned_content
                            if state_manager.session.show_thoughts:
                                await ui.muted("✅ TASK COMPLETION DETECTED")
                            break

            # Check for truncation patterns
            if all_content_parts:
                combined_content = " ".join(all_content_parts).strip()
                appears_truncated = _check_for_truncation(combined_content)

            # If we only got empty content and no tool calls, we should NOT consider this a valid response
            # This prevents the agent from stopping when it gets empty responses
            if not has_non_empty_content and not any(
                hasattr(part, "part_kind") and part.part_kind == "tool-call"
                for part in node.model_response.parts
            ):
                # Empty response with no tools - keep going
                empty_response_detected = True
                if state_manager.session.show_thoughts:
                    await ui.muted("⚠️ EMPTY RESPONSE - CONTINUING")

            # Check if response appears truncated
            elif appears_truncated and not any(
                hasattr(part, "part_kind") and part.part_kind == "tool-call"
                for part in node.model_response.parts
            ):
                # Truncated response detected
                # (combined_content is always bound here: appears_truncated can
                # only be True when all_content_parts was non-empty above)
                empty_response_detected = True
                if state_manager.session.show_thoughts:
                    await ui.muted("⚠️ TRUNCATED RESPONSE DETECTED - CONTINUING")
                    await ui.muted(f" Last content: ...{combined_content[-100:]}")

        # Stream content to callback if provided
        # Use this as fallback when true token streaming is not available
        if streaming_callback and not STREAMING_AVAILABLE:
            for part in node.model_response.parts:
                if hasattr(part, "content") and isinstance(part.content, str):
                    content = part.content.strip()
                    if content and not content.startswith('{"thought"'):
                        # Stream non-JSON content (actual response content)
                        if streaming_callback:  # redundant re-check of the outer condition
                            await streaming_callback(content)

        # Enhanced display when thoughts are enabled
        if state_manager.session.show_thoughts:
            await _display_raw_api_response(node, ui)

        # Process tool calls
        await _process_tool_calls(
            node, buffering_callback, state_manager, tool_buffer, response_state
        )

    # Determine empty response reason
    if empty_response_detected:
        if appears_truncated:
            return True, "truncated"
        else:
            return True, "empty"

    # Check for intention without action
    if has_intention and not has_tool_calls and not has_non_empty_content:
        return True, "intention_without_action"

    return False, None
230
+
231
+
232
+ def _check_for_truncation(combined_content: str) -> bool:
233
+ """Check if content appears to be truncated."""
234
+ if not combined_content:
235
+ return False
236
+
237
+ # Truncation indicators:
238
+ # 1. Ends with "..." or "…" (but not part of a complete sentence)
239
+ # 2. Ends mid-word (no punctuation, space, or complete word)
240
+ # 3. Contains incomplete markdown/code blocks
241
+ # 4. Ends with incomplete parentheses/brackets
242
+
243
+ # Check for ellipsis at end suggesting truncation
244
+ if combined_content.endswith(("...", "…")) and not combined_content.endswith(("....", "….")):
245
+ return True
246
+
247
+ # Check for mid-word truncation (ends with letters but no punctuation)
248
+ if combined_content and combined_content[-1].isalpha():
249
+ # Look for incomplete words by checking if last "word" seems cut off
250
+ words = combined_content.split()
251
+ if words:
252
+ last_word = words[-1]
253
+ # Common complete word endings vs likely truncations
254
+ complete_endings = (
255
+ "ing",
256
+ "ed",
257
+ "ly",
258
+ "er",
259
+ "est",
260
+ "tion",
261
+ "ment",
262
+ "ness",
263
+ "ity",
264
+ "ous",
265
+ "ive",
266
+ "able",
267
+ "ible",
268
+ )
269
+ incomplete_patterns = (
270
+ "referen",
271
+ "inte",
272
+ "proces",
273
+ "analy",
274
+ "deve",
275
+ "imple",
276
+ "execu",
277
+ )
278
+
279
+ if any(last_word.lower().endswith(pattern) for pattern in incomplete_patterns):
280
+ return True
281
+ elif len(last_word) > 2 and not any(
282
+ last_word.lower().endswith(end) for end in complete_endings
283
+ ):
284
+ # Likely truncated if doesn't end with common suffix
285
+ return True
286
+
287
+ # Check for unclosed markdown code blocks
288
+ code_block_count = combined_content.count("```")
289
+ if code_block_count % 2 != 0:
290
+ return True
291
+
292
+ # Check for unclosed brackets/parentheses (more opens than closes)
293
+ open_brackets = (
294
+ combined_content.count("[") + combined_content.count("(") + combined_content.count("{")
295
+ )
296
+ close_brackets = (
297
+ combined_content.count("]") + combined_content.count(")") + combined_content.count("}")
298
+ )
299
+ if open_brackets > close_brackets:
300
+ return True
301
+
302
+ return False
303
+
304
+
305
+ async def _display_raw_api_response(node: Any, ui: Any) -> None:
306
+ """Display raw API response data when thoughts are enabled."""
307
+
308
+ # Display the raw model response parts
309
+ await ui.muted("\n" + "=" * 60)
310
+ await ui.muted(" RAW API RESPONSE DATA:")
311
+ await ui.muted("=" * 60)
312
+
313
+ for idx, part in enumerate(node.model_response.parts):
314
+ part_data = {"part_index": idx, "part_kind": getattr(part, "part_kind", "unknown")}
315
+
316
+ # Add part-specific data
317
+ if hasattr(part, "content"):
318
+ part_data["content"] = (
319
+ part.content[:200] + "..." if len(str(part.content)) > 200 else part.content
320
+ )
321
+ if hasattr(part, "tool_name"):
322
+ part_data["tool_name"] = part.tool_name
323
+ if hasattr(part, "args"):
324
+ part_data["args"] = part.args
325
+ if hasattr(part, "tool_call_id"):
326
+ part_data["tool_call_id"] = part.tool_call_id
327
+
328
+ await ui.muted(json.dumps(part_data, indent=2))
329
+
330
+ await ui.muted("=" * 60)
331
+
332
+ # Count how many tool calls are in this response
333
+ tool_count = sum(
334
+ 1
335
+ for part in node.model_response.parts
336
+ if hasattr(part, "part_kind") and part.part_kind == "tool-call"
337
+ )
338
+ if tool_count > 0:
339
+ await ui.muted(f"\n MODEL RESPONSE: Contains {tool_count} tool call(s)")
340
+
341
+ # Display LLM response content
342
+ for part in node.model_response.parts:
343
+ if hasattr(part, "content") and isinstance(part.content, str):
344
+ content = part.content.strip()
345
+
346
+ # Skip empty content
347
+ if not content:
348
+ continue
349
+
350
+ # Skip thought content (JSON)
351
+ if content.startswith('{"thought"'):
352
+ continue
353
+
354
+ # Skip tool result content
355
+ if hasattr(part, "part_kind") and part.part_kind == "tool-return":
356
+ continue
357
+
358
+ # Display text part
359
+ await ui.muted(f" TEXT PART: {content[:200]}{'...' if len(content) > 200 else ''}")
360
+
361
+
362
async def _process_tool_calls(
    node: Any,
    tool_callback: Optional[Callable],
    state_manager: StateManager,
    tool_buffer: Optional[ToolBuffer],
    response_state: Optional[ResponseState],
) -> None:
    """Process tool calls from the node.

    Read-only tools are buffered (when a buffer is supplied) so they can run
    as one parallel batch; a write/execute tool first flushes the buffer in
    parallel, then runs sequentially. Executed calls are recorded on the
    session, and ``response_state.has_user_response`` is set when the node
    carries actual result output.

    Args:
        node: Agent node whose ``model_response.parts`` are scanned.
        tool_callback: Awaitable invoked as ``callback(part, node)``; when
            None, tool parts are neither buffered nor executed (but are still
            recorded in the session tool-call log below).
        state_manager: Session holder (show_thoughts flag, batch counter,
            tool-call log).
        tool_buffer: Optional batch buffer for read-only tools.
        response_state: Optional flags updated from the node result.
    """
    from tunacode.constants import READ_ONLY_TOOLS
    from tunacode.ui import console as ui

    # Track if we're processing tool calls
    is_processing_tools = False

    # Process tool calls
    for part in node.model_response.parts:
        if hasattr(part, "part_kind") and part.part_kind == "tool-call":
            is_processing_tools = True
            if tool_callback:
                # Check if this is a read-only tool that can be batched
                if tool_buffer is not None and part.tool_name in READ_ONLY_TOOLS:
                    # Add to buffer instead of executing immediately
                    tool_buffer.add(part, node)
                    if state_manager.session.show_thoughts:
                        await ui.muted(
                            f"⏸️ BUFFERED: {part.tool_name} (will execute in parallel batch)"
                        )
                else:
                    # Write/execute tool - process any buffered reads first
                    if tool_buffer is not None and tool_buffer.has_tasks():
                        import time

                        from .tool_executor import execute_tools_parallel

                        buffered_tasks = tool_buffer.flush()
                        batch_id = getattr(state_manager.session, "batch_counter", 0) + 1
                        state_manager.session.batch_counter = batch_id

                        start_time = time.time()

                        # Enhanced visual feedback for parallel execution.
                        # NOTE(review): this banner is emitted unconditionally,
                        # unlike the BUFFERED message above which is gated on
                        # show_thoughts — confirm this asymmetry is intentional.
                        await ui.muted("\n" + "=" * 60)
                        await ui.muted(
                            f"🚀 PARALLEL BATCH #{batch_id}: Executing {len(buffered_tasks)} read-only tools concurrently"
                        )
                        await ui.muted("=" * 60)

                        # Display details of what's being executed
                        for idx, (buffered_part, _) in enumerate(buffered_tasks, 1):
                            tool_desc = f" [{idx}] {buffered_part.tool_name}"
                            if hasattr(buffered_part, "args") and isinstance(
                                buffered_part.args, dict
                            ):
                                if (
                                    buffered_part.tool_name == "read_file"
                                    and "file_path" in buffered_part.args
                                ):
                                    tool_desc += f" → {buffered_part.args['file_path']}"
                                elif (
                                    buffered_part.tool_name == "grep"
                                    and "pattern" in buffered_part.args
                                ):
                                    tool_desc += f" → pattern: '{buffered_part.args['pattern']}'"
                                    if "include_files" in buffered_part.args:
                                        tool_desc += (
                                            f", files: '{buffered_part.args['include_files']}'"
                                        )
                                elif (
                                    buffered_part.tool_name == "list_dir"
                                    and "directory" in buffered_part.args
                                ):
                                    tool_desc += f" → {buffered_part.args['directory']}"
                                elif (
                                    buffered_part.tool_name == "glob"
                                    and "pattern" in buffered_part.args
                                ):
                                    tool_desc += f" → pattern: '{buffered_part.args['pattern']}'"
                            await ui.muted(tool_desc)
                        await ui.muted("=" * 60)

                        await execute_tools_parallel(buffered_tasks, tool_callback)

                        elapsed_time = (time.time() - start_time) * 1000
                        sequential_estimate = (
                            len(buffered_tasks) * 100
                        )  # Assume 100ms per tool average
                        speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0

                        await ui.muted(
                            f"✅ Parallel batch completed in {elapsed_time:.0f}ms "
                            f"(~{speedup:.1f}x faster than sequential)\n"
                        )

                    # Now execute the write/execute tool
                    if state_manager.session.show_thoughts:
                        await ui.warning(f"⚠️ SEQUENTIAL: {part.tool_name} (write/execute tool)")
                    await tool_callback(part, node)

    # Track tool calls in session
    if is_processing_tools:
        # Extract tool information for tracking
        for part in node.model_response.parts:
            if hasattr(part, "part_kind") and part.part_kind == "tool-call":
                tool_info = {
                    "tool": part.tool_name,
                    "args": getattr(part, "args", {}),
                    "timestamp": getattr(part, "timestamp", None),
                }
                state_manager.session.tool_calls.append(tool_info)

    # Update has_user_response based on presence of actual response content
    if (
        response_state
        and hasattr(node, "result")
        and node.result
        and hasattr(node.result, "output")
    ):
        if node.result.output:
            response_state.has_user_response = True
@@ -0,0 +1,13 @@
1
+ """Response state management for tracking agent processing state."""
2
+
3
+ from dataclasses import dataclass
4
+
5
+
6
@dataclass
class ResponseState:
    """Mutable flags tracked across the processing of one agent response."""

    # Set once the agent has produced user-visible response content.
    has_user_response: bool = False
    # Set when a task-completion marker has been accepted.
    task_completed: bool = False
    # Set when the agent is waiting on the user for further direction.
    awaiting_user_guidance: bool = False
    # Set when a final synthesis message has been produced.
    has_final_synthesis: bool = False
@@ -0,0 +1,50 @@
1
+ """Result wrapper classes for agent responses."""
2
+
3
+ from typing import Any
4
+
5
+
6
class SimpleResult:
    """Minimal result container used for synthesized fallback responses.

    Mirrors the one attribute callers read from a real agent result: ``output``.
    """

    def __init__(self, output: str):
        self.output = output
11
+
12
+
13
class AgentRunWrapper:
    """Proxy around an agent run that overrides ``result`` and adds
    ``response_state``.

    Every attribute access except the private storage slots, ``result`` and
    ``response_state`` is delegated to the wrapped run object.
    """

    def __init__(self, wrapped_run: Any, fallback_result: Any, response_state: Any = None):
        self._wrapped = wrapped_run
        self._result = fallback_result
        self.response_state = response_state

    def __getattribute__(self, name: str) -> Any:
        # Serve our own storage directly to avoid infinite recursion.
        if name in ("_wrapped", "_result", "response_state"):
            return object.__getattribute__(self, name)

        # 'result' always reflects the fallback result, shadowing whatever
        # the wrapped run exposes.
        if name == "result":
            return object.__getattribute__(self, "_result")

        # Delegate all other attributes to the wrapped object.
        try:
            return getattr(object.__getattribute__(self, "_wrapped"), name)
        except AttributeError:
            # 'from None' suppresses the chained "during handling of the
            # above exception" noise — the delegate's AttributeError is an
            # implementation detail callers should not see.
            raise AttributeError(
                f"'{type(self).__name__}' object has no attribute '{name}'"
            ) from None
35
+
36
+
37
class AgentRunWithState:
    """Thin proxy that attaches a ``response_state`` to an agent run.

    All attribute lookups other than the two locally stored names are
    forwarded to the wrapped run object.
    """

    def __init__(self, wrapped_run: Any, response_state: Any = None):
        self._wrapped = wrapped_run
        self.response_state = response_state

    def __getattribute__(self, name: str) -> Any:
        # Read local storage without re-entering this hook.
        peek = object.__getattribute__
        if name == "_wrapped" or name == "response_state":
            return peek(self, name)
        # Everything else comes from the wrapped run.
        return getattr(peek(self, "_wrapped"), name)
@@ -0,0 +1,28 @@
1
+ """Task completion detection utilities."""
2
+
3
+ from typing import Tuple
4
+
5
+
6
def check_task_completion(content: str) -> Tuple[bool, str]:
    """
    Detect the TUNACODE_TASK_COMPLETE marker at the start of the content.

    Args:
        content: The text content to check.

    Returns:
        Tuple of (is_complete, cleaned_content):
        - is_complete: True when the first non-blank line is the marker.
        - cleaned_content: The content with the marker line stripped off when
          found; otherwise the input untouched.
    """
    if not content:
        return False, content

    body_lines = content.strip().split("\n")
    first_line = body_lines[0].strip() if body_lines else ""
    if first_line != "TUNACODE_TASK_COMPLETE":
        return False, content

    # Marker found: everything after the first line is the real payload.
    remainder = "\n".join(body_lines[1:]).strip()
    return True, remainder