tunacode-cli 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- api/auth.py +13 -0
- api/users.py +8 -0
- tunacode/cli/commands.py +113 -232
- tunacode/cli/repl.py +40 -84
- tunacode/constants.py +10 -1
- tunacode/core/agents/__init__.py +0 -4
- tunacode/core/agents/main.py +345 -43
- tunacode/core/code_index.py +479 -0
- tunacode/core/setup/git_safety_setup.py +7 -9
- tunacode/core/tool_handler.py +18 -0
- tunacode/exceptions.py +13 -0
- tunacode/prompts/system.md +237 -28
- tunacode/tools/glob.py +288 -0
- tunacode/tools/grep.py +168 -195
- tunacode/tools/list_dir.py +190 -0
- tunacode/tools/read_file.py +9 -3
- tunacode/tools/read_file_async_poc.py +188 -0
- {tunacode_cli-0.0.30.dist-info → tunacode_cli-0.0.31.dist-info}/METADATA +16 -7
- {tunacode_cli-0.0.30.dist-info → tunacode_cli-0.0.31.dist-info}/RECORD +23 -21
- {tunacode_cli-0.0.30.dist-info → tunacode_cli-0.0.31.dist-info}/top_level.txt +1 -0
- tunacode/core/agents/orchestrator.py +0 -213
- tunacode/core/agents/planner_schema.py +0 -9
- tunacode/core/agents/readonly.py +0 -65
- tunacode/core/llm/planner.py +0 -62
- {tunacode_cli-0.0.30.dist-info → tunacode_cli-0.0.31.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.30.dist-info → tunacode_cli-0.0.31.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.30.dist-info → tunacode_cli-0.0.31.dist-info}/licenses/LICENSE +0 -0
tunacode/constants.py
CHANGED

@@ -7,7 +7,7 @@ Centralizes all magic strings, UI text, error messages, and application constants
 
 # Application info
 APP_NAME = "TunaCode"
-APP_VERSION = "0.0.30"
+APP_VERSION = "0.0.31"
 
 # File patterns
 GUIDE_FILE_PATTERN = "{name}.md"
@@ -29,6 +29,15 @@ TOOL_READ_FILE = "read_file"
 TOOL_WRITE_FILE = "write_file"
 TOOL_UPDATE_FILE = "update_file"
 TOOL_RUN_COMMAND = "run_command"
+TOOL_BASH = "bash"
+TOOL_GREP = "grep"
+TOOL_LIST_DIR = "list_dir"
+TOOL_GLOB = "glob"
+
+# Tool categorization
+READ_ONLY_TOOLS = [TOOL_READ_FILE, TOOL_GREP, TOOL_LIST_DIR, TOOL_GLOB]
+WRITE_TOOLS = [TOOL_WRITE_FILE, TOOL_UPDATE_FILE]
+EXECUTE_TOOLS = [TOOL_BASH, TOOL_RUN_COMMAND]
 
 # Commands
 CMD_HELP = "/help"
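
The new READ_ONLY_TOOLS / WRITE_TOOLS / EXECUTE_TOOLS lists are what the 0.0.31 agent uses to decide whether a group of tool calls is safe to run concurrently. A minimal sketch of that decision, using only the constants above (the is_parallel_safe helper is illustrative, not part of the package):

from tunacode.constants import READ_ONLY_TOOLS

def is_parallel_safe(tool_names) -> bool:
    # A batch may run concurrently only if every tool in it is read-only.
    return all(name in READ_ONLY_TOOLS for name in tool_names)

assert is_parallel_safe(["read_file", "grep", "glob"])      # all read-only
assert not is_parallel_safe(["read_file", "write_file"])    # contains a write tool
assert not is_parallel_safe(["bash"])                       # execute tool
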
tunacode/core/agents/__init__.py
CHANGED

@@ -1,12 +1,8 @@
 """Agent helper modules."""
 
 from .main import get_or_create_agent, process_request
-from .orchestrator import OrchestratorAgent
-from .readonly import ReadOnlyAgent
 
 __all__ = [
     "process_request",
     "get_or_create_agent",
-    "OrchestratorAgent",
-    "ReadOnlyAgent",
 ]
tunacode/core/agents/main.py
CHANGED

@@ -4,16 +4,21 @@ Main agent functionality and coordination for the TunaCode CLI.
 Handles agent creation, configuration, and request processing.
 """
 
+import asyncio
 import json
+import os
 import re
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Optional
+from typing import Any, Iterator, List, Optional, Tuple
 
+from tunacode.constants import READ_ONLY_TOOLS
 from tunacode.core.state import StateManager
 from tunacode.services.mcp import get_mcp_servers
 from tunacode.tools.bash import bash
+from tunacode.tools.glob import glob
 from tunacode.tools.grep import grep
+from tunacode.tools.list_dir import list_dir
 from tunacode.tools.read_file import read_file
 from tunacode.tools.run_command import run_command
 from tunacode.tools.update_file import update_file
@@ -22,6 +27,27 @@ from tunacode.types import (AgentRun, ErrorMessage, FallbackResponse, ModelName,
                             ResponseState, SimpleResult, ToolCallback, ToolCallId, ToolName)
 
 
+class ToolBuffer:
+    """Buffer for collecting read-only tool calls to execute in parallel."""
+
+    def __init__(self):
+        self.read_only_tasks: List[Tuple[Any, Any]] = []
+
+    def add(self, part: Any, node: Any) -> None:
+        """Add a read-only tool call to the buffer."""
+        self.read_only_tasks.append((part, node))
+
+    def flush(self) -> List[Tuple[Any, Any]]:
+        """Return buffered tasks and clear the buffer."""
+        tasks = self.read_only_tasks
+        self.read_only_tasks = []
+        return tasks
+
+    def has_tasks(self) -> bool:
+        """Check if there are buffered tasks."""
+        return len(self.read_only_tasks) > 0
+
+
 # Lazy import for Agent and Tool
 def get_agent_tool():
     import importlib
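
The ToolBuffer added above is a plain accumulator: read-only tool calls are parked in it and only executed when something flushes them. A minimal usage sketch under that reading; read_file_part, grep_part and node are hypothetical stand-ins for pydantic-ai tool-call parts and graph nodes:

buffer = ToolBuffer()
buffer.add(read_file_part, node)   # hypothetical read_file tool-call part
buffer.add(grep_part, node)        # hypothetical grep tool-call part

if buffer.has_tasks():
    pending = buffer.flush()       # returns both (part, node) tuples and empties the buffer
    assert len(pending) == 2 and not buffer.has_tasks()
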
@@ -37,10 +63,137 @@ def get_model_messages():
     return messages.ModelRequest, messages.ToolReturnPart
 
 
-async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+async def execute_tools_parallel(
+    tool_calls: List[Tuple[Any, Any]], callback: ToolCallback, return_exceptions: bool = True
+) -> List[Any]:
+    """
+    Execute multiple tool calls in parallel using asyncio.
+
+    Args:
+        tool_calls: List of (part, node) tuples
+        callback: The tool callback function to execute
+        return_exceptions: Whether to return exceptions or raise them
+
+    Returns:
+        List of results in the same order as input, with exceptions for failed calls
+    """
+    # Get max parallel from environment or default to CPU count
+    max_parallel = int(os.environ.get("TUNACODE_MAX_PARALLEL", os.cpu_count() or 4))
+
+    async def execute_with_error_handling(part, node):
+        try:
+            return await callback(part, node)
+        except Exception as e:
+            return e
+
+    # If we have more tools than max_parallel, execute in batches
+    if len(tool_calls) > max_parallel:
+        results = []
+        for i in range(0, len(tool_calls), max_parallel):
+            batch = tool_calls[i : i + max_parallel]
+            batch_tasks = [execute_with_error_handling(part, node) for part, node in batch]
+            batch_results = await asyncio.gather(*batch_tasks, return_exceptions=return_exceptions)
+            results.extend(batch_results)
+        return results
+    else:
+        tasks = [execute_with_error_handling(part, node) for part, node in tool_calls]
+        return await asyncio.gather(*tasks, return_exceptions=return_exceptions)
+
+
+def batch_read_only_tools(tool_calls: List[Any]) -> Iterator[List[Any]]:
+    """
+    Batch tool calls so read-only tools can be executed in parallel.
+
+    Yields batches where:
+    - Read-only tools are grouped together
+    - Write/execute tools are in their own batch (single item)
+    - Order within each batch is preserved
+
+    Args:
+        tool_calls: List of tool call objects with 'tool' attribute
+
+    Yields:
+        Batches of tool calls
+    """
+    if not tool_calls:
+        return
+
+    current_batch = []
+
+    for tool_call in tool_calls:
+        tool_name = tool_call.tool_name if hasattr(tool_call, "tool_name") else None
+
+        if tool_name in READ_ONLY_TOOLS:
+            # Add to current batch
+            current_batch.append(tool_call)
+        else:
+            # Yield any pending read-only batch
+            if current_batch:
+                yield current_batch
+                current_batch = []
+
+            # Yield write/execute tool as single-item batch
+            yield [tool_call]
+
+    # Yield any remaining read-only tools
+    if current_batch:
+        yield current_batch
+
+
+async def create_buffering_callback(
+    original_callback: ToolCallback, buffer: ToolBuffer, state_manager: StateManager
+) -> ToolCallback:
+    """
+    Create a callback wrapper that buffers read-only tools for parallel execution.
+
+    Args:
+        original_callback: The original tool callback
+        buffer: ToolBuffer instance to store read-only tools
+        state_manager: StateManager for UI access
+
+    Returns:
+        A wrapped callback function
+    """
+
+    async def buffering_callback(part, node):
+        tool_name = getattr(part, "tool_name", None)
+
+        if tool_name in READ_ONLY_TOOLS:
+            # Buffer read-only tools
+            buffer.add(part, node)
+            # Don't execute yet - will be executed in parallel batch
+            return None
+
+        # Non-read-only tool encountered - flush buffer first
+        if buffer.has_tasks():
+            buffered_tasks = buffer.flush()
+
+            # Execute buffered read-only tools in parallel
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+
+                await ui.muted(f"Executing {len(buffered_tasks)} read-only tools in parallel")
+
+            await execute_tools_parallel(buffered_tasks, original_callback)
+
+        # Execute the non-read-only tool
+        return await original_callback(part, node)
+
+    return buffering_callback
+
+
+async def _process_node(
+    node,
+    tool_callback: Optional[ToolCallback],
+    state_manager: StateManager,
+    tool_buffer: Optional[ToolBuffer] = None,
+):
     from tunacode.ui import console as ui
     from tunacode.utils.token_counter import estimate_tokens
 
+    # Use the original callback directly - parallel execution will be handled differently
+    buffering_callback = tool_callback
+
     if hasattr(node, "request"):
         state_manager.session.messages.append(node.request)
 
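
Taken together, batch_read_only_tools groups a mixed stream of tool calls into runs that are safe to parallelize, and execute_tools_parallel fans each run out with asyncio.gather, capped by the TUNACODE_MAX_PARALLEL environment variable (defaulting to the CPU count). A rough sketch of how the two compose, assuming both functions are in scope (they live in tunacode.core.agents.main); SimpleNamespace objects stand in for real tool-call parts and the callback is a dummy:

import asyncio
from types import SimpleNamespace

async def demo():
    calls = [SimpleNamespace(tool_name=name) for name in
             ["read_file", "grep", "write_file", "glob", "list_dir"]]

    async def callback(part, node):
        # Real code would dispatch to the actual tool; here we just echo.
        return f"ran {part.tool_name}"

    for batch in batch_read_only_tools(calls):
        # Yields [read_file, grep], then [write_file] alone, then [glob, list_dir].
        results = await execute_tools_parallel([(part, None) for part in batch], callback)
        print(results)

asyncio.run(demo())
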
@@ -55,9 +208,43 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager
 
     # Enhanced display when thoughts are enabled
     if state_manager.session.show_thoughts:
+        # Show raw API response data
         import json
         import re
 
+        # Display the raw model response parts
+        await ui.muted("\n" + "=" * 60)
+        await ui.muted(" RAW API RESPONSE DATA:")
+        await ui.muted("=" * 60)
+
+        for idx, part in enumerate(node.model_response.parts):
+            part_data = {"part_index": idx, "part_kind": getattr(part, "part_kind", "unknown")}
+
+            # Add part-specific data
+            if hasattr(part, "content"):
+                part_data["content"] = (
+                    part.content[:200] + "..." if len(str(part.content)) > 200 else part.content
+                )
+            if hasattr(part, "tool_name"):
+                part_data["tool_name"] = part.tool_name
+            if hasattr(part, "args"):
+                part_data["args"] = part.args
+            if hasattr(part, "tool_call_id"):
+                part_data["tool_call_id"] = part.tool_call_id
+
+            await ui.muted(json.dumps(part_data, indent=2))
+
+        await ui.muted("=" * 60)
+
+        # Count how many tool calls are in this response
+        tool_count = sum(
+            1
+            for part in node.model_response.parts
+            if hasattr(part, "part_kind") and part.part_kind == "tool-call"
+        )
+        if tool_count > 0:
+            await ui.muted(f"\n MODEL RESPONSE: Contains {tool_count} tool call(s)")
+
         # Display LLM response content
         for part in node.model_response.parts:
             if hasattr(part, "content") and isinstance(part.content, str):
@@ -101,47 +288,29 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager
                 cleaned_thought = thought.replace('\\"', '"').replace("\\n", " ")
                 await ui.muted(f"REASONING: {cleaned_thought}")
 
-    # Check for tool calls and
+    # Check for tool calls and collect them for potential parallel execution
     has_tool_calls = False
+    tool_parts = []  # Collect all tool calls from this node
+
     for part in node.model_response.parts:
         if part.part_kind == "tool-call" and tool_callback:
             has_tool_calls = True
+            tool_parts.append(part)
 
             # Display tool call details when thoughts are enabled
             if state_manager.session.show_thoughts:
-                [original lines 112-123 are not recoverable from this view]
-                        await ui.muted(f"Writing: {filename}")
-                    elif part.tool_name == "update_file" and "file_path" in part.args:
-                        file_path = part.args["file_path"]
-                        filename = Path(file_path).name
-                        await ui.muted(f"Updating: {filename}")
-                    elif (
-                        part.tool_name in ["run_command", "bash"] and "command" in part.args
-                    ):
-                        command = part.args["command"]
-                        # Truncate long commands
-                        display_cmd = (
-                            command if len(command) <= 60 else command[:57] + "..."
-                        )
-                        await ui.muted(f"Command: {display_cmd}")
-                    else:
-                        # For other tools, show full args but more compact
-                        args_str = json.dumps(part.args, indent=2)
-                        await ui.muted(f"ARGS: {args_str}")
-                else:
-                    # If args is not a dict (e.g., a string), just display it as is
-                    await ui.muted(f"ARGS: {part.args}")
+                # Show each tool as it's collected
+                tool_desc = f" COLLECTED: {part.tool_name}"
+                if hasattr(part, "args") and isinstance(part.args, dict):
+                    if part.tool_name == "read_file" and "file_path" in part.args:
+                        tool_desc += f" → {part.args['file_path']}"
+                    elif part.tool_name == "grep" and "pattern" in part.args:
+                        tool_desc += f" → pattern: '{part.args['pattern']}'"
+                    elif part.tool_name == "list_dir" and "directory" in part.args:
+                        tool_desc += f" → {part.args['directory']}"
+                    elif part.tool_name == "run_command" and "command" in part.args:
+                        tool_desc += f" → {part.args['command']}"
+                await ui.muted(tool_desc)
 
             # Track this tool call (moved outside thoughts block)
             state_manager.session.tool_calls.append(
@@ -156,6 +325,7 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager
             if (
                 part.tool_name == "read_file"
                 and hasattr(part, "args")
+                and isinstance(part.args, dict)
                 and "file_path" in part.args
             ):
                 state_manager.session.files_in_context.add(part.args["file_path"])
@@ -165,9 +335,71 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager
                     f"\nFILES IN CONTEXT: {list(state_manager.session.files_in_context)}"
                 )
 
-    [original line not recoverable from this view]
+    # Execute tool calls - with ACTUAL parallel execution for read-only batches
+    if tool_parts:
+        if state_manager.session.show_thoughts:
+            await ui.muted(
+                f"\n NODE SUMMARY: {len(tool_parts)} tool(s) collected in this response"
+            )
+
+        # Check if ALL tools in this node are read-only
+        all_read_only = all(part.tool_name in READ_ONLY_TOOLS for part in tool_parts)
+
+        if all_read_only and len(tool_parts) > 1 and buffering_callback:
+            # Execute read-only tools in parallel!
+            import time
+
+            start_time = time.time()
+
+            if state_manager.session.show_thoughts:
+                await ui.muted("\n" + "=" * 60)
+                await ui.muted(
+                    f" PARALLEL BATCH: Executing {len(tool_parts)} read-only tools concurrently"
+                )
+                await ui.muted("=" * 60)
 
-    [original line not recoverable from this view]
+                for idx, part in enumerate(tool_parts, 1):
+                    tool_desc = f"  [{idx}] {part.tool_name}"
+                    if hasattr(part, "args") and isinstance(part.args, dict):
+                        if part.tool_name == "read_file" and "file_path" in part.args:
+                            tool_desc += f" → {part.args['file_path']}"
+                        elif part.tool_name == "grep" and "pattern" in part.args:
+                            tool_desc += f" → pattern: '{part.args['pattern']}'"
+                        elif part.tool_name == "list_dir" and "directory" in part.args:
+                            tool_desc += f" → {part.args['directory']}"
+                        elif part.tool_name == "glob" and "pattern" in part.args:
+                            tool_desc += f" → pattern: '{part.args['pattern']}'"
+                    await ui.muted(tool_desc)
+                await ui.muted("=" * 60)
+
+            # Execute in parallel
+            tool_tuples = [(part, node) for part in tool_parts]
+            await execute_tools_parallel(tool_tuples, buffering_callback)
+
+            if state_manager.session.show_thoughts:
+                elapsed_time = (time.time() - start_time) * 1000
+                sequential_estimate = len(tool_parts) * 100
+                speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
+                await ui.muted(
+                    f" Parallel batch completed in {elapsed_time:.0f}ms ({speedup:.1f}x faster than sequential)"
+                )
+
+        else:
+            # Sequential execution for mixed or write/execute tools
+            for part in tool_parts:
+                if (
+                    state_manager.session.show_thoughts
+                    and part.tool_name not in READ_ONLY_TOOLS
+                ):
+                    await ui.muted(f"\n SEQUENTIAL: {part.tool_name} (write/execute tool)")
+
+                # Execute the tool
+                if buffering_callback:
+                    await buffering_callback(part, node)
+
+    # Handle tool returns
+    for part in node.model_response.parts:
+        if part.part_kind == "tool-return":
             obs_msg = f"OBSERVATION[{part.tool_name}]: {part.content[:2_000]}"
             state_manager.session.messages.append(obs_msg)
 
@@ -180,10 +412,22 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager
                     await ui.muted(f"TOOL RESULT: {display_content}")
 
     # If no structured tool calls found, try parsing JSON from text content
-    if not has_tool_calls and
+    if not has_tool_calls and buffering_callback:
         for part in node.model_response.parts:
             if hasattr(part, "content") and isinstance(part.content, str):
-                await extract_and_execute_tool_calls(
+                await extract_and_execute_tool_calls(
+                    part.content, buffering_callback, state_manager
+                )
+
+    # Final flush: disabled temporarily while fixing the parallel execution design
+    # The buffer is not being used in the current implementation
+    # if tool_callback and buffer.has_tasks():
+    #     buffered_tasks = buffer.flush()
+    #     if state_manager.session.show_thoughts:
+    #         await ui.muted(
+    #             f"Final flush: Executing {len(buffered_tasks)} remaining read-only tools in parallel"
+    #         )
+    #     await execute_tools_parallel(buffered_tasks, tool_callback)
 
 
 def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
@@ -213,7 +457,9 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
             system_prompt=system_prompt,
             tools=[
                 Tool(bash, max_retries=max_retries),
+                Tool(glob, max_retries=max_retries),
                 Tool(grep, max_retries=max_retries),
+                Tool(list_dir, max_retries=max_retries),
                 Tool(read_file, max_retries=max_retries),
                 Tool(run_command, max_retries=max_retries),
                 Tool(update_file, max_retries=max_retries),
@@ -416,11 +662,25 @@ async def process_request(
         # Reset iteration tracking for this request
         state_manager.session.iteration_count = 0
 
+        # Create a request-level buffer for batching read-only tools across nodes
+        tool_buffer = ToolBuffer()
+
+        # Show what we're sending to the API when thoughts are enabled
+        if state_manager.session.show_thoughts:
+            from tunacode.ui import console as ui
+
+            await ui.muted("\n" + "=" * 60)
+            await ui.muted("📤 SENDING TO API:")
+            await ui.muted(f"Message: {message}")
+            await ui.muted(f"Model: {model}")
+            await ui.muted(f"Message History Length: {len(mh)}")
+            await ui.muted("=" * 60)
+
         async with agent.iter(message, message_history=mh) as agent_run:
             i = 0
             async for node in agent_run:
                 state_manager.session.current_iteration = i + 1
-                await _process_node(node, tool_callback, state_manager)
+                await _process_node(node, tool_callback, state_manager, tool_buffer)
                 if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
                     if node.result.output:
                         response_state.has_user_response = True
@@ -452,6 +712,48 @@ async def process_request(
                         await ui.warning(f"Reached maximum iterations ({max_iterations})")
                     break
 
+            # Final flush: execute any remaining buffered read-only tools
+            if tool_callback and tool_buffer.has_tasks():
+                import time
+
+                from tunacode.ui import console as ui
+
+                buffered_tasks = tool_buffer.flush()
+                start_time = time.time()
+
+                await ui.muted("\n" + "=" * 60)
+                await ui.muted(
+                    f"🚀 FINAL BATCH: Executing {len(buffered_tasks)} buffered read-only tools"
+                )
+                await ui.muted("=" * 60)
+
+                for idx, (part, node) in enumerate(buffered_tasks, 1):
+                    tool_desc = f"  [{idx}] {part.tool_name}"
+                    if hasattr(part, "args") and isinstance(part.args, dict):
+                        if part.tool_name == "read_file" and "file_path" in part.args:
+                            tool_desc += f" → {part.args['file_path']}"
+                        elif part.tool_name == "grep" and "pattern" in part.args:
+                            tool_desc += f" → pattern: '{part.args['pattern']}'"
+                            if "include_files" in part.args:
+                                tool_desc += f", files: '{part.args['include_files']}'"
+                        elif part.tool_name == "list_dir" and "directory" in part.args:
+                            tool_desc += f" → {part.args['directory']}"
+                        elif part.tool_name == "glob" and "pattern" in part.args:
+                            tool_desc += f" → pattern: '{part.args['pattern']}'"
+                    await ui.muted(tool_desc)
+                await ui.muted("=" * 60)
+
+                await execute_tools_parallel(buffered_tasks, tool_callback)
+
+                elapsed_time = (time.time() - start_time) * 1000
+                sequential_estimate = len(buffered_tasks) * 100
+                speedup = sequential_estimate / elapsed_time if elapsed_time > 0 else 1.0
+
+                await ui.muted(
+                    f"✅ Final batch completed in {elapsed_time:.0f}ms "
+                    f"(~{speedup:.1f}x faster than sequential)\n"
+                )
+
         # If we need to add a fallback response, create a wrapper
         if not response_state.has_user_response and i >= max_iterations and fallback_enabled:
             patch_tool_messages("Task incomplete", state_manager=state_manager)
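
The request-level flow in 0.0.31 is therefore: process_request creates one ToolBuffer, hands it to _process_node on every iteration, and once the agent loop ends it flushes whatever read-only calls are still buffered through execute_tools_parallel. A condensed view of that wiring (not verbatim source; error handling and UI output omitted):

tool_buffer = ToolBuffer()

async with agent.iter(message, message_history=mh) as agent_run:
    async for node in agent_run:
        # Read-only tool calls may run in parallel per node or stay buffered.
        await _process_node(node, tool_callback, state_manager, tool_buffer)

# Anything still buffered runs in one final parallel batch.
if tool_callback and tool_buffer.has_tasks():
    await execute_tools_parallel(tool_buffer.flush(), tool_callback)
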
@@ -472,10 +774,10 @@ async def process_request(
 
                 # Track specific operations
                 if tool_name in ["write_file", "update_file"] and hasattr(part, "args"):
-                    if "file_path" in part.args:
+                    if isinstance(part.args, dict) and "file_path" in part.args:
                         files_modified.add(part.args["file_path"])
                 elif tool_name in ["run_command", "bash"] and hasattr(part, "args"):
-                    if "command" in part.args:
+                    if isinstance(part.args, dict) and "command" in part.args:
                         commands_run.append(part.args["command"])
 
                 # Build fallback response with context
|