tunacode-cli 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- tunacode/cli/commands.py +306 -14
- tunacode/cli/repl.py +41 -2
- tunacode/configuration/defaults.py +1 -0
- tunacode/constants.py +1 -1
- tunacode/core/agents/main.py +218 -2
- tunacode/core/state.py +10 -2
- tunacode/prompts/system.txt +22 -0
- tunacode/tools/grep.py +760 -0
- tunacode/tools/read_file.py +15 -10
- tunacode/tools/run_command.py +13 -7
- tunacode/tools/update_file.py +9 -10
- tunacode/tools/write_file.py +8 -9
- {tunacode_cli-0.0.13.dist-info → tunacode_cli-0.0.15.dist-info}/METADATA +50 -14
- {tunacode_cli-0.0.13.dist-info → tunacode_cli-0.0.15.dist-info}/RECORD +18 -17
- {tunacode_cli-0.0.13.dist-info → tunacode_cli-0.0.15.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.13.dist-info → tunacode_cli-0.0.15.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.13.dist-info → tunacode_cli-0.0.15.dist-info}/licenses/LICENSE +0 -0
- {tunacode_cli-0.0.13.dist-info → tunacode_cli-0.0.15.dist-info}/top_level.txt +0 -0
tunacode/core/agents/main.py
CHANGED
@@ -4,6 +4,8 @@ Main agent functionality and coordination for the TunaCode CLI.
 Handles agent creation, configuration, and request processing.
 """
 
+import json
+import re
 from datetime import datetime, timezone
 from typing import Optional
 
@@ -13,32 +15,125 @@ from pydantic_ai.messages import ModelRequest, ToolReturnPart
 from tunacode.core.state import StateManager
 from tunacode.services.mcp import get_mcp_servers
 from tunacode.tools.bash import bash
+from tunacode.tools.grep import grep
 from tunacode.tools.read_file import read_file
 from tunacode.tools.run_command import run_command
 from tunacode.tools.update_file import update_file
 from tunacode.tools.write_file import write_file
-from tunacode.types import (
-
+from tunacode.types import (
+    AgentRun,
+    ErrorMessage,
+    ModelName,
+    PydanticAgent,
+    ToolCallback,
+    ToolCallId,
+    ToolName,
+)
 
 
 async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager: StateManager):
     if hasattr(node, "request"):
         state_manager.session.messages.append(node.request)
 
+    if hasattr(node, "thought") and node.thought:
+        state_manager.session.messages.append({"thought": node.thought})
+        # Display thought immediately if show_thoughts is enabled
+        if state_manager.session.show_thoughts:
+            from tunacode.ui import console as ui
+            await ui.muted(f"💭 THOUGHT: {node.thought}")
+
     if hasattr(node, "model_response"):
         state_manager.session.messages.append(node.model_response)
+
+        # Enhanced ReAct thought processing
+        if state_manager.session.show_thoughts:
+            from tunacode.ui import console as ui
+            import json
+            import re
+
+            for part in node.model_response.parts:
+                if hasattr(part, 'content') and isinstance(part.content, str):
+                    content = part.content.strip()
+
+                    # Pattern 1: Inline JSON thoughts {"thought": "..."}
+                    thought_pattern = r'\{"thought":\s*"([^"]+)"\}'
+                    matches = re.findall(thought_pattern, content)
+                    for thought in matches:
+                        await ui.muted(f"💭 REASONING: {thought}")
+
+                    # Pattern 2: Standalone thought JSON objects
+                    try:
+                        if content.startswith('{"thought"'):
+                            thought_obj = json.loads(content)
+                            if 'thought' in thought_obj:
+                                await ui.muted(f"💭 REASONING: {thought_obj['thought']}")
+                    except (json.JSONDecodeError, KeyError):
+                        pass
+
+                    # Pattern 3: Multi-line thoughts with context
+                    multiline_pattern = r'\{"thought":\s*"([^"]+(?:\\.[^"]*)*?)"\}'
+                    multiline_matches = re.findall(multiline_pattern, content, re.DOTALL)
+                    for thought in multiline_matches:
+                        if thought not in [m for m in matches]:  # Avoid duplicates
+                            # Clean up escaped characters
+                            cleaned_thought = thought.replace('\\"', '"').replace('\\n', ' ')
+                            await ui.muted(f"💭 REASONING: {cleaned_thought}")
+
+                    # Pattern 4: Text-based reasoning indicators
+                    reasoning_indicators = [
+                        (r'I need to (.+?)\.', 'PLANNING'),
+                        (r'Let me (.+?)\.', 'ACTION'),
+                        (r'The output shows (.+?)\.', 'OBSERVATION'),
+                        (r'Based on (.+?), I should (.+?)\.', 'DECISION')
+                    ]
+
+                    for pattern, label in reasoning_indicators:
+                        indicator_matches = re.findall(pattern, content, re.IGNORECASE)
+                        for match in indicator_matches:
+                            if isinstance(match, tuple):
+                                match_text = ' '.join(match)
+                            else:
+                                match_text = match
+                            await ui.muted(f"🎯 {label}: {match_text}")
+                            break  # Only show first match per pattern
+
+        # Check for tool calls and fallback to JSON parsing if needed
+        has_tool_calls = False
         for part in node.model_response.parts:
             if part.part_kind == "tool-call" and tool_callback:
+                has_tool_calls = True
                 await tool_callback(part, node)
+            elif part.part_kind == "tool-return":
+                obs_msg = f"OBSERVATION[{part.tool_name}]: {part.content[:2_000]}"
+                state_manager.session.messages.append(obs_msg)
+
+        # If no structured tool calls found, try parsing JSON from text content
+        if not has_tool_calls and tool_callback:
+            for part in node.model_response.parts:
+                if hasattr(part, 'content') and isinstance(part.content, str):
+                    await extract_and_execute_tool_calls(part.content, tool_callback, state_manager)
 
 
 def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
     if model not in state_manager.session.agents:
         max_retries = state_manager.session.user_config["settings"]["max_retries"]
+
+        # Load system prompt
+        import os
+        from pathlib import Path
+        prompt_path = Path(__file__).parent.parent.parent / "prompts" / "system.txt"
+        try:
+            with open(prompt_path, "r", encoding="utf-8") as f:
+                system_prompt = f.read().strip()
+        except FileNotFoundError:
+            system_prompt = None
+
         state_manager.session.agents[model] = Agent(
             model=model,
+            system_prompt=system_prompt,
             tools=[
                 Tool(bash, max_retries=max_retries),
+                Tool(grep, max_retries=max_retries),
                 Tool(read_file, max_retries=max_retries),
                 Tool(run_command, max_retries=max_retries),
                 Tool(update_file, max_retries=max_retries),
@@ -107,6 +202,110 @@ def patch_tool_messages(
     )
 
 
+async def parse_json_tool_calls(text: str, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+    """
+    Parse JSON tool calls from text when structured tool calling fails.
+    Fallback for when API providers don't support proper tool calling.
+    """
+    if not tool_callback:
+        return
+
+    # Pattern for JSON tool calls: {"tool": "tool_name", "args": {...}}
+    # Find potential JSON objects and parse them
+    potential_jsons = []
+    brace_count = 0
+    start_pos = -1
+
+    for i, char in enumerate(text):
+        if char == '{':
+            if brace_count == 0:
+                start_pos = i
+            brace_count += 1
+        elif char == '}':
+            brace_count -= 1
+            if brace_count == 0 and start_pos != -1:
+                potential_json = text[start_pos:i+1]
+                try:
+                    parsed = json.loads(potential_json)
+                    if isinstance(parsed, dict) and 'tool' in parsed and 'args' in parsed:
+                        potential_jsons.append((parsed['tool'], parsed['args']))
+                except json.JSONDecodeError:
+                    pass
+                start_pos = -1
+
+    matches = potential_jsons
+
+    for tool_name, args in matches:
+        try:
+            # Create a mock tool call object
+            class MockToolCall:
+                def __init__(self, tool_name: str, args: dict):
+                    self.tool_name = tool_name
+                    self.args = args
+                    self.tool_call_id = f"fallback_{datetime.now().timestamp()}"
+
+            class MockNode:
+                pass
+
+            # Execute the tool through the callback
+            mock_call = MockToolCall(tool_name, args)
+            mock_node = MockNode()
+
+            await tool_callback(mock_call, mock_node)
+
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+                await ui.muted(f"🔧 FALLBACK: Executed {tool_name} via JSON parsing")
+
+        except Exception as e:
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+                await ui.error(f"❌ Error executing fallback tool {tool_name}: {str(e)}")
+
+
+async def extract_and_execute_tool_calls(text: str, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+    """
+    Extract tool calls from text content and execute them.
+    Supports multiple formats for maximum compatibility.
+    """
+    if not tool_callback:
+        return
+
+    # Format 1: {"tool": "name", "args": {...}}
+    await parse_json_tool_calls(text, tool_callback, state_manager)
+
+    # Format 2: Tool calls in code blocks
+    code_block_pattern = r'```json\s*(\{(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*"tool"(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*\})\s*```'
+    code_matches = re.findall(code_block_pattern, text, re.MULTILINE | re.DOTALL)
+
+    for match in code_matches:
+        try:
+            tool_data = json.loads(match)
+            if 'tool' in tool_data and 'args' in tool_data:
+                class MockToolCall:
+                    def __init__(self, tool_name: str, args: dict):
+                        self.tool_name = tool_name
+                        self.args = args
+                        self.tool_call_id = f"codeblock_{datetime.now().timestamp()}"
+
+                class MockNode:
+                    pass
+
+                mock_call = MockToolCall(tool_data['tool'], tool_data['args'])
+                mock_node = MockNode()
+
+                await tool_callback(mock_call, mock_node)
+
+                if state_manager.session.show_thoughts:
+                    from tunacode.ui import console as ui
+                    await ui.muted(f"🔧 FALLBACK: Executed {tool_data['tool']} from code block")
+
+        except (json.JSONDecodeError, KeyError, Exception) as e:
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+                await ui.error(f"❌ Error parsing code block tool call: {str(e)}")
+
+
 async def process_request(
     model: ModelName,
     message: str,
@@ -115,7 +314,24 @@ async def process_request(
 ) -> AgentRun:
     agent = get_or_create_agent(model, state_manager)
     mh = state_manager.session.messages.copy()
+    # Get max iterations from config (default: 20)
+    max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 20)
+
     async with agent.iter(message, message_history=mh) as agent_run:
+        i = 0
         async for node in agent_run:
             await _process_node(node, tool_callback, state_manager)
+            i += 1
+
+            # Display iteration progress if thoughts are enabled
+            if state_manager.session.show_thoughts and i > 1:
+                from tunacode.ui import console as ui
+                await ui.muted(f"🔄 Iteration {i}/{max_iterations}")
+
+            if i >= max_iterations:
+                if state_manager.session.show_thoughts:
+                    from tunacode.ui import console as ui
+                    await ui.warning(f"⚠️ Reached maximum iterations ({max_iterations})")
+                break
+
     return agent_run
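Note on the fallback parser added above: when a provider returns tool calls as plain text instead of structured tool-call parts, parse_json_tool_calls scans the text for balanced {...} spans and executes any that parse as {"tool": ..., "args": {...}} through the regular tool callback. Below is a minimal, standalone sketch of that brace-matching scan; the helper name find_tool_calls, the sample reply string, and the file_path argument key are illustrative only, not part of the package.

    import json

    # Sketch of the brace-matching scan: track brace depth, json.loads each
    # balanced {...} span, and keep the ones shaped like {"tool": ..., "args": {...}}.
    def find_tool_calls(text: str) -> list[tuple[str, dict]]:
        calls, depth, start = [], 0, -1
        for i, ch in enumerate(text):
            if ch == "{":
                if depth == 0:
                    start = i
                depth += 1
            elif ch == "}":
                depth -= 1
                if depth == 0 and start != -1:
                    try:
                        obj = json.loads(text[start:i + 1])
                        if isinstance(obj, dict) and "tool" in obj and "args" in obj:
                            calls.append((obj["tool"], obj["args"]))
                    except json.JSONDecodeError:
                        pass
                    start = -1
        return calls

    # Illustrative model reply with an embedded JSON tool call.
    reply = 'Let me check the file. {"tool": "read_file", "args": {"file_path": "README.md"}}'
    print(find_tool_calls(reply))  # [('read_file', {'file_path': 'README.md'})]

In the actual diff, each hit is wrapped in a MockToolCall and handed to the normal tool_callback, so fallback calls travel the same path as structured tool calls.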
tunacode/core/state.py
CHANGED
@@ -8,8 +8,15 @@ import uuid
 from dataclasses import dataclass, field
 from typing import Any, Optional
 
-from tunacode.types import (
-
+from tunacode.types import (
+    DeviceId,
+    InputSessions,
+    MessageHistory,
+    ModelName,
+    SessionId,
+    ToolName,
+    UserConfig,
+)
 
 
 @dataclass
@@ -25,6 +32,7 @@ class SessionState:
     tool_ignore: list[ToolName] = field(default_factory=list)
     yolo: bool = False
     undo_initialized: bool = False
+    show_thoughts: bool = False
     session_id: SessionId = field(default_factory=lambda: str(uuid.uuid4()))
     device_id: Optional[DeviceId] = None
     input_sessions: InputSessions = field(default_factory=dict)
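Note on the new show_thoughts field: it is the session-level switch that gates all of the 💭/🔄/⚠️ debug output added in main.py above. A tiny sketch of the intended usage pattern follows; SessionStateSketch and maybe_trace are illustrative stand-ins, not the real classes.

    from dataclasses import dataclass, field

    # Trimmed-down stand-in for SessionState with the new flag.
    @dataclass
    class SessionStateSketch:
        show_thoughts: bool = False
        messages: list = field(default_factory=list)

    def maybe_trace(state: SessionStateSketch, thought: str) -> None:
        # Same guard used throughout main.py: only emit debug lines when enabled.
        if state.show_thoughts:
            print(f"💭 THOUGHT: {thought}")

    state = SessionStateSketch()
    maybe_trace(state, "hidden while show_thoughts is False")  # prints nothing
    state.show_thoughts = True
    maybe_trace(state, "visible once the flag is enabled")     # prints the thought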
tunacode/prompts/system.txt
CHANGED
@@ -66,6 +66,28 @@ CORRECT: First `read_file("tools/base.py")` to see the base class, then `write_f
 - `run_command("pwd")` - Show current directory
 - `run_command("cat pyproject.toml | grep -A5 dependencies")` - Check dependencies
 
+## ReAct Pattern: Reasoning and Acting
+
+Follow this pattern for complex tasks:
+
+1. **THINK**: Output {"thought": "I need to understand the task..."} to reason about what to do
+2. **ACT**: Use tools to gather information or make changes
+3. **OBSERVE**: Analyze tool outputs with {"thought": "The output shows..."}
+4. **ITERATE**: Continue thinking and acting until the task is complete
+
+Examples:
+- {"thought": "User wants me to analyze a file. I should first read it to understand its contents."}
+- Use read_file tool
+- {"thought": "The file contains Python code. I can see it needs optimization in the loop section."}
+- Use update_file tool
+
+**Key principles:**
+- Always think before acting
+- Use tools immediately after thinking
+- Reason about tool outputs before continuing
+- Break complex tasks into logical steps
+
 USE YOUR TOOLS NOW!
 
 If asked, you were created by the grifter tunahors
+
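Note on the prompt addition: the inline {"thought": "..."} objects this section asks the model to emit are exactly what the Pattern 1 regex in _process_node picks up when show_thoughts is enabled. A small sketch running that regex over an illustrative reply (the sample text is made up, the regex is copied from the diff):

    import re

    # Pattern 1 regex from _process_node.
    thought_pattern = r'\{"thought":\s*"([^"]+)"\}'

    sample = (
        '{"thought": "User wants me to analyze a file. I should read it first."} '
        'Now calling the read_file tool.'
    )

    for thought in re.findall(thought_pattern, sample):
        print(f"💭 REASONING: {thought}")
    # -> 💭 REASONING: User wants me to analyze a file. I should read it first.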