tunacode-cli 0.0.36__py3-none-any.whl → 0.0.37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- tunacode/cli/commands/implementations/__init__.py +2 -1
- tunacode/cli/commands/implementations/system.py +39 -0
- tunacode/cli/commands/registry.py +8 -1
- tunacode/cli/repl.py +91 -30
- tunacode/constants.py +1 -1
- tunacode/core/agents/main.py +41 -1
- tunacode/core/agents/utils.py +304 -0
- tunacode/core/setup/config_setup.py +0 -1
- tunacode/core/state.py +4 -0
- tunacode/ui/console.py +4 -0
- tunacode/ui/panels.py +74 -0
- tunacode/ui/utils.py +3 -0
- {tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/METADATA +17 -17
- {tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/RECORD +18 -16
- {tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/licenses/LICENSE +0 -0
- {tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/top_level.txt +0 -0
tunacode/cli/commands/implementations/__init__.py
CHANGED

@@ -12,13 +12,14 @@ from .debug import (
 )
 from .development import BranchCommand, InitCommand
 from .model import ModelCommand
-from .system import ClearCommand, HelpCommand, RefreshConfigCommand, UpdateCommand
+from .system import ClearCommand, HelpCommand, RefreshConfigCommand, StreamingCommand, UpdateCommand

 __all__ = [
     # System commands
     "HelpCommand",
     "ClearCommand",
     "RefreshConfigCommand",
+    "StreamingCommand",
     "UpdateCommand",
     # Debug commands
     "YoloCommand",
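For illustration, after this export change the new command is importable alongside the existing system commands; a minimal, hypothetical snippet based on the __all__ entries above:

# Illustrative only: the new command is re-exported with the existing ones.
from tunacode.cli.commands.implementations import (
    ClearCommand,
    HelpCommand,
    StreamingCommand,
)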
tunacode/cli/commands/implementations/system.py
CHANGED

@@ -175,3 +175,42 @@ class UpdateCommand(SimpleCommand):
             await ui.error(f"Update failed: {e}")
         except FileNotFoundError:
             await ui.error(f"Could not find {installation_method} executable")
+
+
+class StreamingCommand(SimpleCommand):
+    """Toggle streaming display on/off."""
+
+    spec = CommandSpec(
+        name="streaming",
+        aliases=["/streaming"],
+        description="Toggle streaming display on/off",
+        category=CommandCategory.SYSTEM,
+    )
+
+    async def execute(self, args: List[str], context: CommandContext) -> None:
+        current_setting = context.state_manager.session.user_config.get("settings", {}).get(
+            "enable_streaming", True
+        )
+
+        if args and args[0].lower() in ["on", "true", "1", "enable", "enabled"]:
+            new_setting = True
+        elif args and args[0].lower() in ["off", "false", "0", "disable", "disabled"]:
+            new_setting = False
+        else:
+            # Toggle current setting
+            new_setting = not current_setting
+
+        # Update the configuration
+        if "settings" not in context.state_manager.session.user_config:
+            context.state_manager.session.user_config["settings"] = {}
+        context.state_manager.session.user_config["settings"]["enable_streaming"] = new_setting
+
+        status = "enabled" if new_setting else "disabled"
+        await ui.success(f"Streaming display {status}")
+
+        if new_setting:
+            await ui.muted(
+                "Responses will be displayed progressively as they are generated (default)"
+            )
+        else:
+            await ui.muted("Responses will be displayed all at once after completion")
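As a usage illustration of the argument handling above (status messages quoted from the ui.success/ui.muted strings in the hunk):

    /streaming            toggles the stored "enable_streaming" setting
    /streaming on         accepts on / true / 1 / enable / enabled   -> "Streaming display enabled"
    /streaming off        accepts off / false / 0 / disable / disabled -> "Streaming display disabled"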
tunacode/cli/commands/registry.py
CHANGED

@@ -19,7 +19,13 @@ from .implementations.debug import (
 )
 from .implementations.development import BranchCommand, InitCommand
 from .implementations.model import ModelCommand
-from .implementations.system import
+from .implementations.system import (
+    ClearCommand,
+    HelpCommand,
+    RefreshConfigCommand,
+    StreamingCommand,
+    UpdateCommand,
+)


 @dataclass

@@ -106,6 +112,7 @@ class CommandRegistry:
             FixCommand,
             ParseToolsCommand,
             RefreshConfigCommand,
+            StreamingCommand,
             UpdateCommand,
             HelpCommand,
             BranchCommand,
tunacode/cli/repl.py
CHANGED

@@ -70,8 +70,9 @@ async def _tool_confirm(tool_call, node, state_manager: StateManager):
         await _tool_ui.log_mcp(title, args)
         return

-    # Stop spinner during user interaction
-    state_manager.session.spinner
+    # Stop spinner during user interaction (only if not streaming)
+    if not state_manager.session.is_streaming_active and state_manager.session.spinner:
+        state_manager.session.spinner.stop()

     # Create confirmation request
     request = tool_handler.create_confirmation_request(tool_call.tool_name, args)

@@ -84,7 +85,10 @@ async def _tool_confirm(tool_call, node, state_manager: StateManager):
         raise UserAbortError("User aborted.")

     await ui.line()  # Add line after user input
-
+
+    # Restart spinner (only if not streaming)
+    if not state_manager.session.is_streaming_active and state_manager.session.spinner:
+        state_manager.session.spinner.start()


 async def _tool_handler(part, node, state_manager: StateManager):

@@ -96,7 +100,19 @@ async def _tool_handler(part, node, state_manager: StateManager):
     if tool_handler.should_confirm(part.tool_name):
         await ui.info(f"Tool({part.tool_name})")

-
+    # Stop spinner only if not streaming
+    if not state_manager.session.is_streaming_active and state_manager.session.spinner:
+        state_manager.session.spinner.stop()
+
+    # Track if we need to stop/restart streaming panel
+    streaming_panel = None
+    if state_manager.session.is_streaming_active and hasattr(
+        state_manager.session, "streaming_panel"
+    ):
+        streaming_panel = state_manager.session.streaming_panel
+        # Stop the streaming panel to prevent UI interference during confirmation
+        if streaming_panel and tool_handler.should_confirm(part.tool_name):
+            await streaming_panel.stop()

     try:
         args = _parse_args(part.args)

@@ -128,7 +144,13 @@ async def _tool_handler(part, node, state_manager: StateManager):
         patch_tool_messages("Operation aborted by user.", state_manager)
         raise
     finally:
-
+        # Restart streaming panel if it was stopped
+        if streaming_panel and tool_handler.should_confirm(part.tool_name):
+            await streaming_panel.start()
+
+        # Restart spinner only if not streaming
+        if not state_manager.session.is_streaming_active and state_manager.session.spinner:
+            state_manager.session.spinner.start()


 # Initialize command registry
@@ -195,38 +217,77 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
             await ui.error(str(e))
             return

-        #
-            text,
-            state_manager,
-            tool_callback=tool_callback_with_state,
+        # Check if streaming is enabled (default: True for better UX)
+        enable_streaming = state_manager.session.user_config.get("settings", {}).get(
+            "enable_streaming", True
         )
+
+        if enable_streaming:
+            # Stop spinner before starting streaming display (Rich.Live conflict)
+            await ui.spinner(False, state_manager.session.spinner, state_manager)
+
+            # Mark that streaming is active to prevent spinner conflicts
+            state_manager.session.is_streaming_active = True
+
+            # Use streaming agent processing
+            streaming_panel = ui.StreamingAgentPanel()
+            await streaming_panel.start()
+
+            # Store streaming panel reference in session for tool handler access
+            state_manager.session.streaming_panel = streaming_panel
+
+            try:
+
+                async def streaming_callback(content: str):
+                    await streaming_panel.update(content)
+
+                res = await agent.process_request(
+                    state_manager.session.current_model,
+                    text,
+                    state_manager,
+                    tool_callback=tool_callback_with_state,
+                    streaming_callback=streaming_callback,
+                )
+            finally:
+                await streaming_panel.stop()
+                # Clear streaming panel reference
+                state_manager.session.streaming_panel = None
+                # Mark streaming as inactive
+                state_manager.session.is_streaming_active = False
+                # Don't restart spinner - it will be stopped in the outer finally block anyway
+        else:
+            # Use normal agent processing
+            res = await agent.process_request(
+                state_manager.session.current_model,
+                text,
+                state_manager,
+                tool_callback=tool_callback_with_state,
+            )
         if output:
             if state_manager.session.show_thoughts:
                 new_msgs = state_manager.session.messages[start_idx:]
                 for msg in new_msgs:
                     if isinstance(msg, dict) and "thought" in msg:
                         await ui.muted(f"THOUGHT: {msg['thought']}")
-            #
-            if
-                await ui.
+
+            # Only display result if not streaming (streaming already showed content)
+            if not enable_streaming:
+                # Check if result exists and has output
+                if (
+                    hasattr(res, "result")
+                    and res.result is not None
+                    and hasattr(res.result, "output")
+                ):
+                    await ui.agent(res.result.output)
+                else:
+                    # Fallback: show that the request was processed
+                    await ui.muted("Request completed")
+
+            # Always show files in context after agent response
+            if state_manager.session.files_in_context:
+                # Extract just filenames from full paths for readability
+                filenames = [Path(f).name for f in sorted(state_manager.session.files_in_context)]
+                await ui.muted(f"\nFiles in context: {', '.join(filenames)}")
     except CancelledError:
         await ui.muted("Request cancelled")
     except UserAbortError:
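The streaming branch above reads user_config["settings"]["enable_streaming"] with a default of True, so the flag can also be persisted in the user config. Assuming the file at ~/.config/tunacode.json mirrors that structure, disabling streaming would look roughly like this sketch (other keys omitted):

{
  "settings": {
    "enable_streaming": false
  }
}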
tunacode/constants.py
CHANGED
tunacode/core/agents/main.py
CHANGED

@@ -12,6 +12,22 @@ from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Iterator, List, Optional, Tuple

+from pydantic_ai import Agent
+
+# Import streaming types with fallback for older versions
+try:
+    from pydantic_ai.messages import (
+        PartDeltaEvent,
+        TextPartDelta,
+    )
+
+    STREAMING_AVAILABLE = True
+except ImportError:
+    # Fallback for older pydantic-ai versions
+    PartDeltaEvent = None
+    TextPartDelta = None
+    STREAMING_AVAILABLE = False
+
 from tunacode.constants import READ_ONLY_TOOLS
 from tunacode.core.state import StateManager
 from tunacode.services.mcp import get_mcp_servers

@@ -197,6 +213,7 @@ async def _process_node(
     tool_callback: Optional[ToolCallback],
     state_manager: StateManager,
     tool_buffer: Optional[ToolBuffer] = None,
+    streaming_callback: Optional[callable] = None,
 ):
     from tunacode.ui import console as ui
     from tunacode.utils.token_counter import estimate_tokens

@@ -216,6 +233,16 @@ async def _process_node(
     if hasattr(node, "model_response"):
         state_manager.session.messages.append(node.model_response)

+        # Stream content to callback if provided
+        # Use this as fallback when true token streaming is not available
+        if streaming_callback and not STREAMING_AVAILABLE:
+            for part in node.model_response.parts:
+                if hasattr(part, "content") and isinstance(part.content, str):
+                    content = part.content.strip()
+                    if content and not content.startswith('{"thought"'):
+                        # Stream non-JSON content (actual response content)
+                        await streaming_callback(content)
+
     # Enhanced display when thoughts are enabled
     if state_manager.session.show_thoughts:
         # Show raw API response data

@@ -683,6 +710,7 @@ async def process_request(
     message: str,
     state_manager: StateManager,
     tool_callback: Optional[ToolCallback] = None,
+    streaming_callback: Optional[callable] = None,
 ) -> AgentRun:
     agent = get_or_create_agent(model, state_manager)
     mh = state_manager.session.messages.copy()
@@ -723,7 +751,19 @@ async def process_request(
         i = 0
         async for node in agent_run:
             state_manager.session.current_iteration = i + 1
-
+
+            # Handle token-level streaming for model request nodes
+            if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
+                async with node.stream(agent_run.ctx) as request_stream:
+                    async for event in request_stream:
+                        if isinstance(event, PartDeltaEvent) and isinstance(
+                            event.delta, TextPartDelta
+                        ):
+                            # Stream individual token deltas
+                            if event.delta.content_delta:
+                                await streaming_callback(event.delta.content_delta)
+
+            await _process_node(node, tool_callback, state_manager, tool_buffer, streaming_callback)
             if hasattr(node, "result") and node.result and hasattr(node.result, "output"):
                 if node.result.output:
                     response_state.has_user_response = True
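For orientation, the streaming_callback threaded through process_request and _process_node above is just an async callable that receives plain-text chunks: token deltas when pydantic-ai exposes PartDeltaEvent/TextPartDelta, otherwise whole response parts. A minimal, hypothetical consumer sketch (the collector below is not part of the release):

import asyncio

collected: list[str] = []

async def collect_chunks(content: str) -> None:
    # Receives token deltas or whole message parts, depending on STREAMING_AVAILABLE;
    # either way the payload is a plain string.
    collected.append(content)
    print(content, end="", flush=True)

# Hypothetical call shape, mirroring the signature added above:
# run = await process_request(model, message, state_manager,
#                             tool_callback=tool_cb,
#                             streaming_callback=collect_chunks)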
tunacode/core/agents/utils.py
ADDED

@@ -0,0 +1,304 @@
+import asyncio
+import importlib
+import json
+import os
+import re
+from collections.abc import Iterator
+from datetime import datetime, timezone
+from typing import Any
+
+from tunacode.constants import READ_ONLY_TOOLS
+from tunacode.types import (
+    ErrorMessage,
+    StateManager,
+    ToolCallback,
+    ToolCallId,
+    ToolName,
+)
+from tunacode.ui import console as ui
+
+
+# Lazy import for Agent and Tool
+def get_agent_tool():
+    pydantic_ai = importlib.import_module("pydantic_ai")
+    return pydantic_ai.Agent, pydantic_ai.Tool
+
+
+def get_model_messages():
+    messages = importlib.import_module("pydantic_ai.messages")
+    return messages.ModelRequest, messages.ToolReturnPart
+
+
+async def execute_tools_parallel(
+    tool_calls: list[tuple[Any, Any]], callback: ToolCallback, return_exceptions: bool = True
+) -> list[Any]:
+    """Execute multiple tool calls in parallel using asyncio.
+
+    Args:
+        tool_calls: List of (part, node) tuples
+        callback: The tool callback function to execute
+        return_exceptions: Whether to return exceptions or raise them
+
+    Returns:
+        List of results in the same order as input, with exceptions for failed calls
+    """
+    # Get max parallel from environment or default to CPU count
+    max_parallel = int(os.environ.get("TUNACODE_MAX_PARALLEL", os.cpu_count() or 4))
+
+    async def execute_with_error_handling(part, node):
+        try:
+            return await callback(part, node)
+        except Exception as e:
+            return e
+
+    # If we have more tools than max_parallel, execute in batches
+    if len(tool_calls) > max_parallel:
+        results = []
+        for i in range(0, len(tool_calls), max_parallel):
+            batch = tool_calls[i : i + max_parallel]
+            batch_tasks = [execute_with_error_handling(part, node) for part, node in batch]
+            batch_results = await asyncio.gather(*batch_tasks, return_exceptions=return_exceptions)
+            results.extend(batch_results)
+        return results
+    tasks = [execute_with_error_handling(part, node) for part, node in tool_calls]
+    return await asyncio.gather(*tasks, return_exceptions=return_exceptions)
+
+
+def batch_read_only_tools(tool_calls: list[Any]) -> Iterator[list[Any]]:
+    """Batch tool calls so read-only tools can be executed in parallel.
+
+    Yields batches where:
+    - Read-only tools are grouped together
+    - Write/execute tools are in their own batch (single item)
+    - Order within each batch is preserved
+
+    Args:
+        tool_calls: List of tool call objects with 'tool' attribute
+
+    Yields:
+        Batches of tool calls
+    """
+    if not tool_calls:
+        return
+
+    current_batch = []
+
+    for tool_call in tool_calls:
+        tool_name = tool_call.tool_name if hasattr(tool_call, "tool_name") else None
+
+        if tool_name in READ_ONLY_TOOLS:
+            # Add to current batch
+            current_batch.append(tool_call)
+        else:
+            # Yield any pending read-only batch
+            if current_batch:
+                yield current_batch
+                current_batch = []
+
+            # Yield write/execute tool as single-item batch
+            yield [tool_call]
+
+    # Yield any remaining read-only tools
+    if current_batch:
+        yield current_batch
+
+
+async def create_buffering_callback(
+    original_callback: ToolCallback, buffer: Any, state_manager: StateManager
+) -> ToolCallback:
+    """Create a callback wrapper that buffers read-only tools for parallel execution.
+
+    Args:
+        original_callback: The original tool callback
+        buffer: ToolBuffer instance to store read-only tools
+        state_manager: StateManager for UI access
+
+    Returns:
+        A wrapped callback function
+    """
+
+    async def buffering_callback(part, node):
+        tool_name = getattr(part, "tool_name", None)
+
+        if tool_name in READ_ONLY_TOOLS:
+            # Buffer read-only tools
+            buffer.add(part, node)
+            # Don't execute yet - will be executed in parallel batch
+            return None
+
+        # Non-read-only tool encountered - flush buffer first
+        if buffer.has_tasks():
+            buffered_tasks = buffer.flush()
+
+            # Execute buffered read-only tools in parallel
+            if state_manager.session.show_thoughts:
+                await ui.muted(f"Executing {len(buffered_tasks)} read-only tools in parallel")
+
+            await execute_tools_parallel(buffered_tasks, original_callback)
+
+        # Execute the non-read-only tool
+        return await original_callback(part, node)
+
+    return buffering_callback
+
+
+async def parse_json_tool_calls(
+    text: str, tool_callback: ToolCallback | None, state_manager: StateManager
+):
+    """Parse JSON tool calls from text when structured tool calling fails.
+    Fallback for when API providers don't support proper tool calling.
+    """
+    if not tool_callback:
+        return
+
+    # Pattern for JSON tool calls: {"tool": "tool_name", "args": {...}}
+    # Find potential JSON objects and parse them
+    potential_jsons = []
+    brace_count = 0
+    start_pos = -1
+
+    for i, char in enumerate(text):
+        if char == "{":
+            if brace_count == 0:
+                start_pos = i
+            brace_count += 1
+        elif char == "}":
+            brace_count -= 1
+            if brace_count == 0 and start_pos != -1:
+                potential_json = text[start_pos : i + 1]
+                try:
+                    parsed = json.loads(potential_json)
+                    if isinstance(parsed, dict) and "tool" in parsed and "args" in parsed:
+                        potential_jsons.append((parsed["tool"], parsed["args"]))
+                except json.JSONDecodeError:
+                    pass
+                start_pos = -1
+
+    matches = potential_jsons
+
+    for tool_name, args in matches:
+        try:
+            # Create a mock tool call object
+            class MockToolCall:
+                def __init__(self, tool_name: str, args: dict):
+                    self.tool_name = tool_name
+                    self.args = args
+                    self.tool_call_id = f"fallback_{datetime.now().timestamp()}"
+
+            class MockNode:
+                pass
+
+            # Execute the tool through the callback
+            mock_call = MockToolCall(tool_name, args)
+            mock_node = MockNode()
+
+            await tool_callback(mock_call, mock_node)
+
+            if state_manager.session.show_thoughts:
+                await ui.muted(f"FALLBACK: Executed {tool_name} via JSON parsing")
+
+        except Exception as e:
+            if state_manager.session.show_thoughts:
+                await ui.error(f"Error executing fallback tool {tool_name}: {e!s}")
+
+
+async def extract_and_execute_tool_calls(
+    text: str, tool_callback: ToolCallback | None, state_manager: StateManager
+):
+    """Extract tool calls from text content and execute them.
+    Supports multiple formats for maximum compatibility.
+    """
+    if not tool_callback:
+        return
+
+    # Format 1: {"tool": "name", "args": {...}}
+    await parse_json_tool_calls(text, tool_callback, state_manager)
+
+    # Format 2: Tool calls in code blocks
+    code_block_pattern = r'```json\s*(\{(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*"tool"(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*\})\s*```'
+    code_matches = re.findall(code_block_pattern, text, re.MULTILINE | re.DOTALL)
+
+    for match in code_matches:
+        try:
+            tool_data = json.loads(match)
+            if "tool" in tool_data and "args" in tool_data:
+
+                class MockToolCall:
+                    def __init__(self, tool_name: str, args: dict):
+                        self.tool_name = tool_name
+                        self.args = args
+                        self.tool_call_id = f"codeblock_{datetime.now().timestamp()}"
+
+                class MockNode:
+                    pass
+
+                mock_call = MockToolCall(tool_data["tool"], tool_data["args"])
+                mock_node = MockNode()
+
+                await tool_callback(mock_call, mock_node)
+
+                if state_manager.session.show_thoughts:
+                    await ui.muted(f"FALLBACK: Executed {tool_data['tool']} from code block")
+
+        except (json.JSONDecodeError, KeyError, Exception) as e:
+            if state_manager.session.show_thoughts:
+                await ui.error(f"Error parsing code block tool call: {e!s}")
+
+
+def patch_tool_messages(
+    error_message: ErrorMessage = "Tool operation failed",
+    state_manager: StateManager = None,
+):
+    """Find any tool calls without responses and add synthetic error responses for them.
+    Takes an error message to use in the synthesized tool response.
+
+    Ignores tools that have corresponding retry prompts as the model is already
+    addressing them.
+    """
+    if state_manager is None:
+        raise ValueError("state_manager is required for patch_tool_messages")
+
+    messages = state_manager.session.messages
+
+    if not messages:
+        return
+
+    # Map tool calls to their tool returns
+    tool_calls: dict[ToolCallId, ToolName] = {}  # tool_call_id -> tool_name
+    tool_returns: set[ToolCallId] = set()  # set of tool_call_ids with returns
+    retry_prompts: set[ToolCallId] = set()  # set of tool_call_ids with retry prompts
+
+    for message in messages:
+        if hasattr(message, "parts"):
+            for part in message.parts:
+                if (
+                    hasattr(part, "part_kind")
+                    and hasattr(part, "tool_call_id")
+                    and part.tool_call_id
+                ):
+                    if part.part_kind == "tool-call":
+                        tool_calls[part.tool_call_id] = part.tool_name
+                    elif part.part_kind == "tool-return":
+                        tool_returns.add(part.tool_call_id)
+                    elif part.part_kind == "retry-prompt":
+                        retry_prompts.add(part.tool_call_id)
+
+    # Identify orphaned tools (those without responses and not being retried)
+    for tool_call_id, tool_name in list(tool_calls.items()):
+        if tool_call_id not in tool_returns and tool_call_id not in retry_prompts:
+            # Import ModelRequest and ToolReturnPart lazily
+            model_request_cls, tool_return_part_cls = get_model_messages()
+            messages.append(
+                model_request_cls(
+                    parts=[
+                        tool_return_part_cls(
+                            tool_name=tool_name,
+                            content=error_message,
+                            tool_call_id=tool_call_id,
+                            timestamp=datetime.now(timezone.utc),
+                            part_kind="tool-return",
+                        )
+                    ],
+                    kind="request",
+                )
+            )
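To make the concurrency cap in execute_tools_parallel concrete: it reads TUNACODE_MAX_PARALLEL (falling back to the CPU count) and gathers at most that many callbacks at once, so ten buffered read-only calls with a cap of 4 would run as batches of 4, 4 and 2. A rough sketch with a stand-in callback (nothing below is part of the package):

import asyncio
import os

os.environ["TUNACODE_MAX_PARALLEL"] = "4"  # cap parallelism for the illustration

async def echo_callback(part, node):
    # Stand-in for the real tool callback; just reports which tool would run.
    await asyncio.sleep(0.01)
    return getattr(part, "tool_name", "unknown")

# tool_calls would be the (part, node) tuples buffered by create_buffering_callback.
# Results preserve input order, and per-call exceptions are returned rather than raised:
# results = await execute_tools_parallel(tool_calls, echo_callback)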
tunacode/core/setup/config_setup.py
CHANGED

@@ -107,7 +107,6 @@ class ConfigSetup(BaseSetup):
             "--key 'your-key' --baseurl 'https://openrouter.ai/api/v1'[/green]"
         )
         console.print("\n[yellow]Run 'tunacode --help' for more options[/yellow]\n")
-        from tunacode.exceptions import ConfigurationError

         raise ConfigurationError(
             "No configuration found. Please use CLI flags to configure."
tunacode/core/state.py
CHANGED

@@ -42,6 +42,10 @@ class SessionState:
     tool_calls: list[dict[str, Any]] = field(default_factory=list)
     iteration_count: int = 0
     current_iteration: int = 0
+    # Track streaming state to prevent spinner conflicts
+    is_streaming_active: bool = False
+    # Track streaming panel reference for tool handler access
+    streaming_panel: Optional[Any] = None


 class StateManager:
tunacode/ui/console.py
CHANGED

@@ -27,7 +27,9 @@ from .output import (

 # Patch banner to use sync fast version
 from .panels import (
+    StreamingAgentPanel,
     agent,
+    agent_streaming,
     dump_messages,
     error,
     help,

@@ -79,11 +81,13 @@ __all__ = [
     "warning",
     # From panels module
     "agent",
+    "agent_streaming",
     "dump_messages",
     "error",
     "help",
     "models",
     "panel",
+    "StreamingAgentPanel",
     "sync_panel",
     "sync_tool_confirm",
     "tool_confirm",
tunacode/ui/panels.py
CHANGED

@@ -3,6 +3,7 @@
 from typing import Any, Optional, Union

 from rich.box import ROUNDED
+from rich.live import Live
 from rich.markdown import Markdown
 from rich.padding import Padding
 from rich.panel import Panel

@@ -74,6 +75,79 @@ async def agent(text: str, bottom: int = 1) -> None:
     await panel(title, Markdown(text), bottom=bottom, border_style=colors.primary)


+class StreamingAgentPanel:
+    """Streaming agent panel using Rich.Live for progressive display."""
+
+    def __init__(self, bottom: int = 1):
+        self.bottom = bottom
+        self.title = f"[bold {colors.primary}]●[/bold {colors.primary}] {APP_NAME}"
+        self.content = ""
+        self.live = None
+
+    def _create_panel(self) -> Panel:
+        """Create a Rich panel with current content."""
+        markdown_content = Markdown(self.content or "Thinking...")
+        panel_obj = Panel(
+            Padding(markdown_content, (0, 1, 0, 1)),
+            title=f"[bold]{self.title}[/bold]",
+            title_align="left",
+            border_style=colors.primary,
+            padding=(0, 1),
+            box=ROUNDED,
+        )
+        return Padding(
+            panel_obj,
+            (
+                DEFAULT_PANEL_PADDING["top"],
+                DEFAULT_PANEL_PADDING["right"],
+                self.bottom,
+                DEFAULT_PANEL_PADDING["left"],
+            ),
+        )
+
+    async def start(self):
+        """Start the live streaming display."""
+        from .output import console
+
+        self.live = Live(self._create_panel(), console=console, refresh_per_second=4)
+        self.live.start()
+
+    async def update(self, content_chunk: str):
+        """Update the streaming display with new content."""
+        self.content += content_chunk
+        if self.live:
+            self.live.update(self._create_panel())
+
+    async def set_content(self, content: str):
+        """Set the complete content (overwrites previous)."""
+        self.content = content
+        if self.live:
+            self.live.update(self._create_panel())
+
+    async def stop(self):
+        """Stop the live streaming display."""
+        if self.live:
+            self.live.stop()
+            self.live = None
+
+
+async def agent_streaming(content_stream, bottom: int = 1):
+    """Display an agent panel with streaming content updates.
+
+    Args:
+        content_stream: Async iterator yielding content chunks
+        bottom: Bottom padding for the panel
+    """
+    panel = StreamingAgentPanel(bottom=bottom)
+    await panel.start()
+
+    try:
+        async for chunk in content_stream:
+            await panel.update(chunk)
+    finally:
+        await panel.stop()
+
+
 async def error(text: str) -> None:
     """Display an error panel."""
     await panel(PANEL_ERROR, text, style=colors.error)
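As a usage illustration of the new panel API (the chunk generator below is hypothetical, standing in for model output deltas):

import asyncio

from tunacode.ui.panels import StreamingAgentPanel, agent_streaming


async def fake_chunks():
    for piece in ["Hello", ", ", "world", "!"]:
        yield piece
        await asyncio.sleep(0.1)


async def demo():
    # Drive the panel manually...
    panel = StreamingAgentPanel()
    await panel.start()
    try:
        async for chunk in fake_chunks():
            await panel.update(chunk)
    finally:
        await panel.stop()

    # ...or hand the iterator to the convenience wrapper added above.
    await agent_streaming(fake_chunks())


# asyncio.run(demo())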
tunacode/ui/utils.py
ADDED
(+3 lines; content not shown in this diff view)

{tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tunacode-cli
-Version: 0.0.
+Version: 0.0.37
 Summary: Your agentic CLI developer.
 Author-email: larock22 <noreply@github.com>
 License-Expression: MIT

@@ -26,9 +26,7 @@ Requires-Dist: pygments==2.19.1
 Requires-Dist: rich==14.0.0
 Provides-Extra: dev
 Requires-Dist: build; extra == "dev"
-Requires-Dist:
-Requires-Dist: flake8; extra == "dev"
-Requires-Dist: isort; extra == "dev"
+Requires-Dist: ruff; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
 Requires-Dist: pytest-asyncio; extra == "dev"

@@ -70,7 +68,7 @@ Choose your AI provider and set your API key:
 # OpenAI
 tunacode --model "openai:gpt-4o" --key "sk-your-openai-key"

-# Anthropic Claude
+# Anthropic Claude
 tunacode --model "anthropic:claude-3.5-sonnet" --key "sk-ant-your-anthropic-key"

 # OpenRouter (100+ models)

@@ -82,13 +80,14 @@ Your config is saved to `~/.config/tunacode.json` (edit directly with `nvim ~/.c
 ### Recommended Models

 Based on extensive testing, these models provide the best performance:
+
 - `google/gemini-2.5-pro` - Excellent for complex reasoning
 - `openai/gpt-4.1` - Strong general-purpose model
 - `deepseek/deepseek-r1-0528` - Great for code generation
 - `openai/gpt-4.1-mini` - Fast and cost-effective
 - `anthropic/claude-4-sonnet-20250522` - Superior context handling

-
+_Note: Formal evaluations coming soon. Any model can work, but these have shown the best results in practice._

 ## Start Coding

@@ -98,16 +97,16 @@ tunacode

 ## Basic Commands

-| Command
-|
-| `/help`
-| `/model <provider:name>` | Switch model
-| `/clear`
-| `/compact`
-| `/branch <name>`
-| `/yolo`
-| `!<command>`
-| `exit`
+| Command                  | Description            |
+| ------------------------ | ---------------------- |
+| `/help`                  | Show all commands      |
+| `/model <provider:name>` | Switch model           |
+| `/clear`                 | Clear message history  |
+| `/compact`               | Summarize conversation |
+| `/branch <name>`         | Create Git branch      |
+| `/yolo`                  | Skip confirmations     |
+| `!<command>`             | Run shell command      |
+| `exit`                   | Exit TunaCode          |

 ## Performance

@@ -122,11 +121,12 @@ Multiple file reads, directory listings, and searches execute concurrently using
 - **Streaming UI**: Currently working on implementing streaming responses for better user experience
 - **Bug Fixes**: Actively addressing issues - please report any bugs you encounter!

-
+_Note: While the tool is fully functional, we're focusing on stability and core features before optimizing for speed._

 ## Safety First

 ⚠️ **Important**: TunaCode can modify your codebase. Always:
+
 - Use Git branches before making changes
 - Review file modifications before confirming
 - Keep backups of important work
{tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 tunacode/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tunacode/constants.py,sha256=
+tunacode/constants.py,sha256=ri3tiaz69KDnxnTdQDQxcz85_qXe4P3JE7KafTpDroU,4074
 tunacode/context.py,sha256=6sterdRvPOyG3LU0nEAXpBsEPZbO3qtPyTlJBi-_VXE,2612
 tunacode/exceptions.py,sha256=mTWXuWyr1k16CGLWN2tsthDGi7lbx1JK0ekIqogYDP8,3105
 tunacode/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -7,35 +7,36 @@ tunacode/setup.py,sha256=XPt4eAK-qcIZQv64jGZ_ryxcImDwps9OmXjJfIS1xcs,1899
 tunacode/types.py,sha256=BciT-uxnQ44iC-4QiDY72OD23LOtqSyMOuK_N0ttlaA,7676
 tunacode/cli/__init__.py,sha256=zgs0UbAck8hfvhYsWhWOfBe5oK09ug2De1r4RuQZREA,55
 tunacode/cli/main.py,sha256=PIcFnfmIoI_pmK2y-zB_ouJbzR5fbSI7zsKQNPB_J8o,2406
-tunacode/cli/repl.py,sha256=
+tunacode/cli/repl.py,sha256=SoW5QiMohHB6BLuP8grCdNrrJq-HTF80p6RFmoMMeqk,17227
 tunacode/cli/textual_app.py,sha256=14-Nt0IIETmyHBrNn9uwSF3EwCcutwTp6gdoKgNm0sY,12593
 tunacode/cli/textual_bridge.py,sha256=LvqiTtF0hu3gNujzpKaW9h-m6xzEP3OH2M8KL2pCwRc,6333
 tunacode/cli/commands/__init__.py,sha256=YMrLz7szrmseJCRZGGX6_TyO3dJU8_QDCOFEhRAztzo,1634
 tunacode/cli/commands/base.py,sha256=GxUuDsDSpz0iXryy8MrEw88UM3C3yxL__kDK1QhshoA,2517
-tunacode/cli/commands/registry.py,sha256=
-tunacode/cli/commands/implementations/__init__.py,sha256=
+tunacode/cli/commands/registry.py,sha256=d2dSAKrz_c02zU2AKtWghqPmm-p7XG_L81yj9nI1qu8,8152
+tunacode/cli/commands/implementations/__init__.py,sha256=sRFG2aktjtWt-M0Co_GdeLjODiVjEFqHDcLyyVabj8M,917
 tunacode/cli/commands/implementations/conversation.py,sha256=EsnsZB6yyVI_sbNNMvk37tCz3iAj4E85R9ev696qeqg,4683
 tunacode/cli/commands/implementations/debug.py,sha256=TdP72Dpd3Nq3lwwyj0qZEdbSjDDmRyFu-0t6ttPFNno,6769
 tunacode/cli/commands/implementations/development.py,sha256=kZRdVgReVmGU0uijFxtPio2RYkTrYMufOwgI1Aj1_NU,2729
 tunacode/cli/commands/implementations/model.py,sha256=uthx6IX9KwgwywNTDklkJpqCbaTX9h1_p-eVmqL73WQ,2245
-tunacode/cli/commands/implementations/system.py,sha256=
+tunacode/cli/commands/implementations/system.py,sha256=2cGw5iCJO3aNhXTFF28CgAIyLgslvHmpfyL2ZHVB6oQ,7903
 tunacode/configuration/__init__.py,sha256=MbVXy8bGu0yKehzgdgZ_mfWlYGvIdb1dY2Ly75nfuPE,17
 tunacode/configuration/defaults.py,sha256=lNeJUW1S8zj4-XTCkMP9UaDc-tHWXLff9K8t0uPA_oE,801
 tunacode/configuration/models.py,sha256=XPobkLM_TzKTuMIWhK-svJfGRGFT9r2LhKEM6rv6QHk,3756
 tunacode/configuration/settings.py,sha256=KoN0u6GG3Hh_TWt02D_wpRfbACYri3gCDTXHtJfHl2w,994
 tunacode/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tunacode/core/code_index.py,sha256=jgAx3lSWP_DwnyiP5Jkm1YvX4JJyI4teMzlNrJSpEOA,15661
-tunacode/core/state.py,sha256=
+tunacode/core/state.py,sha256=d2xUa7sRYizVc1mkaQN4aU034VSeh9nxQadzpL-aRas,1810
 tunacode/core/tool_handler.py,sha256=BPjR013OOO0cLXPdLeL2FDK0ixUwOYu59FfHdcdFhp4,2277
 tunacode/core/agents/__init__.py,sha256=UUJiPYb91arwziSpjd7vIk7XNGA_4HQbsOIbskSqevA,149
-tunacode/core/agents/main.py,sha256=
+tunacode/core/agents/main.py,sha256=BkAyqSr-xgENdyEmrPrx-E21ljclSU36EJh675Vhttk,41173
+tunacode/core/agents/utils.py,sha256=VaNsPB2l1dAP-VlS_QLRKvCb4NW0pXNRoxkh12AGXAg,10744
 tunacode/core/background/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tunacode/core/background/manager.py,sha256=rJdl3eDLTQwjbT7VhxXcJbZopCNR3M8ZGMbmeVnwwMc,1126
 tunacode/core/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tunacode/core/setup/__init__.py,sha256=lzdpY6rIGf9DDlDBDGFvQZaSOQeFsNglHbkpq1-GtU8,376
 tunacode/core/setup/agent_setup.py,sha256=trELO8cPnWo36BBnYmXDEnDPdhBg0p-VLnx9A8hSSSQ,1401
 tunacode/core/setup/base.py,sha256=cbyT2-xK2mWgH4EO17VfM_OM2bj0kT895NW2jSXbe3c,968
-tunacode/core/setup/config_setup.py,sha256=
+tunacode/core/setup/config_setup.py,sha256=xOQcjlFEL7HtGXH7wtpu83S6oA9K02wLYR1r-cWINpI,14449
 tunacode/core/setup/coordinator.py,sha256=oVTN2xIeJERXitVJpkIk9tDGLs1D1bxIRmaogJwZJFI,2049
 tunacode/core/setup/environment_setup.py,sha256=n3IrObKEynHZSwtUJ1FddMg2C4sHz7ca42awemImV8s,2225
 tunacode/core/setup/git_safety_setup.py,sha256=CRIqrQt0QUJQRS344njty_iCqTorrDhHlXRuET7w0Tk,6714

@@ -55,16 +56,17 @@ tunacode/tools/update_file.py,sha256=bW1MhTzRjBDjJzqQ6A1yCVEbkr1oIqtEC8uqcg_rfY4
 tunacode/tools/write_file.py,sha256=prL6u8XOi9ZyPU-YNlG9YMLbSLrDJXDRuDX73ncXh-k,2699
 tunacode/ui/__init__.py,sha256=aRNE2pS50nFAX6y--rSGMNYwhz905g14gRd6g4BolYU,13
 tunacode/ui/completers.py,sha256=Jx1zyCESwdm_4ZopvCBtb0bCJF-bRy8aBWG2yhPQtDc,4878
-tunacode/ui/console.py,sha256=
+tunacode/ui/console.py,sha256=YXNFlnV7n4wyaIy-VohzIMJJ71C7fzgcjuLheNIO-QU,2079
 tunacode/ui/constants.py,sha256=A76B_KpM8jCuBYRg4cPmhi8_j6LLyWttO7_jjv47r3w,421
 tunacode/ui/decorators.py,sha256=e2KM-_pI5EKHa2M045IjUe4rPkTboxaKHXJT0K3461g,1914
 tunacode/ui/input.py,sha256=6LlEwKIXYXusNDI2PD0DDjRymQgu5mf2v06TsHbUln0,2957
 tunacode/ui/keybindings.py,sha256=h0MlD73CW_3i2dQzb9EFSPkqy0raZ_isgjxUiA9u6ts,691
 tunacode/ui/lexers.py,sha256=tmg4ic1enyTRLzanN5QPP7D_0n12YjX_8ZhsffzhXA4,1340
 tunacode/ui/output.py,sha256=kjVklUZumGwjV8LeB7aX1WjcwAbURnuudZZY1t4qsN4,4499
-tunacode/ui/panels.py,sha256=
+tunacode/ui/panels.py,sha256=IZpiWBb7jVXaycH5BPAnqTCs2-_ccJYq2V55MxkVHzQ,8199
 tunacode/ui/prompt_manager.py,sha256=U2cntB34vm-YwOj3gzFRUK362zccrz8pigQfpxr5sv8,4650
 tunacode/ui/tool_ui.py,sha256=S5-k1HwRlSqiQ8shGQ_QYGXQbuzb6Pg7u3CTqZwffdQ,6533
+tunacode/ui/utils.py,sha256=P3RVc1j9XfRfLn3cEkaZnLD-pJKN_p27xmuaV4evHPo,73
 tunacode/ui/validators.py,sha256=MMIMT1I2v0l2jIy-gxX_4GSApvUTi8XWIOACr_dmoBA,758
 tunacode/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tunacode/utils/bm25.py,sha256=yq7KFWP3g_zIsjUV7l2hFPXYCzXyNQUInLU7u4qsc_4,1909

@@ -77,9 +79,9 @@ tunacode/utils/system.py,sha256=FSoibTIH0eybs4oNzbYyufIiV6gb77QaeY2yGqW39AY,1138
 tunacode/utils/text_utils.py,sha256=IiRviMqz5uoAbid8emkRXxgvQz6KE27ZeQom-qh9ymI,2984
 tunacode/utils/token_counter.py,sha256=nGCWwrHHFbKywqeDCEuJnADCkfJuzysWiB6cCltJOKI,648
 tunacode/utils/user_configuration.py,sha256=Ilz8dpGVJDBE2iLWHAPT0xR8D51VRKV3kIbsAz8Bboc,3275
-tunacode_cli-0.0.
-tunacode_cli-0.0.
-tunacode_cli-0.0.
-tunacode_cli-0.0.
-tunacode_cli-0.0.
-tunacode_cli-0.0.
+tunacode_cli-0.0.37.dist-info/licenses/LICENSE,sha256=Btzdu2kIoMbdSp6OyCLupB1aRgpTCJ_szMimgEnpkkE,1056
+tunacode_cli-0.0.37.dist-info/METADATA,sha256=1DdlPYbydBr_Wcjnzr6lpCUxSRCE5R7qatJVSqJjj1w,5064
+tunacode_cli-0.0.37.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tunacode_cli-0.0.37.dist-info/entry_points.txt,sha256=hbkytikj4dGu6rizPuAd_DGUPBGF191RTnhr9wdhORY,51
+tunacode_cli-0.0.37.dist-info/top_level.txt,sha256=lKy2P6BWNi5XSA4DHFvyjQ14V26lDZctwdmhEJrxQbU,9
+tunacode_cli-0.0.37.dist-info/RECORD,,
{tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/WHEEL
File without changes

{tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/entry_points.txt
File without changes

{tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/licenses/LICENSE
File without changes

{tunacode_cli-0.0.36.dist-info → tunacode_cli-0.0.37.dist-info}/top_level.txt
File without changes