tunacode-cli 0.0.17__py3-none-any.whl → 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tunacode-cli might be problematic.
- tunacode/cli/commands.py +39 -41
- tunacode/cli/main.py +29 -26
- tunacode/cli/repl.py +35 -10
- tunacode/cli/textual_app.py +69 -66
- tunacode/cli/textual_bridge.py +33 -32
- tunacode/configuration/settings.py +2 -9
- tunacode/constants.py +2 -4
- tunacode/context.py +1 -1
- tunacode/core/agents/main.py +88 -62
- tunacode/core/setup/config_setup.py +79 -44
- tunacode/core/setup/coordinator.py +20 -13
- tunacode/core/setup/git_safety_setup.py +35 -49
- tunacode/core/state.py +2 -9
- tunacode/exceptions.py +0 -2
- tunacode/tools/__init__.py +10 -1
- tunacode/tools/base.py +1 -1
- tunacode/tools/bash.py +5 -5
- tunacode/tools/grep.py +210 -250
- tunacode/tools/read_file.py +2 -8
- tunacode/tools/run_command.py +4 -11
- tunacode/tools/update_file.py +2 -6
- tunacode/ui/completers.py +32 -31
- tunacode/ui/console.py +3 -3
- tunacode/ui/input.py +8 -5
- tunacode/ui/keybindings.py +1 -3
- tunacode/ui/lexers.py +16 -16
- tunacode/ui/output.py +2 -2
- tunacode/ui/panels.py +8 -8
- tunacode/ui/prompt_manager.py +19 -7
- tunacode/utils/import_cache.py +11 -0
- tunacode/utils/user_configuration.py +24 -2
- {tunacode_cli-0.0.17.dist-info → tunacode_cli-0.0.18.dist-info}/METADATA +43 -2
- tunacode_cli-0.0.18.dist-info/RECORD +68 -0
- tunacode_cli-0.0.17.dist-info/RECORD +0 -67
- {tunacode_cli-0.0.17.dist-info → tunacode_cli-0.0.18.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.17.dist-info → tunacode_cli-0.0.18.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.17.dist-info → tunacode_cli-0.0.18.dist-info}/licenses/LICENSE +0 -0
- {tunacode_cli-0.0.17.dist-info → tunacode_cli-0.0.18.dist-info}/top_level.txt +0 -0
tunacode/core/agents/main.py
CHANGED
@@ -9,8 +9,22 @@ import re
 from datetime import datetime, timezone
 from typing import Optional
 
-
-
+# Lazy import for Agent and Tool
+
+
+def get_agent_tool():
+    import importlib
+
+    pydantic_ai = importlib.import_module("pydantic_ai")
+    return pydantic_ai.Agent, pydantic_ai.Tool
+
+
+def get_model_messages():
+    import importlib
+
+    messages = importlib.import_module("pydantic_ai.messages")
+    return messages.ModelRequest, messages.ToolReturnPart
+
 
 from tunacode.core.state import StateManager
 from tunacode.services.mcp import get_mcp_servers
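The new helpers above defer the heavy `pydantic_ai` import until an agent or message class is actually requested, which keeps CLI startup cheap. A minimal sketch of the same lazy-import pattern (the timing print is illustrative only, and the last line requires `pydantic_ai` to be installed):

```python
import importlib
import time


def get_agent_tool():
    # Import pydantic_ai only when an Agent/Tool is actually needed.
    pydantic_ai = importlib.import_module("pydantic_ai")
    return pydantic_ai.Agent, pydantic_ai.Tool


start = time.perf_counter()
# No heavy third-party import has happened yet at module load time.
print(f"startup overhead so far: {time.perf_counter() - start:.4f}s")

# The import cost is only paid here, on first use.
Agent, Tool = get_agent_tool()
```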
@@ -20,15 +34,8 @@ from tunacode.tools.read_file import read_file
 from tunacode.tools.run_command import run_command
 from tunacode.tools.update_file import update_file
 from tunacode.tools.write_file import write_file
-from tunacode.types import (
-
-    ErrorMessage,
-    ModelName,
-    PydanticAgent,
-    ToolCallback,
-    ToolCallId,
-    ToolName,
-)
+from tunacode.types import (AgentRun, ErrorMessage, ModelName, PydanticAgent, ToolCallback,
+                            ToolCallId, ToolName)
 
 
 async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager: StateManager):
@@ -40,63 +47,65 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manag
     # Display thought immediately if show_thoughts is enabled
     if state_manager.session.show_thoughts:
         from tunacode.ui import console as ui
+
         await ui.muted(f"💭 THOUGHT: {node.thought}")
 
     if hasattr(node, "model_response"):
         state_manager.session.messages.append(node.model_response)
-
+
         # Enhanced ReAct thought processing
        if state_manager.session.show_thoughts:
-            from tunacode.ui import console as ui
            import json
            import re
-
+
+            from tunacode.ui import console as ui
+
            for part in node.model_response.parts:
-                if hasattr(part,
+                if hasattr(part, "content") and isinstance(part.content, str):
                    content = part.content.strip()
-
+
                    # Pattern 1: Inline JSON thoughts {"thought": "..."}
                    thought_pattern = r'\{"thought":\s*"([^"]+)"\}'
                    matches = re.findall(thought_pattern, content)
                    for thought in matches:
                        await ui.muted(f"💭 REASONING: {thought}")
-
+
                    # Pattern 2: Standalone thought JSON objects
                    try:
                        if content.startswith('{"thought"'):
                            thought_obj = json.loads(content)
-                            if
+                            if "thought" in thought_obj:
                                await ui.muted(f"💭 REASONING: {thought_obj['thought']}")
                    except (json.JSONDecodeError, KeyError):
                        pass
-
+
                    # Pattern 3: Multi-line thoughts with context
                    multiline_pattern = r'\{"thought":\s*"([^"]+(?:\\.[^"]*)*?)"\}'
                    multiline_matches = re.findall(multiline_pattern, content, re.DOTALL)
                    for thought in multiline_matches:
                        if thought not in [m for m in matches]:  # Avoid duplicates
                            # Clean up escaped characters
-                            cleaned_thought = thought.replace('\\"', '"').replace(
+                            cleaned_thought = thought.replace('\\"', '"').replace("\\n", " ")
                            await ui.muted(f"💭 REASONING: {cleaned_thought}")
-
+
                    # Pattern 4: Text-based reasoning indicators
                    reasoning_indicators = [
-                        (r
-                        (r
-                        (r
-                        (r
+                        (r"I need to (.+?)\.", "PLANNING"),
+                        (r"Let me (.+?)\.", "ACTION"),
+                        (r"The output shows (.+?)\.", "OBSERVATION"),
+                        (r"Based on (.+?), I should (.+?)\.", "DECISION"),
                    ]
-
+
                    for pattern, label in reasoning_indicators:
                        indicator_matches = re.findall(pattern, content, re.IGNORECASE)
                        for match in indicator_matches:
                            if isinstance(match, tuple):
-                                match_text =
+                                match_text = " ".join(match)
                            else:
                                match_text = match
                            await ui.muted(f"🎯 {label}: {match_text}")
                            break  # Only show first match per pattern
-
+
        # Check for tool calls and fallback to JSON parsing if needed
        has_tool_calls = False
        for part in node.model_response.parts:
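When `show_thoughts` is enabled, the block above pulls reasoning strings out of plain-text model output with regexes before falling back to whole-object JSON parsing. A self-contained sketch of the first two patterns (the sample `content` string is invented for illustration):

```python
import json
import re

content = 'Some output {"thought": "I should read the config first"} more text'

# Pattern 1 from the diff: inline {"thought": "..."} objects embedded in text
thought_pattern = r'\{"thought":\s*"([^"]+)"\}'
for thought in re.findall(thought_pattern, content):
    print(f"REASONING: {thought}")

# Pattern 2: the whole part is a standalone thought object
if content.startswith('{"thought"'):
    try:
        obj = json.loads(content)
        print(f"REASONING: {obj['thought']}")
    except (json.JSONDecodeError, KeyError):
        pass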
@@ -106,28 +115,32 @@ async def _process_node(node, tool_callback: Optional[ToolCallback], state_manag
            elif part.part_kind == "tool-return":
                obs_msg = f"OBSERVATION[{part.tool_name}]: {part.content[:2_000]}"
                state_manager.session.messages.append(obs_msg)
-
+
        # If no structured tool calls found, try parsing JSON from text content
        if not has_tool_calls and tool_callback:
            for part in node.model_response.parts:
-                if hasattr(part,
+                if hasattr(part, "content") and isinstance(part.content, str):
                    await extract_and_execute_tool_calls(part.content, tool_callback, state_manager)
 
 
 def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
     if model not in state_manager.session.agents:
         max_retries = state_manager.session.user_config["settings"]["max_retries"]
-
+
+        # Lazy import Agent and Tool
+        Agent, Tool = get_agent_tool()
+
         # Load system prompt
         import os
         from pathlib import Path
+
         prompt_path = Path(__file__).parent.parent.parent / "prompts" / "system.txt"
         try:
             with open(prompt_path, "r", encoding="utf-8") as f:
                 system_prompt = f.read().strip()
         except FileNotFoundError:
             system_prompt = None
-
+
         state_manager.session.agents[model] = Agent(
             model=model,
             system_prompt=system_prompt,
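`get_or_create_agent` now resolves `Agent`/`Tool` through `get_agent_tool()` and keeps one agent per model name in session state, so repeated requests reuse the same instance. A reduced sketch of that per-model cache; `DummyAgent` is a hypothetical stand-in, not the real pydantic-ai class:

```python
from typing import Dict, Optional


class DummyAgent:
    # Hypothetical stand-in for pydantic_ai.Agent, just to show the cache shape.
    def __init__(self, model: str, system_prompt: Optional[str]):
        self.model = model
        self.system_prompt = system_prompt


_agents: Dict[str, DummyAgent] = {}


def get_or_create_agent(model: str) -> DummyAgent:
    # Create the agent once per model name and reuse it afterwards.
    if model not in _agents:
        _agents[model] = DummyAgent(model, system_prompt=None)
    return _agents[model]


assert get_or_create_agent("openai:gpt-4.1") is get_or_create_agent("openai:gpt-4.1")
```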
@@ -186,6 +199,8 @@ def patch_tool_messages(
     # Identify orphaned tools (those without responses and not being retried)
     for tool_call_id, tool_name in list(tool_calls.items()):
         if tool_call_id not in tool_returns and tool_call_id not in retry_prompts:
+            # Import ModelRequest and ToolReturnPart lazily
+            ModelRequest, ToolReturnPart = get_model_messages()
             messages.append(
                 ModelRequest(
                     parts=[
@@ -202,39 +217,41 @@ def patch_tool_messages(
                 )
 
 
-async def parse_json_tool_calls(
+async def parse_json_tool_calls(
+    text: str, tool_callback: Optional[ToolCallback], state_manager: StateManager
+):
     """
     Parse JSON tool calls from text when structured tool calling fails.
     Fallback for when API providers don't support proper tool calling.
     """
     if not tool_callback:
         return
-
+
     # Pattern for JSON tool calls: {"tool": "tool_name", "args": {...}}
     # Find potential JSON objects and parse them
     potential_jsons = []
     brace_count = 0
     start_pos = -1
-
+
     for i, char in enumerate(text):
-        if char ==
+        if char == "{":
             if brace_count == 0:
                 start_pos = i
             brace_count += 1
-        elif char ==
+        elif char == "}":
             brace_count -= 1
             if brace_count == 0 and start_pos != -1:
-                potential_json = text[start_pos:i+1]
+                potential_json = text[start_pos : i + 1]
                 try:
                     parsed = json.loads(potential_json)
-                    if isinstance(parsed, dict) and
-                        potential_jsons.append((parsed[
+                    if isinstance(parsed, dict) and "tool" in parsed and "args" in parsed:
+                        potential_jsons.append((parsed["tool"], parsed["args"]))
                 except json.JSONDecodeError:
                     pass
                 start_pos = -1
-
+
     matches = potential_jsons
-
+
     for tool_name, args in matches:
         try:
             # Create a mock tool call object
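`parse_json_tool_calls` scans free text for balanced `{...}` spans by counting braces and keeps only spans that decode to a `{"tool": ..., "args": ...}` object. A standalone sketch of that scan (the sample text and tool name are invented):

```python
import json

text = 'I will call it now: {"tool": "read_file", "args": {"filepath": "README.md"}} done.'

calls = []
brace_count = 0
start_pos = -1
for i, char in enumerate(text):
    if char == "{":
        if brace_count == 0:
            start_pos = i  # remember where the top-level object starts
        brace_count += 1
    elif char == "}":
        brace_count -= 1
        if brace_count == 0 and start_pos != -1:
            try:
                parsed = json.loads(text[start_pos : i + 1])
                if isinstance(parsed, dict) and "tool" in parsed and "args" in parsed:
                    calls.append((parsed["tool"], parsed["args"]))
            except json.JSONDecodeError:
                pass  # not valid JSON, keep scanning
            start_pos = -1

print(calls)  # [('read_file', {'filepath': 'README.md'})]
```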
@@ -243,66 +260,73 @@ async def parse_json_tool_calls(text: str, tool_callback: Optional[ToolCallback]
                     self.tool_name = tool_name
                     self.args = args
                     self.tool_call_id = f"fallback_{datetime.now().timestamp()}"
-
+
             class MockNode:
                 pass
-
+
             # Execute the tool through the callback
             mock_call = MockToolCall(tool_name, args)
             mock_node = MockNode()
-
+
             await tool_callback(mock_call, mock_node)
-
+
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui
+
                 await ui.muted(f"🔧 FALLBACK: Executed {tool_name} via JSON parsing")
-
+
         except Exception as e:
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui
+
                 await ui.error(f"❌ Error executing fallback tool {tool_name}: {str(e)}")
 
 
-async def extract_and_execute_tool_calls(
+async def extract_and_execute_tool_calls(
+    text: str, tool_callback: Optional[ToolCallback], state_manager: StateManager
+):
     """
     Extract tool calls from text content and execute them.
     Supports multiple formats for maximum compatibility.
     """
     if not tool_callback:
         return
-
+
     # Format 1: {"tool": "name", "args": {...}}
     await parse_json_tool_calls(text, tool_callback, state_manager)
-
+
     # Format 2: Tool calls in code blocks
     code_block_pattern = r'```json\s*(\{(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*"tool"(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*\})\s*```'
     code_matches = re.findall(code_block_pattern, text, re.MULTILINE | re.DOTALL)
-
+
     for match in code_matches:
         try:
             tool_data = json.loads(match)
-            if
+            if "tool" in tool_data and "args" in tool_data:
+
                 class MockToolCall:
                     def __init__(self, tool_name: str, args: dict):
                         self.tool_name = tool_name
                         self.args = args
                         self.tool_call_id = f"codeblock_{datetime.now().timestamp()}"
-
+
                 class MockNode:
                     pass
-
-                mock_call = MockToolCall(tool_data[
+
+                mock_call = MockToolCall(tool_data["tool"], tool_data["args"])
                 mock_node = MockNode()
-
+
                 await tool_callback(mock_call, mock_node)
-
+
                 if state_manager.session.show_thoughts:
                     from tunacode.ui import console as ui
+
                     await ui.muted(f"🔧 FALLBACK: Executed {tool_data['tool']} from code block")
-
+
         except (json.JSONDecodeError, KeyError, Exception) as e:
             if state_manager.session.show_thoughts:
                 from tunacode.ui import console as ui
+
                 await ui.error(f"❌ Error parsing code block tool call: {str(e)}")
 
 
@@ -316,22 +340,24 @@ async def process_request(
     mh = state_manager.session.messages.copy()
     # Get max iterations from config (default: 20)
     max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 20)
-
+
     async with agent.iter(message, message_history=mh) as agent_run:
         i = 0
         async for node in agent_run:
             await _process_node(node, tool_callback, state_manager)
             i += 1
-
+
             # Display iteration progress if thoughts are enabled
             if state_manager.session.show_thoughts and i > 1:
                 from tunacode.ui import console as ui
+
                 await ui.muted(f"🔄 Iteration {i}/{max_iterations}")
-
+
             if i >= max_iterations:
                 if state_manager.session.show_thoughts:
                     from tunacode.ui import console as ui
+
                     await ui.warning(f"⚠️ Reached maximum iterations ({max_iterations})")
                 break
-
+
         return agent_run
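`process_request` reads `settings.max_iterations` (default 20) from user config and breaks out of the agent-run loop once the cap is reached. A toy asyncio sketch of that guard; `fake_agent_run` is a made-up stand-in for the real `agent.iter(...)` node stream:

```python
import asyncio


async def fake_agent_run():
    # Hypothetical stand-in for the pydantic-ai node stream.
    for n in range(100):
        yield n


async def process(max_iterations: int = 20) -> int:
    i = 0
    async for _node in fake_agent_run():
        i += 1
        if i >= max_iterations:
            # Mirrors the diff: optionally warn, then stop iterating early.
            break
    return i


print(asyncio.run(process()))  # 20
```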
tunacode/core/setup/config_setup.py
CHANGED
@@ -38,9 +38,30 @@ class ConfigSetup(BaseSetup):
         return True
 
     async def execute(self, force_setup: bool = False) -> None:
-        """Setup configuration and run onboarding if needed."""
+        """Setup configuration and run onboarding if needed, with config fingerprint fast path."""
+        import hashlib
+
         self.state_manager.session.device_id = system.get_device_id()
         loaded_config = user_configuration.load_config()
+        # Fast path: if config fingerprint matches last loaded and config is already present, skip reprocessing
+        new_fp = None
+        if loaded_config:
+            b = json.dumps(loaded_config, sort_keys=True).encode()
+            new_fp = hashlib.sha1(b).hexdigest()[:12]
+        last_fp = getattr(self.state_manager, "_config_fingerprint", None)
+        if (
+            loaded_config
+            and not force_setup
+            and new_fp
+            and last_fp == new_fp
+            and getattr(self.state_manager, "_config_valid", False)
+        ):
+            # Fast path: config unchanged, already validated
+            self.state_manager.session.user_config = loaded_config
+            self.state_manager.session.current_model = loaded_config["default_model"]
+            return
+        # Save current config fingerprint for next run
+        self.state_manager._config_fingerprint = new_fp
 
         # Handle CLI configuration if provided
         if self.cli_config and any(self.cli_config.values()):
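The fast path above fingerprints the loaded config by hashing its key-sorted JSON form and skips re-processing when neither the fingerprint nor the cached validity flag has changed. A minimal sketch of the fingerprint recipe itself (the sample config dict is invented):

```python
import hashlib
import json

config = {"default_model": "openai:gpt-4.1", "env": {"OPENAI_API_KEY": "..."}}

# Same recipe as the diff: sort keys so the hash is independent of dict order,
# then keep a short sha1 prefix as the fingerprint.
fingerprint = hashlib.sha1(json.dumps(config, sort_keys=True).encode()).hexdigest()[:12]
print(fingerprint)
```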
@@ -50,9 +71,7 @@ class ConfigSetup(BaseSetup):
             if loaded_config and not force_setup:
                 # Silent loading
                 # Merge loaded config with defaults to ensure all required keys exist
-                self.state_manager.session.user_config = self._merge_with_defaults(
-                    loaded_config
-                )
+                self.state_manager.session.user_config = self._merge_with_defaults(loaded_config)
             else:
                 if force_setup:
                     await ui.muted("Running setup process, resetting config")
@@ -64,13 +83,18 @@ class ConfigSetup(BaseSetup):
                 else:
                     # No config found - show CLI usage instead of onboarding
                     from tunacode.ui.console import console
+
                     console.print("\n[bold red]No configuration found![/bold red]")
                     console.print("\n[bold]Quick Setup:[/bold]")
                     console.print("Configure TunaCode using CLI flags:")
                     console.print("\n[blue]Examples:[/blue]")
                     console.print(" [green]tunacode --model 'openai:gpt-4' --key 'your-key'[/green]")
-                    console.print(
-
+                    console.print(
+                        " [green]tunacode --model 'anthropic:claude-3-opus' --key 'your-key'[/green]"
+                    )
+                    console.print(
+                        " [green]tunacode --model 'openrouter:anthropic/claude-3.5-sonnet' --key 'your-key' --baseurl 'https://openrouter.ai/api/v1'[/green]"
+                    )
                     console.print("\n[yellow]Run 'tunacode --help' for more options[/yellow]\n")
                     raise SystemExit(0)
 
@@ -84,23 +108,24 @@ class ConfigSetup(BaseSetup):
 
         # No model validation - trust user's model choice
 
-        self.state_manager.session.current_model =
-
-
+        self.state_manager.session.current_model = self.state_manager.session.user_config[
+            "default_model"
+        ]
 
     async def validate(self) -> bool:
         """Validate that configuration is properly set up."""
         # Check that we have a user config
+        valid = True
         if not self.state_manager.session.user_config:
-
-
-
-
-
-
-
-
-            return
+            valid = False
+        elif not self.state_manager.session.user_config.get("default_model"):
+            valid = False
+        # Cache result for fastpath
+        if valid:
+            setattr(self.state_manager, "_config_valid", True)
+        else:
+            setattr(self.state_manager, "_config_valid", False)
+        return valid
 
     def _merge_with_defaults(self, loaded_config: UserConfig) -> UserConfig:
         """Merge loaded config with defaults to ensure all required keys exist."""
@@ -119,9 +144,7 @@ class ConfigSetup(BaseSetup):
 
     async def _onboarding(self):
         """Run the onboarding process for new users."""
-        initial_config = json.dumps(
-            self.state_manager.session.user_config, sort_keys=True
-        )
+        initial_config = json.dumps(self.state_manager.session.user_config, sort_keys=True)
 
         await self._step1_api_keys()
 
@@ -134,15 +157,11 @@ class ConfigSetup(BaseSetup):
             await self._step2_default_model()
 
         # Compare configs to see if anything changed
-        current_config = json.dumps(
-            self.state_manager.session.user_config, sort_keys=True
-        )
+        current_config = json.dumps(self.state_manager.session.user_config, sort_keys=True)
         if initial_config != current_config:
             if user_configuration.save_config(self.state_manager):
                 message = f"Config saved to: [bold]{self.config_file}[/bold]"
-                await ui.panel(
-                    "Finished", message, top=0, border_style=UI_COLORS["success"]
-                )
+                await ui.panel("Finished", message, top=0, border_style=UI_COLORS["success"])
             else:
                 await ui.error("Failed to save configuration.")
         else:
@@ -194,8 +213,10 @@ class ConfigSetup(BaseSetup):
     async def _step2_default_model_simple(self):
         """Simple model selection - just enter model name."""
         await ui.muted("Format: provider:model-name")
-        await ui.muted(
-
+        await ui.muted(
+            "Examples: openai:gpt-4.1, anthropic:claude-3-opus, google-gla:gemini-2.0-flash"
+        )
+
         while True:
             model_name = await ui.input(
                 "step2",
@@ -203,14 +224,14 @@ class ConfigSetup(BaseSetup):
                 state_manager=self.state_manager,
             )
             model_name = model_name.strip()
-
+
             # Check if provider prefix is present
             if ":" not in model_name:
                 await ui.error("Model name must include provider prefix")
                 await ui.muted("Format: provider:model-name")
                 await ui.muted("You can always change it later with /model")
                 continue
-
+
             # No validation - user is responsible for correct model names
             self.state_manager.session.user_config["default_model"] = model_name
             await ui.warning("Model set without validation - verify the model name is correct")
@@ -224,26 +245,38 @@ class ConfigSetup(BaseSetup):
             self.state_manager.session.user_config = self._merge_with_defaults(loaded_config)
         else:
             self.state_manager.session.user_config = DEFAULT_USER_CONFIG.copy()
-
+
         # Apply CLI overrides
         if self.cli_config.get("key"):
             # Determine which API key to set based on the model or baseurl
             if self.cli_config.get("baseurl") and "openrouter" in self.cli_config["baseurl"]:
-                self.state_manager.session.user_config["env"]["OPENROUTER_API_KEY"] =
+                self.state_manager.session.user_config["env"]["OPENROUTER_API_KEY"] = (
+                    self.cli_config["key"]
+                )
             elif self.cli_config.get("model"):
                 if "claude" in self.cli_config["model"] or "anthropic" in self.cli_config["model"]:
-                    self.state_manager.session.user_config["env"]["ANTHROPIC_API_KEY"] =
+                    self.state_manager.session.user_config["env"]["ANTHROPIC_API_KEY"] = (
+                        self.cli_config["key"]
+                    )
                 elif "gpt" in self.cli_config["model"] or "openai" in self.cli_config["model"]:
-                    self.state_manager.session.user_config["env"]["OPENAI_API_KEY"] =
+                    self.state_manager.session.user_config["env"]["OPENAI_API_KEY"] = (
+                        self.cli_config["key"]
+                    )
                 elif "gemini" in self.cli_config["model"]:
-                    self.state_manager.session.user_config["env"]["GEMINI_API_KEY"] =
+                    self.state_manager.session.user_config["env"]["GEMINI_API_KEY"] = (
+                        self.cli_config["key"]
+                    )
                 else:
                     # Default to OpenRouter for unknown models
-                    self.state_manager.session.user_config["env"]["OPENROUTER_API_KEY"] =
-
+                    self.state_manager.session.user_config["env"]["OPENROUTER_API_KEY"] = (
+                        self.cli_config["key"]
+                    )
+
         if self.cli_config.get("baseurl"):
-            self.state_manager.session.user_config["env"]["OPENAI_BASE_URL"] = self.cli_config[
-
+            self.state_manager.session.user_config["env"]["OPENAI_BASE_URL"] = self.cli_config[
+                "baseurl"
+            ]
+
         if self.cli_config.get("model"):
             model = self.cli_config["model"]
             # Require provider prefix
@@ -253,12 +286,14 @@ class ConfigSetup(BaseSetup):
                 "Format: provider:model-name\n"
                 "Examples: openai:gpt-4.1, anthropic:claude-3-opus"
             )
-
+
        self.state_manager.session.user_config["default_model"] = model
-
+
        # Set current model
-        self.state_manager.session.current_model = self.state_manager.session.user_config[
-
+        self.state_manager.session.current_model = self.state_manager.session.user_config[
+            "default_model"
+        ]
+
        # Save the configuration
        if user_configuration.save_config(self.state_manager):
            await ui.warning("Model set without validation - verify the model name is correct")
tunacode/core/setup/coordinator.py
CHANGED
@@ -23,24 +23,31 @@ class SetupCoordinator:
         self.setup_steps.append(step)
 
     async def run_setup(self, force_setup: bool = False) -> None:
-        """Run all registered setup steps
+        """Run all registered setup steps concurrently if possible."""
+        # Run should_run checks sequentially (they may depend on order)
+        steps_to_run = []
         for step in self.setup_steps:
             try:
                 if await step.should_run(force_setup):
-
-                    await step.execute(force_setup)
-
-                    if not await step.validate():
-                        await ui.error(f"Setup validation failed: {step.name}")
-                        raise RuntimeError(
-                            f"Setup step '{step.name}' failed validation"
-                        )
-                else:
-                    # Skip silently
-                    pass
+                    steps_to_run.append(step)
             except Exception as e:
-                await ui.error(
+                await ui.error(
+                    f"Setup failed at step '{getattr(step, 'name', repr(step))}': {str(e)}"
+                )
                 raise
+        # Run all .execute(force_setup) in parallel where possible (independent steps)
+        from asyncio import gather
+
+        try:
+            await gather(*(step.execute(force_setup) for step in steps_to_run))
+            # Now validate all sequentially: if any fail, raise error
+            for step in steps_to_run:
+                if not await step.validate():
+                    await ui.error(f"Setup validation failed: {step.name}")
+                    raise RuntimeError(f"Setup step '{step.name}' failed validation")
+        except Exception as e:
+            await ui.error(f"Setup error: {str(e)}")
+            raise
 
     def clear_steps(self) -> None:
         """Clear all registered setup steps."""