tunacode-cli 0.0.9__py3-none-any.whl → 0.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tunacode-cli has been flagged as possibly problematic.
- tunacode/cli/commands.py +34 -165
- tunacode/cli/main.py +15 -38
- tunacode/cli/repl.py +24 -18
- tunacode/configuration/defaults.py +1 -1
- tunacode/configuration/models.py +4 -11
- tunacode/configuration/settings.py +10 -3
- tunacode/constants.py +6 -4
- tunacode/context.py +3 -1
- tunacode/core/agents/main.py +94 -52
- tunacode/core/setup/agent_setup.py +1 -1
- tunacode/core/setup/config_setup.py +161 -81
- tunacode/core/setup/coordinator.py +4 -2
- tunacode/core/setup/environment_setup.py +1 -1
- tunacode/core/setup/git_safety_setup.py +51 -39
- tunacode/exceptions.py +2 -0
- tunacode/prompts/system.txt +1 -1
- tunacode/services/undo_service.py +16 -13
- tunacode/setup.py +6 -2
- tunacode/tools/base.py +20 -11
- tunacode/tools/update_file.py +14 -24
- tunacode/tools/write_file.py +7 -9
- tunacode/ui/completers.py +33 -98
- tunacode/ui/input.py +9 -13
- tunacode/ui/keybindings.py +3 -1
- tunacode/ui/lexers.py +17 -16
- tunacode/ui/output.py +8 -14
- tunacode/ui/panels.py +7 -5
- tunacode/ui/prompt_manager.py +4 -8
- tunacode/ui/tool_ui.py +3 -3
- tunacode/utils/system.py +0 -40
- tunacode_cli-0.0.11.dist-info/METADATA +387 -0
- tunacode_cli-0.0.11.dist-info/RECORD +65 -0
- {tunacode_cli-0.0.9.dist-info → tunacode_cli-0.0.11.dist-info}/licenses/LICENSE +1 -1
- tunacode/cli/model_selector.py +0 -178
- tunacode/core/agents/tinyagent_main.py +0 -194
- tunacode/core/setup/optimized_coordinator.py +0 -73
- tunacode/services/enhanced_undo_service.py +0 -322
- tunacode/services/project_undo_service.py +0 -311
- tunacode/tools/tinyagent_tools.py +0 -103
- tunacode/utils/lazy_imports.py +0 -59
- tunacode/utils/regex_cache.py +0 -33
- tunacode_cli-0.0.9.dist-info/METADATA +0 -321
- tunacode_cli-0.0.9.dist-info/RECORD +0 -73
- {tunacode_cli-0.0.9.dist-info → tunacode_cli-0.0.11.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.9.dist-info → tunacode_cli-0.0.11.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.9.dist-info → tunacode_cli-0.0.11.dist-info}/top_level.txt +0 -0
tunacode/core/agents/main.py
CHANGED

@@ -2,28 +2,49 @@
 
 Main agent functionality and coordination for the Sidekick CLI.
 Provides agent creation, message processing, and tool call management.
-Now using tinyAgent instead of pydantic-ai.
 """
 
+from datetime import datetime, timezone
 from typing import Optional
 
-from
-from
-
-# Import tinyAgent implementation
-from .tinyagent_main import get_or_create_react_agent
-from .tinyagent_main import patch_tool_messages as tinyagent_patch_tool_messages
-from .tinyagent_main import process_request_with_tinyagent
-
-# Wrapper functions for backward compatibility with pydantic-ai interface
+from pydantic_ai import Agent, Tool
+from pydantic_ai.messages import ModelRequest, ToolReturnPart
 
-
-
-
-
-
-
+from tunacode.core.state import StateManager
+from tunacode.services.mcp import get_mcp_servers
+from tunacode.tools.read_file import read_file
+from tunacode.tools.run_command import run_command
+from tunacode.tools.update_file import update_file
+from tunacode.tools.write_file import write_file
+from tunacode.types import (AgentRun, ErrorMessage, ModelName, PydanticAgent, ToolCallback,
+                            ToolCallId, ToolName)
+
+
+async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+    if hasattr(node, "request"):
+        state_manager.session.messages.append(node.request)
+
+    if hasattr(node, "model_response"):
+        state_manager.session.messages.append(node.model_response)
+        for part in node.model_response.parts:
+            if part.part_kind == "tool-call" and tool_callback:
+                await tool_callback(part, node)
+
+
+def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
+    if model not in state_manager.session.agents:
+        max_retries = state_manager.session.user_config["settings"]["max_retries"]
+        state_manager.session.agents[model] = Agent(
+            model=model,
+            tools=[
+                Tool(read_file, max_retries=max_retries),
+                Tool(run_command, max_retries=max_retries),
+                Tool(update_file, max_retries=max_retries),
+                Tool(write_file, max_retries=max_retries),
+            ],
+            mcp_servers=get_mcp_servers(state_manager),
+        )
+    return state_manager.session.agents[model]
 
 
 def patch_tool_messages(
@@ -31,10 +52,57 @@ def patch_tool_messages(
     state_manager: StateManager = None,
 ):
     """
-
-
+    Find any tool calls without responses and add synthetic error responses for them.
+    Takes an error message to use in the synthesized tool response.
+
+    Ignores tools that have corresponding retry prompts as the model is already
+    addressing them.
     """
-
+    if state_manager is None:
+        raise ValueError("state_manager is required for patch_tool_messages")
+
+    messages = state_manager.session.messages
+
+    if not messages:
+        return
+
+    # Map tool calls to their tool returns
+    tool_calls: dict[ToolCallId, ToolName] = {}  # tool_call_id -> tool_name
+    tool_returns: set[ToolCallId] = set()  # set of tool_call_ids with returns
+    retry_prompts: set[ToolCallId] = set()  # set of tool_call_ids with retry prompts
+
+    for message in messages:
+        if hasattr(message, "parts"):
+            for part in message.parts:
+                if (
+                    hasattr(part, "part_kind")
+                    and hasattr(part, "tool_call_id")
+                    and part.tool_call_id
+                ):
+                    if part.part_kind == "tool-call":
+                        tool_calls[part.tool_call_id] = part.tool_name
+                    elif part.part_kind == "tool-return":
+                        tool_returns.add(part.tool_call_id)
+                    elif part.part_kind == "retry-prompt":
+                        retry_prompts.add(part.tool_call_id)
+
+    # Identify orphaned tools (those without responses and not being retried)
+    for tool_call_id, tool_name in list(tool_calls.items()):
+        if tool_call_id not in tool_returns and tool_call_id not in retry_prompts:
+            messages.append(
+                ModelRequest(
+                    parts=[
+                        ToolReturnPart(
+                            tool_name=tool_name,
+                            content=error_message,
+                            tool_call_id=tool_call_id,
+                            timestamp=datetime.now(timezone.utc),
+                            part_kind="tool-return",
+                        )
+                    ],
+                    kind="request",
+                )
+            )
 
 
 async def process_request(
@@ -43,35 +111,9 @@ async def process_request(
     state_manager: StateManager,
     tool_callback: Optional[ToolCallback] = None,
 ) -> AgentRun:
-
-
-
-
-
-
-    # Create a mock AgentRun object for compatibility
-    class MockAgentRun:
-        def __init__(self, result_dict):
-            self._result = result_dict
-
-        @property
-        def result(self):
-            class MockResult:
-                def __init__(self, content):
-                    self._content = content
-
-                @property
-                def output(self):
-                    return self._content
-
-            return MockResult(self._result.get("result", ""))
-
-        @property
-        def messages(self):
-            return state_manager.session.messages
-
-        @property
-        def model(self):
-            return self._result.get("model", model)
-
-    return MockAgentRun(result)
+    agent = get_or_create_agent(model, state_manager)
+    mh = state_manager.session.messages.copy()
+    async with agent.iter(message, message_history=mh) as agent_run:
+        async for node in agent_run:
+            await _process_node(node, tool_callback, state_manager)
+        return agent_run
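For orientation, a minimal usage sketch of the reworked request flow follows; it is not part of the package. It assumes StateManager() can be constructed without arguments, that the first two parameters of process_request are the model name and the user message (they fall outside the hunk above), and that patch_tool_messages takes the error text as its first positional argument; the final output is read via agent_run.result.output, mirroring what the removed mock exposed.

# Hedged usage sketch (not from the diff): driving the pydantic-ai based agent loop.
import asyncio

from tunacode.core.agents.main import patch_tool_messages, process_request
from tunacode.core.state import StateManager


async def on_tool_call(part, node):
    # _process_node invokes this callback for every "tool-call" part it encounters.
    print(f"tool requested: {part.tool_name}")


async def main():
    state_manager = StateManager()  # assumed no-arg constructor
    try:
        agent_run = await process_request(
            "openai:gpt-4.1",          # model names carry a provider prefix
            "Summarize the README",    # user message
            state_manager,
            tool_callback=on_tool_call,
        )
        print(agent_run.result.output)
    except Exception:
        # On an interrupted run, give orphaned tool calls synthetic error
        # returns so the message history stays consistent for the next turn.
        patch_tool_messages("Tool call interrupted", state_manager=state_manager)
        raise


asyncio.run(main())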
tunacode/core/setup/config_setup.py
CHANGED

@@ -1,4 +1,4 @@
-"""Module:
+"""Module: tinyagent.core.setup.config_setup
 
 Configuration system initialization for the Sidekick CLI.
 Handles user configuration loading, validation, and first-time setup onboarding.
@@ -27,6 +27,7 @@ class ConfigSetup(BaseSetup):
         self.config_dir: ConfigPath = Path.home() / ".config"
         self.config_file: ConfigFile = self.config_dir / CONFIG_FILE_NAME
         self.model_registry = ModelRegistry()
+        self.cli_config = None  # Will be set if CLI args are provided
 
     @property
     def name(self) -> str:
@@ -41,18 +42,37 @@ class ConfigSetup(BaseSetup):
         self.state_manager.session.device_id = system.get_device_id()
         loaded_config = user_configuration.load_config()
 
+        # Handle CLI configuration if provided
+        if self.cli_config and any(self.cli_config.values()):
+            await self._handle_cli_config(loaded_config)
+            return
+
         if loaded_config and not force_setup:
             # Silent loading
             # Merge loaded config with defaults to ensure all required keys exist
-            self.state_manager.session.user_config = self._merge_with_defaults(
+            self.state_manager.session.user_config = self._merge_with_defaults(
+                loaded_config
+            )
         else:
             if force_setup:
                 await ui.muted("Running setup process, resetting config")
+                self.state_manager.session.user_config = DEFAULT_USER_CONFIG.copy()
+                user_configuration.save_config(
+                    self.state_manager
+                )  # Save the default config initially
+                await self._onboarding()
             else:
-
-
-
-
+                # No config found - show CLI usage instead of onboarding
+                from tunacode.ui.console import console
+                console.print("\n[bold red]No configuration found![/bold red]")
+                console.print("\n[bold]Quick Setup:[/bold]")
+                console.print("Configure TunaCode using CLI flags:")
+                console.print("\n[blue]Examples:[/blue]")
+                console.print("  [green]tunacode --model 'openai:gpt-4' --key 'your-key'[/green]")
+                console.print("  [green]tunacode --model 'anthropic:claude-3-opus' --key 'your-key'[/green]")
+                console.print("  [green]tunacode --model 'openrouter:anthropic/claude-3.5-sonnet' --key 'your-key' --baseurl 'https://openrouter.ai/api/v1'[/green]")
+                console.print("\n[yellow]Run 'tunacode --help' for more options[/yellow]\n")
+                raise SystemExit(0)
 
         if not self.state_manager.session.user_config.get("default_model"):
             raise ConfigurationError(
@@ -62,21 +82,11 @@ class ConfigSetup(BaseSetup):
                 )
             )
 
-        #
-        default_model = self.state_manager.session.user_config["default_model"]
-        if not self.model_registry.get_model(default_model):
-            # If model not found, run the onboarding again
-            await ui.panel(
-                "Model Not Found",
-                f"The configured model '[bold]{default_model}[/bold]' is no longer available.\n"
-                "Let's reconfigure your setup.",
-                border_style=UI_COLORS["warning"],
-            )
-            await self._onboarding()
+        # No model validation - trust user's model choice
 
-        self.state_manager.session.current_model =
-        "default_model"
-
+        self.state_manager.session.current_model = (
+            self.state_manager.session.user_config["default_model"]
+        )
 
     async def validate(self) -> bool:
         """Validate that configuration is properly set up."""
@@ -88,10 +98,7 @@ class ConfigSetup(BaseSetup):
         if not self.state_manager.session.user_config.get("default_model"):
             return False
 
-        #
-        default_model = self.state_manager.session.user_config["default_model"]
-        if not self.model_registry.get_model(default_model):
-            return False
+        # No model validation - trust user input
 
         return True
 
@@ -112,76 +119,149 @@ class ConfigSetup(BaseSetup):
 
     async def _onboarding(self):
         """Run the onboarding process for new users."""
-        initial_config = json.dumps(
-
-        # Welcome message
-        message = (
-            f"Welcome to {APP_NAME}!\n\n"
-            "Let's configure your AI provider. TunaCode supports:\n"
-            "• OpenAI (api.openai.com)\n"
-            "• OpenRouter (openrouter.ai) - Access 100+ models\n"
-            "• Any OpenAI-compatible API\n"
+        initial_config = json.dumps(
+            self.state_manager.session.user_config, sort_keys=True
         )
-        await ui.panel("Setup", message, border_style=UI_COLORS["primary"])
 
-
-        base_url = await ui.input(
-            "step1",
-            pretext="  API Base URL (press Enter for OpenAI): ",
-            default="https://api.openai.com/v1",
-            state_manager=self.state_manager,
-        )
-        base_url = base_url.strip()
-        if not base_url:
-            base_url = "https://api.openai.com/v1"
-
-        # Step 2: Ask for API key
-        if "openrouter.ai" in base_url.lower():
-            key_prompt = "  OpenRouter API Key: "
-            key_name = "OPENROUTER_API_KEY"
-            default_model = "openrouter:openai/gpt-4o-mini"
-        else:
-            key_prompt = "  API Key: "
-            key_name = "OPENAI_API_KEY"
-            default_model = "openai:gpt-4o"
+        await self._step1_api_keys()
 
-
-
-
-            is_password=True,
-            state_manager=self.state_manager,
-        )
-        api_key = api_key.strip()
+        # Only continue if at least one API key was provided
+        env = self.state_manager.session.user_config.get("env", {})
+        has_api_key = any(key.endswith("_API_KEY") and env.get(key) for key in env)
 
-        if
-
-
-
-            # Set base URL in environment for OpenRouter
-            if "openrouter.ai" in base_url.lower():
-                import os
-                os.environ["OPENAI_BASE_URL"] = base_url
-
-            # Set default model
-            self.state_manager.session.user_config["default_model"] = default_model
+        if has_api_key:
+            if not self.state_manager.session.user_config.get("default_model"):
+                await self._step2_default_model()
 
-            #
-            current_config = json.dumps(
+            # Compare configs to see if anything changed
+            current_config = json.dumps(
+                self.state_manager.session.user_config, sort_keys=True
+            )
             if initial_config != current_config:
                 if user_configuration.save_config(self.state_manager):
-                    message =
-
-
-                        f"Config file: {self.config_file}\n\n"
-                        f"You can change models anytime with /model"
+                    message = f"Config saved to: [bold]{self.config_file}[/bold]"
+                    await ui.panel(
+                        "Finished", message, top=0, border_style=UI_COLORS["success"]
                     )
-                    await ui.panel("Setup Complete", message, top=0, border_style=UI_COLORS["success"])
                 else:
                     await ui.error("Failed to save configuration.")
         else:
             await ui.panel(
                 "Setup canceled",
-                "
+                "At least one API key is required.",
                 border_style=UI_COLORS["warning"],
             )
 
+    async def _step1_api_keys(self):
+        """Onboarding step 1: Collect API keys."""
+        message = (
+            f"Welcome to {APP_NAME}!\n"
+            "Let's get you setup. First, we'll need to set some environment variables.\n"
+            "Skip the ones you don't need."
+        )
+        await ui.panel("Setup", message, border_style=UI_COLORS["primary"])
+        env_keys = self.state_manager.session.user_config["env"].copy()
+        for key in env_keys:
+            provider = key_to_title(key)
+            val = await ui.input(
+                "step1",
+                pretext=f"  {provider}: ",
+                is_password=True,
+                state_manager=self.state_manager,
+            )
+            val = val.strip()
+            if val:
+                self.state_manager.session.user_config["env"][key] = val
+
+    async def _step2_default_model(self):
+        """Onboarding step 2: Select default model."""
+        message = "Which model would you like to use by default?\n\n"
+
+        model_ids = self.model_registry.list_model_ids()
+        for index, model_id in enumerate(model_ids):
+            message += f"  {index} - {model_id}\n"
+        message = message.strip()
+
+        await ui.panel("Default Model", message, border_style=UI_COLORS["primary"])
+        choice = await ui.input(
+            "step2",
+            pretext="  Default model (#): ",
+            validator=ui.ModelValidator(len(model_ids)),
+            state_manager=self.state_manager,
+        )
+        self.state_manager.session.user_config["default_model"] = model_ids[int(choice)]
+
+    async def _step2_default_model_simple(self):
+        """Simple model selection - just enter model name."""
+        await ui.muted("Format: provider:model-name")
+        await ui.muted("Examples: openai:gpt-4.1, anthropic:claude-3-opus, google-gla:gemini-2.0-flash")
+
+        while True:
+            model_name = await ui.input(
+                "step2",
+                pretext="  Model (provider:name): ",
+                state_manager=self.state_manager,
+            )
+            model_name = model_name.strip()
+
+            # Check if provider prefix is present
+            if ":" not in model_name:
+                await ui.error("Model name must include provider prefix")
+                await ui.muted("Format: provider:model-name")
+                await ui.muted("You can always change it later with /model")
+                continue
+
+            # No validation - user is responsible for correct model names
+            self.state_manager.session.user_config["default_model"] = model_name
+            await ui.warning("Model set without validation - verify the model name is correct")
+            await ui.success(f"Selected model: {model_name}")
+            break
+
+    async def _handle_cli_config(self, loaded_config: UserConfig) -> None:
+        """Handle configuration provided via CLI arguments."""
+        # Start with existing config or defaults
+        if loaded_config:
+            self.state_manager.session.user_config = self._merge_with_defaults(loaded_config)
+        else:
+            self.state_manager.session.user_config = DEFAULT_USER_CONFIG.copy()
+
+        # Apply CLI overrides
+        if self.cli_config.get("key"):
+            # Determine which API key to set based on the model or baseurl
+            if self.cli_config.get("baseurl") and "openrouter" in self.cli_config["baseurl"]:
+                self.state_manager.session.user_config["env"]["OPENROUTER_API_KEY"] = self.cli_config["key"]
+            elif self.cli_config.get("model"):
+                if "claude" in self.cli_config["model"] or "anthropic" in self.cli_config["model"]:
+                    self.state_manager.session.user_config["env"]["ANTHROPIC_API_KEY"] = self.cli_config["key"]
+                elif "gpt" in self.cli_config["model"] or "openai" in self.cli_config["model"]:
+                    self.state_manager.session.user_config["env"]["OPENAI_API_KEY"] = self.cli_config["key"]
+                elif "gemini" in self.cli_config["model"]:
+                    self.state_manager.session.user_config["env"]["GEMINI_API_KEY"] = self.cli_config["key"]
+                else:
+                    # Default to OpenRouter for unknown models
+                    self.state_manager.session.user_config["env"]["OPENROUTER_API_KEY"] = self.cli_config["key"]
+
+        if self.cli_config.get("baseurl"):
+            self.state_manager.session.user_config["env"]["OPENAI_BASE_URL"] = self.cli_config["baseurl"]
+
+        if self.cli_config.get("model"):
+            model = self.cli_config["model"]
+            # Require provider prefix
+            if ":" not in model:
+                raise ConfigurationError(
+                    f"Model '{model}' must include provider prefix\n"
+                    "Format: provider:model-name\n"
+                    "Examples: openai:gpt-4.1, anthropic:claude-3-opus"
+                )
+
+            self.state_manager.session.user_config["default_model"] = model
+
+        # Set current model
+        self.state_manager.session.current_model = self.state_manager.session.user_config["default_model"]
+
+        # Save the configuration
+        if user_configuration.save_config(self.state_manager):
+            await ui.warning("Model set without validation - verify the model name is correct")
+            await ui.success(f"Configuration saved to: {self.config_file}")
+        else:
+            await ui.error("Failed to save configuration.")
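To make the key-routing rule in _handle_cli_config easier to see, here is a hedged, standalone sketch of the same decision table; route_api_key is an illustrative helper, not a function in the package.

# Illustrative sketch of the --key routing logic applied above.
from typing import Optional


def route_api_key(model: Optional[str], baseurl: Optional[str]) -> Optional[str]:
    """Pick which env var a CLI-supplied API key should populate, mirroring _handle_cli_config."""
    if baseurl and "openrouter" in baseurl:
        return "OPENROUTER_API_KEY"
    if model:
        if "claude" in model or "anthropic" in model:
            return "ANTHROPIC_API_KEY"
        if "gpt" in model or "openai" in model:
            return "OPENAI_API_KEY"
        if "gemini" in model:
            return "GEMINI_API_KEY"
        return "OPENROUTER_API_KEY"  # unknown providers default to OpenRouter
    return None  # no baseurl or model hint: the key is not stored


assert route_api_key("anthropic:claude-3-opus", None) == "ANTHROPIC_API_KEY"
assert route_api_key("openai:gpt-4.1", None) == "OPENAI_API_KEY"
assert route_api_key("mistral:mixtral-8x7b", None) == "OPENROUTER_API_KEY"
assert route_api_key(None, "https://openrouter.ai/api/v1") == "OPENROUTER_API_KEY"

Note that --baseurl wins over the model heuristic, and a key passed with neither a model nor an OpenRouter base URL is silently dropped.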
tunacode/core/setup/coordinator.py
CHANGED

@@ -1,4 +1,4 @@
-"""Module:
+"""Module: tinyagent.core.setup.coordinator
 
 Setup orchestration and coordination for the Sidekick CLI.
 Manages the execution order and validation of all registered setup steps.
@@ -32,7 +32,9 @@ class SetupCoordinator:
 
                 if not await step.validate():
                     await ui.error(f"Setup validation failed: {step.name}")
-                    raise RuntimeError(
+                    raise RuntimeError(
+                        f"Setup step '{step.name}' failed validation"
+                    )
             else:
                 # Skip silently
                 pass
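The coordinator change only attaches a message to the RuntimeError. Below is a hedged sketch of the step interface it checks, inferred from ConfigSetup above rather than from BaseSetup itself (which this diff does not show).

# Illustrative step whose validation fails, triggering the new error message.
class AlwaysFailingStep:
    @property
    def name(self) -> str:
        return "broken"

    async def validate(self) -> bool:
        return False  # forces the coordinator down the error path


# With 0.0.11 the coordinator now raises
#   RuntimeError("Setup step 'broken' failed validation")
# instead of a bare RuntimeError carrying no message.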