EvoScientist 0.0.1.dev3__py3-none-any.whl → 0.0.1.dev4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- EvoScientist/EvoScientist.py +13 -9
- EvoScientist/__init__.py +19 -0
- EvoScientist/cli.py +264 -23
- EvoScientist/config.py +274 -0
- EvoScientist/llm/__init__.py +21 -0
- EvoScientist/llm/models.py +99 -0
- EvoScientist/onboard.py +725 -0
- EvoScientist/paths.py +2 -3
- EvoScientist/skills_manager.py +1 -2
- EvoScientist/stream/display.py +1 -1
- EvoScientist/tools.py +11 -2
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.0.1.dev4.dist-info}/METADATA +68 -22
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.0.1.dev4.dist-info}/RECORD +17 -13
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.0.1.dev4.dist-info}/WHEEL +0 -0
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.0.1.dev4.dist-info}/entry_points.txt +0 -0
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.0.1.dev4.dist-info}/licenses/LICENSE +0 -0
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.0.1.dev4.dist-info}/top_level.txt +0 -0
EvoScientist/onboard.py
ADDED
|
@@ -0,0 +1,725 @@
|
|
|
1
|
+
"""Interactive onboarding wizard for EvoScientist.
|
|
2
|
+
|
|
3
|
+
Guides users through initial setup including API keys, model selection,
|
|
4
|
+
workspace settings, and agent parameters. Uses flow-style arrow-key selection UI.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import os
|
|
10
|
+
|
|
11
|
+
import questionary
|
|
12
|
+
from prompt_toolkit.styles import Style
|
|
13
|
+
from prompt_toolkit.validation import Validator, ValidationError
|
|
14
|
+
from questionary import Choice
|
|
15
|
+
from rich.console import Console
|
|
16
|
+
from rich.panel import Panel
|
|
17
|
+
from rich.text import Text
|
|
18
|
+
|
|
19
|
+
from .config import (
|
|
20
|
+
EvoScientistConfig,
|
|
21
|
+
load_config,
|
|
22
|
+
save_config,
|
|
23
|
+
get_config_path,
|
|
24
|
+
)
|
|
25
|
+
from .llm import MODELS
|
|
26
|
+
|
|
27
|
+
# Shared Rich console for all wizard output.
console = Console()


# =============================================================================
# Wizard Style
# =============================================================================

# questionary/prompt_toolkit style applied to every selection and text prompt.
WIZARD_STYLE = Style.from_dict({
    "qmark": "fg:#00bcd4 bold",       # Cyan question mark
    "question": "bold",               # Bold question text
    "answer": "fg:#4caf50 bold",      # Green selected answer
    "pointer": "fg:#4caf50",          # Green pointer (»)
    "highlighted": "noreverse bold",  # No background, bold text
    "selected": "fg:#4caf50 bold",    # Green ● indicator
    "separator": "fg:#6c6c6c",        # Dim separator
    "instruction": "fg:#858585",      # Dim instructions
    "text": "fg:#858585",             # Dim gray ○ and unselected text
})

# Style for the final "Save this configuration?" confirmation prompt.
CONFIRM_STYLE = Style.from_dict({
    "qmark": "fg:#e69500 bold",  # Orange warning mark (!)
    "question": "bold",
    "answer": "fg:#4caf50 bold",
    "instruction": "fg:#858585",
    "text": "",
})

# Ordered wizard step names; indices are used by render_progress().
STEPS = ["Provider", "API Key", "Model", "Tavily Key", "Workspace", "Parameters"]
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# =============================================================================
|
|
58
|
+
# Validators
|
|
59
|
+
# =============================================================================
|
|
60
|
+
|
|
61
|
+
class IntegerValidator(Validator):
    """Validates that input is a positive integer."""

    def __init__(self, min_value: int = 1, max_value: int = 100):
        self.min_value = min_value
        self.max_value = max_value

    def validate(self, document) -> None:
        raw = document.text.strip()
        if not raw:
            # Empty input is allowed so the prompt default applies.
            return
        try:
            number = int(raw)
        except ValueError:
            raise ValidationError(message="Must be a valid integer")
        if not (self.min_value <= number <= self.max_value):
            raise ValidationError(
                message=f"Must be between {self.min_value} and {self.max_value}"
            )
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class ChoiceValidator(Validator):
    """Validates that input is one of the allowed choices."""

    def __init__(self, choices: list[str], allow_empty: bool = True):
        self.choices = choices
        self.allow_empty = allow_empty

    def validate(self, document) -> None:
        entered = document.text.strip().lower()
        if self.allow_empty and not entered:
            # Empty input is accepted when a default should take over.
            return
        allowed = {option.lower() for option in self.choices}
        if entered not in allowed:
            raise ValidationError(
                message=f"Must be one of: {', '.join(self.choices)}"
            )
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
# =============================================================================
|
|
100
|
+
# API Key Validation
|
|
101
|
+
# =============================================================================
|
|
102
|
+
|
|
103
|
+
def validate_anthropic_key(api_key: str) -> tuple[bool, str]:
    """Validate an Anthropic API key by making a test request.

    Args:
        api_key: The API key to validate.

    Returns:
        Tuple of (is_valid, message).
    """
    if not api_key:
        return True, "Skipped (no key provided)"

    # Import separately: if the ``anthropic`` package is missing, the import
    # raises inside the try below and evaluating ``anthropic.AuthenticationError``
    # in the except clause would raise a NameError (the name was never bound)
    # instead of reporting the actual problem.
    try:
        import anthropic
    except ImportError as e:
        return False, f"Error: {e}"

    try:
        client = anthropic.Anthropic(api_key=api_key)
        # Make a minimal authenticated request to validate the key.
        client.models.list()
        return True, "Valid"
    except anthropic.AuthenticationError:
        return False, "Invalid API key"
    except Exception as e:  # Network failures, rate limits, etc.
        return False, f"Error: {e}"
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def validate_openai_key(api_key: str) -> tuple[bool, str]:
    """Validate an OpenAI API key by making a test request.

    Args:
        api_key: The API key to validate.

    Returns:
        Tuple of (is_valid, message).
    """
    if not api_key:
        return True, "Skipped (no key provided)"

    # Import separately: if the ``openai`` package is missing, the import
    # raises inside the try below and evaluating ``openai.AuthenticationError``
    # in the except clause would raise a NameError (the name was never bound)
    # instead of reporting the actual problem.
    try:
        import openai
    except ImportError as e:
        return False, f"Error: {e}"

    try:
        client = openai.OpenAI(api_key=api_key)
        # Make a minimal authenticated request to validate the key.
        client.models.list()
        return True, "Valid"
    except openai.AuthenticationError:
        return False, "Invalid API key"
    except Exception as e:  # Network failures, rate limits, etc.
        return False, f"Error: {e}"
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def validate_nvidia_key(api_key: str) -> tuple[bool, str]:
    """Validate an NVIDIA API key by making a test request.

    Args:
        api_key: The API key to validate.

    Returns:
        Tuple of (is_valid, message).
    """
    if not api_key:
        return True, "Skipped (no key provided)"

    try:
        from langchain_nvidia_ai_endpoints import ChatNVIDIA
        llm = ChatNVIDIA(api_key=api_key, model="meta/llama-3.1-8b-instruct")
        # Listing available models is the cheapest authenticated call.
        llm.available_models
        return True, "Valid"
    except Exception as e:
        # The SDK raises generic exceptions; sniff the message for auth hints.
        lowered = str(e).lower()
        auth_markers = ("401", "unauthorized", "invalid", "authentication")
        if any(marker in lowered for marker in auth_markers):
            return False, "Invalid API key"
        return False, f"Error: {e}"
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def validate_tavily_key(api_key: str) -> tuple[bool, str]:
    """Validate a Tavily API key by making a test request.

    Args:
        api_key: The API key to validate.

    Returns:
        Tuple of (is_valid, message).
    """
    if not api_key:
        return True, "Skipped (no key provided)"

    try:
        from tavily import TavilyClient
        # A one-result search is the cheapest authenticated call.
        TavilyClient(api_key=api_key).search("test", max_results=1)
        return True, "Valid"
    except Exception as e:
        # The SDK raises generic exceptions; sniff the message for auth hints.
        lowered = str(e).lower()
        if any(marker in lowered for marker in ("invalid", "unauthorized", "401")):
            return False, "Invalid API key"
        return False, f"Error: {e}"
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
# =============================================================================
|
|
201
|
+
# Display Helpers
|
|
202
|
+
# =============================================================================
|
|
203
|
+
|
|
204
|
+
def _print_header() -> None:
    """Print the wizard header."""
    banner = Text.from_markup(
        "[bold cyan]EvoScientist Setup Wizard[/bold cyan]\n\n"
        "This wizard will help you configure EvoScientist.\n"
        "Press Ctrl+C at any time to cancel."
    )
    console.print()
    console.print(Panel.fit(banner, border_style="cyan"))
    console.print()
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def _print_step_result(step_name: str, value: str, success: bool = True) -> None:
    """Print a completed step result inline.

    Args:
        step_name: Name of the step.
        value: The selected/entered value.
        success: Whether the step was successful (affects icon).
    """
    if success:
        icon = "[green]✓[/green]"
    else:
        icon = "[red]✗[/red]"
    console.print(f" {icon} [bold]{step_name}:[/bold] [cyan]{value}[/cyan]")
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
def _print_step_skipped(step_name: str, reason: str = "kept current") -> None:
    """Print a skipped step result inline.

    Args:
        step_name: Name of the step.
        reason: Reason for skipping.
    """
    line = f" [dim]○ {step_name}: {reason}[/dim]"
    console.print(line)
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
# =============================================================================
|
|
241
|
+
# Step Functions
|
|
242
|
+
# =============================================================================
|
|
243
|
+
|
|
244
|
+
def _step_provider(config: EvoScientistConfig) -> str:
    """Step 1: Select LLM provider.

    Args:
        config: Current configuration.

    Returns:
        Selected provider name.
    """
    known = ["anthropic", "openai", "nvidia"]
    options = [
        Choice(title="Anthropic (Claude models)", value="anthropic"),
        Choice(title="OpenAI (GPT models)", value="openai"),
        Choice(title="NVIDIA (GLM, MiniMax, Kimi, etc.)", value="nvidia"),
    ]

    # Pre-select the configured provider when it is one we recognize.
    preselect = config.provider if config.provider in known else "anthropic"

    answer = questionary.select(
        "Select your LLM provider:",
        choices=options,
        default=preselect,
        style=WIZARD_STYLE,
        use_indicator=True,
    ).ask()

    # questionary returns None on Ctrl+C / EOF inside the prompt.
    if answer is None:
        raise KeyboardInterrupt()

    return answer
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
def _step_provider_api_key(
    config: EvoScientistConfig,
    provider: str,
    skip_validation: bool = False,
) -> str | None:
    """Step 2: Enter API key for the selected provider.

    Args:
        config: Current configuration.
        provider: Selected provider name.
        skip_validation: Skip API key validation.

    Returns:
        New API key or None if unchanged.
    """
    # Resolve the provider-specific label, stored/env key, and validator.
    if provider == "anthropic":
        key_name = "Anthropic"
        current = config.anthropic_api_key or os.environ.get("ANTHROPIC_API_KEY", "")
        validate_fn = validate_anthropic_key
    elif provider == "nvidia":
        key_name = "NVIDIA"
        current = config.nvidia_api_key or os.environ.get("NVIDIA_API_KEY", "")
        validate_fn = validate_nvidia_key
    else:
        key_name = "OpenAI"
        current = config.openai_api_key or os.environ.get("OPENAI_API_KEY", "")
        validate_fn = validate_openai_key

    # Inline hint showing the masked tail of any existing key.
    hint = f"Current: ***{current[-4:]}" if current else "Not set"

    new_key = questionary.password(
        f"Enter {key_name} API key ({hint}, Enter to keep):",
        style=WIZARD_STYLE,
    ).ask()
    if new_key is None:
        raise KeyboardInterrupt()
    new_key = new_key.strip()

    # Validate whichever key is in effect: the new input, else the stored one.
    key_to_validate = new_key or current
    if not key_to_validate:
        return None  # Nothing entered and nothing stored

    if skip_validation:
        return new_key or None

    console.print(" [dim]Validating...[/dim]", end="")
    valid, msg = validate_fn(key_to_validate)
    if valid:
        console.print(f"\r [green]✓ {msg}[/green] ")
        return new_key or None  # None means "keep current"
    console.print(f"\r [red]✗ {msg}[/red] ")
    if not new_key:
        return None  # Stored key failed validation, but keep it
    # New key failed validation; let the user decide whether to keep it.
    save_anyway = questionary.confirm(
        "Save anyway?",
        default=False,
        style=WIZARD_STYLE,
    ).ask()
    if save_anyway is None:
        raise KeyboardInterrupt()
    return new_key if save_anyway else None
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
def _step_model(config: EvoScientistConfig, provider: str) -> str:
    """Step 3: Select model for the provider.

    Args:
        config: Current configuration.
        provider: Selected provider name.

    Returns:
        Selected model name.
    """
    # Registered model names belonging to this provider.
    provider_models = [
        name for name, (_model_id, owner) in MODELS.items() if owner == provider
    ]

    if not provider_models:
        # Nothing in the registry: fall back to free-form text entry.
        console.print(f" [yellow]No registered models for {provider}[/yellow]")
        typed = questionary.text(
            "Enter model name:",
            default=config.model,
            style=WIZARD_STYLE,
        ).ask()
        if typed is None:
            raise KeyboardInterrupt()
        return typed

    # One choice per model, with its concrete model id shown as a hint.
    options = [
        Choice(title=f"{name} ({MODELS[name][0]})", value=name)
        for name in provider_models
    ]

    # Keep the configured model if it belongs to this provider.
    preselect = config.model if config.model in provider_models else provider_models[0]

    selected = questionary.select(
        "Select model:",
        choices=options,
        default=preselect,
        style=WIZARD_STYLE,
        use_indicator=True,
    ).ask()

    if selected is None:
        raise KeyboardInterrupt()

    return selected
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
def _step_tavily_key(
    config: EvoScientistConfig,
    skip_validation: bool = False,
) -> str | None:
    """Step 4: Enter Tavily API key for web search.

    Args:
        config: Current configuration.
        skip_validation: Skip API key validation.

    Returns:
        New API key or None if unchanged.
    """
    current = config.tavily_api_key or os.environ.get("TAVILY_API_KEY", "")

    # Inline hint showing the masked tail of any existing key.
    hint = f"Current: ***{current[-4:]}" if current else "Not set"

    new_key = questionary.password(
        f"Tavily API key for web search ({hint}, Enter to keep):",
        style=WIZARD_STYLE,
    ).ask()
    if new_key is None:
        raise KeyboardInterrupt()
    new_key = new_key.strip()

    # Validate whichever key is in effect: the new input, else the stored one.
    key_to_validate = new_key or current
    if not key_to_validate:
        return None  # Nothing entered and nothing stored

    if skip_validation:
        return new_key or None

    console.print(" [dim]Validating...[/dim]", end="")
    valid, msg = validate_tavily_key(key_to_validate)
    if valid:
        console.print(f"\r [green]✓ {msg}[/green] ")
        return new_key or None  # None means "keep current"
    console.print(f"\r [red]✗ {msg}[/red] ")
    if not new_key:
        return None  # Stored key failed validation, but keep it
    # New key failed validation; let the user decide whether to keep it.
    save_anyway = questionary.confirm(
        "Save anyway?",
        default=False,
        style=WIZARD_STYLE,
    ).ask()
    if save_anyway is None:
        raise KeyboardInterrupt()
    return new_key if save_anyway else None
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
def _step_workspace(config: EvoScientistConfig) -> tuple[str, str]:
    """Step 5: Configure workspace settings.

    Args:
        config: Current configuration.

    Returns:
        Tuple of (mode, workdir).
    """
    options = [
        Choice(
            title="Daemon (persistent workspace ./workspace/)",
            value="daemon",
        ),
        Choice(
            title="Run (isolated per-session ./workspace/runs/<timestamp>/)",
            value="run",
        ),
    ]

    mode = questionary.select(
        "Default workspace mode:",
        choices=options,
        default=config.default_mode,
        style=WIZARD_STYLE,
        use_indicator=True,
    ).ask()
    if mode is None:
        raise KeyboardInterrupt()

    # A custom directory is optional; empty string means "use the default".
    use_custom = questionary.confirm(
        "Use custom workspace directory? (default: ./workspace/)",
        default=bool(config.default_workdir),
        style=WIZARD_STYLE,
    ).ask()
    if use_custom is None:
        raise KeyboardInterrupt()

    workdir = ""
    if use_custom:
        entered = questionary.text(
            "Workspace directory path:",
            default=config.default_workdir or "",
            style=WIZARD_STYLE,
        ).ask()
        if entered is None:
            raise KeyboardInterrupt()
        workdir = entered.strip()

    return mode, workdir
|
|
527
|
+
|
|
528
|
+
|
|
529
|
+
def _step_parameters(config: EvoScientistConfig) -> tuple[int, int, bool]:
    """Step 6: Configure agent parameters.

    Args:
        config: Current configuration.

    Returns:
        Tuple of (max_concurrent, max_iterations, show_thinking).
    """

    def _in_range(raw: str) -> bool:
        # Accept empty input (keep default) or an integer in [1, 10].
        stripped = raw.strip()
        return stripped == "" or (stripped.isdigit() and 1 <= int(stripped) <= 10)

    def _ask_int(prompt: str, fallback: int) -> int:
        # Shared prompt logic for both integer parameters.
        answer = questionary.text(
            prompt,
            default=str(fallback),
            style=WIZARD_STYLE,
            validate=_in_range,
        ).ask()
        if answer is None:
            raise KeyboardInterrupt()
        answer = answer.strip()
        return int(answer) if answer else fallback

    max_concurrent = _ask_int("Max concurrent sub-agents (1-10):", config.max_concurrent)
    max_iterations = _ask_int("Max delegation iterations (1-10):", config.max_iterations)

    show_thinking = questionary.select(
        "Show thinking panel in CLI?",
        choices=[
            Choice(title="On (show model reasoning)", value=True),
            Choice(title="Off (hide model reasoning)", value=False),
        ],
        default=config.show_thinking,
        style=WIZARD_STYLE,
        use_indicator=True,
    ).ask()
    if show_thinking is None:
        raise KeyboardInterrupt()

    return max_concurrent, max_iterations, show_thinking
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
# =============================================================================
|
|
585
|
+
# Progress Rendering (for tests and potential future use)
|
|
586
|
+
# =============================================================================
|
|
587
|
+
|
|
588
|
+
def render_progress(current_step: int, completed: set[int]) -> Panel:
    """Render the progress indicator panel.

    Args:
        current_step: Index of the current step (0-based).
        completed: Set of completed step indices.

    Returns:
        A Rich Panel displaying the progress.
    """
    parts: list[Text] = []
    last = len(STEPS) - 1

    for idx, step_name in enumerate(STEPS):
        # Pick icon glyph and styles by step state.
        if idx in completed:
            icon_char, icon_style, label_style = "●", "green bold", "green"
        elif idx == current_step:
            icon_char, icon_style, label_style = "◉", "cyan bold", "cyan bold"
        else:
            icon_char, icon_style, label_style = "○", "dim", "dim"

        row = Text()
        row.append_text(Text(icon_char, style=icon_style))
        row.append_text(Text(f" {step_name}", style=label_style))
        parts.append(row)

        # Vertical connector between consecutive steps, colored like the
        # step above it.
        if idx < last:
            if idx in completed:
                connector_style = "green"
            elif idx == current_step:
                connector_style = "cyan"
            else:
                connector_style = "dim"
            parts.append(Text("│", style=connector_style))

    body = Text("\n").join(parts)
    return Panel(body, title="[bold]EvoScientist Setup[/bold]", border_style="blue")
|
|
628
|
+
|
|
629
|
+
|
|
630
|
+
# =============================================================================
|
|
631
|
+
# Main onboard function
|
|
632
|
+
# =============================================================================
|
|
633
|
+
|
|
634
|
+
def run_onboard(skip_validation: bool = False) -> bool:
    """Run the interactive onboarding wizard.

    Args:
        skip_validation: Skip API key validation.

    Returns:
        True if configuration was saved, False if cancelled.
    """
    # Maps provider name -> config attribute holding its API key.
    key_attrs = {
        "anthropic": "anthropic_api_key",
        "nvidia": "nvidia_api_key",
        "openai": "openai_api_key",
    }

    try:
        _print_header()

        # Start from whatever is already configured on disk.
        config = load_config()

        # Step 1: Provider
        provider = _step_provider(config)
        config.provider = provider

        # Step 2: Provider API Key (None means "keep what is stored")
        attr = key_attrs.get(provider, "openai_api_key")
        new_key = _step_provider_api_key(config, provider, skip_validation)
        if new_key is not None:
            setattr(config, attr, new_key)
        elif not getattr(config, attr):
            _print_step_skipped("API Key", "not set")

        # Step 3: Model
        config.model = _step_model(config, provider)

        # Step 4: Tavily Key (None means "keep what is stored")
        new_tavily_key = _step_tavily_key(config, skip_validation)
        if new_tavily_key is not None:
            config.tavily_api_key = new_tavily_key
        elif not config.tavily_api_key:
            _print_step_skipped("Tavily Key", "not set")

        # Step 5: Workspace
        config.default_mode, config.default_workdir = _step_workspace(config)

        # Step 6: Parameters
        (
            config.max_concurrent,
            config.max_iterations,
            config.show_thinking,
        ) = _step_parameters(config)

        # Final confirmation before writing to disk.
        console.print()
        save = questionary.confirm(
            "Save this configuration?",
            default=True,
            style=CONFIRM_STYLE,
            qmark="!",
        ).ask()
        if save is None:
            raise KeyboardInterrupt()

        if save:
            save_config(config)
            console.print()
            console.print("[green]✓ Configuration saved![/green]")
            console.print(f"[dim] → {get_config_path()}[/dim]")
            console.print()
            return True

        console.print()
        console.print("[yellow]Configuration not saved.[/yellow]")
        console.print()
        return False

    except KeyboardInterrupt:
        # Any prompt cancelled (Ctrl+C / EOF) lands here.
        console.print()
        console.print("[yellow]Setup cancelled.[/yellow]")
        console.print()
        return False