tunacode-cli 0.0.73__py3-none-any.whl → 0.0.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic. Click here for more details.

@@ -104,7 +104,15 @@ class ModelCommand(SimpleCommand):
104
104
  # Auto-select single result
105
105
  model = models[0]
106
106
  context.state_manager.session.current_model = model.full_id
107
- await ui.success(f"Switched to model: {model.full_id} - {model.name}")
107
+ # Persist selection to config by default
108
+ try:
109
+ user_configuration.set_default_model(model.full_id, context.state_manager)
110
+ await ui.success(
111
+ f"Switched to model: {model.full_id} - {model.name} (saved as default)"
112
+ )
113
+ except ConfigurationError as e:
114
+ await ui.error(str(e))
115
+ await ui.warning("Model switched for this session only; failed to save default.")
108
116
  return None
109
117
 
110
118
  # Show multiple results
@@ -159,7 +167,7 @@ class ModelCommand(SimpleCommand):
159
167
  # Set the model
160
168
  context.state_manager.session.current_model = model_name
161
169
 
162
- # Check if setting as default
170
+ # Check if setting as default (preserve existing behavior)
163
171
  if extra_args and extra_args[0] == "default":
164
172
  try:
165
173
  user_configuration.set_default_model(model_name, context.state_manager)
@@ -169,7 +177,13 @@ class ModelCommand(SimpleCommand):
169
177
  await ui.error(str(e))
170
178
  return None
171
179
 
172
- await ui.success(f"Switched to model: {model_name}")
180
+ # Persist selection to config by default (auto-persist)
181
+ try:
182
+ user_configuration.set_default_model(model_name, context.state_manager)
183
+ await ui.success(f"Switched to model: {model_name} (saved as default)")
184
+ except ConfigurationError as e:
185
+ await ui.error(str(e))
186
+ await ui.warning("Model switched for this session only; failed to save default.")
173
187
  return None
174
188
 
175
189
  # No colon - treat as search query
@@ -184,7 +198,15 @@ class ModelCommand(SimpleCommand):
184
198
  # Single match - use it
185
199
  model = models[0]
186
200
  context.state_manager.session.current_model = model.full_id
187
- await ui.success(f"Switched to model: {model.full_id} - {model.name}")
201
+ # Persist selection to config by default
202
+ try:
203
+ user_configuration.set_default_model(model.full_id, context.state_manager)
204
+ await ui.success(
205
+ f"Switched to model: {model.full_id} - {model.name} (saved as default)"
206
+ )
207
+ except ConfigurationError as e:
208
+ await ui.error(str(e))
209
+ await ui.warning("Model switched for this session only; failed to save default.")
188
210
  return None
189
211
 
190
212
  # Multiple matches - show interactive selector with results
@@ -193,7 +215,13 @@ class ModelCommand(SimpleCommand):
193
215
 
194
216
  if selected_model:
195
217
  context.state_manager.session.current_model = selected_model
196
- await ui.success(f"Switched to model: {selected_model}")
218
+ # Persist selection to config by default
219
+ try:
220
+ user_configuration.set_default_model(selected_model, context.state_manager)
221
+ await ui.success(f"Switched to model: {selected_model} (saved as default)")
222
+ except ConfigurationError as e:
223
+ await ui.error(str(e))
224
+ await ui.warning("Model switched for this session only; failed to save default.")
197
225
  else:
198
226
  await ui.info("Model selection cancelled")
199
227
 
tunacode/constants.py CHANGED
@@ -9,7 +9,7 @@ from enum import Enum
9
9
 
10
10
  # Application info
11
11
  APP_NAME = "TunaCode"
12
- APP_VERSION = "0.0.73"
12
+ APP_VERSION = "0.0.76"
13
13
 
14
14
 
15
15
  # File patterns
@@ -448,7 +448,22 @@ async def _process_tool_calls(
448
448
  f"[bold #00d7ff]{tool_desc}...[/bold #00d7ff]", state_manager
449
449
  )
450
450
 
451
- await tool_callback(part, node)
451
+ # Execute the tool with robust error handling so one failure doesn't crash the run
452
+ try:
453
+ await tool_callback(part, node)
454
+ except Exception as tool_err:
455
+ logger.error(
456
+ "Tool callback failed: tool=%s iter=%s err=%s",
457
+ getattr(part, "tool_name", "<unknown>"),
458
+ getattr(state_manager.session, "current_iteration", "?"),
459
+ tool_err,
460
+ exc_info=True,
461
+ )
462
+ # Surface to UI when thoughts are enabled, then continue gracefully
463
+ if getattr(state_manager.session, "show_thoughts", False):
464
+ await ui.warning(
465
+ f"❌ Tool failed: {getattr(part, 'tool_name', '<unknown>')} — continuing"
466
+ )
452
467
 
453
468
  # Track tool calls in session
454
469
  if is_processing_tools:
@@ -133,6 +133,11 @@ async def process_request(
133
133
  import uuid
134
134
 
135
135
  request_id = str(uuid.uuid4())[:8]
136
+ # Attach request_id to session for downstream logging/context
137
+ try:
138
+ state_manager.session.request_id = request_id
139
+ except Exception:
140
+ pass
136
141
 
137
142
  # Reset state for new request
138
143
  state_manager.session.current_iteration = 0
@@ -169,14 +174,43 @@ async def process_request(
169
174
  # Handle token-level streaming for model request nodes
170
175
  Agent, _ = get_agent_tool()
171
176
  if streaming_callback and STREAMING_AVAILABLE and Agent.is_model_request_node(node):
172
- async with node.stream(agent_run.ctx) as request_stream:
173
- async for event in request_stream:
174
- if isinstance(event, PartDeltaEvent) and isinstance(
175
- event.delta, TextPartDelta
176
- ):
177
- # Stream individual token deltas
178
- if event.delta.content_delta and streaming_callback:
179
- await streaming_callback(event.delta.content_delta)
177
+ # Gracefully handle streaming errors from LLM provider
178
+ for attempt in range(2): # simple retry once, then degrade gracefully
179
+ try:
180
+ async with node.stream(agent_run.ctx) as request_stream:
181
+ async for event in request_stream:
182
+ if isinstance(event, PartDeltaEvent) and isinstance(
183
+ event.delta, TextPartDelta
184
+ ):
185
+ # Stream individual token deltas
186
+ if event.delta.content_delta and streaming_callback:
187
+ await streaming_callback(event.delta.content_delta)
188
+ break # successful streaming; exit retry loop
189
+ except Exception as stream_err:
190
+ # Log with context and optionally notify UI, then retry once
191
+ logger.warning(
192
+ "Streaming error (attempt %s/2) req=%s iter=%s: %s",
193
+ attempt + 1,
194
+ request_id,
195
+ i,
196
+ stream_err,
197
+ exc_info=True,
198
+ )
199
+ if getattr(state_manager.session, "show_thoughts", False):
200
+ from tunacode.ui import console as ui
201
+
202
+ await ui.warning(
203
+ "⚠️ Streaming failed; retrying once then falling back"
204
+ )
205
+ # On second failure, degrade gracefully (no streaming)
206
+ if attempt == 1:
207
+ if getattr(state_manager.session, "show_thoughts", False):
208
+ from tunacode.ui import console as ui
209
+
210
+ await ui.muted(
211
+ "Switching to non-streaming processing for this node"
212
+ )
213
+ break
180
214
 
181
215
  empty_response, empty_reason = await _process_node(
182
216
  node,
@@ -451,10 +485,22 @@ Please let me know how to proceed."""
451
485
  # Re-raise to be handled by caller
452
486
  raise
453
487
  except Exception as e:
454
- logger.error(f"Error in process_request: {e}", exc_info=True)
488
+ # Include request context to aid debugging
489
+ safe_iter = (
490
+ state_manager.session.current_iteration
491
+ if hasattr(state_manager.session, "current_iteration")
492
+ else "?"
493
+ )
494
+ logger.error(
495
+ f"Error in process_request [req={request_id} iter={safe_iter}]: {e}",
496
+ exc_info=True,
497
+ )
455
498
  # Patch orphaned tool messages with generic error
456
499
  patch_tool_messages(
457
500
  f"Request processing failed: {str(e)[:100]}...", state_manager=state_manager
458
501
  )
459
502
  # Re-raise to be handled by caller
460
503
  raise
504
+
505
+
506
+ 1
@@ -11,12 +11,16 @@ from tunacode.configuration.defaults import DEFAULT_USER_CONFIG
11
11
  from tunacode.configuration.models import ModelRegistry
12
12
  from tunacode.constants import APP_NAME, CONFIG_FILE_NAME, UI_COLORS
13
13
  from tunacode.core.setup.base import BaseSetup
14
+ from tunacode.core.setup.config_wizard import ConfigWizard
14
15
  from tunacode.core.state import StateManager
15
16
  from tunacode.exceptions import ConfigurationError
16
17
  from tunacode.types import ConfigFile, ConfigPath, UserConfig
17
18
  from tunacode.ui import console as ui
18
19
  from tunacode.utils import system, user_configuration
19
- from tunacode.utils.api_key_validation import validate_api_key_for_model
20
+ from tunacode.utils.api_key_validation import (
21
+ get_required_api_key_for_model,
22
+ validate_api_key_for_model,
23
+ )
20
24
  from tunacode.utils.text_utils import key_to_title
21
25
 
22
26
 
@@ -105,14 +109,17 @@ class ConfigSetup(BaseSetup):
105
109
  raise
106
110
 
107
111
  if wizard_mode:
108
- await self._wizard_onboarding()
112
+ wizard = ConfigWizard(self.state_manager, self.model_registry, self.config_file)
113
+ await wizard.run_onboarding()
109
114
  else:
110
115
  await self._onboarding()
111
116
  else:
112
- # No config found - show CLI usage instead of onboarding
117
+ # No config found - show CLI usage and continue with safe defaults (no crash)
113
118
  from tunacode.ui.console import console
114
119
 
115
- console.print("\n[bold red]No configuration found![/bold red]")
120
+ console.print(
121
+ "\n[bold yellow]No configuration found — using safe defaults.[/bold yellow]"
122
+ )
116
123
  console.print("\n[bold]Quick Setup:[/bold]")
117
124
  console.print("Configure TunaCode using CLI flags:")
118
125
  console.print("\n[blue]Examples:[/blue]")
@@ -127,16 +134,19 @@ class ConfigSetup(BaseSetup):
127
134
  console.print("\n[yellow]Run 'tunacode --help' for more options[/yellow]\n")
128
135
  console.print("\n[cyan]Or use --wizard for guided setup[/cyan]\n")
129
136
 
130
- raise ConfigurationError(
131
- "No configuration found. Please use CLI flags to configure or --wizard for guided setup."
132
- )
137
+ # Initialize in-memory defaults so we don't crash
138
+ self.state_manager.session.user_config = DEFAULT_USER_CONFIG.copy()
139
+ # Mark config as not fully validated for the fast path
140
+ setattr(self.state_manager, "_config_valid", False)
133
141
 
134
142
  if not self.state_manager.session.user_config.get("default_model"):
135
- raise ConfigurationError(
136
- (
137
- f"No default model found in config at [bold]{self.config_file}[/bold]\n\n"
138
- "Run [code]sidekick --setup[/code] to rerun the setup process."
139
- )
143
+ # Gracefully apply default model instead of crashing
144
+ self.state_manager.session.user_config["default_model"] = DEFAULT_USER_CONFIG[
145
+ "default_model"
146
+ ]
147
+ await ui.warning(
148
+ "No default model set in config; applying safe default "
149
+ f"'{self.state_manager.session.user_config['default_model']}'."
140
150
  )
141
151
 
142
152
  # Validate API key exists for the selected model
@@ -145,10 +155,42 @@ class ConfigSetup(BaseSetup):
145
155
  model, self.state_manager.session.user_config
146
156
  )
147
157
  if not is_valid:
148
- raise ConfigurationError(error_msg)
158
+ # Try to pick a fallback model based on whichever provider has a key configured
159
+ fallback = self._pick_fallback_model(self.state_manager.session.user_config)
160
+ if fallback and fallback != model:
161
+ await ui.warning(
162
+ "API key missing for selected model; switching to configured provider: "
163
+ f"'{fallback}'."
164
+ )
165
+ self.state_manager.session.user_config["default_model"] = fallback
166
+ model = fallback
167
+ else:
168
+ # No suitable fallback; continue without crashing but mark invalid
169
+ await ui.warning(
170
+ (error_msg or "API key missing for model")
171
+ + "\nContinuing without provider initialization; run 'tunacode --setup' later."
172
+ )
173
+ setattr(self.state_manager, "_config_valid", False)
149
174
 
150
175
  self.state_manager.session.current_model = model
151
176
 
177
+ def _pick_fallback_model(self, user_config: UserConfig) -> str | None:
178
+ """Select a reasonable fallback model based on configured API keys."""
179
+ env = (user_config or {}).get("env", {})
180
+
181
+ # Preference order: OpenAI → Anthropic → Google → OpenRouter
182
+ if env.get("OPENAI_API_KEY", "").strip():
183
+ return "openai:gpt-4o"
184
+ if env.get("ANTHROPIC_API_KEY", "").strip():
185
+ return "anthropic:claude-sonnet-4"
186
+ if env.get("GEMINI_API_KEY", "").strip():
187
+ return "google:gemini-2.5-flash"
188
+ if env.get("OPENROUTER_API_KEY", "").strip():
189
+ # Use the project default when OpenRouter is configured
190
+ return DEFAULT_USER_CONFIG.get("default_model", "openrouter:openai/gpt-4.1")
191
+
192
+ return None
193
+
152
194
  async def validate(self) -> bool:
153
195
  """Validate that configuration is properly set up."""
154
196
  # Check that we have a user config
@@ -168,6 +210,18 @@ class ConfigSetup(BaseSetup):
168
210
  # Store error message for later use
169
211
  setattr(self.state_manager, "_config_error", error_msg)
170
212
 
213
+ # Provide actionable guidance for manual setup
214
+ required_key, provider_name = get_required_api_key_for_model(model)
215
+ setup_hint = (
216
+ f"Missing API key for {provider_name}.\n"
217
+ f"Either run 'tunacode --wizard' (recommended) or add it manually to: {self.config_file}\n\n"
218
+ "Example snippet (add under 'env'):\n"
219
+ ' "env": {\n'
220
+ f' "{required_key or "PROVIDER_API_KEY"}": "your-key-here"\n'
221
+ " }\n"
222
+ )
223
+ await ui.error(setup_hint)
224
+
171
225
  # Cache result for fastpath
172
226
  if valid:
173
227
  setattr(self.state_manager, "_config_valid", True)
@@ -373,214 +427,3 @@ class ConfigSetup(BaseSetup):
373
427
  await ui.success(f"Configuration saved to: {self.config_file}")
374
428
  except ConfigurationError as e:
375
429
  await ui.error(str(e))
376
-
377
- async def _wizard_onboarding(self):
378
- """Run enhanced wizard-style onboarding process for new users."""
379
- initial_config = json.dumps(self.state_manager.session.user_config, sort_keys=True)
380
-
381
- # Welcome message with provider guidance
382
- await ui.panel(
383
- "Welcome to TunaCode Setup Wizard!",
384
- "This guided setup will help you configure TunaCode in under 5 minutes.\n"
385
- "We'll help you choose a provider, set up your API keys, and configure your preferred model.",
386
- border_style=UI_COLORS["primary"],
387
- )
388
-
389
- # Step 1: Provider selection with detailed guidance
390
- await self._wizard_step1_provider_selection()
391
-
392
- # Step 2: API key setup with provider-specific guidance
393
- await self._wizard_step2_api_key_setup()
394
-
395
- # Step 3: Model selection with smart recommendations
396
- await self._wizard_step3_model_selection()
397
-
398
- # Step 4: Optional settings configuration
399
- await self._wizard_step4_optional_settings()
400
-
401
- # Save configuration and finish
402
- current_config = json.dumps(self.state_manager.session.user_config, sort_keys=True)
403
- if initial_config != current_config:
404
- try:
405
- user_configuration.save_config(self.state_manager)
406
- await ui.panel(
407
- "Setup Complete!",
408
- f"Configuration saved to: [bold]{self.config_file}[/bold]\n\n"
409
- "You're ready to start using TunaCode!\n"
410
- "Use [green]/quickstart[/green] anytime for a tutorial.",
411
- border_style=UI_COLORS["success"],
412
- )
413
- except ConfigurationError as e:
414
- await ui.error(str(e))
415
-
416
- async def _wizard_step1_provider_selection(self):
417
- """Wizard step 1: Provider selection with detailed explanations."""
418
- provider_info = {
419
- "1": {
420
- "name": "OpenRouter",
421
- "description": "Access to multiple models (GPT-4, Claude, Gemini, etc.)",
422
- "signup": "https://openrouter.ai/",
423
- "key_name": "OPENROUTER_API_KEY",
424
- },
425
- "2": {
426
- "name": "OpenAI",
427
- "description": "GPT-4 models",
428
- "signup": "https://platform.openai.com/signup",
429
- "key_name": "OPENAI_API_KEY",
430
- },
431
- "3": {
432
- "name": "Anthropic",
433
- "description": "Claude-3 models",
434
- "signup": "https://console.anthropic.com/",
435
- "key_name": "ANTHROPIC_API_KEY",
436
- },
437
- "4": {
438
- "name": "Google",
439
- "description": "Gemini models",
440
- "signup": "https://ai.google.dev/",
441
- "key_name": "GEMINI_API_KEY",
442
- },
443
- }
444
-
445
- message = "Choose your AI provider:\n\n"
446
- for key, info in provider_info.items():
447
- message += f" {key} - {info['name']}: {info['description']}\n"
448
-
449
- await ui.panel("Provider Selection", message, border_style=UI_COLORS["primary"])
450
-
451
- while True:
452
- choice = await ui.input(
453
- "wizard_provider",
454
- pretext=" Choose provider (1-4): ",
455
- state_manager=self.state_manager,
456
- )
457
-
458
- if choice.strip() in provider_info:
459
- selected = provider_info[choice.strip()]
460
- self._wizard_selected_provider = selected
461
-
462
- await ui.success(f"Selected: {selected['name']}")
463
- await ui.info(f"Sign up at: {selected['signup']}")
464
- break
465
- else:
466
- await ui.error("Please enter 1, 2, 3, or 4")
467
-
468
- async def _wizard_step2_api_key_setup(self):
469
- """Wizard step 2: API key setup with provider-specific guidance."""
470
- provider = self._wizard_selected_provider
471
-
472
- message = f"Enter your {provider['name']} API key:\n\n"
473
- message += f"Get your key from: {provider['signup']}\n"
474
- message += "Your key will be stored securely in your local config"
475
-
476
- await ui.panel(f"{provider['name']} API Key", message, border_style=UI_COLORS["primary"])
477
-
478
- while True:
479
- api_key = await ui.input(
480
- "wizard_api_key",
481
- pretext=f" {provider['name']} API Key: ",
482
- is_password=True,
483
- state_manager=self.state_manager,
484
- )
485
-
486
- if api_key.strip():
487
- # Ensure env dict exists
488
- if "env" not in self.state_manager.session.user_config:
489
- self.state_manager.session.user_config["env"] = {}
490
-
491
- self.state_manager.session.user_config["env"][provider["key_name"]] = (
492
- api_key.strip()
493
- )
494
- await ui.success("API key saved successfully!")
495
- break
496
- else:
497
- await ui.error("API key cannot be empty")
498
-
499
- async def _wizard_step3_model_selection(self):
500
- """Wizard step 3: Model selection with smart recommendations."""
501
- provider = self._wizard_selected_provider
502
-
503
- # Provide smart recommendations based on provider
504
- recommendations = {
505
- "OpenAI": [
506
- ("openai:gpt-4o", "GPT-4o flagship multimodal model (recommended)"),
507
- ("openai:gpt-4.1", "Latest GPT-4.1 with enhanced coding"),
508
- ("openai:o3", "Advanced reasoning model for complex tasks"),
509
- ],
510
- "Anthropic": [
511
- ("anthropic:claude-sonnet-4", "Claude Sonnet 4 latest generation (recommended)"),
512
- ("anthropic:claude-opus-4.1", "Most capable Claude with extended thinking"),
513
- ("anthropic:claude-3.5-sonnet", "Claude 3.5 Sonnet proven performance"),
514
- ],
515
- "OpenRouter": [
516
- (
517
- "openrouter:anthropic/claude-sonnet-4",
518
- "Claude Sonnet 4 via OpenRouter (recommended)",
519
- ),
520
- ("openrouter:openai/gpt-4.1", "GPT-4.1 via OpenRouter"),
521
- ("openrouter:google/gemini-2.5-flash", "Google Gemini 2.5 Flash latest"),
522
- ],
523
- "Google": [
524
- (
525
- "google:gemini-2.5-pro",
526
- "Gemini 2.5 Pro with thinking capabilities (recommended)",
527
- ),
528
- ("google:gemini-2.5-flash", "Gemini 2.5 Flash best price-performance"),
529
- ("google:gemini-2.0-flash", "Gemini 2.0 Flash with native tool use"),
530
- ],
531
- }
532
-
533
- models = recommendations.get(provider["name"], [])
534
- message = f"Choose your default {provider['name']} model:\n\n"
535
-
536
- for i, (model_id, description) in enumerate(models, 1):
537
- message += f" {i} - {description}\n"
538
-
539
- message += "\nYou can change this later with [green]/model[/green]"
540
-
541
- await ui.panel("Model Selection", message, border_style=UI_COLORS["primary"])
542
-
543
- while True:
544
- choice = await ui.input(
545
- "wizard_model",
546
- pretext=f" Choose model (1-{len(models)}): ",
547
- state_manager=self.state_manager,
548
- )
549
-
550
- try:
551
- index = int(choice.strip()) - 1
552
- if 0 <= index < len(models):
553
- selected_model = models[index][0]
554
- self.state_manager.session.user_config["default_model"] = selected_model
555
- await ui.success(f"Selected: {selected_model}")
556
- break
557
- else:
558
- await ui.error(f"Please enter a number between 1 and {len(models)}")
559
- except ValueError:
560
- await ui.error("Please enter a valid number")
561
-
562
- async def _wizard_step4_optional_settings(self):
563
- """Wizard step 4: Optional settings configuration."""
564
- message = "Configure optional settings:\n\n"
565
- message += "• Tutorial: Enable interactive tutorial for new users\n"
566
- message += "\nSkip this step to use recommended defaults"
567
-
568
- await ui.panel("Optional Settings", message, border_style=UI_COLORS["primary"])
569
-
570
- # Ask about tutorial
571
- tutorial_choice = await ui.input(
572
- "wizard_tutorial",
573
- pretext=" Enable tutorial for new users? [Y/n]: ",
574
- state_manager=self.state_manager,
575
- )
576
-
577
- enable_tutorial = tutorial_choice.strip().lower() not in ["n", "no", "false"]
578
-
579
- if "settings" not in self.state_manager.session.user_config:
580
- self.state_manager.session.user_config["settings"] = {}
581
-
582
- self.state_manager.session.user_config["settings"]["enable_tutorial"] = enable_tutorial
583
-
584
- # Streaming is always enabled - no user choice needed
585
-
586
- await ui.info("Optional settings configured!")
@@ -0,0 +1,228 @@
1
+ """Module: tunacode.core.setup.config_wizard
2
+
3
+ Wizard-style onboarding helpers extracted from ConfigSetup to reduce file size
4
+ and keep responsibilities focused.
5
+ """
6
+
7
+ import json
8
+ from pathlib import Path
9
+
10
+ from tunacode.constants import UI_COLORS
11
+ from tunacode.exceptions import ConfigurationError
12
+ from tunacode.ui import console as ui
13
+ from tunacode.utils import user_configuration
14
+
15
+
16
+ class ConfigWizard:
17
+ """Encapsulates the interactive configuration wizard flow."""
18
+
19
+ def __init__(self, state_manager, model_registry, config_file: Path):
20
+ self.state_manager = state_manager
21
+ self.model_registry = model_registry
22
+ self.config_file = config_file
23
+ self._wizard_selected_provider = None
24
+
25
+ async def run_onboarding(self) -> None:
26
+ """Run enhanced wizard-style onboarding process for new users."""
27
+ initial_config = json.dumps(self.state_manager.session.user_config, sort_keys=True)
28
+
29
+ # Welcome message with provider guidance
30
+ await ui.panel(
31
+ "Welcome to TunaCode Setup Wizard!",
32
+ "This guided setup will help you configure TunaCode in under 5 minutes.\n"
33
+ "We'll help you choose a provider, set up your API keys, and configure your preferred model.",
34
+ border_style=UI_COLORS["primary"],
35
+ )
36
+
37
+ # Steps
38
+ await self._step1_provider_selection()
39
+ await self._step2_api_key_setup()
40
+ await self._step3_model_selection()
41
+ await self._step4_optional_settings()
42
+
43
+ # Save configuration and finish
44
+ current_config = json.dumps(self.state_manager.session.user_config, sort_keys=True)
45
+ if initial_config != current_config:
46
+ try:
47
+ user_configuration.save_config(self.state_manager)
48
+ await ui.panel(
49
+ "Setup Complete!",
50
+ f"Configuration saved to: [bold]{self.config_file}[/bold]\n\n"
51
+ "You're ready to start using TunaCode!\n"
52
+ "Use [green]/quickstart[/green] anytime for a tutorial.",
53
+ border_style=UI_COLORS["success"],
54
+ )
55
+ except ConfigurationError as e:
56
+ await ui.error(str(e))
57
+
58
+ async def _step1_provider_selection(self) -> None:
59
+ """Wizard step 1: Provider selection with detailed explanations."""
60
+ provider_info = {
61
+ "1": {
62
+ "name": "OpenRouter",
63
+ "description": "Access to multiple models (GPT-4, Claude, Gemini, etc.)",
64
+ "signup": "https://openrouter.ai/",
65
+ "key_name": "OPENROUTER_API_KEY",
66
+ },
67
+ "2": {
68
+ "name": "OpenAI",
69
+ "description": "GPT-4 models",
70
+ "signup": "https://platform.openai.com/signup",
71
+ "key_name": "OPENAI_API_KEY",
72
+ },
73
+ "3": {
74
+ "name": "Anthropic",
75
+ "description": "Claude-3 models",
76
+ "signup": "https://console.anthropic.com/",
77
+ "key_name": "ANTHROPIC_API_KEY",
78
+ },
79
+ "4": {
80
+ "name": "Google",
81
+ "description": "Gemini models",
82
+ "signup": "https://ai.google.dev/",
83
+ "key_name": "GEMINI_API_KEY",
84
+ },
85
+ }
86
+
87
+ message = "Choose your AI provider:\n\n"
88
+ for key, info in provider_info.items():
89
+ message += f" {key} - {info['name']}: {info['description']}\n"
90
+
91
+ await ui.panel("Provider Selection", message, border_style=UI_COLORS["primary"])
92
+
93
+ while True:
94
+ choice = await ui.input(
95
+ "wizard_provider",
96
+ pretext=" Choose provider (1-4): ",
97
+ state_manager=self.state_manager,
98
+ )
99
+
100
+ if choice.strip() in provider_info:
101
+ selected = provider_info[choice.strip()]
102
+ self._wizard_selected_provider = selected
103
+
104
+ await ui.success(f"Selected: {selected['name']}")
105
+ await ui.info(f"Sign up at: {selected['signup']}")
106
+ break
107
+ else:
108
+ await ui.error("Please enter 1, 2, 3, or 4")
109
+
110
+ async def _step2_api_key_setup(self) -> None:
111
+ """Wizard step 2: API key setup with provider-specific guidance."""
112
+ provider = self._wizard_selected_provider
113
+
114
+ message = f"Enter your {provider['name']} API key:\n\n"
115
+ message += f"Get your key from: {provider['signup']}\n"
116
+ message += "Your key will be stored securely in your local config"
117
+
118
+ await ui.panel(f"{provider['name']} API Key", message, border_style=UI_COLORS["primary"])
119
+
120
+ while True:
121
+ api_key = await ui.input(
122
+ "wizard_api_key",
123
+ pretext=f" {provider['name']} API Key: ",
124
+ is_password=True,
125
+ state_manager=self.state_manager,
126
+ )
127
+
128
+ if api_key.strip():
129
+ # Ensure env dict exists
130
+ if "env" not in self.state_manager.session.user_config:
131
+ self.state_manager.session.user_config["env"] = {}
132
+
133
+ self.state_manager.session.user_config["env"][provider["key_name"]] = (
134
+ api_key.strip()
135
+ )
136
+ await ui.success("API key saved successfully!")
137
+ break
138
+ else:
139
+ await ui.error("API key cannot be empty")
140
+
141
+ async def _step3_model_selection(self) -> None:
142
+ """Wizard step 3: Model selection with smart recommendations."""
143
+ provider = self._wizard_selected_provider
144
+
145
+ # Provide smart recommendations based on provider
146
+ recommendations = {
147
+ "OpenAI": [
148
+ ("openai:gpt-4o", "GPT-4o flagship multimodal model (recommended)"),
149
+ ("openai:gpt-4.1", "Latest GPT-4.1 with enhanced coding"),
150
+ ("openai:o3", "Advanced reasoning model for complex tasks"),
151
+ ],
152
+ "Anthropic": [
153
+ ("anthropic:claude-sonnet-4", "Claude Sonnet 4 latest generation (recommended)"),
154
+ ("anthropic:claude-opus-4.1", "Most capable Claude with extended thinking"),
155
+ ("anthropic:claude-3.5-sonnet", "Claude 3.5 Sonnet proven performance"),
156
+ ],
157
+ "OpenRouter": [
158
+ (
159
+ "openrouter:anthropic/claude-sonnet-4",
160
+ "Claude Sonnet 4 via OpenRouter (recommended)",
161
+ ),
162
+ ("openrouter:openai/gpt-4.1", "GPT-4.1 via OpenRouter"),
163
+ ("openrouter:google/gemini-2.5-flash", "Google Gemini 2.5 Flash latest"),
164
+ ],
165
+ "Google": [
166
+ (
167
+ "google:gemini-2.5-pro",
168
+ "Gemini 2.5 Pro with thinking capabilities (recommended)",
169
+ ),
170
+ ("google:gemini-2.5-flash", "Gemini 2.5 Flash best price-performance"),
171
+ ("google:gemini-2.0-flash", "Gemini 2.0 Flash with native tool use"),
172
+ ],
173
+ }
174
+
175
+ models = recommendations.get(provider["name"], [])
176
+ message = f"Choose your default {provider['name']} model:\n\n"
177
+
178
+ for i, (model_id, description) in enumerate(models, 1):
179
+ message += f" {i} - {description}\n"
180
+
181
+ message += "\nYou can change this later with [green]/model[/green]"
182
+
183
+ await ui.panel("Model Selection", message, border_style=UI_COLORS["primary"])
184
+
185
+ while True:
186
+ choice = await ui.input(
187
+ "wizard_model",
188
+ pretext=f" Choose model (1-{len(models)}): ",
189
+ state_manager=self.state_manager,
190
+ )
191
+
192
+ try:
193
+ index = int(choice.strip()) - 1
194
+ if 0 <= index < len(models):
195
+ selected_model = models[index][0]
196
+ self.state_manager.session.user_config["default_model"] = selected_model
197
+ await ui.success(f"Selected: {selected_model}")
198
+ break
199
+ else:
200
+ await ui.error(f"Please enter a number between 1 and {len(models)}")
201
+ except ValueError:
202
+ await ui.error("Please enter a valid number")
203
+
204
+ async def _step4_optional_settings(self) -> None:
205
+ """Wizard step 4: Optional settings configuration."""
206
+ message = "Configure optional settings:\n\n"
207
+ message += "• Tutorial: Enable interactive tutorial for new users\n"
208
+ message += "\nSkip this step to use recommended defaults"
209
+
210
+ await ui.panel("Optional Settings", message, border_style=UI_COLORS["primary"])
211
+
212
+ # Ask about tutorial
213
+ tutorial_choice = await ui.input(
214
+ "wizard_tutorial",
215
+ pretext=" Enable tutorial for new users? [Y/n]: ",
216
+ state_manager=self.state_manager,
217
+ )
218
+
219
+ enable_tutorial = tutorial_choice.strip().lower() not in ["n", "no", "false"]
220
+
221
+ if "settings" not in self.state_manager.session.user_config:
222
+ self.state_manager.session.user_config["settings"] = {}
223
+
224
+ self.state_manager.session.user_config["settings"]["enable_tutorial"] = enable_tutorial
225
+
226
+ # Streaming is always enabled - no user choice needed
227
+
228
+ await ui.info("Optional settings configured!")
tunacode/core/state.py CHANGED
@@ -10,6 +10,7 @@ import uuid
10
10
  from dataclasses import dataclass, field
11
11
  from typing import TYPE_CHECKING, Any, Optional
12
12
 
13
+ from tunacode.configuration.defaults import DEFAULT_USER_CONFIG
13
14
  from tunacode.types import (
14
15
  DeviceId,
15
16
  InputSessions,
@@ -39,7 +40,8 @@ class SessionState:
39
40
  ) # Keep as dict[str, Any] for agent instances
40
41
  messages: MessageHistory = field(default_factory=list)
41
42
  total_cost: float = 0.0
42
- current_model: ModelName = "openai:gpt-4o"
43
+ # Keep session default in sync with configuration default
44
+ current_model: ModelName = DEFAULT_USER_CONFIG["default_model"]
43
45
  spinner: Optional[Any] = None
44
46
  tool_ignore: list[ToolName] = field(default_factory=list)
45
47
  yolo: bool = False
@@ -1,20 +1,23 @@
1
1
  ###Instruction###
2
2
 
3
- You are "TunaCode", a senior software developer AI assistant operating inside the user's terminal
3
+ You are "TunaCode", a senior software developer AI assistant operating inside the user's terminal.
4
4
 
5
- YOU ARE NOT A CHATBOT. YOU ARE AN OPERATIONAL AGENT WITH TOOLS.
5
+ YOU ARE NOT A CHATBOT. YOU ARE AN OPERATIONAL, EXPERIENCED DEVELOPER AGENT WITH TOOLS.
6
6
 
7
- Your task is to execute real actions via tools and report observations after every tool use.
7
+ Your task is to execute real actions via tools and report observations after every tool use. Adapt responses to the user's technical level, stay direct, neutral, and concise.
8
8
 
9
9
  CRITICAL BEHAVIOR RULES:
10
- 1. ALWAYS ANNOUNCE YOUR INTENTIONS FIRST: Before executing any tools, briefly state what you're about to do (e.g., "I'll search for the main agent implementation" or "Let me examine the file structure")
11
- 2. When you say "Let me..." or "I will..." you MUST execute the corresponding tool in THE SAME RESPONSE
12
- 3. Never describe what you'll do without doing it ALWAYS execute tools when discussing actions
10
+ 1. ALWAYS ANNOUNCE YOUR INTENTIONS FIRST: Before executing any tools, briefly state what you're about to do (e.g., "I'll search for the main agent implementation" or "Let me examine the file structure").
11
+ 2. When you say "Let me..." or "I will..." you MUST execute the corresponding tool in THE SAME RESPONSE.
12
+ 3. Never describe what you'll do without doing it — ALWAYS execute tools when discussing actions.
13
13
  4. When a task is COMPLETE, start your response with: TUNACODE DONE:
14
- 5. If your response is cut off or truncated, you'll be prompted to continue complete your action
15
- 6. YOU MUST NOT USE ANY EMOJIS, YOU WILL BE PUNISHED FOR EMOJI USE
16
-
17
- You MUST follow these rules:
14
+ 5. If your response is cut off or truncated, you'll be prompted to continue — complete your action.
15
+ 6. YOU MUST NOT USE ANY EMOJIS, YOU WILL BE PUNISHED FOR EMOJI USE.
16
+ 7. Do not output raw JSON to the user; user-facing text must be clean, human-like prose. Keep any JSON strictly inside tool arguments.
17
+ 8. Maintain neutrality and avoid stereotypes. Ask precise clarifying questions when requirements are ambiguous.
18
+ 9. Prefer sequential simplicity: break complex tasks into clear, interactive steps and confirm assumptions.
19
+ 10. Use affirmative directives and directive phrasing in your own planning: "Your task is...", "You MUST..." when restating goals.
20
+ 11. You MUST follow best practices; you will be punished for cheap band-aid fixes. ALWAYS aim to fix issues properly.
18
21
 
19
22
  ### Completion Signaling
20
23
 
@@ -31,7 +34,7 @@ When you have fully completed the user’s task:
31
34
  You have 9 powerful tools at your disposal. Understanding their categories is CRITICAL for performance:
32
35
 
33
36
  READONLY TOOLS (Safe, ParallelExecutable)
34
- These tools can and SHOULD be executed in parallel batches up to 2x at a time.
37
+ These tools can and SHOULD be executed in parallel batches (3–4 concurrent calls is typically optimal; governed by TUNACODE_MAX_PARALLEL).
35
38
 
36
39
  1. `read_file(filepath: str)` — Read file contents
37
40
  Returns: File content with line numbers
@@ -42,12 +45,9 @@ These tools can and SHOULD be executed in parallel batches up to 2x at a time.
42
45
  3. `list_dir(directory: str = ".")` — List directory contents efficiently
43
46
  Returns: Files/dirs with type indicators
44
47
  Use for: Exploring project structure
45
- 4. `glob(pattern: str, directory: str = ".")` — Find files by pattern
46
- Returns: Sorted list of matching file paths
47
- Use for: Finding all \*.py files, configs, etc.
48
48
 
49
49
  TASK MANAGEMENT TOOLS
50
- This tool should only be used for complex task you MUST not use it for simple CRUD like task you will be punished for using this tool when the issue is simple
50
+ This tool should only be used for complex tasks. You MUST NOT use it for simple CRUD-like tasks; you will be penalized for misusing it on trivial issues.
51
51
 
52
52
  These tools help organize and track complex multistep tasks:
53
53
 
@@ -242,15 +242,15 @@ update_file("config.py",
242
242
  8. run_command Execute Shell Commands
243
243
  ```
244
244
  # Check Python version
245
- run_command("python version")
246
- → Returns: Python 3.10.0
245
+ run_command("python --version")
246
+ → Returns: Python 3.10.x
247
247
 
248
248
  # List files with details
249
- run_command("ls la")
249
+ run_command("ls -la")
250
250
  → Returns: Detailed file listing
251
251
 
252
252
  # Run pytest
253
- run_command("pytest tests/test_auth.py v")
253
+ run_command("pytest tests/test_auth.py -v")
254
254
  → Returns: Test results with verbose output
255
255
 
256
256
  # Check current directory
@@ -258,37 +258,37 @@ run_command("pwd")
258
258
  → Returns: /home/user/project
259
259
 
260
260
  # Git status
261
- run_command("git status short")
261
+ run_command("git status --short")
262
262
  → Returns: Modified files list
263
263
  ```
264
264
 
265
265
  9. bash Advanced Shell Operations
266
266
  ```
267
267
  # Count TODO comments
268
- bash("grep r 'TODO' . | wc l")
268
+ bash("grep -r 'TODO' . | wc -l")
269
269
  → Returns: Number of TODOs in project
270
270
 
271
271
  # Complex find operation
272
- bash("find . name '*.py' type f | xargs wc l | tail 1")
272
+ bash("find . -name '*.py' -type f | xargs wc -l | tail -1")
273
273
  → Returns: Total lines of Python code
274
274
 
275
275
  # Multicommand with pipes
276
- bash("ps aux | grep python | grep v grep | awk '{print $2}'")
276
+ bash("ps aux | grep python | grep -v grep | awk '{print $2}'")
277
277
  → Returns: PIDs of Python processes
278
278
 
279
279
  # Environment and path check
280
- bash("echo $PATH && which python && python version")
280
+ bash("echo $PATH && which python && python --version")
281
281
  → Returns: PATH, Python location, and version
282
282
 
283
283
  # Create and activate virtual environment
284
- bash("python m venv venv && source venv/bin/activate && pip list")
284
+ bash("python -m venv venv && source venv/bin/activate && pip list")
285
285
  → Returns: Installed packages in new venv
286
286
  ```
287
287
 
288
288
  REMEMBER:
289
289
  Always use these exact patterns
290
290
  Batch readonly tools for parallel execution
291
- Execute write/execute toolsone at a time
291
+ Execute write/execute tools one at a time
292
292
  Use todo tool for complex multistep tasks
293
293
 
294
294
 
@@ -364,6 +364,20 @@ read_file({"filepath": "main.py"}{"filepath": "config.py"})
364
364
 
365
365
  **VALIDATION:** Every tool argument must parse as a single, valid JSON object. Concatenated objects will cause tool execution failures.
366
366
 
367
- keep you response short, and to the point
368
-
369
- you will be punished for verbose responses
367
+ OUTPUT AND STYLE RULES:
368
+ 1. Directness: Keep responses short and to the point. Avoid polite filler.
369
+ 2. Natural response: Answer in a human-like manner; no raw JSON in user output.
370
+ 3. Step-by-step: When helpful, use simple step markers (Step 1:, Step 2:) to guide reasoning without exposing internal chain-of-thought.
371
+ 4. Audience integration: Adapt detail level to the user's background; ask what level if unclear.
372
+ 5. Unbiased answers: Ensure neutrality; avoid stereotypes.
373
+ 6. Interactive detailing: Ask clarifying questions before acting when requirements are ambiguous.
374
+ 7. Teach then test: When teaching, provide a brief explanation followed by a short check-for-understanding question.
375
+ 8. Delimiters: Start system instructions with ###Instruction###. Use clear section headers in outputs when structured responses improve clarity.
376
+ 9. Affirmative directives: Prefer "do X" phrasing. Use "Your task is" and "You MUST" to restate constraints when necessary.
377
+ 10. Penalty indication: Non-compliance (e.g., failing to run tools after stating intent, using emojis, or emitting raw JSON to the user) will be penalized.
378
+
379
+ ARCHITECTURE ALIGNMENT NOTES (OpenAI Tool Calls + JSON Fallback):
380
+ 1. Primary path: Use structured tool calls via the provided tool APIs.
381
+ 2. Fallback path: If a model lacks tool calling, emit exactly one well-formed JSON object per tool call as specified above.
382
+ 3. Parallelization: Batch READONLY tools (3concurrent). Keep WRITE/EXECUTE tools sequential with confirmations.
383
+ 4. Safety: Respect path restrictions and sandboxing. Prompt for confirmation when an operation is potentially destructive.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tunacode-cli
3
- Version: 0.0.73
3
+ Version: 0.0.76
4
4
  Summary: Your agentic CLI developer.
5
5
  Project-URL: Homepage, https://tunacode.xyz/
6
6
  Project-URL: Repository, https://github.com/alchemiststudiosDOTai/tunacode
@@ -1,5 +1,5 @@
1
1
  tunacode/__init__.py,sha256=yUul8igNYMfUrHnYfioIGAqvrH8b5BKiO_pt1wVnmd0,119
2
- tunacode/constants.py,sha256=lBaIQMlV51ZB1U4rnZLCtmmHL4weWCbPAScnezdaKvA,6100
2
+ tunacode/constants.py,sha256=I3FaJqV73DFIada4OF2sjyf9Gjre1xkteEQO53e9-xY,6100
3
3
  tunacode/context.py,sha256=YtfRjUiqsSkk2k9Nn_pjb_m-AXyh6XcOBOJWtFI0wVw,2405
4
4
  tunacode/exceptions.py,sha256=m80njR-LqBXhFAEOPqCE7N2QPU4Fkjlf_f6CWKO0_Is,8479
5
5
  tunacode/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -17,7 +17,7 @@ tunacode/cli/commands/implementations/command_reload.py,sha256=GyjeKvJbgE4VYkaas
17
17
  tunacode/cli/commands/implementations/conversation.py,sha256=ZijCNaRi1p5v1Q-IaVHtU2_BripSW3JCVKTtqFkOUjg,4676
18
18
  tunacode/cli/commands/implementations/debug.py,sha256=ornvceGF4GbJd2OJXnnT9i9KpHBAMJUYNs9wNhzViGM,6764
19
19
  tunacode/cli/commands/implementations/development.py,sha256=I8jHgYY3VgjTU8its0D0ysruuVqKbNTBur0JjPIUIZA,2844
20
- tunacode/cli/commands/implementations/model.py,sha256=CpBdXc_3RmPaVJn7xoGI4F6r7fCfqQNgxLr86CiNGXI,14588
20
+ tunacode/cli/commands/implementations/model.py,sha256=dFRmMlcN78TdGMFX-B2OPyoWqOVQL72XC8ayPyUQmpA,16166
21
21
  tunacode/cli/commands/implementations/plan.py,sha256=iZtvdGPqvGqMr8_lYil8_8NOL1iyc54Bxtb0gb9VOnw,1825
22
22
  tunacode/cli/commands/implementations/quickstart.py,sha256=53H7ubYMGMgmCeYCs6o_F91Q4pd3Ky008lCU4GPuRP8,1363
23
23
  tunacode/cli/commands/implementations/system.py,sha256=2bTbJsiniac11XjGWZU4Cd6Cpug9C2-HtlmLFCgK20I,12009
@@ -40,17 +40,17 @@ tunacode/configuration/models.py,sha256=buH8ZquvcYI3OQBDIZeJ08cu00rSCeNABtUwl3VQ
40
40
  tunacode/configuration/settings.py,sha256=9wtIWBlLhW_ZBlLx-GA4XDfVZyGj2Gs6Zk49vk-nHq0,1047
41
41
  tunacode/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
42
42
  tunacode/core/code_index.py,sha256=2qxEn2eTIegV4F_gLeZO5lAOv8mkf4Y_t21whZ9F2Fk,17370
43
- tunacode/core/state.py,sha256=aksE0mM2xG-1CkLmpi8cu1n1SOMfqpMuqMBciEVShf0,7893
43
+ tunacode/core/state.py,sha256=JdfdWXdnNb8_9A9ZTGGYHbYRiUM34iKPHphF0S9xqDQ,8040
44
44
  tunacode/core/tool_handler.py,sha256=42yUfnq5jgk-0LK93JoJgtsXfVDTf-7hNXyKEfH2FM0,3626
45
45
  tunacode/core/agents/__init__.py,sha256=UUJiPYb91arwziSpjd7vIk7XNGA_4HQbsOIbskSqevA,149
46
- tunacode/core/agents/main.py,sha256=50V1FMfJ6lac6XqWqH5KLY_12RDiY-FFgieXbf-tQKM,18217
46
+ tunacode/core/agents/main.py,sha256=f74_f-XmzJxyz4qwqfEPhFQOreNlhSfxWe7cWjvTav0,20385
47
47
  tunacode/core/agents/utils.py,sha256=ja6Dwq3AVX6QTddmG2uY5ENxFxr5uzc4TS9OjommXp0,14535
48
48
  tunacode/core/agents/agent_components/__init__.py,sha256=CL4XH47T6v_iYy7xCPYjyiEFNOFnkcKwbTuKw6IjKTs,1474
49
49
  tunacode/core/agents/agent_components/agent_config.py,sha256=rVoFxmtu8Ly6-UAqTzvyv1NgPYTG5ZuYzJf5Qgo4384,13096
50
50
  tunacode/core/agents/agent_components/agent_helpers.py,sha256=FFX-zXDhNoXpzMVe2iznBpNzNtk7OJ2lHf44cfZhTk8,8268
51
51
  tunacode/core/agents/agent_components/json_tool_parser.py,sha256=HuyNT0rs-ppx_gLAI2e0XMVGbR_F0WXZfP3sx38VoMg,3447
52
52
  tunacode/core/agents/agent_components/message_handler.py,sha256=KJGOtb9VhumgZpxxwO45HrKLhU9_MwuoWRsSQwJviNU,3704
53
- tunacode/core/agents/agent_components/node_processor.py,sha256=n_m3FYH_3pQ2AH-OJvoZ1J5mFzAYc_U2TdZph5GAHvQ,22753
53
+ tunacode/core/agents/agent_components/node_processor.py,sha256=NPXCKUAn0C70jRkSgYQQPZ4BMH0zDcLLhfkT4Gsv9dw,23671
54
54
  tunacode/core/agents/agent_components/response_state.py,sha256=qnjRSQCYZzac04CcVc4gTvW8epxl4w-Vz0kPjsvM_Qg,4482
55
55
  tunacode/core/agents/agent_components/result_wrapper.py,sha256=9CFK0wpsfZx2WT4PBHfkSv22GxL1gAQuUYVMlmYtCJU,1761
56
56
  tunacode/core/agents/agent_components/state_transition.py,sha256=uyvLJriexosBDQIrxbVDLR_luvXAMG6tnDsX10mbZcI,4077
@@ -69,7 +69,8 @@ tunacode/core/logging/logger.py,sha256=9RjRuX0GoUojRJ8WnJGQPFdXiluiJMCoFmvc8xEio
69
69
  tunacode/core/setup/__init__.py,sha256=seoWYpRonptxNyApapS-yGz4o3jTj8vLsRPCTUO4siM,439
70
70
  tunacode/core/setup/agent_setup.py,sha256=tpOIW85C6o1m8pwAZQBIMKxKIyBUOpHHn4JJmDBFH3Q,1403
71
71
  tunacode/core/setup/base.py,sha256=FMjBQQS_q3KOxHqfg7NJGmKq-1nxC40htiPZprzTu7I,970
72
- tunacode/core/setup/config_setup.py,sha256=sUD5Xof6onWdBa4FyxNhnFMd1K7Qrbs0dWdPMiHltw8,25166
72
+ tunacode/core/setup/config_setup.py,sha256=j04mf4DAi_WLJid3h-ylomqQIybWbCMoPd_70JOEfEs,19158
73
+ tunacode/core/setup/config_wizard.py,sha256=hKVgZyhKJkYF3ORYMCD-2Im5hDSPuqqs7ZZao5xmnS0,9301
73
74
  tunacode/core/setup/coordinator.py,sha256=5ZhD4rHUrW0RIdGnjmoK4wCvqlNGcXal4Qwev4s039U,2393
74
75
  tunacode/core/setup/environment_setup.py,sha256=n3IrObKEynHZSwtUJ1FddMg2C4sHz7ca42awemImV8s,2225
75
76
  tunacode/core/setup/git_safety_setup.py,sha256=Htt8A4BAn7F4DbjhNu_SO01zjwaRQ3wMv-vZujE1-JA,7328
@@ -77,7 +78,7 @@ tunacode/core/setup/template_setup.py,sha256=0lDGhNVCvGN7ykqHnl3pj4CONH3I2PvMzkm
77
78
  tunacode/core/token_usage/api_response_parser.py,sha256=plLltHg4zGVzxjv3MFj45bbd-NOJeT_v3P0Ki4zlvn4,1831
78
79
  tunacode/core/token_usage/cost_calculator.py,sha256=RjO-O0JENBuGOrWP7QgBZlZxeXC-PAIr8tj_9p_BxOU,2058
79
80
  tunacode/core/token_usage/usage_tracker.py,sha256=YUCnF-712nLrbtEvFrsC-VZuYjKUCz3hf-_do6GKSDA,6016
80
- tunacode/prompts/system.md,sha256=UxuEwL9LDZKvnIbV7s_q3JQ852Vsvt9pdqqdoPOZRyM,11226
81
+ tunacode/prompts/system.md,sha256=Q9QhUzeHISdxHJNkufo5QNJH5P0b-b8qwhgrMuTc3Zk,13345
81
82
  tunacode/prompts/system.md.bak,sha256=q0gbk_-pvQlNtZBonRo4gNILkKStqNxgDN0ZEwzC3E4,17541
82
83
  tunacode/services/__init__.py,sha256=w_E8QK6RnvKSvU866eDe8BCRV26rAm4d3R-Yg06OWCU,19
83
84
  tunacode/services/mcp.py,sha256=quO13skECUGt-4QE2NkWk6_8qhmZ5qjgibvw8tUOt-4,3761
@@ -151,8 +152,8 @@ tunacode/utils/system.py,sha256=J8KqJ4ZqQrNSnM5rrJxPeMk9z2xQQp6dWtI1SKBY1-0,1112
151
152
  tunacode/utils/text_utils.py,sha256=HAwlT4QMy41hr53cDbbNeNo05MI461TpI9b_xdIv8EY,7288
152
153
  tunacode/utils/token_counter.py,sha256=dmFuqVz4ywGFdLfAi5Mg9bAGf8v87Ek-mHU-R3fsYjI,2711
153
154
  tunacode/utils/user_configuration.py,sha256=OA-L0BgWNbf9sWpc8lyivgLscwJdpdI8TAYbe0wRs1s,4836
154
- tunacode_cli-0.0.73.dist-info/METADATA,sha256=_ID2j9GEyTEdGpODWbQBkxxhmEhee1Pe6arqhGxIyrs,6898
155
- tunacode_cli-0.0.73.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
156
- tunacode_cli-0.0.73.dist-info/entry_points.txt,sha256=hbkytikj4dGu6rizPuAd_DGUPBGF191RTnhr9wdhORY,51
157
- tunacode_cli-0.0.73.dist-info/licenses/LICENSE,sha256=Btzdu2kIoMbdSp6OyCLupB1aRgpTCJ_szMimgEnpkkE,1056
158
- tunacode_cli-0.0.73.dist-info/RECORD,,
155
+ tunacode_cli-0.0.76.dist-info/METADATA,sha256=aYQr6wSOfpKUDUAPZBiiWR6xaSfStnaIWI1LVdbVvMM,6898
156
+ tunacode_cli-0.0.76.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
157
+ tunacode_cli-0.0.76.dist-info/entry_points.txt,sha256=hbkytikj4dGu6rizPuAd_DGUPBGF191RTnhr9wdhORY,51
158
+ tunacode_cli-0.0.76.dist-info/licenses/LICENSE,sha256=Btzdu2kIoMbdSp6OyCLupB1aRgpTCJ_szMimgEnpkkE,1056
159
+ tunacode_cli-0.0.76.dist-info/RECORD,,