shotgun-sh 0.1.15.dev1__py3-none-any.whl → 0.2.1.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic; see the registry's advisory page for details.

Files changed (39)
  1. shotgun/agents/common.py +4 -5
  2. shotgun/agents/config/constants.py +21 -5
  3. shotgun/agents/config/manager.py +147 -39
  4. shotgun/agents/config/models.py +59 -86
  5. shotgun/agents/config/provider.py +164 -61
  6. shotgun/agents/history/compaction.py +1 -1
  7. shotgun/agents/history/history_processors.py +18 -9
  8. shotgun/agents/history/token_counting/__init__.py +31 -0
  9. shotgun/agents/history/token_counting/anthropic.py +89 -0
  10. shotgun/agents/history/token_counting/base.py +67 -0
  11. shotgun/agents/history/token_counting/openai.py +80 -0
  12. shotgun/agents/history/token_counting/sentencepiece_counter.py +119 -0
  13. shotgun/agents/history/token_counting/tokenizer_cache.py +90 -0
  14. shotgun/agents/history/token_counting/utils.py +147 -0
  15. shotgun/agents/history/token_estimation.py +12 -12
  16. shotgun/agents/llm.py +62 -0
  17. shotgun/agents/models.py +2 -2
  18. shotgun/agents/tools/web_search/__init__.py +42 -15
  19. shotgun/agents/tools/web_search/anthropic.py +46 -40
  20. shotgun/agents/tools/web_search/gemini.py +31 -20
  21. shotgun/agents/tools/web_search/openai.py +4 -4
  22. shotgun/cli/config.py +14 -55
  23. shotgun/cli/models.py +2 -2
  24. shotgun/codebase/models.py +4 -4
  25. shotgun/llm_proxy/__init__.py +16 -0
  26. shotgun/llm_proxy/clients.py +39 -0
  27. shotgun/llm_proxy/constants.py +8 -0
  28. shotgun/main.py +6 -0
  29. shotgun/posthog_telemetry.py +5 -3
  30. shotgun/tui/app.py +2 -0
  31. shotgun/tui/screens/chat_screen/command_providers.py +20 -0
  32. shotgun/tui/screens/model_picker.py +214 -0
  33. shotgun/tui/screens/provider_config.py +39 -26
  34. {shotgun_sh-0.1.15.dev1.dist-info → shotgun_sh-0.2.1.dev1.dist-info}/METADATA +2 -2
  35. {shotgun_sh-0.1.15.dev1.dist-info → shotgun_sh-0.2.1.dev1.dist-info}/RECORD +38 -27
  36. shotgun/agents/history/token_counting.py +0 -429
  37. {shotgun_sh-0.1.15.dev1.dist-info → shotgun_sh-0.2.1.dev1.dist-info}/WHEEL +0 -0
  38. {shotgun_sh-0.1.15.dev1.dist-info → shotgun_sh-0.2.1.dev1.dist-info}/entry_points.txt +0 -0
  39. {shotgun_sh-0.1.15.dev1.dist-info → shotgun_sh-0.2.1.dev1.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/common.py CHANGED
@@ -18,7 +18,7 @@ from pydantic_ai.messages import (
18
18
  ModelRequest,
19
19
  )
20
20
 
21
- from shotgun.agents.config import ProviderType, get_config_manager, get_provider_model
21
+ from shotgun.agents.config import ProviderType, get_provider_model
22
22
  from shotgun.agents.models import AgentType
23
23
  from shotgun.logging_config import get_logger
24
24
  from shotgun.prompts import PromptLoader
@@ -115,14 +115,13 @@ def create_base_agent(
115
115
  """
116
116
  ensure_shotgun_directory_exists()
117
117
 
118
- # Get configured model or fall back to hardcoded default
118
+ # Get configured model or fall back to first available provider
119
119
  try:
120
120
  model_config = get_provider_model(provider)
121
- config_manager = get_config_manager()
122
- provider_name = provider or config_manager.load().default_provider
121
+ provider_name = model_config.provider
123
122
  logger.debug(
124
123
  "🤖 Creating agent with configured %s model: %s",
125
- provider_name.upper(),
124
+ provider_name.value.upper(),
126
125
  model_config.name,
127
126
  )
128
127
  # Use the Model instance directly (has API key baked in)
@@ -1,17 +1,33 @@
1
1
  """Configuration constants for Shotgun agents."""
2
2
 
3
+ from enum import StrEnum, auto
4
+
3
5
  # Field names
4
6
  API_KEY_FIELD = "api_key"
5
- DEFAULT_PROVIDER_FIELD = "default_provider"
6
7
  USER_ID_FIELD = "user_id"
7
8
  CONFIG_VERSION_FIELD = "config_version"
8
9
 
9
- # Provider names (for consistency with data dict keys)
10
- OPENAI_PROVIDER = "openai"
11
- ANTHROPIC_PROVIDER = "anthropic"
12
- GOOGLE_PROVIDER = "google"
10
+
11
+ class ConfigSection(StrEnum):
12
+ """Configuration file section names (JSON keys)."""
13
+
14
+ OPENAI = auto()
15
+ ANTHROPIC = auto()
16
+ GOOGLE = auto()
17
+ SHOTGUN = auto()
18
+
19
+
20
+ # Backwards compatibility - deprecated
21
+ OPENAI_PROVIDER = ConfigSection.OPENAI.value
22
+ ANTHROPIC_PROVIDER = ConfigSection.ANTHROPIC.value
23
+ GOOGLE_PROVIDER = ConfigSection.GOOGLE.value
24
+ SHOTGUN_PROVIDER = ConfigSection.SHOTGUN.value
13
25
 
14
26
  # Environment variable names
15
27
  OPENAI_API_KEY_ENV = "OPENAI_API_KEY"
16
28
  ANTHROPIC_API_KEY_ENV = "ANTHROPIC_API_KEY"
17
29
  GEMINI_API_KEY_ENV = "GEMINI_API_KEY"
30
+ SHOTGUN_API_KEY_ENV = "SHOTGUN_API_KEY"
31
+
32
+ # Token limits
33
+ MEDIUM_TEXT_8K_TOKENS = 8192 # Default max_tokens for web search requests
@@ -13,17 +13,26 @@ from shotgun.utils import get_shotgun_home
13
13
 
14
14
  from .constants import (
15
15
  ANTHROPIC_API_KEY_ENV,
16
- ANTHROPIC_PROVIDER,
17
16
  API_KEY_FIELD,
18
17
  GEMINI_API_KEY_ENV,
19
- GOOGLE_PROVIDER,
20
18
  OPENAI_API_KEY_ENV,
21
- OPENAI_PROVIDER,
19
+ ConfigSection,
20
+ )
21
+ from .models import (
22
+ AnthropicConfig,
23
+ GoogleConfig,
24
+ ModelName,
25
+ OpenAIConfig,
26
+ ProviderType,
27
+ ShotgunAccountConfig,
28
+ ShotgunConfig,
22
29
  )
23
- from .models import ProviderType, ShotgunConfig
24
30
 
25
31
  logger = get_logger(__name__)
26
32
 
33
+ # Type alias for provider configuration objects
34
+ ProviderConfig = OpenAIConfig | AnthropicConfig | GoogleConfig | ShotgunAccountConfig
35
+
27
36
 
28
37
  class ConfigManager:
29
38
  """Manager for Shotgun configuration."""
@@ -69,20 +78,56 @@ class ConfigManager:
69
78
  self._config = ShotgunConfig.model_validate(data)
70
79
  logger.debug("Configuration loaded successfully from %s", self.config_path)
71
80
 
72
- # Check if the default provider has a key, if not find one that does
73
- if not self.has_provider_key(self._config.default_provider):
74
- original_default = self._config.default_provider
75
- # Find first provider with a configured key
76
- for provider in ProviderType:
77
- if self.has_provider_key(provider):
81
+ # Validate selected_model if in BYOK mode (no Shotgun key)
82
+ if not self._provider_has_api_key(self._config.shotgun):
83
+ should_save = False
84
+
85
+ # If selected_model is set, verify its provider has a key
86
+ if self._config.selected_model:
87
+ from .models import MODEL_SPECS
88
+
89
+ if self._config.selected_model in MODEL_SPECS:
90
+ spec = MODEL_SPECS[self._config.selected_model]
91
+ if not self.has_provider_key(spec.provider):
92
+ logger.info(
93
+ "Selected model %s provider has no API key, finding available model",
94
+ self._config.selected_model.value,
95
+ )
96
+ self._config.selected_model = None
97
+ should_save = True
98
+ else:
78
99
  logger.info(
79
- "Default provider %s has no API key, updating to %s",
80
- original_default.value,
81
- provider.value,
100
+ "Selected model %s not found in MODEL_SPECS, resetting",
101
+ self._config.selected_model.value,
82
102
  )
83
- self._config.default_provider = provider
84
- self.save(self._config)
85
- break
103
+ self._config.selected_model = None
104
+ should_save = True
105
+
106
+ # If no selected_model or it was invalid, find first available model
107
+ if not self._config.selected_model:
108
+ for provider in ProviderType:
109
+ if self.has_provider_key(provider):
110
+ # Set to that provider's default model
111
+ from .models import MODEL_SPECS, ModelName
112
+
113
+ # Find default model for this provider
114
+ provider_models = {
115
+ ProviderType.OPENAI: ModelName.GPT_5,
116
+ ProviderType.ANTHROPIC: ModelName.CLAUDE_OPUS_4_1,
117
+ ProviderType.GOOGLE: ModelName.GEMINI_2_5_PRO,
118
+ }
119
+
120
+ if provider in provider_models:
121
+ self._config.selected_model = provider_models[provider]
122
+ logger.info(
123
+ "Set selected_model to %s (first available provider)",
124
+ self._config.selected_model.value,
125
+ )
126
+ should_save = True
127
+ break
128
+
129
+ if should_save:
130
+ self.save(self._config)
86
131
 
87
132
  return self._config
88
133
 
@@ -107,7 +152,6 @@ class ConfigManager:
107
152
  # Create a new config with generated user_id
108
153
  config = ShotgunConfig(
109
154
  user_id=str(uuid.uuid4()),
110
- config_version=1,
111
155
  )
112
156
 
113
157
  # Ensure directory exists
@@ -136,8 +180,13 @@ class ConfigManager:
136
180
  **kwargs: Configuration fields to update (only api_key supported)
137
181
  """
138
182
  config = self.load()
139
- provider_enum = self._ensure_provider_enum(provider)
140
- provider_config = self._get_provider_config(config, provider_enum)
183
+
184
+ # Get provider config and check if it's shotgun
185
+ provider_config, is_shotgun = self._get_provider_config_and_type(
186
+ config, provider
187
+ )
188
+ # For non-shotgun providers, we need the enum for default provider logic
189
+ provider_enum = None if is_shotgun else self._ensure_provider_enum(provider)
141
190
 
142
191
  # Only support api_key updates
143
192
  if API_KEY_FIELD in kwargs:
@@ -152,23 +201,47 @@ class ConfigManager:
152
201
  raise ValueError(f"Unsupported configuration fields: {unsupported_fields}")
153
202
 
154
203
  # If no other providers have keys configured and we just added one,
155
- # set this provider as the default
156
- if API_KEY_FIELD in kwargs and api_key_value is not None:
204
+ # set selected_model to that provider's default model (only for LLM providers, not shotgun)
205
+ if not is_shotgun and API_KEY_FIELD in kwargs and api_key_value is not None:
206
+ # provider_enum is guaranteed to be non-None here since is_shotgun is False
207
+ if provider_enum is None:
208
+ raise RuntimeError("Provider enum should not be None for LLM providers")
157
209
  other_providers = [p for p in ProviderType if p != provider_enum]
158
210
  has_other_keys = any(self.has_provider_key(p) for p in other_providers)
159
211
  if not has_other_keys:
160
- config.default_provider = provider_enum
212
+ # Set selected_model to this provider's default model
213
+ from .models import ModelName
214
+
215
+ provider_models = {
216
+ ProviderType.OPENAI: ModelName.GPT_5,
217
+ ProviderType.ANTHROPIC: ModelName.CLAUDE_OPUS_4_1,
218
+ ProviderType.GOOGLE: ModelName.GEMINI_2_5_PRO,
219
+ }
220
+ if provider_enum in provider_models:
221
+ config.selected_model = provider_models[provider_enum]
161
222
 
162
223
  self.save(config)
163
224
 
164
225
  def clear_provider_key(self, provider: ProviderType | str) -> None:
165
- """Remove the API key for the given provider."""
226
+ """Remove the API key for the given provider (LLM provider or shotgun)."""
166
227
  config = self.load()
167
- provider_enum = self._ensure_provider_enum(provider)
168
- provider_config = self._get_provider_config(config, provider_enum)
228
+
229
+ # Get provider config (shotgun or LLM provider)
230
+ provider_config, _ = self._get_provider_config_and_type(config, provider)
231
+
169
232
  provider_config.api_key = None
170
233
  self.save(config)
171
234
 
235
+ def update_selected_model(self, model_name: "ModelName") -> None:
236
+ """Update the selected model.
237
+
238
+ Args:
239
+ model_name: Model to select
240
+ """
241
+ config = self.load()
242
+ config.selected_model = model_name
243
+ self.save(config)
244
+
172
245
  def has_provider_key(self, provider: ProviderType | str) -> bool:
173
246
  """Check if the given provider has a non-empty API key configured.
174
247
 
@@ -195,7 +268,8 @@ class ConfigManager:
195
268
  def has_any_provider_key(self) -> bool:
196
269
  """Determine whether any provider has a configured API key."""
197
270
  config = self.load()
198
- return any(
271
+ # Check LLM provider keys (BYOK)
272
+ has_llm_key = any(
199
273
  self._provider_has_api_key(self._get_provider_config(config, provider))
200
274
  for provider in (
201
275
  ProviderType.OPENAI,
@@ -203,6 +277,9 @@ class ConfigManager:
203
277
  ProviderType.GOOGLE,
204
278
  )
205
279
  )
280
+ # Also check Shotgun Account key
281
+ has_shotgun_key = self._provider_has_api_key(config.shotgun)
282
+ return has_llm_key or has_shotgun_key
206
283
 
207
284
  def initialize(self) -> ShotgunConfig:
208
285
  """Initialize configuration with defaults and save to file.
@@ -213,7 +290,6 @@ class ConfigManager:
213
290
  # Generate unique user ID for new config
214
291
  config = ShotgunConfig(
215
292
  user_id=str(uuid.uuid4()),
216
- config_version=1,
217
293
  )
218
294
  self.save(config)
219
295
  logger.info(
@@ -225,26 +301,26 @@ class ConfigManager:
225
301
 
226
302
  def _convert_secrets_to_secretstr(self, data: dict[str, Any]) -> None:
227
303
  """Convert plain text secrets in data to SecretStr objects."""
228
- for provider in [OPENAI_PROVIDER, ANTHROPIC_PROVIDER, GOOGLE_PROVIDER]:
229
- if provider in data and isinstance(data[provider], dict):
304
+ for section in ConfigSection:
305
+ if section.value in data and isinstance(data[section.value], dict):
230
306
  if (
231
- API_KEY_FIELD in data[provider]
232
- and data[provider][API_KEY_FIELD] is not None
307
+ API_KEY_FIELD in data[section.value]
308
+ and data[section.value][API_KEY_FIELD] is not None
233
309
  ):
234
- data[provider][API_KEY_FIELD] = SecretStr(
235
- data[provider][API_KEY_FIELD]
310
+ data[section.value][API_KEY_FIELD] = SecretStr(
311
+ data[section.value][API_KEY_FIELD]
236
312
  )
237
313
 
238
314
  def _convert_secretstr_to_plain(self, data: dict[str, Any]) -> None:
239
315
  """Convert SecretStr objects in data to plain text for JSON serialization."""
240
- for provider in [OPENAI_PROVIDER, ANTHROPIC_PROVIDER, GOOGLE_PROVIDER]:
241
- if provider in data and isinstance(data[provider], dict):
316
+ for section in ConfigSection:
317
+ if section.value in data and isinstance(data[section.value], dict):
242
318
  if (
243
- API_KEY_FIELD in data[provider]
244
- and data[provider][API_KEY_FIELD] is not None
319
+ API_KEY_FIELD in data[section.value]
320
+ and data[section.value][API_KEY_FIELD] is not None
245
321
  ):
246
- if hasattr(data[provider][API_KEY_FIELD], "get_secret_value"):
247
- data[provider][API_KEY_FIELD] = data[provider][
322
+ if hasattr(data[section.value][API_KEY_FIELD], "get_secret_value"):
323
+ data[section.value][API_KEY_FIELD] = data[section.value][
248
324
  API_KEY_FIELD
249
325
  ].get_secret_value()
250
326
 
@@ -279,6 +355,38 @@ class ConfigManager:
279
355
 
280
356
  return bool(value.strip())
281
357
 
358
+ def _is_shotgun_provider(self, provider: ProviderType | str) -> bool:
359
+ """Check if provider string represents Shotgun Account.
360
+
361
+ Args:
362
+ provider: Provider type or string
363
+
364
+ Returns:
365
+ True if provider is shotgun account
366
+ """
367
+ return (
368
+ isinstance(provider, str)
369
+ and provider.lower() == ConfigSection.SHOTGUN.value
370
+ )
371
+
372
+ def _get_provider_config_and_type(
373
+ self, config: ShotgunConfig, provider: ProviderType | str
374
+ ) -> tuple[ProviderConfig, bool]:
375
+ """Get provider config, handling shotgun as special case.
376
+
377
+ Args:
378
+ config: Shotgun configuration
379
+ provider: Provider type or string
380
+
381
+ Returns:
382
+ Tuple of (provider_config, is_shotgun)
383
+ """
384
+ if self._is_shotgun_provider(provider):
385
+ return (config.shotgun, True)
386
+
387
+ provider_enum = self._ensure_provider_enum(provider)
388
+ return (self._get_provider_config(config, provider_enum), False)
389
+
282
390
  def get_user_id(self) -> str:
283
391
  """Get the user ID from configuration.
284
392
 
@@ -1,16 +1,12 @@
1
1
  """Pydantic models for configuration."""
2
2
 
3
- from enum import Enum
4
- from typing import Any
3
+ from enum import StrEnum
5
4
 
6
5
  from pydantic import BaseModel, Field, PrivateAttr, SecretStr
7
- from pydantic_ai.direct import model_request
8
- from pydantic_ai.messages import ModelMessage, ModelResponse
9
6
  from pydantic_ai.models import Model
10
- from pydantic_ai.settings import ModelSettings
11
7
 
12
8
 
13
- class ProviderType(str, Enum):
9
+ class ProviderType(StrEnum):
14
10
  """Provider types for AI services."""
15
11
 
16
12
  OPENAI = "openai"
@@ -18,20 +14,41 @@ class ProviderType(str, Enum):
18
14
  GOOGLE = "google"
19
15
 
20
16
 
17
+ class KeyProvider(StrEnum):
18
+ """Authentication method for accessing AI models."""
19
+
20
+ BYOK = "byok" # Bring Your Own Key (individual provider keys)
21
+ SHOTGUN = "shotgun" # Shotgun Account (unified LiteLLM proxy)
22
+
23
+
24
+ class ModelName(StrEnum):
25
+ """Available AI model names."""
26
+
27
+ GPT_5 = "gpt-5"
28
+ GPT_5_MINI = "gpt-5-mini"
29
+ CLAUDE_OPUS_4_1 = "claude-opus-4-1"
30
+ GEMINI_2_5_PRO = "gemini-2.5-pro"
31
+ GEMINI_2_5_FLASH = "gemini-2.5-flash"
32
+
33
+
21
34
  class ModelSpec(BaseModel):
22
35
  """Static specification for a model - just metadata."""
23
36
 
24
- name: str # Model identifier (e.g., "gpt-5", "claude-opus-4-1")
37
+ name: ModelName # Model identifier
25
38
  provider: ProviderType
26
39
  max_input_tokens: int
27
40
  max_output_tokens: int
41
+ litellm_proxy_model_name: (
42
+ str # LiteLLM format (e.g., "openai/gpt-5", "gemini/gemini-2-pro")
43
+ )
28
44
 
29
45
 
30
46
  class ModelConfig(BaseModel):
31
47
  """A fully configured model with API key and settings."""
32
48
 
33
- name: str # Model identifier (e.g., "gpt-5", "claude-opus-4-1")
34
- provider: ProviderType
49
+ name: ModelName # Model identifier
50
+ provider: ProviderType # Actual LLM provider (openai, anthropic, google)
51
+ key_provider: KeyProvider # Authentication method (byok or shotgun)
35
52
  max_input_tokens: int
36
53
  max_output_tokens: int
37
54
  api_key: str
@@ -47,7 +64,7 @@ class ModelConfig(BaseModel):
47
64
  from .provider import get_or_create_model
48
65
 
49
66
  self._model_instance = get_or_create_model(
50
- self.provider, self.name, self.api_key
67
+ self.provider, self.key_provider, self.name, self.api_key
51
68
  )
52
69
  return self._model_instance
53
70
 
@@ -61,54 +78,43 @@ class ModelConfig(BaseModel):
61
78
  }
62
79
  return f"{provider_prefix[self.provider]}:{self.name}"
63
80
 
64
- def get_model_settings(self, max_tokens: int | None = None) -> ModelSettings:
65
- """Get ModelSettings with optional token override.
66
-
67
- This provides flexibility for specific use cases that need different
68
- token limits while defaulting to maximum utilization.
69
-
70
- Args:
71
- max_tokens: Optional override for max_tokens. If None, uses max_output_tokens
72
-
73
- Returns:
74
- ModelSettings configured with specified or maximum tokens
75
- """
76
- return ModelSettings(
77
- max_tokens=max_tokens if max_tokens is not None else self.max_output_tokens
78
- )
79
-
80
81
 
81
82
  # Model specifications registry (static metadata)
82
- MODEL_SPECS: dict[str, ModelSpec] = {
83
- "gpt-5": ModelSpec(
84
- name="gpt-5",
83
+ MODEL_SPECS: dict[ModelName, ModelSpec] = {
84
+ ModelName.GPT_5: ModelSpec(
85
+ name=ModelName.GPT_5,
85
86
  provider=ProviderType.OPENAI,
86
87
  max_input_tokens=400_000,
87
88
  max_output_tokens=128_000,
89
+ litellm_proxy_model_name="openai/gpt-5",
88
90
  ),
89
- "gpt-4o": ModelSpec(
90
- name="gpt-4o",
91
+ ModelName.GPT_5_MINI: ModelSpec(
92
+ name=ModelName.GPT_5_MINI,
91
93
  provider=ProviderType.OPENAI,
92
- max_input_tokens=128_000,
93
- max_output_tokens=16_000,
94
+ max_input_tokens=400_000,
95
+ max_output_tokens=128_000,
96
+ litellm_proxy_model_name="openai/gpt-5-mini",
94
97
  ),
95
- "claude-opus-4-1": ModelSpec(
96
- name="claude-opus-4-1",
98
+ ModelName.CLAUDE_OPUS_4_1: ModelSpec(
99
+ name=ModelName.CLAUDE_OPUS_4_1,
97
100
  provider=ProviderType.ANTHROPIC,
98
101
  max_input_tokens=200_000,
99
102
  max_output_tokens=32_000,
103
+ litellm_proxy_model_name="anthropic/claude-opus-4-1",
100
104
  ),
101
- "claude-3-5-sonnet-latest": ModelSpec(
102
- name="claude-3-5-sonnet-latest",
103
- provider=ProviderType.ANTHROPIC,
104
- max_input_tokens=200_000,
105
- max_output_tokens=8_192,
105
+ ModelName.GEMINI_2_5_PRO: ModelSpec(
106
+ name=ModelName.GEMINI_2_5_PRO,
107
+ provider=ProviderType.GOOGLE,
108
+ max_input_tokens=1_000_000,
109
+ max_output_tokens=64_000,
110
+ litellm_proxy_model_name="gemini/gemini-2.5-pro",
106
111
  ),
107
- "gemini-2.5-pro": ModelSpec(
108
- name="gemini-2.5-pro",
112
+ ModelName.GEMINI_2_5_FLASH: ModelSpec(
113
+ name=ModelName.GEMINI_2_5_FLASH,
109
114
  provider=ProviderType.GOOGLE,
110
115
  max_input_tokens=1_000_000,
111
116
  max_output_tokens=64_000,
117
+ litellm_proxy_model_name="gemini/gemini-2.5-flash",
112
118
  ),
113
119
  }
114
120
 
@@ -131,55 +137,22 @@ class GoogleConfig(BaseModel):
131
137
  api_key: SecretStr | None = None
132
138
 
133
139
 
140
+ class ShotgunAccountConfig(BaseModel):
141
+ """Configuration for Shotgun Account (LiteLLM proxy)."""
142
+
143
+ api_key: SecretStr | None = None
144
+
145
+
134
146
  class ShotgunConfig(BaseModel):
135
147
  """Main configuration for Shotgun CLI."""
136
148
 
137
149
  openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
138
150
  anthropic: AnthropicConfig = Field(default_factory=AnthropicConfig)
139
151
  google: GoogleConfig = Field(default_factory=GoogleConfig)
140
- default_provider: ProviderType = Field(
141
- default=ProviderType.OPENAI, description="Default AI provider to use"
152
+ shotgun: ShotgunAccountConfig = Field(default_factory=ShotgunAccountConfig)
153
+ selected_model: ModelName | None = Field(
154
+ default=None,
155
+ description="User-selected model",
142
156
  )
143
157
  user_id: str = Field(description="Unique anonymous user identifier")
144
- config_version: int = Field(default=1, description="Configuration schema version")
145
-
146
-
147
- async def shotgun_model_request(
148
- model_config: ModelConfig,
149
- messages: list[ModelMessage],
150
- max_tokens: int | None = None,
151
- **kwargs: Any,
152
- ) -> ModelResponse:
153
- """Model request wrapper that uses full token capacity by default.
154
-
155
- This wrapper ensures all LLM calls in Shotgun use the maximum available
156
- token capacity of each model, improving response quality and completeness.
157
- The most common issue this fixes is truncated summaries that were cut off
158
- at default token limits (e.g., 4096 for Claude models).
159
-
160
- Args:
161
- model_config: ModelConfig instance with model settings and API key
162
- messages: Messages to send to the model
163
- max_tokens: Optional override for max_tokens. If None, uses model's max_output_tokens
164
- **kwargs: Additional arguments passed to model_request
165
-
166
- Returns:
167
- ModelResponse from the model
168
-
169
- Example:
170
- # Uses full token capacity (e.g., 4096 for Claude, 128k for GPT-5)
171
- response = await shotgun_model_request(model_config, messages)
172
-
173
- # Override for specific use case
174
- response = await shotgun_model_request(model_config, messages, max_tokens=1000)
175
- """
176
- # Get properly configured ModelSettings with maximum or overridden token limit
177
- model_settings = model_config.get_model_settings(max_tokens)
178
-
179
- # Make the model request with full token utilization
180
- return await model_request(
181
- model=model_config.model_instance,
182
- messages=messages,
183
- model_settings=model_settings,
184
- **kwargs,
185
- )
158
+ config_version: int = Field(default=2, description="Configuration schema version")