shotgun-sh 0.1.9__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic. Click here for more details.

Files changed (150)
  1. shotgun/agents/agent_manager.py +761 -52
  2. shotgun/agents/common.py +80 -75
  3. shotgun/agents/config/constants.py +21 -10
  4. shotgun/agents/config/manager.py +322 -97
  5. shotgun/agents/config/models.py +114 -84
  6. shotgun/agents/config/provider.py +232 -88
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +23 -3
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +179 -11
  18. shotgun/agents/history/token_counting/__init__.py +31 -0
  19. shotgun/agents/history/token_counting/anthropic.py +127 -0
  20. shotgun/agents/history/token_counting/base.py +78 -0
  21. shotgun/agents/history/token_counting/openai.py +90 -0
  22. shotgun/agents/history/token_counting/sentencepiece_counter.py +127 -0
  23. shotgun/agents/history/token_counting/tokenizer_cache.py +92 -0
  24. shotgun/agents/history/token_counting/utils.py +144 -0
  25. shotgun/agents/history/token_estimation.py +12 -12
  26. shotgun/agents/llm.py +62 -0
  27. shotgun/agents/models.py +59 -4
  28. shotgun/agents/plan.py +6 -7
  29. shotgun/agents/research.py +7 -8
  30. shotgun/agents/specify.py +6 -7
  31. shotgun/agents/tasks.py +6 -7
  32. shotgun/agents/tools/__init__.py +0 -2
  33. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  34. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  35. shotgun/agents/tools/codebase/file_read.py +11 -2
  36. shotgun/agents/tools/codebase/query_graph.py +6 -0
  37. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  38. shotgun/agents/tools/file_management.py +82 -16
  39. shotgun/agents/tools/registry.py +217 -0
  40. shotgun/agents/tools/web_search/__init__.py +55 -16
  41. shotgun/agents/tools/web_search/anthropic.py +76 -51
  42. shotgun/agents/tools/web_search/gemini.py +50 -27
  43. shotgun/agents/tools/web_search/openai.py +26 -17
  44. shotgun/agents/tools/web_search/utils.py +2 -2
  45. shotgun/agents/usage_manager.py +164 -0
  46. shotgun/api_endpoints.py +15 -0
  47. shotgun/cli/clear.py +53 -0
  48. shotgun/cli/codebase/commands.py +71 -2
  49. shotgun/cli/compact.py +186 -0
  50. shotgun/cli/config.py +41 -67
  51. shotgun/cli/context.py +111 -0
  52. shotgun/cli/export.py +1 -1
  53. shotgun/cli/feedback.py +50 -0
  54. shotgun/cli/models.py +3 -2
  55. shotgun/cli/plan.py +1 -1
  56. shotgun/cli/research.py +1 -1
  57. shotgun/cli/specify.py +1 -1
  58. shotgun/cli/tasks.py +1 -1
  59. shotgun/cli/update.py +18 -5
  60. shotgun/codebase/core/change_detector.py +5 -3
  61. shotgun/codebase/core/code_retrieval.py +4 -2
  62. shotgun/codebase/core/ingestor.py +169 -19
  63. shotgun/codebase/core/manager.py +177 -13
  64. shotgun/codebase/core/nl_query.py +1 -1
  65. shotgun/codebase/models.py +28 -3
  66. shotgun/codebase/service.py +14 -2
  67. shotgun/exceptions.py +32 -0
  68. shotgun/llm_proxy/__init__.py +19 -0
  69. shotgun/llm_proxy/clients.py +44 -0
  70. shotgun/llm_proxy/constants.py +15 -0
  71. shotgun/logging_config.py +18 -27
  72. shotgun/main.py +91 -4
  73. shotgun/posthog_telemetry.py +87 -40
  74. shotgun/prompts/agents/export.j2 +18 -1
  75. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  76. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  77. shotgun/prompts/agents/plan.j2 +1 -1
  78. shotgun/prompts/agents/research.j2 +1 -1
  79. shotgun/prompts/agents/specify.j2 +270 -3
  80. shotgun/prompts/agents/state/system_state.j2 +4 -0
  81. shotgun/prompts/agents/tasks.j2 +1 -1
  82. shotgun/prompts/codebase/partials/cypher_rules.j2 +13 -0
  83. shotgun/prompts/loader.py +2 -2
  84. shotgun/prompts/tools/web_search.j2 +14 -0
  85. shotgun/sdk/codebase.py +60 -2
  86. shotgun/sentry_telemetry.py +28 -21
  87. shotgun/settings.py +238 -0
  88. shotgun/shotgun_web/__init__.py +19 -0
  89. shotgun/shotgun_web/client.py +138 -0
  90. shotgun/shotgun_web/constants.py +21 -0
  91. shotgun/shotgun_web/models.py +47 -0
  92. shotgun/telemetry.py +24 -36
  93. shotgun/tui/app.py +275 -23
  94. shotgun/tui/commands/__init__.py +1 -1
  95. shotgun/tui/components/context_indicator.py +179 -0
  96. shotgun/tui/components/mode_indicator.py +70 -0
  97. shotgun/tui/components/status_bar.py +48 -0
  98. shotgun/tui/components/vertical_tail.py +6 -0
  99. shotgun/tui/containers.py +91 -0
  100. shotgun/tui/dependencies.py +39 -0
  101. shotgun/tui/filtered_codebase_service.py +46 -0
  102. shotgun/tui/protocols.py +45 -0
  103. shotgun/tui/screens/chat/__init__.py +5 -0
  104. shotgun/tui/screens/chat/chat.tcss +54 -0
  105. shotgun/tui/screens/chat/chat_screen.py +1234 -0
  106. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  107. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  108. shotgun/tui/screens/chat/help_text.py +40 -0
  109. shotgun/tui/screens/chat/prompt_history.py +48 -0
  110. shotgun/tui/screens/chat.tcss +11 -0
  111. shotgun/tui/screens/chat_screen/command_providers.py +226 -11
  112. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  113. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  114. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  115. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  116. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  117. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  118. shotgun/tui/screens/confirmation_dialog.py +151 -0
  119. shotgun/tui/screens/feedback.py +193 -0
  120. shotgun/tui/screens/github_issue.py +102 -0
  121. shotgun/tui/screens/model_picker.py +352 -0
  122. shotgun/tui/screens/onboarding.py +431 -0
  123. shotgun/tui/screens/pipx_migration.py +153 -0
  124. shotgun/tui/screens/provider_config.py +156 -39
  125. shotgun/tui/screens/shotgun_auth.py +295 -0
  126. shotgun/tui/screens/welcome.py +198 -0
  127. shotgun/tui/services/__init__.py +5 -0
  128. shotgun/tui/services/conversation_service.py +184 -0
  129. shotgun/tui/state/__init__.py +7 -0
  130. shotgun/tui/state/processing_state.py +185 -0
  131. shotgun/tui/utils/mode_progress.py +14 -7
  132. shotgun/tui/widgets/__init__.py +5 -0
  133. shotgun/tui/widgets/widget_coordinator.py +262 -0
  134. shotgun/utils/datetime_utils.py +77 -0
  135. shotgun/utils/env_utils.py +13 -0
  136. shotgun/utils/file_system_utils.py +22 -2
  137. shotgun/utils/marketing.py +110 -0
  138. shotgun/utils/source_detection.py +16 -0
  139. shotgun/utils/update_checker.py +73 -21
  140. shotgun_sh-0.2.11.dist-info/METADATA +130 -0
  141. shotgun_sh-0.2.11.dist-info/RECORD +194 -0
  142. {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/entry_points.txt +1 -0
  143. {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/licenses/LICENSE +1 -1
  144. shotgun/agents/history/token_counting.py +0 -429
  145. shotgun/agents/tools/user_interaction.py +0 -37
  146. shotgun/tui/screens/chat.py +0 -818
  147. shotgun/tui/screens/chat_screen/history.py +0 -222
  148. shotgun_sh-0.1.9.dist-info/METADATA +0 -466
  149. shotgun_sh-0.1.9.dist-info/RECORD +0 -131
  150. {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/WHEEL +0 -0
@@ -1,7 +1,5 @@
1
1
  """Provider management for LLM configuration."""
2
2
 
3
- import os
4
-
5
3
  from pydantic import SecretStr
6
4
  from pydantic_ai.models import Model
7
5
  from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings
@@ -12,27 +10,67 @@ from pydantic_ai.providers.google import GoogleProvider
12
10
  from pydantic_ai.providers.openai import OpenAIProvider
13
11
  from pydantic_ai.settings import ModelSettings
14
12
 
13
+ from shotgun.llm_proxy import (
14
+ create_anthropic_proxy_provider,
15
+ create_litellm_provider,
16
+ )
15
17
  from shotgun.logging_config import get_logger
16
18
 
17
- from .constants import (
18
- ANTHROPIC_API_KEY_ENV,
19
- GEMINI_API_KEY_ENV,
20
- OPENAI_API_KEY_ENV,
21
- )
22
19
  from .manager import get_config_manager
23
- from .models import MODEL_SPECS, ModelConfig, ProviderType
20
+ from .models import (
21
+ MODEL_SPECS,
22
+ KeyProvider,
23
+ ModelConfig,
24
+ ModelName,
25
+ ProviderType,
26
+ ShotgunConfig,
27
+ )
24
28
 
25
29
  logger = get_logger(__name__)
26
30
 
27
31
  # Global cache for Model instances (singleton pattern)
28
- _model_cache: dict[tuple[ProviderType, str, str], Model] = {}
32
+ _model_cache: dict[tuple[ProviderType, KeyProvider, ModelName, str], Model] = {}
33
+
34
+
35
+ def get_default_model_for_provider(config: ShotgunConfig) -> ModelName:
36
+ """Get the default model based on which provider/account is configured.
29
37
 
38
+ Checks API keys in priority order and returns appropriate default model.
39
+ Treats Shotgun Account as a provider context.
30
40
 
31
- def get_or_create_model(provider: ProviderType, model_name: str, api_key: str) -> Model:
41
+ Args:
42
+ config: Shotgun configuration containing API keys
43
+
44
+ Returns:
45
+ Default ModelName for the configured provider/account
46
+ """
47
+ # Priority 1: Shotgun Account
48
+ if _get_api_key(config.shotgun.api_key):
49
+ return ModelName.GPT_5
50
+
51
+ # Priority 2: Individual provider keys
52
+ if _get_api_key(config.anthropic.api_key):
53
+ return ModelName.CLAUDE_HAIKU_4_5
54
+ if _get_api_key(config.openai.api_key):
55
+ return ModelName.GPT_5
56
+ if _get_api_key(config.google.api_key):
57
+ return ModelName.GEMINI_2_5_PRO
58
+
59
+ # Fallback: system-wide default
60
+ return ModelName.CLAUDE_HAIKU_4_5
61
+
62
+
63
+ def get_or_create_model(
64
+ provider: ProviderType,
65
+ key_provider: "KeyProvider",
66
+ model_name: ModelName,
67
+ api_key: str,
68
+ ) -> Model:
32
69
  """Get or create a singleton Model instance.
33
70
 
34
71
  Args:
35
- provider: Provider type
72
+ provider: Actual LLM provider (openai, anthropic, google)
73
+ key_provider: Authentication method (byok or shotgun)
36
74
  model_name: Name of the model
37
75
  api_key: API key for the provider
38
76
 
@@ -42,66 +80,106 @@ def get_or_create_model(provider: ProviderType, model_name: str, api_key: str) -
42
80
  Raises:
43
81
  ValueError: If provider is not supported
44
82
  """
45
- cache_key = (provider, model_name, api_key)
83
+ cache_key = (provider, key_provider, model_name, api_key)
46
84
 
47
85
  if cache_key not in _model_cache:
48
- logger.debug("Creating new %s model instance: %s", provider.value, model_name)
86
+ logger.debug(
87
+ "Creating new %s model instance via %s: %s",
88
+ provider.value,
89
+ key_provider.value,
90
+ model_name,
91
+ )
49
92
 
50
- if provider == ProviderType.OPENAI:
51
- # Get max_tokens from MODEL_SPECS to use full capacity
93
+ # Get max_tokens from MODEL_SPECS
94
+ if model_name in MODEL_SPECS:
95
+ max_tokens = MODEL_SPECS[model_name].max_output_tokens
96
+ else:
97
+ # Fallback defaults based on provider
98
+ max_tokens = {
99
+ ProviderType.OPENAI: 16_000,
100
+ ProviderType.ANTHROPIC: 32_000,
101
+ ProviderType.GOOGLE: 64_000,
102
+ }.get(provider, 16_000)
103
+
104
+ # Use LiteLLM proxy for Shotgun Account, native providers for BYOK
105
+ if key_provider == KeyProvider.SHOTGUN:
106
+ # Shotgun Account uses LiteLLM proxy with native model types where possible
52
107
  if model_name in MODEL_SPECS:
53
- max_tokens = MODEL_SPECS[model_name].max_output_tokens
108
+ litellm_model_name = MODEL_SPECS[model_name].litellm_proxy_model_name
54
109
  else:
55
- max_tokens = 16_000 # Default for GPT models
56
-
57
- openai_provider = OpenAIProvider(api_key=api_key)
58
- _model_cache[cache_key] = OpenAIChatModel(
59
- model_name,
60
- provider=openai_provider,
61
- settings=ModelSettings(max_tokens=max_tokens),
62
- )
63
- elif provider == ProviderType.ANTHROPIC:
64
- # Get max_tokens from MODEL_SPECS to use full capacity
65
- if model_name in MODEL_SPECS:
66
- max_tokens = MODEL_SPECS[model_name].max_output_tokens
110
+ # Fallback for unmapped models
111
+ litellm_model_name = f"openai/{model_name.value}"
112
+
113
+ # Use native provider types to preserve API formats and features
114
+ if provider == ProviderType.ANTHROPIC:
115
+ # Anthropic: Use native AnthropicProvider with /anthropic endpoint
116
+ # This preserves Anthropic-specific features like tool_choice
117
+ # Note: Web search for Shotgun Account uses Gemini only (not Anthropic)
118
+ # Note: Anthropic API expects model name without prefix (e.g., "claude-sonnet-4-5")
119
+ anthropic_provider = create_anthropic_proxy_provider(api_key)
120
+ _model_cache[cache_key] = AnthropicModel(
121
+ model_name.value, # Use model name without "anthropic/" prefix
122
+ provider=anthropic_provider,
123
+ settings=AnthropicModelSettings(
124
+ max_tokens=max_tokens,
125
+ timeout=600, # 10 minutes timeout for large responses
126
+ ),
127
+ )
67
128
  else:
68
- max_tokens = 32_000 # Default for Claude models
69
-
70
- anthropic_provider = AnthropicProvider(api_key=api_key)
71
- _model_cache[cache_key] = AnthropicModel(
72
- model_name,
73
- provider=anthropic_provider,
74
- settings=AnthropicModelSettings(
75
- max_tokens=max_tokens,
76
- timeout=600, # 10 minutes timeout for large responses
77
- ),
78
- )
79
- elif provider == ProviderType.GOOGLE:
80
- # Get max_tokens from MODEL_SPECS to use full capacity
81
- if model_name in MODEL_SPECS:
82
- max_tokens = MODEL_SPECS[model_name].max_output_tokens
129
+ # OpenAI and Google: Use LiteLLMProvider (OpenAI-compatible format)
130
+ # Google's GoogleProvider doesn't support base_url, so use LiteLLM
131
+ litellm_provider = create_litellm_provider(api_key)
132
+ _model_cache[cache_key] = OpenAIChatModel(
133
+ litellm_model_name,
134
+ provider=litellm_provider,
135
+ settings=ModelSettings(max_tokens=max_tokens),
136
+ )
137
+ elif key_provider == KeyProvider.BYOK:
138
+ # Use native provider implementations with user's API keys
139
+ if provider == ProviderType.OPENAI:
140
+ openai_provider = OpenAIProvider(api_key=api_key)
141
+ _model_cache[cache_key] = OpenAIChatModel(
142
+ model_name,
143
+ provider=openai_provider,
144
+ settings=ModelSettings(max_tokens=max_tokens),
145
+ )
146
+ elif provider == ProviderType.ANTHROPIC:
147
+ anthropic_provider = AnthropicProvider(api_key=api_key)
148
+ _model_cache[cache_key] = AnthropicModel(
149
+ model_name,
150
+ provider=anthropic_provider,
151
+ settings=AnthropicModelSettings(
152
+ max_tokens=max_tokens,
153
+ timeout=600, # 10 minutes timeout for large responses
154
+ ),
155
+ )
156
+ elif provider == ProviderType.GOOGLE:
157
+ google_provider = GoogleProvider(api_key=api_key)
158
+ _model_cache[cache_key] = GoogleModel(
159
+ model_name,
160
+ provider=google_provider,
161
+ settings=ModelSettings(max_tokens=max_tokens),
162
+ )
83
163
  else:
84
- max_tokens = 64_000 # Default for Gemini models
85
-
86
- google_provider = GoogleProvider(api_key=api_key)
87
- _model_cache[cache_key] = GoogleModel(
88
- model_name,
89
- provider=google_provider,
90
- settings=ModelSettings(max_tokens=max_tokens),
91
- )
164
+ raise ValueError(f"Unsupported provider: {provider}")
92
165
  else:
93
- raise ValueError(f"Unsupported provider: {provider}")
166
+ raise ValueError(f"Unsupported key provider: {key_provider}")
94
167
  else:
95
168
  logger.debug("Reusing cached %s model instance: %s", provider.value, model_name)
96
169
 
97
170
  return _model_cache[cache_key]
98
171
 
99
172
 
100
- def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
173
+ async def get_provider_model(
174
+ provider_or_model: ProviderType | ModelName | None = None,
175
+ ) -> ModelConfig:
101
176
  """Get a fully configured ModelConfig with API key and Model instance.
102
177
 
103
178
  Args:
104
- provider: Provider to get model for. If None, uses default provider
179
+ provider_or_model: Either a ProviderType, ModelName, or None.
180
+ - If ModelName: returns that specific model with appropriate API key
181
+ - If ProviderType: returns default model for that provider (backward compatible)
182
+ - If None: uses default provider with its default model
105
183
 
106
184
  Returns:
107
185
  ModelConfig with API key configured and lazy Model instance
@@ -110,77 +188,125 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
110
188
  ValueError: If provider is not configured properly or model not found
111
189
  """
112
190
  config_manager = get_config_manager()
113
- config = config_manager.load()
114
- # Convert string to ProviderType enum if needed
115
- provider_enum = (
116
- provider
117
- if isinstance(provider, ProviderType)
118
- else ProviderType(provider)
119
- if provider
120
- else config.default_provider
121
- )
191
+ # Use cached config for read-only access (performance)
192
+ config = await config_manager.load(force_reload=False)
193
+
194
+ # Priority 1: Check if Shotgun key exists - if so, use it for ANY model
195
+ shotgun_api_key = _get_api_key(config.shotgun.api_key)
196
+ if shotgun_api_key:
197
+ # Determine which model to use
198
+ if isinstance(provider_or_model, ModelName):
199
+ # Specific model requested - honor it (e.g., web search tools)
200
+ model_name = provider_or_model
201
+ else:
202
+ # No specific model requested - use selected or default
203
+ model_name = config.selected_model or ModelName.GPT_5
204
+
205
+ if model_name not in MODEL_SPECS:
206
+ raise ValueError(f"Model '{model_name.value}' not found")
207
+ spec = MODEL_SPECS[model_name]
208
+
209
+ # Use Shotgun Account with determined model (provider = actual LLM provider)
210
+ return ModelConfig(
211
+ name=spec.name,
212
+ provider=spec.provider, # Actual LLM provider (OPENAI/ANTHROPIC/GOOGLE)
213
+ key_provider=KeyProvider.SHOTGUN, # Authenticated via Shotgun Account
214
+ max_input_tokens=spec.max_input_tokens,
215
+ max_output_tokens=spec.max_output_tokens,
216
+ api_key=shotgun_api_key,
217
+ )
218
+
219
+ # Priority 2: Fall back to individual provider keys
220
+
221
+ # Check if a specific model was requested
222
+ if isinstance(provider_or_model, ModelName):
223
+ # Look up the model spec
224
+ if provider_or_model not in MODEL_SPECS:
225
+ raise ValueError(f"Model '{provider_or_model.value}' not found")
226
+ spec = MODEL_SPECS[provider_or_model]
227
+ provider_enum = spec.provider
228
+ requested_model = provider_or_model
229
+ else:
230
+ # Convert string to ProviderType enum if needed (backward compatible)
231
+ if provider_or_model:
232
+ provider_enum = (
233
+ provider_or_model
234
+ if isinstance(provider_or_model, ProviderType)
235
+ else ProviderType(provider_or_model)
236
+ )
237
+ else:
238
+ # No provider specified - find first available provider with a key
239
+ provider_enum = None
240
+ for provider in ProviderType:
241
+ if _has_provider_key(config, provider):
242
+ provider_enum = provider
243
+ break
244
+
245
+ if provider_enum is None:
246
+ raise ValueError(
247
+ "No provider keys configured. Set via environment variables or config."
248
+ )
249
+
250
+ requested_model = None # Will use provider's default model
122
251
 
123
252
  if provider_enum == ProviderType.OPENAI:
124
- api_key = _get_api_key(config.openai.api_key, OPENAI_API_KEY_ENV)
253
+ api_key = _get_api_key(config.openai.api_key)
125
254
  if not api_key:
126
- raise ValueError(
127
- f"OpenAI API key not configured. Set via environment variable {OPENAI_API_KEY_ENV} or config."
128
- )
255
+ raise ValueError("OpenAI API key not configured. Set via config.")
129
256
 
130
- # Get model spec - hardcoded to gpt-5
131
- model_name = "gpt-5"
257
+ # Use requested model or default to gpt-5
258
+ model_name = requested_model if requested_model else ModelName.GPT_5
132
259
  if model_name not in MODEL_SPECS:
133
- raise ValueError(f"Model '{model_name}' not found")
260
+ raise ValueError(f"Model '{model_name.value}' not found")
134
261
  spec = MODEL_SPECS[model_name]
135
262
 
136
263
  # Create fully configured ModelConfig
137
264
  return ModelConfig(
138
265
  name=spec.name,
139
266
  provider=spec.provider,
267
+ key_provider=KeyProvider.BYOK,
140
268
  max_input_tokens=spec.max_input_tokens,
141
269
  max_output_tokens=spec.max_output_tokens,
142
270
  api_key=api_key,
143
271
  )
144
272
 
145
273
  elif provider_enum == ProviderType.ANTHROPIC:
146
- api_key = _get_api_key(config.anthropic.api_key, ANTHROPIC_API_KEY_ENV)
274
+ api_key = _get_api_key(config.anthropic.api_key)
147
275
  if not api_key:
148
- raise ValueError(
149
- f"Anthropic API key not configured. Set via environment variable {ANTHROPIC_API_KEY_ENV} or config."
150
- )
276
+ raise ValueError("Anthropic API key not configured. Set via config.")
151
277
 
152
- # Get model spec - hardcoded to claude-opus-4-1
153
- model_name = "claude-opus-4-1"
278
+ # Use requested model or default to claude-haiku-4-5
279
+ model_name = requested_model if requested_model else ModelName.CLAUDE_HAIKU_4_5
154
280
  if model_name not in MODEL_SPECS:
155
- raise ValueError(f"Model '{model_name}' not found")
281
+ raise ValueError(f"Model '{model_name.value}' not found")
156
282
  spec = MODEL_SPECS[model_name]
157
283
 
158
284
  # Create fully configured ModelConfig
159
285
  return ModelConfig(
160
286
  name=spec.name,
161
287
  provider=spec.provider,
288
+ key_provider=KeyProvider.BYOK,
162
289
  max_input_tokens=spec.max_input_tokens,
163
290
  max_output_tokens=spec.max_output_tokens,
164
291
  api_key=api_key,
165
292
  )
166
293
 
167
294
  elif provider_enum == ProviderType.GOOGLE:
168
- api_key = _get_api_key(config.google.api_key, GEMINI_API_KEY_ENV)
295
+ api_key = _get_api_key(config.google.api_key)
169
296
  if not api_key:
170
- raise ValueError(
171
- f"Gemini API key not configured. Set via environment variable {GEMINI_API_KEY_ENV} or config."
172
- )
297
+ raise ValueError("Gemini API key not configured. Set via config.")
173
298
 
174
- # Get model spec - hardcoded to gemini-2.5-pro
175
- model_name = "gemini-2.5-pro"
299
+ # Use requested model or default to gemini-2.5-pro
300
+ model_name = requested_model if requested_model else ModelName.GEMINI_2_5_PRO
176
301
  if model_name not in MODEL_SPECS:
177
- raise ValueError(f"Model '{model_name}' not found")
302
+ raise ValueError(f"Model '{model_name.value}' not found")
178
303
  spec = MODEL_SPECS[model_name]
179
304
 
180
305
  # Create fully configured ModelConfig
181
306
  return ModelConfig(
182
307
  name=spec.name,
183
308
  provider=spec.provider,
309
+ key_provider=KeyProvider.BYOK,
184
310
  max_input_tokens=spec.max_input_tokens,
185
311
  max_output_tokens=spec.max_output_tokens,
186
312
  api_key=api_key,
@@ -190,12 +316,30 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
190
316
  raise ValueError(f"Unsupported provider: {provider_enum}")
191
317
 
192
318
 
193
- def _get_api_key(config_key: SecretStr | None, env_var: str) -> str | None:
194
- """Get API key from config or environment variable.
319
+ def _has_provider_key(config: "ShotgunConfig", provider: ProviderType) -> bool:
320
+ """Check if a provider has a configured API key.
321
+
322
+ Args:
323
+ config: Shotgun configuration
324
+ provider: Provider to check
325
+
326
+ Returns:
327
+ True if provider has a configured API key
328
+ """
329
+ if provider == ProviderType.OPENAI:
330
+ return bool(_get_api_key(config.openai.api_key))
331
+ elif provider == ProviderType.ANTHROPIC:
332
+ return bool(_get_api_key(config.anthropic.api_key))
333
+ elif provider == ProviderType.GOOGLE:
334
+ return bool(_get_api_key(config.google.api_key))
335
+ return False
336
+
337
+
338
+ def _get_api_key(config_key: SecretStr | None) -> str | None:
339
+ """Get API key from config.
195
340
 
196
341
  Args:
197
342
  config_key: API key from configuration
198
- env_var: Environment variable name to check
199
343
 
200
344
  Returns:
201
345
  API key string or None
@@ -203,4 +347,4 @@ def _get_api_key(config_key: SecretStr | None, env_var: str) -> str | None:
203
347
  if config_key is not None:
204
348
  return config_key.get_secret_value()
205
349
 
206
- return os.getenv(env_var)
350
+ return None
@@ -0,0 +1,28 @@
1
+ """Context analysis module for conversation composition statistics.
2
+
3
+ This module provides tools for analyzing conversation context usage, breaking down
4
+ token consumption by message type and tool category.
5
+ """
6
+
7
+ from .analyzer import ContextAnalyzer
8
+ from .constants import ToolCategory, get_tool_category
9
+ from .formatter import ContextFormatter
10
+ from .models import (
11
+ ContextAnalysis,
12
+ ContextAnalysisOutput,
13
+ ContextCompositionTelemetry,
14
+ MessageTypeStats,
15
+ TokenAllocation,
16
+ )
17
+
18
+ __all__ = [
19
+ "ContextAnalyzer",
20
+ "ContextAnalysis",
21
+ "ContextAnalysisOutput",
22
+ "ContextCompositionTelemetry",
23
+ "ContextFormatter",
24
+ "MessageTypeStats",
25
+ "TokenAllocation",
26
+ "ToolCategory",
27
+ "get_tool_category",
28
+ ]