shotgun-sh 0.1.14__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- shotgun/agents/agent_manager.py +715 -75
- shotgun/agents/common.py +80 -75
- shotgun/agents/config/constants.py +21 -10
- shotgun/agents/config/manager.py +322 -97
- shotgun/agents/config/models.py +114 -84
- shotgun/agents/config/provider.py +232 -88
- shotgun/agents/context_analyzer/__init__.py +28 -0
- shotgun/agents/context_analyzer/analyzer.py +471 -0
- shotgun/agents/context_analyzer/constants.py +9 -0
- shotgun/agents/context_analyzer/formatter.py +115 -0
- shotgun/agents/context_analyzer/models.py +212 -0
- shotgun/agents/conversation_history.py +125 -2
- shotgun/agents/conversation_manager.py +57 -19
- shotgun/agents/export.py +6 -7
- shotgun/agents/history/compaction.py +10 -5
- shotgun/agents/history/context_extraction.py +93 -6
- shotgun/agents/history/history_processors.py +129 -12
- shotgun/agents/history/token_counting/__init__.py +31 -0
- shotgun/agents/history/token_counting/anthropic.py +127 -0
- shotgun/agents/history/token_counting/base.py +78 -0
- shotgun/agents/history/token_counting/openai.py +90 -0
- shotgun/agents/history/token_counting/sentencepiece_counter.py +127 -0
- shotgun/agents/history/token_counting/tokenizer_cache.py +92 -0
- shotgun/agents/history/token_counting/utils.py +144 -0
- shotgun/agents/history/token_estimation.py +12 -12
- shotgun/agents/llm.py +62 -0
- shotgun/agents/models.py +59 -4
- shotgun/agents/plan.py +6 -7
- shotgun/agents/research.py +7 -8
- shotgun/agents/specify.py +6 -7
- shotgun/agents/tasks.py +6 -7
- shotgun/agents/tools/__init__.py +0 -2
- shotgun/agents/tools/codebase/codebase_shell.py +6 -0
- shotgun/agents/tools/codebase/directory_lister.py +6 -0
- shotgun/agents/tools/codebase/file_read.py +11 -2
- shotgun/agents/tools/codebase/query_graph.py +6 -0
- shotgun/agents/tools/codebase/retrieve_code.py +6 -0
- shotgun/agents/tools/file_management.py +82 -16
- shotgun/agents/tools/registry.py +217 -0
- shotgun/agents/tools/web_search/__init__.py +55 -16
- shotgun/agents/tools/web_search/anthropic.py +76 -51
- shotgun/agents/tools/web_search/gemini.py +50 -27
- shotgun/agents/tools/web_search/openai.py +26 -17
- shotgun/agents/tools/web_search/utils.py +2 -2
- shotgun/agents/usage_manager.py +164 -0
- shotgun/api_endpoints.py +15 -0
- shotgun/cli/clear.py +53 -0
- shotgun/cli/compact.py +186 -0
- shotgun/cli/config.py +41 -67
- shotgun/cli/context.py +111 -0
- shotgun/cli/export.py +1 -1
- shotgun/cli/feedback.py +50 -0
- shotgun/cli/models.py +3 -2
- shotgun/cli/plan.py +1 -1
- shotgun/cli/research.py +1 -1
- shotgun/cli/specify.py +1 -1
- shotgun/cli/tasks.py +1 -1
- shotgun/cli/update.py +16 -2
- shotgun/codebase/core/change_detector.py +5 -3
- shotgun/codebase/core/code_retrieval.py +4 -2
- shotgun/codebase/core/ingestor.py +57 -16
- shotgun/codebase/core/manager.py +20 -7
- shotgun/codebase/core/nl_query.py +1 -1
- shotgun/codebase/models.py +4 -4
- shotgun/exceptions.py +32 -0
- shotgun/llm_proxy/__init__.py +19 -0
- shotgun/llm_proxy/clients.py +44 -0
- shotgun/llm_proxy/constants.py +15 -0
- shotgun/logging_config.py +18 -27
- shotgun/main.py +91 -12
- shotgun/posthog_telemetry.py +81 -10
- shotgun/prompts/agents/export.j2 +18 -1
- shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
- shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
- shotgun/prompts/agents/plan.j2 +1 -1
- shotgun/prompts/agents/research.j2 +1 -1
- shotgun/prompts/agents/specify.j2 +270 -3
- shotgun/prompts/agents/state/system_state.j2 +4 -0
- shotgun/prompts/agents/tasks.j2 +1 -1
- shotgun/prompts/loader.py +2 -2
- shotgun/prompts/tools/web_search.j2 +14 -0
- shotgun/sentry_telemetry.py +27 -18
- shotgun/settings.py +238 -0
- shotgun/shotgun_web/__init__.py +19 -0
- shotgun/shotgun_web/client.py +138 -0
- shotgun/shotgun_web/constants.py +21 -0
- shotgun/shotgun_web/models.py +47 -0
- shotgun/telemetry.py +24 -36
- shotgun/tui/app.py +251 -23
- shotgun/tui/commands/__init__.py +1 -1
- shotgun/tui/components/context_indicator.py +179 -0
- shotgun/tui/components/mode_indicator.py +70 -0
- shotgun/tui/components/status_bar.py +48 -0
- shotgun/tui/containers.py +91 -0
- shotgun/tui/dependencies.py +39 -0
- shotgun/tui/protocols.py +45 -0
- shotgun/tui/screens/chat/__init__.py +5 -0
- shotgun/tui/screens/chat/chat.tcss +54 -0
- shotgun/tui/screens/chat/chat_screen.py +1234 -0
- shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
- shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
- shotgun/tui/screens/chat/help_text.py +40 -0
- shotgun/tui/screens/chat/prompt_history.py +48 -0
- shotgun/tui/screens/chat.tcss +11 -0
- shotgun/tui/screens/chat_screen/command_providers.py +226 -11
- shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
- shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
- shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
- shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
- shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
- shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
- shotgun/tui/screens/confirmation_dialog.py +151 -0
- shotgun/tui/screens/feedback.py +193 -0
- shotgun/tui/screens/github_issue.py +102 -0
- shotgun/tui/screens/model_picker.py +352 -0
- shotgun/tui/screens/onboarding.py +431 -0
- shotgun/tui/screens/pipx_migration.py +153 -0
- shotgun/tui/screens/provider_config.py +156 -39
- shotgun/tui/screens/shotgun_auth.py +295 -0
- shotgun/tui/screens/welcome.py +198 -0
- shotgun/tui/services/__init__.py +5 -0
- shotgun/tui/services/conversation_service.py +184 -0
- shotgun/tui/state/__init__.py +7 -0
- shotgun/tui/state/processing_state.py +185 -0
- shotgun/tui/utils/mode_progress.py +14 -7
- shotgun/tui/widgets/__init__.py +5 -0
- shotgun/tui/widgets/widget_coordinator.py +262 -0
- shotgun/utils/datetime_utils.py +77 -0
- shotgun/utils/env_utils.py +13 -0
- shotgun/utils/file_system_utils.py +22 -2
- shotgun/utils/marketing.py +110 -0
- shotgun/utils/update_checker.py +69 -14
- shotgun_sh-0.2.11.dist-info/METADATA +130 -0
- shotgun_sh-0.2.11.dist-info/RECORD +194 -0
- {shotgun_sh-0.1.14.dist-info → shotgun_sh-0.2.11.dist-info}/entry_points.txt +1 -0
- {shotgun_sh-0.1.14.dist-info → shotgun_sh-0.2.11.dist-info}/licenses/LICENSE +1 -1
- shotgun/agents/history/token_counting.py +0 -429
- shotgun/agents/tools/user_interaction.py +0 -37
- shotgun/tui/screens/chat.py +0 -797
- shotgun/tui/screens/chat_screen/history.py +0 -350
- shotgun_sh-0.1.14.dist-info/METADATA +0 -466
- shotgun_sh-0.1.14.dist-info/RECORD +0 -133
- {shotgun_sh-0.1.14.dist-info → shotgun_sh-0.2.11.dist-info}/WHEEL +0 -0
shotgun/agents/config/models.py
CHANGED
@@ -1,16 +1,13 @@
 """Pydantic models for configuration."""
 
-from enum import Enum
-from typing import Any
+from datetime import datetime
+from enum import StrEnum
 
 from pydantic import BaseModel, Field, PrivateAttr, SecretStr
-from pydantic_ai.direct import model_request
-from pydantic_ai.messages import ModelMessage, ModelResponse
 from pydantic_ai.models import Model
-from pydantic_ai.settings import ModelSettings
 
 
-class ProviderType(str, Enum):
+class ProviderType(StrEnum):
     """Provider types for AI services."""
 
     OPENAI = "openai"
@@ -18,20 +15,44 @@ class ProviderType(str, Enum):
     GOOGLE = "google"
 
 
+class KeyProvider(StrEnum):
+    """Authentication method for accessing AI models."""
+
+    BYOK = "byok"  # Bring Your Own Key (individual provider keys)
+    SHOTGUN = "shotgun"  # Shotgun Account (unified LiteLLM proxy)
+
+
+class ModelName(StrEnum):
+    """Available AI model names."""
+
+    GPT_5 = "gpt-5"
+    GPT_5_MINI = "gpt-5-mini"
+    CLAUDE_OPUS_4_1 = "claude-opus-4-1"
+    CLAUDE_SONNET_4_5 = "claude-sonnet-4-5"
+    CLAUDE_HAIKU_4_5 = "claude-haiku-4-5"
+    GEMINI_2_5_PRO = "gemini-2.5-pro"
+    GEMINI_2_5_FLASH = "gemini-2.5-flash"
+
+
 class ModelSpec(BaseModel):
     """Static specification for a model - just metadata."""
 
-    name: …
+    name: ModelName  # Model identifier
     provider: ProviderType
     max_input_tokens: int
     max_output_tokens: int
+    litellm_proxy_model_name: (
+        str  # LiteLLM format (e.g., "openai/gpt-5", "gemini/gemini-2-pro")
+    )
+    short_name: str  # Display name for UI (e.g., "Sonnet 4.5", "GPT-5")
 
 
 class ModelConfig(BaseModel):
     """A fully configured model with API key and settings."""
 
-    name: …
-    provider: ProviderType
+    name: ModelName  # Model identifier
+    provider: ProviderType  # Actual LLM provider (openai, anthropic, google)
+    key_provider: KeyProvider  # Authentication method (byok or shotgun)
     max_input_tokens: int
     max_output_tokens: int
     api_key: str
@@ -47,7 +68,7 @@ class ModelConfig(BaseModel):
         from .provider import get_or_create_model
 
         self._model_instance = get_or_create_model(
-            self.provider, self.name, self.api_key
+            self.provider, self.key_provider, self.name, self.api_key
         )
         return self._model_instance
 
@@ -61,54 +82,64 @@ class ModelConfig(BaseModel):
         }
         return f"{provider_prefix[self.provider]}:{self.name}"
 
-    def get_model_settings(self, max_tokens: int | None = None) -> ModelSettings:
-        """Get ModelSettings with optional token override.
-
-        This provides flexibility for specific use cases that need different
-        token limits while defaulting to maximum utilization.
-
-        Args:
-            max_tokens: Optional override for max_tokens. If None, uses max_output_tokens
-
-        Returns:
-            ModelSettings configured with specified or maximum tokens
-        """
-        return ModelSettings(
-            max_tokens=max_tokens if max_tokens is not None else self.max_output_tokens
-        )
-
 
 # Model specifications registry (static metadata)
-MODEL_SPECS: dict[…
-    …
-        name=…
+MODEL_SPECS: dict[ModelName, ModelSpec] = {
+    ModelName.GPT_5: ModelSpec(
+        name=ModelName.GPT_5,
         provider=ProviderType.OPENAI,
         max_input_tokens=400_000,
         max_output_tokens=128_000,
+        litellm_proxy_model_name="openai/gpt-5",
+        short_name="GPT-5",
     ),
-    …
-        name=…
+    ModelName.GPT_5_MINI: ModelSpec(
+        name=ModelName.GPT_5_MINI,
         provider=ProviderType.OPENAI,
-        max_input_tokens=…
-        max_output_tokens=…
+        max_input_tokens=400_000,
+        max_output_tokens=128_000,
+        litellm_proxy_model_name="openai/gpt-5-mini",
+        short_name="GPT-5 Mini",
     ),
-    …
-        name=…
+    ModelName.CLAUDE_OPUS_4_1: ModelSpec(
+        name=ModelName.CLAUDE_OPUS_4_1,
        provider=ProviderType.ANTHROPIC,
         max_input_tokens=200_000,
         max_output_tokens=32_000,
+        litellm_proxy_model_name="anthropic/claude-opus-4-1",
+        short_name="Opus 4.1",
     ),
-    …
-        name=…
+    ModelName.CLAUDE_SONNET_4_5: ModelSpec(
+        name=ModelName.CLAUDE_SONNET_4_5,
         provider=ProviderType.ANTHROPIC,
         max_input_tokens=200_000,
-        max_output_tokens=…
+        max_output_tokens=16_000,
+        litellm_proxy_model_name="anthropic/claude-sonnet-4-5",
+        short_name="Sonnet 4.5",
+    ),
+    ModelName.CLAUDE_HAIKU_4_5: ModelSpec(
+        name=ModelName.CLAUDE_HAIKU_4_5,
+        provider=ProviderType.ANTHROPIC,
+        max_input_tokens=200_000,
+        max_output_tokens=64_000,
+        litellm_proxy_model_name="anthropic/claude-haiku-4-5",
+        short_name="Haiku 4.5",
     ),
-    …
-        name=…
+    ModelName.GEMINI_2_5_PRO: ModelSpec(
+        name=ModelName.GEMINI_2_5_PRO,
         provider=ProviderType.GOOGLE,
         max_input_tokens=1_000_000,
         max_output_tokens=64_000,
+        litellm_proxy_model_name="gemini/gemini-2.5-pro",
+        short_name="Gemini 2.5 Pro",
+    ),
+    ModelName.GEMINI_2_5_FLASH: ModelSpec(
+        name=ModelName.GEMINI_2_5_FLASH,
+        provider=ProviderType.GOOGLE,
+        max_input_tokens=1_000_000,
+        max_output_tokens=64_000,
+        litellm_proxy_model_name="gemini/gemini-2.5-flash",
+        short_name="Gemini 2.5 Flash",
     ),
 }
 
@@ -131,55 +162,54 @@ class GoogleConfig(BaseModel):
     api_key: SecretStr | None = None
 
 
+class ShotgunAccountConfig(BaseModel):
+    """Configuration for Shotgun Account (LiteLLM proxy)."""
+
+    api_key: SecretStr | None = None
+    supabase_jwt: SecretStr | None = Field(
+        default=None, description="Supabase authentication JWT"
+    )
+
+
+class MarketingMessageRecord(BaseModel):
+    """Record of when a marketing message was shown to the user."""
+
+    shown_at: datetime = Field(description="Timestamp when the message was shown")
+
+
+class MarketingConfig(BaseModel):
+    """Configuration for marketing messages shown to users."""
+
+    messages: dict[str, MarketingMessageRecord] = Field(
+        default_factory=dict,
+        description="Tracking which marketing messages have been shown. Key is message ID (e.g., 'github_star_v1')",
+    )
+
+
 class ShotgunConfig(BaseModel):
     """Main configuration for Shotgun CLI."""
 
     openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
     anthropic: AnthropicConfig = Field(default_factory=AnthropicConfig)
     google: GoogleConfig = Field(default_factory=GoogleConfig)
-    …
-    …
+    shotgun: ShotgunAccountConfig = Field(default_factory=ShotgunAccountConfig)
+    selected_model: ModelName | None = Field(
+        default=None,
+        description="User-selected model",
+    )
+    shotgun_instance_id: str = Field(
+        description="Unique shotgun instance identifier (also used for anonymous telemetry)",
+    )
+    config_version: int = Field(default=4, description="Configuration schema version")
+    shown_welcome_screen: bool = Field(
+        default=False,
+        description="Whether the welcome screen has been shown to the user",
+    )
+    shown_onboarding_popup: datetime | None = Field(
+        default=None,
+        description="Timestamp when the onboarding popup was shown to the user (ISO8601 format)",
     )
-
-
-
-
-async def shotgun_model_request(
-    model_config: ModelConfig,
-    messages: list[ModelMessage],
-    max_tokens: int | None = None,
-    **kwargs: Any,
-) -> ModelResponse:
-    """Model request wrapper that uses full token capacity by default.
-
-    This wrapper ensures all LLM calls in Shotgun use the maximum available
-    token capacity of each model, improving response quality and completeness.
-    The most common issue this fixes is truncated summaries that were cut off
-    at default token limits (e.g., 4096 for Claude models).
-
-    Args:
-        model_config: ModelConfig instance with model settings and API key
-        messages: Messages to send to the model
-        max_tokens: Optional override for max_tokens. If None, uses model's max_output_tokens
-        **kwargs: Additional arguments passed to model_request
-
-    Returns:
-        ModelResponse from the model
-
-    Example:
-        # Uses full token capacity (e.g., 4096 for Claude, 128k for GPT-5)
-        response = await shotgun_model_request(model_config, messages)
-
-        # Override for specific use case
-        response = await shotgun_model_request(model_config, messages, max_tokens=1000)
-    """
-    # Get properly configured ModelSettings with maximum or overridden token limit
-    model_settings = model_config.get_model_settings(max_tokens)
-
-    # Make the model request with full token utilization
-    return await model_request(
-        model=model_config.model_instance,
-        messages=messages,
-        model_settings=model_settings,
-        **kwargs,
+    marketing: MarketingConfig = Field(
+        default_factory=MarketingConfig,
+        description="Marketing messages configuration and tracking",
     )