shotgun-sh 0.1.9__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff shows the content of publicly available package versions as published to their public registries. It is provided for informational purposes only.

Potentially problematic release: this version of shotgun-sh might be problematic.

Files changed (150)
  1. shotgun/agents/agent_manager.py +761 -52
  2. shotgun/agents/common.py +80 -75
  3. shotgun/agents/config/constants.py +21 -10
  4. shotgun/agents/config/manager.py +322 -97
  5. shotgun/agents/config/models.py +114 -84
  6. shotgun/agents/config/provider.py +232 -88
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +23 -3
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +179 -11
  18. shotgun/agents/history/token_counting/__init__.py +31 -0
  19. shotgun/agents/history/token_counting/anthropic.py +127 -0
  20. shotgun/agents/history/token_counting/base.py +78 -0
  21. shotgun/agents/history/token_counting/openai.py +90 -0
  22. shotgun/agents/history/token_counting/sentencepiece_counter.py +127 -0
  23. shotgun/agents/history/token_counting/tokenizer_cache.py +92 -0
  24. shotgun/agents/history/token_counting/utils.py +144 -0
  25. shotgun/agents/history/token_estimation.py +12 -12
  26. shotgun/agents/llm.py +62 -0
  27. shotgun/agents/models.py +59 -4
  28. shotgun/agents/plan.py +6 -7
  29. shotgun/agents/research.py +7 -8
  30. shotgun/agents/specify.py +6 -7
  31. shotgun/agents/tasks.py +6 -7
  32. shotgun/agents/tools/__init__.py +0 -2
  33. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  34. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  35. shotgun/agents/tools/codebase/file_read.py +11 -2
  36. shotgun/agents/tools/codebase/query_graph.py +6 -0
  37. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  38. shotgun/agents/tools/file_management.py +82 -16
  39. shotgun/agents/tools/registry.py +217 -0
  40. shotgun/agents/tools/web_search/__init__.py +55 -16
  41. shotgun/agents/tools/web_search/anthropic.py +76 -51
  42. shotgun/agents/tools/web_search/gemini.py +50 -27
  43. shotgun/agents/tools/web_search/openai.py +26 -17
  44. shotgun/agents/tools/web_search/utils.py +2 -2
  45. shotgun/agents/usage_manager.py +164 -0
  46. shotgun/api_endpoints.py +15 -0
  47. shotgun/cli/clear.py +53 -0
  48. shotgun/cli/codebase/commands.py +71 -2
  49. shotgun/cli/compact.py +186 -0
  50. shotgun/cli/config.py +41 -67
  51. shotgun/cli/context.py +111 -0
  52. shotgun/cli/export.py +1 -1
  53. shotgun/cli/feedback.py +50 -0
  54. shotgun/cli/models.py +3 -2
  55. shotgun/cli/plan.py +1 -1
  56. shotgun/cli/research.py +1 -1
  57. shotgun/cli/specify.py +1 -1
  58. shotgun/cli/tasks.py +1 -1
  59. shotgun/cli/update.py +18 -5
  60. shotgun/codebase/core/change_detector.py +5 -3
  61. shotgun/codebase/core/code_retrieval.py +4 -2
  62. shotgun/codebase/core/ingestor.py +169 -19
  63. shotgun/codebase/core/manager.py +177 -13
  64. shotgun/codebase/core/nl_query.py +1 -1
  65. shotgun/codebase/models.py +28 -3
  66. shotgun/codebase/service.py +14 -2
  67. shotgun/exceptions.py +32 -0
  68. shotgun/llm_proxy/__init__.py +19 -0
  69. shotgun/llm_proxy/clients.py +44 -0
  70. shotgun/llm_proxy/constants.py +15 -0
  71. shotgun/logging_config.py +18 -27
  72. shotgun/main.py +91 -4
  73. shotgun/posthog_telemetry.py +87 -40
  74. shotgun/prompts/agents/export.j2 +18 -1
  75. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  76. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  77. shotgun/prompts/agents/plan.j2 +1 -1
  78. shotgun/prompts/agents/research.j2 +1 -1
  79. shotgun/prompts/agents/specify.j2 +270 -3
  80. shotgun/prompts/agents/state/system_state.j2 +4 -0
  81. shotgun/prompts/agents/tasks.j2 +1 -1
  82. shotgun/prompts/codebase/partials/cypher_rules.j2 +13 -0
  83. shotgun/prompts/loader.py +2 -2
  84. shotgun/prompts/tools/web_search.j2 +14 -0
  85. shotgun/sdk/codebase.py +60 -2
  86. shotgun/sentry_telemetry.py +28 -21
  87. shotgun/settings.py +238 -0
  88. shotgun/shotgun_web/__init__.py +19 -0
  89. shotgun/shotgun_web/client.py +138 -0
  90. shotgun/shotgun_web/constants.py +21 -0
  91. shotgun/shotgun_web/models.py +47 -0
  92. shotgun/telemetry.py +24 -36
  93. shotgun/tui/app.py +275 -23
  94. shotgun/tui/commands/__init__.py +1 -1
  95. shotgun/tui/components/context_indicator.py +179 -0
  96. shotgun/tui/components/mode_indicator.py +70 -0
  97. shotgun/tui/components/status_bar.py +48 -0
  98. shotgun/tui/components/vertical_tail.py +6 -0
  99. shotgun/tui/containers.py +91 -0
  100. shotgun/tui/dependencies.py +39 -0
  101. shotgun/tui/filtered_codebase_service.py +46 -0
  102. shotgun/tui/protocols.py +45 -0
  103. shotgun/tui/screens/chat/__init__.py +5 -0
  104. shotgun/tui/screens/chat/chat.tcss +54 -0
  105. shotgun/tui/screens/chat/chat_screen.py +1234 -0
  106. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  107. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  108. shotgun/tui/screens/chat/help_text.py +40 -0
  109. shotgun/tui/screens/chat/prompt_history.py +48 -0
  110. shotgun/tui/screens/chat.tcss +11 -0
  111. shotgun/tui/screens/chat_screen/command_providers.py +226 -11
  112. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  113. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  114. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  115. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  116. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  117. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  118. shotgun/tui/screens/confirmation_dialog.py +151 -0
  119. shotgun/tui/screens/feedback.py +193 -0
  120. shotgun/tui/screens/github_issue.py +102 -0
  121. shotgun/tui/screens/model_picker.py +352 -0
  122. shotgun/tui/screens/onboarding.py +431 -0
  123. shotgun/tui/screens/pipx_migration.py +153 -0
  124. shotgun/tui/screens/provider_config.py +156 -39
  125. shotgun/tui/screens/shotgun_auth.py +295 -0
  126. shotgun/tui/screens/welcome.py +198 -0
  127. shotgun/tui/services/__init__.py +5 -0
  128. shotgun/tui/services/conversation_service.py +184 -0
  129. shotgun/tui/state/__init__.py +7 -0
  130. shotgun/tui/state/processing_state.py +185 -0
  131. shotgun/tui/utils/mode_progress.py +14 -7
  132. shotgun/tui/widgets/__init__.py +5 -0
  133. shotgun/tui/widgets/widget_coordinator.py +262 -0
  134. shotgun/utils/datetime_utils.py +77 -0
  135. shotgun/utils/env_utils.py +13 -0
  136. shotgun/utils/file_system_utils.py +22 -2
  137. shotgun/utils/marketing.py +110 -0
  138. shotgun/utils/source_detection.py +16 -0
  139. shotgun/utils/update_checker.py +73 -21
  140. shotgun_sh-0.2.11.dist-info/METADATA +130 -0
  141. shotgun_sh-0.2.11.dist-info/RECORD +194 -0
  142. {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/entry_points.txt +1 -0
  143. {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/licenses/LICENSE +1 -1
  144. shotgun/agents/history/token_counting.py +0 -429
  145. shotgun/agents/tools/user_interaction.py +0 -37
  146. shotgun/tui/screens/chat.py +0 -818
  147. shotgun/tui/screens/chat_screen/history.py +0 -222
  148. shotgun_sh-0.1.9.dist-info/METADATA +0 -466
  149. shotgun_sh-0.1.9.dist-info/RECORD +0 -131
  150. {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/WHEEL +0 -0
shotgun/agents/config/models.py

@@ -1,16 +1,13 @@
 """Pydantic models for configuration."""
 
-from enum import Enum
-from typing import Any
+from datetime import datetime
+from enum import StrEnum
 
 from pydantic import BaseModel, Field, PrivateAttr, SecretStr
-from pydantic_ai.direct import model_request
-from pydantic_ai.messages import ModelMessage, ModelResponse
 from pydantic_ai.models import Model
-from pydantic_ai.settings import ModelSettings
 
 
-class ProviderType(str, Enum):
+class ProviderType(StrEnum):
    """Provider types for AI services."""
 
     OPENAI = "openai"
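A note on the ProviderType change above: StrEnum (Python 3.11+) guarantees that str(member) equals the member's value, whereas the old str/Enum mixin stringifies to "ClassName.MEMBER" on recent Python versions. A minimal sketch of the difference, with illustrative class names:

    from enum import Enum, StrEnum

    class OldStyle(str, Enum):    # the 0.1.x pattern
        OPENAI = "openai"

    class NewStyle(StrEnum):      # the 0.2.x pattern
        OPENAI = "openai"

    print(str(OldStyle.OPENAI))   # "OldStyle.OPENAI" on Python 3.11+
    print(str(NewStyle.OPENAI))   # "openai" -- safe in f-strings, dict keys, logs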
@@ -18,20 +15,44 @@ class ProviderType(str, Enum):
     GOOGLE = "google"
 
 
+class KeyProvider(StrEnum):
+    """Authentication method for accessing AI models."""
+
+    BYOK = "byok"  # Bring Your Own Key (individual provider keys)
+    SHOTGUN = "shotgun"  # Shotgun Account (unified LiteLLM proxy)
+
+
+class ModelName(StrEnum):
+    """Available AI model names."""
+
+    GPT_5 = "gpt-5"
+    GPT_5_MINI = "gpt-5-mini"
+    CLAUDE_OPUS_4_1 = "claude-opus-4-1"
+    CLAUDE_SONNET_4_5 = "claude-sonnet-4-5"
+    CLAUDE_HAIKU_4_5 = "claude-haiku-4-5"
+    GEMINI_2_5_PRO = "gemini-2.5-pro"
+    GEMINI_2_5_FLASH = "gemini-2.5-flash"
+
+
 class ModelSpec(BaseModel):
     """Static specification for a model - just metadata."""
 
-    name: str  # Model identifier (e.g., "gpt-5", "claude-opus-4-1")
+    name: ModelName  # Model identifier
     provider: ProviderType
     max_input_tokens: int
     max_output_tokens: int
+    litellm_proxy_model_name: (
+        str  # LiteLLM format (e.g., "openai/gpt-5", "gemini/gemini-2-pro")
+    )
+    short_name: str  # Display name for UI (e.g., "Sonnet 4.5", "GPT-5")
 
 
 class ModelConfig(BaseModel):
     """A fully configured model with API key and settings."""
 
-    name: str  # Model identifier (e.g., "gpt-5", "claude-opus-4-1")
-    provider: ProviderType
+    name: ModelName  # Model identifier
+    provider: ProviderType  # Actual LLM provider (openai, anthropic, google)
+    key_provider: KeyProvider  # Authentication method (byok or shotgun)
     max_input_tokens: int
     max_output_tokens: int
     api_key: str
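ModelConfig now distinguishes provider (which vendor actually serves the model) from key_provider (how the request is authenticated: a user-supplied key or the Shotgun LiteLLM proxy). A hypothetical construction, using only the fields visible in this hunk and a placeholder key; the real class may carry additional fields:

    byok = ModelConfig(
        name=ModelName.GPT_5,
        provider=ProviderType.OPENAI,
        key_provider=KeyProvider.BYOK,   # user's own OpenAI key
        max_input_tokens=400_000,
        max_output_tokens=128_000,
        api_key="sk-placeholder",
    )
    # Same model routed through the Shotgun account instead (pydantic v2 copy):
    proxied = byok.model_copy(update={"key_provider": KeyProvider.SHOTGUN})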
@@ -47,7 +68,7 @@ class ModelConfig(BaseModel):
         from .provider import get_or_create_model
 
         self._model_instance = get_or_create_model(
-            self.provider, self.name, self.api_key
+            self.provider, self.key_provider, self.name, self.api_key
         )
         return self._model_instance
 
@@ -61,54 +82,64 @@ class ModelConfig(BaseModel):
         }
         return f"{provider_prefix[self.provider]}:{self.name}"
 
-    def get_model_settings(self, max_tokens: int | None = None) -> ModelSettings:
-        """Get ModelSettings with optional token override.
-
-        This provides flexibility for specific use cases that need different
-        token limits while defaulting to maximum utilization.
-
-        Args:
-            max_tokens: Optional override for max_tokens. If None, uses max_output_tokens
-
-        Returns:
-            ModelSettings configured with specified or maximum tokens
-        """
-        return ModelSettings(
-            max_tokens=max_tokens if max_tokens is not None else self.max_output_tokens
-        )
-
 
 # Model specifications registry (static metadata)
-MODEL_SPECS: dict[str, ModelSpec] = {
-    "gpt-5": ModelSpec(
-        name="gpt-5",
+MODEL_SPECS: dict[ModelName, ModelSpec] = {
+    ModelName.GPT_5: ModelSpec(
+        name=ModelName.GPT_5,
         provider=ProviderType.OPENAI,
         max_input_tokens=400_000,
         max_output_tokens=128_000,
+        litellm_proxy_model_name="openai/gpt-5",
+        short_name="GPT-5",
     ),
-    "gpt-4o": ModelSpec(
-        name="gpt-4o",
+    ModelName.GPT_5_MINI: ModelSpec(
+        name=ModelName.GPT_5_MINI,
         provider=ProviderType.OPENAI,
-        max_input_tokens=128_000,
-        max_output_tokens=16_000,
+        max_input_tokens=400_000,
+        max_output_tokens=128_000,
+        litellm_proxy_model_name="openai/gpt-5-mini",
+        short_name="GPT-5 Mini",
     ),
-    "claude-opus-4-1": ModelSpec(
-        name="claude-opus-4-1",
+    ModelName.CLAUDE_OPUS_4_1: ModelSpec(
+        name=ModelName.CLAUDE_OPUS_4_1,
         provider=ProviderType.ANTHROPIC,
         max_input_tokens=200_000,
         max_output_tokens=32_000,
+        litellm_proxy_model_name="anthropic/claude-opus-4-1",
+        short_name="Opus 4.1",
     ),
-    "claude-3-5-sonnet-latest": ModelSpec(
-        name="claude-3-5-sonnet-latest",
+    ModelName.CLAUDE_SONNET_4_5: ModelSpec(
+        name=ModelName.CLAUDE_SONNET_4_5,
         provider=ProviderType.ANTHROPIC,
         max_input_tokens=200_000,
-        max_output_tokens=8_192,
+        max_output_tokens=16_000,
+        litellm_proxy_model_name="anthropic/claude-sonnet-4-5",
+        short_name="Sonnet 4.5",
+    ),
+    ModelName.CLAUDE_HAIKU_4_5: ModelSpec(
+        name=ModelName.CLAUDE_HAIKU_4_5,
+        provider=ProviderType.ANTHROPIC,
+        max_input_tokens=200_000,
+        max_output_tokens=64_000,
+        litellm_proxy_model_name="anthropic/claude-haiku-4-5",
+        short_name="Haiku 4.5",
     ),
-    "gemini-2.5-pro": ModelSpec(
-        name="gemini-2.5-pro",
+    ModelName.GEMINI_2_5_PRO: ModelSpec(
+        name=ModelName.GEMINI_2_5_PRO,
         provider=ProviderType.GOOGLE,
         max_input_tokens=1_000_000,
         max_output_tokens=64_000,
+        litellm_proxy_model_name="gemini/gemini-2.5-pro",
+        short_name="Gemini 2.5 Pro",
+    ),
+    ModelName.GEMINI_2_5_FLASH: ModelSpec(
+        name=ModelName.GEMINI_2_5_FLASH,
+        provider=ProviderType.GOOGLE,
+        max_input_tokens=1_000_000,
+        max_output_tokens=64_000,
+        litellm_proxy_model_name="gemini/gemini-2.5-flash",
+        short_name="Gemini 2.5 Flash",
     ),
 }
 
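With litellm_proxy_model_name and short_name now on every ModelSpec, proxy routing and UI display can both be driven off MODEL_SPECS. A small illustrative sketch (formatting is made up; StrEnum members render as their values in f-strings):

    for spec in MODEL_SPECS.values():
        print(
            f"{spec.short_name:<16} {spec.provider:<10} "
            f"{spec.max_input_tokens:>9,} in / {spec.max_output_tokens:>7,} out"
        )
    # e.g.:  Sonnet 4.5       anthropic    200,000 in /  16,000 out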
@@ -131,55 +162,54 @@ class GoogleConfig(BaseModel):
     api_key: SecretStr | None = None
 
 
+class ShotgunAccountConfig(BaseModel):
+    """Configuration for Shotgun Account (LiteLLM proxy)."""
+
+    api_key: SecretStr | None = None
+    supabase_jwt: SecretStr | None = Field(
+        default=None, description="Supabase authentication JWT"
+    )
+
+
+class MarketingMessageRecord(BaseModel):
+    """Record of when a marketing message was shown to the user."""
+
+    shown_at: datetime = Field(description="Timestamp when the message was shown")
+
+
+class MarketingConfig(BaseModel):
+    """Configuration for marketing messages shown to users."""
+
+    messages: dict[str, MarketingMessageRecord] = Field(
+        default_factory=dict,
+        description="Tracking which marketing messages have been shown. Key is message ID (e.g., 'github_star_v1')",
+    )
+
+
 class ShotgunConfig(BaseModel):
     """Main configuration for Shotgun CLI."""
 
     openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
     anthropic: AnthropicConfig = Field(default_factory=AnthropicConfig)
     google: GoogleConfig = Field(default_factory=GoogleConfig)
-    default_provider: ProviderType = Field(
-        default=ProviderType.OPENAI, description="Default AI provider to use"
+    shotgun: ShotgunAccountConfig = Field(default_factory=ShotgunAccountConfig)
+    selected_model: ModelName | None = Field(
+        default=None,
+        description="User-selected model",
+    )
+    shotgun_instance_id: str = Field(
+        description="Unique shotgun instance identifier (also used for anonymous telemetry)",
+    )
+    config_version: int = Field(default=4, description="Configuration schema version")
+    shown_welcome_screen: bool = Field(
+        default=False,
+        description="Whether the welcome screen has been shown to the user",
+    )
+    shown_onboarding_popup: datetime | None = Field(
+        default=None,
+        description="Timestamp when the onboarding popup was shown to the user (ISO8601 format)",
     )
-    user_id: str = Field(description="Unique anonymous user identifier")
-    config_version: int = Field(default=1, description="Configuration schema version")
-
-
-async def shotgun_model_request(
-    model_config: ModelConfig,
-    messages: list[ModelMessage],
-    max_tokens: int | None = None,
-    **kwargs: Any,
-) -> ModelResponse:
-    """Model request wrapper that uses full token capacity by default.
-
-    This wrapper ensures all LLM calls in Shotgun use the maximum available
-    token capacity of each model, improving response quality and completeness.
-    The most common issue this fixes is truncated summaries that were cut off
-    at default token limits (e.g., 4096 for Claude models).
-
-    Args:
-        model_config: ModelConfig instance with model settings and API key
-        messages: Messages to send to the model
-        max_tokens: Optional override for max_tokens. If None, uses model's max_output_tokens
-        **kwargs: Additional arguments passed to model_request
-
-    Returns:
-        ModelResponse from the model
-
-    Example:
-        # Uses full token capacity (e.g., 4096 for Claude, 128k for GPT-5)
-        response = await shotgun_model_request(model_config, messages)
-
-        # Override for specific use case
-        response = await shotgun_model_request(model_config, messages, max_tokens=1000)
-    """
-    # Get properly configured ModelSettings with maximum or overridden token limit
-    model_settings = model_config.get_model_settings(max_tokens)
-
-    # Make the model request with full token utilization
-    return await model_request(
-        model=model_config.model_instance,
-        messages=messages,
-        model_settings=model_settings,
-        **kwargs,
+    marketing: MarketingConfig = Field(
+        default_factory=MarketingConfig,
+        description="Marketing messages configuration and tracking",
     )
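The net effect on ShotgunConfig: default_provider and user_id are gone, replaced by selected_model, shotgun_instance_id, and onboarding/marketing tracking, with config_version bumped to 4; the shotgun_model_request wrapper is removed outright. A hypothetical construction of the new config with placeholder values (classes from this diff, values invented):

    from datetime import datetime, timezone
    from pydantic import SecretStr

    config = ShotgunConfig(
        shotgun_instance_id="instance-123",  # unique per install; generation scheme not shown here
        selected_model=ModelName.GPT_5_MINI,
        shotgun=ShotgunAccountConfig(api_key=SecretStr("placeholder")),
    )
    config.marketing.messages["github_star_v1"] = MarketingMessageRecord(
        shown_at=datetime.now(timezone.utc)
    )
    assert config.config_version == 4  # schema version default in this diff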