shotgun-sh 0.2.3.dev2__py3-none-any.whl → 0.2.11.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic. Click here for more details.

Files changed (132):
  1. shotgun/agents/agent_manager.py +664 -75
  2. shotgun/agents/common.py +76 -70
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +78 -36
  5. shotgun/agents/config/models.py +41 -1
  6. shotgun/agents/config/provider.py +70 -15
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +14 -2
  18. shotgun/agents/history/token_counting/anthropic.py +49 -11
  19. shotgun/agents/history/token_counting/base.py +14 -3
  20. shotgun/agents/history/token_counting/openai.py +8 -0
  21. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  22. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  23. shotgun/agents/history/token_counting/utils.py +0 -3
  24. shotgun/agents/models.py +50 -2
  25. shotgun/agents/plan.py +6 -7
  26. shotgun/agents/research.py +7 -8
  27. shotgun/agents/specify.py +6 -7
  28. shotgun/agents/tasks.py +6 -7
  29. shotgun/agents/tools/__init__.py +0 -2
  30. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  31. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  32. shotgun/agents/tools/codebase/file_read.py +11 -2
  33. shotgun/agents/tools/codebase/query_graph.py +6 -0
  34. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  35. shotgun/agents/tools/file_management.py +82 -16
  36. shotgun/agents/tools/registry.py +217 -0
  37. shotgun/agents/tools/web_search/__init__.py +30 -18
  38. shotgun/agents/tools/web_search/anthropic.py +26 -5
  39. shotgun/agents/tools/web_search/gemini.py +23 -11
  40. shotgun/agents/tools/web_search/openai.py +22 -13
  41. shotgun/agents/tools/web_search/utils.py +2 -2
  42. shotgun/agents/usage_manager.py +16 -11
  43. shotgun/api_endpoints.py +7 -3
  44. shotgun/build_constants.py +1 -1
  45. shotgun/cli/clear.py +53 -0
  46. shotgun/cli/compact.py +186 -0
  47. shotgun/cli/config.py +8 -5
  48. shotgun/cli/context.py +111 -0
  49. shotgun/cli/export.py +1 -1
  50. shotgun/cli/feedback.py +4 -2
  51. shotgun/cli/models.py +1 -0
  52. shotgun/cli/plan.py +1 -1
  53. shotgun/cli/research.py +1 -1
  54. shotgun/cli/specify.py +1 -1
  55. shotgun/cli/tasks.py +1 -1
  56. shotgun/cli/update.py +16 -2
  57. shotgun/codebase/core/change_detector.py +5 -3
  58. shotgun/codebase/core/code_retrieval.py +4 -2
  59. shotgun/codebase/core/ingestor.py +10 -8
  60. shotgun/codebase/core/manager.py +13 -4
  61. shotgun/codebase/core/nl_query.py +1 -1
  62. shotgun/llm_proxy/__init__.py +5 -2
  63. shotgun/llm_proxy/clients.py +12 -7
  64. shotgun/logging_config.py +18 -27
  65. shotgun/main.py +73 -11
  66. shotgun/posthog_telemetry.py +23 -7
  67. shotgun/prompts/agents/export.j2 +18 -1
  68. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  69. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  70. shotgun/prompts/agents/plan.j2 +1 -1
  71. shotgun/prompts/agents/research.j2 +1 -1
  72. shotgun/prompts/agents/specify.j2 +270 -3
  73. shotgun/prompts/agents/state/system_state.j2 +4 -0
  74. shotgun/prompts/agents/tasks.j2 +1 -1
  75. shotgun/prompts/loader.py +2 -2
  76. shotgun/prompts/tools/web_search.j2 +14 -0
  77. shotgun/sentry_telemetry.py +7 -16
  78. shotgun/settings.py +238 -0
  79. shotgun/telemetry.py +18 -33
  80. shotgun/tui/app.py +243 -43
  81. shotgun/tui/commands/__init__.py +1 -1
  82. shotgun/tui/components/context_indicator.py +179 -0
  83. shotgun/tui/components/mode_indicator.py +70 -0
  84. shotgun/tui/components/status_bar.py +48 -0
  85. shotgun/tui/containers.py +91 -0
  86. shotgun/tui/dependencies.py +39 -0
  87. shotgun/tui/protocols.py +45 -0
  88. shotgun/tui/screens/chat/__init__.py +5 -0
  89. shotgun/tui/screens/chat/chat.tcss +54 -0
  90. shotgun/tui/screens/chat/chat_screen.py +1202 -0
  91. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  92. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  93. shotgun/tui/screens/chat/help_text.py +40 -0
  94. shotgun/tui/screens/chat/prompt_history.py +48 -0
  95. shotgun/tui/screens/chat.tcss +11 -0
  96. shotgun/tui/screens/chat_screen/command_providers.py +78 -2
  97. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  98. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  99. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  100. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  101. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  102. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  103. shotgun/tui/screens/confirmation_dialog.py +151 -0
  104. shotgun/tui/screens/feedback.py +4 -4
  105. shotgun/tui/screens/github_issue.py +102 -0
  106. shotgun/tui/screens/model_picker.py +49 -24
  107. shotgun/tui/screens/onboarding.py +431 -0
  108. shotgun/tui/screens/pipx_migration.py +153 -0
  109. shotgun/tui/screens/provider_config.py +50 -27
  110. shotgun/tui/screens/shotgun_auth.py +2 -2
  111. shotgun/tui/screens/welcome.py +32 -10
  112. shotgun/tui/services/__init__.py +5 -0
  113. shotgun/tui/services/conversation_service.py +184 -0
  114. shotgun/tui/state/__init__.py +7 -0
  115. shotgun/tui/state/processing_state.py +185 -0
  116. shotgun/tui/utils/mode_progress.py +14 -7
  117. shotgun/tui/widgets/__init__.py +5 -0
  118. shotgun/tui/widgets/widget_coordinator.py +262 -0
  119. shotgun/utils/datetime_utils.py +77 -0
  120. shotgun/utils/file_system_utils.py +22 -2
  121. shotgun/utils/marketing.py +110 -0
  122. shotgun/utils/update_checker.py +69 -14
  123. shotgun_sh-0.2.11.dev5.dist-info/METADATA +130 -0
  124. shotgun_sh-0.2.11.dev5.dist-info/RECORD +193 -0
  125. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/entry_points.txt +1 -0
  126. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/licenses/LICENSE +1 -1
  127. shotgun/agents/tools/user_interaction.py +0 -37
  128. shotgun/tui/screens/chat.py +0 -804
  129. shotgun/tui/screens/chat_screen/history.py +0 -352
  130. shotgun_sh-0.2.3.dev2.dist-info/METADATA +0 -467
  131. shotgun_sh-0.2.3.dev2.dist-info/RECORD +0 -154
  132. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/WHEEL +0 -0
@@ -1,5 +1,6 @@
1
1
  """Pydantic models for configuration."""
2
2
 
3
+ from datetime import datetime
3
4
  from enum import StrEnum
4
5
 
5
6
  from pydantic import BaseModel, Field, PrivateAttr, SecretStr
@@ -28,6 +29,7 @@ class ModelName(StrEnum):
28
29
  GPT_5_MINI = "gpt-5-mini"
29
30
  CLAUDE_OPUS_4_1 = "claude-opus-4-1"
30
31
  CLAUDE_SONNET_4_5 = "claude-sonnet-4-5"
32
+ CLAUDE_HAIKU_4_5 = "claude-haiku-4-5"
31
33
  GEMINI_2_5_PRO = "gemini-2.5-pro"
32
34
  GEMINI_2_5_FLASH = "gemini-2.5-flash"
33
35
 
@@ -42,6 +44,7 @@ class ModelSpec(BaseModel):
42
44
  litellm_proxy_model_name: (
43
45
  str # LiteLLM format (e.g., "openai/gpt-5", "gemini/gemini-2-pro")
44
46
  )
47
+ short_name: str # Display name for UI (e.g., "Sonnet 4.5", "GPT-5")
45
48
 
46
49
 
47
50
  class ModelConfig(BaseModel):
@@ -88,6 +91,7 @@ MODEL_SPECS: dict[ModelName, ModelSpec] = {
88
91
  max_input_tokens=400_000,
89
92
  max_output_tokens=128_000,
90
93
  litellm_proxy_model_name="openai/gpt-5",
94
+ short_name="GPT-5",
91
95
  ),
92
96
  ModelName.GPT_5_MINI: ModelSpec(
93
97
  name=ModelName.GPT_5_MINI,
@@ -95,6 +99,7 @@ MODEL_SPECS: dict[ModelName, ModelSpec] = {
95
99
  max_input_tokens=400_000,
96
100
  max_output_tokens=128_000,
97
101
  litellm_proxy_model_name="openai/gpt-5-mini",
102
+ short_name="GPT-5 Mini",
98
103
  ),
99
104
  ModelName.CLAUDE_OPUS_4_1: ModelSpec(
100
105
  name=ModelName.CLAUDE_OPUS_4_1,
@@ -102,6 +107,7 @@ MODEL_SPECS: dict[ModelName, ModelSpec] = {
102
107
  max_input_tokens=200_000,
103
108
  max_output_tokens=32_000,
104
109
  litellm_proxy_model_name="anthropic/claude-opus-4-1",
110
+ short_name="Opus 4.1",
105
111
  ),
106
112
  ModelName.CLAUDE_SONNET_4_5: ModelSpec(
107
113
  name=ModelName.CLAUDE_SONNET_4_5,
@@ -109,6 +115,15 @@ MODEL_SPECS: dict[ModelName, ModelSpec] = {
109
115
  max_input_tokens=200_000,
110
116
  max_output_tokens=16_000,
111
117
  litellm_proxy_model_name="anthropic/claude-sonnet-4-5",
118
+ short_name="Sonnet 4.5",
119
+ ),
120
+ ModelName.CLAUDE_HAIKU_4_5: ModelSpec(
121
+ name=ModelName.CLAUDE_HAIKU_4_5,
122
+ provider=ProviderType.ANTHROPIC,
123
+ max_input_tokens=200_000,
124
+ max_output_tokens=64_000,
125
+ litellm_proxy_model_name="anthropic/claude-haiku-4-5",
126
+ short_name="Haiku 4.5",
112
127
  ),
113
128
  ModelName.GEMINI_2_5_PRO: ModelSpec(
114
129
  name=ModelName.GEMINI_2_5_PRO,
@@ -116,6 +131,7 @@ MODEL_SPECS: dict[ModelName, ModelSpec] = {
116
131
  max_input_tokens=1_000_000,
117
132
  max_output_tokens=64_000,
118
133
  litellm_proxy_model_name="gemini/gemini-2.5-pro",
134
+ short_name="Gemini 2.5 Pro",
119
135
  ),
120
136
  ModelName.GEMINI_2_5_FLASH: ModelSpec(
121
137
  name=ModelName.GEMINI_2_5_FLASH,
@@ -123,6 +139,7 @@ MODEL_SPECS: dict[ModelName, ModelSpec] = {
123
139
  max_input_tokens=1_000_000,
124
140
  max_output_tokens=64_000,
125
141
  litellm_proxy_model_name="gemini/gemini-2.5-flash",
142
+ short_name="Gemini 2.5 Flash",
126
143
  ),
127
144
  }
128
145
 
@@ -154,6 +171,21 @@ class ShotgunAccountConfig(BaseModel):
154
171
  )
155
172
 
156
173
 
174
+ class MarketingMessageRecord(BaseModel):
175
+ """Record of when a marketing message was shown to the user."""
176
+
177
+ shown_at: datetime = Field(description="Timestamp when the message was shown")
178
+
179
+
180
+ class MarketingConfig(BaseModel):
181
+ """Configuration for marketing messages shown to users."""
182
+
183
+ messages: dict[str, MarketingMessageRecord] = Field(
184
+ default_factory=dict,
185
+ description="Tracking which marketing messages have been shown. Key is message ID (e.g., 'github_star_v1')",
186
+ )
187
+
188
+
157
189
  class ShotgunConfig(BaseModel):
158
190
  """Main configuration for Shotgun CLI."""
159
191
 
@@ -168,8 +200,16 @@ class ShotgunConfig(BaseModel):
168
200
  shotgun_instance_id: str = Field(
169
201
  description="Unique shotgun instance identifier (also used for anonymous telemetry)",
170
202
  )
171
- config_version: int = Field(default=3, description="Configuration schema version")
203
+ config_version: int = Field(default=4, description="Configuration schema version")
172
204
  shown_welcome_screen: bool = Field(
173
205
  default=False,
174
206
  description="Whether the welcome screen has been shown to the user",
175
207
  )
208
+ shown_onboarding_popup: datetime | None = Field(
209
+ default=None,
210
+ description="Timestamp when the onboarding popup was shown to the user (ISO8601 format)",
211
+ )
212
+ marketing: MarketingConfig = Field(
213
+ default_factory=MarketingConfig,
214
+ description="Marketing messages configuration and tracking",
215
+ )
@@ -10,7 +10,10 @@ from pydantic_ai.providers.google import GoogleProvider
10
10
  from pydantic_ai.providers.openai import OpenAIProvider
11
11
  from pydantic_ai.settings import ModelSettings
12
12
 
13
- from shotgun.llm_proxy import create_litellm_provider
13
+ from shotgun.llm_proxy import (
14
+ create_anthropic_proxy_provider,
15
+ create_litellm_provider,
16
+ )
14
17
  from shotgun.logging_config import get_logger
15
18
 
16
19
  from .manager import get_config_manager
@@ -29,6 +32,34 @@ logger = get_logger(__name__)
29
32
  _model_cache: dict[tuple[ProviderType, KeyProvider, ModelName, str], Model] = {}
30
33
 
31
34
 
35
+ def get_default_model_for_provider(config: ShotgunConfig) -> ModelName:
36
+ """Get the default model based on which provider/account is configured.
37
+
38
+ Checks API keys in priority order and returns appropriate default model.
39
+ Treats Shotgun Account as a provider context.
40
+
41
+ Args:
42
+ config: Shotgun configuration containing API keys
43
+
44
+ Returns:
45
+ Default ModelName for the configured provider/account
46
+ """
47
+ # Priority 1: Shotgun Account
48
+ if _get_api_key(config.shotgun.api_key):
49
+ return ModelName.GPT_5
50
+
51
+ # Priority 2: Individual provider keys
52
+ if _get_api_key(config.anthropic.api_key):
53
+ return ModelName.CLAUDE_HAIKU_4_5
54
+ if _get_api_key(config.openai.api_key):
55
+ return ModelName.GPT_5
56
+ if _get_api_key(config.google.api_key):
57
+ return ModelName.GEMINI_2_5_PRO
58
+
59
+ # Fallback: system-wide default
60
+ return ModelName.CLAUDE_HAIKU_4_5
61
+
62
+
32
63
  def get_or_create_model(
33
64
  provider: ProviderType,
34
65
  key_provider: "KeyProvider",
@@ -72,19 +103,37 @@ def get_or_create_model(
72
103
 
73
104
  # Use LiteLLM proxy for Shotgun Account, native providers for BYOK
74
105
  if key_provider == KeyProvider.SHOTGUN:
75
- # Shotgun Account uses LiteLLM proxy for any model
106
+ # Shotgun Account uses LiteLLM proxy with native model types where possible
76
107
  if model_name in MODEL_SPECS:
77
108
  litellm_model_name = MODEL_SPECS[model_name].litellm_proxy_model_name
78
109
  else:
79
110
  # Fallback for unmapped models
80
111
  litellm_model_name = f"openai/{model_name.value}"
81
112
 
82
- litellm_provider = create_litellm_provider(api_key)
83
- _model_cache[cache_key] = OpenAIChatModel(
84
- litellm_model_name,
85
- provider=litellm_provider,
86
- settings=ModelSettings(max_tokens=max_tokens),
87
- )
113
+ # Use native provider types to preserve API formats and features
114
+ if provider == ProviderType.ANTHROPIC:
115
+ # Anthropic: Use native AnthropicProvider with /anthropic endpoint
116
+ # This preserves Anthropic-specific features like tool_choice
117
+ # Note: Web search for Shotgun Account uses Gemini only (not Anthropic)
118
+ # Note: Anthropic API expects model name without prefix (e.g., "claude-sonnet-4-5")
119
+ anthropic_provider = create_anthropic_proxy_provider(api_key)
120
+ _model_cache[cache_key] = AnthropicModel(
121
+ model_name.value, # Use model name without "anthropic/" prefix
122
+ provider=anthropic_provider,
123
+ settings=AnthropicModelSettings(
124
+ max_tokens=max_tokens,
125
+ timeout=600, # 10 minutes timeout for large responses
126
+ ),
127
+ )
128
+ else:
129
+ # OpenAI and Google: Use LiteLLMProvider (OpenAI-compatible format)
130
+ # Google's GoogleProvider doesn't support base_url, so use LiteLLM
131
+ litellm_provider = create_litellm_provider(api_key)
132
+ _model_cache[cache_key] = OpenAIChatModel(
133
+ litellm_model_name,
134
+ provider=litellm_provider,
135
+ settings=ModelSettings(max_tokens=max_tokens),
136
+ )
88
137
  elif key_provider == KeyProvider.BYOK:
89
138
  # Use native provider implementations with user's API keys
90
139
  if provider == ProviderType.OPENAI:
@@ -121,7 +170,7 @@ def get_or_create_model(
121
170
  return _model_cache[cache_key]
122
171
 
123
172
 
124
- def get_provider_model(
173
+ async def get_provider_model(
125
174
  provider_or_model: ProviderType | ModelName | None = None,
126
175
  ) -> ModelConfig:
127
176
  """Get a fully configured ModelConfig with API key and Model instance.
@@ -140,18 +189,24 @@ def get_provider_model(
140
189
  """
141
190
  config_manager = get_config_manager()
142
191
  # Use cached config for read-only access (performance)
143
- config = config_manager.load(force_reload=False)
192
+ config = await config_manager.load(force_reload=False)
144
193
 
145
194
  # Priority 1: Check if Shotgun key exists - if so, use it for ANY model
146
195
  shotgun_api_key = _get_api_key(config.shotgun.api_key)
147
196
  if shotgun_api_key:
148
- # Use selected model or default to claude-sonnet-4-5
149
- model_name = config.selected_model or ModelName.CLAUDE_SONNET_4_5
197
+ # Determine which model to use
198
+ if isinstance(provider_or_model, ModelName):
199
+ # Specific model requested - honor it (e.g., web search tools)
200
+ model_name = provider_or_model
201
+ else:
202
+ # No specific model requested - use selected or default
203
+ model_name = config.selected_model or ModelName.GPT_5
204
+
150
205
  if model_name not in MODEL_SPECS:
151
206
  raise ValueError(f"Model '{model_name.value}' not found")
152
207
  spec = MODEL_SPECS[model_name]
153
208
 
154
- # Use Shotgun Account with selected model (provider = actual LLM provider)
209
+ # Use Shotgun Account with determined model (provider = actual LLM provider)
155
210
  return ModelConfig(
156
211
  name=spec.name,
157
212
  provider=spec.provider, # Actual LLM provider (OPENAI/ANTHROPIC/GOOGLE)
@@ -220,8 +275,8 @@ def get_provider_model(
220
275
  if not api_key:
221
276
  raise ValueError("Anthropic API key not configured. Set via config.")
222
277
 
223
- # Use requested model or default to claude-sonnet-4-5
224
- model_name = requested_model if requested_model else ModelName.CLAUDE_SONNET_4_5
278
+ # Use requested model or default to claude-haiku-4-5
279
+ model_name = requested_model if requested_model else ModelName.CLAUDE_HAIKU_4_5
225
280
  if model_name not in MODEL_SPECS:
226
281
  raise ValueError(f"Model '{model_name.value}' not found")
227
282
  spec = MODEL_SPECS[model_name]
@@ -0,0 +1,28 @@
1
+ """Context analysis module for conversation composition statistics.
2
+
3
+ This module provides tools for analyzing conversation context usage, breaking down
4
+ token consumption by message type and tool category.
5
+ """
6
+
7
+ from .analyzer import ContextAnalyzer
8
+ from .constants import ToolCategory, get_tool_category
9
+ from .formatter import ContextFormatter
10
+ from .models import (
11
+ ContextAnalysis,
12
+ ContextAnalysisOutput,
13
+ ContextCompositionTelemetry,
14
+ MessageTypeStats,
15
+ TokenAllocation,
16
+ )
17
+
18
+ __all__ = [
19
+ "ContextAnalyzer",
20
+ "ContextAnalysis",
21
+ "ContextAnalysisOutput",
22
+ "ContextCompositionTelemetry",
23
+ "ContextFormatter",
24
+ "MessageTypeStats",
25
+ "TokenAllocation",
26
+ "ToolCategory",
27
+ "get_tool_category",
28
+ ]