shotgun-sh 0.2.11.dev5__py3-none-any.whl → 0.2.17.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

@@ -2,6 +2,7 @@
 
 import asyncio
 import logging
+import time
 from datetime import datetime, timezone
 from pathlib import Path
 from typing import cast
@@ -11,6 +12,7 @@ from pydantic_ai.messages import (
     ModelRequest,
     ModelResponse,
     TextPart,
+    ToolCallPart,
     ToolReturnPart,
     UserPromptPart,
 )
@@ -47,6 +49,7 @@ from shotgun.codebase.core.manager import (
     CodebaseGraphManager,
 )
 from shotgun.codebase.models import IndexProgress, ProgressPhase
+from shotgun.exceptions import ContextSizeLimitExceeded
 from shotgun.posthog_telemetry import track_event
 from shotgun.sdk.codebase import CodebaseSDK
 from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
@@ -101,7 +104,6 @@ class ChatScreen(Screen[None]):
     history: PromptHistory = PromptHistory()
     messages = reactive(list[ModelMessage | HintMessage]())
     indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
-    partial_message: reactive[ModelMessage | None] = reactive(None)
 
     # Q&A mode state (for structured output clarifying questions)
     qa_mode = reactive(False)
@@ -112,6 +114,10 @@ class ChatScreen(Screen[None]):
     # Working state - keep reactive for Textual watchers
     working = reactive(False)
 
+    # Throttle context indicator updates (in seconds)
+    _last_context_update: float = 0.0
+    _context_update_throttle: float = 5.0  # 5 seconds
+
     def __init__(
         self,
         agent_manager: AgentManager,
@@ -572,8 +578,6 @@ class ChatScreen(Screen[None]):
 
     @on(PartialResponseMessage)
     def handle_partial_response(self, event: PartialResponseMessage) -> None:
-        self.partial_message = event.message
-
         # Filter event.messages to exclude ModelRequest with only ToolReturnPart
         # These are intermediate tool results that would render as empty (UserQuestionWidget
         # filters out ToolReturnPart in format_prompt_parts), causing user messages to disappear
@@ -597,16 +601,33 @@ class ChatScreen(Screen[None]):
         )
 
         # Use widget coordinator to set partial response
-        self.widget_coordinator.set_partial_response(
-            self.partial_message, new_message_list
+        self.widget_coordinator.set_partial_response(event.message, new_message_list)
+
+        # Skip context updates for file write operations (they don't add to input context)
+        has_file_write = any(
+            isinstance(msg, ModelResponse)
+            and any(
+                isinstance(part, ToolCallPart)
+                and part.tool_name in ("write_file", "append_file")
+                for part in msg.parts
+            )
+            for msg in event.messages
         )
 
-        # Update context indicator with full message history including streaming messages
-        # Combine existing agent history with new streaming messages for accurate token count
-        combined_agent_history = self.agent_manager.message_history + event.messages
-        self.update_context_indicator_with_messages(
-            combined_agent_history, new_message_list
-        )
+        if has_file_write:
+            return  # Skip context update for file writes
+
+        # Throttle context indicator updates to improve performance during streaming
+        # Only update at most once per 5 seconds to avoid excessive token calculations
+        current_time = time.time()
+        if current_time - self._last_context_update >= self._context_update_throttle:
+            self._last_context_update = current_time
+            # Update context indicator with full message history including streaming messages
+            # Combine existing agent history with new streaming messages for accurate token count
+            combined_agent_history = self.agent_manager.message_history + event.messages
+            self.update_context_indicator_with_messages(
+                combined_agent_history, new_message_list
+            )
 
     def _clear_partial_response(self) -> None:
         # Use widget coordinator to clear partial response
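
The hunk above replaces the per-chunk context recount with a time-based throttle keyed off the new `_last_context_update` / `_context_update_throttle` attributes. A minimal, self-contained sketch of that pattern (illustrative only; `ThrottledUpdater` and `maybe_update` are placeholder names, not the package's API):

```python
import time


class ThrottledUpdater:
    """Runs an expensive update at most once per `interval` seconds."""

    def __init__(self, interval: float = 5.0) -> None:
        self._interval = interval
        self._last_update = 0.0

    def maybe_update(self, update_fn) -> bool:
        now = time.time()
        if now - self._last_update >= self._interval:
            self._last_update = now
            update_fn()  # e.g. recompute token counts for the context indicator
            return True
        return False


if __name__ == "__main__":
    updater = ThrottledUpdater(interval=5.0)
    for _ in range(3):
        # Only the first call fires; the rest fall inside the 5-second window.
        updater.maybe_update(lambda: print("expensive recount"))
```

The same idea applies in the diff: during streaming, the token recount runs at most once per five-second window, and is skipped entirely for `write_file` / `append_file` tool calls.
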
@@ -666,32 +687,42 @@ class ChatScreen(Screen[None]):
         self.update_context_indicator()
 
         # If there are file operations, add a message showing the modified files
+        # Skip if hint was already added by agent_manager (e.g., in QA mode)
         if event.file_operations:
-            chat_history = self.query_one(ChatHistory)
-            if chat_history.vertical_tail:
-                tracker = FileOperationTracker(operations=event.file_operations)
-                display_path = tracker.get_display_path()
-
-                if display_path:
-                    # Create a simple markdown message with the file path
-                    # The terminal emulator will make this clickable automatically
-                    path_obj = Path(display_path)
-
-                    if len(event.file_operations) == 1:
-                        message = f"📝 Modified: `{display_path}`"
-                    else:
-                        num_files = len({op.file_path for op in event.file_operations})
-                        if path_obj.is_dir():
-                            message = (
-                                f"📁 Modified {num_files} files in: `{display_path}`"
-                            )
+            # Check if file operation hint already exists in recent messages
+            file_hint_exists = any(
+                isinstance(msg, HintMessage)
+                and (
+                    msg.message.startswith("📝 Modified:")
+                    or msg.message.startswith("📁 Modified")
+                )
+                for msg in event.messages[-5:]  # Check last 5 messages
+            )
+
+            if not file_hint_exists:
+                chat_history = self.query_one(ChatHistory)
+                if chat_history.vertical_tail:
+                    tracker = FileOperationTracker(operations=event.file_operations)
+                    display_path = tracker.get_display_path()
+
+                    if display_path:
+                        # Create a simple markdown message with the file path
+                        # The terminal emulator will make this clickable automatically
+                        path_obj = Path(display_path)
+
+                        if len(event.file_operations) == 1:
+                            message = f"📝 Modified: `{display_path}`"
                         else:
-                            # Common path is a file, show parent directory
-                            message = (
-                                f"📁 Modified {num_files} files in: `{path_obj.parent}`"
+                            num_files = len(
+                                {op.file_path for op in event.file_operations}
                             )
+                            if path_obj.is_dir():
+                                message = f"📁 Modified {num_files} files in: `{display_path}`"
+                            else:
+                                # Common path is a file, show parent directory
+                                message = f"📁 Modified {num_files} files in: `{path_obj.parent}`"
 
-                    self.mount_hint(message)
+                        self.mount_hint(message)
 
         # Check and display any marketing messages
         from shotgun.tui.app import ShotgunApp
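
The reworked hint logic above scans the last five messages for an existing `📝 Modified:` / `📁 Modified` hint before mounting a new one, so the same file notice is not shown twice. A rough standalone sketch of that guard (simplified stand-in types, not the real widget models):

```python
from dataclasses import dataclass


@dataclass
class HintMessage:
    """Stand-in for the chat hint type; only the message text matters here."""

    message: str


def hint_already_shown(messages: list[object], prefixes: tuple[str, ...], window: int = 5) -> bool:
    """Return True if any of the last `window` messages is a hint starting with one of `prefixes`."""
    return any(
        isinstance(msg, HintMessage) and msg.message.startswith(prefixes)
        for msg in messages[-window:]
    )


history = [HintMessage("📝 Modified: `src/app.py`")]
# True, so a second "Modified" hint would be skipped.
print(hint_already_shown(history, ("📝 Modified:", "📁 Modified")))
```
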
@@ -1117,6 +1148,27 @@ class ChatScreen(Screen[None]):
         except asyncio.CancelledError:
             # Handle cancellation gracefully - DO NOT re-raise
             self.mount_hint("⚠️ Operation cancelled by user")
+        except ContextSizeLimitExceeded as e:
+            # User-friendly error with actionable options
+            hint = (
+                f"⚠️ **Context too large for {e.model_name}**\n\n"
+                f"Your conversation history exceeds this model's limit ({e.max_tokens:,} tokens).\n\n"
+                f"**Choose an action:**\n\n"
+                f"1. Switch to a larger model (`Ctrl+P` → Change Model)\n"
+                f"2. Switch to a larger model, compact (`/compact`), then switch back to {e.model_name}\n"
+                f"3. Clear conversation (`/clear`)\n"
+            )
+
+            self.mount_hint(hint)
+
+            # Log for debugging (won't send to Sentry due to ErrorNotPickedUpBySentry)
+            logger.info(
+                "Context size limit exceeded",
+                extra={
+                    "max_tokens": e.max_tokens,
+                    "model_name": e.model_name,
+                },
+            )
         except Exception as e:
             # Log with full stack trace to shotgun.log
             logger.exception(
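
The new `except` branch reads `model_name` and `max_tokens` off the raised exception. The real class lives in `shotgun.exceptions`; the sketch below only illustrates a shape consistent with how the handler uses it, and is not the package's actual definition:

```python
class ContextSizeLimitExceeded(Exception):
    """Illustrative shape only: carries the model name and its token limit."""

    def __init__(self, model_name: str, max_tokens: int) -> None:
        self.model_name = model_name
        self.max_tokens = max_tokens
        super().__init__(
            f"Conversation exceeds the {max_tokens:,}-token limit of {model_name}"
        )


try:
    raise ContextSizeLimitExceeded(model_name="some-model", max_tokens=128_000)
except ContextSizeLimitExceeded as e:
    # Mirrors how the handler above builds its hint text.
    print(f"⚠️ Context too large for {e.model_name} ({e.max_tokens:,} tokens)")
```
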
@@ -47,7 +47,6 @@ class ChatHistory(Widget):
         super().__init__()
         self.items: Sequence[ModelMessage | HintMessage] = []
         self.vertical_tail: VerticalTail | None = None
-        self.partial_response = None
         self._rendered_count = 0  # Track how many messages have been mounted
 
     def compose(self) -> ComposeResult:
@@ -63,7 +62,7 @@ class ChatHistory(Widget):
                 yield HintMessageWidget(item)
             elif isinstance(item, ModelResponse):
                 yield AgentResponseWidget(item)
-        yield PartialResponseWidget(self.partial_response).data_bind(
+        yield PartialResponseWidget(None).data_bind(
             item=ChatHistory.partial_response
         )
 
@@ -166,8 +166,9 @@ class WidgetCoordinator:
 
         try:
             chat_history = self.screen.query_one(ChatHistory)
-            if message:
-                chat_history.partial_response = message
+            # Set the reactive attribute to trigger the PartialResponseWidget update
+            chat_history.partial_response = message
+            # Also update the full message list
             chat_history.update_messages(messages)
         except Exception as e:
             logger.exception(f"Failed to set partial response: {e}")
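
The coordinator change drops the `if message:` guard so that a `None` value also reaches `chat_history.partial_response`. With a reactive attribute, the assignment itself is what notifies the bound `PartialResponseWidget`, so clearing only works if `None` is actually assigned. A plain-Python stand-in for that behaviour (a property plays the role of the reactive attribute; illustrative only, not Textual's implementation):

```python
class ChatHistoryStub:
    """Stand-in: the setter fires the "watcher" the way a reactive attribute would."""

    def __init__(self) -> None:
        self._partial = None

    @property
    def partial_response(self):
        return self._partial

    @partial_response.setter
    def partial_response(self, value) -> None:
        self._partial = value
        print(f"watcher fired: partial_response -> {value!r}")  # widget refresh would happen here


history = ChatHistoryStub()
history.partial_response = "streaming chunk"  # watcher fires, widget shows the partial text

message = None  # agent finished; the coordinator now clears the partial response
history.partial_response = message  # with the old `if message:` guard this assignment was skipped
```
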
@@ -0,0 +1,465 @@
+Metadata-Version: 2.4
+Name: shotgun-sh
+Version: 0.2.17.dev1
+Summary: AI-powered research, planning, and task management CLI tool
+Project-URL: Homepage, https://shotgun.sh/
+Project-URL: Repository, https://github.com/shotgun-sh/shotgun
+Project-URL: Issues, https://github.com/shotgun-sh/shotgun-alpha/issues
+Project-URL: Discord, https://discord.gg/5RmY6J2N7s
+Author-email: "Proofs.io" <hello@proofs.io>
+License: MIT
+License-File: LICENSE
+Keywords: agent,ai,cli,llm,planning,productivity,pydantic-ai,research,task-management
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Utilities
+Requires-Python: >=3.11
+Requires-Dist: aiofiles>=24.0.0
+Requires-Dist: anthropic>=0.39.0
+Requires-Dist: dependency-injector>=4.41.0
+Requires-Dist: genai-prices>=0.0.27
+Requires-Dist: httpx>=0.27.0
+Requires-Dist: jinja2>=3.1.0
+Requires-Dist: kuzu>=0.7.0
+Requires-Dist: logfire>=2.0.0
+Requires-Dist: openai>=1.0.0
+Requires-Dist: packaging>=23.0
+Requires-Dist: posthog>=3.0.0
+Requires-Dist: pydantic-ai>=0.0.14
+Requires-Dist: pydantic-settings>=2.0.0
+Requires-Dist: rich>=13.0.0
+Requires-Dist: sentencepiece>=0.2.0
+Requires-Dist: sentry-sdk[pure-eval]>=2.0.0
+Requires-Dist: tenacity>=8.0.0
+Requires-Dist: textual-dev>=1.7.0
+Requires-Dist: textual-serve>=0.1.0
+Requires-Dist: textual>=6.1.0
+Requires-Dist: tiktoken>=0.7.0
+Requires-Dist: tree-sitter-go>=0.23.0
+Requires-Dist: tree-sitter-javascript>=0.23.0
+Requires-Dist: tree-sitter-python>=0.23.0
+Requires-Dist: tree-sitter-rust>=0.23.0
+Requires-Dist: tree-sitter-typescript>=0.23.0
+Requires-Dist: tree-sitter>=0.21.0
+Requires-Dist: typer>=0.12.0
+Requires-Dist: watchdog>=4.0.0
+Provides-Extra: dev
+Requires-Dist: commitizen>=3.13.0; extra == 'dev'
+Requires-Dist: lefthook>=1.12.0; extra == 'dev'
+Requires-Dist: mypy>=1.11.0; extra == 'dev'
+Requires-Dist: ruff>=0.6.0; extra == 'dev'
+Description-Content-Type: text/markdown
+
+<div align="center">
+<img width="400" height="150" alt="Shotgun logo transparent background" src="https://github.com/user-attachments/assets/08f9ccd5-f2e8-4bf4-9cb2-2f0de866a76a" />
+
+### Spec-Driven Development
+
+
+**Write codebase-aware specs for AI coding agents (Codex, Cursor, Claude Code) so they don't derail.**
+<p align="center">
+  <a href="https://github.com/shotgun-sh/shotgun">
+    <img src="https://img.shields.io/badge/python-3.11+-blue?style=flat&logo=python&logoColor=white" />
+  </a>
+  <a href="https://www.producthunt.com/products/shotgun-cli/launches/shotgun-cli">
+    <img src="https://img.shields.io/badge/Product%20Hunt-%237%20Product%20of%20the%20Day-FF6154?style=flat&logo=producthunt&logoColor=white" />
+  </a>
+  <a href="https://github.com/shotgun-sh/shotgun?tab=contributing-ov-file">
+    <img src="https://img.shields.io/badge/PRs-welcome-brightgreen?style=flat" />
+  </a>
+  <a href="https://github.com/shotgun-sh/shotgun?tab=MIT-1-ov-file">
+    <img src="https://img.shields.io/badge/license-MIT-blue?style=flat" />
+  </a>
+  <a href="https://discord.com/invite/5RmY6J2N7s">
+    <img src="https://img.shields.io/badge/discord-150+%20online-5865F2?style=flat&logo=discord&logoColor=white" />
+  </a>
+</p>
+
+[![Website](https://img.shields.io/badge/-shotgun.sh-5865F2?style=social&logo=safari&logoColor=5865F2)](https://shotgun.sh) [![Follow @ShotgunCLI](https://img.shields.io/badge/Follow%20@ShotgunCLI-1DA1F2?style=social&logo=x&logoColor=000000)](https://x.com/ShotgunCLI) [![YouTube](https://img.shields.io/badge/-@shotgunCLI-FF0000?style=social&logo=youtube&logoColor=red)](https://www.youtube.com/@shotgunCLI)
+
+</div>
+
+---
+
+<table>
+<tr>
+<td>
+
+**Shotgun is a CLI tool** that generates codebase-aware specs for AI coding agents like Cursor, Claude Code, and Lovable. **It reads your entire repository**, researches how new features should fit your architecture, and produces technical specifications that keep AI agents on track—so they build what you actually want instead of derailing halfway through. **Bring your own key (BYOK) or use a Shotgun subscription — $10 for $10 in usage.**
+
+It includes research on existing patterns, implementation plans that respect your architecture, and task breakdowns ready to export as **AGENTS.md** files. Each spec is complete enough that your AI agent can work longer and further without losing context or creating conflicts.
+
+<p align="center">
+  <img src="https://github.com/user-attachments/assets/9c7ca014-1ed3-4935-b310-9147b275fdc7" alt="Shotgun Demo" />
+</p>
+
+</td>
+</tr>
+</table>
+
+---
+
+# 📦 Installation
+
+### 1. Install uv
+
+Shotgun runs via `uvx` or `uv tool install`. First, install `uv` for your platform:
+
+<table>
+<tr>
+<th>Platform</th>
+<th>Installation Command</th>
+</tr>
+<tr>
+<td><strong>macOS</strong> (Homebrew)</td>
+<td>
+
+```bash
+brew install uv
+```
+</td>
+</tr>
+<tr>
+<td><strong>macOS/Linux</strong> (curl)</td>
+<td>
+
+```bash
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+</td>
+</tr>
+<tr>
+<td><strong>Windows</strong> (PowerShell)</td>
+<td>
+
+```powershell
+powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
+```
+</td>
+</tr>
+</table>
+
+_💡 Restart your terminal after installation_
+
+### 2. Run Shotgun
+
+<table>
+<tr>
+<th>🚀 Try It Out (Ephemeral)</th>
+<th>⚡ Regular Use (Permanent)</th>
+</tr>
+<tr>
+<td>
+
+**Best for:** Testing Shotgun first
+
+```bash
+uvx shotgun-sh@latest
+```
+
+No installation needed, runs immediately
+
+</td>
+<td>
+
+**Best for:** Daily use
+
+```bash
+uv tool install shotgun-sh
+```
+
+Then run anywhere: ``` shotgun ```
+
+</td>
+</tr>
+</table>
+
+_**Why uv?** It's 10-100x faster than pip and handles binary wheels reliably—no cmake/build tool errors._
+
+### 3. Get Started
+
+When you launch Shotgun, it will guide you through:
+
+| Step | What Happens |
+|------|--------------|
+| **1. Codebase Indexing** | Builds a searchable graph of your entire repository |
+| **2. LLM Setup** | Configure OpenAI, Anthropic, or Gemini |
+| **3. First Research** | Start generating codebase-aware specs |
+
+_**💡 Pro tip:** Run Shotgun in your IDE's terminal for the best experience._
+
+> [!WARNING]
+> **Upgrading from alpha?** Uninstall the old version first:
+> ```bash
+> npm uninstall -g @proofs-io/shotgun @proofs-io/shotgun-server
+> ```
+
+---
+
+# 🎥 Demo
+
+<p align="center">
+  <a href="https://youtu.be/nR6iKbJ8l_I">
+    <img src="https://github.com/user-attachments/assets/37eae206-0d6f-4499-b980-2f33a5aed65d" alt="Watch the Shotgun demo" width="720" height="405">
+  </a>
+</p>
+
+_Click the image above to watch the full demo on YouTube_
+
+---
+
+# 🎯 Usage
+
+Shotgun's Terminal UI guides you through **5 specialized modes** — from research to export. Each mode has a dedicated AI agent optimized for that phase.
+
+### Launch Shotgun in your project directory:
+
+| Already Installed | First Time / Try It Out |
+|-------------------|------------------------|
+| `shotgun` | `uvx shotgun-sh@latest` |
+
+_The TUI opens automatically. **Press `Shift+Tab` to switch modes** or `Ctrl+P` for the command palette._
+
+### The 5-Phase Workflow
+
+<table>
+<tr>
+<td align="center"><b>🔬 Research</b><br/>Explore & understand</td>
+<td align="center">→</td>
+<td align="center"><b>📝 Specify</b><br/>Define requirements</td>
+<td align="center">→</td>
+<td align="center"><b>📋 Plan</b><br/>Create roadmap</td>
+<td align="center">→</td>
+<td align="center"><b>✅ Tasks</b><br/>Break into steps</td>
+<td align="center">→</td>
+<td align="center"><b>📤 Export</b><br/>Format for AI</td>
+</tr>
+</table>
+
+_Each phase builds on the previous one, creating a complete specification ready for AI coding agents._
+
+### Mode Reference
+
+| Mode | What It Does | Example Prompt | Output |
+|:-----|:-------------|:---------------|:-------|
+| **🔬&nbsp;Research** | Searches codebase + web, identifies patterns | `How do we handle authentication in this codebase?` | `research.md` |
+| **📝&nbsp;Specify** | Creates technical specs aware of architecture | `Add OAuth2 authentication with refresh token support` | `specification.md` |
+| **📋&nbsp;Plan** | Generates implementation roadmap | `Create an implementation plan for the payment system` | `plan.md` |
+| **✅&nbsp;Tasks** | Breaks plans into actionable items | `Break down the user dashboard plan into tasks` | `tasks.md` |
+| **📤&nbsp;Export** | Formats for AI coding agents | `Export everything to AGENTS.md` | `AGENTS.md` |
+
+_**Mode switching:** `Shift+Tab` cycles through modes_
+
+_**Visual status:** See current mode and progress at bottom_
+
+### ⌨️ Keyboard Shortcuts
+
+| Shortcut | Action |
+|----------|--------|
+| `Shift+Tab` | Switch modes |
+| `Ctrl+P` | Open command palette |
+| `Ctrl+C` | Cancel operation |
+| `Escape` | Exit Q&A / stop agent |
+| `Ctrl+U` | View usage stats |
+
+### Tips for Better Results
+
+| Do This | Not This |
+|---------|----------|
+| ✅ `Research how we handle auth` | ❌ Jump straight to building |
+| ✅ `Shotgun please ask me questions first` | ❌ Assume Shotgun knows your needs |
+| ✅ `I'm working on payments, need refunds` | ❌ `Add refunds` (no context) |
+| ✅ Follow Research → Specify → Plan → Tasks | ❌ Skip phases |
+
+**Result:** Your AI coding agent gets complete context—what exists, why, and what to build.
+
+**Note:** CLI available in [docs/CLI.md](docs/CLI.md), but TUI is recommended.
+
+---
+
+# ✨ Features
+
+### What Makes Shotgun Different
+
+<table>
+<tr>
+<th width="25%">Feature</th>
+<th width="35%">Shotgun</th>
+<th width="40%">Other Tools</th>
+</tr>
+
+<tr>
+<td><strong>Codebase Understanding</strong></td>
+<td>
+Reads your <strong>entire repository</strong> before generating specs. Finds existing patterns, dependencies, and architecture.
+</td>
+<td>
+Require manual context or search each time. No persistent understanding of your codebase structure.
+</td>
+</tr>
+
+<tr>
+<td><strong>Research Phase</strong></td>
+<td>
+Starts with research—discovers what you already have AND what exists externally before writing anything.
+</td>
+<td>
+Start at specification. Build first, discover problems later.
+</td>
+</tr>
+
+<tr>
+<td><strong>Dedicated Agents Per Mode</strong></td>
+<td>
+Each mode (research, spec, plan, tasks, export) uses a <strong>separate specialized agent</strong> with prompts tailored specifically for that phase. 100% user-controllable via mode switching.
+</td>
+<td>
+Single-agent or one-size-fits-all prompts.
+</td>
+</tr>
+
+<tr>
+<td><strong>Structured Workflow</strong></td>
+<td>
+5-phase journey with checkpoints: Research → Spec → Plan → Tasks → Export
+</td>
+<td>
+No structure. Just "prompt and hope."
+</td>
+</tr>
+
+<tr>
+<td><strong>Export Formats</strong></td>
+<td>
+<code>AGENTS.md</code> files ready for Cursor, Claude Code, Windsurf, Lovable—your choice of tool.
+</td>
+<td>
+Locked into specific IDE or coding agent.
+</td>
+</tr>
+
+</table>
+
+### Case Study - Real Example:
+
+We had to implement payments. Cursor, Claude Code, and Copilot all suggested building a custom payment proxy — 3-4 weeks of development.
+
+⭐ Shotgun's research found [LiteLLM Proxy](https://docs.litellm.ai/docs/simple_proxy) instead—**30 minutes to discover, 5 days to deploy, first customer in 14 hours.**
+
+**80% less dev time. Near-zero technical debt.**
+
+### **[📖 Read the full case study](docs/CASE_STUDY.md)**
+
+---
+
+# Use Cases
+
+- **🚀 Onboarding** - New developer? Shotgun maps your entire architecture and generates docs that actually match the code
+- **🔧 Refactoring** - Understand all dependencies before touching anything. Keep your refactor from becoming a rewrite
+- **🌱 Greenfield Projects** - Research existing solutions globally before writing line one
+- **➕ Adding Features** - Know exactly where your feature fits. Prevent duplicate functionality
+- **📦 Migration** - Map the old, plan the new, track the delta. Break migration into safe stages
+
+**📚 Want to see a detailed example?** Check out our [Case Study](docs/CASE_STUDY.md) showing Shotgun in action on a real-world project.
+
+---
+
+# FAQ
+
+**Q: Does Shotgun collect any stats or data?**
+
+A: We only gather minimal, anonymous events (e.g., install, server start, tool call). We don't collect the content itself—only that an event occurred. We use Sentry for error reporting to improve stability.
+
+**Q: Local LLMs?**
+
+A: Planned. We'll publish compatibility notes and local provider integrations.
+
+**Q: What LLM providers are supported?**
+
+A: Currently OpenAI, Anthropic (Claude), and Google Gemini. Local LLM support is on the roadmap.
+
+**Q: Can I use Shotgun offline?**
+
+A: You need an internet connection for LLM API calls, but your codebase stays local.
+
+**Q: How does the code graph work?**
+
+A: Shotgun indexes your codebase using tree-sitter for accurate parsing and creates a searchable graph of your code structure, dependencies, and relationships.
+
+---
+# Contributing
+
+Shotgun is open-source and we welcome contributions. Whether you're fixing bugs, proposing features, improving docs, or spreading the word—we'd love to have you as part of the community.
+
+### Ways to contribute:
+
+- **Bug Report:** Found an issue? [Create a bug report](https://github.com/shotgun-sh/shotgun/issues/new?template=bug_report.md)
+- **Feature Request:** Have an idea to make Shotgun better? [Submit a feature request](https://github.com/shotgun-sh/shotgun/issues/new?template=feature_request.md)
+- **Documentation:** See something missing in the docs? [Request documentation](https://github.com/shotgun-sh/shotgun/issues/new?template=documentation.md)
+
+**Not sure where to start?** Join our Discord and we'll help you get started!
+
+<div align="left">
+  <a href="https://discord.com/invite/5RmY6J2N7s">
+    <img src="https://img.shields.io/badge/Join%20our%20community-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join Discord" />
+  </a>
+</div>
+
+### Development Resources
+
+- **[Contributing Guide](docs/CONTRIBUTING.md)** - Setup, workflow, and guidelines
+- **[Git Hooks](docs/GIT_HOOKS.md)** - Lefthook, trufflehog, and security scanning
+- **[CI/CD](docs/CI_CD.md)** - GitHub Actions and automated testing
+- **[Observability](docs/OBSERVABILITY.md)** - Telemetry, Logfire, and monitoring
+- **[Docker](docs/DOCKER.md)** - Container setup and deployment
+
+---
+
+<div align="center">
+
+## 🚀 Ready to Stop AI Agents from Derailing?
+
+**Research → Specify → Plan → Tasks → Export** — Five phases that give AI agents the full picture.
+
+```bash
+uvx shotgun-sh@latest
+```
+
+
+### ⭐ Star us on GitHub
+
+
+<a href="https://github.com/shotgun-sh/shotgun">
+  <img src="https://img.shields.io/badge/⭐%20Star%20on%20GitHub-181717?style=for-the-badge&logo=github&logoColor=white" alt="Star Shotgun Repo" />
+</a>
+
+### Star History
+
+<a href="https://www.star-history.com/#shotgun-sh/shotgun&type=date&legend=bottom-right">
+  <picture>
+    <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=shotgun-sh/shotgun&type=date&theme=dark&legend=bottom-right" />
+    <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=shotgun-sh/shotgun&type=date&legend=bottom-right" />
+    <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=shotgun-sh/shotgun&type=date&legend=bottom-right" />
+  </picture>
+</a>
+
+</div>
+
+---
+
+**License:** MIT | **Python:** 3.11+ | **Homepage:** [shotgun.sh](https://shotgun.sh/)
+
+---
+
+## Uninstall
+
+```bash
+uv tool uninstall shotgun-sh
+```