shotgun-sh 0.2.6.dev1__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. shotgun/agents/agent_manager.py +694 -73
  2. shotgun/agents/common.py +69 -70
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +70 -35
  5. shotgun/agents/config/models.py +41 -1
  6. shotgun/agents/config/provider.py +33 -5
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +113 -5
  18. shotgun/agents/history/token_counting/anthropic.py +39 -3
  19. shotgun/agents/history/token_counting/base.py +14 -3
  20. shotgun/agents/history/token_counting/openai.py +11 -1
  21. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  22. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  23. shotgun/agents/history/token_counting/utils.py +0 -3
  24. shotgun/agents/models.py +50 -2
  25. shotgun/agents/plan.py +6 -7
  26. shotgun/agents/research.py +7 -8
  27. shotgun/agents/specify.py +6 -7
  28. shotgun/agents/tasks.py +6 -7
  29. shotgun/agents/tools/__init__.py +0 -2
  30. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  31. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  32. shotgun/agents/tools/codebase/file_read.py +11 -2
  33. shotgun/agents/tools/codebase/query_graph.py +6 -0
  34. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  35. shotgun/agents/tools/file_management.py +82 -16
  36. shotgun/agents/tools/registry.py +217 -0
  37. shotgun/agents/tools/web_search/__init__.py +8 -8
  38. shotgun/agents/tools/web_search/anthropic.py +8 -2
  39. shotgun/agents/tools/web_search/gemini.py +7 -1
  40. shotgun/agents/tools/web_search/openai.py +7 -1
  41. shotgun/agents/tools/web_search/utils.py +2 -2
  42. shotgun/agents/usage_manager.py +16 -11
  43. shotgun/api_endpoints.py +7 -3
  44. shotgun/build_constants.py +3 -3
  45. shotgun/cli/clear.py +53 -0
  46. shotgun/cli/compact.py +186 -0
  47. shotgun/cli/config.py +8 -5
  48. shotgun/cli/context.py +111 -0
  49. shotgun/cli/export.py +1 -1
  50. shotgun/cli/feedback.py +4 -2
  51. shotgun/cli/models.py +1 -0
  52. shotgun/cli/plan.py +1 -1
  53. shotgun/cli/research.py +1 -1
  54. shotgun/cli/specify.py +1 -1
  55. shotgun/cli/tasks.py +1 -1
  56. shotgun/cli/update.py +16 -2
  57. shotgun/codebase/core/change_detector.py +5 -3
  58. shotgun/codebase/core/code_retrieval.py +4 -2
  59. shotgun/codebase/core/ingestor.py +10 -8
  60. shotgun/codebase/core/manager.py +13 -4
  61. shotgun/codebase/core/nl_query.py +1 -1
  62. shotgun/exceptions.py +32 -0
  63. shotgun/logging_config.py +18 -27
  64. shotgun/main.py +73 -11
  65. shotgun/posthog_telemetry.py +37 -28
  66. shotgun/prompts/agents/export.j2 +18 -1
  67. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  68. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  69. shotgun/prompts/agents/plan.j2 +1 -1
  70. shotgun/prompts/agents/research.j2 +1 -1
  71. shotgun/prompts/agents/specify.j2 +270 -3
  72. shotgun/prompts/agents/tasks.j2 +1 -1
  73. shotgun/sentry_telemetry.py +163 -16
  74. shotgun/settings.py +238 -0
  75. shotgun/telemetry.py +18 -33
  76. shotgun/tui/app.py +243 -43
  77. shotgun/tui/commands/__init__.py +1 -1
  78. shotgun/tui/components/context_indicator.py +179 -0
  79. shotgun/tui/components/mode_indicator.py +70 -0
  80. shotgun/tui/components/status_bar.py +48 -0
  81. shotgun/tui/containers.py +91 -0
  82. shotgun/tui/dependencies.py +39 -0
  83. shotgun/tui/protocols.py +45 -0
  84. shotgun/tui/screens/chat/__init__.py +5 -0
  85. shotgun/tui/screens/chat/chat.tcss +54 -0
  86. shotgun/tui/screens/chat/chat_screen.py +1254 -0
  87. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  88. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  89. shotgun/tui/screens/chat/help_text.py +40 -0
  90. shotgun/tui/screens/chat/prompt_history.py +48 -0
  91. shotgun/tui/screens/chat.tcss +11 -0
  92. shotgun/tui/screens/chat_screen/command_providers.py +78 -2
  93. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  94. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  95. shotgun/tui/screens/chat_screen/history/chat_history.py +115 -0
  96. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  97. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  98. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  99. shotgun/tui/screens/confirmation_dialog.py +151 -0
  100. shotgun/tui/screens/feedback.py +4 -4
  101. shotgun/tui/screens/github_issue.py +102 -0
  102. shotgun/tui/screens/model_picker.py +49 -24
  103. shotgun/tui/screens/onboarding.py +431 -0
  104. shotgun/tui/screens/pipx_migration.py +153 -0
  105. shotgun/tui/screens/provider_config.py +50 -27
  106. shotgun/tui/screens/shotgun_auth.py +2 -2
  107. shotgun/tui/screens/welcome.py +23 -12
  108. shotgun/tui/services/__init__.py +5 -0
  109. shotgun/tui/services/conversation_service.py +184 -0
  110. shotgun/tui/state/__init__.py +7 -0
  111. shotgun/tui/state/processing_state.py +185 -0
  112. shotgun/tui/utils/mode_progress.py +14 -7
  113. shotgun/tui/widgets/__init__.py +5 -0
  114. shotgun/tui/widgets/widget_coordinator.py +263 -0
  115. shotgun/utils/file_system_utils.py +22 -2
  116. shotgun/utils/marketing.py +110 -0
  117. shotgun/utils/update_checker.py +69 -14
  118. shotgun_sh-0.2.17.dist-info/METADATA +465 -0
  119. shotgun_sh-0.2.17.dist-info/RECORD +194 -0
  120. {shotgun_sh-0.2.6.dev1.dist-info → shotgun_sh-0.2.17.dist-info}/entry_points.txt +1 -0
  121. {shotgun_sh-0.2.6.dev1.dist-info → shotgun_sh-0.2.17.dist-info}/licenses/LICENSE +1 -1
  122. shotgun/agents/tools/user_interaction.py +0 -37
  123. shotgun/tui/screens/chat.py +0 -804
  124. shotgun/tui/screens/chat_screen/history.py +0 -401
  125. shotgun_sh-0.2.6.dev1.dist-info/METADATA +0 -467
  126. shotgun_sh-0.2.6.dev1.dist-info/RECORD +0 -156
  127. {shotgun_sh-0.2.6.dev1.dist-info → shotgun_sh-0.2.17.dist-info}/WHEEL +0 -0
shotgun/cli/compact.py ADDED
@@ -0,0 +1,186 @@
1
+ """Compact command for shotgun CLI."""
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+ from typing import Annotated, Any
7
+
8
+ import typer
9
+ from pydantic_ai.usage import RequestUsage
10
+ from rich.console import Console
11
+
12
+ from shotgun.agents.config import get_provider_model
13
+ from shotgun.agents.conversation_manager import ConversationManager
14
+ from shotgun.agents.history.history_processors import token_limit_compactor
15
+ from shotgun.agents.history.token_estimation import estimate_tokens_from_messages
16
+ from shotgun.cli.models import OutputFormat
17
+ from shotgun.logging_config import get_logger
18
+
19
+ app = typer.Typer(
20
+ name="compact", help="Compact the conversation history", no_args_is_help=False
21
+ )
22
+ logger = get_logger(__name__)
23
+ console = Console()
24
+
25
+
26
@app.callback(invoke_without_command=True)
def compact(
    format: Annotated[
        OutputFormat,
        typer.Option(
            "--format",
            "-f",
            help="Output format: markdown or json",
        ),
    ] = OutputFormat.MARKDOWN,
) -> None:
    """Compact the current conversation history to reduce size.

    Compacts the conversation stored in ~/.shotgun-sh/conversation.json by
    summarizing older messages while keeping recent context intact, then
    writes the compacted conversation back to the same file.
    """
    try:
        stats = asyncio.run(compact_conversation())
        if format == OutputFormat.JSON:
            console.print_json(json.dumps(stats, indent=2))
        else:
            console.print(format_markdown(stats))
    except FileNotFoundError as e:
        # Expected when the user has never run the TUI: point them at it.
        console.print(
            f"[red]Error:[/red] {e}\n\n"
            "No conversation found. Start a TUI session first with: [cyan]shotgun[/cyan]",
            style="bold",
        )
        raise typer.Exit(code=1) from e
    except Exception as e:
        console.print(
            f"[red]Error:[/red] Failed to compact conversation: {e}", style="bold"
        )
        logger.debug("Full traceback:", exc_info=True)
        raise typer.Exit(code=1) from e
66
+
67
+
68
async def compact_conversation() -> dict[str, Any]:
    """Compact the stored conversation and report before/after statistics.

    Returns:
        Dictionary with compaction statistics including before/after metrics
    """
    conversation_file = Path.home() / ".shotgun-sh" / "conversation.json"
    if not conversation_file.exists():
        raise FileNotFoundError(f"Conversation file not found at {conversation_file}")

    manager = ConversationManager(conversation_file)
    conversation = await manager.load()
    if not conversation:
        raise ValueError("Conversation file is empty or corrupted")

    # Only agent-visible messages count toward the context window; UI-only
    # entries are excluded.
    agent_messages = conversation.get_agent_messages()
    if not agent_messages:
        raise ValueError("No agent messages found in conversation")

    model_config = await get_provider_model()

    # Metrics prior to compaction.
    messages_before = len(agent_messages)
    tokens_before = await estimate_tokens_from_messages(agent_messages, model_config)

    # token_limit_compactor only reads ctx.deps.llm_model and ctx.usage, so a
    # minimal stand-in avoids building full AgentDeps for the CLI path.
    class _CompactionContext:
        def __init__(self, model_config: Any, usage: RequestUsage) -> None:
            self.deps = type("Deps", (), {"llm_model": model_config})()
            self.usage = usage

    ctx = _CompactionContext(
        model_config, RequestUsage(input_tokens=tokens_before, output_tokens=0)
    )

    # force=True bypasses the usual "only compact near the limit" threshold.
    compacted = await token_limit_compactor(ctx, agent_messages, force=True)

    messages_after = len(compacted)
    tokens_after = await estimate_tokens_from_messages(compacted, model_config)

    def _pct_drop(before: int, after: int) -> float:
        # Percentage reduction; 0 when there was nothing to reduce.
        return ((before - after) / before) * 100 if before > 0 else 0

    message_reduction = _pct_drop(messages_before, messages_after)
    token_reduction = _pct_drop(tokens_before, tokens_after)

    # Persist the compacted history back to disk.
    conversation.set_agent_messages(compacted)
    await manager.save(conversation)

    logger.info(
        f"Compacted conversation: {messages_before} → {messages_after} messages "
        f"({message_reduction:.1f}% reduction)"
    )

    return {
        "success": True,
        "before": {
            "messages": messages_before,
            "estimated_tokens": tokens_before,
        },
        "after": {
            "messages": messages_after,
            "estimated_tokens": tokens_after,
        },
        "reduction": {
            "messages_percent": round(message_reduction, 1),
            "tokens_percent": round(token_reduction, 1),
        },
    }
158
+
159
+
160
def format_markdown(result: dict[str, Any]) -> str:
    """Render compaction statistics as a markdown report.

    Args:
        result: Dictionary with compaction statistics

    Returns:
        Formatted markdown string
    """
    before = result["before"]
    after = result["after"]
    reduction = result["reduction"]

    # Assemble line by line; the trailing "" yields the final newline.
    lines = [
        "# Conversation Compacted ✓",
        "",
        "## Before",
        f"- **Messages:** {before['messages']:,}",
        f"- **Estimated Tokens:** {before['estimated_tokens']:,}",
        "",
        "## After",
        f"- **Messages:** {after['messages']:,}",
        f"- **Estimated Tokens:** {after['estimated_tokens']:,}",
        "",
        "## Reduction",
        f"- **Messages:** {reduction['messages_percent']}%",
        f"- **Tokens:** {reduction['tokens_percent']}%",
        "",
    ]
    return "\n".join(lines)
shotgun/cli/config.py CHANGED
@@ -1,5 +1,6 @@
1
1
  """Configuration management CLI commands."""
2
2
 
3
+ import asyncio
3
4
  import json
4
5
  from typing import Annotated, Any
5
6
 
@@ -44,7 +45,7 @@ def init(
44
45
  console.print()
45
46
 
46
47
  # Initialize with defaults
47
- config_manager.initialize()
48
+ asyncio.run(config_manager.initialize())
48
49
 
49
50
  # Ask for provider
50
51
  provider_choices = ["openai", "anthropic", "google"]
@@ -76,7 +77,7 @@ def init(
76
77
 
77
78
  if api_key:
78
79
  # update_provider will automatically set selected_model for first provider
79
- config_manager.update_provider(provider, api_key=api_key)
80
+ asyncio.run(config_manager.update_provider(provider, api_key=api_key))
80
81
 
81
82
  console.print(
82
83
  f"\n✅ [bold green]Configuration saved to {config_manager.config_path}[/bold green]"
@@ -84,7 +85,7 @@ def init(
84
85
  console.print("🎯 You can now use Shotgun with your configured provider!")
85
86
 
86
87
  else:
87
- config_manager.initialize()
88
+ asyncio.run(config_manager.initialize())
88
89
  console.print(f"✅ Configuration initialized at {config_manager.config_path}")
89
90
 
90
91
 
@@ -112,7 +113,7 @@ def set(
112
113
 
113
114
  try:
114
115
  if api_key:
115
- config_manager.update_provider(provider, api_key=api_key)
116
+ asyncio.run(config_manager.update_provider(provider, api_key=api_key))
116
117
 
117
118
  console.print(f"✅ Configuration updated for {provider}")
118
119
 
@@ -133,8 +134,10 @@ def get(
133
134
  ] = False,
134
135
  ) -> None:
135
136
  """Display current configuration."""
137
+ import asyncio
138
+
136
139
  config_manager = get_config_manager()
137
- config = config_manager.load()
140
+ config = asyncio.run(config_manager.load())
138
141
 
139
142
  if json_output:
140
143
  # Convert to dict and mask secrets
shotgun/cli/context.py ADDED
@@ -0,0 +1,111 @@
1
+ """Context command for shotgun CLI."""
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+ from typing import Annotated
7
+
8
+ import typer
9
+ from rich.console import Console
10
+
11
+ from shotgun.agents.config import get_provider_model
12
+ from shotgun.agents.context_analyzer import (
13
+ ContextAnalysisOutput,
14
+ ContextAnalyzer,
15
+ ContextFormatter,
16
+ )
17
+ from shotgun.agents.conversation_manager import ConversationManager
18
+ from shotgun.cli.models import OutputFormat
19
+ from shotgun.logging_config import get_logger
20
+
21
+ app = typer.Typer(
22
+ name="context", help="Analyze conversation context usage", no_args_is_help=False
23
+ )
24
+ logger = get_logger(__name__)
25
+ console = Console()
26
+
27
+
28
@app.callback(invoke_without_command=True)
def context(
    format: Annotated[
        OutputFormat,
        typer.Option(
            "--format",
            "-f",
            help="Output format: markdown or json",
        ),
    ] = OutputFormat.MARKDOWN,
) -> None:
    """Analyze the current conversation's context usage.

    Reads the agent message history from ~/.shotgun-sh/conversation.json and
    reports a token-usage breakdown by message type. Only agent context is
    counted; UI elements such as hints are excluded.
    """
    try:
        report = asyncio.run(analyze_context())
        if format == OutputFormat.JSON:
            console.print_json(json.dumps(report.json_data, indent=2))
        else:
            # Printed as plain text on purpose: wrapping it in rich Markdown()
            # reflows the output and collapses the category lists inline.
            console.print(report.markdown)
    except FileNotFoundError as e:
        # Expected when the user has never run the TUI: point them at it.
        console.print(
            f"[red]Error:[/red] {e}\n\n"
            "No conversation found. Start a TUI session first with: [cyan]shotgun[/cyan]",
            style="bold",
        )
        raise typer.Exit(code=1) from e
    except Exception as e:
        console.print(f"[red]Error:[/red] Failed to analyze context: {e}", style="bold")
        logger.debug("Full traceback:", exc_info=True)
        raise typer.Exit(code=1) from e
66
+
67
+
68
async def analyze_context() -> ContextAnalysisOutput:
    """Analyze the stored conversation's context usage.

    Returns:
        ContextAnalysisOutput with both markdown and JSON representations of the analysis
    """
    conversation_path = Path.home() / ".shotgun-sh" / "conversation.json"
    if not conversation_path.exists():
        raise FileNotFoundError(f"Conversation file not found at {conversation_path}")

    conversation = await ConversationManager(conversation_path).load()
    if not conversation:
        raise ValueError("Conversation file is empty or corrupted")

    # Only agent-visible messages count; UI-only entries are excluded.
    agent_messages = conversation.get_agent_messages()
    if not agent_messages:
        raise ValueError("No agent messages found in conversation")

    # Default provider settings decide which model's limits apply.
    model_config = await get_provider_model()
    logger.debug(f"Using model: {model_config.name.value}")
    logger.debug(f"Provider: {model_config.provider.value}")
    logger.debug(f"Key provider: {model_config.key_provider.value}")
    logger.debug(f"Max input tokens: {model_config.max_input_tokens}")

    analyzer = ContextAnalyzer(model_config)
    # In CLI mode there are no hint messages, so the agent history doubles as
    # the UI history.
    analysis = await analyzer.analyze_conversation(agent_messages, list(agent_messages))

    return ContextAnalysisOutput(
        markdown=ContextFormatter.format_markdown(analysis),
        json_data=ContextFormatter.format_json(analysis),
    )
shotgun/cli/export.py CHANGED
@@ -63,7 +63,7 @@ def export(
63
63
  )
64
64
 
65
65
  # Create the export agent with deps and provider
66
- agent, deps = create_export_agent(agent_runtime_options, provider)
66
+ agent, deps = asyncio.run(create_export_agent(agent_runtime_options, provider))
67
67
 
68
68
  # Start export process
69
69
  logger.info("🎯 Starting export...")
shotgun/cli/feedback.py CHANGED
@@ -28,9 +28,11 @@ def send_feedback(
28
28
  ],
29
29
  ) -> None:
30
30
  """Initialize Shotgun configuration."""
31
+ import asyncio
32
+
31
33
  config_manager = get_config_manager()
32
- config_manager.load()
33
- shotgun_instance_id = config_manager.get_shotgun_instance_id()
34
+ asyncio.run(config_manager.load())
35
+ shotgun_instance_id = asyncio.run(config_manager.get_shotgun_instance_id())
34
36
 
35
37
  if not description:
36
38
  console.print(
shotgun/cli/models.py CHANGED
@@ -8,3 +8,4 @@ class OutputFormat(StrEnum):
8
8
 
9
9
  TEXT = "text"
10
10
  JSON = "json"
11
+ MARKDOWN = "markdown"
shotgun/cli/plan.py CHANGED
@@ -55,7 +55,7 @@ def plan(
55
55
  )
56
56
 
57
57
  # Create the plan agent with deps and provider
58
- agent, deps = create_plan_agent(agent_runtime_options, provider)
58
+ agent, deps = asyncio.run(create_plan_agent(agent_runtime_options, provider))
59
59
 
60
60
  # Start planning process
61
61
  logger.info("🎯 Starting planning...")
shotgun/cli/research.py CHANGED
@@ -73,7 +73,7 @@ async def async_research(
73
73
  agent_runtime_options = AgentRuntimeOptions(interactive_mode=not non_interactive)
74
74
 
75
75
  # Create the research agent with deps and provider
76
- agent, deps = create_research_agent(agent_runtime_options, provider)
76
+ agent, deps = await create_research_agent(agent_runtime_options, provider)
77
77
 
78
78
  # Start research process
79
79
  logger.info("🔬 Starting research...")
shotgun/cli/specify.py CHANGED
@@ -51,7 +51,7 @@ def specify(
51
51
  )
52
52
 
53
53
  # Create the specify agent with deps and provider
54
- agent, deps = create_specify_agent(agent_runtime_options, provider)
54
+ agent, deps = asyncio.run(create_specify_agent(agent_runtime_options, provider))
55
55
 
56
56
  # Start specification process
57
57
  logger.info("📋 Starting specification generation...")
shotgun/cli/tasks.py CHANGED
@@ -60,7 +60,7 @@ def tasks(
60
60
  )
61
61
 
62
62
  # Create the tasks agent with deps and provider
63
- agent, deps = create_tasks_agent(agent_runtime_options, provider)
63
+ agent, deps = asyncio.run(create_tasks_agent(agent_runtime_options, provider))
64
64
 
65
65
  # Start task creation process
66
66
  logger.info("🎯 Starting task creation...")
shotgun/cli/update.py CHANGED
@@ -45,7 +45,7 @@ def update(
45
45
 
46
46
  This command will:
47
47
  - Check PyPI for the latest version
48
- - Detect your installation method (pipx, pip, or venv)
48
+ - Detect your installation method (uvx, uv-tool, pipx, pip, or venv)
49
49
  - Perform the appropriate upgrade command
50
50
 
51
51
  Examples:
@@ -93,6 +93,8 @@ def update(
93
93
  )
94
94
  console.print(
95
95
  "Use --force to update anyway, or install the stable version with:\n"
96
+ " uv tool install shotgun-sh\n"
97
+ " or\n"
96
98
  " pipx install shotgun-sh\n"
97
99
  " or\n"
98
100
  " pip install shotgun-sh",
@@ -134,7 +136,19 @@ def update(
134
136
  console.print(f"\n[red]✗[/red] {message}", style="bold red")
135
137
 
136
138
  # Provide manual update instructions
137
- if method == "pipx":
139
+ if method == "uvx":
140
+ console.print(
141
+ "\n[yellow]Run uvx again to use the latest version:[/yellow]\n"
142
+ " uvx shotgun-sh\n"
143
+ "\n[yellow]Or install permanently:[/yellow]\n"
144
+ " uv tool install shotgun-sh"
145
+ )
146
+ elif method == "uv-tool":
147
+ console.print(
148
+ "\n[yellow]Try updating manually:[/yellow]\n"
149
+ " uv tool upgrade shotgun-sh"
150
+ )
151
+ elif method == "pipx":
138
152
  console.print(
139
153
  "\n[yellow]Try updating manually:[/yellow]\n"
140
154
  " pipx upgrade shotgun-sh"
@@ -6,6 +6,7 @@ from enum import Enum
6
6
  from pathlib import Path
7
7
  from typing import Any, cast
8
8
 
9
+ import aiofiles
9
10
  import kuzu
10
11
 
11
12
  from shotgun.logging_config import get_logger
@@ -301,7 +302,7 @@ class ChangeDetector:
301
302
  # Direct substring match
302
303
  return pattern in filepath
303
304
 
304
- def _calculate_file_hash(self, filepath: Path) -> str:
305
+ async def _calculate_file_hash(self, filepath: Path) -> str:
305
306
  """Calculate hash of file contents.
306
307
 
307
308
  Args:
@@ -311,8 +312,9 @@ class ChangeDetector:
311
312
  SHA256 hash of file contents
312
313
  """
313
314
  try:
314
- with open(filepath, "rb") as f:
315
- return hashlib.sha256(f.read()).hexdigest()
315
+ async with aiofiles.open(filepath, "rb") as f:
316
+ content = await f.read()
317
+ return hashlib.sha256(content).hexdigest()
316
318
  except Exception as e:
317
319
  logger.error(f"Failed to calculate hash for {filepath}: {e}")
318
320
  return ""
@@ -3,6 +3,7 @@
3
3
  from pathlib import Path
4
4
  from typing import TYPE_CHECKING
5
5
 
6
+ import aiofiles
6
7
  from pydantic import BaseModel
7
8
 
8
9
  from shotgun.logging_config import get_logger
@@ -141,8 +142,9 @@ async def retrieve_code_by_qualified_name(
141
142
 
142
143
  # Read the file and extract the snippet
143
144
  try:
144
- with full_path.open("r", encoding="utf-8") as f:
145
- all_lines = f.readlines()
145
+ async with aiofiles.open(full_path, encoding="utf-8") as f:
146
+ content = await f.read()
147
+ all_lines = content.splitlines(keepends=True)
146
148
 
147
149
  # Extract the relevant lines (1-indexed to 0-indexed)
148
150
  snippet_lines = all_lines[start_line - 1 : end_line]
@@ -1,5 +1,6 @@
1
1
  """Kuzu graph ingestor for building code knowledge graphs."""
2
2
 
3
+ import asyncio
3
4
  import hashlib
4
5
  import os
5
6
  import time
@@ -8,6 +9,7 @@ from collections import defaultdict
8
9
  from pathlib import Path
9
10
  from typing import Any
10
11
 
12
+ import aiofiles
11
13
  import kuzu
12
14
  from tree_sitter import Node, Parser, QueryCursor
13
15
 
@@ -619,7 +621,7 @@ class SimpleGraphBuilder:
619
621
  # Don't let progress callback errors crash the build
620
622
  logger.debug(f"Progress callback error: {e}")
621
623
 
622
- def run(self) -> None:
624
+ async def run(self) -> None:
623
625
  """Run the three-pass graph building process."""
624
626
  logger.info(f"Building graph for project: {self.project_name}")
625
627
 
@@ -629,7 +631,7 @@ class SimpleGraphBuilder:
629
631
 
630
632
  # Pass 2: Definitions
631
633
  logger.info("Pass 2: Processing files and extracting definitions...")
632
- self._process_files()
634
+ await self._process_files()
633
635
 
634
636
  # Pass 3: Relationships
635
637
  logger.info("Pass 3: Processing relationships (calls, imports)...")
@@ -771,7 +773,7 @@ class SimpleGraphBuilder:
771
773
  phase_complete=True,
772
774
  )
773
775
 
774
- def _process_files(self) -> None:
776
+ async def _process_files(self) -> None:
775
777
  """Second pass: Process files and extract definitions."""
776
778
  # First pass: Count total files
777
779
  total_files = 0
@@ -807,7 +809,7 @@ class SimpleGraphBuilder:
807
809
  lang_config = get_language_config(ext)
808
810
 
809
811
  if lang_config and lang_config.name in self.parsers:
810
- self._process_single_file(filepath, lang_config.name)
812
+ await self._process_single_file(filepath, lang_config.name)
811
813
  file_count += 1
812
814
 
813
815
  # Report progress after each file
@@ -832,7 +834,7 @@ class SimpleGraphBuilder:
832
834
  phase_complete=True,
833
835
  )
834
836
 
835
- def _process_single_file(self, filepath: Path, language: str) -> None:
837
+ async def _process_single_file(self, filepath: Path, language: str) -> None:
836
838
  """Process a single file."""
837
839
  relative_path = filepath.relative_to(self.repo_path)
838
840
  relative_path_str = str(relative_path).replace(os.sep, "/")
@@ -873,8 +875,8 @@ class SimpleGraphBuilder:
873
875
 
874
876
  # Parse file
875
877
  try:
876
- with open(filepath, "rb") as f:
877
- content = f.read()
878
+ async with aiofiles.open(filepath, "rb") as f:
879
+ content = await f.read()
878
880
 
879
881
  parser = self.parsers[language]
880
882
  tree = parser.parse(content)
@@ -1636,7 +1638,7 @@ class CodebaseIngestor:
1636
1638
  )
1637
1639
  if self.project_name:
1638
1640
  builder.project_name = self.project_name
1639
- builder.run()
1641
+ asyncio.run(builder.run())
1640
1642
 
1641
1643
  logger.info(f"Graph successfully created at: {self.db_path}")
1642
1644
 
@@ -371,7 +371,16 @@ class CodebaseGraphManager:
371
371
  )
372
372
  import shutil
373
373
 
374
- shutil.rmtree(graph_path)
374
+ # Handle both files and directories (kuzu v0.11.2+ uses files)
375
+ if graph_path.is_file():
376
+ graph_path.unlink() # Delete file
377
+ # Also delete WAL file if it exists
378
+ wal_path = graph_path.with_suffix(graph_path.suffix + ".wal")
379
+ if wal_path.exists():
380
+ wal_path.unlink()
381
+ logger.debug(f"Deleted WAL file: {wal_path}")
382
+ else:
383
+ shutil.rmtree(graph_path) # Delete directory
375
384
 
376
385
  # Import the builder from local core module
377
386
  from shotgun.codebase.core import CodebaseIngestor
@@ -760,7 +769,7 @@ class CodebaseGraphManager:
760
769
 
761
770
  lang_config = get_language_config(full_path.suffix)
762
771
  if lang_config and lang_config.name in parsers:
763
- builder._process_single_file(full_path, lang_config.name)
772
+ await builder._process_single_file(full_path, lang_config.name)
764
773
  stats["nodes_modified"] += 1 # Approximate
765
774
 
766
775
  # Process additions
@@ -775,7 +784,7 @@ class CodebaseGraphManager:
775
784
 
776
785
  lang_config = get_language_config(full_path.suffix)
777
786
  if lang_config and lang_config.name in parsers:
778
- builder._process_single_file(full_path, lang_config.name)
787
+ await builder._process_single_file(full_path, lang_config.name)
779
788
  stats["nodes_added"] += 1 # Approximate
780
789
 
781
790
  # Flush all pending operations
@@ -1742,7 +1751,7 @@ class CodebaseGraphManager:
1742
1751
  )
1743
1752
 
1744
1753
  # Build the graph
1745
- builder.run()
1754
+ asyncio.run(builder.run())
1746
1755
 
1747
1756
  # Run build in thread pool
1748
1757
  await anyio.to_thread.run_sync(_build_graph)
@@ -34,7 +34,7 @@ async def llm_cypher_prompt(
34
34
  Returns:
35
35
  CypherGenerationResponse with cypher_query, can_generate flag, and reason if not
36
36
  """
37
- model_config = get_provider_model()
37
+ model_config = await get_provider_model()
38
38
 
39
39
  # Create an agent with structured output for Cypher generation
40
40
  cypher_agent = Agent(
shotgun/exceptions.py ADDED
@@ -0,0 +1,32 @@
1
+ """General exceptions for Shotgun application."""
2
+
3
+
4
class ErrorNotPickedUpBySentry(Exception):  # noqa: N818
    """Marker base class for expected, user-actionable failures.

    Subclasses represent conditions the user must resolve themselves, not
    bugs worth tracking, so they are excluded from Sentry reporting.
    """


class ContextSizeLimitExceeded(ErrorNotPickedUpBySentry):
    """Raised when the conversation no longer fits the model's context limits.

    The user can recover by switching to a larger-context model, by
    temporarily switching to a larger model and compacting the conversation,
    or by clearing the conversation and starting fresh.
    """

    def __init__(self, model_name: str, max_tokens: int):
        """Record the offending model and its ceiling, then build the message.

        Args:
            model_name: Name of the model whose limit was exceeded
            max_tokens: Maximum tokens allowed by the model
        """
        self.model_name = model_name
        self.max_tokens = max_tokens
        super().__init__(
            f"Context too large for {model_name} (limit: {max_tokens:,} tokens)"
        )