shotgun-sh 0.2.3.dev2__py3-none-any.whl → 0.2.11.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

Files changed (107)
  1. shotgun/agents/agent_manager.py +524 -58
  2. shotgun/agents/common.py +62 -62
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +14 -3
  5. shotgun/agents/config/models.py +16 -0
  6. shotgun/agents/config/provider.py +68 -13
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +493 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +24 -2
  14. shotgun/agents/export.py +4 -5
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +14 -2
  18. shotgun/agents/history/token_counting/anthropic.py +32 -10
  19. shotgun/agents/models.py +50 -2
  20. shotgun/agents/plan.py +4 -5
  21. shotgun/agents/research.py +4 -5
  22. shotgun/agents/specify.py +4 -5
  23. shotgun/agents/tasks.py +4 -5
  24. shotgun/agents/tools/__init__.py +0 -2
  25. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  26. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  27. shotgun/agents/tools/codebase/file_read.py +6 -0
  28. shotgun/agents/tools/codebase/query_graph.py +6 -0
  29. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  30. shotgun/agents/tools/file_management.py +71 -9
  31. shotgun/agents/tools/registry.py +217 -0
  32. shotgun/agents/tools/web_search/__init__.py +24 -12
  33. shotgun/agents/tools/web_search/anthropic.py +24 -3
  34. shotgun/agents/tools/web_search/gemini.py +22 -10
  35. shotgun/agents/tools/web_search/openai.py +21 -12
  36. shotgun/api_endpoints.py +7 -3
  37. shotgun/build_constants.py +1 -1
  38. shotgun/cli/clear.py +52 -0
  39. shotgun/cli/compact.py +186 -0
  40. shotgun/cli/context.py +111 -0
  41. shotgun/cli/models.py +1 -0
  42. shotgun/cli/update.py +16 -2
  43. shotgun/codebase/core/manager.py +10 -1
  44. shotgun/llm_proxy/__init__.py +5 -2
  45. shotgun/llm_proxy/clients.py +12 -7
  46. shotgun/logging_config.py +8 -10
  47. shotgun/main.py +70 -10
  48. shotgun/posthog_telemetry.py +9 -3
  49. shotgun/prompts/agents/export.j2 +18 -1
  50. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  51. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  52. shotgun/prompts/agents/plan.j2 +1 -1
  53. shotgun/prompts/agents/research.j2 +1 -1
  54. shotgun/prompts/agents/specify.j2 +270 -3
  55. shotgun/prompts/agents/state/system_state.j2 +4 -0
  56. shotgun/prompts/agents/tasks.j2 +1 -1
  57. shotgun/prompts/loader.py +2 -2
  58. shotgun/prompts/tools/web_search.j2 +14 -0
  59. shotgun/sentry_telemetry.py +4 -15
  60. shotgun/settings.py +238 -0
  61. shotgun/telemetry.py +15 -32
  62. shotgun/tui/app.py +203 -9
  63. shotgun/tui/commands/__init__.py +1 -1
  64. shotgun/tui/components/context_indicator.py +136 -0
  65. shotgun/tui/components/mode_indicator.py +70 -0
  66. shotgun/tui/components/status_bar.py +48 -0
  67. shotgun/tui/containers.py +93 -0
  68. shotgun/tui/dependencies.py +39 -0
  69. shotgun/tui/protocols.py +45 -0
  70. shotgun/tui/screens/chat/__init__.py +5 -0
  71. shotgun/tui/screens/chat/chat.tcss +54 -0
  72. shotgun/tui/screens/chat/chat_screen.py +1110 -0
  73. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  74. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  75. shotgun/tui/screens/chat/help_text.py +39 -0
  76. shotgun/tui/screens/chat/prompt_history.py +48 -0
  77. shotgun/tui/screens/chat.tcss +11 -0
  78. shotgun/tui/screens/chat_screen/command_providers.py +68 -2
  79. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  80. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  81. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  82. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  83. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  84. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  85. shotgun/tui/screens/confirmation_dialog.py +151 -0
  86. shotgun/tui/screens/model_picker.py +30 -6
  87. shotgun/tui/screens/pipx_migration.py +153 -0
  88. shotgun/tui/screens/welcome.py +24 -5
  89. shotgun/tui/services/__init__.py +5 -0
  90. shotgun/tui/services/conversation_service.py +182 -0
  91. shotgun/tui/state/__init__.py +7 -0
  92. shotgun/tui/state/processing_state.py +185 -0
  93. shotgun/tui/widgets/__init__.py +5 -0
  94. shotgun/tui/widgets/widget_coordinator.py +247 -0
  95. shotgun/utils/datetime_utils.py +77 -0
  96. shotgun/utils/file_system_utils.py +3 -2
  97. shotgun/utils/update_checker.py +69 -14
  98. shotgun_sh-0.2.11.dev1.dist-info/METADATA +129 -0
  99. shotgun_sh-0.2.11.dev1.dist-info/RECORD +190 -0
  100. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev1.dist-info}/entry_points.txt +1 -0
  101. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev1.dist-info}/licenses/LICENSE +1 -1
  102. shotgun/agents/tools/user_interaction.py +0 -37
  103. shotgun/tui/screens/chat.py +0 -804
  104. shotgun/tui/screens/chat_screen/history.py +0 -352
  105. shotgun_sh-0.2.3.dev2.dist-info/METADATA +0 -467
  106. shotgun_sh-0.2.3.dev2.dist-info/RECORD +0 -154
  107. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev1.dist-info}/WHEEL +0 -0
shotgun/cli/compact.py ADDED
@@ -0,0 +1,186 @@
+ """Compact command for shotgun CLI."""
+
+ import asyncio
+ import json
+ from pathlib import Path
+ from typing import Annotated, Any
+
+ import typer
+ from pydantic_ai.usage import RequestUsage
+ from rich.console import Console
+
+ from shotgun.agents.config import get_provider_model
+ from shotgun.agents.conversation_manager import ConversationManager
+ from shotgun.agents.history.history_processors import token_limit_compactor
+ from shotgun.agents.history.token_estimation import estimate_tokens_from_messages
+ from shotgun.cli.models import OutputFormat
+ from shotgun.logging_config import get_logger
+
+ app = typer.Typer(
+     name="compact", help="Compact the conversation history", no_args_is_help=False
+ )
+ logger = get_logger(__name__)
+ console = Console()
+
+
+ @app.callback(invoke_without_command=True)
+ def compact(
+     format: Annotated[
+         OutputFormat,
+         typer.Option(
+             "--format",
+             "-f",
+             help="Output format: markdown or json",
+         ),
+     ] = OutputFormat.MARKDOWN,
+ ) -> None:
+     """Compact the current conversation history to reduce size.
+
+     This command compacts the conversation in ~/.shotgun-sh/conversation.json
+     by summarizing older messages while preserving recent context. The compacted
+     conversation is automatically saved back to the file.
+     """
+     try:
+         result = asyncio.run(compact_conversation())
+
+         if format == OutputFormat.JSON:
+             # Output as JSON
+             console.print_json(json.dumps(result, indent=2))
+         else:
+             # Output as markdown
+             console.print(format_markdown(result))
+
+     except FileNotFoundError as e:
+         console.print(
+             f"[red]Error:[/red] {e}\n\n"
+             "No conversation found. Start a TUI session first with: [cyan]shotgun[/cyan]",
+             style="bold",
+         )
+         raise typer.Exit(code=1) from e
+     except Exception as e:
+         console.print(
+             f"[red]Error:[/red] Failed to compact conversation: {e}", style="bold"
+         )
+         logger.debug("Full traceback:", exc_info=True)
+         raise typer.Exit(code=1) from e
+
+
+ async def compact_conversation() -> dict[str, Any]:
+     """Compact the conversation and return statistics.
+
+     Returns:
+         Dictionary with compaction statistics including before/after metrics
+     """
+     # Get conversation file path
+     conversation_file = Path.home() / ".shotgun-sh" / "conversation.json"
+
+     if not conversation_file.exists():
+         raise FileNotFoundError(f"Conversation file not found at {conversation_file}")
+
+     # Load conversation
+     manager = ConversationManager(conversation_file)
+     conversation = manager.load()
+
+     if not conversation:
+         raise ValueError("Conversation file is empty or corrupted")
+
+     # Get agent messages only (not UI messages)
+     agent_messages = conversation.get_agent_messages()
+
+     if not agent_messages:
+         raise ValueError("No agent messages found in conversation")
+
+     # Get model config
+     model_config = get_provider_model()
+
+     # Calculate before metrics
+     original_message_count = len(agent_messages)
+     original_tokens = await estimate_tokens_from_messages(agent_messages, model_config)
+
+     # For CLI, we can call token_limit_compactor directly without full AgentDeps
+     # since we only need the model config and message history
+     # Create a minimal context object for compaction
+     class CompactContext:
+         def __init__(self, model_config: Any, usage: RequestUsage) -> None:
+             self.deps = type("Deps", (), {"llm_model": model_config})()
+             self.usage = usage
+
+     # Create minimal usage info for compaction check
+     usage = RequestUsage(input_tokens=original_tokens, output_tokens=0)
+     ctx = CompactContext(model_config, usage)
+
+     # Apply compaction with force=True to bypass threshold checks
+     compacted_messages = await token_limit_compactor(ctx, agent_messages, force=True)
+
+     # Calculate after metrics
+     compacted_message_count = len(compacted_messages)
+     compacted_tokens = await estimate_tokens_from_messages(
+         compacted_messages, model_config
+     )
+
+     # Calculate reduction percentages
+     message_reduction = (
+         ((original_message_count - compacted_message_count) / original_message_count)
+         * 100
+         if original_message_count > 0
+         else 0
+     )
+     token_reduction = (
+         ((original_tokens - compacted_tokens) / original_tokens) * 100
+         if original_tokens > 0
+         else 0
+     )
+
+     # Save compacted conversation
+     conversation.set_agent_messages(compacted_messages)
+     manager.save(conversation)
+
+     logger.info(
+         f"Compacted conversation: {original_message_count} → {compacted_message_count} messages "
+         f"({message_reduction:.1f}% reduction)"
+     )
+
+     return {
+         "success": True,
+         "before": {
+             "messages": original_message_count,
+             "estimated_tokens": original_tokens,
+         },
+         "after": {
+             "messages": compacted_message_count,
+             "estimated_tokens": compacted_tokens,
+         },
+         "reduction": {
+             "messages_percent": round(message_reduction, 1),
+             "tokens_percent": round(token_reduction, 1),
+         },
+     }
+
+
+ def format_markdown(result: dict[str, Any]) -> str:
+     """Format compaction result as markdown.
+
+     Args:
+         result: Dictionary with compaction statistics
+
+     Returns:
+         Formatted markdown string
+     """
+     before = result["before"]
+     after = result["after"]
+     reduction = result["reduction"]
+
+     return f"""# Conversation Compacted ✓
+
+ ## Before
+ - **Messages:** {before["messages"]:,}
+ - **Estimated Tokens:** {before["estimated_tokens"]:,}
+
+ ## After
+ - **Messages:** {after["messages"]:,}
+ - **Estimated Tokens:** {after["estimated_tokens"]:,}
+
+ ## Reduction
+ - **Messages:** {reduction["messages_percent"]}%
+ - **Tokens:** {reduction["tokens_percent"]}%
+ """
shotgun/cli/context.py ADDED
@@ -0,0 +1,111 @@
+ """Context command for shotgun CLI."""
+
+ import asyncio
+ import json
+ from pathlib import Path
+ from typing import Annotated
+
+ import typer
+ from rich.console import Console
+
+ from shotgun.agents.config import get_provider_model
+ from shotgun.agents.context_analyzer import (
+     ContextAnalysisOutput,
+     ContextAnalyzer,
+     ContextFormatter,
+ )
+ from shotgun.agents.conversation_manager import ConversationManager
+ from shotgun.cli.models import OutputFormat
+ from shotgun.logging_config import get_logger
+
+ app = typer.Typer(
+     name="context", help="Analyze conversation context usage", no_args_is_help=False
+ )
+ logger = get_logger(__name__)
+ console = Console()
+
+
+ @app.callback(invoke_without_command=True)
+ def context(
+     format: Annotated[
+         OutputFormat,
+         typer.Option(
+             "--format",
+             "-f",
+             help="Output format: markdown or json",
+         ),
+     ] = OutputFormat.MARKDOWN,
+ ) -> None:
+     """Analyze the current conversation's context usage.
+
+     This command analyzes the agent's message history from ~/.shotgun-sh/conversation.json
+     and displays token usage breakdown by message type. Only agent context is counted
+     (UI elements like hints are excluded).
+     """
+     try:
+         result = asyncio.run(analyze_context())
+
+         if format == OutputFormat.JSON:
+             # Output as JSON
+             console.print_json(json.dumps(result.json_data, indent=2))
+         else:
+             # Output as plain text (Markdown() reformats and makes categories inline)
+             console.print(result.markdown)
+
+     except FileNotFoundError as e:
+         console.print(
+             f"[red]Error:[/red] {e}\n\n"
+             "No conversation found. Start a TUI session first with: [cyan]shotgun[/cyan]",
+             style="bold",
+         )
+         raise typer.Exit(code=1) from e
+     except Exception as e:
+         console.print(f"[red]Error:[/red] Failed to analyze context: {e}", style="bold")
+         logger.debug("Full traceback:", exc_info=True)
+         raise typer.Exit(code=1) from e
+
+
+ async def analyze_context() -> ContextAnalysisOutput:
+     """Analyze the conversation context and return structured data.
+
+     Returns:
+         ContextAnalysisOutput with both markdown and JSON representations of the analysis
+     """
+     # Get conversation file path
+     conversation_file = Path.home() / ".shotgun-sh" / "conversation.json"
+
+     if not conversation_file.exists():
+         raise FileNotFoundError(f"Conversation file not found at {conversation_file}")
+
+     # Load conversation
+     manager = ConversationManager(conversation_file)
+     conversation = manager.load()
+
+     if not conversation:
+         raise ValueError("Conversation file is empty or corrupted")
+
+     # Get agent messages only (not UI messages)
+     agent_messages = conversation.get_agent_messages()
+
+     if not agent_messages:
+         raise ValueError("No agent messages found in conversation")
+
+     # Get model config (use default provider settings)
+     model_config = get_provider_model()
+
+     # Debug: Log the model being used
+     logger.debug(f"Using model: {model_config.name.value}")
+     logger.debug(f"Provider: {model_config.provider.value}")
+     logger.debug(f"Key provider: {model_config.key_provider.value}")
+     logger.debug(f"Max input tokens: {model_config.max_input_tokens}")
+
+     # Analyze with ContextAnalyzer
+     analyzer = ContextAnalyzer(model_config)
+     # For CLI, agent_messages and ui_message_history are the same (no hints in CLI mode)
+     analysis = await analyzer.analyze_conversation(agent_messages, list(agent_messages))
+
+     # Use formatter to generate markdown and JSON
+     markdown = ContextFormatter.format_markdown(analysis)
+     json_data = ContextFormatter.format_json(analysis)
+
+     return ContextAnalysisOutput(markdown=markdown, json_data=json_data)
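
As with compact, the analysis entry point can be driven directly. A rough sketch, assuming the same ~/.shotgun-sh/conversation.json file is present:

```python
# Sketch only: runs the analysis added in this release and prints both
# representations carried by ContextAnalysisOutput.
import asyncio
import json

from shotgun.cli.context import analyze_context

output = asyncio.run(analyze_context())
print(output.markdown)                         # what `shotgun context` shows
print(json.dumps(output.json_data, indent=2))  # what `shotgun context -f json` shows
```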
shotgun/cli/models.py CHANGED
@@ -8,3 +8,4 @@ class OutputFormat(StrEnum):
  
      TEXT = "text"
      JSON = "json"
+     MARKDOWN = "markdown"
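
After this one-line addition the enum presumably reads as follows; the docstring is assumed, since only the tail of the class appears in the hunk:

```python
from enum import StrEnum


class OutputFormat(StrEnum):
    """Output formats accepted by shotgun CLI commands."""  # assumed docstring

    TEXT = "text"
    JSON = "json"
    MARKDOWN = "markdown"  # added for the new compact/context commands
```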
shotgun/cli/update.py CHANGED
@@ -45,7 +45,7 @@ def update(
  
      This command will:
      - Check PyPI for the latest version
-     - Detect your installation method (pipx, pip, or venv)
+     - Detect your installation method (uvx, uv-tool, pipx, pip, or venv)
      - Perform the appropriate upgrade command
  
      Examples:
@@ -93,6 +93,8 @@ def update(
          )
          console.print(
              "Use --force to update anyway, or install the stable version with:\n"
+             " uv tool install shotgun-sh\n"
+             " or\n"
              " pipx install shotgun-sh\n"
              " or\n"
              " pip install shotgun-sh",
@@ -134,7 +136,19 @@ def update(
      console.print(f"\n[red]✗[/red] {message}", style="bold red")
  
      # Provide manual update instructions
-     if method == "pipx":
+     if method == "uvx":
+         console.print(
+             "\n[yellow]Run uvx again to use the latest version:[/yellow]\n"
+             " uvx shotgun-sh\n"
+             "\n[yellow]Or install permanently:[/yellow]\n"
+             " uv tool install shotgun-sh"
+         )
+     elif method == "uv-tool":
+         console.print(
+             "\n[yellow]Try updating manually:[/yellow]\n"
+             " uv tool upgrade shotgun-sh"
+         )
+     elif method == "pipx":
          console.print(
              "\n[yellow]Try updating manually:[/yellow]\n"
              " pipx upgrade shotgun-sh"
shotgun/codebase/core/manager.py CHANGED
@@ -371,7 +371,16 @@ class CodebaseGraphManager:
          )
          import shutil
  
-         shutil.rmtree(graph_path)
+         # Handle both files and directories (kuzu v0.11.2+ uses files)
+         if graph_path.is_file():
+             graph_path.unlink()  # Delete file
+             # Also delete WAL file if it exists
+             wal_path = graph_path.with_suffix(graph_path.suffix + ".wal")
+             if wal_path.exists():
+                 wal_path.unlink()
+                 logger.debug(f"Deleted WAL file: {wal_path}")
+         else:
+             shutil.rmtree(graph_path)  # Delete directory
  
          # Import the builder from local core module
          from shotgun.codebase.core import CodebaseIngestor
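
The change accounts for Kuzu v0.11.2+ storing the graph as a single file (plus a .wal sidecar) rather than a directory. The same pattern as a standalone sketch; remove_graph_artifacts is a hypothetical helper name, not part of the shotgun codebase:

```python
import shutil
from pathlib import Path


def remove_graph_artifacts(graph_path: Path) -> None:
    """Delete a Kuzu graph whether it is stored as a file or a directory."""
    if graph_path.is_file():
        graph_path.unlink()  # single-file database (kuzu v0.11.2+)
        wal_path = graph_path.with_suffix(graph_path.suffix + ".wal")
        if wal_path.exists():
            wal_path.unlink()  # remove the write-ahead log alongside it
    else:
        shutil.rmtree(graph_path)  # legacy directory layout
```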
shotgun/llm_proxy/__init__.py CHANGED
@@ -1,6 +1,9 @@
  """LiteLLM proxy client utilities and configuration."""
  
- from .clients import create_anthropic_proxy_client, create_litellm_provider
+ from .clients import (
+     create_anthropic_proxy_provider,
+     create_litellm_provider,
+ )
  from .constants import (
      LITELLM_PROXY_ANTHROPIC_BASE,
      LITELLM_PROXY_BASE_URL,
@@ -12,5 +15,5 @@ __all__ = [
      "LITELLM_PROXY_ANTHROPIC_BASE",
      "LITELLM_PROXY_OPENAI_BASE",
      "create_litellm_provider",
-     "create_anthropic_proxy_client",
+     "create_anthropic_proxy_provider",
  ]
shotgun/llm_proxy/clients.py CHANGED
@@ -1,6 +1,6 @@
  """Client creation utilities for LiteLLM proxy."""
  
- from anthropic import Anthropic
+ from pydantic_ai.providers.anthropic import AnthropicProvider
  from pydantic_ai.providers.litellm import LiteLLMProvider
  
  from .constants import LITELLM_PROXY_ANTHROPIC_BASE, LITELLM_PROXY_BASE_URL
@@ -21,19 +21,24 @@ def create_litellm_provider(api_key: str) -> LiteLLMProvider:
      )
  
  
- def create_anthropic_proxy_client(api_key: str) -> Anthropic:
-     """Create Anthropic client configured for LiteLLM proxy.
+ def create_anthropic_proxy_provider(api_key: str) -> AnthropicProvider:
+     """Create Anthropic provider configured for LiteLLM proxy.
  
-     This client will proxy token counting requests through the
-     LiteLLM proxy to Anthropic's actual token counting API.
+     This provider uses native Anthropic API format while routing through
+     the LiteLLM proxy. This preserves Anthropic-specific features like
+     tool_choice and web search.
+
+     The provider's .client attribute provides access to the async Anthropic
+     client (AsyncAnthropic), which should be used for all API operations
+     including token counting.
  
      Args:
          api_key: Shotgun API key
  
      Returns:
-         Anthropic client configured to use LiteLLM proxy
+         AnthropicProvider configured to use LiteLLM proxy /anthropic endpoint
      """
-     return Anthropic(
+     return AnthropicProvider(
          api_key=api_key,
          base_url=LITELLM_PROXY_ANTHROPIC_BASE,
      )
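
Per the new docstring, callers are expected to reach the async Anthropic client through the provider's .client attribute, for example for token counting. A hedged sketch of such a caller; the model id and API key are placeholders, and the count_tokens call assumes the current anthropic SDK surface:

```python
import asyncio

from shotgun.llm_proxy import create_anthropic_proxy_provider


async def count_prompt_tokens(api_key: str) -> int:
    provider = create_anthropic_proxy_provider(api_key)
    # provider.client is an AsyncAnthropic instance routed through the proxy
    result = await provider.client.messages.count_tokens(
        model="claude-sonnet-4-20250514",  # placeholder model id
        messages=[{"role": "user", "content": "Hello"}],
    )
    return result.input_tokens


print(asyncio.run(count_prompt_tokens("your-shotgun-api-key")))  # placeholder key
```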
shotgun/logging_config.py CHANGED
@@ -2,10 +2,10 @@
  
  import logging
  import logging.handlers
- import os
  import sys
  from pathlib import Path
  
+ from shotgun.settings import settings
  from shotgun.utils.env_utils import is_truthy
  
  
@@ -75,12 +75,10 @@ def setup_logger(
      if has_file_handler:
          return logger
  
-     # Get log level from environment variable, default to INFO
-     env_level = os.getenv("SHOTGUN_LOG_LEVEL", "INFO").upper()
-     if env_level not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
-         env_level = "INFO"
+     # Get log level from settings (already validated and uppercased)
+     log_level = settings.logging.log_level
  
-     logger.setLevel(getattr(logging, env_level))
+     logger.setLevel(getattr(logging, log_level))
  
      # Default format string
      if format_string is None:
@@ -102,13 +100,13 @@
      # Check if console logging is enabled (default: off)
      # Force console logging OFF if Logfire is enabled in dev build
      console_logging_enabled = (
-         is_truthy(os.getenv("LOGGING_TO_CONSOLE", "false")) and not is_logfire_dev_build
+         settings.logging.logging_to_console and not is_logfire_dev_build
      )
  
      if console_logging_enabled:
          # Create console handler
          console_handler = logging.StreamHandler(sys.stdout)
-         console_handler.setLevel(getattr(logging, env_level))
+         console_handler.setLevel(getattr(logging, log_level))
  
          # Use colored formatter for console
          console_formatter = ColoredFormatter(format_string, datefmt="%H:%M:%S")
@@ -118,7 +116,7 @@
          logger.addHandler(console_handler)
  
      # Check if file logging is enabled (default: on)
-     file_logging_enabled = is_truthy(os.getenv("LOGGING_TO_FILE", "true"))
+     file_logging_enabled = settings.logging.logging_to_file
  
      if file_logging_enabled:
          try:
@@ -137,7 +135,7 @@
  
              # Also set max file size (10MB) using RotatingFileHandler as fallback
              # Note: We'll use TimedRotatingFileHandler which handles both time and size
-             file_handler.setLevel(getattr(logging, env_level))
+             file_handler.setLevel(getattr(logging, log_level))
  
              # Use standard formatter for file (no colors)
              file_formatter = logging.Formatter(
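
The logging module now pulls its configuration from the new shotgun.settings module (settings.py, +238 lines, not shown in this excerpt). A rough sketch of the surface this code appears to require, with field names inferred from the call sites above and defaults taken from the old environment-variable behaviour; everything else is an assumption modeled with pydantic-settings:

```python
from pydantic import BaseModel
from pydantic_settings import BaseSettings


class LoggingSettings(BaseModel):
    log_level: str = "INFO"           # assumed default (was SHOTGUN_LOG_LEVEL)
    logging_to_console: bool = False  # assumed default (was LOGGING_TO_CONSOLE)
    logging_to_file: bool = True      # assumed default (was LOGGING_TO_FILE)


class Settings(BaseSettings):
    logging: LoggingSettings = LoggingSettings()


settings = Settings()  # the real module also exposes telemetry settings, per posthog_telemetry.py
```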
shotgun/main.py CHANGED
@@ -23,8 +23,11 @@ from dotenv import load_dotenv
  from shotgun import __version__
  from shotgun.agents.config import get_config_manager
  from shotgun.cli import (
+     clear,
      codebase,
+     compact,
      config,
+     context,
      export,
      feedback,
      plan,
@@ -78,6 +81,9 @@ app.add_typer(config.app, name="config", help="Manage Shotgun configuration")
  app.add_typer(
      codebase.app, name="codebase", help="Manage and query code knowledge graphs"
  )
+ app.add_typer(context.app, name="context", help="Analyze conversation context usage")
+ app.add_typer(compact.app, name="compact", help="Compact conversation history")
+ app.add_typer(clear.app, name="clear", help="Clear conversation history")
  app.add_typer(research.app, name="research", help="Perform research with agentic loops")
  app.add_typer(plan.app, name="plan", help="Generate structured plans")
  app.add_typer(specify.app, name="specify", help="Generate comprehensive specifications")
@@ -125,6 +131,41 @@ def main(
              help="Continue previous TUI conversation",
          ),
      ] = False,
+     web: Annotated[
+         bool,
+         typer.Option(
+             "--web",
+             help="Serve TUI as web application",
+         ),
+     ] = False,
+     port: Annotated[
+         int,
+         typer.Option(
+             "--port",
+             help="Port for web server (only used with --web)",
+         ),
+     ] = 8000,
+     host: Annotated[
+         str,
+         typer.Option(
+             "--host",
+             help="Host address for web server (only used with --web)",
+         ),
+     ] = "localhost",
+     public_url: Annotated[
+         str | None,
+         typer.Option(
+             "--public-url",
+             help="Public URL if behind proxy (only used with --web)",
+         ),
+     ] = None,
+     force_reindex: Annotated[
+         bool,
+         typer.Option(
+             "--force-reindex",
+             help="Force re-indexing of codebase (ignores existing index)",
+         ),
+     ] = False,
  ) -> None:
      """Shotgun - AI-powered CLI tool."""
      logger.debug("Starting shotgun CLI application")
@@ -134,16 +175,35 @@
      perform_auto_update_async(no_update_check=no_update_check)
  
      if ctx.invoked_subcommand is None and not ctx.resilient_parsing:
-         logger.debug("Launching shotgun TUI application")
-         try:
-             tui_app.run(
-                 no_update_check=no_update_check, continue_session=continue_session
-             )
-         finally:
-             # Ensure PostHog is shut down cleanly even if TUI exits unexpectedly
-             from shotgun.posthog_telemetry import shutdown
-
-             shutdown()
+         if web:
+             logger.debug("Launching shotgun TUI as web application")
+             try:
+                 tui_app.serve(
+                     host=host,
+                     port=port,
+                     public_url=public_url,
+                     no_update_check=no_update_check,
+                     continue_session=continue_session,
+                     force_reindex=force_reindex,
+                 )
+             finally:
+                 # Ensure PostHog is shut down cleanly even if server exits unexpectedly
+                 from shotgun.posthog_telemetry import shutdown
+
+                 shutdown()
+         else:
+             logger.debug("Launching shotgun TUI application")
+             try:
+                 tui_app.run(
+                     no_update_check=no_update_check,
+                     continue_session=continue_session,
+                     force_reindex=force_reindex,
+                 )
+             finally:
+                 # Ensure PostHog is shut down cleanly even if TUI exits unexpectedly
+                 from shotgun.posthog_telemetry import shutdown
+
+                 shutdown()
          raise typer.Exit()
  
      # For CLI commands, register PostHog shutdown handler
shotgun/posthog_telemetry.py CHANGED
@@ -10,6 +10,7 @@ from shotgun import __version__
  from shotgun.agents.config import get_config_manager
  from shotgun.agents.conversation_manager import ConversationManager
  from shotgun.logging_config import get_early_logger
+ from shotgun.settings import settings
  
  # Use early logger to prevent automatic StreamHandler creation
  logger = get_early_logger(__name__)
@@ -32,10 +33,15 @@ def setup_posthog_observability() -> bool:
          logger.debug("PostHog is already initialized, skipping")
          return True
  
-     # Hardcoded PostHog configuration
-     api_key = "phc_KKnChzZUKeNqZDOTJ6soCBWNQSx3vjiULdwTR9H5Mcr"
+     # Get API key from settings (handles build constants + env vars automatically)
+     api_key = settings.telemetry.posthog_api_key
  
-     logger.debug("Using hardcoded PostHog configuration")
+     # If no API key is available, skip PostHog initialization
+     if not api_key:
+         logger.debug("No PostHog API key available, skipping initialization")
+         return False
+
+     logger.debug("Using PostHog API key from settings")
  
      # Determine environment based on version
      # Dev versions contain "dev", "rc", "alpha", or "beta"
shotgun/prompts/agents/export.j2 CHANGED
@@ -124,6 +124,7 @@ content_tasks = read_file('tasks.md') # Read implementation details
  - `plan.md` - Extract development approach and stages
  - `tasks.md` - Understand implementation tasks and structure
  2. **Map content to agents.md standard sections**:
+ - **Research, Specifications, and Planning**: ALWAYS include this section first. Check which pipeline files exist in `.shotgun/` (research.md, specification.md, plan.md, tasks.md) and list only the ones that exist. If none exist, omit this section.
  - **Project Overview**: Brief description and key technologies from specification.md
  - **Dev Environment Setup**: Installation, dependencies, dev server commands
  - **Code Style Guidelines**: Coding conventions and patterns from research.md
@@ -170,6 +171,14 @@ For additional specialized exports (only if specifically requested):
  <CORRECT_CONTENT_TEMPLATE>
  # Agents.md - [Project Name]
  
+ ## Research, Specifications, and Planning
+
+ The `.shotgun/` folder contains background research, specifications, and implementation planning files. Refer to these files for additional context:
+ - `research.md` - Codebase analysis and research findings
+ - `specification.md` - Project requirements and specifications
+ - `plan.md` - Development plan and implementation approach
+ - `tasks.md` - Task breakdown and implementation progress
+
  ## Project Overview
  - Brief description of what the project does
  - Key technologies and frameworks used
@@ -253,6 +262,14 @@ This project is about [making assumptions without reading files]...
  <GOOD_CONTENT_EXAMPLE>
  # Agents.md - E-commerce API Project
  
+ ## Research, Specifications, and Planning
+
+ The `.shotgun/` folder contains background research, specifications, and implementation planning files. Refer to these files for additional context:
+ - `research.md` - Codebase analysis and research findings
+ - `specification.md` - Project requirements and specifications
+ - `plan.md` - Development plan and implementation approach
+ - `tasks.md` - Task breakdown and implementation progress
+
  ## Project Overview
  - REST API for product catalog management with authentication
  - Built with Python/FastAPI for high performance async operations
@@ -316,7 +333,7 @@ This project is about [making assumptions without reading files]...
  USER INTERACTION - CLARIFY EXPORT REQUIREMENTS:
  
  - ALWAYS ask clarifying questions when export requirements are unclear
- - Use ask_user tool to gather specific details about:
+ - Use clarifying questions to gather specific details about:
    - Target format and file type preferences
    - Intended use case and audience for the export
    - Specific content sections to include/exclude from files
shotgun/prompts/agents/partials/common_agent_system_prompt.j2 CHANGED
@@ -7,7 +7,10 @@ Your extensive expertise spans, among other things:
  ## KEY RULES
  
  {% if interactive_mode %}
- 0. Always ask CLARIFYING QUESTIONS if the user's request is ambiguous or lacks sufficient detail. Do not make assumptions about what the user wants.
+ 0. Always ask CLARIFYING QUESTIONS using structured output if the user's request is ambiguous or lacks sufficient detail.
+    - Return your response with the clarifying_questions field populated
+    - Do not make assumptions about what the user wants
+    - Questions should be clear, specific, and answerable
  {% endif %}
  1. Above all, prefer using tools to do the work and NEVER respond with text.
  2. IMPORTANT: Always ask for review and go ahead to move forward after using write_file().
@@ -20,6 +23,7 @@ Your extensive expertise spans, among other things:
  9. **Be Creative**: If the user seems not to know something, always be creative and come up with ideas that fit their thinking.
  10. Greet the user when you're just starting to work.
  11. DO NOT repeat yourself.
+ 12. If a user has agreed to a plan, you DO NOT NEED TO FOLLOW UP with them after every step to ask "is this search query ok?".
  
  
  ## Quality Standards