shotgun-sh 0.2.3.dev2__py3-none-any.whl → 0.2.11.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh has been flagged as potentially problematic; see the registry's advisory page for more details.

Files changed (132):
  1. shotgun/agents/agent_manager.py +664 -75
  2. shotgun/agents/common.py +76 -70
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +78 -36
  5. shotgun/agents/config/models.py +41 -1
  6. shotgun/agents/config/provider.py +70 -15
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +14 -2
  18. shotgun/agents/history/token_counting/anthropic.py +49 -11
  19. shotgun/agents/history/token_counting/base.py +14 -3
  20. shotgun/agents/history/token_counting/openai.py +8 -0
  21. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  22. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  23. shotgun/agents/history/token_counting/utils.py +0 -3
  24. shotgun/agents/models.py +50 -2
  25. shotgun/agents/plan.py +6 -7
  26. shotgun/agents/research.py +7 -8
  27. shotgun/agents/specify.py +6 -7
  28. shotgun/agents/tasks.py +6 -7
  29. shotgun/agents/tools/__init__.py +0 -2
  30. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  31. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  32. shotgun/agents/tools/codebase/file_read.py +11 -2
  33. shotgun/agents/tools/codebase/query_graph.py +6 -0
  34. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  35. shotgun/agents/tools/file_management.py +82 -16
  36. shotgun/agents/tools/registry.py +217 -0
  37. shotgun/agents/tools/web_search/__init__.py +30 -18
  38. shotgun/agents/tools/web_search/anthropic.py +26 -5
  39. shotgun/agents/tools/web_search/gemini.py +23 -11
  40. shotgun/agents/tools/web_search/openai.py +22 -13
  41. shotgun/agents/tools/web_search/utils.py +2 -2
  42. shotgun/agents/usage_manager.py +16 -11
  43. shotgun/api_endpoints.py +7 -3
  44. shotgun/build_constants.py +1 -1
  45. shotgun/cli/clear.py +53 -0
  46. shotgun/cli/compact.py +186 -0
  47. shotgun/cli/config.py +8 -5
  48. shotgun/cli/context.py +111 -0
  49. shotgun/cli/export.py +1 -1
  50. shotgun/cli/feedback.py +4 -2
  51. shotgun/cli/models.py +1 -0
  52. shotgun/cli/plan.py +1 -1
  53. shotgun/cli/research.py +1 -1
  54. shotgun/cli/specify.py +1 -1
  55. shotgun/cli/tasks.py +1 -1
  56. shotgun/cli/update.py +16 -2
  57. shotgun/codebase/core/change_detector.py +5 -3
  58. shotgun/codebase/core/code_retrieval.py +4 -2
  59. shotgun/codebase/core/ingestor.py +10 -8
  60. shotgun/codebase/core/manager.py +13 -4
  61. shotgun/codebase/core/nl_query.py +1 -1
  62. shotgun/llm_proxy/__init__.py +5 -2
  63. shotgun/llm_proxy/clients.py +12 -7
  64. shotgun/logging_config.py +18 -27
  65. shotgun/main.py +73 -11
  66. shotgun/posthog_telemetry.py +23 -7
  67. shotgun/prompts/agents/export.j2 +18 -1
  68. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  69. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  70. shotgun/prompts/agents/plan.j2 +1 -1
  71. shotgun/prompts/agents/research.j2 +1 -1
  72. shotgun/prompts/agents/specify.j2 +270 -3
  73. shotgun/prompts/agents/state/system_state.j2 +4 -0
  74. shotgun/prompts/agents/tasks.j2 +1 -1
  75. shotgun/prompts/loader.py +2 -2
  76. shotgun/prompts/tools/web_search.j2 +14 -0
  77. shotgun/sentry_telemetry.py +7 -16
  78. shotgun/settings.py +238 -0
  79. shotgun/telemetry.py +18 -33
  80. shotgun/tui/app.py +243 -43
  81. shotgun/tui/commands/__init__.py +1 -1
  82. shotgun/tui/components/context_indicator.py +179 -0
  83. shotgun/tui/components/mode_indicator.py +70 -0
  84. shotgun/tui/components/status_bar.py +48 -0
  85. shotgun/tui/containers.py +91 -0
  86. shotgun/tui/dependencies.py +39 -0
  87. shotgun/tui/protocols.py +45 -0
  88. shotgun/tui/screens/chat/__init__.py +5 -0
  89. shotgun/tui/screens/chat/chat.tcss +54 -0
  90. shotgun/tui/screens/chat/chat_screen.py +1202 -0
  91. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  92. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  93. shotgun/tui/screens/chat/help_text.py +40 -0
  94. shotgun/tui/screens/chat/prompt_history.py +48 -0
  95. shotgun/tui/screens/chat.tcss +11 -0
  96. shotgun/tui/screens/chat_screen/command_providers.py +78 -2
  97. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  98. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  99. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  100. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  101. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  102. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  103. shotgun/tui/screens/confirmation_dialog.py +151 -0
  104. shotgun/tui/screens/feedback.py +4 -4
  105. shotgun/tui/screens/github_issue.py +102 -0
  106. shotgun/tui/screens/model_picker.py +49 -24
  107. shotgun/tui/screens/onboarding.py +431 -0
  108. shotgun/tui/screens/pipx_migration.py +153 -0
  109. shotgun/tui/screens/provider_config.py +50 -27
  110. shotgun/tui/screens/shotgun_auth.py +2 -2
  111. shotgun/tui/screens/welcome.py +32 -10
  112. shotgun/tui/services/__init__.py +5 -0
  113. shotgun/tui/services/conversation_service.py +184 -0
  114. shotgun/tui/state/__init__.py +7 -0
  115. shotgun/tui/state/processing_state.py +185 -0
  116. shotgun/tui/utils/mode_progress.py +14 -7
  117. shotgun/tui/widgets/__init__.py +5 -0
  118. shotgun/tui/widgets/widget_coordinator.py +262 -0
  119. shotgun/utils/datetime_utils.py +77 -0
  120. shotgun/utils/file_system_utils.py +22 -2
  121. shotgun/utils/marketing.py +110 -0
  122. shotgun/utils/update_checker.py +69 -14
  123. shotgun_sh-0.2.11.dev5.dist-info/METADATA +130 -0
  124. shotgun_sh-0.2.11.dev5.dist-info/RECORD +193 -0
  125. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/entry_points.txt +1 -0
  126. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/licenses/LICENSE +1 -1
  127. shotgun/agents/tools/user_interaction.py +0 -37
  128. shotgun/tui/screens/chat.py +0 -804
  129. shotgun/tui/screens/chat_screen/history.py +0 -352
  130. shotgun_sh-0.2.3.dev2.dist-info/METADATA +0 -467
  131. shotgun_sh-0.2.3.dev2.dist-info/RECORD +0 -154
  132. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/WHEEL +0 -0
@@ -8,11 +8,22 @@ from shotgun.agents.config import get_provider_model
8
8
  from shotgun.agents.config.constants import MEDIUM_TEXT_8K_TOKENS
9
9
  from shotgun.agents.config.models import ModelName
10
10
  from shotgun.agents.llm import shotgun_model_request
11
+ from shotgun.agents.tools.registry import ToolCategory, register_tool
11
12
  from shotgun.logging_config import get_logger
13
+ from shotgun.prompts import PromptLoader
14
+ from shotgun.utils.datetime_utils import get_datetime_context
12
15
 
13
16
  logger = get_logger(__name__)
14
17
 
18
+ # Global prompt loader instance
19
+ prompt_loader = PromptLoader()
15
20
 
21
+
22
+ @register_tool(
23
+ category=ToolCategory.WEB_RESEARCH,
24
+ display_text="Searching web",
25
+ key_arg="query",
26
+ )
16
27
  async def gemini_web_search_tool(query: str) -> str:
17
28
  """Perform a web search using Google's Gemini API with grounding.
18
29
 
@@ -35,23 +46,24 @@ async def gemini_web_search_tool(query: str) -> str:
35
46
 
36
47
  # Get model configuration (supports both Shotgun and BYOK)
37
48
  try:
38
- model_config = get_provider_model(ModelName.GEMINI_2_5_FLASH)
49
+ model_config = await get_provider_model(ModelName.GEMINI_2_5_FLASH)
39
50
  except ValueError as e:
40
51
  error_msg = f"Gemini API key not configured: {str(e)}"
41
52
  logger.error("❌ %s", error_msg)
42
53
  span.set_attribute("output.value", f"**Error:**\n {error_msg}\n")
43
54
  return error_msg
44
55
 
45
- # Create a search-optimized prompt
46
- search_prompt = f"""Please provide current and accurate information about the following query:
47
-
48
- Query: {query}
49
-
50
- Instructions:
51
- - Provide comprehensive, factual information
52
- - Include relevant details and context
53
- - Focus on current and recent information
54
- - Be specific and accurate in your response"""
56
+ # Get datetime context for the search prompt
57
+ dt_context = get_datetime_context()
58
+
59
+ # Render search prompt from template
60
+ search_prompt = prompt_loader.render(
61
+ "tools/web_search.j2",
62
+ query=query,
63
+ current_datetime=dt_context.datetime_formatted,
64
+ timezone_name=dt_context.timezone_name,
65
+ utc_offset=dt_context.utc_offset,
66
+ )
55
67
 
56
68
  # Build the request messages
57
69
  messages: list[ModelMessage] = [ModelRequest.user_text_prompt(search_prompt)]
@@ -5,11 +5,22 @@ from opentelemetry import trace
5
5
 
6
6
  from shotgun.agents.config import get_provider_model
7
7
  from shotgun.agents.config.models import ProviderType
8
+ from shotgun.agents.tools.registry import ToolCategory, register_tool
8
9
  from shotgun.logging_config import get_logger
10
+ from shotgun.prompts import PromptLoader
11
+ from shotgun.utils.datetime_utils import get_datetime_context
9
12
 
10
13
  logger = get_logger(__name__)
11
14
 
15
+ # Global prompt loader instance
16
+ prompt_loader = PromptLoader()
12
17
 
18
+
19
+ @register_tool(
20
+ category=ToolCategory.WEB_RESEARCH,
21
+ display_text="Searching web",
22
+ key_arg="query",
23
+ )
13
24
  async def openai_web_search_tool(query: str) -> str:
14
25
  """Perform a web search and return results.
15
26
 
@@ -32,7 +43,7 @@ async def openai_web_search_tool(query: str) -> str:
32
43
 
33
44
  # Get API key from centralized configuration
34
45
  try:
35
- model_config = get_provider_model(ProviderType.OPENAI)
46
+ model_config = await get_provider_model(ProviderType.OPENAI)
36
47
  api_key = model_config.api_key
37
48
  except ValueError as e:
38
49
  error_msg = f"OpenAI API key not configured: {str(e)}"
@@ -40,19 +51,17 @@ async def openai_web_search_tool(query: str) -> str:
40
51
  span.set_attribute("output.value", f"**Error:**\n {error_msg}\n")
41
52
  return error_msg
42
53
 
43
- prompt = f"""Please provide current and accurate information about the following query:
44
-
45
- Query: {query}
54
+ # Get datetime context for the search prompt
55
+ dt_context = get_datetime_context()
46
56
 
47
- Instructions:
48
- - Provide comprehensive, factual information
49
- - Include relevant details and context
50
- - Focus on current and recent information
51
- - Be specific and accurate in your response
52
- - You can't ask the user for details, so assume the most relevant details for the query
53
-
54
- ALWAYS PROVIDE THE SOURCES (urls) TO BACK UP THE INFORMATION YOU PROVIDE.
55
- """
57
+ # Render search prompt from template
58
+ prompt = prompt_loader.render(
59
+ "tools/web_search.j2",
60
+ query=query,
61
+ current_datetime=dt_context.datetime_formatted,
62
+ timezone_name=dt_context.timezone_name,
63
+ utc_offset=dt_context.utc_offset,
64
+ )
56
65
 
57
66
  client = AsyncOpenAI(api_key=api_key)
58
67
  response = await client.responses.create( # type: ignore[call-overload]
@@ -4,7 +4,7 @@ from shotgun.agents.config import get_provider_model
4
4
  from shotgun.agents.config.models import ProviderType
5
5
 
6
6
 
7
- def is_provider_available(provider: ProviderType) -> bool:
7
+ async def is_provider_available(provider: ProviderType) -> bool:
8
8
  """Check if a provider has API key configured.
9
9
 
10
10
  Args:
@@ -14,7 +14,7 @@ def is_provider_available(provider: ProviderType) -> bool:
14
14
  True if the provider has valid credentials configured (from config or env)
15
15
  """
16
16
  try:
17
- get_provider_model(provider)
17
+ await get_provider_model(provider)
18
18
  return True
19
19
  except ValueError:
20
20
  return False
@@ -6,6 +6,8 @@ from logging import getLogger
6
6
  from pathlib import Path
7
7
  from typing import TypeAlias
8
8
 
9
+ import aiofiles
10
+ import aiofiles.os
9
11
  from genai_prices import calc_price
10
12
  from pydantic import BaseModel, Field
11
13
  from pydantic_ai import RunUsage
@@ -48,9 +50,10 @@ class SessionUsageManager:
48
50
  self._model_providers: dict[ModelName, ProviderType] = {}
49
51
  self._usage_log: list[UsageLogEntry] = []
50
52
  self._usage_path: Path = get_shotgun_home() / "usage.json"
51
- self.restore_usage_state()
53
+ # Note: restore_usage_state needs to be called asynchronously after init
54
+ # Caller should use: manager = SessionUsageManager(); await manager.restore_usage_state()
52
55
 
53
- def add_usage(
56
+ async def add_usage(
54
57
  self, usage: RunUsage, *, model_name: ModelName, provider: ProviderType
55
58
  ) -> None:
56
59
  self.usage[model_name] += usage
@@ -58,7 +61,7 @@ class SessionUsageManager:
58
61
  self._usage_log.append(
59
62
  UsageLogEntry(model_name=model_name, usage=usage, provider=provider)
60
63
  )
61
- self.persist_usage_state()
64
+ await self.persist_usage_state()
62
65
 
63
66
  def get_usage_report(self) -> dict[ModelName, RunUsage]:
64
67
  return self.usage.copy()
@@ -78,7 +81,7 @@ class SessionUsageManager:
78
81
  def build_usage_hint(self) -> str | None:
79
82
  return format_usage_hint(self.get_usage_breakdown())
80
83
 
81
- def persist_usage_state(self) -> None:
84
+ async def persist_usage_state(self) -> None:
82
85
  state = UsageState(
83
86
  usage=dict(self.usage.items()),
84
87
  model_providers=self._model_providers.copy(),
@@ -86,23 +89,25 @@ class SessionUsageManager:
86
89
  )
87
90
 
88
91
  try:
89
- self._usage_path.parent.mkdir(parents=True, exist_ok=True)
90
- with self._usage_path.open("w", encoding="utf-8") as f:
91
- json.dump(state.model_dump(mode="json"), f, indent=2)
92
+ await aiofiles.os.makedirs(self._usage_path.parent, exist_ok=True)
93
+ json_content = json.dumps(state.model_dump(mode="json"), indent=2)
94
+ async with aiofiles.open(self._usage_path, "w", encoding="utf-8") as f:
95
+ await f.write(json_content)
92
96
  logger.debug("Usage state persisted to %s", self._usage_path)
93
97
  except Exception as exc:
94
98
  logger.error(
95
99
  "Failed to persist usage state to %s: %s", self._usage_path, exc
96
100
  )
97
101
 
98
- def restore_usage_state(self) -> None:
99
- if not self._usage_path.exists():
102
+ async def restore_usage_state(self) -> None:
103
+ if not await aiofiles.os.path.exists(self._usage_path):
100
104
  logger.debug("No usage state file found at %s", self._usage_path)
101
105
  return
102
106
 
103
107
  try:
104
- with self._usage_path.open(encoding="utf-8") as f:
105
- data = json.load(f)
108
+ async with aiofiles.open(self._usage_path, encoding="utf-8") as f:
109
+ content = await f.read()
110
+ data = json.loads(content)
106
111
 
107
112
  state = UsageState.model_validate(data)
108
113
  except Exception as exc:
shotgun/api_endpoints.py CHANGED
@@ -1,10 +1,14 @@
1
1
  """Shotgun backend service API endpoints and URLs."""
2
2
 
3
+ from shotgun.settings import settings
4
+
3
5
  # Shotgun Web API base URL (for authentication/subscription)
4
- # Can be overridden with environment variable
5
- SHOTGUN_WEB_BASE_URL = "https://api-219702594231.us-east4.run.app"
6
+ # Can be overridden with SHOTGUN_WEB_BASE_URL environment variable
7
+ SHOTGUN_WEB_BASE_URL = settings.api.web_base_url
8
+
6
9
  # Shotgun's LiteLLM proxy base URL (for AI model requests)
7
- LITELLM_PROXY_BASE_URL = "https://litellm-219702594231.us-east4.run.app"
10
+ # Can be overridden with SHOTGUN_ACCOUNT_LLM_BASE_URL environment variable
11
+ LITELLM_PROXY_BASE_URL = settings.api.account_llm_base_url
8
12
 
9
13
  # Provider-specific LiteLLM proxy endpoints
10
14
  LITELLM_PROXY_ANTHROPIC_BASE = f"{LITELLM_PROXY_BASE_URL}/anthropic"
@@ -13,7 +13,7 @@ POSTHOG_PROJECT_ID = '191396'
13
13
 
14
14
  # Logfire configuration embedded at build time (only for dev builds)
15
15
  LOGFIRE_ENABLED = 'true'
16
- LOGFIRE_TOKEN = 'pylf_v1_us_KZ5NM1pP3NwgJkbBJt6Ftdzk8mMhmrXcGJHQQgDJ1LfK'
16
+ LOGFIRE_TOKEN = 'pylf_v1_us_RwZMlJm1tX6j0PL5RWWbmZpzK2hLBNtFWStNKlySfjh8'
17
17
 
18
18
  # Build metadata
19
19
  BUILD_TIME_ENV = "production" if SENTRY_DSN else "development"
shotgun/cli/clear.py ADDED
@@ -0,0 +1,53 @@
1
+ """Clear command for shotgun CLI."""
2
+
3
+ import asyncio
4
+ from pathlib import Path
5
+
6
+ import typer
7
+ from rich.console import Console
8
+
9
+ from shotgun.agents.conversation_manager import ConversationManager
10
+ from shotgun.logging_config import get_logger
11
+
12
+ app = typer.Typer(
13
+ name="clear", help="Clear the conversation history", no_args_is_help=False
14
+ )
15
+ logger = get_logger(__name__)
16
+ console = Console()
17
+
18
+
19
@app.callback(invoke_without_command=True)
def clear() -> None:
    """Clear the current conversation history.

    This command deletes the conversation file at ~/.shotgun-sh/conversation.json,
    removing all conversation history. Other files in ~/.shotgun-sh/ (config, usage,
    codebases, logs) are preserved.
    """
    try:
        # NOTE(review): path is hard-coded here; presumably should agree with the
        # get_shotgun_home() helper used elsewhere — confirm they match.
        history_path = Path.home() / ".shotgun-sh" / "conversation.json"

        # Nothing on disk — report and bail out early.
        if not history_path.exists():
            console.print(
                "[yellow]No conversation file found.[/yellow] Nothing to clear.",
                style="bold",
            )
            return

        # Delegate the actual deletion to the async conversation manager.
        asyncio.run(ConversationManager(history_path).clear())

        console.print(
            "[green]✓[/green] Conversation cleared successfully", style="bold"
        )
        logger.info("Conversation cleared successfully")

    except Exception as e:
        # CLI boundary: surface the failure, keep the traceback at debug level,
        # and exit non-zero.
        console.print(
            f"[red]Error:[/red] Failed to clear conversation: {e}", style="bold"
        )
        logger.debug("Full traceback:", exc_info=True)
        raise typer.Exit(code=1) from e
shotgun/cli/compact.py ADDED
@@ -0,0 +1,186 @@
1
+ """Compact command for shotgun CLI."""
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+ from typing import Annotated, Any
7
+
8
+ import typer
9
+ from pydantic_ai.usage import RequestUsage
10
+ from rich.console import Console
11
+
12
+ from shotgun.agents.config import get_provider_model
13
+ from shotgun.agents.conversation_manager import ConversationManager
14
+ from shotgun.agents.history.history_processors import token_limit_compactor
15
+ from shotgun.agents.history.token_estimation import estimate_tokens_from_messages
16
+ from shotgun.cli.models import OutputFormat
17
+ from shotgun.logging_config import get_logger
18
+
19
+ app = typer.Typer(
20
+ name="compact", help="Compact the conversation history", no_args_is_help=False
21
+ )
22
+ logger = get_logger(__name__)
23
+ console = Console()
24
+
25
+
26
@app.callback(invoke_without_command=True)
def compact(
    format: Annotated[
        OutputFormat,
        typer.Option(
            "--format",
            "-f",
            help="Output format: markdown or json",
        ),
    ] = OutputFormat.MARKDOWN,
) -> None:
    """Compact the current conversation history to reduce size.

    This command compacts the conversation in ~/.shotgun-sh/conversation.json
    by summarizing older messages while preserving recent context. The compacted
    conversation is automatically saved back to the file.
    """
    try:
        # All real work happens in the async helper; this wrapper only renders.
        stats = asyncio.run(compact_conversation())

        if format == OutputFormat.JSON:
            console.print_json(json.dumps(stats, indent=2))
        else:
            console.print(format_markdown(stats))

    except FileNotFoundError as e:
        # No conversation has ever been recorded — point the user at the TUI.
        console.print(
            f"[red]Error:[/red] {e}\n\n"
            "No conversation found. Start a TUI session first with: [cyan]shotgun[/cyan]",
            style="bold",
        )
        raise typer.Exit(code=1) from e
    except Exception as e:
        console.print(
            f"[red]Error:[/red] Failed to compact conversation: {e}", style="bold"
        )
        logger.debug("Full traceback:", exc_info=True)
        raise typer.Exit(code=1) from e
66
+
67
+
68
async def compact_conversation() -> dict[str, Any]:
    """Compact the conversation and return statistics.

    Loads ~/.shotgun-sh/conversation.json, force-compacts the agent message
    history via token_limit_compactor, saves the result back to the same file,
    and returns before/after/reduction metrics.

    Returns:
        Dictionary with compaction statistics including before/after metrics

    Raises:
        FileNotFoundError: If no conversation file exists on disk.
        ValueError: If the conversation is empty/corrupted or contains no
            agent messages.
    """
    # Get conversation file path
    conversation_file = Path.home() / ".shotgun-sh" / "conversation.json"

    if not conversation_file.exists():
        raise FileNotFoundError(f"Conversation file not found at {conversation_file}")

    # Load conversation
    manager = ConversationManager(conversation_file)
    conversation = await manager.load()

    if not conversation:
        raise ValueError("Conversation file is empty or corrupted")

    # Get agent messages only (not UI messages)
    agent_messages = conversation.get_agent_messages()

    if not agent_messages:
        raise ValueError("No agent messages found in conversation")

    # Get model config
    model_config = await get_provider_model()

    # Calculate before metrics
    original_message_count = len(agent_messages)
    original_tokens = await estimate_tokens_from_messages(agent_messages, model_config)

    # token_limit_compactor expects a RunContext-like object exposing
    # .deps.llm_model and .usage; build a minimal stand-in rather than a
    # full AgentDeps, since only those two attributes are read here.
    class CompactContext:
        def __init__(self, model_config: Any, usage: RequestUsage) -> None:
            self.deps = type("Deps", (), {"llm_model": model_config})()
            self.usage = usage

    # Create minimal usage info for compaction check
    usage = RequestUsage(input_tokens=original_tokens, output_tokens=0)
    ctx = CompactContext(model_config, usage)

    # Apply compaction with force=True to bypass threshold checks
    compacted_messages = await token_limit_compactor(ctx, agent_messages, force=True)

    # Calculate after metrics
    compacted_message_count = len(compacted_messages)
    compacted_tokens = await estimate_tokens_from_messages(
        compacted_messages, model_config
    )

    # Same formula for both message-count and token reductions.
    message_reduction = _percent_reduction(
        original_message_count, compacted_message_count
    )
    token_reduction = _percent_reduction(original_tokens, compacted_tokens)

    # Save compacted conversation
    conversation.set_agent_messages(compacted_messages)
    await manager.save(conversation)

    logger.info(
        f"Compacted conversation: {original_message_count} → {compacted_message_count} messages "
        f"({message_reduction:.1f}% reduction)"
    )

    return {
        "success": True,
        "before": {
            "messages": original_message_count,
            "estimated_tokens": original_tokens,
        },
        "after": {
            "messages": compacted_message_count,
            "estimated_tokens": compacted_tokens,
        },
        "reduction": {
            "messages_percent": round(message_reduction, 1),
            "tokens_percent": round(token_reduction, 1),
        },
    }


def _percent_reduction(before: int, after: int) -> float:
    """Return the percentage drop from *before* to *after* (0 when before is 0)."""
    return ((before - after) / before) * 100 if before > 0 else 0
+ }
158
+
159
+
160
def format_markdown(result: dict[str, Any]) -> str:
    """Format compaction result as markdown.

    Args:
        result: Dictionary with compaction statistics

    Returns:
        Formatted markdown string
    """
    before = result["before"]
    after = result["after"]
    reduction = result["reduction"]

    # Assemble the report line by line; thousands separators for counts.
    report_lines = [
        "# Conversation Compacted ✓",
        "",
        "## Before",
        f"- **Messages:** {before['messages']:,}",
        f"- **Estimated Tokens:** {before['estimated_tokens']:,}",
        "",
        "## After",
        f"- **Messages:** {after['messages']:,}",
        f"- **Estimated Tokens:** {after['estimated_tokens']:,}",
        "",
        "## Reduction",
        f"- **Messages:** {reduction['messages_percent']}%",
        f"- **Tokens:** {reduction['tokens_percent']}%",
        "",
    ]
    return "\n".join(report_lines)
shotgun/cli/config.py CHANGED
@@ -1,5 +1,6 @@
1
1
  """Configuration management CLI commands."""
2
2
 
3
+ import asyncio
3
4
  import json
4
5
  from typing import Annotated, Any
5
6
 
@@ -44,7 +45,7 @@ def init(
44
45
  console.print()
45
46
 
46
47
  # Initialize with defaults
47
- config_manager.initialize()
48
+ asyncio.run(config_manager.initialize())
48
49
 
49
50
  # Ask for provider
50
51
  provider_choices = ["openai", "anthropic", "google"]
@@ -76,7 +77,7 @@ def init(
76
77
 
77
78
  if api_key:
78
79
  # update_provider will automatically set selected_model for first provider
79
- config_manager.update_provider(provider, api_key=api_key)
80
+ asyncio.run(config_manager.update_provider(provider, api_key=api_key))
80
81
 
81
82
  console.print(
82
83
  f"\n✅ [bold green]Configuration saved to {config_manager.config_path}[/bold green]"
@@ -84,7 +85,7 @@ def init(
84
85
  console.print("🎯 You can now use Shotgun with your configured provider!")
85
86
 
86
87
  else:
87
- config_manager.initialize()
88
+ asyncio.run(config_manager.initialize())
88
89
  console.print(f"✅ Configuration initialized at {config_manager.config_path}")
89
90
 
90
91
 
@@ -112,7 +113,7 @@ def set(
112
113
 
113
114
  try:
114
115
  if api_key:
115
- config_manager.update_provider(provider, api_key=api_key)
116
+ asyncio.run(config_manager.update_provider(provider, api_key=api_key))
116
117
 
117
118
  console.print(f"✅ Configuration updated for {provider}")
118
119
 
@@ -133,8 +134,10 @@ def get(
133
134
  ] = False,
134
135
  ) -> None:
135
136
  """Display current configuration."""
137
+ import asyncio
138
+
136
139
  config_manager = get_config_manager()
137
- config = config_manager.load()
140
+ config = asyncio.run(config_manager.load())
138
141
 
139
142
  if json_output:
140
143
  # Convert to dict and mask secrets
shotgun/cli/context.py ADDED
@@ -0,0 +1,111 @@
1
+ """Context command for shotgun CLI."""
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+ from typing import Annotated
7
+
8
+ import typer
9
+ from rich.console import Console
10
+
11
+ from shotgun.agents.config import get_provider_model
12
+ from shotgun.agents.context_analyzer import (
13
+ ContextAnalysisOutput,
14
+ ContextAnalyzer,
15
+ ContextFormatter,
16
+ )
17
+ from shotgun.agents.conversation_manager import ConversationManager
18
+ from shotgun.cli.models import OutputFormat
19
+ from shotgun.logging_config import get_logger
20
+
21
+ app = typer.Typer(
22
+ name="context", help="Analyze conversation context usage", no_args_is_help=False
23
+ )
24
+ logger = get_logger(__name__)
25
+ console = Console()
26
+
27
+
28
+ @app.callback(invoke_without_command=True)
29
+ def context(
30
+ format: Annotated[
31
+ OutputFormat,
32
+ typer.Option(
33
+ "--format",
34
+ "-f",
35
+ help="Output format: markdown or json",
36
+ ),
37
+ ] = OutputFormat.MARKDOWN,
38
+ ) -> None:
39
+ """Analyze the current conversation's context usage.
40
+
41
+ This command analyzes the agent's message history from ~/.shotgun-sh/conversation.json
42
+ and displays token usage breakdown by message type. Only agent context is counted
43
+ (UI elements like hints are excluded).
44
+ """
45
+ try:
46
+ result = asyncio.run(analyze_context())
47
+
48
+ if format == OutputFormat.JSON:
49
+ # Output as JSON
50
+ console.print_json(json.dumps(result.json_data, indent=2))
51
+ else:
52
+ # Output as plain text (Markdown() reformats and makes categories inline)
53
+ console.print(result.markdown)
54
+
55
+ except FileNotFoundError as e:
56
+ console.print(
57
+ f"[red]Error:[/red] {e}\n\n"
58
+ "No conversation found. Start a TUI session first with: [cyan]shotgun[/cyan]",
59
+ style="bold",
60
+ )
61
+ raise typer.Exit(code=1) from e
62
+ except Exception as e:
63
+ console.print(f"[red]Error:[/red] Failed to analyze context: {e}", style="bold")
64
+ logger.debug("Full traceback:", exc_info=True)
65
+ raise typer.Exit(code=1) from e
66
+
67
+
68
+ async def analyze_context() -> ContextAnalysisOutput:
69
+ """Analyze the conversation context and return structured data.
70
+
71
+ Returns:
72
+ ContextAnalysisOutput with both markdown and JSON representations of the analysis
73
+ """
74
+ # Get conversation file path
75
+ conversation_file = Path.home() / ".shotgun-sh" / "conversation.json"
76
+
77
+ if not conversation_file.exists():
78
+ raise FileNotFoundError(f"Conversation file not found at {conversation_file}")
79
+
80
+ # Load conversation
81
+ manager = ConversationManager(conversation_file)
82
+ conversation = await manager.load()
83
+
84
+ if not conversation:
85
+ raise ValueError("Conversation file is empty or corrupted")
86
+
87
+ # Get agent messages only (not UI messages)
88
+ agent_messages = conversation.get_agent_messages()
89
+
90
+ if not agent_messages:
91
+ raise ValueError("No agent messages found in conversation")
92
+
93
+ # Get model config (use default provider settings)
94
+ model_config = await get_provider_model()
95
+
96
+ # Debug: Log the model being used
97
+ logger.debug(f"Using model: {model_config.name.value}")
98
+ logger.debug(f"Provider: {model_config.provider.value}")
99
+ logger.debug(f"Key provider: {model_config.key_provider.value}")
100
+ logger.debug(f"Max input tokens: {model_config.max_input_tokens}")
101
+
102
+ # Analyze with ContextAnalyzer
103
+ analyzer = ContextAnalyzer(model_config)
104
+ # For CLI, agent_messages and ui_message_history are the same (no hints in CLI mode)
105
+ analysis = await analyzer.analyze_conversation(agent_messages, list(agent_messages))
106
+
107
+ # Use formatter to generate markdown and JSON
108
+ markdown = ContextFormatter.format_markdown(analysis)
109
+ json_data = ContextFormatter.format_json(analysis)
110
+
111
+ return ContextAnalysisOutput(markdown=markdown, json_data=json_data)
shotgun/cli/export.py CHANGED
@@ -63,7 +63,7 @@ def export(
63
63
  )
64
64
 
65
65
  # Create the export agent with deps and provider
66
- agent, deps = create_export_agent(agent_runtime_options, provider)
66
+ agent, deps = asyncio.run(create_export_agent(agent_runtime_options, provider))
67
67
 
68
68
  # Start export process
69
69
  logger.info("🎯 Starting export...")
shotgun/cli/feedback.py CHANGED
@@ -28,9 +28,11 @@ def send_feedback(
28
28
  ],
29
29
  ) -> None:
30
30
  """Initialize Shotgun configuration."""
31
+ import asyncio
32
+
31
33
  config_manager = get_config_manager()
32
- config_manager.load()
33
- shotgun_instance_id = config_manager.get_shotgun_instance_id()
34
+ asyncio.run(config_manager.load())
35
+ shotgun_instance_id = asyncio.run(config_manager.get_shotgun_instance_id())
34
36
 
35
37
  if not description:
36
38
  console.print(
shotgun/cli/models.py CHANGED
@@ -8,3 +8,4 @@ class OutputFormat(StrEnum):
8
8
 
9
9
  TEXT = "text"
10
10
  JSON = "json"
11
+ MARKDOWN = "markdown"