shotgun-sh 0.2.6.dev1__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. shotgun/agents/agent_manager.py +694 -73
  2. shotgun/agents/common.py +69 -70
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +70 -35
  5. shotgun/agents/config/models.py +41 -1
  6. shotgun/agents/config/provider.py +33 -5
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +113 -5
  18. shotgun/agents/history/token_counting/anthropic.py +39 -3
  19. shotgun/agents/history/token_counting/base.py +14 -3
  20. shotgun/agents/history/token_counting/openai.py +11 -1
  21. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  22. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  23. shotgun/agents/history/token_counting/utils.py +0 -3
  24. shotgun/agents/models.py +50 -2
  25. shotgun/agents/plan.py +6 -7
  26. shotgun/agents/research.py +7 -8
  27. shotgun/agents/specify.py +6 -7
  28. shotgun/agents/tasks.py +6 -7
  29. shotgun/agents/tools/__init__.py +0 -2
  30. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  31. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  32. shotgun/agents/tools/codebase/file_read.py +11 -2
  33. shotgun/agents/tools/codebase/query_graph.py +6 -0
  34. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  35. shotgun/agents/tools/file_management.py +82 -16
  36. shotgun/agents/tools/registry.py +217 -0
  37. shotgun/agents/tools/web_search/__init__.py +8 -8
  38. shotgun/agents/tools/web_search/anthropic.py +8 -2
  39. shotgun/agents/tools/web_search/gemini.py +7 -1
  40. shotgun/agents/tools/web_search/openai.py +7 -1
  41. shotgun/agents/tools/web_search/utils.py +2 -2
  42. shotgun/agents/usage_manager.py +16 -11
  43. shotgun/api_endpoints.py +7 -3
  44. shotgun/build_constants.py +3 -3
  45. shotgun/cli/clear.py +53 -0
  46. shotgun/cli/compact.py +186 -0
  47. shotgun/cli/config.py +8 -5
  48. shotgun/cli/context.py +111 -0
  49. shotgun/cli/export.py +1 -1
  50. shotgun/cli/feedback.py +4 -2
  51. shotgun/cli/models.py +1 -0
  52. shotgun/cli/plan.py +1 -1
  53. shotgun/cli/research.py +1 -1
  54. shotgun/cli/specify.py +1 -1
  55. shotgun/cli/tasks.py +1 -1
  56. shotgun/cli/update.py +16 -2
  57. shotgun/codebase/core/change_detector.py +5 -3
  58. shotgun/codebase/core/code_retrieval.py +4 -2
  59. shotgun/codebase/core/ingestor.py +10 -8
  60. shotgun/codebase/core/manager.py +13 -4
  61. shotgun/codebase/core/nl_query.py +1 -1
  62. shotgun/exceptions.py +32 -0
  63. shotgun/logging_config.py +18 -27
  64. shotgun/main.py +73 -11
  65. shotgun/posthog_telemetry.py +37 -28
  66. shotgun/prompts/agents/export.j2 +18 -1
  67. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  68. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  69. shotgun/prompts/agents/plan.j2 +1 -1
  70. shotgun/prompts/agents/research.j2 +1 -1
  71. shotgun/prompts/agents/specify.j2 +270 -3
  72. shotgun/prompts/agents/tasks.j2 +1 -1
  73. shotgun/sentry_telemetry.py +163 -16
  74. shotgun/settings.py +238 -0
  75. shotgun/telemetry.py +18 -33
  76. shotgun/tui/app.py +243 -43
  77. shotgun/tui/commands/__init__.py +1 -1
  78. shotgun/tui/components/context_indicator.py +179 -0
  79. shotgun/tui/components/mode_indicator.py +70 -0
  80. shotgun/tui/components/status_bar.py +48 -0
  81. shotgun/tui/containers.py +91 -0
  82. shotgun/tui/dependencies.py +39 -0
  83. shotgun/tui/protocols.py +45 -0
  84. shotgun/tui/screens/chat/__init__.py +5 -0
  85. shotgun/tui/screens/chat/chat.tcss +54 -0
  86. shotgun/tui/screens/chat/chat_screen.py +1254 -0
  87. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  88. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  89. shotgun/tui/screens/chat/help_text.py +40 -0
  90. shotgun/tui/screens/chat/prompt_history.py +48 -0
  91. shotgun/tui/screens/chat.tcss +11 -0
  92. shotgun/tui/screens/chat_screen/command_providers.py +78 -2
  93. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  94. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  95. shotgun/tui/screens/chat_screen/history/chat_history.py +115 -0
  96. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  97. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  98. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  99. shotgun/tui/screens/confirmation_dialog.py +151 -0
  100. shotgun/tui/screens/feedback.py +4 -4
  101. shotgun/tui/screens/github_issue.py +102 -0
  102. shotgun/tui/screens/model_picker.py +49 -24
  103. shotgun/tui/screens/onboarding.py +431 -0
  104. shotgun/tui/screens/pipx_migration.py +153 -0
  105. shotgun/tui/screens/provider_config.py +50 -27
  106. shotgun/tui/screens/shotgun_auth.py +2 -2
  107. shotgun/tui/screens/welcome.py +23 -12
  108. shotgun/tui/services/__init__.py +5 -0
  109. shotgun/tui/services/conversation_service.py +184 -0
  110. shotgun/tui/state/__init__.py +7 -0
  111. shotgun/tui/state/processing_state.py +185 -0
  112. shotgun/tui/utils/mode_progress.py +14 -7
  113. shotgun/tui/widgets/__init__.py +5 -0
  114. shotgun/tui/widgets/widget_coordinator.py +263 -0
  115. shotgun/utils/file_system_utils.py +22 -2
  116. shotgun/utils/marketing.py +110 -0
  117. shotgun/utils/update_checker.py +69 -14
  118. shotgun_sh-0.2.17.dist-info/METADATA +465 -0
  119. shotgun_sh-0.2.17.dist-info/RECORD +194 -0
  120. {shotgun_sh-0.2.6.dev1.dist-info → shotgun_sh-0.2.17.dist-info}/entry_points.txt +1 -0
  121. {shotgun_sh-0.2.6.dev1.dist-info → shotgun_sh-0.2.17.dist-info}/licenses/LICENSE +1 -1
  122. shotgun/agents/tools/user_interaction.py +0 -37
  123. shotgun/tui/screens/chat.py +0 -804
  124. shotgun/tui/screens/chat_screen/history.py +0 -401
  125. shotgun_sh-0.2.6.dev1.dist-info/METADATA +0 -467
  126. shotgun_sh-0.2.6.dev1.dist-info/RECORD +0 -156
  127. {shotgun_sh-0.2.6.dev1.dist-info → shotgun_sh-0.2.17.dist-info}/WHEEL +0 -0
shotgun/agents/export.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
-def create_export_agent(
+async def create_export_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create an export agent with file management capabilities.
 
     Args:
@@ -40,7 +39,7 @@ def create_export_agent(
     # Use partial to create system prompt function for export agent
     system_prompt_fn = partial(build_agent_system_prompt, "export")
 
-    agent, deps = create_base_agent(
+    agent, deps = await create_base_agent(
         system_prompt_fn,
         agent_runtime_options,
         provider=provider,
@@ -50,11 +49,11 @@
 
 
 async def run_export_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     instruction: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Export artifacts based on the given instruction.
 
     Args:
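Note on the export.py hunks above: create_export_agent becomes a coroutine, and both functions are now typed against the structured AgentResponse output instead of str | DeferredToolRequests. A minimal caller sketch under those signatures; the default-constructed AgentRuntimeOptions() and the result.output access are assumptions from typical pydantic-ai usage, not code taken from the package:

    # Hypothetical caller, inferred from the signatures in this diff.
    from shotgun.agents.export import create_export_agent, run_export_agent
    from shotgun.agents.models import AgentRuntimeOptions

    async def export_example() -> None:
        # create_export_agent is now async and must be awaited
        agent, deps = await create_export_agent(AgentRuntimeOptions())
        result = await run_export_agent(agent, "export the research notes", deps)
        output = result.output  # AgentResponse, not a plain string
        print(output.response)
        if output.clarifying_questions:
            print("Agent asked:", output.clarifying_questions)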
shotgun/agents/history/compaction.py CHANGED
@@ -13,7 +13,7 @@ logger = get_logger(__name__)
 
 
 async def apply_persistent_compaction(
-    messages: list[ModelMessage], deps: AgentDeps
+    messages: list[ModelMessage], deps: AgentDeps, force: bool = False
 ) -> list[ModelMessage]:
     """Apply compaction to message history for persistent storage.
 
@@ -23,6 +23,7 @@ async def apply_persistent_compaction(
     Args:
        messages: Full message history from agent run
        deps: Agent dependencies containing model config
+       force: If True, force compaction even if below token threshold
 
     Returns:
        Compacted message history that should be stored as conversation state
@@ -46,7 +47,7 @@
            self.usage = usage
 
     ctx = MockContext(deps, usage)
-    compacted_messages = await token_limit_compactor(ctx, messages)
+    compacted_messages = await token_limit_compactor(ctx, messages, force=force)
 
     # Log the result for monitoring
     original_size = len(messages)
@@ -59,17 +60,21 @@
            f"({reduction_pct:.1f}% reduction)"
        )
 
-        # Track persistent compaction event
+        # Track persistent compaction event with simple metrics (fast, no token counting)
        track_event(
            "persistent_compaction_applied",
            {
+               # Basic compaction metrics
                "messages_before": original_size,
                "messages_after": compacted_size,
-               "tokens_before": estimated_tokens,
                "reduction_percentage": round(reduction_pct, 2),
                "agent_mode": deps.agent_mode.value
                if hasattr(deps, "agent_mode") and deps.agent_mode
                else "unknown",
+               # Model and provider info (no computation needed)
+               "model_name": deps.llm_model.name.value,
+               "provider": deps.llm_model.provider.value,
+               "key_provider": deps.llm_model.key_provider.value,
            },
        )
    else:
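The new force flag threads from apply_persistent_compaction into token_limit_compactor and bypasses the token-threshold check. A sketch of how a caller (for example the new shotgun/cli/compact.py command) might use it; the wrapper function here is hypothetical:

    # Hypothetical call sites, built directly on the new signature above.
    from shotgun.agents.history.compaction import apply_persistent_compaction

    async def compact_now(messages, deps):
        # Explicit /compact-style request: compact even below the threshold
        return await apply_persistent_compaction(messages, deps, force=True)

    async def compact_if_needed(messages, deps):
        # Default path is unchanged: compaction only fires past the threshold
        return await apply_persistent_compaction(messages, deps)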
shotgun/agents/history/context_extraction.py CHANGED
@@ -1,5 +1,9 @@
 """Context extraction utilities for history processing."""
 
+import json
+import logging
+import traceback
+
 from pydantic_ai.messages import (
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
@@ -16,6 +20,46 @@ from pydantic_ai.messages import (
     UserPromptPart,
 )
 
+logger = logging.getLogger(__name__)
+
+
+def _safely_parse_tool_args(args: dict[str, object] | str | None) -> dict[str, object]:
+    """Safely parse tool call arguments, handling incomplete/invalid JSON.
+
+    Args:
+        args: Tool call arguments (dict, JSON string, or None)
+
+    Returns:
+        Parsed args dict, or empty dict if parsing fails
+    """
+    if args is None:
+        return {}
+
+    if isinstance(args, dict):
+        return args
+
+    if not isinstance(args, str):
+        return {}
+
+    try:
+        parsed = json.loads(args)
+        return parsed if isinstance(parsed, dict) else {}
+    except (json.JSONDecodeError, ValueError) as e:
+        # Only log warning if it looks like JSON (starts with { or [) - incomplete JSON
+        # Plain strings are valid args and shouldn't trigger warnings
+        stripped_args = args.strip()
+        if stripped_args.startswith(("{", "[")):
+            args_preview = args[:100] + "..." if len(args) > 100 else args
+            logger.warning(
+                "Detected incomplete/invalid JSON in tool call args during parsing",
+                extra={
+                    "args_preview": args_preview,
+                    "error": str(e),
+                    "args_length": len(args),
+                },
+            )
+        return {}
+
 
 def extract_context_from_messages(messages: list[ModelMessage]) -> str:
     """Extract context from a list of messages for summarization."""
@@ -87,12 +131,55 @@ def extract_context_from_part(
         return f"<ASSISTANT_TEXT>\n{message_part.content}\n</ASSISTANT_TEXT>"
 
     elif isinstance(message_part, ToolCallPart):
-        if isinstance(message_part.args, dict):
-            args_str = ", ".join(f"{k}={repr(v)}" for k, v in message_part.args.items())
-            tool_call_str = f"{message_part.tool_name}({args_str})"
-        else:
-            tool_call_str = f"{message_part.tool_name}({message_part.args})"
-        return f"<TOOL_CALL>\n{tool_call_str}\n</TOOL_CALL>"
+        # Safely parse args to avoid crashes from incomplete JSON during streaming
+        try:
+            parsed_args = _safely_parse_tool_args(message_part.args)
+            if parsed_args:
+                # Successfully parsed as dict - format nicely
+                args_str = ", ".join(f"{k}={repr(v)}" for k, v in parsed_args.items())
+                tool_call_str = f"{message_part.tool_name}({args_str})"
+            elif isinstance(message_part.args, str) and message_part.args:
+                # Non-empty string that didn't parse as JSON
+                # Check if it looks like JSON (starts with { or [) - if so, it's incomplete
+                stripped_args = message_part.args.strip()
+                if stripped_args.startswith(("{", "[")):
+                    # Looks like incomplete JSON - log warning and show empty parens
+                    args_preview = (
+                        stripped_args[:100] + "..."
+                        if len(stripped_args) > 100
+                        else stripped_args
+                    )
+                    stack_trace = "".join(traceback.format_stack())
+                    logger.warning(
+                        "ToolCallPart with unparseable args encountered during context extraction",
+                        extra={
+                            "tool_name": message_part.tool_name,
+                            "tool_call_id": message_part.tool_call_id,
+                            "args_preview": args_preview,
+                            "args_type": type(message_part.args).__name__,
+                            "stack_trace": stack_trace,
+                        },
+                    )
+                    tool_call_str = f"{message_part.tool_name}()"
+                else:
+                    # Plain string arg - display as-is
+                    tool_call_str = f"{message_part.tool_name}({message_part.args})"
+            else:
+                # No args
+                tool_call_str = f"{message_part.tool_name}()"
+            return f"<TOOL_CALL>\n{tool_call_str}\n</TOOL_CALL>"
+        except Exception as e:  # pragma: no cover - defensive catch-all
+            # If anything goes wrong, log full exception with stack trace
+            logger.error(
+                "Unexpected error processing ToolCallPart",
+                exc_info=True,
+                extra={
+                    "tool_name": message_part.tool_name,
+                    "tool_call_id": message_part.tool_call_id,
+                    "error": str(e),
+                },
+            )
+            return f"<TOOL_CALL>\n{message_part.tool_name}()\n</TOOL_CALL>"
 
     elif isinstance(message_part, BuiltinToolCallPart):
         return f"<BUILTIN_TOOL_CALL>\n{message_part.tool_name}\n</BUILTIN_TOOL_CALL>"
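_safely_parse_tool_args exists because ToolCallPart.args can hold truncated JSON while a model response is still streaming. A self-contained illustration of the contract, using a local mirror of the helper (logging omitted):

    import json

    def parse_like_helper(args):
        """Local mirror of _safely_parse_tool_args, for illustration only."""
        if args is None:
            return {}
        if isinstance(args, dict):
            return args
        if not isinstance(args, str):
            return {}
        try:
            parsed = json.loads(args)
            return parsed if isinstance(parsed, dict) else {}
        except (json.JSONDecodeError, ValueError):
            return {}

    assert parse_like_helper({"path": "a.py"}) == {"path": "a.py"}   # dict passes through
    assert parse_like_helper('{"path": "a.py"}') == {"path": "a.py"} # valid JSON parses
    assert parse_like_helper('{"path": "a.p') == {}                  # truncated stream JSON
    assert parse_like_helper("plain string") == {}                   # non-JSON string arg
    assert parse_like_helper(None) == {}                             # missing args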
shotgun/agents/history/history_processors.py CHANGED
@@ -1,7 +1,9 @@
 """History processors for managing conversation history in Shotgun agents."""
 
+from collections.abc import Awaitable, Callable
 from typing import TYPE_CHECKING, Any, Protocol
 
+from anthropic import APIStatusError
 from pydantic_ai import ModelSettings
 from pydantic_ai.messages import (
     ModelMessage,
@@ -14,6 +16,7 @@ from pydantic_ai.messages import (
 from shotgun.agents.llm import shotgun_model_request
 from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
 from shotgun.agents.models import AgentDeps
+from shotgun.exceptions import ContextSizeLimitExceeded
 from shotgun.logging_config import get_logger
 from shotgun.posthog_telemetry import track_event
 from shotgun.prompts import PromptLoader
@@ -51,6 +54,86 @@ logger = get_logger(__name__)
 prompt_loader = PromptLoader()
 
 
+async def _safe_token_estimation(
+    estimation_func: Callable[..., Awaitable[int]],
+    model_name: str,
+    max_tokens: int,
+    *args: Any,
+    **kwargs: Any,
+) -> int:
+    """Safely estimate tokens with proper error handling.
+
+    Wraps token estimation functions to handle failures gracefully.
+    Only RuntimeError (from token counters) is wrapped in ContextSizeLimitExceeded.
+    Other errors (network, auth) are allowed to bubble up.
+
+    Args:
+        estimation_func: Async function that estimates tokens
+        model_name: Name of the model for error messages
+        max_tokens: Maximum tokens for the model
+        *args: Arguments to pass to estimation_func
+        **kwargs: Keyword arguments to pass to estimation_func
+
+    Returns:
+        Token count from estimation_func
+
+    Raises:
+        ContextSizeLimitExceeded: If token counting fails with RuntimeError
+        Exception: Any other exceptions from estimation_func
+    """
+    try:
+        return await estimation_func(*args, **kwargs)
+    except Exception as e:
+        # Log the error with full context
+        logger.warning(
+            f"Token counting failed for {model_name}",
+            extra={
+                "error_type": type(e).__name__,
+                "error_message": str(e),
+                "model": model_name,
+            },
+        )
+
+        # Token counting behavior with oversized context (verified via testing):
+        #
+        # 1. OpenAI/tiktoken:
+        #    - Successfully counts any size (tested with 752K tokens, no error)
+        #    - Library errors: ValueError, KeyError, AttributeError, SSLError (file/cache issues)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # 2. Gemini/SentencePiece:
+        #    - Successfully counts any size (tested with 752K tokens, no error)
+        #    - Library errors: RuntimeError, IOError, TypeError (file/model loading issues)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # 3. Anthropic API:
+        #    - Successfully counts large token counts (tested with 752K tokens, no error)
+        #    - Only enforces 32 MB request size limit (not token count)
+        #    - Raises: APIStatusError(413) with error type 'request_too_large' for 32MB+ requests
+        #    - Other API errors: APIConnectionError, RateLimitError, APIStatusError (4xx/5xx)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # IMPORTANT: No provider raises errors for "too many tokens" during counting.
+        # Token count validation happens separately by comparing count to max_input_tokens.
+        #
+        # We wrap RuntimeError (library-level failures from tiktoken/sentencepiece).
+        # We also wrap Anthropic's 413 error (request exceeds 32 MB) as it indicates
+        # context is effectively too large and needs user action to reduce it.
+        if isinstance(e, RuntimeError):
+            raise ContextSizeLimitExceeded(
+                model_name=model_name, max_tokens=max_tokens
+            ) from e
+
+        # Check for Anthropic's 32 MB request size limit (APIStatusError with status 413)
+        if isinstance(e, APIStatusError) and e.status_code == 413:
+            raise ContextSizeLimitExceeded(
+                model_name=model_name, max_tokens=max_tokens
+            ) from e
+
+        # Re-raise other exceptions (network errors, auth failures, etc.)
+        raise
+
+
 def is_summary_part(part: Any) -> bool:
     """Check if a message part is a compacted summary."""
     return isinstance(part, TextPart) and part.content.startswith(SUMMARY_MARKER)
@@ -127,6 +210,7 @@ calculate_max_summarization_tokens = _calculate_max_summarization_tokens
 async def token_limit_compactor(
     ctx: ContextProtocol,
     messages: list[ModelMessage],
+    force: bool = False,
 ) -> list[ModelMessage]:
     """Compact message history based on token limits with incremental processing.
 
@@ -139,6 +223,7 @@
     Args:
        ctx: Run context with usage information and dependencies
        messages: Current conversation history
+       force: If True, force compaction even if below token threshold
 
     Returns:
        Compacted list of messages within token limits
@@ -155,9 +240,15 @@
 
     if last_summary_index is not None:
        # Check if post-summary conversation exceeds threshold for incremental compaction
-       post_summary_tokens = await estimate_post_summary_tokens(
-           messages, last_summary_index, deps.llm_model
+       post_summary_tokens = await _safe_token_estimation(
+           estimate_post_summary_tokens,
+           deps.llm_model.name,
+           model_max_tokens,
+           messages,
+           last_summary_index,
+           deps.llm_model,
        )
+
        post_summary_percentage = (
            (post_summary_tokens / max_tokens) * 100 if max_tokens > 0 else 0
        )
@@ -169,7 +260,7 @@
        )
 
        # Only do incremental compaction if post-summary conversation exceeds threshold
-       if post_summary_tokens < max_tokens:
+       if post_summary_tokens < max_tokens and not force:
            logger.debug(
                f"Post-summary conversation under threshold ({post_summary_tokens} < {max_tokens}), "
                f"keeping all {len(messages)} messages"
@@ -340,6 +431,7 @@
            else 0
        )
 
+       # Track incremental compaction with simple metrics (fast, no token counting)
        track_event(
            "context_compaction_triggered",
            {
@@ -352,6 +444,10 @@
                "agent_mode": deps.agent_mode.value
                if hasattr(deps, "agent_mode") and deps.agent_mode
                else "unknown",
+               # Model and provider info (no computation needed)
+               "model_name": deps.llm_model.name.value,
+               "provider": deps.llm_model.provider.value,
+               "key_provider": deps.llm_model.key_provider.value,
            },
        )
 
@@ -359,7 +455,14 @@
 
    else:
        # Check if total conversation exceeds threshold for full compaction
-       total_tokens = await estimate_tokens_from_messages(messages, deps.llm_model)
+       total_tokens = await _safe_token_estimation(
+           estimate_tokens_from_messages,
+           deps.llm_model.name,
+           model_max_tokens,
+           messages,
+           deps.llm_model,
+       )
+
        total_percentage = (total_tokens / max_tokens) * 100 if max_tokens > 0 else 0
 
        logger.debug(
@@ -368,7 +471,7 @@
        )
 
        # Only do full compaction if total conversation exceeds threshold
-       if total_tokens < max_tokens:
+       if total_tokens < max_tokens and not force:
            logger.debug(
                f"Total conversation under threshold ({total_tokens} < {max_tokens}), "
                f"keeping all {len(messages)} messages"
@@ -468,6 +571,7 @@ async def _full_compaction(
    tokens_before = current_tokens  # Already calculated above
    tokens_after = summary_usage.output_tokens if summary_usage else 0
 
+   # Track full compaction with simple metrics (fast, no token counting)
    track_event(
        "context_compaction_triggered",
        {
@@ -480,6 +584,10 @@
            "agent_mode": deps.agent_mode.value
            if hasattr(deps, "agent_mode") and deps.agent_mode
            else "unknown",
+           # Model and provider info (no computation needed)
+           "model_name": deps.llm_model.name.value,
+           "provider": deps.llm_model.provider.value,
+           "key_provider": deps.llm_model.key_provider.value,
        },
    )
 
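_safe_token_estimation funnels counter failures into a single user-facing error while letting network and auth errors bubble up. A reduced, self-contained sketch of the same pattern; ContextError and the stub counter are stand-ins, and the real helper additionally maps Anthropic's HTTP 413 to ContextSizeLimitExceeded:

    import asyncio

    class ContextError(Exception):
        """Stand-in for shotgun.exceptions.ContextSizeLimitExceeded."""

    async def safe_estimate(estimation_func, model_name, max_tokens, *args, **kwargs):
        try:
            return await estimation_func(*args, **kwargs)
        except RuntimeError as e:
            # Library-level counter failures (tiktoken/sentencepiece/Anthropic wrappers)
            raise ContextError(f"{model_name}: context too large (> {max_tokens} tokens)") from e
        # Anything else (network, auth) propagates unchanged.

    async def broken_counter(text: str) -> int:
        raise RuntimeError("tokenizer blew up")

    async def main():
        try:
            await safe_estimate(broken_counter, "gpt-4o", 128_000, "hello")
        except ContextError as err:
            print(err)

    asyncio.run(main())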
shotgun/agents/history/token_counting/anthropic.py CHANGED
@@ -1,5 +1,6 @@
 """Anthropic token counting using official client."""
 
+import logfire
 from pydantic_ai.messages import ModelMessage
 
 from shotgun.agents.config.models import KeyProvider
@@ -49,7 +50,15 @@ class AnthropicTokenCounter(TokenCounter):
                f"Initialized async Anthropic token counter for {model_name} via direct API"
            )
        except Exception as e:
-           raise RuntimeError("Failed to initialize Anthropic async client") from e
+           logfire.exception(
+               f"Failed to initialize Anthropic token counter for {model_name}",
+               model_name=model_name,
+               key_provider=key_provider.value,
+               exception_type=type(e).__name__,
+           )
+           raise RuntimeError(
+               f"Failed to initialize Anthropic async client for {model_name}: {type(e).__name__}: {str(e)}"
+           ) from e
 
    async def count_tokens(self, text: str) -> int:
        """Count tokens using Anthropic's official API (async).
@@ -63,16 +72,39 @@
        Raises:
            RuntimeError: If API call fails
        """
+       # Handle empty text to avoid unnecessary API calls
+       # Anthropic API requires non-empty content, so we need a strict check
+       if not text or not text.strip():
+           return 0
+
+       # Additional validation: ensure the text has actual content
+       # Some edge cases might have only whitespace or control characters
+       cleaned_text = text.strip()
+       if not cleaned_text:
+           return 0
+
        try:
            # Anthropic API expects messages format and model parameter
            # Use await with async client
            result = await self.client.messages.count_tokens(
-               messages=[{"role": "user", "content": text}], model=self.model_name
+               messages=[{"role": "user", "content": cleaned_text}],
+               model=self.model_name,
            )
            return result.input_tokens
        except Exception as e:
+           # Create a preview of the text for logging (truncated to avoid huge logs)
+           text_preview = text[:100] + "..." if len(text) > 100 else text
+
+           logfire.exception(
+               f"Anthropic token counting failed for {self.model_name}",
+               model_name=self.model_name,
+               text_length=len(text),
+               text_preview=text_preview,
+               exception_type=type(e).__name__,
+               exception_message=str(e),
+           )
            raise RuntimeError(
-               f"Anthropic token counting API failed for {self.model_name}"
+               f"Anthropic token counting API failed for {self.model_name}: {type(e).__name__}: {str(e)}"
            ) from e
 
    async def count_message_tokens(self, messages: list[ModelMessage]) -> int:
@@ -87,5 +119,9 @@
        Raises:
            RuntimeError: If token counting fails
        """
+       # Handle empty message list early
+       if not messages:
+           return 0
+
        total_text = extract_text_from_messages(messages)
        return await self.count_tokens(total_text)
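The guards added to count_tokens mean whitespace-only input never reaches the API, which would otherwise reject empty content. A runnable illustration of the short-circuit, with a hypothetical stand-in for the API call:

    import asyncio

    async def fake_api_count(content: str) -> int:  # hypothetical stand-in
        return max(1, len(content) // 4)            # rough 4-chars-per-token heuristic

    async def count_tokens_guarded(text: str) -> int:
        if not text or not text.strip():
            return 0          # short-circuit: never send empty content to the API
        return await fake_api_count(text.strip())

    assert asyncio.run(count_tokens_guarded("   ")) == 0
    assert asyncio.run(count_tokens_guarded("hello world")) >= 1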
shotgun/agents/history/token_counting/base.py CHANGED
@@ -56,12 +56,23 @@ def extract_text_from_messages(messages: list[ModelMessage]) -> str:
        if hasattr(message, "parts"):
            for part in message.parts:
                if hasattr(part, "content") and isinstance(part.content, str):
-                   text_parts.append(part.content)
+                   # Only add non-empty content
+                   if part.content.strip():
+                       text_parts.append(part.content)
                else:
                    # Handle non-text parts (tool calls, etc.)
-                   text_parts.append(str(part))
+                   part_str = str(part)
+                   if part_str.strip():
+                       text_parts.append(part_str)
        else:
            # Handle messages without parts
-           text_parts.append(str(message))
+           msg_str = str(message)
+           if msg_str.strip():
+               text_parts.append(msg_str)
+
+   # If no valid text parts found, return a minimal placeholder
+   # This ensures we never send completely empty content to APIs
+   if not text_parts:
+       return "."
 
    return "\n".join(text_parts)
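extract_text_from_messages now drops whitespace-only parts and falls back to a single "." so the counters never submit empty content. The filtering rule in isolation:

    def join_non_empty(parts: list[str]) -> str:
        """Illustration of the new filtering: drop blank parts, never return ''."""
        kept = [p for p in parts if p.strip()]
        if not kept:
            return "."   # minimal placeholder so APIs never see empty content
        return "\n".join(kept)

    assert join_non_empty(["hello", "  ", "world"]) == "hello\nworld"
    assert join_non_empty(["", "   "]) == "."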
shotgun/agents/history/token_counting/openai.py CHANGED
@@ -57,9 +57,15 @@ class OpenAITokenCounter(TokenCounter):
        Raises:
            RuntimeError: If token counting fails
        """
+       # Handle empty text to avoid unnecessary encoding
+       if not text or not text.strip():
+           return 0
+
        try:
            return len(self.encoding.encode(text))
-       except Exception as e:
+       except BaseException as e:
+           # Must catch BaseException to handle PanicException from tiktoken's Rust layer
+           # which can occur with extremely long texts. Regular Exception won't catch it.
            raise RuntimeError(
                f"Failed to count tokens for OpenAI model {self.model_name}"
            ) from e
@@ -76,5 +82,9 @@
        Raises:
            RuntimeError: If token counting fails
        """
+       # Handle empty message list early
+       if not messages:
+           return 0
+
        total_text = extract_text_from_messages(messages)
        return await self.count_tokens(total_text)
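The switch to except BaseException matters because panics from tiktoken's Rust layer surface as pyo3's PanicException, which subclasses BaseException rather than Exception. A sketch of the wrapping pattern; PanicException here is a local stand-in raised manually, since triggering a real panic requires pathological input:

    class PanicException(BaseException):
        """Stand-in for pyo3_runtime.PanicException (not an Exception subclass)."""

    def encode_or_panic(text: str) -> int:
        if len(text) > 1_000_000:
            raise PanicException("rust layer panicked")
        return len(text.split())

    def count(text: str) -> int:
        try:
            return encode_or_panic(text)
        except BaseException as e:   # 'except Exception' would NOT catch the panic
            raise RuntimeError("token counting failed") from e

    print(count("hello world"))      # 2
    try:
        count("x" * 2_000_000)
    except RuntimeError as e:
        print("wrapped:", e)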
shotgun/agents/history/token_counting/sentencepiece_counter.py CHANGED
@@ -88,6 +88,10 @@ class SentencePieceTokenCounter(TokenCounter):
        Raises:
            RuntimeError: If token counting fails
        """
+       # Handle empty text to avoid unnecessary tokenization
+       if not text or not text.strip():
+           return 0
+
        await self._ensure_tokenizer()
 
        if self.sp is None:
@@ -115,5 +119,9 @@
        Raises:
            RuntimeError: If token counting fails
        """
+       # Handle empty message list early
+       if not messages:
+           return 0
+
        total_text = extract_text_from_messages(messages)
        return await self.count_tokens(total_text)
shotgun/agents/history/token_counting/tokenizer_cache.py CHANGED
@@ -3,6 +3,7 @@
 import hashlib
 from pathlib import Path
 
+import aiofiles
 import httpx
 
 from shotgun.logging_config import get_logger
@@ -78,7 +79,8 @@ async def download_gemini_tokenizer() -> Path:
 
    # Atomic write: write to temp file first, then rename
    temp_path = cache_path.with_suffix(".tmp")
-   temp_path.write_bytes(content)
+   async with aiofiles.open(temp_path, "wb") as f:
+       await f.write(content)
    temp_path.rename(cache_path)
 
    logger.info(f"Gemini tokenizer downloaded and cached at {cache_path}")
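The tokenizer download keeps the temp-file-then-rename pattern but now writes through aiofiles, so a large download does not block the event loop and readers never observe a half-written cache file. The pattern in isolation; the path and payload are illustrative:

    import asyncio
    from pathlib import Path

    import aiofiles

    async def atomic_write(cache_path: Path, content: bytes) -> None:
        temp_path = cache_path.with_suffix(".tmp")
        async with aiofiles.open(temp_path, "wb") as f:
            await f.write(content)          # non-blocking write off the event loop
        temp_path.rename(cache_path)        # atomic replace on POSIX filesystems

    asyncio.run(atomic_write(Path("/tmp/tokenizer.model"), b"example-bytes"))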
shotgun/agents/history/token_counting/utils.py CHANGED
@@ -44,9 +44,6 @@ def get_token_counter(model_config: ModelConfig) -> TokenCounter:
 
    # Return cached instance if available
    if cache_key in _token_counter_cache:
-       logger.debug(
-           f"Reusing cached token counter for {model_config.provider.value}:{model_config.name}"
-       )
        return _token_counter_cache[cache_key]
 
    # Create new instance and cache it
shotgun/agents/models.py CHANGED
@@ -19,6 +19,30 @@ if TYPE_CHECKING:
    from shotgun.codebase.service import CodebaseService
 
 
+class AgentResponse(BaseModel):
+    """Structured response from an agent with optional clarifying questions.
+
+    This model provides a consistent response format for all agents:
+    - response: The main response text (can be empty if only asking questions)
+    - clarifying_questions: Optional list of questions to ask the user
+
+    When clarifying_questions is provided, the agent expects to receive
+    answers before continuing its work. This replaces the ask_questions tool.
+    """
+
+    response: str = Field(
+        description="The agent's response text. Always respond with some text summarizing what happened, whats next, etc.",
+    )
+    clarifying_questions: list[str] | None = Field(
+        default=None,
+        description="""
+        Optional list of clarifying questions to ask the user.
+        - Single question: Shown as a non-blocking suggestion (user can answer or continue with other prompts)
+        - Multiple questions (2+): Asked sequentially in Q&A mode (blocks input until all answered or cancelled)
+        """,
+    )
+
+
 class AgentType(StrEnum):
    """Enumeration for available agent types."""
 
@@ -73,6 +97,30 @@ class UserQuestion(BaseModel):
    )
 
 
+class MultipleUserQuestions(BaseModel):
+    """Multiple questions to ask the user sequentially."""
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    questions: list[str] = Field(
+        description="List of questions to ask the user",
+    )
+    current_index: int = Field(
+        default=0,
+        description="Current question index being asked",
+    )
+    answers: list[str] = Field(
+        default_factory=list,
+        description="Accumulated answers from the user",
+    )
+    tool_call_id: str = Field(
+        description="Tool call id",
+    )
+    result: Future[UserAnswer] = Field(
+        description="Future that will contain all answers formatted as Q&A pairs"
+    )
+
+
 class AgentRuntimeOptions(BaseModel):
    """User interface options for agents."""
 
@@ -100,9 +148,9 @@ class AgentRuntimeOptions(BaseModel):
        description="Maximum number of iterations for agent loops",
    )
 
-   queue: Queue[UserQuestion] = Field(
+   queue: Queue[UserQuestion | MultipleUserQuestions] = Field(
        default_factory=Queue,
-       description="Queue for storing user responses",
+       description="Queue for storing user questions (single or multiple)",
    )
 
    tasks: list[Future[UserAnswer]] = Field(
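AgentResponse replaces the removed ask_questions tool (see shotgun/agents/tools/user_interaction.py in the file list) with structured output. A sketch of how the two question modes described in the field docstring might surface to a caller; the UI hooks are hypothetical stand-ins:

    from shotgun.agents.models import AgentResponse

    def show_suggestion(q: str) -> None:        # hypothetical UI hook
        print(f"Suggestion: {q}")

    def enter_qa_mode(qs: list[str]) -> None:   # hypothetical UI hook
        for q in qs:
            print(f"Q: {q}")

    resp = AgentResponse(
        response="Drafted the spec outline.",
        clarifying_questions=[
            "Which database should the service target?",
            "Is backwards compatibility with v1 required?",
        ],
    )

    if resp.clarifying_questions:
        if len(resp.clarifying_questions) == 1:
            show_suggestion(resp.clarifying_questions[0])  # non-blocking suggestion
        else:
            enter_qa_mode(resp.clarifying_questions)       # sequential Q&A, blocks input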