shotgun-sh 0.2.3.dev2__py3-none-any.whl → 0.2.11.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

Files changed (132)
  1. shotgun/agents/agent_manager.py +664 -75
  2. shotgun/agents/common.py +76 -70
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +78 -36
  5. shotgun/agents/config/models.py +41 -1
  6. shotgun/agents/config/provider.py +70 -15
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +14 -2
  18. shotgun/agents/history/token_counting/anthropic.py +49 -11
  19. shotgun/agents/history/token_counting/base.py +14 -3
  20. shotgun/agents/history/token_counting/openai.py +8 -0
  21. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  22. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  23. shotgun/agents/history/token_counting/utils.py +0 -3
  24. shotgun/agents/models.py +50 -2
  25. shotgun/agents/plan.py +6 -7
  26. shotgun/agents/research.py +7 -8
  27. shotgun/agents/specify.py +6 -7
  28. shotgun/agents/tasks.py +6 -7
  29. shotgun/agents/tools/__init__.py +0 -2
  30. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  31. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  32. shotgun/agents/tools/codebase/file_read.py +11 -2
  33. shotgun/agents/tools/codebase/query_graph.py +6 -0
  34. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  35. shotgun/agents/tools/file_management.py +82 -16
  36. shotgun/agents/tools/registry.py +217 -0
  37. shotgun/agents/tools/web_search/__init__.py +30 -18
  38. shotgun/agents/tools/web_search/anthropic.py +26 -5
  39. shotgun/agents/tools/web_search/gemini.py +23 -11
  40. shotgun/agents/tools/web_search/openai.py +22 -13
  41. shotgun/agents/tools/web_search/utils.py +2 -2
  42. shotgun/agents/usage_manager.py +16 -11
  43. shotgun/api_endpoints.py +7 -3
  44. shotgun/build_constants.py +1 -1
  45. shotgun/cli/clear.py +53 -0
  46. shotgun/cli/compact.py +186 -0
  47. shotgun/cli/config.py +8 -5
  48. shotgun/cli/context.py +111 -0
  49. shotgun/cli/export.py +1 -1
  50. shotgun/cli/feedback.py +4 -2
  51. shotgun/cli/models.py +1 -0
  52. shotgun/cli/plan.py +1 -1
  53. shotgun/cli/research.py +1 -1
  54. shotgun/cli/specify.py +1 -1
  55. shotgun/cli/tasks.py +1 -1
  56. shotgun/cli/update.py +16 -2
  57. shotgun/codebase/core/change_detector.py +5 -3
  58. shotgun/codebase/core/code_retrieval.py +4 -2
  59. shotgun/codebase/core/ingestor.py +10 -8
  60. shotgun/codebase/core/manager.py +13 -4
  61. shotgun/codebase/core/nl_query.py +1 -1
  62. shotgun/llm_proxy/__init__.py +5 -2
  63. shotgun/llm_proxy/clients.py +12 -7
  64. shotgun/logging_config.py +18 -27
  65. shotgun/main.py +73 -11
  66. shotgun/posthog_telemetry.py +23 -7
  67. shotgun/prompts/agents/export.j2 +18 -1
  68. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  69. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  70. shotgun/prompts/agents/plan.j2 +1 -1
  71. shotgun/prompts/agents/research.j2 +1 -1
  72. shotgun/prompts/agents/specify.j2 +270 -3
  73. shotgun/prompts/agents/state/system_state.j2 +4 -0
  74. shotgun/prompts/agents/tasks.j2 +1 -1
  75. shotgun/prompts/loader.py +2 -2
  76. shotgun/prompts/tools/web_search.j2 +14 -0
  77. shotgun/sentry_telemetry.py +7 -16
  78. shotgun/settings.py +238 -0
  79. shotgun/telemetry.py +18 -33
  80. shotgun/tui/app.py +243 -43
  81. shotgun/tui/commands/__init__.py +1 -1
  82. shotgun/tui/components/context_indicator.py +179 -0
  83. shotgun/tui/components/mode_indicator.py +70 -0
  84. shotgun/tui/components/status_bar.py +48 -0
  85. shotgun/tui/containers.py +91 -0
  86. shotgun/tui/dependencies.py +39 -0
  87. shotgun/tui/protocols.py +45 -0
  88. shotgun/tui/screens/chat/__init__.py +5 -0
  89. shotgun/tui/screens/chat/chat.tcss +54 -0
  90. shotgun/tui/screens/chat/chat_screen.py +1202 -0
  91. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  92. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  93. shotgun/tui/screens/chat/help_text.py +40 -0
  94. shotgun/tui/screens/chat/prompt_history.py +48 -0
  95. shotgun/tui/screens/chat.tcss +11 -0
  96. shotgun/tui/screens/chat_screen/command_providers.py +78 -2
  97. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  98. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  99. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  100. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  101. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  102. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  103. shotgun/tui/screens/confirmation_dialog.py +151 -0
  104. shotgun/tui/screens/feedback.py +4 -4
  105. shotgun/tui/screens/github_issue.py +102 -0
  106. shotgun/tui/screens/model_picker.py +49 -24
  107. shotgun/tui/screens/onboarding.py +431 -0
  108. shotgun/tui/screens/pipx_migration.py +153 -0
  109. shotgun/tui/screens/provider_config.py +50 -27
  110. shotgun/tui/screens/shotgun_auth.py +2 -2
  111. shotgun/tui/screens/welcome.py +32 -10
  112. shotgun/tui/services/__init__.py +5 -0
  113. shotgun/tui/services/conversation_service.py +184 -0
  114. shotgun/tui/state/__init__.py +7 -0
  115. shotgun/tui/state/processing_state.py +185 -0
  116. shotgun/tui/utils/mode_progress.py +14 -7
  117. shotgun/tui/widgets/__init__.py +5 -0
  118. shotgun/tui/widgets/widget_coordinator.py +262 -0
  119. shotgun/utils/datetime_utils.py +77 -0
  120. shotgun/utils/file_system_utils.py +22 -2
  121. shotgun/utils/marketing.py +110 -0
  122. shotgun/utils/update_checker.py +69 -14
  123. shotgun_sh-0.2.11.dev5.dist-info/METADATA +130 -0
  124. shotgun_sh-0.2.11.dev5.dist-info/RECORD +193 -0
  125. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/entry_points.txt +1 -0
  126. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/licenses/LICENSE +1 -1
  127. shotgun/agents/tools/user_interaction.py +0 -37
  128. shotgun/tui/screens/chat.py +0 -804
  129. shotgun/tui/screens/chat_screen/history.py +0 -352
  130. shotgun_sh-0.2.3.dev2.dist-info/METADATA +0 -467
  131. shotgun_sh-0.2.3.dev2.dist-info/RECORD +0 -154
  132. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/WHEEL +0 -0
shotgun/agents/history/context_extraction.py CHANGED
@@ -1,5 +1,9 @@
 """Context extraction utilities for history processing."""
 
+import json
+import logging
+import traceback
+
 from pydantic_ai.messages import (
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
@@ -16,6 +20,46 @@ from pydantic_ai.messages import (
     UserPromptPart,
 )
 
+logger = logging.getLogger(__name__)
+
+
+def _safely_parse_tool_args(args: dict[str, object] | str | None) -> dict[str, object]:
+    """Safely parse tool call arguments, handling incomplete/invalid JSON.
+
+    Args:
+        args: Tool call arguments (dict, JSON string, or None)
+
+    Returns:
+        Parsed args dict, or empty dict if parsing fails
+    """
+    if args is None:
+        return {}
+
+    if isinstance(args, dict):
+        return args
+
+    if not isinstance(args, str):
+        return {}
+
+    try:
+        parsed = json.loads(args)
+        return parsed if isinstance(parsed, dict) else {}
+    except (json.JSONDecodeError, ValueError) as e:
+        # Only log warning if it looks like JSON (starts with { or [) - incomplete JSON
+        # Plain strings are valid args and shouldn't trigger warnings
+        stripped_args = args.strip()
+        if stripped_args.startswith(("{", "[")):
+            args_preview = args[:100] + "..." if len(args) > 100 else args
+            logger.warning(
+                "Detected incomplete/invalid JSON in tool call args during parsing",
+                extra={
+                    "args_preview": args_preview,
+                    "error": str(e),
+                    "args_length": len(args),
+                },
+            )
+        return {}
+
 
 def extract_context_from_messages(messages: list[ModelMessage]) -> str:
     """Extract context from a list of messages for summarization."""
@@ -87,12 +131,55 @@ def extract_context_from_part(
         return f"<ASSISTANT_TEXT>\n{message_part.content}\n</ASSISTANT_TEXT>"
 
     elif isinstance(message_part, ToolCallPart):
-        if isinstance(message_part.args, dict):
-            args_str = ", ".join(f"{k}={repr(v)}" for k, v in message_part.args.items())
-            tool_call_str = f"{message_part.tool_name}({args_str})"
-        else:
-            tool_call_str = f"{message_part.tool_name}({message_part.args})"
-        return f"<TOOL_CALL>\n{tool_call_str}\n</TOOL_CALL>"
+        # Safely parse args to avoid crashes from incomplete JSON during streaming
+        try:
+            parsed_args = _safely_parse_tool_args(message_part.args)
+            if parsed_args:
+                # Successfully parsed as dict - format nicely
+                args_str = ", ".join(f"{k}={repr(v)}" for k, v in parsed_args.items())
+                tool_call_str = f"{message_part.tool_name}({args_str})"
+            elif isinstance(message_part.args, str) and message_part.args:
+                # Non-empty string that didn't parse as JSON
+                # Check if it looks like JSON (starts with { or [) - if so, it's incomplete
+                stripped_args = message_part.args.strip()
+                if stripped_args.startswith(("{", "[")):
+                    # Looks like incomplete JSON - log warning and show empty parens
+                    args_preview = (
+                        stripped_args[:100] + "..."
+                        if len(stripped_args) > 100
+                        else stripped_args
+                    )
+                    stack_trace = "".join(traceback.format_stack())
+                    logger.warning(
+                        "ToolCallPart with unparseable args encountered during context extraction",
+                        extra={
+                            "tool_name": message_part.tool_name,
+                            "tool_call_id": message_part.tool_call_id,
+                            "args_preview": args_preview,
+                            "args_type": type(message_part.args).__name__,
+                            "stack_trace": stack_trace,
+                        },
+                    )
+                    tool_call_str = f"{message_part.tool_name}()"
+                else:
+                    # Plain string arg - display as-is
+                    tool_call_str = f"{message_part.tool_name}({message_part.args})"
+            else:
+                # No args
+                tool_call_str = f"{message_part.tool_name}()"
+            return f"<TOOL_CALL>\n{tool_call_str}\n</TOOL_CALL>"
+        except Exception as e:  # pragma: no cover - defensive catch-all
+            # If anything goes wrong, log full exception with stack trace
+            logger.error(
+                "Unexpected error processing ToolCallPart",
+                exc_info=True,
+                extra={
+                    "tool_name": message_part.tool_name,
+                    "tool_call_id": message_part.tool_call_id,
+                    "error": str(e),
+                },
+            )
+            return f"<TOOL_CALL>\n{message_part.tool_name}()\n</TOOL_CALL>"
 
     elif isinstance(message_part, BuiltinToolCallPart):
         return f"<BUILTIN_TOOL_CALL>\n{message_part.tool_name}\n</BUILTIN_TOOL_CALL>"
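For readers skimming the diff, a minimal sketch (not from the package) of how the new `_safely_parse_tool_args` helper behaves; its behavior follows the function body above, and the import path is assumed from the file list:

```python
# Illustrative sketch only; the import path is an assumption.
from shotgun.agents.history.context_extraction import _safely_parse_tool_args

assert _safely_parse_tool_args(None) == {}                              # no args
assert _safely_parse_tool_args({"path": "a.py"}) == {"path": "a.py"}    # dicts pass through
assert _safely_parse_tool_args('{"path": "a.py"}') == {"path": "a.py"}  # valid JSON string parsed
assert _safely_parse_tool_args('{"path": "a.') == {}  # truncated JSON: {} plus a logged warning
assert _safely_parse_tool_args("plain text") == {}    # plain string: {} and no warning
```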
shotgun/agents/history/compaction.py CHANGED
@@ -127,6 +127,7 @@ calculate_max_summarization_tokens = _calculate_max_summarization_tokens
 async def token_limit_compactor(
     ctx: ContextProtocol,
     messages: list[ModelMessage],
+    force: bool = False,
 ) -> list[ModelMessage]:
     """Compact message history based on token limits with incremental processing.
 
@@ -139,6 +140,7 @@ async def token_limit_compactor(
     Args:
         ctx: Run context with usage information and dependencies
         messages: Current conversation history
+        force: If True, force compaction even if below token threshold
 
     Returns:
         Compacted list of messages within token limits
@@ -169,7 +171,7 @@ async def token_limit_compactor(
     )
 
     # Only do incremental compaction if post-summary conversation exceeds threshold
-    if post_summary_tokens < max_tokens:
+    if post_summary_tokens < max_tokens and not force:
         logger.debug(
             f"Post-summary conversation under threshold ({post_summary_tokens} < {max_tokens}), "
             f"keeping all {len(messages)} messages"
@@ -340,6 +342,7 @@ async def token_limit_compactor(
         else 0
     )
 
+    # Track incremental compaction with simple metrics (fast, no token counting)
     track_event(
         "context_compaction_triggered",
         {
@@ -352,6 +355,10 @@ async def token_limit_compactor(
             "agent_mode": deps.agent_mode.value
             if hasattr(deps, "agent_mode") and deps.agent_mode
             else "unknown",
+            # Model and provider info (no computation needed)
+            "model_name": deps.llm_model.name.value,
+            "provider": deps.llm_model.provider.value,
+            "key_provider": deps.llm_model.key_provider.value,
         },
     )
 
@@ -368,7 +375,7 @@ async def token_limit_compactor(
     )
 
     # Only do full compaction if total conversation exceeds threshold
-    if total_tokens < max_tokens:
+    if total_tokens < max_tokens and not force:
         logger.debug(
             f"Total conversation under threshold ({total_tokens} < {max_tokens}), "
             f"keeping all {len(messages)} messages"
@@ -468,6 +475,7 @@ async def _full_compaction(
     tokens_before = current_tokens  # Already calculated above
     tokens_after = summary_usage.output_tokens if summary_usage else 0
 
+    # Track full compaction with simple metrics (fast, no token counting)
    track_event(
        "context_compaction_triggered",
        {
@@ -480,6 +488,10 @@ async def _full_compaction(
             "agent_mode": deps.agent_mode.value
             if hasattr(deps, "agent_mode") and deps.agent_mode
             else "unknown",
+            # Model and provider info (no computation needed)
+            "model_name": deps.llm_model.name.value,
+            "provider": deps.llm_model.provider.value,
+            "key_provider": deps.llm_model.key_provider.value,
         },
     )
 
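The net effect of the new `force` parameter is that both `< max_tokens` early returns are skipped, so compaction can be triggered on demand. A hypothetical call site (names assumed; plausibly what the new `/compact` command in `shotgun/cli/compact.py` builds on):

```python
# Hypothetical sketch; ctx and messages come from the running session,
# and the import path is assumed from the file list.
from shotgun.agents.history.compaction import token_limit_compactor

async def compact_now(ctx, messages):
    # force=True bypasses the incremental and full threshold checks above,
    # so the history is summarized even while under the token limit.
    return await token_limit_compactor(ctx, messages, force=True)
```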
shotgun/agents/history/token_counting/anthropic.py CHANGED
@@ -1,9 +1,10 @@
 """Anthropic token counting using official client."""
 
+import logfire
 from pydantic_ai.messages import ModelMessage
 
 from shotgun.agents.config.models import KeyProvider
-from shotgun.llm_proxy import create_anthropic_proxy_client
+from shotgun.llm_proxy import create_anthropic_proxy_provider
 from shotgun.logging_config import get_logger
 
 from .base import TokenCounter, extract_text_from_messages
@@ -36,19 +37,28 @@ class AnthropicTokenCounter(TokenCounter):
         try:
             if key_provider == KeyProvider.SHOTGUN:
                 # Use LiteLLM proxy for Shotgun Account
-                # Proxies to Anthropic's token counting API
-                self.client = create_anthropic_proxy_client(api_key)
+                # Get async client from AnthropicProvider
+                provider = create_anthropic_proxy_provider(api_key)
+                self.client = provider.client
                 logger.debug(
-                    f"Initialized Anthropic token counter for {model_name} via LiteLLM proxy"
+                    f"Initialized async Anthropic token counter for {model_name} via LiteLLM proxy"
                 )
             else:
-                # Direct Anthropic API for BYOK
-                self.client = anthropic.Anthropic(api_key=api_key)
+                # Direct Anthropic API for BYOK - use async client
+                self.client = anthropic.AsyncAnthropic(api_key=api_key)
                 logger.debug(
-                    f"Initialized Anthropic token counter for {model_name} via direct API"
+                    f"Initialized async Anthropic token counter for {model_name} via direct API"
                 )
         except Exception as e:
-            raise RuntimeError("Failed to initialize Anthropic client") from e
+            logfire.exception(
+                f"Failed to initialize Anthropic token counter for {model_name}",
+                model_name=model_name,
+                key_provider=key_provider.value,
+                exception_type=type(e).__name__,
+            )
+            raise RuntimeError(
+                f"Failed to initialize Anthropic async client for {model_name}: {type(e).__name__}: {str(e)}"
+            ) from e
 
     async def count_tokens(self, text: str) -> int:
         """Count tokens using Anthropic's official API (async).
@@ -62,15 +72,39 @@ class AnthropicTokenCounter(TokenCounter):
         Raises:
             RuntimeError: If API call fails
         """
+        # Handle empty text to avoid unnecessary API calls
+        # Anthropic API requires non-empty content, so we need a strict check
+        if not text or not text.strip():
+            return 0
+
+        # Additional validation: ensure the text has actual content
+        # Some edge cases might have only whitespace or control characters
+        cleaned_text = text.strip()
+        if not cleaned_text:
+            return 0
+
         try:
             # Anthropic API expects messages format and model parameter
-            result = self.client.messages.count_tokens(
-                messages=[{"role": "user", "content": text}], model=self.model_name
+            # Use await with async client
+            result = await self.client.messages.count_tokens(
+                messages=[{"role": "user", "content": cleaned_text}],
+                model=self.model_name,
             )
             return result.input_tokens
         except Exception as e:
+            # Create a preview of the text for logging (truncated to avoid huge logs)
+            text_preview = text[:100] + "..." if len(text) > 100 else text
+
+            logfire.exception(
+                f"Anthropic token counting failed for {self.model_name}",
+                model_name=self.model_name,
+                text_length=len(text),
+                text_preview=text_preview,
+                exception_type=type(e).__name__,
+                exception_message=str(e),
+            )
            raise RuntimeError(
-                f"Anthropic token counting API failed for {self.model_name}"
+                f"Anthropic token counting API failed for {self.model_name}: {type(e).__name__}: {str(e)}"
            ) from e
 
     async def count_message_tokens(self, messages: list[ModelMessage]) -> int:
@@ -85,5 +119,9 @@ class AnthropicTokenCounter(TokenCounter):
         Raises:
             RuntimeError: If token counting fails
         """
+        # Handle empty message list early
+        if not messages:
+            return 0
+
         total_text = extract_text_from_messages(messages)
         return await self.count_tokens(total_text)
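Because the counter now holds an async client, both counting methods must be awaited, and the new guards make empty input free. A small usage sketch (construction is omitted since the full `__init__` signature is not shown in this hunk; the import path is assumed from the file list):

```python
# Illustrative sketch only.
from shotgun.agents.history.token_counting.anthropic import AnthropicTokenCounter

async def demo(counter: AnthropicTokenCounter) -> None:
    assert await counter.count_tokens("   ") == 0       # whitespace-only: returns 0, no API call
    assert await counter.count_message_tokens([]) == 0  # empty history: returns 0, no API call
    print(await counter.count_tokens("hello world"))    # awaited call on AsyncAnthropic
```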
shotgun/agents/history/token_counting/base.py CHANGED
@@ -56,12 +56,23 @@ def extract_text_from_messages(messages: list[ModelMessage]) -> str:
         if hasattr(message, "parts"):
             for part in message.parts:
                 if hasattr(part, "content") and isinstance(part.content, str):
-                    text_parts.append(part.content)
+                    # Only add non-empty content
+                    if part.content.strip():
+                        text_parts.append(part.content)
                 else:
                     # Handle non-text parts (tool calls, etc.)
-                    text_parts.append(str(part))
+                    part_str = str(part)
+                    if part_str.strip():
+                        text_parts.append(part_str)
         else:
             # Handle messages without parts
-            text_parts.append(str(message))
+            msg_str = str(message)
+            if msg_str.strip():
+                text_parts.append(msg_str)
+
+    # If no valid text parts found, return a minimal placeholder
+    # This ensures we never send completely empty content to APIs
+    if not text_parts:
+        return "."
 
     return "\n".join(text_parts)
shotgun/agents/history/token_counting/openai.py CHANGED
@@ -57,6 +57,10 @@ class OpenAITokenCounter(TokenCounter):
         Raises:
             RuntimeError: If token counting fails
         """
+        # Handle empty text to avoid unnecessary encoding
+        if not text or not text.strip():
+            return 0
+
         try:
             return len(self.encoding.encode(text))
         except Exception as e:
@@ -76,5 +80,9 @@ class OpenAITokenCounter(TokenCounter):
         Raises:
             RuntimeError: If token counting fails
         """
+        # Handle empty message list early
+        if not messages:
+            return 0
+
         total_text = extract_text_from_messages(messages)
         return await self.count_tokens(total_text)
shotgun/agents/history/token_counting/sentencepiece_counter.py CHANGED
@@ -88,6 +88,10 @@ class SentencePieceTokenCounter(TokenCounter):
         Raises:
             RuntimeError: If token counting fails
         """
+        # Handle empty text to avoid unnecessary tokenization
+        if not text or not text.strip():
+            return 0
+
         await self._ensure_tokenizer()
 
         if self.sp is None:
@@ -115,5 +119,9 @@ class SentencePieceTokenCounter(TokenCounter):
         Raises:
             RuntimeError: If token counting fails
         """
+        # Handle empty message list early
+        if not messages:
+            return 0
+
         total_text = extract_text_from_messages(messages)
         return await self.count_tokens(total_text)
shotgun/agents/history/token_counting/tokenizer_cache.py CHANGED
@@ -3,6 +3,7 @@
 import hashlib
 from pathlib import Path
 
+import aiofiles
 import httpx
 
 from shotgun.logging_config import get_logger
@@ -78,7 +79,8 @@ async def download_gemini_tokenizer() -> Path:
 
     # Atomic write: write to temp file first, then rename
     temp_path = cache_path.with_suffix(".tmp")
-    temp_path.write_bytes(content)
+    async with aiofiles.open(temp_path, "wb") as f:
+        await f.write(content)
     temp_path.rename(cache_path)
 
     logger.info(f"Gemini tokenizer downloaded and cached at {cache_path}")
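The change keeps the existing atomic-write pattern but moves the write off the event loop. The pattern in isolation, as a generic sketch (names illustrative, not package code):

```python
from pathlib import Path

import aiofiles

async def atomic_write(cache_path: Path, content: bytes) -> None:
    temp_path = cache_path.with_suffix(".tmp")
    async with aiofiles.open(temp_path, "wb") as f:
        await f.write(content)        # non-blocking, unlike Path.write_bytes()
    temp_path.rename(cache_path)      # rename is atomic on the same filesystem
```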
shotgun/agents/history/token_counting/utils.py CHANGED
@@ -44,9 +44,6 @@ def get_token_counter(model_config: ModelConfig) -> TokenCounter:
 
     # Return cached instance if available
     if cache_key in _token_counter_cache:
-        logger.debug(
-            f"Reusing cached token counter for {model_config.provider.value}:{model_config.name}"
-        )
         return _token_counter_cache[cache_key]
 
     # Create new instance and cache it
shotgun/agents/models.py CHANGED
@@ -19,6 +19,30 @@ if TYPE_CHECKING:
     from shotgun.codebase.service import CodebaseService
 
 
+class AgentResponse(BaseModel):
+    """Structured response from an agent with optional clarifying questions.
+
+    This model provides a consistent response format for all agents:
+    - response: The main response text (can be empty if only asking questions)
+    - clarifying_questions: Optional list of questions to ask the user
+
+    When clarifying_questions is provided, the agent expects to receive
+    answers before continuing its work. This replaces the ask_questions tool.
+    """
+
+    response: str = Field(
+        description="The agent's response text. Always respond with some text summarizing what happened, whats next, etc.",
+    )
+    clarifying_questions: list[str] | None = Field(
+        default=None,
+        description="""
+        Optional list of clarifying questions to ask the user.
+        - Single question: Shown as a non-blocking suggestion (user can answer or continue with other prompts)
+        - Multiple questions (2+): Asked sequentially in Q&A mode (blocks input until all answered or cancelled)
+        """,
+    )
+
+
 class AgentType(StrEnum):
     """Enumeration for available agent types."""
 
@@ -73,6 +97,30 @@ class UserQuestion(BaseModel):
     )
 
 
+class MultipleUserQuestions(BaseModel):
+    """Multiple questions to ask the user sequentially."""
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    questions: list[str] = Field(
+        description="List of questions to ask the user",
+    )
+    current_index: int = Field(
+        default=0,
+        description="Current question index being asked",
+    )
+    answers: list[str] = Field(
+        default_factory=list,
+        description="Accumulated answers from the user",
+    )
+    tool_call_id: str = Field(
+        description="Tool call id",
+    )
+    result: Future[UserAnswer] = Field(
+        description="Future that will contain all answers formatted as Q&A pairs"
+    )
+
+
 class AgentRuntimeOptions(BaseModel):
     """User interface options for agents."""
 
@@ -100,9 +148,9 @@ class AgentRuntimeOptions(BaseModel):
         description="Maximum number of iterations for agent loops",
     )
 
-    queue: Queue[UserQuestion] = Field(
+    queue: Queue[UserQuestion | MultipleUserQuestions] = Field(
         default_factory=Queue,
-        description="Queue for storing user responses",
+        description="Queue for storing user questions (single or multiple)",
     )
 
     tasks: list[Future[UserAnswer]] = Field(
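Hypothetical instances showing the two clarifying-question behaviors documented in the field description above (values are illustrative only):

```python
suggestion = AgentResponse(
    response="Drafted the plan outline.",
    clarifying_questions=["Should the API stay backwards compatible?"],  # one: non-blocking
)
qa_mode = AgentResponse(
    response="",  # may be empty when the agent only asks questions
    clarifying_questions=[
        "Which Python versions must be supported?",
        "Is the CLI or the TUI the primary interface?",
    ],  # two or more: sequential Q&A mode
)
```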
shotgun/agents/plan.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
-def create_plan_agent(
+async def create_plan_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a plan agent with artifact management capabilities.
 
     Args:
@@ -40,7 +39,7 @@ def create_plan_agent(
     # Use partial to create system prompt function for plan agent
     system_prompt_fn = partial(build_agent_system_prompt, "plan")
 
-    agent, deps = create_base_agent(
+    agent, deps = await create_base_agent(
         system_prompt_fn,
         agent_runtime_options,
         load_codebase_understanding_tools=True,
@@ -52,11 +51,11 @@ def create_plan_agent(
 
 
 async def run_plan_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     goal: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Create or update a plan based on the given goal using artifacts.
 
     Args:
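The factory is now a coroutine and the run result is typed as `AgentResponse` instead of `str | DeferredToolRequests`; the same reshaping repeats for the research, specify, and tasks agents below. A hedged sketch of the new call pattern (`AgentRuntimeOptions()` defaults and `result.output` access are assumptions, not shown in this diff):

```python
async def plan(goal: str) -> None:
    agent, deps = await create_plan_agent(AgentRuntimeOptions())  # was a plain call
    result = await run_plan_agent(agent, goal, deps)
    print(result.output.response)                 # structured output
    if result.output.clarifying_questions:
        print(result.output.clarifying_questions)
```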
shotgun/agents/research.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import (
@@ -21,15 +20,15 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 from .tools import get_available_web_search_tools
 
 logger = get_logger(__name__)
 
 
-def create_research_agent(
+async def create_research_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a research agent with web search and artifact management capabilities.
 
     Args:
@@ -42,7 +41,7 @@ def create_research_agent(
     logger.debug("Initializing research agent")
 
     # Get available web search tools based on configured API keys
-    web_search_tools = get_available_web_search_tools()
+    web_search_tools = await get_available_web_search_tools()
     if web_search_tools:
         logger.info(
             "Research agent configured with %d web search tool(s)",
@@ -54,7 +53,7 @@ def create_research_agent(
     # Use partial to create system prompt function for research agent
     system_prompt_fn = partial(build_agent_system_prompt, "research")
 
-    agent, deps = create_base_agent(
+    agent, deps = await create_base_agent(
         system_prompt_fn,
         agent_runtime_options,
         load_codebase_understanding_tools=True,
@@ -66,11 +65,11 @@ def create_research_agent(
 
 
 async def run_research_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     query: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Perform research on the given query and update research artifacts.
 
     Args:
shotgun/agents/specify.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
-def create_specify_agent(
+async def create_specify_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a specify agent with artifact management capabilities.
 
     Args:
@@ -40,7 +39,7 @@ def create_specify_agent(
     # Use partial to create system prompt function for specify agent
     system_prompt_fn = partial(build_agent_system_prompt, "specify")
 
-    agent, deps = create_base_agent(
+    agent, deps = await create_base_agent(
         system_prompt_fn,
         agent_runtime_options,
         load_codebase_understanding_tools=True,
@@ -52,11 +51,11 @@ def create_specify_agent(
 
 
 async def run_specify_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     requirement: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Create or update specifications based on the given requirement.
 
     Args:
shotgun/agents/tasks.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
-def create_tasks_agent(
+async def create_tasks_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a tasks agent with file management capabilities.
 
     Args:
@@ -40,7 +39,7 @@ def create_tasks_agent(
     # Use partial to create system prompt function for tasks agent
     system_prompt_fn = partial(build_agent_system_prompt, "tasks")
 
-    agent, deps = create_base_agent(
+    agent, deps = await create_base_agent(
         system_prompt_fn,
         agent_runtime_options,
         provider=provider,
@@ -50,11 +49,11 @@ def create_tasks_agent(
 
 
 async def run_tasks_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     instruction: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Create or update tasks based on the given instruction.
 
     Args: