shotgun-sh 0.2.11.dev1__py3-none-any.whl → 0.2.17.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

Files changed (76)
  1. shotgun/agents/agent_manager.py +194 -28
  2. shotgun/agents/common.py +14 -8
  3. shotgun/agents/config/manager.py +64 -33
  4. shotgun/agents/config/models.py +25 -1
  5. shotgun/agents/config/provider.py +2 -2
  6. shotgun/agents/context_analyzer/analyzer.py +2 -24
  7. shotgun/agents/conversation_manager.py +35 -19
  8. shotgun/agents/export.py +2 -2
  9. shotgun/agents/history/history_processors.py +99 -3
  10. shotgun/agents/history/token_counting/anthropic.py +17 -1
  11. shotgun/agents/history/token_counting/base.py +14 -3
  12. shotgun/agents/history/token_counting/openai.py +11 -1
  13. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  14. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  15. shotgun/agents/history/token_counting/utils.py +0 -3
  16. shotgun/agents/plan.py +2 -2
  17. shotgun/agents/research.py +3 -3
  18. shotgun/agents/specify.py +2 -2
  19. shotgun/agents/tasks.py +2 -2
  20. shotgun/agents/tools/codebase/file_read.py +5 -2
  21. shotgun/agents/tools/file_management.py +11 -7
  22. shotgun/agents/tools/web_search/__init__.py +8 -8
  23. shotgun/agents/tools/web_search/anthropic.py +2 -2
  24. shotgun/agents/tools/web_search/gemini.py +1 -1
  25. shotgun/agents/tools/web_search/openai.py +1 -1
  26. shotgun/agents/tools/web_search/utils.py +2 -2
  27. shotgun/agents/usage_manager.py +16 -11
  28. shotgun/build_constants.py +1 -1
  29. shotgun/cli/clear.py +2 -1
  30. shotgun/cli/compact.py +3 -3
  31. shotgun/cli/config.py +8 -5
  32. shotgun/cli/context.py +2 -2
  33. shotgun/cli/export.py +1 -1
  34. shotgun/cli/feedback.py +4 -2
  35. shotgun/cli/plan.py +1 -1
  36. shotgun/cli/research.py +1 -1
  37. shotgun/cli/specify.py +1 -1
  38. shotgun/cli/tasks.py +1 -1
  39. shotgun/codebase/core/change_detector.py +5 -3
  40. shotgun/codebase/core/code_retrieval.py +4 -2
  41. shotgun/codebase/core/ingestor.py +10 -8
  42. shotgun/codebase/core/manager.py +3 -3
  43. shotgun/codebase/core/nl_query.py +1 -1
  44. shotgun/exceptions.py +32 -0
  45. shotgun/logging_config.py +10 -17
  46. shotgun/main.py +3 -1
  47. shotgun/posthog_telemetry.py +28 -25
  48. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +3 -2
  49. shotgun/sentry_telemetry.py +160 -2
  50. shotgun/telemetry.py +3 -1
  51. shotgun/tui/app.py +71 -65
  52. shotgun/tui/components/context_indicator.py +43 -0
  53. shotgun/tui/containers.py +15 -17
  54. shotgun/tui/dependencies.py +2 -2
  55. shotgun/tui/screens/chat/chat_screen.py +189 -45
  56. shotgun/tui/screens/chat/help_text.py +16 -15
  57. shotgun/tui/screens/chat_screen/command_providers.py +10 -0
  58. shotgun/tui/screens/chat_screen/history/chat_history.py +1 -2
  59. shotgun/tui/screens/feedback.py +4 -4
  60. shotgun/tui/screens/github_issue.py +102 -0
  61. shotgun/tui/screens/model_picker.py +21 -20
  62. shotgun/tui/screens/onboarding.py +431 -0
  63. shotgun/tui/screens/provider_config.py +50 -27
  64. shotgun/tui/screens/shotgun_auth.py +2 -2
  65. shotgun/tui/screens/welcome.py +14 -11
  66. shotgun/tui/services/conversation_service.py +16 -14
  67. shotgun/tui/utils/mode_progress.py +14 -7
  68. shotgun/tui/widgets/widget_coordinator.py +18 -2
  69. shotgun/utils/file_system_utils.py +19 -0
  70. shotgun/utils/marketing.py +110 -0
  71. shotgun_sh-0.2.17.dev1.dist-info/METADATA +465 -0
  72. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/RECORD +75 -71
  73. shotgun_sh-0.2.11.dev1.dist-info/METADATA +0 -129
  74. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/WHEEL +0 -0
  75. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/entry_points.txt +0 -0
  76. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/config/models.py CHANGED
@@ -1,5 +1,6 @@
  """Pydantic models for configuration."""

+ from datetime import datetime
  from enum import StrEnum

  from pydantic import BaseModel, Field, PrivateAttr, SecretStr
@@ -170,6 +171,21 @@ class ShotgunAccountConfig(BaseModel):
      )


+ class MarketingMessageRecord(BaseModel):
+     """Record of when a marketing message was shown to the user."""
+
+     shown_at: datetime = Field(description="Timestamp when the message was shown")
+
+
+ class MarketingConfig(BaseModel):
+     """Configuration for marketing messages shown to users."""
+
+     messages: dict[str, MarketingMessageRecord] = Field(
+         default_factory=dict,
+         description="Tracking which marketing messages have been shown. Key is message ID (e.g., 'github_star_v1')",
+     )
+
+
  class ShotgunConfig(BaseModel):
      """Main configuration for Shotgun CLI."""

@@ -184,8 +200,16 @@ class ShotgunConfig(BaseModel):
      shotgun_instance_id: str = Field(
          description="Unique shotgun instance identifier (also used for anonymous telemetry)",
      )
-     config_version: int = Field(default=3, description="Configuration schema version")
+     config_version: int = Field(default=4, description="Configuration schema version")
      shown_welcome_screen: bool = Field(
          default=False,
          description="Whether the welcome screen has been shown to the user",
      )
+     shown_onboarding_popup: datetime | None = Field(
+         default=None,
+         description="Timestamp when the onboarding popup was shown to the user (ISO8601 format)",
+     )
+     marketing: MarketingConfig = Field(
+         default_factory=MarketingConfig,
+         description="Marketing messages configuration and tracking",
+     )
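
Note: the new marketing fields above are pure data. A minimal sketch of how a caller might consume them, using the models from this diff; the helper names and the 30-day cooldown policy are illustrative assumptions, not part of this release:

from datetime import datetime, timedelta

def should_show_message(config: ShotgunConfig, message_id: str,
                        cooldown: timedelta = timedelta(days=30)) -> bool:
    """True if the message was never shown or its (assumed) cooldown has elapsed."""
    record = config.marketing.messages.get(message_id)
    return record is None or datetime.now() - record.shown_at >= cooldown

def mark_message_shown(config: ShotgunConfig, message_id: str) -> None:
    """Record the current time as the last display of this message."""
    config.marketing.messages[message_id] = MarketingMessageRecord(shown_at=datetime.now())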
shotgun/agents/config/provider.py CHANGED
@@ -170,7 +170,7 @@ def get_or_create_model(
      return _model_cache[cache_key]


- def get_provider_model(
+ async def get_provider_model(
      provider_or_model: ProviderType | ModelName | None = None,
  ) -> ModelConfig:
      """Get a fully configured ModelConfig with API key and Model instance.
@@ -189,7 +189,7 @@ def get_provider_model(
      """
      config_manager = get_config_manager()
      # Use cached config for read-only access (performance)
-     config = config_manager.load(force_reload=False)
+     config = await config_manager.load(force_reload=False)

      # Priority 1: Check if Shotgun key exists - if so, use it for ANY model
      shotgun_api_key = _get_api_key(config.shotgun.api_key)
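
Because get_provider_model is now a coroutine, every call site must await it. A minimal sketch of the new call shape; the surrounding script is illustrative:

import asyncio

from shotgun.agents.config.provider import get_provider_model

async def main() -> None:
    # config loading is awaited internally, so the accessor is async too
    model_config = await get_provider_model()
    print(model_config.name)

asyncio.run(main())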
shotgun/agents/context_analyzer/analyzer.py CHANGED
@@ -67,26 +67,13 @@ class ContextAnalyzer:
          for msg in reversed(message_history):
              if isinstance(msg, ModelResponse) and msg.usage:
                  last_input_tokens = msg.usage.input_tokens + msg.usage.cache_read_tokens
-                 logger.debug(
-                     f"[ANALYZER] Found last response with usage - "
-                     f"input_tokens={msg.usage.input_tokens}, "
-                     f"cache_read_tokens={msg.usage.cache_read_tokens}, "
-                     f"total={last_input_tokens}"
-                 )
                  break

          if last_input_tokens == 0:
-             logger.warning(
-                 f"[ANALYZER] No usage data found in message history! "
-                 f"message_count={len(message_history)}, "
-                 f"response_count={sum(1 for m in message_history if isinstance(m, ModelResponse))}"
-             )
-             # Fallback to token estimation
-             logger.info("[ANALYZER] Falling back to token estimation")
+             # Fallback to token estimation (no logging to reduce verbosity)
              last_input_tokens = await estimate_tokens_from_messages(
                  message_history, self.model_config
              )
-             logger.debug(f"[ANALYZER] Estimated tokens: {last_input_tokens}")

          # Step 2: Calculate total output tokens (sum across all responses)
          for msg in message_history:
@@ -247,16 +234,7 @@ class ContextAnalyzer:
          # If no content, put all in agent responses
          agent_response_tokens = total_output_tokens

-         logger.debug(
-             f"Token allocation complete: user={user_tokens}, agent_responses={agent_response_tokens}, "
-             f"system_prompts={system_prompt_tokens}, system_status={system_status_tokens}, "
-             f"codebase_understanding={codebase_understanding_tokens}, "
-             f"artifact_management={artifact_management_tokens}, web_research={web_research_tokens}, "
-             f"unknown={unknown_tokens}"
-         )
-         logger.debug(
-             f"Input tokens (from last response): {last_input_tokens}, Output tokens (sum): {total_output_tokens}"
-         )
+         # Token allocation complete (no logging to reduce verbosity)

          # Create TokenAllocation model
          return TokenAllocation(
shotgun/agents/conversation_manager.py CHANGED
@@ -1,11 +1,15 @@
  """Manager for handling conversation persistence operations."""

+ import asyncio
  import json
- import shutil
  from pathlib import Path

+ import aiofiles
+ import aiofiles.os
+
  from shotgun.logging_config import get_logger
  from shotgun.utils import get_shotgun_home
+ from shotgun.utils.file_system_utils import async_copy_file

  from .conversation_history import ConversationHistory

@@ -27,14 +31,14 @@ class ConversationManager:
          else:
              self.conversation_path = conversation_path

-     def save(self, conversation: ConversationHistory) -> None:
+     async def save(self, conversation: ConversationHistory) -> None:
          """Save conversation history to file.

          Args:
              conversation: ConversationHistory to save
          """
          # Ensure directory exists
-         self.conversation_path.parent.mkdir(parents=True, exist_ok=True)
+         await aiofiles.os.makedirs(self.conversation_path.parent, exist_ok=True)

          try:
              # Update timestamp
@@ -42,11 +46,17 @@

              conversation.updated_at = datetime.now()

-             # Serialize to JSON using Pydantic's model_dump
-             data = conversation.model_dump(mode="json")
+             # Serialize to JSON in background thread to avoid blocking event loop
+             # This is crucial for large conversations (5k+ tokens)
+             data = await asyncio.to_thread(conversation.model_dump, mode="json")
+             json_content = await asyncio.to_thread(
+                 json.dumps, data, indent=2, ensure_ascii=False
+             )

-             with open(self.conversation_path, "w", encoding="utf-8") as f:
-                 json.dump(data, f, indent=2, ensure_ascii=False)
+             async with aiofiles.open(
+                 self.conversation_path, "w", encoding="utf-8"
+             ) as f:
+                 await f.write(json_content)

              logger.debug("Conversation saved to %s", self.conversation_path)

@@ -56,21 +66,26 @@
              )
              # Don't raise - we don't want to interrupt the user's session

-     def load(self) -> ConversationHistory | None:
+     async def load(self) -> ConversationHistory | None:
          """Load conversation history from file.

          Returns:
              ConversationHistory if file exists and is valid, None otherwise
          """
-         if not self.conversation_path.exists():
+         if not await aiofiles.os.path.exists(self.conversation_path):
              logger.debug("No conversation history found at %s", self.conversation_path)
              return None

          try:
-             with open(self.conversation_path, encoding="utf-8") as f:
-                 data = json.load(f)
-
-             conversation = ConversationHistory.model_validate(data)
+             async with aiofiles.open(self.conversation_path, encoding="utf-8") as f:
+                 content = await f.read()
+             # Deserialize JSON in background thread to avoid blocking
+             data = await asyncio.to_thread(json.loads, content)
+
+             # Validate model in background thread for large conversations
+             conversation = await asyncio.to_thread(
+                 ConversationHistory.model_validate, data
+             )
              logger.debug(
                  "Conversation loaded from %s with %d agent messages",
                  self.conversation_path,
@@ -89,7 +104,7 @@
              # Create a backup of the corrupted file for debugging
              backup_path = self.conversation_path.with_suffix(".json.backup")
              try:
-                 shutil.copy2(self.conversation_path, backup_path)
+                 await async_copy_file(self.conversation_path, backup_path)
                  logger.info("Backed up corrupted conversation to %s", backup_path)
              except Exception as backup_error:  # pragma: no cover
                  logger.warning("Failed to backup corrupted file: %s", backup_error)
@@ -105,11 +120,12 @@
              )
              return None

-     def clear(self) -> None:
+     async def clear(self) -> None:
          """Delete the conversation history file."""
-         if self.conversation_path.exists():
+         if await aiofiles.os.path.exists(self.conversation_path):
              try:
-                 self.conversation_path.unlink()
+                 # Use asyncio.to_thread for unlink operation
+                 await asyncio.to_thread(self.conversation_path.unlink)
                  logger.debug(
                      "Conversation history cleared at %s", self.conversation_path
                  )
@@ -118,10 +134,10 @@
                  "Failed to clear conversation at %s: %s", self.conversation_path, e
              )

-     def exists(self) -> bool:
+     async def exists(self) -> bool:
          """Check if a conversation history file exists.

          Returns:
              True if conversation file exists, False otherwise
          """
-         return self.conversation_path.exists()
+         return await aiofiles.os.path.exists(str(self.conversation_path))
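
A call-site sketch for the now-async persistence API. The point of asyncio.to_thread above is that model_dump and json.dumps are CPU-bound, so running them in a worker thread keeps the TUI event loop responsive; the no-argument constructor (default path resolution) is assumed from the diff's else branch:

import asyncio

from shotgun.agents.conversation_manager import ConversationManager

async def demo() -> None:
    manager = ConversationManager()          # assumed: resolves a default conversation path
    conversation = await manager.load()      # None if no history exists or the file is corrupt
    if conversation is not None:
        await manager.save(conversation)     # serialization runs off the event loop
    print(await manager.exists())

asyncio.run(demo())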
shotgun/agents/export.py CHANGED
@@ -23,7 +23,7 @@ from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
  logger = get_logger(__name__)


- def create_export_agent(
+ async def create_export_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
  ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
      """Create an export agent with file management capabilities.
@@ -39,7 +39,7 @@
      # Use partial to create system prompt function for export agent
      system_prompt_fn = partial(build_agent_system_prompt, "export")

-     agent, deps = create_base_agent(
+     agent, deps = await create_base_agent(
          system_prompt_fn,
          agent_runtime_options,
          provider=provider,
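
The same def-to-async-def change is applied to every agent factory in this release (plan, research, specify, tasks below), since each now awaits create_base_agent. A call-site sketch for one of them; constructing AgentRuntimeOptions with defaults is an assumption:

import asyncio

from shotgun.agents.export import create_export_agent
from shotgun.agents.models import AgentRuntimeOptions

async def demo() -> None:
    # factories are coroutines now, so they must be awaited
    agent, deps = await create_export_agent(AgentRuntimeOptions())

asyncio.run(demo())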
shotgun/agents/history/history_processors.py CHANGED
@@ -1,7 +1,9 @@
  """History processors for managing conversation history in Shotgun agents."""

+ from collections.abc import Awaitable, Callable
  from typing import TYPE_CHECKING, Any, Protocol

+ from anthropic import APIStatusError
  from pydantic_ai import ModelSettings
  from pydantic_ai.messages import (
      ModelMessage,
@@ -14,6 +16,7 @@ from pydantic_ai.messages import (
  from shotgun.agents.llm import shotgun_model_request
  from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
  from shotgun.agents.models import AgentDeps
+ from shotgun.exceptions import ContextSizeLimitExceeded
  from shotgun.logging_config import get_logger
  from shotgun.posthog_telemetry import track_event
  from shotgun.prompts import PromptLoader
@@ -51,6 +54,86 @@ logger = get_logger(__name__)
  prompt_loader = PromptLoader()


+ async def _safe_token_estimation(
+     estimation_func: Callable[..., Awaitable[int]],
+     model_name: str,
+     max_tokens: int,
+     *args: Any,
+     **kwargs: Any,
+ ) -> int:
+     """Safely estimate tokens with proper error handling.
+
+     Wraps token estimation functions to handle failures gracefully.
+     Only RuntimeError (from token counters) is wrapped in ContextSizeLimitExceeded.
+     Other errors (network, auth) are allowed to bubble up.
+
+     Args:
+         estimation_func: Async function that estimates tokens
+         model_name: Name of the model for error messages
+         max_tokens: Maximum tokens for the model
+         *args: Arguments to pass to estimation_func
+         **kwargs: Keyword arguments to pass to estimation_func
+
+     Returns:
+         Token count from estimation_func
+
+     Raises:
+         ContextSizeLimitExceeded: If token counting fails with RuntimeError
+         Exception: Any other exceptions from estimation_func
+     """
+     try:
+         return await estimation_func(*args, **kwargs)
+     except Exception as e:
+         # Log the error with full context
+         logger.warning(
+             f"Token counting failed for {model_name}",
+             extra={
+                 "error_type": type(e).__name__,
+                 "error_message": str(e),
+                 "model": model_name,
+             },
+         )
+
+         # Token counting behavior with oversized context (verified via testing):
+         #
+         # 1. OpenAI/tiktoken:
+         #    - Successfully counts any size (tested with 752K tokens, no error)
+         #    - Library errors: ValueError, KeyError, AttributeError, SSLError (file/cache issues)
+         #    - Wrapped as: RuntimeError by our counter
+         #
+         # 2. Gemini/SentencePiece:
+         #    - Successfully counts any size (tested with 752K tokens, no error)
+         #    - Library errors: RuntimeError, IOError, TypeError (file/model loading issues)
+         #    - Wrapped as: RuntimeError by our counter
+         #
+         # 3. Anthropic API:
+         #    - Successfully counts large token counts (tested with 752K tokens, no error)
+         #    - Only enforces 32 MB request size limit (not token count)
+         #    - Raises: APIStatusError(413) with error type 'request_too_large' for 32MB+ requests
+         #    - Other API errors: APIConnectionError, RateLimitError, APIStatusError (4xx/5xx)
+         #    - Wrapped as: RuntimeError by our counter
+         #
+         # IMPORTANT: No provider raises errors for "too many tokens" during counting.
+         # Token count validation happens separately by comparing count to max_input_tokens.
+         #
+         # We wrap RuntimeError (library-level failures from tiktoken/sentencepiece).
+         # We also wrap Anthropic's 413 error (request exceeds 32 MB) as it indicates
+         # context is effectively too large and needs user action to reduce it.
+         if isinstance(e, RuntimeError):
+             raise ContextSizeLimitExceeded(
+                 model_name=model_name, max_tokens=max_tokens
+             ) from e
+
+         # Check for Anthropic's 32 MB request size limit (APIStatusError with status 413)
+         if isinstance(e, APIStatusError) and e.status_code == 413:
+             raise ContextSizeLimitExceeded(
+                 model_name=model_name, max_tokens=max_tokens
+             ) from e
+
+         # Re-raise other exceptions (network errors, auth failures, etc.)
+         raise
+
+
  def is_summary_part(part: Any) -> bool:
      """Check if a message part is a compacted summary."""
      return isinstance(part, TextPart) and part.content.startswith(SUMMARY_MARKER)
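
A sketch of the error translation _safe_token_estimation performs, assuming the module's own names (_safe_token_estimation, ContextSizeLimitExceeded) are in scope; the failing counter and model name are contrived:

import asyncio

async def failing_counter() -> int:
    # Stands in for a tiktoken/sentencepiece failure already wrapped as RuntimeError
    raise RuntimeError("tokenizer cache is corrupt")

async def demo() -> None:
    try:
        await _safe_token_estimation(failing_counter, "claude-sonnet-4", 200_000)
    except ContextSizeLimitExceeded as exc:
        # the RuntimeError surfaces as a user-actionable context-size error
        print(f"context-size error raised from: {exc.__cause__!r}")

asyncio.run(demo())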
@@ -157,9 +240,15 @@

      if last_summary_index is not None:
          # Check if post-summary conversation exceeds threshold for incremental compaction
-         post_summary_tokens = await estimate_post_summary_tokens(
-             messages, last_summary_index, deps.llm_model
+         post_summary_tokens = await _safe_token_estimation(
+             estimate_post_summary_tokens,
+             deps.llm_model.name,
+             model_max_tokens,
+             messages,
+             last_summary_index,
+             deps.llm_model,
          )
+
          post_summary_percentage = (
              (post_summary_tokens / max_tokens) * 100 if max_tokens > 0 else 0
          )
@@ -366,7 +455,14 @@

      else:
          # Check if total conversation exceeds threshold for full compaction
-         total_tokens = await estimate_tokens_from_messages(messages, deps.llm_model)
+         total_tokens = await _safe_token_estimation(
+             estimate_tokens_from_messages,
+             deps.llm_model.name,
+             model_max_tokens,
+             messages,
+             deps.llm_model,
+         )
+
          total_percentage = (total_tokens / max_tokens) * 100 if max_tokens > 0 else 0

      logger.debug(
shotgun/agents/history/token_counting/anthropic.py CHANGED
@@ -72,11 +72,23 @@ class AnthropicTokenCounter(TokenCounter):
          Raises:
              RuntimeError: If API call fails
          """
+         # Handle empty text to avoid unnecessary API calls
+         # Anthropic API requires non-empty content, so we need a strict check
+         if not text or not text.strip():
+             return 0
+
+         # Additional validation: ensure the text has actual content
+         # Some edge cases might have only whitespace or control characters
+         cleaned_text = text.strip()
+         if not cleaned_text:
+             return 0
+
          try:
              # Anthropic API expects messages format and model parameter
              # Use await with async client
              result = await self.client.messages.count_tokens(
-                 messages=[{"role": "user", "content": text}], model=self.model_name
+                 messages=[{"role": "user", "content": cleaned_text}],
+                 model=self.model_name,
              )
              return result.input_tokens
          except Exception as e:
@@ -107,5 +119,9 @@ class AnthropicTokenCounter(TokenCounter):
          Raises:
              RuntimeError: If token counting fails
          """
+         # Handle empty message list early
+         if not messages:
+             return 0
+
          total_text = extract_text_from_messages(messages)
          return await self.count_tokens(total_text)
shotgun/agents/history/token_counting/base.py CHANGED
@@ -56,12 +56,23 @@ def extract_text_from_messages(messages: list[ModelMessage]) -> str:
          if hasattr(message, "parts"):
              for part in message.parts:
                  if hasattr(part, "content") and isinstance(part.content, str):
-                     text_parts.append(part.content)
+                     # Only add non-empty content
+                     if part.content.strip():
+                         text_parts.append(part.content)
                  else:
                      # Handle non-text parts (tool calls, etc.)
-                     text_parts.append(str(part))
+                     part_str = str(part)
+                     if part_str.strip():
+                         text_parts.append(part_str)
          else:
              # Handle messages without parts
-             text_parts.append(str(message))
+             msg_str = str(message)
+             if msg_str.strip():
+                 text_parts.append(msg_str)
+
+     # If no valid text parts found, return a minimal placeholder
+     # This ensures we never send completely empty content to APIs
+     if not text_parts:
+         return "."

      return "\n".join(text_parts)
shotgun/agents/history/token_counting/openai.py CHANGED
@@ -57,9 +57,15 @@ class OpenAITokenCounter(TokenCounter):
          Raises:
              RuntimeError: If token counting fails
          """
+         # Handle empty text to avoid unnecessary encoding
+         if not text or not text.strip():
+             return 0
+
          try:
              return len(self.encoding.encode(text))
-         except Exception as e:
+         except BaseException as e:
+             # Must catch BaseException to handle PanicException from tiktoken's Rust layer
+             # which can occur with extremely long texts. Regular Exception won't catch it.
              raise RuntimeError(
                  f"Failed to count tokens for OpenAI model {self.model_name}"
              ) from e
@@ -76,5 +82,9 @@ class OpenAITokenCounter(TokenCounter):
          Raises:
              RuntimeError: If token counting fails
          """
+         # Handle empty message list early
+         if not messages:
+             return 0
+
          total_text = extract_text_from_messages(messages)
          return await self.count_tokens(total_text)
shotgun/agents/history/token_counting/sentencepiece_counter.py CHANGED
@@ -88,6 +88,10 @@ class SentencePieceTokenCounter(TokenCounter):
          Raises:
              RuntimeError: If token counting fails
          """
+         # Handle empty text to avoid unnecessary tokenization
+         if not text or not text.strip():
+             return 0
+
          await self._ensure_tokenizer()

          if self.sp is None:
@@ -115,5 +119,9 @@ class SentencePieceTokenCounter(TokenCounter):
          Raises:
              RuntimeError: If token counting fails
          """
+         # Handle empty message list early
+         if not messages:
+             return 0
+
          total_text = extract_text_from_messages(messages)
          return await self.count_tokens(total_text)
shotgun/agents/history/token_counting/tokenizer_cache.py CHANGED
@@ -3,6 +3,7 @@
  import hashlib
  from pathlib import Path

+ import aiofiles
  import httpx

  from shotgun.logging_config import get_logger
@@ -78,7 +79,8 @@ async def download_gemini_tokenizer() -> Path:

      # Atomic write: write to temp file first, then rename
      temp_path = cache_path.with_suffix(".tmp")
-     temp_path.write_bytes(content)
+     async with aiofiles.open(temp_path, "wb") as f:
+         await f.write(content)
      temp_path.rename(cache_path)

      logger.info(f"Gemini tokenizer downloaded and cached at {cache_path}")
shotgun/agents/history/token_counting/utils.py CHANGED
@@ -44,9 +44,6 @@ def get_token_counter(model_config: ModelConfig) -> TokenCounter:

      # Return cached instance if available
      if cache_key in _token_counter_cache:
-         logger.debug(
-             f"Reusing cached token counter for {model_config.provider.value}:{model_config.name}"
-         )
          return _token_counter_cache[cache_key]

      # Create new instance and cache it
shotgun/agents/plan.py CHANGED
@@ -23,7 +23,7 @@ from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
  logger = get_logger(__name__)


- def create_plan_agent(
+ async def create_plan_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
  ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
      """Create a plan agent with artifact management capabilities.
@@ -39,7 +39,7 @@
      # Use partial to create system prompt function for plan agent
      system_prompt_fn = partial(build_agent_system_prompt, "plan")

-     agent, deps = create_base_agent(
+     agent, deps = await create_base_agent(
          system_prompt_fn,
          agent_runtime_options,
          load_codebase_understanding_tools=True,
shotgun/agents/research.py CHANGED
@@ -26,7 +26,7 @@ from .tools import get_available_web_search_tools
  logger = get_logger(__name__)


- def create_research_agent(
+ async def create_research_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
  ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
      """Create a research agent with web search and artifact management capabilities.
@@ -41,7 +41,7 @@
      logger.debug("Initializing research agent")

      # Get available web search tools based on configured API keys
-     web_search_tools = get_available_web_search_tools()
+     web_search_tools = await get_available_web_search_tools()
      if web_search_tools:
          logger.info(
              "Research agent configured with %d web search tool(s)",
@@ -53,7 +53,7 @@
      # Use partial to create system prompt function for research agent
      system_prompt_fn = partial(build_agent_system_prompt, "research")

-     agent, deps = create_base_agent(
+     agent, deps = await create_base_agent(
          system_prompt_fn,
          agent_runtime_options,
          load_codebase_understanding_tools=True,
shotgun/agents/specify.py CHANGED
@@ -23,7 +23,7 @@ from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
  logger = get_logger(__name__)


- def create_specify_agent(
+ async def create_specify_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
  ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
      """Create a specify agent with artifact management capabilities.
@@ -39,7 +39,7 @@
      # Use partial to create system prompt function for specify agent
      system_prompt_fn = partial(build_agent_system_prompt, "specify")

-     agent, deps = create_base_agent(
+     agent, deps = await create_base_agent(
          system_prompt_fn,
          agent_runtime_options,
          load_codebase_understanding_tools=True,
shotgun/agents/tasks.py CHANGED
@@ -23,7 +23,7 @@ from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
  logger = get_logger(__name__)


- def create_tasks_agent(
+ async def create_tasks_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
  ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
      """Create a tasks agent with file management capabilities.
@@ -39,7 +39,7 @@
      # Use partial to create system prompt function for tasks agent
      system_prompt_fn = partial(build_agent_system_prompt, "tasks")

-     agent, deps = create_base_agent(
+     agent, deps = await create_base_agent(
          system_prompt_fn,
          agent_runtime_options,
          provider=provider,
shotgun/agents/tools/codebase/file_read.py CHANGED
@@ -2,6 +2,7 @@

  from pathlib import Path

+ import aiofiles
  from pydantic_ai import RunContext

  from shotgun.agents.models import AgentDeps
@@ -93,7 +94,8 @@ async def file_read(
      # Read file contents
      encoding_used = "utf-8"
      try:
-         content = full_file_path.read_text(encoding="utf-8")
+         async with aiofiles.open(full_file_path, encoding="utf-8") as f:
+             content = await f.read()
          size_bytes = full_file_path.stat().st_size

          logger.debug(
@@ -119,7 +121,8 @@ async def file_read(
      try:
          # Try with different encoding
          encoding_used = "latin-1"
-         content = full_file_path.read_text(encoding="latin-1")
+         async with aiofiles.open(full_file_path, encoding="latin-1") as f:
+             content = await f.read()
          size_bytes = full_file_path.stat().st_size

          # Detect language from file extension