shotgun-sh 0.2.19__py3-none-any.whl → 0.2.23.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic; see the registry's advisory page for more details.

@@ -0,0 +1,215 @@
1
+ """HTTP client for LiteLLM Proxy API."""
2
+
3
+ import logging
4
+ from typing import Any
5
+
6
+ import httpx
7
+ from tenacity import (
8
+ before_sleep_log,
9
+ retry,
10
+ retry_if_exception,
11
+ stop_after_attempt,
12
+ wait_exponential_jitter,
13
+ )
14
+
15
+ from shotgun.api_endpoints import LITELLM_PROXY_BASE_URL
16
+ from shotgun.logging_config import get_logger
17
+
18
+ from .models import BudgetInfo, KeyInfoResponse, TeamInfoResponse
19
+
20
+ logger = get_logger(__name__)
21
+
22
+
23
def _is_retryable_http_error(exception: BaseException) -> bool:
    """Check if HTTP exception should trigger a retry.

    Args:
        exception: The exception to check

    Returns:
        True if the exception is a transient error that should be retried
    """
    # Retry on network errors and timeouts. httpx.TimeoutException is a
    # subclass of httpx.RequestError, so a single isinstance check covers
    # both (the previous (RequestError, TimeoutException) tuple was redundant).
    if isinstance(exception, httpx.RequestError):
        return True

    # Retry on server errors (5xx) and rate limits (429)
    if isinstance(exception, httpx.HTTPStatusError):
        status_code = exception.response.status_code
        return status_code >= 500 or status_code == 429

    # Don't retry on other errors (e.g., 4xx client errors)
    return False
43
+
44
+
45
class LiteLLMProxyClient:
    """HTTP client for LiteLLM Proxy API.

    Provides methods to query budget information and key/team metadata
    from a LiteLLM proxy server.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str | None = None,
        timeout: float = 10.0,
    ):
        """Initialize LiteLLM Proxy client.

        Args:
            api_key: LiteLLM API key for authentication
            base_url: Base URL for LiteLLM proxy. If None, uses LITELLM_PROXY_BASE_URL
            timeout: Request timeout in seconds
        """
        self.api_key = api_key
        self.base_url = base_url or LITELLM_PROXY_BASE_URL
        self.timeout = timeout

    def _auth_headers(self) -> dict[str, str]:
        """Build the Authorization header used on every proxy request."""
        return {"Authorization": f"Bearer {self.api_key}"}

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential_jitter(initial=1, max=8),
        retry=retry_if_exception(_is_retryable_http_error),
        before_sleep=before_sleep_log(logger, logging.WARNING),
        reraise=True,
    )
    async def _request_with_retry(
        self,
        method: str,
        url: str,
        **kwargs: Any,
    ) -> httpx.Response:
        """Make async HTTP request with exponential backoff retry and jitter.

        Uses tenacity to retry on transient errors (5xx, 429, network errors)
        with exponential backoff and jitter. Client errors (4xx except 429)
        are not retried.

        Args:
            method: HTTP method (GET, POST, etc.)
            url: Request URL
            **kwargs: Additional arguments to pass to httpx request

        Returns:
            HTTP response

        Raises:
            httpx.HTTPError: If request fails after all retries
        """
        # A fresh client per attempt keeps connection state simple; the
        # context manager guarantees the client is closed on every path.
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.request(method, url, **kwargs)
            response.raise_for_status()
            return response

    async def get_key_info(self) -> KeyInfoResponse:
        """Get key information from LiteLLM proxy.

        Returns:
            Key information including spend, budget, and team_id

        Raises:
            httpx.HTTPError: If request fails
        """
        endpoint = f"{self.base_url}/key/info"
        logger.debug("Fetching key info from %s", endpoint)

        # The proxy expects the key both as a query param and as the bearer token.
        response = await self._request_with_retry(
            "GET",
            endpoint,
            params={"key": self.api_key},
            headers=self._auth_headers(),
        )

        parsed = KeyInfoResponse.model_validate(response.json())

        logger.info(
            "Successfully fetched key info: key_alias=%s, team_id=%s",
            parsed.info.key_alias,
            parsed.info.team_id,
        )
        return parsed

    async def get_team_info(self, team_id: str) -> TeamInfoResponse:
        """Get team information from LiteLLM proxy.

        Args:
            team_id: Team identifier

        Returns:
            Team information including spend and budget

        Raises:
            httpx.HTTPError: If request fails
        """
        endpoint = f"{self.base_url}/team/info"
        logger.debug("Fetching team info from %s for team_id=%s", endpoint, team_id)

        response = await self._request_with_retry(
            "GET",
            endpoint,
            params={"team_id": team_id},
            headers=self._auth_headers(),
        )

        parsed = TeamInfoResponse.model_validate(response.json())

        logger.info(
            "Successfully fetched team info: team_alias=%s",
            parsed.team_info.team_alias,
        )
        return parsed

    async def get_budget_info(self) -> BudgetInfo:
        """Get team-level budget information for this key.

        Budget is always configured at the team level, never at the key level.
        This method fetches the team_id from the key info, then retrieves
        the team's budget information.

        Returns:
            Team-level budget information

        Raises:
            httpx.HTTPError: If request fails
            ValueError: If team has no budget configured
        """
        logger.debug("Fetching budget info")

        # Resolve the owning team via the key's metadata.
        key_info = (await self.get_key_info()).info

        # Fetch team budget (budget is always at team level)
        logger.debug(
            "Fetching team budget for team_id=%s",
            key_info.team_id,
        )
        team_info = (await self.get_team_info(key_info.team_id)).team_info

        if team_info.max_budget is None:
            raise ValueError(
                f"Team (team_id={key_info.team_id}) has no max_budget configured"
            )

        logger.debug("Using team-level budget: $%.6f", team_info.max_budget)
        return BudgetInfo.from_team_info(team_info)
199
+
200
+
201
# Convenience function for standalone use
async def get_budget_info(api_key: str, base_url: str | None = None) -> BudgetInfo:
    """Get budget information for an API key.

    Convenience function that creates a client and calls get_budget_info.

    Args:
        api_key: LiteLLM API key
        base_url: Optional base URL for LiteLLM proxy

    Returns:
        Budget information
    """
    # Build a throwaway client and delegate straight to its method.
    return await LiteLLMProxyClient(api_key, base_url=base_url).get_budget_info()
@@ -0,0 +1,137 @@
1
+ """Pydantic models for LiteLLM Proxy API."""
2
+
3
+ from enum import StrEnum
4
+
5
+ from pydantic import BaseModel, Field
6
+
7
+
8
+ class BudgetSource(StrEnum):
9
+ """Source of budget information."""
10
+
11
+ KEY = "key"
12
+ TEAM = "team"
13
+
14
+
15
class KeyInfoData(BaseModel):
    """Key information data from /key/info endpoint."""

    # Identity of the key
    key_name: str = Field(description="Key name/identifier")
    key_alias: str | None = Field(default=None, description="Human-readable key alias")
    # Monetary figures are USD; max_budget is None when no key-level budget is set
    spend: float = Field(description="Current spend for this key in USD")
    max_budget: float | None = Field(
        default=None, description="Maximum budget for this key in USD"
    )
    # Ownership: the team and user this key is attached to
    team_id: str = Field(description="Team ID associated with this key")
    user_id: str = Field(description="User ID associated with this key")
    # Defaults to an empty list when the proxy returns no model restrictions
    models: list[str] = Field(
        default_factory=list, description="List of models available to this key"
    )
29
+
30
+
31
class KeyInfoResponse(BaseModel):
    """Response from /key/info endpoint.

    Envelope object: the key string at the top level with the detailed
    metadata nested under ``info``.
    """

    key: str = Field(description="The API key")
    info: KeyInfoData = Field(description="Key information data")
36
+
37
+
38
class TeamInfoData(BaseModel):
    """Team information data from /team/info endpoint."""

    team_id: str = Field(description="Team identifier")
    # Optional human-friendly name; may be absent
    team_alias: str | None = Field(
        default=None, description="Human-readable team alias"
    )
    # Monetary figures are USD; max_budget is None when no team budget is configured
    max_budget: float | None = Field(
        default=None, description="Maximum budget for this team in USD"
    )
    spend: float = Field(description="Current spend for this team in USD")
    # Defaults to an empty list when the proxy returns no model restrictions
    models: list[str] = Field(
        default_factory=list, description="List of models available to this team"
    )
52
+
53
+
54
class TeamInfoResponse(BaseModel):
    """Response from /team/info endpoint.

    Envelope object: the team id at the top level with the detailed
    metadata nested under ``team_info``.
    """

    team_id: str = Field(description="Team identifier")
    team_info: TeamInfoData = Field(description="Team information data")
59
+
60
+
61
class BudgetInfo(BaseModel):
    """Unified budget information.

    Combines key and team budget information to provide a single view
    of budget status. Budget can come from either key-level or team-level,
    with key-level taking priority if set.
    """

    max_budget: float = Field(description="Maximum budget in USD")
    spend: float = Field(description="Current spend in USD")
    remaining: float = Field(description="Remaining budget in USD")
    source: BudgetSource = Field(
        description="Source of budget information (key or team)"
    )
    percentage_used: float = Field(description="Percentage of budget used (0-100)")

    @classmethod
    def _from_budget(
        cls, max_budget: float, spend: float, source: BudgetSource
    ) -> "BudgetInfo":
        """Build a BudgetInfo from raw budget figures.

        Shared by from_key_info and from_team_info so the remaining /
        percentage arithmetic lives in exactly one place.

        Args:
            max_budget: Maximum budget in USD
            spend: Current spend in USD
            source: Whether the figures are key-level or team-level

        Returns:
            BudgetInfo instance
        """
        # Guard against division by zero for a configured-but-zero budget
        percentage_used = (spend / max_budget * 100) if max_budget > 0 else 0.0
        return cls(
            max_budget=max_budget,
            spend=spend,
            remaining=max_budget - spend,
            source=source,
            percentage_used=percentage_used,
        )

    @classmethod
    def from_key_info(cls, key_info: KeyInfoData) -> "BudgetInfo":
        """Create BudgetInfo from key-level budget.

        Args:
            key_info: Key information containing budget data

        Returns:
            BudgetInfo instance with key-level budget

        Raises:
            ValueError: If key does not have max_budget set
        """
        if key_info.max_budget is None:
            raise ValueError("Key does not have max_budget set")

        return cls._from_budget(key_info.max_budget, key_info.spend, BudgetSource.KEY)

    @classmethod
    def from_team_info(cls, team_info: TeamInfoData) -> "BudgetInfo":
        """Create BudgetInfo from team-level budget.

        Args:
            team_info: Team information containing budget data

        Returns:
            BudgetInfo instance with team-level budget

        Raises:
            ValueError: If team does not have max_budget set
        """
        if team_info.max_budget is None:
            raise ValueError("Team does not have max_budget set")

        return cls._from_budget(
            team_info.max_budget, team_info.spend, BudgetSource.TEAM
        )
shotgun/sdk/codebase.py CHANGED
@@ -93,6 +93,19 @@ class CodebaseSDK:
93
93
  if indexed_from_cwd is None:
94
94
  indexed_from_cwd = str(Path.cwd().resolve())
95
95
 
96
+ # Track codebase indexing started event
97
+ source = detect_source()
98
+ logger.debug(
99
+ "Tracking codebase_index_started event: source=%s",
100
+ source,
101
+ )
102
+ track_event(
103
+ "codebase_index_started",
104
+ {
105
+ "source": source,
106
+ },
107
+ )
108
+
96
109
  graph = await self.service.create_graph(
97
110
  resolved_path,
98
111
  name,
@@ -101,9 +114,7 @@ class CodebaseSDK:
101
114
  )
102
115
  file_count = sum(graph.language_stats.values()) if graph.language_stats else 0
103
116
 
104
- # Track codebase indexing event
105
- # Detect if called from TUI by checking the call stack
106
- source = detect_source()
117
+ # Track codebase indexing completion event (reuse source from start event)
107
118
 
108
119
  logger.debug(
109
120
  "Tracking codebase_indexed event: file_count=%d, node_count=%d, relationship_count=%d, source=%s",
shotgun/tui/app.py CHANGED
@@ -14,7 +14,10 @@ from shotgun.agents.models import AgentType
14
14
  from shotgun.logging_config import get_logger
15
15
  from shotgun.tui.containers import TUIContainer
16
16
  from shotgun.tui.screens.splash import SplashScreen
17
- from shotgun.utils.file_system_utils import get_shotgun_base_path
17
+ from shotgun.utils.file_system_utils import (
18
+ ensure_shotgun_directory_exists,
19
+ get_shotgun_base_path,
20
+ )
18
21
  from shotgun.utils.update_checker import (
19
22
  detect_installation_method,
20
23
  perform_auto_update_async,
@@ -34,10 +37,10 @@ logger = get_logger(__name__)
34
37
  class ShotgunApp(App[None]):
35
38
  # ChatScreen removed from SCREENS dict since it requires dependency injection
36
39
  # and is instantiated manually in refresh_startup_screen()
40
+ # DirectorySetupScreen also removed since it requires error_message parameter
37
41
  SCREENS = {
38
42
  "provider_config": ProviderConfigScreen,
39
43
  "model_picker": ModelPickerScreen,
40
- "directory_setup": DirectorySetupScreen,
41
44
  "github_issue": GitHubIssueScreen,
42
45
  }
43
46
  BINDINGS = [
@@ -117,16 +120,32 @@ class ShotgunApp(App[None]):
117
120
  )
118
121
  return
119
122
 
123
+ # Try to create .shotgun directory if it doesn't exist
120
124
  if not self.check_local_shotgun_directory_exists():
121
- if isinstance(self.screen, DirectorySetupScreen):
125
+ try:
126
+ path = ensure_shotgun_directory_exists()
127
+ # Verify directory was created successfully
128
+ if not path.is_dir():
129
+ # Show error screen if creation failed
130
+ if isinstance(self.screen, DirectorySetupScreen):
131
+ return
132
+ self.push_screen(
133
+ DirectorySetupScreen(
134
+ error_message="Unable to create .shotgun directory due to filesystem conflict."
135
+ ),
136
+ callback=lambda _arg: self.refresh_startup_screen(),
137
+ )
138
+ return
139
+ except Exception as exc:
140
+ # Show error screen if creation failed with exception
141
+ if isinstance(self.screen, DirectorySetupScreen):
142
+ return
143
+ self.push_screen(
144
+ DirectorySetupScreen(error_message=str(exc)),
145
+ callback=lambda _arg: self.refresh_startup_screen(),
146
+ )
122
147
  return
123
148
 
124
- self.push_screen(
125
- DirectorySetupScreen(),
126
- callback=lambda _arg: self.refresh_startup_screen(),
127
- )
128
- return
129
-
130
149
  if isinstance(self.screen, ChatScreen):
131
150
  return
132
151
 
@@ -44,12 +44,17 @@ from shotgun.agents.models import (
44
44
  AgentType,
45
45
  FileOperationTracker,
46
46
  )
47
+ from shotgun.agents.runner import AgentRunner
47
48
  from shotgun.codebase.core.manager import (
48
49
  CodebaseAlreadyIndexedError,
49
50
  CodebaseGraphManager,
50
51
  )
51
52
  from shotgun.codebase.models import IndexProgress, ProgressPhase
52
- from shotgun.exceptions import ContextSizeLimitExceeded
53
+ from shotgun.exceptions import (
54
+ SHOTGUN_CONTACT_EMAIL,
55
+ ErrorNotPickedUpBySentry,
56
+ ShotgunAccountException,
57
+ )
53
58
  from shotgun.posthog_telemetry import track_event
54
59
  from shotgun.sdk.codebase import CodebaseSDK
55
60
  from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
@@ -59,6 +64,8 @@ from shotgun.tui.components.mode_indicator import ModeIndicator
59
64
  from shotgun.tui.components.prompt_input import PromptInput
60
65
  from shotgun.tui.components.spinner import Spinner
61
66
  from shotgun.tui.components.status_bar import StatusBar
67
+
68
+ # TUIErrorHandler removed - exceptions now caught directly
62
69
  from shotgun.tui.screens.chat.codebase_index_prompt_screen import (
63
70
  CodebaseIndexPromptScreen,
64
71
  )
@@ -301,9 +308,75 @@ class ChatScreen(Screen[None]):
301
308
  # Re-focus input after mode change
302
309
  self.call_later(lambda: self.widget_coordinator.update_prompt_input(focus=True))
303
310
 
304
- def action_show_usage(self) -> None:
311
+ async def action_show_usage(self) -> None:
305
312
  usage_hint = self.agent_manager.get_usage_hint()
306
313
  logger.info(f"Usage hint: {usage_hint}")
314
+
315
+ # Add budget info for Shotgun Account users
316
+ if self.deps.llm_model.is_shotgun_account:
317
+ try:
318
+ from shotgun.llm_proxy import LiteLLMProxyClient
319
+
320
+ logger.debug("Fetching budget info for Shotgun Account")
321
+ client = LiteLLMProxyClient(self.deps.llm_model.api_key)
322
+ budget_info = await client.get_budget_info()
323
+
324
+ # Format budget section
325
+ source_label = "Key" if budget_info.source == "key" else "Team"
326
+ budget_section = f"""## Shotgun Account Budget
327
+
328
+ * Max Budget: ${budget_info.max_budget:.2f}
329
+ * Current Spend: ${budget_info.spend:.2f}
330
+ * Remaining: ${budget_info.remaining:.2f} ({100 - budget_info.percentage_used:.1f}%)
331
+ * Budget Source: {source_label}-level
332
+
333
+ **Questions or need help?**"""
334
+
335
+ # Build markdown_before (usage + budget info before email)
336
+ if usage_hint:
337
+ markdown_before = f"{usage_hint}\n\n{budget_section}"
338
+ else:
339
+ markdown_before = budget_section
340
+
341
+ markdown_after = (
342
+ "\n\n_Reach out anytime for billing questions "
343
+ "or to increase your budget._"
344
+ )
345
+
346
+ # Mount with email copy button
347
+ self.mount_hint_with_email(
348
+ markdown_before=markdown_before,
349
+ email="contact@shotgun.sh",
350
+ markdown_after=markdown_after,
351
+ )
352
+ logger.debug("Successfully added budget info to usage hint")
353
+ return # Exit early since we've already mounted
354
+
355
+ except Exception as e:
356
+ logger.warning(f"Failed to fetch budget info: {e}")
357
+ # For Shotgun Account, show budget fetch error
358
+ # If we have usage data, still show it
359
+ if usage_hint:
360
+ # Show usage even though budget fetch failed
361
+ self.mount_hint(usage_hint)
362
+ else:
363
+ # No usage and budget fetch failed - show specific error with email
364
+ markdown_before = (
365
+ "⚠️ **Unable to fetch budget information**\n\n"
366
+ "There was an error retrieving your budget data."
367
+ )
368
+ markdown_after = (
369
+ "\n\n_Try the command again in a moment. "
370
+ "If the issue persists, reach out for help._"
371
+ )
372
+ self.mount_hint_with_email(
373
+ markdown_before=markdown_before,
374
+ email="contact@shotgun.sh",
375
+ markdown_after=markdown_after,
376
+ )
377
+ return # Exit early
378
+
379
+ # Fallback for non-Shotgun Account users
307
380
  if usage_hint:
308
381
  self.mount_hint(usage_hint)
309
382
  else:
@@ -582,6 +655,21 @@ class ChatScreen(Screen[None]):
582
655
  hint = HintMessage(message=markdown)
583
656
  self.agent_manager.add_hint_message(hint)
584
657
 
658
+ def mount_hint_with_email(
659
+ self, markdown_before: str, email: str, markdown_after: str = ""
660
+ ) -> None:
661
+ """Mount a hint with inline email copy button.
662
+
663
+ Args:
664
+ markdown_before: Markdown content to display before the email line
665
+ email: Email address to display with copy button
666
+ markdown_after: Optional markdown content to display after the email line
667
+ """
668
+ hint = HintMessage(
669
+ message=markdown_before, email=email, markdown_after=markdown_after
670
+ )
671
+ self.agent_manager.add_hint_message(hint)
672
+
585
673
  @on(PartialResponseMessage)
586
674
  def handle_partial_response(self, event: PartialResponseMessage) -> None:
587
675
  # Filter event.messages to exclude ModelRequest with only ToolReturnPart
@@ -768,6 +856,19 @@ class ChatScreen(Screen[None]):
768
856
  # Update the agent manager's model configuration
769
857
  self.agent_manager.deps.llm_model = result.model_config
770
858
 
859
+ # Reset agents so they get recreated with new model
860
+ self.agent_manager._agents_initialized = False
861
+ self.agent_manager._research_agent = None
862
+ self.agent_manager._plan_agent = None
863
+ self.agent_manager._tasks_agent = None
864
+ self.agent_manager._specify_agent = None
865
+ self.agent_manager._export_agent = None
866
+ self.agent_manager._research_deps = None
867
+ self.agent_manager._plan_deps = None
868
+ self.agent_manager._tasks_deps = None
869
+ self.agent_manager._specify_deps = None
870
+ self.agent_manager._export_deps = None
871
+
771
872
  # Get current analysis and update context indicator via coordinator
772
873
  analysis = await self.agent_manager.get_context_analysis()
773
874
  self.widget_coordinator.update_context_indicator(analysis, result.new_model)
@@ -1139,8 +1240,6 @@ class ChatScreen(Screen[None]):
1139
1240
 
1140
1241
  @work
1141
1242
  async def run_agent(self, message: str) -> None:
1142
- prompt = None
1143
-
1144
1243
  # Start processing with spinner
1145
1244
  from textual.worker import get_current_worker
1146
1245
 
@@ -1150,60 +1249,31 @@ class ChatScreen(Screen[None]):
1150
1249
  # Start context indicator animation immediately
1151
1250
  self.widget_coordinator.set_context_streaming(True)
1152
1251
 
1153
- prompt = message
1154
-
1155
1252
  try:
1156
- await self.agent_manager.run(
1157
- prompt=prompt,
1158
- )
1159
- except asyncio.CancelledError:
1160
- # Handle cancellation gracefully - DO NOT re-raise
1161
- self.mount_hint("⚠️ Operation cancelled by user")
1162
- except ContextSizeLimitExceeded as e:
1163
- # User-friendly error with actionable options
1164
- hint = (
1165
- f"⚠️ **Context too large for {e.model_name}**\n\n"
1166
- f"Your conversation history exceeds this model's limit ({e.max_tokens:,} tokens).\n\n"
1167
- f"**Choose an action:**\n\n"
1168
- f"1. Switch to a larger model (`Ctrl+P` → Change Model)\n"
1169
- f"2. Switch to a larger model, compact (`/compact`), then switch back to {e.model_name}\n"
1170
- f"3. Clear conversation (`/clear`)\n"
1171
- )
1172
-
1173
- self.mount_hint(hint)
1174
-
1175
- # Log for debugging (won't send to Sentry due to ErrorNotPickedUpBySentry)
1176
- logger.info(
1177
- "Context size limit exceeded",
1178
- extra={
1179
- "max_tokens": e.max_tokens,
1180
- "model_name": e.model_name,
1181
- },
1182
- )
1183
- except Exception as e:
1184
- # Log with full stack trace to shotgun.log
1185
- logger.exception(
1186
- "Agent run failed",
1187
- extra={
1188
- "agent_mode": self.mode.value,
1189
- "error_type": type(e).__name__,
1190
- },
1191
- )
1192
-
1193
- # Determine user-friendly message based on error type
1194
- error_name = type(e).__name__
1195
- error_message = str(e)
1196
-
1197
- if "APIStatusError" in error_name and "overload" in error_message.lower():
1198
- hint = "⚠️ The AI service is temporarily overloaded. Please wait a moment and try again."
1199
- elif "APIStatusError" in error_name and "rate" in error_message.lower():
1200
- hint = "⚠️ Rate limit reached. Please wait before trying again."
1201
- elif "APIStatusError" in error_name:
1202
- hint = f"⚠️ AI service error: {error_message}"
1253
+ # Use unified agent runner - exceptions propagate for handling
1254
+ runner = AgentRunner(self.agent_manager)
1255
+ await runner.run(message)
1256
+ except ShotgunAccountException as e:
1257
+ # Shotgun Account errors show contact email UI
1258
+ message_parts = e.to_markdown().split("**Need help?**")
1259
+ if len(message_parts) == 2:
1260
+ markdown_before = message_parts[0] + "**Need help?**"
1261
+ markdown_after = message_parts[1].strip()
1262
+ self.mount_hint_with_email(
1263
+ markdown_before=markdown_before,
1264
+ email=SHOTGUN_CONTACT_EMAIL,
1265
+ markdown_after=markdown_after,
1266
+ )
1203
1267
  else:
1204
- hint = f"⚠️ An error occurred: {error_message}\n\nCheck logs at ~/.shotgun-sh/logs/shotgun.log"
1205
-
1206
- self.mount_hint(hint)
1268
+ # Fallback if message format is unexpected
1269
+ self.mount_hint(e.to_markdown())
1270
+ except ErrorNotPickedUpBySentry as e:
1271
+ # All other user-actionable errors - display with markdown
1272
+ self.mount_hint(e.to_markdown())
1273
+ except Exception as e:
1274
+ # Unexpected errors that weren't wrapped (shouldn't happen)
1275
+ logger.exception("Unexpected error in run_agent")
1276
+ self.mount_hint(f"⚠️ An unexpected error occurred: {str(e)}")
1207
1277
  finally:
1208
1278
  self.processing_state.stop_processing()
1209
1279
  # Stop context indicator animation