shotgun-sh 0.2.11.dev3__py3-none-any.whl → 0.2.11.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -58,7 +58,12 @@ from shotgun.agents.context_analyzer import (
     ContextCompositionTelemetry,
     ContextFormatter,
 )
-from shotgun.agents.models import AgentResponse, AgentType, FileOperation
+from shotgun.agents.models import (
+    AgentResponse,
+    AgentType,
+    FileOperation,
+    FileOperationTracker,
+)
 from shotgun.posthog_telemetry import track_event
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 from shotgun.utils.source_detection import detect_source
@@ -769,6 +774,12 @@ class AgentManager(Widget):
                 HintMessage(message=agent_response.response)
             )

+        # Add file operation hints before questions (so they appear first in UI)
+        if file_operations:
+            file_hint = self._create_file_operation_hint(file_operations)
+            if file_hint:
+                self.ui_message_history.append(HintMessage(message=file_hint))
+
         if len(agent_response.clarifying_questions) == 1:
             # Single question - treat as non-blocking suggestion, DON'T enter Q&A mode
             self.ui_message_history.append(
@@ -1134,6 +1145,38 @@ class AgentManager(Widget):
             )
         )

+    def _create_file_operation_hint(
+        self, file_operations: list[FileOperation]
+    ) -> str | None:
+        """Create a hint message for file operations.
+
+        Args:
+            file_operations: List of file operations to create a hint for
+
+        Returns:
+            Hint message string or None if no operations
+        """
+        if not file_operations:
+            return None
+
+        tracker = FileOperationTracker(operations=file_operations)
+        display_path = tracker.get_display_path()
+
+        if not display_path:
+            return None
+
+        path_obj = Path(display_path)
+
+        if len(file_operations) == 1:
+            return f"📝 Modified: `{display_path}`"
+        else:
+            num_files = len({op.file_path for op in file_operations})
+            if path_obj.is_dir():
+                return f"📁 Modified {num_files} files in: `{display_path}`"
+            else:
+                # Common path is a file, show parent directory
+                return f"📁 Modified {num_files} files in: `{path_obj.parent}`"
+
     def _post_messages_updated(
         self, file_operations: list[FileOperation] | None = None
     ) -> None:
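The new `_create_file_operation_hint` leans on `FileOperationTracker.get_display_path()`, whose internals are not part of this diff. As a rough illustration only (not shotgun's actual tracker), a shared display path for several touched files can be derived with `os.path.commonpath`:

```python
# Illustrative only: one way a tracker could derive a single display path from
# a set of touched files. This is NOT shotgun's FileOperationTracker.
import os


def get_display_path(file_paths: list[str]) -> str | None:
    if not file_paths:
        return None
    if len(file_paths) == 1:
        return file_paths[0]
    # Longest shared prefix directory of all modified files.
    return os.path.commonpath(file_paths)


print(get_display_path(["src/app/main.py", "src/app/utils.py"]))  # -> "src/app"
```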
@@ -205,6 +205,10 @@ class ShotgunConfig(BaseModel):
         default=False,
         description="Whether the welcome screen has been shown to the user",
     )
+    shown_onboarding_popup: datetime | None = Field(
+        default=None,
+        description="Timestamp when the onboarding popup was shown to the user (ISO8601 format)",
+    )
     marketing: MarketingConfig = Field(
         default_factory=MarketingConfig,
         description="Marketing messages configuration and tracking",
@@ -46,9 +46,12 @@ class ConversationManager:

         conversation.updated_at = datetime.now()

-        # Serialize to JSON using Pydantic's model_dump
-        data = conversation.model_dump(mode="json")
-        json_content = json.dumps(data, indent=2, ensure_ascii=False)
+        # Serialize to JSON in background thread to avoid blocking event loop
+        # This is crucial for large conversations (5k+ tokens)
+        data = await asyncio.to_thread(conversation.model_dump, mode="json")
+        json_content = await asyncio.to_thread(
+            json.dumps, data, indent=2, ensure_ascii=False
+        )

         async with aiofiles.open(
             self.conversation_path, "w", encoding="utf-8"
@@ -76,9 +79,13 @@ class ConversationManager:
         try:
             async with aiofiles.open(self.conversation_path, encoding="utf-8") as f:
                 content = await f.read()
-                data = json.loads(content)
+                # Deserialize JSON in background thread to avoid blocking
+                data = await asyncio.to_thread(json.loads, content)

-            conversation = ConversationHistory.model_validate(data)
+            # Validate model in background thread for large conversations
+            conversation = await asyncio.to_thread(
+                ConversationHistory.model_validate, data
+            )
             logger.debug(
                 "Conversation loaded from %s with %d agent messages",
                 self.conversation_path,
@@ -127,10 +134,10 @@ class ConversationManager:
                 "Failed to clear conversation at %s: %s", self.conversation_path, e
             )

-    def exists(self) -> bool:
+    async def exists(self) -> bool:
         """Check if a conversation history file exists.

         Returns:
             True if conversation file exists, False otherwise
         """
-        return self.conversation_path.exists()
+        return await aiofiles.os.path.exists(str(self.conversation_path))
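The save and load paths now push the CPU-bound steps (`model_dump`, `json.dumps`, `json.loads`, `model_validate`) onto worker threads so the Textual event loop keeps rendering while large conversations are (de)serialized. A self-contained sketch of the pattern, using a stand-in `Conversation` model rather than the real `ConversationHistory`:

```python
# Minimal sketch of the offloading pattern; `Conversation` is a stand-in model,
# not the package's real ConversationHistory.
import asyncio
import json

from pydantic import BaseModel


class Conversation(BaseModel):
    messages: list[str] = []


async def serialize(conversation: Conversation) -> str:
    # CPU-bound work (Pydantic dump + json.dumps) runs in a worker thread,
    # so the asyncio event loop stays free to keep rendering the TUI.
    data = await asyncio.to_thread(conversation.model_dump, mode="json")
    return await asyncio.to_thread(json.dumps, data, indent=2, ensure_ascii=False)


async def deserialize(text: str) -> Conversation:
    data = await asyncio.to_thread(json.loads, text)
    return await asyncio.to_thread(Conversation.model_validate, data)


print(asyncio.run(serialize(Conversation(messages=["hi"]))))
```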
@@ -1,7 +1,9 @@
 """History processors for managing conversation history in Shotgun agents."""

+from collections.abc import Awaitable, Callable
 from typing import TYPE_CHECKING, Any, Protocol

+from anthropic import APIStatusError
 from pydantic_ai import ModelSettings
 from pydantic_ai.messages import (
     ModelMessage,
@@ -14,6 +16,7 @@ from pydantic_ai.messages import (
 from shotgun.agents.llm import shotgun_model_request
 from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
 from shotgun.agents.models import AgentDeps
+from shotgun.exceptions import ContextSizeLimitExceeded
 from shotgun.logging_config import get_logger
 from shotgun.posthog_telemetry import track_event
 from shotgun.prompts import PromptLoader
@@ -51,6 +54,86 @@ logger = get_logger(__name__)
 prompt_loader = PromptLoader()


+async def _safe_token_estimation(
+    estimation_func: Callable[..., Awaitable[int]],
+    model_name: str,
+    max_tokens: int,
+    *args: Any,
+    **kwargs: Any,
+) -> int:
+    """Safely estimate tokens with proper error handling.
+
+    Wraps token estimation functions to handle failures gracefully.
+    Only RuntimeError (from token counters) is wrapped in ContextSizeLimitExceeded.
+    Other errors (network, auth) are allowed to bubble up.
+
+    Args:
+        estimation_func: Async function that estimates tokens
+        model_name: Name of the model for error messages
+        max_tokens: Maximum tokens for the model
+        *args: Arguments to pass to estimation_func
+        **kwargs: Keyword arguments to pass to estimation_func
+
+    Returns:
+        Token count from estimation_func
+
+    Raises:
+        ContextSizeLimitExceeded: If token counting fails with RuntimeError
+        Exception: Any other exceptions from estimation_func
+    """
+    try:
+        return await estimation_func(*args, **kwargs)
+    except Exception as e:
+        # Log the error with full context
+        logger.warning(
+            f"Token counting failed for {model_name}",
+            extra={
+                "error_type": type(e).__name__,
+                "error_message": str(e),
+                "model": model_name,
+            },
+        )
+
+        # Token counting behavior with oversized context (verified via testing):
+        #
+        # 1. OpenAI/tiktoken:
+        #    - Successfully counts any size (tested with 752K tokens, no error)
+        #    - Library errors: ValueError, KeyError, AttributeError, SSLError (file/cache issues)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # 2. Gemini/SentencePiece:
+        #    - Successfully counts any size (tested with 752K tokens, no error)
+        #    - Library errors: RuntimeError, IOError, TypeError (file/model loading issues)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # 3. Anthropic API:
+        #    - Successfully counts large token counts (tested with 752K tokens, no error)
+        #    - Only enforces 32 MB request size limit (not token count)
+        #    - Raises: APIStatusError(413) with error type 'request_too_large' for 32MB+ requests
+        #    - Other API errors: APIConnectionError, RateLimitError, APIStatusError (4xx/5xx)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # IMPORTANT: No provider raises errors for "too many tokens" during counting.
+        # Token count validation happens separately by comparing count to max_input_tokens.
+        #
+        # We wrap RuntimeError (library-level failures from tiktoken/sentencepiece).
+        # We also wrap Anthropic's 413 error (request exceeds 32 MB) as it indicates
+        # context is effectively too large and needs user action to reduce it.
+        if isinstance(e, RuntimeError):
+            raise ContextSizeLimitExceeded(
+                model_name=model_name, max_tokens=max_tokens
+            ) from e
+
+        # Check for Anthropic's 32 MB request size limit (APIStatusError with status 413)
+        if isinstance(e, APIStatusError) and e.status_code == 413:
+            raise ContextSizeLimitExceeded(
+                model_name=model_name, max_tokens=max_tokens
+            ) from e
+
+        # Re-raise other exceptions (network errors, auth failures, etc.)
+        raise
+
+
 def is_summary_part(part: Any) -> bool:
     """Check if a message part is a compacted summary."""
     return isinstance(part, TextPart) and part.content.startswith(SUMMARY_MARKER)
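For context, here is how a caller might route an async estimator through the new wrapper so that a library-level `RuntimeError` surfaces as `ContextSizeLimitExceeded`. This is a hypothetical usage sketch: the estimator, model name, and limit are stand-ins, and it assumes it runs inside the module where the private helper and the exception are already in scope.

```python
# Hypothetical usage; count_tokens_for_messages and the model name/limit are
# stand-ins, and _safe_token_estimation / ContextSizeLimitExceeded are assumed
# to be in scope (they are module-private in the real package).
import asyncio


async def count_tokens_for_messages(messages: list[str]) -> int:
    # Pretend a tokenizer failure (e.g. a tiktoken cache problem) surfaced as RuntimeError.
    raise RuntimeError("tokenizer failed to load")


async def main() -> None:
    try:
        tokens = await _safe_token_estimation(
            count_tokens_for_messages,  # estimation_func
            "example-model",            # model_name (illustrative)
            200_000,                    # max_tokens (illustrative)
            ["hello", "world"],         # *args forwarded to the estimator
        )
        print(tokens)
    except ContextSizeLimitExceeded as exc:
        # The library failure is translated into a user-actionable error.
        print(exc)


asyncio.run(main())
```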
@@ -157,9 +240,15 @@ async def token_limit_compactor(

     if last_summary_index is not None:
         # Check if post-summary conversation exceeds threshold for incremental compaction
-        post_summary_tokens = await estimate_post_summary_tokens(
-            messages, last_summary_index, deps.llm_model
+        post_summary_tokens = await _safe_token_estimation(
+            estimate_post_summary_tokens,
+            deps.llm_model.name,
+            model_max_tokens,
+            messages,
+            last_summary_index,
+            deps.llm_model,
         )
+
         post_summary_percentage = (
             (post_summary_tokens / max_tokens) * 100 if max_tokens > 0 else 0
         )
@@ -366,7 +455,14 @@ async def token_limit_compactor(

     else:
         # Check if total conversation exceeds threshold for full compaction
-        total_tokens = await estimate_tokens_from_messages(messages, deps.llm_model)
+        total_tokens = await _safe_token_estimation(
+            estimate_tokens_from_messages,
+            deps.llm_model.name,
+            model_max_tokens,
+            messages,
+            deps.llm_model,
+        )
+
         total_percentage = (total_tokens / max_tokens) * 100 if max_tokens > 0 else 0

         logger.debug(
@@ -63,7 +63,9 @@ class OpenAITokenCounter(TokenCounter):

         try:
             return len(self.encoding.encode(text))
-        except Exception as e:
+        except BaseException as e:
+            # Must catch BaseException to handle PanicException from tiktoken's Rust layer
+            # which can occur with extremely long texts. Regular Exception won't catch it.
             raise RuntimeError(
                 f"Failed to count tokens for OpenAI model {self.model_name}"
             ) from e
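PyO3-based extensions such as tiktoken can surface a Rust panic as an exception that derives from `BaseException` rather than `Exception`, which a plain `except Exception` never sees. A hedged sketch of the translation pattern, with the failing encoder simulated:

```python
# Sketch of the BaseException-to-RuntimeError translation. NativePanic stands in
# for the BaseException-derived panic a native extension can raise; it is not a
# real tiktoken/pyo3 class, and FakeEncoding only simulates the failure.
class NativePanic(BaseException):
    """Mimics a panic from a native extension (derives from BaseException)."""


class FakeEncoding:
    def encode(self, text: str) -> list[int]:
        raise NativePanic("panic in native tokenizer")


def count_tokens(text: str) -> int:
    encoding = FakeEncoding()
    try:
        return len(encoding.encode(text))
    except BaseException as e:  # deliberately broader than Exception
        # Re-raise as an ordinary, catchable error so callers can show a
        # friendly message instead of the process dying on a panic.
        raise RuntimeError("Failed to count tokens") from e


try:
    count_tokens("x" * 10_000)
except RuntimeError as err:
    print(err)  # Failed to count tokens
```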
shotgun/exceptions.py ADDED
@@ -0,0 +1,32 @@
+"""General exceptions for Shotgun application."""
+
+
+class ErrorNotPickedUpBySentry(Exception):  # noqa: N818
+    """Base for user-actionable errors that shouldn't be sent to Sentry.
+
+    These errors represent expected user conditions requiring action
+    rather than bugs that need tracking.
+    """
+
+
+class ContextSizeLimitExceeded(ErrorNotPickedUpBySentry):
+    """Raised when conversation context exceeds the model's limits.
+
+    This is a user-actionable error - they need to either:
+    1. Switch to a larger context model
+    2. Switch to a larger model, compact their conversation, then switch back
+    3. Clear the conversation and start fresh
+    """
+
+    def __init__(self, model_name: str, max_tokens: int):
+        """Initialize the exception.
+
+        Args:
+            model_name: Name of the model whose limit was exceeded
+            max_tokens: Maximum tokens allowed by the model
+        """
+        self.model_name = model_name
+        self.max_tokens = max_tokens
+        super().__init__(
+            f"Context too large for {model_name} (limit: {max_tokens:,} tokens)"
+        )
@@ -1,5 +1,7 @@
 """Sentry observability setup for Shotgun."""

+from typing import Any
+
 from shotgun import __version__
 from shotgun.logging_config import get_early_logger
 from shotgun.settings import settings
@@ -32,12 +34,27 @@ def setup_sentry_observability() -> bool:
     logger.debug("Using Sentry DSN from settings, proceeding with setup")

     # Determine environment based on version
-    # Dev versions contain "dev", "rc", "alpha", or "beta"
+    # Dev versions contain "dev", "rc", "alpha", "beta"
     if any(marker in __version__ for marker in ["dev", "rc", "alpha", "beta"]):
         environment = "development"
     else:
         environment = "production"

+    def before_send(event: Any, hint: dict[str, Any]) -> Any:
+        """Filter out user-actionable errors from Sentry.
+
+        User-actionable errors (like context size limits) are expected conditions
+        that users need to resolve, not bugs that need tracking.
+        """
+        if "exc_info" in hint:
+            exc_type, exc_value, tb = hint["exc_info"]
+            from shotgun.exceptions import ErrorNotPickedUpBySentry
+
+            if isinstance(exc_value, ErrorNotPickedUpBySentry):
+                # Don't send to Sentry - this is user-actionable, not a bug
+                return None
+        return event
+
     # Initialize Sentry
     sentry_sdk.init(
         dsn=dsn,
@@ -46,6 +63,7 @@ def setup_sentry_observability() -> bool:
         send_default_pii=False,  # Privacy-first: never send PII
         traces_sample_rate=0.1 if environment == "production" else 1.0,
         profiles_sample_rate=0.1 if environment == "production" else 1.0,
+        before_send=before_send,
     )

     # Set user context with anonymous shotgun instance ID from config
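The same filtering idea in isolation: `before_send` returns `None` to drop an event, and `hint["exc_info"]` carries the original `(type, value, traceback)` tuple. The DSN and the `UserActionableError` class below are placeholders, not shotgun's real values:

```python
# Standalone sketch of dropping a "user-actionable" exception class before it
# reaches Sentry; the DSN is a placeholder and UserActionableError is illustrative.
from typing import Any

import sentry_sdk


class UserActionableError(Exception):
    """Expected user-facing condition, not a bug worth tracking."""


def before_send(event: Any, hint: dict[str, Any]) -> Any:
    if "exc_info" in hint:
        _exc_type, exc_value, _tb = hint["exc_info"]
        if isinstance(exc_value, UserActionableError):
            return None  # drop the event: it never reaches Sentry
    return event


sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    before_send=before_send,
)
```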
shotgun/tui/app.py CHANGED
@@ -19,7 +19,7 @@ from shotgun.utils.update_checker import (

 from .screens.chat import ChatScreen
 from .screens.directory_setup import DirectorySetupScreen
-from .screens.feedback import FeedbackScreen
+from .screens.github_issue import GitHubIssueScreen
 from .screens.model_picker import ModelPickerScreen
 from .screens.pipx_migration import PipxMigrationScreen
 from .screens.provider_config import ProviderConfigScreen
@@ -35,7 +35,7 @@ class ShotgunApp(App[None]):
         "provider_config": ProviderConfigScreen,
         "model_picker": ModelPickerScreen,
         "directory_setup": DirectorySetupScreen,
-        "feedback": FeedbackScreen,
+        "github_issue": GitHubIssueScreen,
     }
     BINDINGS = [
         Binding("ctrl+c", "quit", "Quit the app"),
@@ -181,20 +181,15 @@ class ShotgunApp(App[None]):
     def get_system_commands(self, screen: Screen[Any]) -> Iterable[SystemCommand]:
         return [
             SystemCommand(
-                "Feedback", "Send us feedback or report a bug", self.action_feedback
+                "New Issue",
+                "Report a bug or request a feature on GitHub",
+                self.action_new_issue,
             )
-        ]  # we don't want any system commands
+        ]

-    def action_feedback(self) -> None:
-        """Open feedback screen and submit feedback."""
-        from shotgun.posthog_telemetry import Feedback, submit_feedback_survey
-
-        def handle_feedback(feedback: Feedback | None) -> None:
-            if feedback is not None:
-                submit_feedback_survey(feedback)
-                self.notify("Feedback sent. Thank you!")
-
-        self.push_screen(FeedbackScreen(), callback=handle_feedback)
+    def action_new_issue(self) -> None:
+        """Open GitHub issue screen to guide users to create an issue."""
+        self.push_screen(GitHubIssueScreen())


 def run(
@@ -2,6 +2,7 @@

 import asyncio
 import logging
+from datetime import datetime, timezone
 from pathlib import Path
 from typing import cast

@@ -31,6 +32,7 @@ from shotgun.agents.agent_manager import (
     ModelConfigUpdated,
     PartialResponseMessage,
 )
+from shotgun.agents.config import get_config_manager
 from shotgun.agents.config.models import MODEL_SPECS
 from shotgun.agents.conversation_manager import ConversationManager
 from shotgun.agents.history.compaction import apply_persistent_compaction
@@ -45,6 +47,7 @@ from shotgun.codebase.core.manager import (
     CodebaseGraphManager,
 )
 from shotgun.codebase.models import IndexProgress, ProgressPhase
+from shotgun.exceptions import ContextSizeLimitExceeded
 from shotgun.posthog_telemetry import track_event
 from shotgun.sdk.codebase import CodebaseSDK
 from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
@@ -70,6 +73,7 @@ from shotgun.tui.screens.chat_screen.command_providers import (
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 from shotgun.tui.screens.chat_screen.history import ChatHistory
 from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
+from shotgun.tui.screens.onboarding import OnboardingModal
 from shotgun.tui.services.conversation_service import ConversationService
 from shotgun.tui.state.processing_state import ProcessingStateManager
 from shotgun.tui.utils.mode_progress import PlaceholderHints
@@ -166,13 +170,17 @@ class ChatScreen(Screen[None]):
         self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))

         # Load conversation history if --continue flag was provided
-        if self.continue_session and self.conversation_manager.exists():
-            self._load_conversation()
+        # Use call_later to handle async exists() check
+        if self.continue_session:
+            self.call_later(self._check_and_load_conversation)

         self.call_later(self.check_if_codebase_is_indexed)
         # Initial update of context indicator
         self.update_context_indicator()

+        # Show onboarding popup if not shown before
+        self.call_later(self._check_and_show_onboarding)
+
     async def on_key(self, event: events.Key) -> None:
         """Handle key presses for cancellation."""
         # If escape is pressed during Q&A mode, exit Q&A
@@ -305,6 +313,10 @@ class ChatScreen(Screen[None]):
         else:
             self.notify("No context analysis available", severity="error")

+    def action_view_onboarding(self) -> None:
+        """Show the onboarding modal."""
+        self.app.push_screen(OnboardingModal())
+
     @work
     async def action_compact_conversation(self) -> None:
         """Compact the conversation history to reduce size."""
@@ -456,7 +468,7 @@ class ChatScreen(Screen[None]):
         self.agent_manager.ui_message_history = []

         # Use conversation service to clear conversation
-        self.conversation_service.clear_conversation()
+        await self.conversation_service.clear_conversation()

         # Post message history updated event to refresh UI
         self.agent_manager.post_message(
@@ -655,32 +667,42 @@ class ChatScreen(Screen[None]):
         self.update_context_indicator()

         # If there are file operations, add a message showing the modified files
+        # Skip if hint was already added by agent_manager (e.g., in QA mode)
         if event.file_operations:
-            chat_history = self.query_one(ChatHistory)
-            if chat_history.vertical_tail:
-                tracker = FileOperationTracker(operations=event.file_operations)
-                display_path = tracker.get_display_path()
-
-                if display_path:
-                    # Create a simple markdown message with the file path
-                    # The terminal emulator will make this clickable automatically
-                    path_obj = Path(display_path)
-
-                    if len(event.file_operations) == 1:
-                        message = f"📝 Modified: `{display_path}`"
-                    else:
-                        num_files = len({op.file_path for op in event.file_operations})
-                        if path_obj.is_dir():
-                            message = (
-                                f"📁 Modified {num_files} files in: `{display_path}`"
-                            )
+            # Check if file operation hint already exists in recent messages
+            file_hint_exists = any(
+                isinstance(msg, HintMessage)
+                and (
+                    msg.message.startswith("📝 Modified:")
+                    or msg.message.startswith("📁 Modified")
+                )
+                for msg in event.messages[-5:]  # Check last 5 messages
+            )
+
+            if not file_hint_exists:
+                chat_history = self.query_one(ChatHistory)
+                if chat_history.vertical_tail:
+                    tracker = FileOperationTracker(operations=event.file_operations)
+                    display_path = tracker.get_display_path()
+
+                    if display_path:
+                        # Create a simple markdown message with the file path
+                        # The terminal emulator will make this clickable automatically
+                        path_obj = Path(display_path)
+
+                        if len(event.file_operations) == 1:
+                            message = f"📝 Modified: `{display_path}`"
                         else:
-                            # Common path is a file, show parent directory
-                            message = (
-                                f"📁 Modified {num_files} files in: `{path_obj.parent}`"
+                            num_files = len(
+                                {op.file_path for op in event.file_operations}
                             )
+                            if path_obj.is_dir():
+                                message = f"📁 Modified {num_files} files in: `{display_path}`"
+                            else:
+                                # Common path is a file, show parent directory
+                                message = f"📁 Modified {num_files} files in: `{path_obj.parent}`"

-                    self.mount_hint(message)
+                        self.mount_hint(message)

         # Check and display any marketing messages
         from shotgun.tui.app import ShotgunApp
@@ -1106,6 +1128,27 @@ class ChatScreen(Screen[None]):
         except asyncio.CancelledError:
             # Handle cancellation gracefully - DO NOT re-raise
             self.mount_hint("⚠️ Operation cancelled by user")
+        except ContextSizeLimitExceeded as e:
+            # User-friendly error with actionable options
+            hint = (
+                f"⚠️ **Context too large for {e.model_name}**\n\n"
+                f"Your conversation history exceeds this model's limit ({e.max_tokens:,} tokens).\n\n"
+                f"**Choose an action:**\n\n"
+                f"1. Switch to a larger model (`Ctrl+P` → Change Model)\n"
+                f"2. Switch to a larger model, compact (`/compact`), then switch back to {e.model_name}\n"
+                f"3. Clear conversation (`/clear`)\n"
+            )
+
+            self.mount_hint(hint)
+
+            # Log for debugging (won't send to Sentry due to ErrorNotPickedUpBySentry)
+            logger.info(
+                "Context size limit exceeded",
+                extra={
+                    "max_tokens": e.max_tokens,
+                    "model_name": e.model_name,
+                },
+            )
         except Exception as e:
             # Log with full stack trace to shotgun.log
             logger.exception(
@@ -1143,11 +1186,17 @@ class ChatScreen(Screen[None]):
     def _save_conversation(self) -> None:
         """Save the current conversation to persistent storage."""
         # Use conversation service for saving (run async in background)
+        # Use exclusive=True to prevent concurrent saves that can cause file contention
        self.run_worker(
            self.conversation_service.save_conversation(self.agent_manager),
-            exclusive=False,
+            exclusive=True,
        )

+    async def _check_and_load_conversation(self) -> None:
+        """Check if conversation exists and load it if it does."""
+        if await self.conversation_manager.exists():
+            self._load_conversation()
+
     def _load_conversation(self) -> None:
         """Load conversation from persistent storage."""

@@ -1168,3 +1217,18 @@ class ChatScreen(Screen[None]):
             self.mode = restored_type

         self.run_worker(_do_load(), exclusive=False)
+
+    @work
+    async def _check_and_show_onboarding(self) -> None:
+        """Check if onboarding should be shown and display modal if needed."""
+        config_manager = get_config_manager()
+        config = await config_manager.load()
+
+        # Only show onboarding if it hasn't been shown before
+        if config.shown_onboarding_popup is None:
+            # Show the onboarding modal
+            await self.app.push_screen_wait(OnboardingModal())
+
+            # Mark as shown in config with current timestamp
+            config.shown_onboarding_popup = datetime.now(timezone.utc)
+            await config_manager.save(config)
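`_check_and_show_onboarding` is a "show once, persist a timestamp" pattern. A standalone sketch with a stand-in config store and modal callable (not shotgun's real config manager or Textual screen):

```python
# Sketch of the show-once pattern; ConfigStore and the show_modal callable are
# stand-ins, not shotgun's real config manager or OnboardingModal.
import asyncio
from collections.abc import Awaitable, Callable
from datetime import datetime, timezone


class ConfigStore:
    def __init__(self) -> None:
        self.shown_onboarding_popup: datetime | None = None

    async def save(self) -> None:
        """Persist to disk in a real implementation."""


async def maybe_show_onboarding(
    store: ConfigStore, show_modal: Callable[[], Awaitable[None]]
) -> None:
    if store.shown_onboarding_popup is None:  # never shown before
        await show_modal()  # e.g. push the modal and wait for dismissal
        store.shown_onboarding_popup = datetime.now(timezone.utc)
        await store.save()  # record it so later launches skip the popup


async def fake_modal() -> None:
    print("onboarding shown")


store = ConfigStore()
asyncio.run(maybe_show_onboarding(store, fake_modal))
asyncio.run(maybe_show_onboarding(store, fake_modal))  # second call: no popup
```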
@@ -369,6 +369,11 @@ class UnifiedCommandProvider(Provider):
             self.chat_screen.action_show_usage,
             help="Display usage information for the current session",
         )
+        yield DiscoveryHit(
+            "View Onboarding",
+            self.chat_screen.action_view_onboarding,
+            help="View the onboarding tutorial and helpful resources",
+        )

     async def search(self, query: str) -> AsyncGenerator[Hit, None]:
         """Search for commands in alphabetical order."""
@@ -416,6 +421,11 @@ class UnifiedCommandProvider(Provider):
                 self.chat_screen.action_show_usage,
                 "Display usage information for the current session",
             ),
+            (
+                "View Onboarding",
+                self.chat_screen.action_view_onboarding,
+                "View the onboarding tutorial and helpful resources",
+            ),
         ]

         for title, callback, help_text in commands: