shotgun-sh 0.2.11.dev1__py3-none-any.whl → 0.2.17.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic; see the registry's advisory page for more details.

Files changed (76)
  1. shotgun/agents/agent_manager.py +194 -28
  2. shotgun/agents/common.py +14 -8
  3. shotgun/agents/config/manager.py +64 -33
  4. shotgun/agents/config/models.py +25 -1
  5. shotgun/agents/config/provider.py +2 -2
  6. shotgun/agents/context_analyzer/analyzer.py +2 -24
  7. shotgun/agents/conversation_manager.py +35 -19
  8. shotgun/agents/export.py +2 -2
  9. shotgun/agents/history/history_processors.py +99 -3
  10. shotgun/agents/history/token_counting/anthropic.py +17 -1
  11. shotgun/agents/history/token_counting/base.py +14 -3
  12. shotgun/agents/history/token_counting/openai.py +11 -1
  13. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  14. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  15. shotgun/agents/history/token_counting/utils.py +0 -3
  16. shotgun/agents/plan.py +2 -2
  17. shotgun/agents/research.py +3 -3
  18. shotgun/agents/specify.py +2 -2
  19. shotgun/agents/tasks.py +2 -2
  20. shotgun/agents/tools/codebase/file_read.py +5 -2
  21. shotgun/agents/tools/file_management.py +11 -7
  22. shotgun/agents/tools/web_search/__init__.py +8 -8
  23. shotgun/agents/tools/web_search/anthropic.py +2 -2
  24. shotgun/agents/tools/web_search/gemini.py +1 -1
  25. shotgun/agents/tools/web_search/openai.py +1 -1
  26. shotgun/agents/tools/web_search/utils.py +2 -2
  27. shotgun/agents/usage_manager.py +16 -11
  28. shotgun/build_constants.py +1 -1
  29. shotgun/cli/clear.py +2 -1
  30. shotgun/cli/compact.py +3 -3
  31. shotgun/cli/config.py +8 -5
  32. shotgun/cli/context.py +2 -2
  33. shotgun/cli/export.py +1 -1
  34. shotgun/cli/feedback.py +4 -2
  35. shotgun/cli/plan.py +1 -1
  36. shotgun/cli/research.py +1 -1
  37. shotgun/cli/specify.py +1 -1
  38. shotgun/cli/tasks.py +1 -1
  39. shotgun/codebase/core/change_detector.py +5 -3
  40. shotgun/codebase/core/code_retrieval.py +4 -2
  41. shotgun/codebase/core/ingestor.py +10 -8
  42. shotgun/codebase/core/manager.py +3 -3
  43. shotgun/codebase/core/nl_query.py +1 -1
  44. shotgun/exceptions.py +32 -0
  45. shotgun/logging_config.py +10 -17
  46. shotgun/main.py +3 -1
  47. shotgun/posthog_telemetry.py +28 -25
  48. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +3 -2
  49. shotgun/sentry_telemetry.py +160 -2
  50. shotgun/telemetry.py +3 -1
  51. shotgun/tui/app.py +71 -65
  52. shotgun/tui/components/context_indicator.py +43 -0
  53. shotgun/tui/containers.py +15 -17
  54. shotgun/tui/dependencies.py +2 -2
  55. shotgun/tui/screens/chat/chat_screen.py +189 -45
  56. shotgun/tui/screens/chat/help_text.py +16 -15
  57. shotgun/tui/screens/chat_screen/command_providers.py +10 -0
  58. shotgun/tui/screens/chat_screen/history/chat_history.py +1 -2
  59. shotgun/tui/screens/feedback.py +4 -4
  60. shotgun/tui/screens/github_issue.py +102 -0
  61. shotgun/tui/screens/model_picker.py +21 -20
  62. shotgun/tui/screens/onboarding.py +431 -0
  63. shotgun/tui/screens/provider_config.py +50 -27
  64. shotgun/tui/screens/shotgun_auth.py +2 -2
  65. shotgun/tui/screens/welcome.py +14 -11
  66. shotgun/tui/services/conversation_service.py +16 -14
  67. shotgun/tui/utils/mode_progress.py +14 -7
  68. shotgun/tui/widgets/widget_coordinator.py +18 -2
  69. shotgun/utils/file_system_utils.py +19 -0
  70. shotgun/utils/marketing.py +110 -0
  71. shotgun_sh-0.2.17.dev1.dist-info/METADATA +465 -0
  72. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/RECORD +75 -71
  73. shotgun_sh-0.2.11.dev1.dist-info/METADATA +0 -129
  74. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/WHEEL +0 -0
  75. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/entry_points.txt +0 -0
  76. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.17.dev1.dist-info}/licenses/LICENSE +0 -0
shotgun/tui/containers.py CHANGED
@@ -5,10 +5,8 @@ from typing import TYPE_CHECKING
5
5
  from dependency_injector import containers, providers
6
6
  from pydantic_ai import RunContext
7
7
 
8
- from shotgun.agents.agent_manager import AgentManager
9
- from shotgun.agents.config import get_provider_model
10
8
  from shotgun.agents.conversation_manager import ConversationManager
11
- from shotgun.agents.models import AgentDeps, AgentType
9
+ from shotgun.agents.models import AgentDeps
12
10
  from shotgun.sdk.codebase import CodebaseSDK
13
11
  from shotgun.tui.commands import CommandHandler
14
12
  from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
@@ -35,13 +33,19 @@ class TUIContainer(containers.DeclarativeContainer):
35
33
 
36
34
  This container manages the lifecycle and dependencies of all TUI components,
37
35
  ensuring consistent configuration and facilitating testing.
36
+
37
+ Note: model_config and agent_deps are created lazily via async factory methods
38
+ since get_provider_model() is now async.
38
39
  """
39
40
 
40
41
  # Configuration
41
42
  config = providers.Configuration()
42
43
 
43
44
  # Core dependencies
44
- model_config = providers.Singleton(get_provider_model)
45
+ # TODO: Figure out a better solution for async dependency injection
46
+ # model_config is now loaded lazily via create_default_tui_deps()
47
+ # because get_provider_model() is async. This breaks the DI pattern
48
+ # and should be refactored to support async factories properly.
45
49
 
46
50
  storage_dir = providers.Singleton(lambda: get_shotgun_home() / "codebases")
47
51
 
@@ -51,15 +55,10 @@ class TUIContainer(containers.DeclarativeContainer):
51
55
 
52
56
  system_prompt_fn = providers.Object(_placeholder_system_prompt)
53
57
 
54
- # AgentDeps singleton
55
- agent_deps = providers.Singleton(
56
- AgentDeps,
57
- interactive_mode=True,
58
- is_tui_context=True,
59
- llm_model=model_config,
60
- codebase_service=codebase_service,
61
- system_prompt_fn=system_prompt_fn,
62
- )
58
+ # TODO: Figure out a better solution for async dependency injection
59
+ # AgentDeps is now created via async create_default_tui_deps()
60
+ # instead of using DI container's Singleton provider because it requires
61
+ # async model_config initialization
63
62
 
64
63
  # Service singletons
65
64
  codebase_sdk = providers.Singleton(CodebaseSDK)
@@ -74,10 +73,9 @@ class TUIContainer(containers.DeclarativeContainer):
74
73
  ConversationService, conversation_manager=conversation_manager
75
74
  )
76
75
 
77
- # Factory for AgentManager (needs agent_type parameter)
78
- agent_manager_factory = providers.Factory(
79
- AgentManager, deps=agent_deps, initial_type=providers.Object(AgentType.RESEARCH)
80
- )
76
+ # TODO: Figure out a better solution for async dependency injection
77
+ # AgentManager factory removed - create via async initialization
78
+ # since it requires async agent creation
81
79
 
82
80
  # Factory for ProcessingStateManager (needs ChatScreen reference)
83
81
  processing_state_factory = providers.Factory(
@@ -8,7 +8,7 @@ from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
8
8
  from shotgun.utils import get_shotgun_home
9
9
 
10
10
 
11
- def create_default_tui_deps() -> AgentDeps:
11
+ async def create_default_tui_deps() -> AgentDeps:
12
12
  """Create default AgentDeps for TUI components.
13
13
 
14
14
  This creates a standard AgentDeps configuration suitable for interactive
@@ -21,7 +21,7 @@ def create_default_tui_deps() -> AgentDeps:
21
21
  Returns:
22
22
  Configured AgentDeps instance ready for TUI use.
23
23
  """
24
- model_config = get_provider_model()
24
+ model_config = await get_provider_model()
25
25
  storage_dir = get_shotgun_home() / "codebases"
26
26
  codebase_service = FilteredCodebaseService(storage_dir)
27
27
 
@@ -2,6 +2,8 @@
2
2
 
3
3
  import asyncio
4
4
  import logging
5
+ import time
6
+ from datetime import datetime, timezone
5
7
  from pathlib import Path
6
8
  from typing import cast
7
9
 
@@ -10,6 +12,7 @@ from pydantic_ai.messages import (
10
12
  ModelRequest,
11
13
  ModelResponse,
12
14
  TextPart,
15
+ ToolCallPart,
13
16
  ToolReturnPart,
14
17
  UserPromptPart,
15
18
  )
@@ -31,6 +34,7 @@ from shotgun.agents.agent_manager import (
31
34
  ModelConfigUpdated,
32
35
  PartialResponseMessage,
33
36
  )
37
+ from shotgun.agents.config import get_config_manager
34
38
  from shotgun.agents.config.models import MODEL_SPECS
35
39
  from shotgun.agents.conversation_manager import ConversationManager
36
40
  from shotgun.agents.history.compaction import apply_persistent_compaction
@@ -45,6 +49,7 @@ from shotgun.codebase.core.manager import (
45
49
  CodebaseGraphManager,
46
50
  )
47
51
  from shotgun.codebase.models import IndexProgress, ProgressPhase
52
+ from shotgun.exceptions import ContextSizeLimitExceeded
48
53
  from shotgun.posthog_telemetry import track_event
49
54
  from shotgun.sdk.codebase import CodebaseSDK
50
55
  from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
@@ -70,11 +75,13 @@ from shotgun.tui.screens.chat_screen.command_providers import (
70
75
  from shotgun.tui.screens.chat_screen.hint_message import HintMessage
71
76
  from shotgun.tui.screens.chat_screen.history import ChatHistory
72
77
  from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
78
+ from shotgun.tui.screens.onboarding import OnboardingModal
73
79
  from shotgun.tui.services.conversation_service import ConversationService
74
80
  from shotgun.tui.state.processing_state import ProcessingStateManager
75
81
  from shotgun.tui.utils.mode_progress import PlaceholderHints
76
82
  from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
77
83
  from shotgun.utils import get_shotgun_home
84
+ from shotgun.utils.marketing import MarketingManager
78
85
 
79
86
  logger = logging.getLogger(__name__)
80
87
 
@@ -97,7 +104,6 @@ class ChatScreen(Screen[None]):
97
104
  history: PromptHistory = PromptHistory()
98
105
  messages = reactive(list[ModelMessage | HintMessage]())
99
106
  indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
100
- partial_message: reactive[ModelMessage | None] = reactive(None)
101
107
 
102
108
  # Q&A mode state (for structured output clarifying questions)
103
109
  qa_mode = reactive(False)
@@ -108,6 +114,10 @@ class ChatScreen(Screen[None]):
108
114
  # Working state - keep reactive for Textual watchers
109
115
  working = reactive(False)
110
116
 
117
+ # Throttle context indicator updates (in seconds)
118
+ _last_context_update: float = 0.0
119
+ _context_update_throttle: float = 5.0 # 5 seconds
120
+
111
121
  def __init__(
112
122
  self,
113
123
  agent_manager: AgentManager,
@@ -165,13 +175,17 @@ class ChatScreen(Screen[None]):
165
175
  self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))
166
176
 
167
177
  # Load conversation history if --continue flag was provided
168
- if self.continue_session and self.conversation_manager.exists():
169
- self._load_conversation()
178
+ # Use call_later to handle async exists() check
179
+ if self.continue_session:
180
+ self.call_later(self._check_and_load_conversation)
170
181
 
171
182
  self.call_later(self.check_if_codebase_is_indexed)
172
183
  # Initial update of context indicator
173
184
  self.update_context_indicator()
174
185
 
186
+ # Show onboarding popup if not shown before
187
+ self.call_later(self._check_and_show_onboarding)
188
+
175
189
  async def on_key(self, event: events.Key) -> None:
176
190
  """Handle key presses for cancellation."""
177
191
  # If escape is pressed during Q&A mode, exit Q&A
@@ -304,6 +318,10 @@ class ChatScreen(Screen[None]):
304
318
  else:
305
319
  self.notify("No context analysis available", severity="error")
306
320
 
321
+ def action_view_onboarding(self) -> None:
322
+ """Show the onboarding modal."""
323
+ self.app.push_screen(OnboardingModal())
324
+
307
325
  @work
308
326
  async def action_compact_conversation(self) -> None:
309
327
  """Compact the conversation history to reduce size."""
@@ -386,11 +404,11 @@ class ChatScreen(Screen[None]):
386
404
  # Save to conversation file
387
405
  conversation_file = get_shotgun_home() / "conversation.json"
388
406
  manager = ConversationManager(conversation_file)
389
- conversation = manager.load()
407
+ conversation = await manager.load()
390
408
 
391
409
  if conversation:
392
410
  conversation.set_agent_messages(compacted_messages)
393
- manager.save(conversation)
411
+ await manager.save(conversation)
394
412
 
395
413
  # Post compaction completed event
396
414
  self.agent_manager.post_message(CompactionCompletedMessage())
@@ -455,7 +473,7 @@ class ChatScreen(Screen[None]):
455
473
  self.agent_manager.ui_message_history = []
456
474
 
457
475
  # Use conversation service to clear conversation
458
- self.conversation_service.clear_conversation()
476
+ await self.conversation_service.clear_conversation()
459
477
 
460
478
  # Post message history updated event to refresh UI
461
479
  self.agent_manager.post_message(
@@ -502,6 +520,34 @@ class ChatScreen(Screen[None]):
502
520
  f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
503
521
  )
504
522
 
523
+ @work(exclusive=False)
524
+ async def update_context_indicator_with_messages(
525
+ self,
526
+ agent_messages: list[ModelMessage],
527
+ ui_messages: list[ModelMessage | HintMessage],
528
+ ) -> None:
529
+ """Update the context indicator with specific message sets (for streaming updates).
530
+
531
+ Args:
532
+ agent_messages: Agent message history including streaming messages (for token counting)
533
+ ui_messages: UI message history including hints and streaming messages
534
+ """
535
+ try:
536
+ from shotgun.agents.context_analyzer.analyzer import ContextAnalyzer
537
+
538
+ analyzer = ContextAnalyzer(self.deps.llm_model)
539
+ # Analyze the combined message histories for accurate progressive token counts
540
+ analysis = await analyzer.analyze_conversation(agent_messages, ui_messages)
541
+
542
+ if analysis:
543
+ model_name = self.deps.llm_model.name
544
+ self.widget_coordinator.update_context_indicator(analysis, model_name)
545
+ except Exception as e:
546
+ logger.error(
547
+ f"Failed to update context indicator with streaming messages: {e}",
548
+ exc_info=True,
549
+ )
550
+
505
551
  def compose(self) -> ComposeResult:
506
552
  """Create child widgets for the app."""
507
553
  with Container(id="window"):
@@ -532,8 +578,6 @@ class ChatScreen(Screen[None]):
532
578
 
533
579
  @on(PartialResponseMessage)
534
580
  def handle_partial_response(self, event: PartialResponseMessage) -> None:
535
- self.partial_message = event.message
536
-
537
581
  # Filter event.messages to exclude ModelRequest with only ToolReturnPart
538
582
  # These are intermediate tool results that would render as empty (UserQuestionWidget
539
583
  # filters out ToolReturnPart in format_prompt_parts), causing user messages to disappear
@@ -551,16 +595,40 @@ class ChatScreen(Screen[None]):
551
595
  # Keep all ModelResponse and other message types
552
596
  filtered_event_messages.append(msg)
553
597
 
554
- # Build new message list
598
+ # Build new message list combining existing messages with new streaming content
555
599
  new_message_list = self.messages + cast(
556
600
  list[ModelMessage | HintMessage], filtered_event_messages
557
601
  )
558
602
 
559
603
  # Use widget coordinator to set partial response
560
- self.widget_coordinator.set_partial_response(
561
- self.partial_message, new_message_list
604
+ self.widget_coordinator.set_partial_response(event.message, new_message_list)
605
+
606
+ # Skip context updates for file write operations (they don't add to input context)
607
+ has_file_write = any(
608
+ isinstance(msg, ModelResponse)
609
+ and any(
610
+ isinstance(part, ToolCallPart)
611
+ and part.tool_name in ("write_file", "append_file")
612
+ for part in msg.parts
613
+ )
614
+ for msg in event.messages
562
615
  )
563
616
 
617
+ if has_file_write:
618
+ return # Skip context update for file writes
619
+
620
+ # Throttle context indicator updates to improve performance during streaming
621
+ # Only update at most once per 5 seconds to avoid excessive token calculations
622
+ current_time = time.time()
623
+ if current_time - self._last_context_update >= self._context_update_throttle:
624
+ self._last_context_update = current_time
625
+ # Update context indicator with full message history including streaming messages
626
+ # Combine existing agent history with new streaming messages for accurate token count
627
+ combined_agent_history = self.agent_manager.message_history + event.messages
628
+ self.update_context_indicator_with_messages(
629
+ combined_agent_history, new_message_list
630
+ )
631
+
564
632
  def _clear_partial_response(self) -> None:
565
633
  # Use widget coordinator to clear partial response
566
634
  self.widget_coordinator.set_partial_response(None, self.messages)
@@ -602,7 +670,9 @@ class ChatScreen(Screen[None]):
602
670
  self.qa_answers = []
603
671
 
604
672
  @on(MessageHistoryUpdated)
605
- def handle_message_history_updated(self, event: MessageHistoryUpdated) -> None:
673
+ async def handle_message_history_updated(
674
+ self, event: MessageHistoryUpdated
675
+ ) -> None:
606
676
  """Handle message history updates from the agent manager."""
607
677
  self._clear_partial_response()
608
678
  self.messages = event.messages
@@ -617,32 +687,50 @@ class ChatScreen(Screen[None]):
617
687
  self.update_context_indicator()
618
688
 
619
689
  # If there are file operations, add a message showing the modified files
690
+ # Skip if hint was already added by agent_manager (e.g., in QA mode)
620
691
  if event.file_operations:
621
- chat_history = self.query_one(ChatHistory)
622
- if chat_history.vertical_tail:
623
- tracker = FileOperationTracker(operations=event.file_operations)
624
- display_path = tracker.get_display_path()
625
-
626
- if display_path:
627
- # Create a simple markdown message with the file path
628
- # The terminal emulator will make this clickable automatically
629
- path_obj = Path(display_path)
630
-
631
- if len(event.file_operations) == 1:
632
- message = f"📝 Modified: `{display_path}`"
633
- else:
634
- num_files = len({op.file_path for op in event.file_operations})
635
- if path_obj.is_dir():
636
- message = (
637
- f"📁 Modified {num_files} files in: `{display_path}`"
638
- )
692
+ # Check if file operation hint already exists in recent messages
693
+ file_hint_exists = any(
694
+ isinstance(msg, HintMessage)
695
+ and (
696
+ msg.message.startswith("📝 Modified:")
697
+ or msg.message.startswith("📁 Modified")
698
+ )
699
+ for msg in event.messages[-5:] # Check last 5 messages
700
+ )
701
+
702
+ if not file_hint_exists:
703
+ chat_history = self.query_one(ChatHistory)
704
+ if chat_history.vertical_tail:
705
+ tracker = FileOperationTracker(operations=event.file_operations)
706
+ display_path = tracker.get_display_path()
707
+
708
+ if display_path:
709
+ # Create a simple markdown message with the file path
710
+ # The terminal emulator will make this clickable automatically
711
+ path_obj = Path(display_path)
712
+
713
+ if len(event.file_operations) == 1:
714
+ message = f"📝 Modified: `{display_path}`"
639
715
  else:
640
- # Common path is a file, show parent directory
641
- message = (
642
- f"📁 Modified {num_files} files in: `{path_obj.parent}`"
716
+ num_files = len(
717
+ {op.file_path for op in event.file_operations}
643
718
  )
719
+ if path_obj.is_dir():
720
+ message = f"📁 Modified {num_files} files in: `{display_path}`"
721
+ else:
722
+ # Common path is a file, show parent directory
723
+ message = f"📁 Modified {num_files} files in: `{path_obj.parent}`"
724
+
725
+ self.mount_hint(message)
726
+
727
+ # Check and display any marketing messages
728
+ from shotgun.tui.app import ShotgunApp
644
729
 
645
- self.mount_hint(message)
730
+ app = cast(ShotgunApp, self.app)
731
+ await MarketingManager.check_and_display_messages(
732
+ app.config_manager, event.file_operations, self.mount_hint
733
+ )
646
734
 
647
735
  @on(CompactionStartedMessage)
648
736
  def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
@@ -1048,6 +1136,9 @@ class ChatScreen(Screen[None]):
1048
1136
  self.processing_state.start_processing("Processing...")
1049
1137
  self.processing_state.bind_worker(get_current_worker())
1050
1138
 
1139
+ # Start context indicator animation immediately
1140
+ self.widget_coordinator.set_context_streaming(True)
1141
+
1051
1142
  prompt = message
1052
1143
 
1053
1144
  try:
@@ -1057,6 +1148,27 @@ class ChatScreen(Screen[None]):
1057
1148
  except asyncio.CancelledError:
1058
1149
  # Handle cancellation gracefully - DO NOT re-raise
1059
1150
  self.mount_hint("⚠️ Operation cancelled by user")
1151
+ except ContextSizeLimitExceeded as e:
1152
+ # User-friendly error with actionable options
1153
+ hint = (
1154
+ f"⚠️ **Context too large for {e.model_name}**\n\n"
1155
+ f"Your conversation history exceeds this model's limit ({e.max_tokens:,} tokens).\n\n"
1156
+ f"**Choose an action:**\n\n"
1157
+ f"1. Switch to a larger model (`Ctrl+P` → Change Model)\n"
1158
+ f"2. Switch to a larger model, compact (`/compact`), then switch back to {e.model_name}\n"
1159
+ f"3. Clear conversation (`/clear`)\n"
1160
+ )
1161
+
1162
+ self.mount_hint(hint)
1163
+
1164
+ # Log for debugging (won't send to Sentry due to ErrorNotPickedUpBySentry)
1165
+ logger.info(
1166
+ "Context size limit exceeded",
1167
+ extra={
1168
+ "max_tokens": e.max_tokens,
1169
+ "model_name": e.model_name,
1170
+ },
1171
+ )
1060
1172
  except Exception as e:
1061
1173
  # Log with full stack trace to shotgun.log
1062
1174
  logger.exception(
@@ -1083,6 +1195,8 @@ class ChatScreen(Screen[None]):
1083
1195
  self.mount_hint(hint)
1084
1196
  finally:
1085
1197
  self.processing_state.stop_processing()
1198
+ # Stop context indicator animation
1199
+ self.widget_coordinator.set_context_streaming(False)
1086
1200
 
1087
1201
  # Save conversation after each interaction
1088
1202
  self._save_conversation()
@@ -1091,20 +1205,50 @@ class ChatScreen(Screen[None]):
1091
1205
 
1092
1206
  def _save_conversation(self) -> None:
1093
1207
  """Save the current conversation to persistent storage."""
1094
- # Use conversation service for saving
1095
- self.conversation_service.save_conversation(self.agent_manager)
1208
+ # Use conversation service for saving (run async in background)
1209
+ # Use exclusive=True to prevent concurrent saves that can cause file contention
1210
+ self.run_worker(
1211
+ self.conversation_service.save_conversation(self.agent_manager),
1212
+ exclusive=True,
1213
+ )
1214
+
1215
+ async def _check_and_load_conversation(self) -> None:
1216
+ """Check if conversation exists and load it if it does."""
1217
+ if await self.conversation_manager.exists():
1218
+ self._load_conversation()
1096
1219
 
1097
1220
  def _load_conversation(self) -> None:
1098
1221
  """Load conversation from persistent storage."""
1099
- # Use conversation service for restoration
1100
- success, error_msg, restored_type = (
1101
- self.conversation_service.restore_conversation(
1222
+
1223
+ # Use conversation service for restoration (run async)
1224
+ async def _do_load() -> None:
1225
+ (
1226
+ success,
1227
+ error_msg,
1228
+ restored_type,
1229
+ ) = await self.conversation_service.restore_conversation(
1102
1230
  self.agent_manager, self.deps.usage_manager
1103
1231
  )
1104
- )
1105
1232
 
1106
- if not success and error_msg:
1107
- self.mount_hint(error_msg)
1108
- elif success and restored_type:
1109
- # Update the current mode to match restored conversation
1110
- self.mode = restored_type
1233
+ if not success and error_msg:
1234
+ self.mount_hint(error_msg)
1235
+ elif success and restored_type:
1236
+ # Update the current mode to match restored conversation
1237
+ self.mode = restored_type
1238
+
1239
+ self.run_worker(_do_load(), exclusive=False)
1240
+
1241
+ @work
1242
+ async def _check_and_show_onboarding(self) -> None:
1243
+ """Check if onboarding should be shown and display modal if needed."""
1244
+ config_manager = get_config_manager()
1245
+ config = await config_manager.load()
1246
+
1247
+ # Only show onboarding if it hasn't been shown before
1248
+ if config.shown_onboarding_popup is None:
1249
+ # Show the onboarding modal
1250
+ await self.app.push_screen_wait(OnboardingModal())
1251
+
1252
+ # Mark as shown in config with current timestamp
1253
+ config.shown_onboarding_popup = datetime.now(timezone.utc)
1254
+ await config_manager.save(config)
@@ -11,14 +11,14 @@ def help_text_with_codebase(already_indexed: bool = False) -> str:
11
11
  Formatted help text string.
12
12
  """
13
13
  return (
14
- "Howdy! Welcome to Shotgun - the context tool for software engineering. \n\n"
15
- "You can research, build specs, plan, create tasks, and export context to your "
16
- "favorite code-gen agents.\n\n"
17
- f"{'' if already_indexed else 'Once your codebase is indexed, '}I can help with:\n\n"
18
- "- Speccing out a new feature\n"
19
- "- Onboarding you onto this project\n"
20
- "- Helping with a refactor spec\n"
21
- "- Creating AGENTS.md file for this project\n"
14
+ "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
15
+ "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
16
+ f"{'It' if already_indexed else 'Once your codebase is indexed, it'} can help you:\n"
17
+ "- Research your codebase and spec out new features\n"
18
+ "- Create implementation plans that fit your architecture\n"
19
+ "- Generate AGENTS.md files for AI coding agents\n"
20
+ "- Onboard to existing projects or plan refactors\n\n"
21
+ "Ready to build something? Let's go.\n"
22
22
  )
23
23
 
24
24
 
@@ -29,11 +29,12 @@ def help_text_empty_dir() -> str:
29
29
  Formatted help text string.
30
30
  """
31
31
  return (
32
- "Howdy! Welcome to Shotgun - the context tool for software engineering.\n\n"
33
- "You can research, build specs, plan, create tasks, and export context to your "
34
- "favorite code-gen agents.\n\n"
35
- "What would you like to build? Here are some examples:\n\n"
36
- "- Research FastAPI vs Django\n"
37
- "- Plan my new web app using React\n"
38
- "- Create PRD for my planned product\n"
32
+ "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
33
+ "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
34
+ "It can help you:\n"
35
+ "- Research your codebase and spec out new features\n"
36
+ "- Create implementation plans that fit your architecture\n"
37
+ "- Generate AGENTS.md files for AI coding agents\n"
38
+ "- Onboard to existing projects or plan refactors\n\n"
39
+ "Ready to build something? Let's go.\n"
39
40
  )
@@ -369,6 +369,11 @@ class UnifiedCommandProvider(Provider):
369
369
  self.chat_screen.action_show_usage,
370
370
  help="Display usage information for the current session",
371
371
  )
372
+ yield DiscoveryHit(
373
+ "View Onboarding",
374
+ self.chat_screen.action_view_onboarding,
375
+ help="View the onboarding tutorial and helpful resources",
376
+ )
372
377
 
373
378
  async def search(self, query: str) -> AsyncGenerator[Hit, None]:
374
379
  """Search for commands in alphabetical order."""
@@ -416,6 +421,11 @@ class UnifiedCommandProvider(Provider):
416
421
  self.chat_screen.action_show_usage,
417
422
  "Display usage information for the current session",
418
423
  ),
424
+ (
425
+ "View Onboarding",
426
+ self.chat_screen.action_view_onboarding,
427
+ "View the onboarding tutorial and helpful resources",
428
+ ),
419
429
  ]
420
430
 
421
431
  for title, callback, help_text in commands:
@@ -47,7 +47,6 @@ class ChatHistory(Widget):
47
47
  super().__init__()
48
48
  self.items: Sequence[ModelMessage | HintMessage] = []
49
49
  self.vertical_tail: VerticalTail | None = None
50
- self.partial_response = None
51
50
  self._rendered_count = 0 # Track how many messages have been mounted
52
51
 
53
52
  def compose(self) -> ComposeResult:
@@ -63,7 +62,7 @@ class ChatHistory(Widget):
63
62
  yield HintMessageWidget(item)
64
63
  elif isinstance(item, ModelResponse):
65
64
  yield AgentResponseWidget(item)
66
- yield PartialResponseWidget(self.partial_response).data_bind(
65
+ yield PartialResponseWidget(None).data_bind(
67
66
  item=ChatHistory.partial_response
68
67
  )
69
68
 
@@ -125,8 +125,8 @@ class FeedbackScreen(Screen[Feedback | None]):
125
125
  self.set_focus(self.query_one("#feedback-description", TextArea))
126
126
 
127
127
  @on(Button.Pressed, "#submit")
128
- def _on_submit_pressed(self) -> None:
129
- self._submit_feedback()
128
+ async def _on_submit_pressed(self) -> None:
129
+ await self._submit_feedback()
130
130
 
131
131
  @on(Button.Pressed, "#cancel")
132
132
  def _on_cancel_pressed(self) -> None:
@@ -171,7 +171,7 @@ class FeedbackScreen(Screen[Feedback | None]):
171
171
  }
172
172
  return placeholders.get(kind, "Enter your feedback...")
173
173
 
174
- def _submit_feedback(self) -> None:
174
+ async def _submit_feedback(self) -> None:
175
175
  text_area = self.query_one("#feedback-description", TextArea)
176
176
  description = text_area.text.strip()
177
177
 
@@ -182,7 +182,7 @@ class FeedbackScreen(Screen[Feedback | None]):
182
182
  return
183
183
 
184
184
  app = cast("ShotgunApp", self.app)
185
- shotgun_instance_id = app.config_manager.get_shotgun_instance_id()
185
+ shotgun_instance_id = await app.config_manager.get_shotgun_instance_id()
186
186
 
187
187
  feedback = Feedback(
188
188
  kind=self.selected_kind,