shotgun-sh 0.2.11.dev1__py3-none-any.whl → 0.2.11.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

Files changed (70)
  1. shotgun/agents/agent_manager.py +150 -27
  2. shotgun/agents/common.py +14 -8
  3. shotgun/agents/config/manager.py +64 -33
  4. shotgun/agents/config/models.py +25 -1
  5. shotgun/agents/config/provider.py +2 -2
  6. shotgun/agents/context_analyzer/analyzer.py +2 -24
  7. shotgun/agents/conversation_manager.py +35 -19
  8. shotgun/agents/export.py +2 -2
  9. shotgun/agents/history/token_counting/anthropic.py +17 -1
  10. shotgun/agents/history/token_counting/base.py +14 -3
  11. shotgun/agents/history/token_counting/openai.py +8 -0
  12. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  13. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  14. shotgun/agents/history/token_counting/utils.py +0 -3
  15. shotgun/agents/plan.py +2 -2
  16. shotgun/agents/research.py +3 -3
  17. shotgun/agents/specify.py +2 -2
  18. shotgun/agents/tasks.py +2 -2
  19. shotgun/agents/tools/codebase/file_read.py +5 -2
  20. shotgun/agents/tools/file_management.py +11 -7
  21. shotgun/agents/tools/web_search/__init__.py +8 -8
  22. shotgun/agents/tools/web_search/anthropic.py +2 -2
  23. shotgun/agents/tools/web_search/gemini.py +1 -1
  24. shotgun/agents/tools/web_search/openai.py +1 -1
  25. shotgun/agents/tools/web_search/utils.py +2 -2
  26. shotgun/agents/usage_manager.py +16 -11
  27. shotgun/cli/clear.py +2 -1
  28. shotgun/cli/compact.py +3 -3
  29. shotgun/cli/config.py +8 -5
  30. shotgun/cli/context.py +2 -2
  31. shotgun/cli/export.py +1 -1
  32. shotgun/cli/feedback.py +4 -2
  33. shotgun/cli/plan.py +1 -1
  34. shotgun/cli/research.py +1 -1
  35. shotgun/cli/specify.py +1 -1
  36. shotgun/cli/tasks.py +1 -1
  37. shotgun/codebase/core/change_detector.py +5 -3
  38. shotgun/codebase/core/code_retrieval.py +4 -2
  39. shotgun/codebase/core/ingestor.py +10 -8
  40. shotgun/codebase/core/manager.py +3 -3
  41. shotgun/codebase/core/nl_query.py +1 -1
  42. shotgun/logging_config.py +10 -17
  43. shotgun/main.py +3 -1
  44. shotgun/posthog_telemetry.py +14 -4
  45. shotgun/sentry_telemetry.py +3 -1
  46. shotgun/telemetry.py +3 -1
  47. shotgun/tui/app.py +71 -65
  48. shotgun/tui/components/context_indicator.py +43 -0
  49. shotgun/tui/containers.py +15 -17
  50. shotgun/tui/dependencies.py +2 -2
  51. shotgun/tui/screens/chat/chat_screen.py +110 -18
  52. shotgun/tui/screens/chat/help_text.py +16 -15
  53. shotgun/tui/screens/chat_screen/command_providers.py +10 -0
  54. shotgun/tui/screens/feedback.py +4 -4
  55. shotgun/tui/screens/github_issue.py +102 -0
  56. shotgun/tui/screens/model_picker.py +21 -20
  57. shotgun/tui/screens/onboarding.py +431 -0
  58. shotgun/tui/screens/provider_config.py +50 -27
  59. shotgun/tui/screens/shotgun_auth.py +2 -2
  60. shotgun/tui/screens/welcome.py +14 -11
  61. shotgun/tui/services/conversation_service.py +16 -14
  62. shotgun/tui/utils/mode_progress.py +14 -7
  63. shotgun/tui/widgets/widget_coordinator.py +15 -0
  64. shotgun/utils/file_system_utils.py +19 -0
  65. shotgun/utils/marketing.py +110 -0
  66. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/METADATA +2 -1
  67. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/RECORD +70 -67
  68. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/WHEEL +0 -0
  69. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/entry_points.txt +0 -0
  70. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/licenses/LICENSE +0 -0
shotgun/tui/app.py CHANGED
@@ -5,6 +5,7 @@ from textual.app import App, SystemCommand
 from textual.binding import Binding
 from textual.screen import Screen
 
+from shotgun.agents.agent_manager import AgentManager
 from shotgun.agents.config import ConfigManager, get_config_manager
 from shotgun.agents.models import AgentType
 from shotgun.logging_config import get_logger
@@ -18,7 +19,7 @@ from shotgun.utils.update_checker import (
 
 from .screens.chat import ChatScreen
 from .screens.directory_setup import DirectorySetupScreen
-from .screens.feedback import FeedbackScreen
+from .screens.github_issue import GitHubIssueScreen
 from .screens.model_picker import ModelPickerScreen
 from .screens.pipx_migration import PipxMigrationScreen
 from .screens.provider_config import ProviderConfigScreen
@@ -34,7 +35,7 @@ class ShotgunApp(App[None]):
         "provider_config": ProviderConfigScreen,
         "model_picker": ModelPickerScreen,
         "directory_setup": DirectorySetupScreen,
-        "feedback": FeedbackScreen,
+        "github_issue": GitHubIssueScreen,
     }
     BINDINGS = [
         Binding("ctrl+c", "quit", "Quit the app"),
@@ -95,65 +96,75 @@ class ShotgunApp(App[None]):
             )
             return
 
-        # Show welcome screen if no providers are configured OR if user hasn't seen it yet
-        config = self.config_manager.load()
-        if (
-            not self.config_manager.has_any_provider_key()
-            or not config.shown_welcome_screen
-        ):
-            if isinstance(self.screen, WelcomeScreen):
+        # Run async config loading in worker
+        async def _check_config() -> None:
+            # Show welcome screen if no providers are configured OR if user hasn't seen it yet
+            config = await self.config_manager.load()
+            has_any_key = await self.config_manager.has_any_provider_key()
+            if not has_any_key or not config.shown_welcome_screen:
+                if isinstance(self.screen, WelcomeScreen):
+                    return
+
+                self.push_screen(
+                    WelcomeScreen(),
+                    callback=lambda _arg: self.refresh_startup_screen(),
+                )
                 return
 
-            self.push_screen(
-                WelcomeScreen(),
-                callback=lambda _arg: self.refresh_startup_screen(),
-            )
-            return
+            if not self.check_local_shotgun_directory_exists():
+                if isinstance(self.screen, DirectorySetupScreen):
+                    return
+
+                self.push_screen(
+                    DirectorySetupScreen(),
+                    callback=lambda _arg: self.refresh_startup_screen(),
+                )
+                return
 
-        if not self.check_local_shotgun_directory_exists():
-            if isinstance(self.screen, DirectorySetupScreen):
+            if isinstance(self.screen, ChatScreen):
                 return
 
-            self.push_screen(
-                DirectorySetupScreen(),
-                callback=lambda _arg: self.refresh_startup_screen(),
+            # Create ChatScreen with all dependencies injected from container
+            # Get the default agent mode (RESEARCH)
+            agent_mode = AgentType.RESEARCH
+
+            # Create AgentDeps asynchronously (get_provider_model is now async)
+            from shotgun.tui.dependencies import create_default_tui_deps
+
+            agent_deps = await create_default_tui_deps()
+
+            # Create AgentManager with async initialization
+            agent_manager = AgentManager(deps=agent_deps, initial_type=agent_mode)
+
+            # Create ProcessingStateManager - we'll pass the screen after creation
+            # For now, create with None and the ChatScreen will set itself
+            chat_screen = ChatScreen(
+                agent_manager=agent_manager,
+                conversation_manager=self.container.conversation_manager(),
+                conversation_service=self.container.conversation_service(),
+                widget_coordinator=self.container.widget_coordinator_factory(
+                    screen=None
+                ),
+                processing_state=self.container.processing_state_factory(
+                    screen=None,  # Will be set after ChatScreen is created
+                    telemetry_context={"agent_mode": agent_mode.value},
+                ),
                command_handler=self.container.command_handler(),
+                placeholder_hints=self.container.placeholder_hints(),
+                codebase_sdk=self.container.codebase_sdk(),
+                deps=agent_deps,
+                continue_session=self.continue_session,
+                force_reindex=self.force_reindex,
             )
-            return
-
-        if isinstance(self.screen, ChatScreen):
-            return
-
-        # Create ChatScreen with all dependencies injected from container
-        # Get the default agent mode (RESEARCH)
-        agent_mode = AgentType.RESEARCH
-
-        # Create AgentManager with the correct mode
-        agent_manager = self.container.agent_manager_factory(initial_type=agent_mode)
-
-        # Create ProcessingStateManager - we'll pass the screen after creation
-        # For now, create with None and the ChatScreen will set itself
-        chat_screen = ChatScreen(
-            agent_manager=agent_manager,
-            conversation_manager=self.container.conversation_manager(),
-            conversation_service=self.container.conversation_service(),
-            widget_coordinator=self.container.widget_coordinator_factory(screen=None),
-            processing_state=self.container.processing_state_factory(
-                screen=None,  # Will be set after ChatScreen is created
-                telemetry_context={"agent_mode": agent_mode.value},
-            ),
-            command_handler=self.container.command_handler(),
-            placeholder_hints=self.container.placeholder_hints(),
-            codebase_sdk=self.container.codebase_sdk(),
-            deps=self.container.agent_deps(),
-            continue_session=self.continue_session,
-            force_reindex=self.force_reindex,
-        )
 
-        # Update the ProcessingStateManager and WidgetCoordinator with the actual ChatScreen instance
-        chat_screen.processing_state.screen = chat_screen
-        chat_screen.widget_coordinator.screen = chat_screen
+            # Update the ProcessingStateManager and WidgetCoordinator with the actual ChatScreen instance
+            chat_screen.processing_state.screen = chat_screen
+            chat_screen.widget_coordinator.screen = chat_screen
 
-        self.push_screen(chat_screen)
+            self.push_screen(chat_screen)
+
+        # Run the async config check in a worker
+        self.run_worker(_check_config(), exclusive=False)
 
     def check_local_shotgun_directory_exists(self) -> bool:
         shotgun_dir = get_shotgun_base_path()
@@ -170,20 +181,15 @@ class ShotgunApp(App[None]):
     def get_system_commands(self, screen: Screen[Any]) -> Iterable[SystemCommand]:
         return [
             SystemCommand(
-                "Feedback", "Send us feedback or report a bug", self.action_feedback
+                "New Issue",
+                "Report a bug or request a feature on GitHub",
+                self.action_new_issue,
             )
-        ] # we don't want any system commands
-
-    def action_feedback(self) -> None:
-        """Open feedback screen and submit feedback."""
-        from shotgun.posthog_telemetry import Feedback, submit_feedback_survey
-
-        def handle_feedback(feedback: Feedback | None) -> None:
-            if feedback is not None:
-                submit_feedback_survey(feedback)
-                self.notify("Feedback sent. Thank you!")
+        ]
 
-        self.push_screen(FeedbackScreen(), callback=handle_feedback)
+    def action_new_issue(self) -> None:
+        """Open GitHub issue screen to guide users to create an issue."""
+        self.push_screen(GitHubIssueScreen())
 
 
     def run(
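The rewritten startup flow above wraps the config checks in a nested coroutine and schedules it with run_worker() so the synchronous startup hook never blocks on async config I/O. A minimal standalone sketch of that Textual pattern, not shotgun-sh code (SketchApp, ReadyScreen, and the sleep are stand-ins):

    import asyncio

    from textual.app import App, ComposeResult
    from textual.screen import Screen
    from textual.widgets import Static


    class ReadyScreen(Screen[None]):
        def compose(self) -> ComposeResult:
            yield Static("startup checks finished")


    class SketchApp(App[None]):
        def on_mount(self) -> None:
            async def _check_config() -> None:
                await asyncio.sleep(0.1)  # stand-in for awaiting real config I/O
                self.push_screen(ReadyScreen())

            # Schedule the coroutine on a worker instead of blocking the UI thread.
            self.run_worker(_check_config(), exclusive=False)


    if __name__ == "__main__":
        SketchApp().run()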
shotgun/tui/components/context_indicator.py CHANGED
@@ -1,6 +1,7 @@
 """Context window indicator component for showing model usage."""
 
 from textual.reactive import reactive
+from textual.timer import Timer
 from textual.widgets import Static
 
 from shotgun.agents.config.models import MODEL_SPECS, ModelName
@@ -20,6 +21,10 @@ class ContextIndicator(Static):
 
     context_analysis: reactive[ContextAnalysis | None] = reactive(None)
     model_name: reactive[ModelName | None] = reactive(None)
+    is_streaming: reactive[bool] = reactive(False)
+
+    _animation_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+    _animation_index = 0
 
     def __init__(
         self,
@@ -29,6 +34,7 @@ class ContextIndicator(Static):
         classes: str | None = None,
     ) -> None:
         super().__init__(name=name, id=id, classes=classes)
+        self._animation_timer: Timer | None = None
 
     def update_context(
         self, analysis: ContextAnalysis | None, model: ModelName | None
@@ -43,6 +49,38 @@ class ContextIndicator(Static):
         self.model_name = model
         self._refresh_display()
 
+    def set_streaming(self, streaming: bool) -> None:
+        """Enable or disable streaming animation.
+
+        Args:
+            streaming: Whether to show streaming animation
+        """
+        self.is_streaming = streaming
+        if streaming:
+            self._start_animation()
+        else:
+            self._stop_animation()
+
+    def _start_animation(self) -> None:
+        """Start the pulsing animation."""
+        if self._animation_timer is None:
+            self._animation_timer = self.set_interval(0.1, self._animate_frame)
+
+    def _stop_animation(self) -> None:
+        """Stop the pulsing animation."""
+        if self._animation_timer is not None:
+            self._animation_timer.stop()
+            self._animation_timer = None
+        self._animation_index = 0
+        self._refresh_display()
+
+    def _animate_frame(self) -> None:
+        """Advance the animation frame."""
+        self._animation_index = (self._animation_index + 1) % len(
+            self._animation_frames
+        )
+        self._refresh_display()
+
     def _get_percentage_color(self, percentage: float) -> str:
         """Get color for percentage based on threshold.
 
@@ -112,6 +150,11 @@
             f"[{color}]{percentage}% ({current_tokens}/{max_tokens})[/]",
         ]
 
+        # Add streaming animation indicator if streaming
+        if self.is_streaming:
+            animation_char = self._animation_frames[self._animation_index]
+            parts.append(f"[bold cyan]{animation_char}[/]")
+
         # Add model name if available
        if self.model_name:
             model_spec = MODEL_SPECS.get(self.model_name)
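The streaming spinner added above relies on Textual's set_interval(), which returns a Timer that can later be stop()ped. A reduced sketch of the same timer pattern, illustrative only and not the package's widget:

    from textual.app import App, ComposeResult
    from textual.timer import Timer
    from textual.widgets import Static

    FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]


    class BrailleSpinner(Static):
        def on_mount(self) -> None:
            self._frame = 0
            # Fire _advance roughly every 100 ms until the timer is stopped.
            self._timer: Timer = self.set_interval(0.1, self._advance)

        def _advance(self) -> None:
            self._frame = (self._frame + 1) % len(FRAMES)
            self.update(FRAMES[self._frame])


    class SpinnerApp(App[None]):
        def compose(self) -> ComposeResult:
            yield BrailleSpinner()


    if __name__ == "__main__":
        SpinnerApp().run()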
shotgun/tui/containers.py CHANGED
@@ -5,10 +5,8 @@ from typing import TYPE_CHECKING
 from dependency_injector import containers, providers
 from pydantic_ai import RunContext
 
-from shotgun.agents.agent_manager import AgentManager
-from shotgun.agents.config import get_provider_model
 from shotgun.agents.conversation_manager import ConversationManager
-from shotgun.agents.models import AgentDeps, AgentType
+from shotgun.agents.models import AgentDeps
 from shotgun.sdk.codebase import CodebaseSDK
 from shotgun.tui.commands import CommandHandler
 from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
@@ -35,13 +33,19 @@ class TUIContainer(containers.DeclarativeContainer):
 
     This container manages the lifecycle and dependencies of all TUI components,
     ensuring consistent configuration and facilitating testing.
+
+    Note: model_config and agent_deps are created lazily via async factory methods
+    since get_provider_model() is now async.
     """
 
     # Configuration
     config = providers.Configuration()
 
     # Core dependencies
-    model_config = providers.Singleton(get_provider_model)
+    # TODO: Figure out a better solution for async dependency injection
+    # model_config is now loaded lazily via create_default_tui_deps()
+    # because get_provider_model() is async. This breaks the DI pattern
+    # and should be refactored to support async factories properly.
 
     storage_dir = providers.Singleton(lambda: get_shotgun_home() / "codebases")
 
@@ -51,15 +55,10 @@ class TUIContainer(containers.DeclarativeContainer):
 
     system_prompt_fn = providers.Object(_placeholder_system_prompt)
 
-    # AgentDeps singleton
-    agent_deps = providers.Singleton(
-        AgentDeps,
-        interactive_mode=True,
-        is_tui_context=True,
-        llm_model=model_config,
-        codebase_service=codebase_service,
-        system_prompt_fn=system_prompt_fn,
-    )
+    # TODO: Figure out a better solution for async dependency injection
+    # AgentDeps is now created via async create_default_tui_deps()
+    # instead of using DI container's Singleton provider because it requires
+    # async model_config initialization
 
     # Service singletons
     codebase_sdk = providers.Singleton(CodebaseSDK)
@@ -74,10 +73,9 @@
         ConversationService, conversation_manager=conversation_manager
     )
 
-    # Factory for AgentManager (needs agent_type parameter)
-    agent_manager_factory = providers.Factory(
-        AgentManager, deps=agent_deps, initial_type=providers.Object(AgentType.RESEARCH)
-    )
+    # TODO: Figure out a better solution for async dependency injection
+    # AgentManager factory removed - create via async initialization
+    # since it requires async agent creation
 
     # Factory for ProcessingStateManager (needs ChatScreen reference)
     processing_state_factory = providers.Factory(
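The TODO comments above describe the workaround this release takes: because get_provider_model() became async, the dependency_injector Singleton/Factory wiring was replaced with explicit async factory functions. A minimal sketch of that shape, with Deps and get_model() as hypothetical stand-ins:

    import asyncio
    from dataclasses import dataclass


    @dataclass
    class Deps:
        model: str


    async def get_model() -> str:
        # Stand-in for the now-async get_provider_model() (e.g. it awaits file or network I/O).
        await asyncio.sleep(0)
        return "example-model"


    async def create_default_deps() -> Deps:
        # Explicit async factory in place of providers.Singleton(...) wiring.
        return Deps(model=await get_model())


    if __name__ == "__main__":
        print(asyncio.run(create_default_deps()))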
shotgun/tui/dependencies.py CHANGED
@@ -8,7 +8,7 @@ from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
 from shotgun.utils import get_shotgun_home
 
 
-def create_default_tui_deps() -> AgentDeps:
+async def create_default_tui_deps() -> AgentDeps:
     """Create default AgentDeps for TUI components.
 
     This creates a standard AgentDeps configuration suitable for interactive
@@ -21,7 +21,7 @@ def create_default_tui_deps() -> AgentDeps:
     Returns:
         Configured AgentDeps instance ready for TUI use.
     """
-    model_config = get_provider_model()
+    model_config = await get_provider_model()
     storage_dir = get_shotgun_home() / "codebases"
     codebase_service = FilteredCodebaseService(storage_dir)
 
shotgun/tui/screens/chat/chat_screen.py CHANGED
@@ -2,6 +2,7 @@
 
 import asyncio
 import logging
+from datetime import datetime, timezone
 from pathlib import Path
 from typing import cast
 
@@ -31,6 +32,7 @@ from shotgun.agents.agent_manager import (
     ModelConfigUpdated,
     PartialResponseMessage,
 )
+from shotgun.agents.config import get_config_manager
 from shotgun.agents.config.models import MODEL_SPECS
 from shotgun.agents.conversation_manager import ConversationManager
 from shotgun.agents.history.compaction import apply_persistent_compaction
@@ -70,11 +72,13 @@ from shotgun.tui.screens.chat_screen.command_providers import (
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 from shotgun.tui.screens.chat_screen.history import ChatHistory
 from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
+from shotgun.tui.screens.onboarding import OnboardingModal
 from shotgun.tui.services.conversation_service import ConversationService
 from shotgun.tui.state.processing_state import ProcessingStateManager
 from shotgun.tui.utils.mode_progress import PlaceholderHints
 from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
 from shotgun.utils import get_shotgun_home
+from shotgun.utils.marketing import MarketingManager
 
 logger = logging.getLogger(__name__)
 
@@ -165,13 +169,17 @@ class ChatScreen(Screen[None]):
         self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))
 
         # Load conversation history if --continue flag was provided
-        if self.continue_session and self.conversation_manager.exists():
-            self._load_conversation()
+        # Use call_later to handle async exists() check
+        if self.continue_session:
+            self.call_later(self._check_and_load_conversation)
 
         self.call_later(self.check_if_codebase_is_indexed)
         # Initial update of context indicator
         self.update_context_indicator()
 
+        # Show onboarding popup if not shown before
+        self.call_later(self._check_and_show_onboarding)
+
 
     async def on_key(self, event: events.Key) -> None:
         """Handle key presses for cancellation."""
@@ -304,6 +312,10 @@
         else:
             self.notify("No context analysis available", severity="error")
 
+    def action_view_onboarding(self) -> None:
+        """Show the onboarding modal."""
+        self.app.push_screen(OnboardingModal())
+
     @work
     async def action_compact_conversation(self) -> None:
         """Compact the conversation history to reduce size."""
@@ -386,11 +398,11 @@
         # Save to conversation file
         conversation_file = get_shotgun_home() / "conversation.json"
         manager = ConversationManager(conversation_file)
-        conversation = manager.load()
+        conversation = await manager.load()
 
         if conversation:
             conversation.set_agent_messages(compacted_messages)
-            manager.save(conversation)
+            await manager.save(conversation)
 
         # Post compaction completed event
         self.agent_manager.post_message(CompactionCompletedMessage())
@@ -455,7 +467,7 @@
         self.agent_manager.ui_message_history = []
 
         # Use conversation service to clear conversation
-        self.conversation_service.clear_conversation()
+        await self.conversation_service.clear_conversation()
 
         # Post message history updated event to refresh UI
         self.agent_manager.post_message(
@@ -502,6 +514,34 @@
                 f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
             )
 
+    @work(exclusive=False)
+    async def update_context_indicator_with_messages(
+        self,
+        agent_messages: list[ModelMessage],
+        ui_messages: list[ModelMessage | HintMessage],
+    ) -> None:
+        """Update the context indicator with specific message sets (for streaming updates).
+
+        Args:
+            agent_messages: Agent message history including streaming messages (for token counting)
+            ui_messages: UI message history including hints and streaming messages
+        """
+        try:
+            from shotgun.agents.context_analyzer.analyzer import ContextAnalyzer
+
+            analyzer = ContextAnalyzer(self.deps.llm_model)
+            # Analyze the combined message histories for accurate progressive token counts
+            analysis = await analyzer.analyze_conversation(agent_messages, ui_messages)
+
+            if analysis:
+                model_name = self.deps.llm_model.name
+                self.widget_coordinator.update_context_indicator(analysis, model_name)
+        except Exception as e:
+            logger.error(
+                f"Failed to update context indicator with streaming messages: {e}",
+                exc_info=True,
+            )
+
     def compose(self) -> ComposeResult:
         """Create child widgets for the app."""
         with Container(id="window"):
@@ -551,7 +591,7 @@
                 # Keep all ModelResponse and other message types
                 filtered_event_messages.append(msg)
 
-        # Build new message list
+        # Build new message list combining existing messages with new streaming content
        new_message_list = self.messages + cast(
             list[ModelMessage | HintMessage], filtered_event_messages
         )
@@ -561,6 +601,13 @@
             self.partial_message, new_message_list
         )
 
+        # Update context indicator with full message history including streaming messages
+        # Combine existing agent history with new streaming messages for accurate token count
+        combined_agent_history = self.agent_manager.message_history + event.messages
+        self.update_context_indicator_with_messages(
+            combined_agent_history, new_message_list
+        )
+
     def _clear_partial_response(self) -> None:
         # Use widget coordinator to clear partial response
         self.widget_coordinator.set_partial_response(None, self.messages)
@@ -602,7 +649,9 @@
         self.qa_answers = []
 
     @on(MessageHistoryUpdated)
-    def handle_message_history_updated(self, event: MessageHistoryUpdated) -> None:
+    async def handle_message_history_updated(
+        self, event: MessageHistoryUpdated
+    ) -> None:
         """Handle message history updates from the agent manager."""
         self._clear_partial_response()
         self.messages = event.messages
@@ -644,6 +693,14 @@
 
         self.mount_hint(message)
 
+        # Check and display any marketing messages
+        from shotgun.tui.app import ShotgunApp
+
+        app = cast(ShotgunApp, self.app)
+        await MarketingManager.check_and_display_messages(
+            app.config_manager, event.file_operations, self.mount_hint
+        )
+
     @on(CompactionStartedMessage)
     def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
         """Update spinner text when compaction starts."""
@@ -1048,6 +1105,9 @@
         self.processing_state.start_processing("Processing...")
         self.processing_state.bind_worker(get_current_worker())
 
+        # Start context indicator animation immediately
+        self.widget_coordinator.set_context_streaming(True)
+
         prompt = message
 
         try:
@@ -1083,6 +1143,8 @@
             self.mount_hint(hint)
         finally:
             self.processing_state.stop_processing()
+            # Stop context indicator animation
+            self.widget_coordinator.set_context_streaming(False)
 
         # Save conversation after each interaction
         self._save_conversation()
@@ -1091,20 +1153,50 @@
 
     def _save_conversation(self) -> None:
         """Save the current conversation to persistent storage."""
-        # Use conversation service for saving
-        self.conversation_service.save_conversation(self.agent_manager)
+        # Use conversation service for saving (run async in background)
+        # Use exclusive=True to prevent concurrent saves that can cause file contention
+        self.run_worker(
+            self.conversation_service.save_conversation(self.agent_manager),
+            exclusive=True,
+        )
+
+    async def _check_and_load_conversation(self) -> None:
+        """Check if conversation exists and load it if it does."""
+        if await self.conversation_manager.exists():
+            self._load_conversation()
 
     def _load_conversation(self) -> None:
         """Load conversation from persistent storage."""
-        # Use conversation service for restoration
-        success, error_msg, restored_type = (
-            self.conversation_service.restore_conversation(
+
+        # Use conversation service for restoration (run async)
+        async def _do_load() -> None:
+            (
+                success,
+                error_msg,
+                restored_type,
+            ) = await self.conversation_service.restore_conversation(
                 self.agent_manager, self.deps.usage_manager
             )
-        )
 
-        if not success and error_msg:
-            self.mount_hint(error_msg)
-        elif success and restored_type:
-            # Update the current mode to match restored conversation
-            self.mode = restored_type
+            if not success and error_msg:
+                self.mount_hint(error_msg)
+            elif success and restored_type:
+                # Update the current mode to match restored conversation
+                self.mode = restored_type
+
+        self.run_worker(_do_load(), exclusive=False)
+
+    @work
+    async def _check_and_show_onboarding(self) -> None:
+        """Check if onboarding should be shown and display modal if needed."""
+        config_manager = get_config_manager()
+        config = await config_manager.load()
+
+        # Only show onboarding if it hasn't been shown before
+        if config.shown_onboarding_popup is None:
+            # Show the onboarding modal
+            await self.app.push_screen_wait(OnboardingModal())
+
+            # Mark as shown in config with current timestamp
+            config.shown_onboarding_popup = datetime.now(timezone.utc)
+            await config_manager.save(config)
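The new _check_and_show_onboarding() above follows a show-once modal pattern: push_screen_wait() must run inside a worker, and a timestamp in the config records that the modal was seen. A compact sketch of that pattern under the same assumptions (OnceModal and the in-memory seen_at attribute are stand-ins for the real screen and persisted config field):

    from datetime import datetime, timezone

    from textual import work
    from textual.app import App, ComposeResult
    from textual.screen import ModalScreen
    from textual.widgets import Button, Static


    class OnceModal(ModalScreen[None]):
        def compose(self) -> ComposeResult:
            yield Static("Welcome! This modal is shown only once.")
            yield Button("Close")

        def on_button_pressed(self) -> None:
            self.dismiss(None)


    class OnceApp(App[None]):
        seen_at: datetime | None = None  # stand-in for config.shown_onboarding_popup

        def on_mount(self) -> None:
            self.maybe_show_onboarding()

        @work
        async def maybe_show_onboarding(self) -> None:
            if self.seen_at is None:
                await self.push_screen_wait(OnceModal())
                # A real app would persist this timestamp, as the diff does via config save.
                self.seen_at = datetime.now(timezone.utc)


    if __name__ == "__main__":
        OnceApp().run()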
shotgun/tui/screens/chat/help_text.py CHANGED
@@ -11,14 +11,14 @@ def help_text_with_codebase(already_indexed: bool = False) -> str:
         Formatted help text string.
     """
     return (
-        "Howdy! Welcome to Shotgun - the context tool for software engineering. \n\n"
-        "You can research, build specs, plan, create tasks, and export context to your "
-        "favorite code-gen agents.\n\n"
-        f"{'' if already_indexed else 'Once your codebase is indexed, '}I can help with:\n\n"
-        "- Speccing out a new feature\n"
-        "- Onboarding you onto this project\n"
-        "- Helping with a refactor spec\n"
-        "- Creating AGENTS.md file for this project\n"
+        "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
+        "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
+        f"{'It' if already_indexed else 'Once your codebase is indexed, it'} can help you:\n"
+        "- Research your codebase and spec out new features\n"
+        "- Create implementation plans that fit your architecture\n"
+        "- Generate AGENTS.md files for AI coding agents\n"
+        "- Onboard to existing projects or plan refactors\n\n"
+        "Ready to build something? Let's go.\n"
     )
 
 
@@ -29,11 +29,12 @@ def help_text_empty_dir() -> str:
         Formatted help text string.
     """
     return (
-        "Howdy! Welcome to Shotgun - the context tool for software engineering.\n\n"
-        "You can research, build specs, plan, create tasks, and export context to your "
-        "favorite code-gen agents.\n\n"
-        "What would you like to build? Here are some examples:\n\n"
-        "- Research FastAPI vs Django\n"
-        "- Plan my new web app using React\n"
-        "- Create PRD for my planned product\n"
+        "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
+        "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
+        "It can help you:\n"
+        "- Research your codebase and spec out new features\n"
+        "- Create implementation plans that fit your architecture\n"
+        "- Generate AGENTS.md files for AI coding agents\n"
+        "- Onboard to existing projects or plan refactors\n\n"
+        "Ready to build something? Let's go.\n"
     )
shotgun/tui/screens/chat_screen/command_providers.py CHANGED
@@ -369,6 +369,11 @@ class UnifiedCommandProvider(Provider):
             self.chat_screen.action_show_usage,
             help="Display usage information for the current session",
         )
+        yield DiscoveryHit(
+            "View Onboarding",
+            self.chat_screen.action_view_onboarding,
+            help="View the onboarding tutorial and helpful resources",
+        )
 
     async def search(self, query: str) -> AsyncGenerator[Hit, None]:
         """Search for commands in alphabetical order."""
@@ -416,6 +421,11 @@
                 self.chat_screen.action_show_usage,
                 "Display usage information for the current session",
             ),
+            (
+                "View Onboarding",
+                self.chat_screen.action_view_onboarding,
+                "View the onboarding tutorial and helpful resources",
+            ),
         ]
 
         for title, callback, help_text in commands:
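The command-palette entries above use Textual's Provider API: discover() yields DiscoveryHit objects for browsing and search() yields scored Hit objects. A small illustrative provider under the same assumptions (the self.app.bell callback is a stand-in for the real action that opens the onboarding modal):

    from typing import AsyncIterator

    from textual.command import DiscoveryHit, Hit, Provider


    class OnboardingProvider(Provider):
        async def discover(self) -> AsyncIterator[DiscoveryHit]:
            yield DiscoveryHit(
                "View Onboarding",
                self.app.bell,  # stand-in callback
                help="View the onboarding tutorial and helpful resources",
            )

        async def search(self, query: str) -> AsyncIterator[Hit]:
            matcher = self.matcher(query)
            score = matcher.match("View Onboarding")
            if score > 0:
                yield Hit(score, matcher.highlight("View Onboarding"), self.app.bell)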