shotgun-sh 0.2.8.dev2__py3-none-any.whl → 0.3.3.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175) hide show
  1. shotgun/agents/agent_manager.py +382 -60
  2. shotgun/agents/common.py +15 -9
  3. shotgun/agents/config/README.md +89 -0
  4. shotgun/agents/config/__init__.py +10 -1
  5. shotgun/agents/config/constants.py +0 -6
  6. shotgun/agents/config/manager.py +383 -82
  7. shotgun/agents/config/models.py +122 -18
  8. shotgun/agents/config/provider.py +81 -15
  9. shotgun/agents/config/streaming_test.py +119 -0
  10. shotgun/agents/context_analyzer/__init__.py +28 -0
  11. shotgun/agents/context_analyzer/analyzer.py +475 -0
  12. shotgun/agents/context_analyzer/constants.py +9 -0
  13. shotgun/agents/context_analyzer/formatter.py +115 -0
  14. shotgun/agents/context_analyzer/models.py +212 -0
  15. shotgun/agents/conversation/__init__.py +18 -0
  16. shotgun/agents/conversation/filters.py +164 -0
  17. shotgun/agents/conversation/history/chunking.py +278 -0
  18. shotgun/agents/{history → conversation/history}/compaction.py +36 -5
  19. shotgun/agents/{history → conversation/history}/constants.py +5 -0
  20. shotgun/agents/conversation/history/file_content_deduplication.py +216 -0
  21. shotgun/agents/{history → conversation/history}/history_processors.py +380 -8
  22. shotgun/agents/{history → conversation/history}/token_counting/anthropic.py +25 -1
  23. shotgun/agents/{history → conversation/history}/token_counting/base.py +14 -3
  24. shotgun/agents/{history → conversation/history}/token_counting/openai.py +11 -1
  25. shotgun/agents/{history → conversation/history}/token_counting/sentencepiece_counter.py +8 -0
  26. shotgun/agents/{history → conversation/history}/token_counting/tokenizer_cache.py +3 -1
  27. shotgun/agents/{history → conversation/history}/token_counting/utils.py +0 -3
  28. shotgun/agents/{conversation_manager.py → conversation/manager.py} +36 -20
  29. shotgun/agents/{conversation_history.py → conversation/models.py} +8 -92
  30. shotgun/agents/error/__init__.py +11 -0
  31. shotgun/agents/error/models.py +19 -0
  32. shotgun/agents/export.py +2 -2
  33. shotgun/agents/plan.py +2 -2
  34. shotgun/agents/research.py +3 -3
  35. shotgun/agents/runner.py +230 -0
  36. shotgun/agents/specify.py +2 -2
  37. shotgun/agents/tasks.py +2 -2
  38. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  39. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  40. shotgun/agents/tools/codebase/file_read.py +11 -2
  41. shotgun/agents/tools/codebase/query_graph.py +6 -0
  42. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  43. shotgun/agents/tools/file_management.py +27 -7
  44. shotgun/agents/tools/registry.py +217 -0
  45. shotgun/agents/tools/web_search/__init__.py +8 -8
  46. shotgun/agents/tools/web_search/anthropic.py +8 -2
  47. shotgun/agents/tools/web_search/gemini.py +7 -1
  48. shotgun/agents/tools/web_search/openai.py +8 -2
  49. shotgun/agents/tools/web_search/utils.py +2 -2
  50. shotgun/agents/usage_manager.py +16 -11
  51. shotgun/api_endpoints.py +7 -3
  52. shotgun/build_constants.py +2 -2
  53. shotgun/cli/clear.py +53 -0
  54. shotgun/cli/compact.py +188 -0
  55. shotgun/cli/config.py +8 -5
  56. shotgun/cli/context.py +154 -0
  57. shotgun/cli/error_handler.py +24 -0
  58. shotgun/cli/export.py +34 -34
  59. shotgun/cli/feedback.py +4 -2
  60. shotgun/cli/models.py +1 -0
  61. shotgun/cli/plan.py +34 -34
  62. shotgun/cli/research.py +18 -10
  63. shotgun/cli/spec/__init__.py +5 -0
  64. shotgun/cli/spec/backup.py +81 -0
  65. shotgun/cli/spec/commands.py +132 -0
  66. shotgun/cli/spec/models.py +48 -0
  67. shotgun/cli/spec/pull_service.py +219 -0
  68. shotgun/cli/specify.py +20 -19
  69. shotgun/cli/tasks.py +34 -34
  70. shotgun/cli/update.py +16 -2
  71. shotgun/codebase/core/change_detector.py +5 -3
  72. shotgun/codebase/core/code_retrieval.py +4 -2
  73. shotgun/codebase/core/ingestor.py +163 -15
  74. shotgun/codebase/core/manager.py +13 -4
  75. shotgun/codebase/core/nl_query.py +1 -1
  76. shotgun/codebase/models.py +2 -0
  77. shotgun/exceptions.py +357 -0
  78. shotgun/llm_proxy/__init__.py +17 -0
  79. shotgun/llm_proxy/client.py +215 -0
  80. shotgun/llm_proxy/models.py +137 -0
  81. shotgun/logging_config.py +60 -27
  82. shotgun/main.py +77 -11
  83. shotgun/posthog_telemetry.py +38 -29
  84. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +28 -2
  85. shotgun/prompts/agents/partials/interactive_mode.j2 +3 -3
  86. shotgun/prompts/agents/plan.j2 +16 -0
  87. shotgun/prompts/agents/research.j2 +16 -3
  88. shotgun/prompts/agents/specify.j2 +54 -1
  89. shotgun/prompts/agents/state/system_state.j2 +0 -2
  90. shotgun/prompts/agents/tasks.j2 +16 -0
  91. shotgun/prompts/history/chunk_summarization.j2 +34 -0
  92. shotgun/prompts/history/combine_summaries.j2 +53 -0
  93. shotgun/sdk/codebase.py +14 -3
  94. shotgun/sentry_telemetry.py +163 -16
  95. shotgun/settings.py +243 -0
  96. shotgun/shotgun_web/__init__.py +67 -1
  97. shotgun/shotgun_web/client.py +42 -1
  98. shotgun/shotgun_web/constants.py +46 -0
  99. shotgun/shotgun_web/exceptions.py +29 -0
  100. shotgun/shotgun_web/models.py +390 -0
  101. shotgun/shotgun_web/shared_specs/__init__.py +32 -0
  102. shotgun/shotgun_web/shared_specs/file_scanner.py +175 -0
  103. shotgun/shotgun_web/shared_specs/hasher.py +83 -0
  104. shotgun/shotgun_web/shared_specs/models.py +71 -0
  105. shotgun/shotgun_web/shared_specs/upload_pipeline.py +329 -0
  106. shotgun/shotgun_web/shared_specs/utils.py +34 -0
  107. shotgun/shotgun_web/specs_client.py +703 -0
  108. shotgun/shotgun_web/supabase_client.py +31 -0
  109. shotgun/telemetry.py +10 -33
  110. shotgun/tui/app.py +310 -46
  111. shotgun/tui/commands/__init__.py +1 -1
  112. shotgun/tui/components/context_indicator.py +179 -0
  113. shotgun/tui/components/mode_indicator.py +70 -0
  114. shotgun/tui/components/status_bar.py +48 -0
  115. shotgun/tui/containers.py +91 -0
  116. shotgun/tui/dependencies.py +39 -0
  117. shotgun/tui/layout.py +5 -0
  118. shotgun/tui/protocols.py +45 -0
  119. shotgun/tui/screens/chat/__init__.py +5 -0
  120. shotgun/tui/screens/chat/chat.tcss +54 -0
  121. shotgun/tui/screens/chat/chat_screen.py +1531 -0
  122. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +243 -0
  123. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  124. shotgun/tui/screens/chat/help_text.py +40 -0
  125. shotgun/tui/screens/chat/prompt_history.py +48 -0
  126. shotgun/tui/screens/chat.tcss +11 -0
  127. shotgun/tui/screens/chat_screen/command_providers.py +91 -4
  128. shotgun/tui/screens/chat_screen/hint_message.py +76 -1
  129. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  130. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  131. shotgun/tui/screens/chat_screen/history/chat_history.py +115 -0
  132. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  133. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  134. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  135. shotgun/tui/screens/confirmation_dialog.py +191 -0
  136. shotgun/tui/screens/directory_setup.py +45 -41
  137. shotgun/tui/screens/feedback.py +14 -7
  138. shotgun/tui/screens/github_issue.py +111 -0
  139. shotgun/tui/screens/model_picker.py +77 -32
  140. shotgun/tui/screens/onboarding.py +580 -0
  141. shotgun/tui/screens/pipx_migration.py +205 -0
  142. shotgun/tui/screens/provider_config.py +116 -35
  143. shotgun/tui/screens/shared_specs/__init__.py +21 -0
  144. shotgun/tui/screens/shared_specs/create_spec_dialog.py +273 -0
  145. shotgun/tui/screens/shared_specs/models.py +56 -0
  146. shotgun/tui/screens/shared_specs/share_specs_dialog.py +390 -0
  147. shotgun/tui/screens/shared_specs/upload_progress_screen.py +452 -0
  148. shotgun/tui/screens/shotgun_auth.py +112 -18
  149. shotgun/tui/screens/spec_pull.py +288 -0
  150. shotgun/tui/screens/welcome.py +137 -11
  151. shotgun/tui/services/__init__.py +5 -0
  152. shotgun/tui/services/conversation_service.py +187 -0
  153. shotgun/tui/state/__init__.py +7 -0
  154. shotgun/tui/state/processing_state.py +185 -0
  155. shotgun/tui/utils/mode_progress.py +14 -7
  156. shotgun/tui/widgets/__init__.py +5 -0
  157. shotgun/tui/widgets/widget_coordinator.py +263 -0
  158. shotgun/utils/file_system_utils.py +22 -2
  159. shotgun/utils/marketing.py +110 -0
  160. shotgun/utils/update_checker.py +69 -14
  161. shotgun_sh-0.3.3.dev1.dist-info/METADATA +472 -0
  162. shotgun_sh-0.3.3.dev1.dist-info/RECORD +229 -0
  163. {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/WHEEL +1 -1
  164. {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/entry_points.txt +1 -0
  165. {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/licenses/LICENSE +1 -1
  166. shotgun/tui/screens/chat.py +0 -996
  167. shotgun/tui/screens/chat_screen/history.py +0 -335
  168. shotgun_sh-0.2.8.dev2.dist-info/METADATA +0 -126
  169. shotgun_sh-0.2.8.dev2.dist-info/RECORD +0 -155
  170. /shotgun/agents/{history → conversation/history}/__init__.py +0 -0
  171. /shotgun/agents/{history → conversation/history}/context_extraction.py +0 -0
  172. /shotgun/agents/{history → conversation/history}/history_building.py +0 -0
  173. /shotgun/agents/{history → conversation/history}/message_utils.py +0 -0
  174. /shotgun/agents/{history → conversation/history}/token_counting/__init__.py +0 -0
  175. /shotgun/agents/{history → conversation/history}/token_estimation.py +0 -0
@@ -0,0 +1,1531 @@
1
+ """Main chat screen implementation."""
2
+
3
+ import asyncio
4
+ import logging
5
+ import time
6
+ from datetime import datetime, timezone
7
+ from pathlib import Path
8
+ from typing import cast
9
+
10
+ from pydantic_ai.messages import (
11
+ ModelMessage,
12
+ ModelRequest,
13
+ ModelResponse,
14
+ TextPart,
15
+ ToolCallPart,
16
+ ToolReturnPart,
17
+ UserPromptPart,
18
+ )
19
+ from textual import events, on, work
20
+ from textual.app import ComposeResult
21
+ from textual.command import CommandPalette
22
+ from textual.containers import Container, Grid
23
+ from textual.keys import Keys
24
+ from textual.reactive import reactive
25
+ from textual.screen import Screen
26
+ from textual.widgets import Static
27
+
28
+ from shotgun.agents.agent_manager import (
29
+ AgentManager,
30
+ ClarifyingQuestionsMessage,
31
+ CompactionCompletedMessage,
32
+ CompactionStartedMessage,
33
+ MessageHistoryUpdated,
34
+ ModelConfigUpdated,
35
+ PartialResponseMessage,
36
+ )
37
+ from shotgun.agents.config import get_config_manager
38
+ from shotgun.agents.config.models import MODEL_SPECS
39
+ from shotgun.agents.conversation import ConversationManager
40
+ from shotgun.agents.conversation.history.compaction import apply_persistent_compaction
41
+ from shotgun.agents.conversation.history.token_estimation import (
42
+ estimate_tokens_from_messages,
43
+ )
44
+ from shotgun.agents.models import (
45
+ AgentDeps,
46
+ AgentType,
47
+ FileOperationTracker,
48
+ )
49
+ from shotgun.agents.runner import AgentRunner
50
+ from shotgun.codebase.core.manager import (
51
+ CodebaseAlreadyIndexedError,
52
+ CodebaseGraphManager,
53
+ )
54
+ from shotgun.codebase.models import IndexProgress, ProgressPhase
55
+ from shotgun.exceptions import (
56
+ SHOTGUN_CONTACT_EMAIL,
57
+ ErrorNotPickedUpBySentry,
58
+ ShotgunAccountException,
59
+ )
60
+ from shotgun.posthog_telemetry import track_event
61
+ from shotgun.sdk.codebase import CodebaseSDK
62
+ from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
63
+ from shotgun.tui.commands import CommandHandler
64
+ from shotgun.tui.components.context_indicator import ContextIndicator
65
+ from shotgun.tui.components.mode_indicator import ModeIndicator
66
+ from shotgun.tui.components.prompt_input import PromptInput
67
+ from shotgun.tui.components.spinner import Spinner
68
+ from shotgun.tui.components.status_bar import StatusBar
69
+
70
+ # TUIErrorHandler removed - exceptions now caught directly
71
+ from shotgun.tui.screens.chat.codebase_index_prompt_screen import (
72
+ CodebaseIndexPromptScreen,
73
+ )
74
+ from shotgun.tui.screens.chat.codebase_index_selection import CodebaseIndexSelection
75
+ from shotgun.tui.screens.chat.help_text import (
76
+ help_text_empty_dir,
77
+ help_text_with_codebase,
78
+ )
79
+ from shotgun.tui.screens.chat.prompt_history import PromptHistory
80
+ from shotgun.tui.screens.chat_screen.command_providers import (
81
+ DeleteCodebasePaletteProvider,
82
+ UnifiedCommandProvider,
83
+ )
84
+ from shotgun.tui.screens.chat_screen.hint_message import HintMessage
85
+ from shotgun.tui.screens.chat_screen.history import ChatHistory
86
+ from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
87
+ from shotgun.tui.screens.onboarding import OnboardingModal
88
+ from shotgun.tui.screens.shared_specs import (
89
+ CreateSpecDialog,
90
+ ShareSpecsAction,
91
+ ShareSpecsDialog,
92
+ UploadProgressScreen,
93
+ )
94
+ from shotgun.tui.services.conversation_service import ConversationService
95
+ from shotgun.tui.state.processing_state import ProcessingStateManager
96
+ from shotgun.tui.utils.mode_progress import PlaceholderHints
97
+ from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
98
+ from shotgun.utils import get_shotgun_home
99
+ from shotgun.utils.file_system_utils import get_shotgun_base_path
100
+ from shotgun.utils.marketing import MarketingManager
101
+
102
+ logger = logging.getLogger(__name__)
103
+
104
+
105
+ def _format_duration(seconds: float) -> str:
106
+ """Format duration in natural language."""
107
+ if seconds < 60:
108
+ return f"{int(seconds)} seconds"
109
+ minutes = int(seconds // 60)
110
+ secs = int(seconds % 60)
111
+ if secs == 0:
112
+ return f"{minutes} minute{'s' if minutes != 1 else ''}"
113
+ return f"{minutes} minute{'s' if minutes != 1 else ''} {secs} seconds"
114
+
115
+
116
+ def _format_count(count: int) -> str:
117
+ """Format count in natural language (e.g., '5 thousand')."""
118
+ if count < 1000:
119
+ return str(count)
120
+ elif count < 1_000_000:
121
+ thousands = count / 1000
122
+ if thousands == int(thousands):
123
+ return f"{int(thousands)} thousand"
124
+ return f"{thousands:.1f} thousand"
125
+ else:
126
+ millions = count / 1_000_000
127
+ if millions == int(millions):
128
+ return f"{int(millions)} million"
129
+ return f"{millions:.1f} million"
130
+
131
+
132
+ class ChatScreen(Screen[None]):
133
+ CSS_PATH = "chat.tcss"
134
+
135
+ BINDINGS = [
136
+ ("ctrl+p", "command_palette", "Command Palette"),
137
+ ("shift+tab", "toggle_mode", "Toggle mode"),
138
+ ("ctrl+u", "show_usage", "Show usage"),
139
+ ]
140
+
141
+ COMMANDS = {
142
+ UnifiedCommandProvider,
143
+ }
144
+
145
+ value = reactive("")
146
+ mode = reactive(AgentType.RESEARCH)
147
+ history: PromptHistory = PromptHistory()
148
+ messages = reactive(list[ModelMessage | HintMessage]())
149
+ indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
150
+
151
+ # Q&A mode state (for structured output clarifying questions)
152
+ qa_mode = reactive(False)
153
+ qa_questions: list[str] = []
154
+ qa_current_index = reactive(0)
155
+ qa_answers: list[str] = []
156
+
157
+ # Working state - keep reactive for Textual watchers
158
+ working = reactive(False)
159
+
160
+ # Throttle context indicator updates (in seconds)
161
+ _last_context_update: float = 0.0
162
+ _context_update_throttle: float = 5.0 # 5 seconds
163
+
164
+ def __init__(
165
+ self,
166
+ agent_manager: AgentManager,
167
+ conversation_manager: ConversationManager,
168
+ conversation_service: ConversationService,
169
+ widget_coordinator: WidgetCoordinator,
170
+ processing_state: ProcessingStateManager,
171
+ command_handler: CommandHandler,
172
+ placeholder_hints: PlaceholderHints,
173
+ codebase_sdk: CodebaseSDK,
174
+ deps: AgentDeps,
175
+ continue_session: bool = False,
176
+ force_reindex: bool = False,
177
+ show_pull_hint: bool = False,
178
+ ) -> None:
179
+ """Initialize the ChatScreen.
180
+
181
+ All dependencies must be provided via dependency injection.
182
+ No objects are created in the constructor.
183
+
184
+ Args:
185
+ agent_manager: AgentManager instance for managing agent interactions
186
+ conversation_manager: ConversationManager for conversation persistence
187
+ conversation_service: ConversationService for conversation save/load/restore
188
+ widget_coordinator: WidgetCoordinator for centralized widget updates
189
+ processing_state: ProcessingStateManager for managing processing state
190
+ command_handler: CommandHandler for handling slash commands
191
+ placeholder_hints: PlaceholderHints for providing input hints
192
+ codebase_sdk: CodebaseSDK for codebase indexing operations
193
+ deps: AgentDeps configuration for agent dependencies
194
+ continue_session: Whether to continue a previous session
195
+ force_reindex: Whether to force reindexing of codebases
196
+ show_pull_hint: Whether to show hint about recently pulled spec
197
+ """
198
+ super().__init__()
199
+
200
+ # All dependencies are now required and injected
201
+ self.deps = deps
202
+ self.codebase_sdk = codebase_sdk
203
+ self.agent_manager = agent_manager
204
+ self.command_handler = command_handler
205
+ self.placeholder_hints = placeholder_hints
206
+ self.conversation_manager = conversation_manager
207
+ self.conversation_service = conversation_service
208
+ self.widget_coordinator = widget_coordinator
209
+ self.processing_state = processing_state
210
+ self.continue_session = continue_session
211
+ self.force_reindex = force_reindex
212
+ self.show_pull_hint = show_pull_hint
213
+
214
+ def on_mount(self) -> None:
215
+ # Use widget coordinator to focus input
216
+ self.widget_coordinator.update_prompt_input(focus=True)
217
+ # Hide spinner initially
218
+ self.query_one("#spinner").display = False
219
+
220
+ # Bind spinner to processing state manager
221
+ self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))
222
+
223
+ # Load conversation history if --continue flag was provided
224
+ # Use call_later to handle async exists() check
225
+ if self.continue_session:
226
+ self.call_later(self._check_and_load_conversation)
227
+
228
+ # Show pull hint if launching after spec pull
229
+ if self.show_pull_hint:
230
+ self.call_later(self._show_pull_hint)
231
+
232
+ self.call_later(self.check_if_codebase_is_indexed)
233
+ # Initial update of context indicator
234
+ self.update_context_indicator()
235
+
236
+ # Show onboarding popup if not shown before
237
+ self.call_later(self._check_and_show_onboarding)
238
+
239
+ async def on_key(self, event: events.Key) -> None:
240
+ """Handle key presses for cancellation."""
241
+ # If escape is pressed during Q&A mode, exit Q&A
242
+ if event.key in (Keys.Escape, Keys.ControlC) and self.qa_mode:
243
+ self._exit_qa_mode()
244
+ # Re-enable the input
245
+ self.widget_coordinator.update_prompt_input(focus=True)
246
+ # Prevent the event from propagating (don't quit the app)
247
+ event.stop()
248
+ return
249
+
250
+ # If escape or ctrl+c is pressed while agent is working, cancel the operation
251
+ if event.key in (Keys.Escape, Keys.ControlC):
252
+ if self.processing_state.cancel_current_operation(cancel_key=event.key):
253
+ # Show cancellation message
254
+ self.mount_hint("⚠️ Cancelling operation...")
255
+ # Re-enable the input
256
+ self.widget_coordinator.update_prompt_input(focus=True)
257
+ # Prevent the event from propagating (don't quit the app)
258
+ event.stop()
259
+
260
+ @work
261
+ async def check_if_codebase_is_indexed(self) -> None:
262
+ cur_dir = Path.cwd().resolve()
263
+ is_empty = all(
264
+ dir.is_dir() and dir.name in ["__pycache__", ".git", ".shotgun"]
265
+ for dir in cur_dir.iterdir()
266
+ )
267
+ if is_empty or self.continue_session:
268
+ return
269
+
270
+ # If force_reindex is True, delete any existing graphs for this directory
271
+ if self.force_reindex:
272
+ accessible_graphs = (
273
+ await self.codebase_sdk.list_codebases_for_directory()
274
+ ).graphs
275
+ for graph in accessible_graphs:
276
+ try:
277
+ await self.codebase_sdk.delete_codebase(graph.graph_id)
278
+ logger.info(
279
+ f"Deleted existing graph {graph.graph_id} due to --force-reindex"
280
+ )
281
+ except Exception as e:
282
+ logger.warning(
283
+ f"Failed to delete graph {graph.graph_id} during force reindex: {e}"
284
+ )
285
+
286
+ # Check if the current directory has any accessible codebases
287
+ accessible_graphs = (
288
+ await self.codebase_sdk.list_codebases_for_directory()
289
+ ).graphs
290
+ if accessible_graphs:
291
+ self.mount_hint(help_text_with_codebase(already_indexed=True))
292
+ return
293
+
294
+ # Ask user if they want to index the current directory
295
+ should_index = await self.app.push_screen_wait(CodebaseIndexPromptScreen())
296
+ if not should_index:
297
+ self.mount_hint(help_text_empty_dir())
298
+ return
299
+
300
+ self.mount_hint(help_text_with_codebase(already_indexed=False))
301
+
302
+ # Auto-index the current directory with its name
303
+ cwd_name = cur_dir.name
304
+ selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
305
+ self.call_later(lambda: self.index_codebase(selection))
306
+
307
+ def watch_mode(self, new_mode: AgentType) -> None:
308
+ """React to mode changes by updating the agent manager."""
309
+
310
+ if self.is_mounted:
311
+ self.agent_manager.set_agent(new_mode)
312
+ # Use widget coordinator for all widget updates
313
+ self.widget_coordinator.update_for_mode_change(new_mode)
314
+
315
+ def watch_working(self, is_working: bool) -> None:
316
+ """Show or hide the spinner based on working state."""
317
+ logger.debug(f"[WATCH] watch_working called - is_working={is_working}")
318
+ if self.is_mounted:
319
+ # Use widget coordinator for all widget updates
320
+ self.widget_coordinator.update_for_processing_state(is_working)
321
+
322
+ def watch_qa_mode(self, qa_mode_active: bool) -> None:
323
+ """Update UI when Q&A mode state changes."""
324
+ if self.is_mounted:
325
+ # Use widget coordinator for all widget updates
326
+ self.widget_coordinator.update_for_qa_mode(qa_mode_active)
327
+
328
+ def watch_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
329
+ """Update the chat history when messages change."""
330
+ if self.is_mounted:
331
+ # Use widget coordinator for all widget updates
332
+ self.widget_coordinator.update_messages(messages)
333
+
334
+ def action_toggle_mode(self) -> None:
335
+ # Prevent mode switching during Q&A
336
+ if self.qa_mode:
337
+ self.agent_manager.add_hint_message(
338
+ HintMessage(message="⚠️ Cannot switch modes while answering questions")
339
+ )
340
+ return
341
+
342
+ modes = [
343
+ AgentType.RESEARCH,
344
+ AgentType.SPECIFY,
345
+ AgentType.PLAN,
346
+ AgentType.TASKS,
347
+ AgentType.EXPORT,
348
+ ]
349
+ self.mode = modes[(modes.index(self.mode) + 1) % len(modes)]
350
+ self.agent_manager.set_agent(self.mode)
351
+ # Re-focus input after mode change
352
+ self.call_later(lambda: self.widget_coordinator.update_prompt_input(focus=True))
353
+
354
+ async def action_show_usage(self) -> None:
355
+ usage_hint = self.agent_manager.get_usage_hint()
356
+ logger.info(f"Usage hint: {usage_hint}")
357
+
358
+ # Add budget info for Shotgun Account users
359
+ if self.deps.llm_model.is_shotgun_account:
360
+ try:
361
+ from shotgun.llm_proxy import LiteLLMProxyClient
362
+
363
+ logger.debug("Fetching budget info for Shotgun Account")
364
+ client = LiteLLMProxyClient(self.deps.llm_model.api_key)
365
+ budget_info = await client.get_budget_info()
366
+
367
+ # Format budget section
368
+ source_label = "Key" if budget_info.source == "key" else "Team"
369
+ budget_section = f"""## Shotgun Account Budget
370
+
371
+ * Max Budget: ${budget_info.max_budget:.2f}
372
+ * Current Spend: ${budget_info.spend:.2f}
373
+ * Remaining: ${budget_info.remaining:.2f} ({100 - budget_info.percentage_used:.1f}%)
374
+ * Budget Source: {source_label}-level
375
+
376
+ **Questions or need help?**"""
377
+
378
+ # Build markdown_before (usage + budget info before email)
379
+ if usage_hint:
380
+ markdown_before = f"{usage_hint}\n\n{budget_section}"
381
+ else:
382
+ markdown_before = budget_section
383
+
384
+ markdown_after = (
385
+ "\n\n_Reach out anytime for billing questions "
386
+ "or to increase your budget._"
387
+ )
388
+
389
+ # Mount with email copy button
390
+ self.mount_hint_with_email(
391
+ markdown_before=markdown_before,
392
+ email="contact@shotgun.sh",
393
+ markdown_after=markdown_after,
394
+ )
395
+ logger.debug("Successfully added budget info to usage hint")
396
+ return # Exit early since we've already mounted
397
+
398
+ except Exception as e:
399
+ logger.warning(f"Failed to fetch budget info: {e}")
400
+ # For Shotgun Account, show budget fetch error
401
+ # If we have usage data, still show it
402
+ if usage_hint:
403
+ # Show usage even though budget fetch failed
404
+ self.mount_hint(usage_hint)
405
+ else:
406
+ # No usage and budget fetch failed - show specific error with email
407
+ markdown_before = (
408
+ "⚠️ **Unable to fetch budget information**\n\n"
409
+ "There was an error retrieving your budget data."
410
+ )
411
+ markdown_after = (
412
+ "\n\n_Try the command again in a moment. "
413
+ "If the issue persists, reach out for help._"
414
+ )
415
+ self.mount_hint_with_email(
416
+ markdown_before=markdown_before,
417
+ email="contact@shotgun.sh",
418
+ markdown_after=markdown_after,
419
+ )
420
+ return # Exit early
421
+
422
+ # Fallback for non-Shotgun Account users
423
+ if usage_hint:
424
+ self.mount_hint(usage_hint)
425
+ else:
426
+ self.agent_manager.add_hint_message(
427
+ HintMessage(message="⚠️ No usage hint available")
428
+ )
429
+
430
+ async def action_show_context(self) -> None:
431
+ context_hint = await self.agent_manager.get_context_hint()
432
+ if context_hint:
433
+ self.mount_hint(context_hint)
434
+ else:
435
+ self.agent_manager.add_hint_message(
436
+ HintMessage(message="⚠️ No context analysis available")
437
+ )
438
+
439
+ def action_view_onboarding(self) -> None:
440
+ """Show the onboarding modal."""
441
+ self.app.push_screen(OnboardingModal())
442
+
443
+ @work
444
+ async def action_compact_conversation(self) -> None:
445
+ """Compact the conversation history to reduce size."""
446
+ logger.debug(f"[COMPACT] Starting compaction - working={self.working}")
447
+
448
+ try:
449
+ # Show spinner and enable ESC cancellation
450
+ from textual.worker import get_current_worker
451
+
452
+ self.processing_state.start_processing("Compacting Conversation...")
453
+ self.processing_state.bind_worker(get_current_worker())
454
+ logger.debug(f"[COMPACT] Processing started - working={self.working}")
455
+
456
+ # Get current message count and tokens
457
+ original_count = len(self.agent_manager.message_history)
458
+ original_tokens = await estimate_tokens_from_messages(
459
+ self.agent_manager.message_history, self.deps.llm_model
460
+ )
461
+
462
+ # Log compaction start
463
+ logger.info(
464
+ f"Starting conversation compaction - {original_count} messages, {original_tokens} tokens"
465
+ )
466
+
467
+ # Post compaction started event
468
+ self.agent_manager.post_message(CompactionStartedMessage())
469
+ logger.debug("[COMPACT] Posted CompactionStartedMessage")
470
+
471
+ # Apply compaction with force=True to bypass threshold checks
472
+ compacted_messages = await apply_persistent_compaction(
473
+ self.agent_manager.message_history, self.deps, force=True
474
+ )
475
+
476
+ logger.debug(
477
+ f"[COMPACT] Compacted messages: count={len(compacted_messages)}, "
478
+ f"last_message_type={type(compacted_messages[-1]).__name__ if compacted_messages else 'None'}"
479
+ )
480
+
481
+ # Check last response usage
482
+ last_response = next(
483
+ (
484
+ msg
485
+ for msg in reversed(compacted_messages)
486
+ if isinstance(msg, ModelResponse)
487
+ ),
488
+ None,
489
+ )
490
+ if last_response:
491
+ logger.debug(
492
+ f"[COMPACT] Last response has usage: {last_response.usage is not None}, "
493
+ f"usage={last_response.usage if last_response.usage else 'None'}"
494
+ )
495
+ else:
496
+ logger.warning(
497
+ "[COMPACT] No ModelResponse found in compacted messages!"
498
+ )
499
+
500
+ # Update agent manager's message history
501
+ self.agent_manager.message_history = compacted_messages
502
+ logger.debug("[COMPACT] Updated agent_manager.message_history")
503
+
504
+ # Calculate after metrics
505
+ compacted_count = len(compacted_messages)
506
+ compacted_tokens = await estimate_tokens_from_messages(
507
+ compacted_messages, self.deps.llm_model
508
+ )
509
+
510
+ # Calculate reductions
511
+ message_reduction = (
512
+ ((original_count - compacted_count) / original_count) * 100
513
+ if original_count > 0
514
+ else 0
515
+ )
516
+ token_reduction = (
517
+ ((original_tokens - compacted_tokens) / original_tokens) * 100
518
+ if original_tokens > 0
519
+ else 0
520
+ )
521
+
522
+ # Save to conversation file
523
+ conversation_file = get_shotgun_home() / "conversation.json"
524
+ manager = ConversationManager(conversation_file)
525
+ conversation = await manager.load()
526
+
527
+ if conversation:
528
+ conversation.set_agent_messages(compacted_messages)
529
+ await manager.save(conversation)
530
+
531
+ # Post compaction completed event
532
+ self.agent_manager.post_message(CompactionCompletedMessage())
533
+
534
+ # Post message history updated event
535
+ self.agent_manager.post_message(
536
+ MessageHistoryUpdated(
537
+ messages=self.agent_manager.ui_message_history.copy(),
538
+ agent_type=self.agent_manager._current_agent_type,
539
+ file_operations=None,
540
+ )
541
+ )
542
+ logger.debug("[COMPACT] Posted MessageHistoryUpdated event")
543
+
544
+ # Force immediate context indicator update
545
+ logger.debug("[COMPACT] Calling update_context_indicator()")
546
+ self.update_context_indicator()
547
+
548
+ # Log compaction completion
549
+ logger.info(
550
+ f"Compaction completed: {original_count} → {compacted_count} messages "
551
+ f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
552
+ )
553
+
554
+ # Add persistent hint message with stats
555
+ self.mount_hint(
556
+ f"✓ Compacted conversation: {original_count} → {compacted_count} messages "
557
+ f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
558
+ )
559
+
560
+ except Exception as e:
561
+ logger.error(f"Failed to compact conversation: {e}", exc_info=True)
562
+ self.agent_manager.add_hint_message(
563
+ HintMessage(message=f"❌ Failed to compact: {e}")
564
+ )
565
+ finally:
566
+ # Hide spinner
567
+ self.processing_state.stop_processing()
568
+ logger.debug(f"[COMPACT] Processing stopped - working={self.working}")
569
+
570
+ @work
571
+ async def action_clear_conversation(self) -> None:
572
+ """Clear the conversation history."""
573
+ # Show confirmation dialog
574
+ should_clear = await self.app.push_screen_wait(
575
+ ConfirmationDialog(
576
+ title="Clear conversation?",
577
+ message="This will permanently delete your entire conversation history. "
578
+ "All messages, context, and progress will be lost. "
579
+ "This action cannot be undone.",
580
+ confirm_label="Clear",
581
+ cancel_label="Keep",
582
+ confirm_variant="warning",
583
+ danger=True,
584
+ )
585
+ )
586
+
587
+ if not should_clear:
588
+ return # User cancelled
589
+
590
+ try:
591
+ # Clear message histories
592
+ self.agent_manager.message_history = []
593
+ self.agent_manager.ui_message_history = []
594
+
595
+ # Use conversation service to clear conversation
596
+ await self.conversation_service.clear_conversation()
597
+
598
+ # Post message history updated event to refresh UI
599
+ self.agent_manager.post_message(
600
+ MessageHistoryUpdated(
601
+ messages=[],
602
+ agent_type=self.agent_manager._current_agent_type,
603
+ file_operations=None,
604
+ )
605
+ )
606
+
607
+ # Show persistent success message
608
+ self.mount_hint("✓ Conversation cleared - Starting fresh!")
609
+
610
+ except Exception as e:
611
+ logger.error(f"Failed to clear conversation: {e}", exc_info=True)
612
+ self.agent_manager.add_hint_message(
613
+ HintMessage(message=f"❌ Failed to clear: {e}")
614
+ )
615
+
616
+ @work(exclusive=False)
617
+ async def update_context_indicator(self) -> None:
618
+ """Update the context indicator with current usage data."""
619
+ logger.debug("[CONTEXT] update_context_indicator called")
620
+ try:
621
+ logger.debug(
622
+ f"[CONTEXT] Getting context analysis - "
623
+ f"message_history_count={len(self.agent_manager.message_history)}"
624
+ )
625
+ analysis = await self.agent_manager.get_context_analysis()
626
+
627
+ if analysis:
628
+ logger.debug(
629
+ f"[CONTEXT] Analysis received - "
630
+ f"agent_context_tokens={analysis.agent_context_tokens}, "
631
+ f"max_usable_tokens={analysis.max_usable_tokens}, "
632
+ f"percentage={round((analysis.agent_context_tokens / analysis.max_usable_tokens) * 100, 1) if analysis.max_usable_tokens > 0 else 0}%"
633
+ )
634
+ else:
635
+ logger.warning("[CONTEXT] Analysis is None!")
636
+
637
+ model_name = self.deps.llm_model.name
638
+ # Use widget coordinator for context indicator update
639
+ self.widget_coordinator.update_context_indicator(analysis, model_name)
640
+ except Exception as e:
641
+ logger.error(
642
+ f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
643
+ )
644
+
645
+ @work(exclusive=False)
646
+ async def update_context_indicator_with_messages(
647
+ self,
648
+ agent_messages: list[ModelMessage],
649
+ ui_messages: list[ModelMessage | HintMessage],
650
+ ) -> None:
651
+ """Update the context indicator with specific message sets (for streaming updates).
652
+
653
+ Args:
654
+ agent_messages: Agent message history including streaming messages (for token counting)
655
+ ui_messages: UI message history including hints and streaming messages
656
+ """
657
+ try:
658
+ from shotgun.agents.context_analyzer.analyzer import ContextAnalyzer
659
+
660
+ analyzer = ContextAnalyzer(self.deps.llm_model)
661
+ # Analyze the combined message histories for accurate progressive token counts
662
+ analysis = await analyzer.analyze_conversation(agent_messages, ui_messages)
663
+
664
+ if analysis:
665
+ model_name = self.deps.llm_model.name
666
+ self.widget_coordinator.update_context_indicator(analysis, model_name)
667
+ except Exception as e:
668
+ logger.error(
669
+ f"Failed to update context indicator with streaming messages: {e}",
670
+ exc_info=True,
671
+ )
672
+
673
+ def compose(self) -> ComposeResult:
674
+ """Create child widgets for the app."""
675
+ with Container(id="window"):
676
+ yield self.agent_manager
677
+ yield ChatHistory()
678
+ with Container(id="footer"):
679
+ yield Spinner(
680
+ text="Processing...",
681
+ id="spinner",
682
+ classes="" if self.working else "hidden",
683
+ )
684
+ yield StatusBar(working=self.working)
685
+ yield PromptInput(
686
+ text=self.value,
687
+ highlight_cursor_line=False,
688
+ id="prompt-input",
689
+ placeholder=self._placeholder_for_mode(self.mode),
690
+ )
691
+ with Grid():
692
+ yield ModeIndicator(mode=self.mode)
693
+ with Container(id="right-footer-indicators"):
694
+ yield ContextIndicator(id="context-indicator")
695
+ yield Static("", id="indexing-job-display")
696
+
697
+ def mount_hint(self, markdown: str) -> None:
698
+ hint = HintMessage(message=markdown)
699
+ self.agent_manager.add_hint_message(hint)
700
+
701
+ def _show_pull_hint(self) -> None:
702
+ """Show hint about recently pulled spec from meta.json."""
703
+ # Import at runtime to avoid circular import (CLI -> TUI dependency)
704
+ from shotgun.cli.spec.models import SpecMeta
705
+
706
+ shotgun_dir = get_shotgun_base_path()
707
+ meta_path = shotgun_dir / "meta.json"
708
+ if not meta_path.exists():
709
+ return
710
+
711
+ try:
712
+ meta: SpecMeta = SpecMeta.model_validate_json(meta_path.read_text())
713
+ # Only show if pulled within last 60 seconds
714
+ age_seconds = (datetime.now(timezone.utc) - meta.pulled_at).total_seconds()
715
+ if age_seconds > 60:
716
+ return
717
+
718
+ hint_parts = [f"You just pulled **{meta.spec_name}** from the cloud."]
719
+ if meta.web_url:
720
+ hint_parts.append(f"[View in browser]({meta.web_url})")
721
+ hint_parts.append(
722
+ f"The specs are now located at `{shotgun_dir}` so Shotgun has access to them."
723
+ )
724
+ if meta.backup_path:
725
+ hint_parts.append(
726
+ f"Previous files were backed up to: `{meta.backup_path}`"
727
+ )
728
+ self.mount_hint("\n\n".join(hint_parts))
729
+ except Exception:
730
+ # Ignore errors reading meta.json - this is optional UI feedback
731
+ logger.debug("Failed to read meta.json for pull hint", exc_info=True)
732
+
733
+ def mount_hint_with_email(
734
+ self, markdown_before: str, email: str, markdown_after: str = ""
735
+ ) -> None:
736
+ """Mount a hint with inline email copy button.
737
+
738
+ Args:
739
+ markdown_before: Markdown content to display before the email line
740
+ email: Email address to display with copy button
741
+ markdown_after: Optional markdown content to display after the email line
742
+ """
743
+ hint = HintMessage(
744
+ message=markdown_before, email=email, markdown_after=markdown_after
745
+ )
746
+ self.agent_manager.add_hint_message(hint)
747
+
748
+ @on(PartialResponseMessage)
749
+ def handle_partial_response(self, event: PartialResponseMessage) -> None:
750
+ # Filter event.messages to exclude ModelRequest with only ToolReturnPart
751
+ # These are intermediate tool results that would render as empty (UserQuestionWidget
752
+ # filters out ToolReturnPart in format_prompt_parts), causing user messages to disappear
753
+ filtered_event_messages: list[ModelMessage] = []
754
+ for msg in event.messages:
755
+ if isinstance(msg, ModelRequest):
756
+ # Check if this ModelRequest has any user-visible parts
757
+ has_user_content = any(
758
+ not isinstance(part, ToolReturnPart) for part in msg.parts
759
+ )
760
+ if has_user_content:
761
+ filtered_event_messages.append(msg)
762
+ # Skip ModelRequest with only ToolReturnPart
763
+ else:
764
+ # Keep all ModelResponse and other message types
765
+ filtered_event_messages.append(msg)
766
+
767
+ # Build new message list combining existing messages with new streaming content
768
+ new_message_list = self.messages + cast(
769
+ list[ModelMessage | HintMessage], filtered_event_messages
770
+ )
771
+
772
+ # Use widget coordinator to set partial response
773
+ self.widget_coordinator.set_partial_response(event.message, new_message_list)
774
+
775
+ # Skip context updates for file write operations (they don't add to input context)
776
+ has_file_write = any(
777
+ isinstance(msg, ModelResponse)
778
+ and any(
779
+ isinstance(part, ToolCallPart)
780
+ and part.tool_name in ("write_file", "append_file")
781
+ for part in msg.parts
782
+ )
783
+ for msg in event.messages
784
+ )
785
+
786
+ if has_file_write:
787
+ return # Skip context update for file writes
788
+
789
+ # Throttle context indicator updates to improve performance during streaming
790
+ # Only update at most once per 5 seconds to avoid excessive token calculations
791
+ current_time = time.time()
792
+ if current_time - self._last_context_update >= self._context_update_throttle:
793
+ self._last_context_update = current_time
794
+ # Update context indicator with full message history including streaming messages
795
+ # Combine existing agent history with new streaming messages for accurate token count
796
+ combined_agent_history = self.agent_manager.message_history + event.messages
797
+ self.update_context_indicator_with_messages(
798
+ combined_agent_history, new_message_list
799
+ )
800
+
801
+ def _clear_partial_response(self) -> None:
802
+ # Use widget coordinator to clear partial response
803
+ self.widget_coordinator.set_partial_response(None, self.messages)
804
+
805
+ def _exit_qa_mode(self) -> None:
806
+ """Exit Q&A mode and clean up state."""
807
+ # Track cancellation event
808
+ track_event(
809
+ "qa_mode_cancelled",
810
+ {
811
+ "questions_total": len(self.qa_questions),
812
+ "questions_answered": len(self.qa_answers),
813
+ },
814
+ )
815
+
816
+ # Clear Q&A state
817
+ self.qa_mode = False
818
+ self.qa_questions = []
819
+ self.qa_answers = []
820
+ self.qa_current_index = 0
821
+
822
+ # Show cancellation message
823
+ self.mount_hint("⚠️ Q&A cancelled - You can continue the conversation.")
824
+
825
+ @on(ClarifyingQuestionsMessage)
826
+ def handle_clarifying_questions(self, event: ClarifyingQuestionsMessage) -> None:
827
+ """Handle clarifying questions from agent structured output.
828
+
829
+ Note: Hints are now added synchronously in agent_manager.run() before this
830
+ handler is called, so we only need to set up Q&A mode state here.
831
+ """
832
+ # Clear any streaming partial response (removes final_result JSON)
833
+ self._clear_partial_response()
834
+
835
+ # Enter Q&A mode
836
+ self.qa_mode = True
837
+ self.qa_questions = event.questions
838
+ self.qa_current_index = 0
839
+ self.qa_answers = []
840
+
841
+ @on(MessageHistoryUpdated)
842
+ async def handle_message_history_updated(
843
+ self, event: MessageHistoryUpdated
844
+ ) -> None:
845
+ """Handle message history updates from the agent manager."""
846
+ self._clear_partial_response()
847
+ self.messages = event.messages
848
+
849
+ # Use widget coordinator to refresh placeholder and mode indicator
850
+ self.widget_coordinator.update_prompt_input(
851
+ placeholder=self._placeholder_for_mode(self.mode)
852
+ )
853
+ self.widget_coordinator.refresh_mode_indicator()
854
+
855
+ # Update context indicator
856
+ self.update_context_indicator()
857
+
858
+ # If there are file operations, add a message showing the modified files
859
+ # Skip if hint was already added by agent_manager (e.g., in QA mode)
860
+ if event.file_operations:
861
+ # Check if file operation hint already exists in recent messages
862
+ file_hint_exists = any(
863
+ isinstance(msg, HintMessage)
864
+ and (
865
+ msg.message.startswith("📝 Modified:")
866
+ or msg.message.startswith("📁 Modified")
867
+ )
868
+ for msg in event.messages[-5:] # Check last 5 messages
869
+ )
870
+
871
+ if not file_hint_exists:
872
+ chat_history = self.query_one(ChatHistory)
873
+ if chat_history.vertical_tail:
874
+ tracker = FileOperationTracker(operations=event.file_operations)
875
+ display_path = tracker.get_display_path()
876
+
877
+ if display_path:
878
+ # Create a simple markdown message with the file path
879
+ # The terminal emulator will make this clickable automatically
880
+ path_obj = Path(display_path)
881
+
882
+ if len(event.file_operations) == 1:
883
+ message = f"📝 Modified: `{display_path}`"
884
+ else:
885
+ num_files = len(
886
+ {op.file_path for op in event.file_operations}
887
+ )
888
+ if path_obj.is_dir():
889
+ message = f"📁 Modified {num_files} files in: `{display_path}`"
890
+ else:
891
+ # Common path is a file, show parent directory
892
+ message = f"📁 Modified {num_files} files in: `{path_obj.parent}`"
893
+
894
+ self.mount_hint(message)
895
+
896
+ # Check and display any marketing messages
897
+ from shotgun.tui.app import ShotgunApp
898
+
899
+ app = cast(ShotgunApp, self.app)
900
+ await MarketingManager.check_and_display_messages(
901
+ app.config_manager, event.file_operations, self.mount_hint
902
+ )
903
+
904
+ @on(CompactionStartedMessage)
905
+ def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
906
+ """Update spinner text when compaction starts."""
907
+ # Use widget coordinator to update spinner text
908
+ self.widget_coordinator.update_spinner_text("Compacting Conversation...")
909
+
910
+ @on(CompactionCompletedMessage)
911
+ def handle_compaction_completed(self, event: CompactionCompletedMessage) -> None:
912
+ """Reset spinner text when compaction completes."""
913
+ # Use widget coordinator to update spinner text
914
+ self.widget_coordinator.update_spinner_text("Processing...")
915
+
916
+ async def handle_model_selected(self, result: ModelConfigUpdated | None) -> None:
917
+ """Handle model selection from ModelPickerScreen.
918
+
919
+ Called as a callback when the ModelPickerScreen is dismissed.
920
+
921
+ Args:
922
+ result: ModelConfigUpdated if a model was selected, None if cancelled
923
+ """
924
+ if result is None:
925
+ return
926
+
927
+ try:
928
+ # Update the model configuration in dependencies
929
+ self.deps.llm_model = result.model_config
930
+
931
+ # Update the agent manager's model configuration
932
+ self.agent_manager.deps.llm_model = result.model_config
933
+
934
+ # Reset agents so they get recreated with new model
935
+ self.agent_manager._agents_initialized = False
936
+ self.agent_manager._research_agent = None
937
+ self.agent_manager._plan_agent = None
938
+ self.agent_manager._tasks_agent = None
939
+ self.agent_manager._specify_agent = None
940
+ self.agent_manager._export_agent = None
941
+ self.agent_manager._research_deps = None
942
+ self.agent_manager._plan_deps = None
943
+ self.agent_manager._tasks_deps = None
944
+ self.agent_manager._specify_deps = None
945
+ self.agent_manager._export_deps = None
946
+
947
+ # Get current analysis and update context indicator via coordinator
948
+ analysis = await self.agent_manager.get_context_analysis()
949
+ self.widget_coordinator.update_context_indicator(analysis, result.new_model)
950
+
951
+ # Get model display name for user feedback
952
+ model_spec = MODEL_SPECS.get(result.new_model)
953
+ model_display = (
954
+ model_spec.short_name if model_spec else str(result.new_model)
955
+ )
956
+
957
+ # Format provider information
958
+ key_method = (
959
+ "Shotgun Account" if result.key_provider == "shotgun" else "BYOK"
960
+ )
961
+ provider_display = result.provider.value.title()
962
+
963
+ # Track model switch in telemetry
964
+ track_event(
965
+ "model_switched",
966
+ {
967
+ "old_model": str(result.old_model) if result.old_model else None,
968
+ "new_model": str(result.new_model),
969
+ "provider": result.provider.value,
970
+ "key_provider": result.key_provider.value,
971
+ },
972
+ )
973
+
974
+ # Show confirmation to user with provider info
975
+ self.agent_manager.add_hint_message(
976
+ HintMessage(
977
+ message=f"✓ Switched to {model_display} ({provider_display}, {key_method})"
978
+ )
979
+ )
980
+
981
+ except Exception as e:
982
+ logger.error(f"Failed to handle model selection: {e}")
983
+ self.agent_manager.add_hint_message(
984
+ HintMessage(message=f"⚠ Failed to update model configuration: {e}")
985
+ )
986
+
987
+ @on(PromptInput.Submitted)
988
+ async def handle_submit(self, message: PromptInput.Submitted) -> None:
989
+ text = message.text.strip()
990
+
991
+ # If empty text, just clear input and return
992
+ if not text:
993
+ self.widget_coordinator.update_prompt_input(clear=True)
994
+ self.value = ""
995
+ return
996
+
997
+ # Handle Q&A mode (from structured output clarifying questions)
998
+ if self.qa_mode and self.qa_questions:
999
+ # Collect answer
1000
+ self.qa_answers.append(text)
1001
+
1002
+ # Show answer
1003
+ if len(self.qa_questions) == 1:
1004
+ self.agent_manager.add_hint_message(
1005
+ HintMessage(message=f"**A:** {text}")
1006
+ )
1007
+ else:
1008
+ q_num = self.qa_current_index + 1
1009
+ self.agent_manager.add_hint_message(
1010
+ HintMessage(message=f"**A{q_num}:** {text}")
1011
+ )
1012
+
1013
+ # Move to next or finish
1014
+ self.qa_current_index += 1
1015
+
1016
+ if self.qa_current_index < len(self.qa_questions):
1017
+ # Show next question
1018
+ next_q = self.qa_questions[self.qa_current_index]
1019
+ next_q_num = self.qa_current_index + 1
1020
+ self.agent_manager.add_hint_message(
1021
+ HintMessage(message=f"**Q{next_q_num}:** {next_q}")
1022
+ )
1023
+ else:
1024
+ # All answered - format and send back
1025
+ if len(self.qa_questions) == 1:
1026
+ # Single question - just send the answer
1027
+ formatted_qa = f"Q: {self.qa_questions[0]}\nA: {self.qa_answers[0]}"
1028
+ else:
1029
+ # Multiple questions - format all Q&A pairs
1030
+ formatted_qa = "\n\n".join(
1031
+ f"Q{i + 1}: {q}\nA{i + 1}: {a}"
1032
+ for i, (q, a) in enumerate(
1033
+ zip(self.qa_questions, self.qa_answers, strict=True)
1034
+ )
1035
+ )
1036
+
1037
+ # Exit Q&A mode
1038
+ self.qa_mode = False
1039
+ self.qa_questions = []
1040
+ self.qa_answers = []
1041
+ self.qa_current_index = 0
1042
+
1043
+ # Send answers back to agent
1044
+ self.run_agent(formatted_qa)
1045
+
1046
+ # Clear input
1047
+ self.widget_coordinator.update_prompt_input(clear=True)
1048
+ self.value = ""
1049
+ return
1050
+
1051
+ # Check if it's a command
1052
+ if self.command_handler.is_command(text):
1053
+ success, response = self.command_handler.handle_command(text)
1054
+
1055
+ # Add the command to history
1056
+ self.history.append(message.text)
1057
+
1058
+ # Display the command in chat history
1059
+ user_message = ModelRequest(parts=[UserPromptPart(content=text)])
1060
+ self.messages = self.messages + [user_message]
1061
+
1062
+ # Display the response (help text or error message)
1063
+ response_message = ModelResponse(parts=[TextPart(content=response)])
1064
+ self.messages = self.messages + [response_message]
1065
+
1066
+ # Clear the input
1067
+ self.widget_coordinator.update_prompt_input(clear=True)
1068
+ self.value = ""
1069
+ return
1070
+
1071
+ # Not a command, process as normal
1072
+ self.history.append(message.text)
1073
+
1074
+ # Add user message to agent_manager's history BEFORE running the agent
1075
+ # This ensures immediate visual feedback AND proper deduplication
1076
+ user_message = ModelRequest.user_text_prompt(text)
1077
+ self.agent_manager.ui_message_history.append(user_message)
1078
+ self.messages = self.agent_manager.ui_message_history.copy()
1079
+
1080
+ # Clear the input
1081
+ self.value = ""
1082
+ self.run_agent(text) # Use stripped text
1083
+
1084
+ self.widget_coordinator.update_prompt_input(clear=True)
1085
+
1086
+ def _placeholder_for_mode(self, mode: AgentType, force_new: bool = False) -> str:
1087
+ """Return the placeholder text appropriate for the current mode.
1088
+
1089
+ Args:
1090
+ mode: The current agent mode.
1091
+ force_new: If True, force selection of a new random hint.
1092
+
1093
+ Returns:
1094
+ Dynamic placeholder hint based on mode and progress.
1095
+ """
1096
+ return self.placeholder_hints.get_placeholder_for_mode(mode)
1097
+
1098
+ def index_codebase_command(self) -> None:
1099
+ # Simplified: always index current working directory with its name
1100
+ cur_dir = Path.cwd().resolve()
1101
+ cwd_name = cur_dir.name
1102
+ selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
1103
+ self.call_later(lambda: self.index_codebase(selection))
1104
+
1105
+ def delete_codebase_command(self) -> None:
1106
+ self.app.push_screen(
1107
+ CommandPalette(
1108
+ providers=[DeleteCodebasePaletteProvider],
1109
+ placeholder="Select a codebase to delete…",
1110
+ )
1111
+ )
1112
+
1113
+ def share_specs_command(self) -> None:
1114
+ """Launch the share specs workflow."""
1115
+ self.call_later(lambda: self._start_share_specs_flow())
1116
+
1117
+ @work
1118
+ async def _start_share_specs_flow(self) -> None:
1119
+ """Main workflow for sharing specs to workspace."""
1120
+ # 1. Check preconditions (instant check, no API call)
1121
+ shotgun_dir = Path.cwd() / ".shotgun"
1122
+ if not shotgun_dir.exists():
1123
+ self.mount_hint("No .shotgun/ directory found in current directory")
1124
+ return
1125
+
1126
+ # 2. Show spec selection dialog (handles workspace fetch, permissions, and spec loading)
1127
+ result = await self.app.push_screen_wait(ShareSpecsDialog())
1128
+ if result is None or result.action is None:
1129
+ return # User cancelled or error
1130
+
1131
+ workspace_id = result.workspace_id
1132
+ if not workspace_id:
1133
+ self.mount_hint("Failed to get workspace")
1134
+ return
1135
+
1136
+ # 3. Handle create vs add version
1137
+ if result.action == ShareSpecsAction.CREATE:
1138
+ # Show create spec dialog
1139
+ create_result = await self.app.push_screen_wait(CreateSpecDialog())
1140
+ if create_result is None:
1141
+ return # User cancelled
1142
+
1143
+ # Pass spec creation info to UploadProgressScreen
1144
+ # It will create the spec/version and then upload
1145
+ upload_result = await self.app.push_screen_wait(
1146
+ UploadProgressScreen(
1147
+ workspace_id,
1148
+ spec_name=create_result.name,
1149
+ spec_description=create_result.description,
1150
+ spec_is_public=create_result.is_public,
1151
+ )
1152
+ )
1153
+
1154
+ else: # add_version
1155
+ spec_id = result.spec_id
1156
+ if not spec_id:
1157
+ self.mount_hint("No spec selected")
1158
+ return
1159
+
1160
+ # Pass spec_id to UploadProgressScreen
1161
+ # It will create the version and then upload
1162
+ upload_result = await self.app.push_screen_wait(
1163
+ UploadProgressScreen(workspace_id, spec_id=spec_id)
1164
+ )
1165
+
1166
+ # 7. Show result
1167
+ if upload_result and upload_result.success:
1168
+ if upload_result.web_url:
1169
+ self.mount_hint(
1170
+ f"Specs shared successfully!\n\nView at: {upload_result.web_url}"
1171
+ )
1172
+ else:
1173
+ self.mount_hint("Specs shared successfully!")
1174
+ elif upload_result and upload_result.cancelled:
1175
+ self.mount_hint("Upload cancelled")
1176
+ # Error case is handled by the upload screen
1177
+
1178
+ def delete_codebase_from_palette(self, graph_id: str) -> None:
1179
+ stack = getattr(self.app, "screen_stack", None)
1180
+ if stack and isinstance(stack[-1], CommandPalette):
1181
+ self.app.pop_screen()
1182
+
1183
+ self.call_later(lambda: self.delete_codebase(graph_id))
1184
+
1185
+ @work
1186
+ async def delete_codebase(self, graph_id: str) -> None:
1187
+ try:
1188
+ await self.codebase_sdk.delete_codebase(graph_id)
1189
+ self.agent_manager.add_hint_message(
1190
+ HintMessage(message=f"✓ Deleted codebase: {graph_id}")
1191
+ )
1192
+ except CodebaseNotFoundError as exc:
1193
+ self.agent_manager.add_hint_message(HintMessage(message=f"❌ {exc}"))
1194
+ except Exception as exc: # pragma: no cover - defensive UI path
1195
+ self.agent_manager.add_hint_message(
1196
+ HintMessage(message=f"❌ Failed to delete codebase: {exc}")
1197
+ )
1198
+
1199
+ def _is_kuzu_corruption_error(self, exception: Exception) -> bool:
1200
+ """Check if error is related to kuzu database corruption.
1201
+
1202
+ Args:
1203
+ exception: The exception to check
1204
+
1205
+ Returns:
1206
+ True if the error indicates kuzu database corruption
1207
+ """
1208
+ error_str = str(exception).lower()
1209
+ error_indicators = [
1210
+ "not a directory",
1211
+ "errno 20",
1212
+ "corrupted",
1213
+ ".kuzu",
1214
+ "ioexception",
1215
+ "unordered_map", # C++ STL map errors from kuzu
1216
+ "key not found", # unordered_map::at errors
1217
+ "std::exception", # Generic C++ exceptions from kuzu
1218
+ ]
1219
+ return any(indicator in error_str for indicator in error_indicators)
1220
+
1221
+ @work
1222
+ async def index_codebase(self, selection: CodebaseIndexSelection) -> None:
1223
+ index_start_time = time.time()
1224
+
1225
+ label = self.query_one("#indexing-job-display", Static)
1226
+ label.update(
1227
+ f"[$foreground-muted]Indexing codebase: [bold $text-accent]{selection.name}[/][/]"
1228
+ )
1229
+ label.refresh()
1230
+
1231
+ def create_progress_bar(percentage: float, width: int = 20) -> str:
1232
+ """Create a visual progress bar using Unicode block characters."""
1233
+ filled = int((percentage / 100) * width)
1234
+ empty = width - filled
1235
+ return "▓" * filled + "░" * empty
1236
+
1237
+ # Spinner animation frames
1238
+ spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
1239
+
1240
+ # Progress state (shared between timer and progress callback)
1241
+ progress_state: dict[str, int | float] = {
1242
+ "frame_index": 0,
1243
+ "percentage": 0.0,
1244
+ }
1245
+
1246
+ def update_progress_display() -> None:
1247
+ """Update progress bar on timer - runs every 100ms."""
1248
+ # Advance spinner frame
1249
+ frame_idx = int(progress_state["frame_index"])
1250
+ progress_state["frame_index"] = (frame_idx + 1) % len(spinner_frames)
1251
+ spinner = spinner_frames[frame_idx]
1252
+
1253
+ # Get current state
1254
+ pct = float(progress_state["percentage"])
1255
+ bar = create_progress_bar(pct)
1256
+
1257
+ # Update label
1258
+ label.update(
1259
+ f"[$foreground-muted]Indexing codebase: {spinner} {bar} {pct:.0f}%[/]"
1260
+ )
1261
+
1262
+ def progress_callback(progress_info: IndexProgress) -> None:
1263
+ """Update progress state (timer renders it independently)."""
1264
+ # Calculate overall percentage with weights based on actual timing:
1265
+ # Structure: 0-2%, Definitions: 2-18%, Relationships: 18-20%
1266
+ # Flush nodes: 20-28%, Flush relationships: 28-100%
1267
+ if progress_info.phase == ProgressPhase.STRUCTURE:
1268
+ # Phase 1: 0-2% (actual: ~0%)
1269
+ overall_pct = 2.0 if progress_info.phase_complete else 1.0
1270
+ elif progress_info.phase == ProgressPhase.DEFINITIONS:
1271
+ # Phase 2: 2-18% based on files processed (actual: ~16%)
1272
+ if progress_info.total and progress_info.total > 0:
1273
+ phase_pct = (progress_info.current / progress_info.total) * 16.0
1274
+ overall_pct = 2.0 + phase_pct
1275
+ else:
1276
+ overall_pct = 2.0
1277
+ elif progress_info.phase == ProgressPhase.RELATIONSHIPS:
1278
+ # Phase 3: 18-20% based on relationships processed (actual: ~0.3%)
1279
+ if progress_info.total and progress_info.total > 0:
1280
+ phase_pct = (progress_info.current / progress_info.total) * 2.0
1281
+ overall_pct = 18.0 + phase_pct
1282
+ else:
1283
+ overall_pct = 18.0
1284
+ elif progress_info.phase == ProgressPhase.FLUSH_NODES:
1285
+ # Phase 4: 20-28% based on nodes flushed (actual: ~7.5%)
1286
+ if progress_info.total and progress_info.total > 0:
1287
+ phase_pct = (progress_info.current / progress_info.total) * 8.0
1288
+ overall_pct = 20.0 + phase_pct
1289
+ else:
1290
+ overall_pct = 20.0
1291
+ elif progress_info.phase == ProgressPhase.FLUSH_RELATIONSHIPS:
1292
+ # Phase 5: 28-100% based on relationships flushed (actual: ~76%)
1293
+ if progress_info.total and progress_info.total > 0:
1294
+ phase_pct = (progress_info.current / progress_info.total) * 72.0
1295
+ overall_pct = 28.0 + phase_pct
1296
+ else:
1297
+ overall_pct = 28.0
1298
+ else:
1299
+ overall_pct = 0.0
1300
+
1301
+ # Update shared state (timer will render it)
1302
+ progress_state["percentage"] = overall_pct
1303
+
1304
+ # Start progress animation timer (10 fps = 100ms interval)
1305
+ progress_timer = self.set_interval(0.1, update_progress_display)
1306
+
1307
+ # Retry logic for handling kuzu corruption
1308
+ max_retries = 3
1309
+
1310
+ for attempt in range(max_retries):
1311
+ try:
1312
+ # Clean up corrupted DBs before retry (skip on first attempt)
1313
+ if attempt > 0:
1314
+ logger.info(
1315
+ f"Retry attempt {attempt + 1}/{max_retries} - cleaning up corrupted databases"
1316
+ )
1317
+ manager = CodebaseGraphManager(
1318
+ self.codebase_sdk.service.storage_dir
1319
+ )
1320
+ cleaned = await manager.cleanup_corrupted_databases()
1321
+ logger.info(f"Cleaned up {len(cleaned)} corrupted database(s)")
1322
+ self.agent_manager.add_hint_message(
1323
+ HintMessage(
1324
+ message=f"🔄 Retrying indexing after cleanup (attempt {attempt + 1}/{max_retries})..."
1325
+ )
1326
+ )
1327
+
1328
+ # Pass the current working directory as the indexed_from_cwd
1329
+ logger.debug(
1330
+ f"Starting indexing - repo_path: {selection.repo_path}, "
1331
+ f"name: {selection.name}, cwd: {Path.cwd().resolve()}"
1332
+ )
1333
+ result = await self.codebase_sdk.index_codebase(
1334
+ selection.repo_path,
1335
+ selection.name,
1336
+ indexed_from_cwd=str(Path.cwd().resolve()),
1337
+ progress_callback=progress_callback,
1338
+ )
1339
+
1340
+ # Success! Stop progress animation
1341
+ progress_timer.stop()
1342
+
1343
+ # Show 100% completion after indexing finishes
1344
+ final_bar = create_progress_bar(100.0)
1345
+ label.update(
1346
+ f"[$foreground-muted]Indexing codebase: {final_bar} 100%[/]"
1347
+ )
1348
+ label.refresh()
1349
+
1350
+ # Calculate duration and format message
1351
+ duration = time.time() - index_start_time
1352
+ duration_str = _format_duration(duration)
1353
+ entity_count = result.node_count + result.relationship_count
1354
+ entity_str = _format_count(entity_count)
1355
+
1356
+ logger.info(
1357
+ f"Successfully indexed codebase '{result.name}' in {duration_str} "
1358
+ f"({entity_count} entities)"
1359
+ )
1360
+ self.agent_manager.add_hint_message(
1361
+ HintMessage(
1362
+ message=f"✓ Indexed '{result.name}' in {duration_str} ({entity_str} entities)"
1363
+ )
1364
+ )
1365
+ break # Success - exit retry loop
1366
+
1367
+ except CodebaseAlreadyIndexedError as exc:
1368
+ progress_timer.stop()
1369
+ logger.warning(f"Codebase already indexed: {exc}")
1370
+ self.agent_manager.add_hint_message(HintMessage(message=f"⚠️ {exc}"))
1371
+ return
1372
+ except InvalidPathError as exc:
1373
+ progress_timer.stop()
1374
+ logger.error(f"Invalid path error: {exc}")
1375
+ self.agent_manager.add_hint_message(HintMessage(message=f"❌ {exc}"))
1376
+ return
1377
+
1378
+ except Exception as exc: # pragma: no cover - defensive UI path
1379
+ # Check if this is a kuzu corruption error and we have retries left
1380
+ if attempt < max_retries - 1 and self._is_kuzu_corruption_error(exc):
1381
+ logger.warning(
1382
+ f"Kuzu corruption detected on attempt {attempt + 1}/{max_retries}: {exc}. "
1383
+ f"Will retry after cleanup..."
1384
+ )
1385
+ # Exponential backoff: 1s, 2s
1386
+ await asyncio.sleep(2**attempt)
1387
+ continue
1388
+
1389
+ # Either final retry failed OR not a corruption error - show error
1390
+ logger.exception(
1391
+ f"Failed to index codebase after {attempt + 1} attempts - "
1392
+ f"repo_path: {selection.repo_path}, name: {selection.name}, error: {exc}"
1393
+ )
1394
+ self.agent_manager.add_hint_message(
1395
+ HintMessage(
1396
+ message=f"❌ Failed to index codebase after {attempt + 1} attempts: {exc}"
1397
+ )
1398
+ )
1399
+ break
1400
+
1401
+ # Always stop the progress timer and clean up label
1402
+ progress_timer.stop()
1403
+ label.update("")
1404
+ label.refresh()
1405
+
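(Illustration, not part of the diff.) To make the weighting comments in progress_callback concrete: each phase maps its local completion onto a fixed slice of the overall bar (2 + 16 + 2 + 8 + 72 points), and the bar string simply fills a 20-character block row. A rough standalone sketch, with illustrative function names:

def overall_percent(phase_start: float, phase_weight: float, current: int, total: int) -> float:
    # e.g. DEFINITIONS owns the 2-18% slice, FLUSH_RELATIONSHIPS the 28-100% slice
    return phase_start + (current / total) * phase_weight

print(overall_percent(2.0, 16.0, 50, 100))     # DEFINITIONS halfway         -> 10.0
print(overall_percent(28.0, 72.0, 500, 1000))  # FLUSH_RELATIONSHIPS halfway -> 64.0

def bar(percentage: float, width: int = 20) -> str:
    filled = int((percentage / 100) * width)
    return "▓" * filled + "░" * (width - filled)

print(bar(64.0))  # ▓▓▓▓▓▓▓▓▓▓▓▓░░░░░░░░  (12 of 20 blocks filled)

The retry loop in the same method sleeps 2**attempt seconds between attempts, i.e. 1s after the first failure and 2s after the second, before the third and final try.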
1406
+ @work
1407
+ async def run_agent(self, message: str) -> None:
1408
+ # Start processing with spinner
1409
+ from textual.worker import get_current_worker
1410
+
1411
+ self.processing_state.start_processing("Processing...")
1412
+ self.processing_state.bind_worker(get_current_worker())
1413
+
1414
+ # Start context indicator animation immediately
1415
+ self.widget_coordinator.set_context_streaming(True)
1416
+
1417
+ try:
1418
+ # Use unified agent runner - exceptions propagate for handling
1419
+ runner = AgentRunner(self.agent_manager)
1420
+ await runner.run(message)
1421
+ except ShotgunAccountException as e:
1422
+ # Shotgun Account errors show contact email UI
1423
+ message_parts = e.to_markdown().split("**Need help?**")
1424
+ if len(message_parts) == 2:
1425
+ markdown_before = message_parts[0] + "**Need help?**"
1426
+ markdown_after = message_parts[1].strip()
1427
+ self.mount_hint_with_email(
1428
+ markdown_before=markdown_before,
1429
+ email=SHOTGUN_CONTACT_EMAIL,
1430
+ markdown_after=markdown_after,
1431
+ )
1432
+ else:
1433
+ # Fallback if message format is unexpected
1434
+ self.mount_hint(e.to_markdown())
1435
+ except ErrorNotPickedUpBySentry as e:
1436
+ # All other user-actionable errors - display with markdown
1437
+ self.mount_hint(e.to_markdown())
1438
+ except Exception as e:
1439
+ # Unexpected errors that weren't wrapped (shouldn't happen)
1440
+ logger.exception("Unexpected error in run_agent")
1441
+ self.mount_hint(f"⚠️ An unexpected error occurred: {str(e)}")
1442
+ finally:
1443
+ self.processing_state.stop_processing()
1444
+ # Stop context indicator animation
1445
+ self.widget_coordinator.set_context_streaming(False)
1446
+
1447
+ # Check for low balance after agent loop completes (only for Shotgun Account)
1448
+ # This runs after processing but doesn't interfere with Q&A mode
1449
+ if self.deps.llm_model.is_shotgun_account:
1450
+ await self._check_low_balance_warning()
1451
+
1452
+ # Save conversation after each interaction
1453
+ self._save_conversation()
1454
+
1455
+ self.widget_coordinator.update_prompt_input(focus=True)
1456
+
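(Illustration, not part of the diff.) run_agent splits the ShotgunAccountException markdown on the literal "**Need help?**" marker so the contact-email widget can be mounted between the two halves. A small standalone example of that split; the message text here is hypothetical:

markdown = "**Insufficient balance.**\n\n**Need help?** Reach out and we'll sort it out."
parts = markdown.split("**Need help?**")
before = parts[0] + "**Need help?**"  # rendered above the e-mail widget
after = parts[1].strip()              # rendered below it
print(before)
print(after)  # Reach out and we'll sort it out.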
1457
+ def _save_conversation(self) -> None:
1458
+ """Save the current conversation to persistent storage."""
1459
+ # Use conversation service for saving (run async in background)
1460
+ # Use exclusive=True to prevent concurrent saves that can cause file contention
1461
+ self.run_worker(
1462
+ self.conversation_service.save_conversation(self.agent_manager),
1463
+ exclusive=True,
1464
+ )
1465
+
1466
+ async def _check_low_balance_warning(self) -> None:
1467
+ """Check account balance and show warning if $2.50 or less remaining.
1468
+
1469
+ This runs after every agent loop completion for Shotgun Account users.
1470
+ Errors are silently caught to avoid disrupting user workflow.
1471
+ """
1472
+ try:
1473
+ from shotgun.llm_proxy import LiteLLMProxyClient
1474
+
1475
+ client = LiteLLMProxyClient(self.deps.llm_model.api_key)
1476
+ budget_info = await client.get_budget_info()
1477
+
1478
+ # Show warning if remaining balance is $2.50 or less
1479
+ if budget_info.remaining <= 2.50:
1480
+ warning_message = (
1481
+ f"⚠️ **Low Balance Warning**\n\n"
1482
+ f"Your Shotgun Account has **${budget_info.remaining:.2f}** remaining.\n\n"
1483
+ f"👉 **[Top Up Now at https://app.shotgun.sh/dashboard](https://app.shotgun.sh/dashboard)**"
1484
+ )
1485
+ self.agent_manager.add_hint_message(
1486
+ HintMessage(message=warning_message)
1487
+ )
1488
+ except Exception as e:
1489
+ # Silently log and continue - don't block user workflow
1490
+ logger.debug(f"Failed to check low balance warning: {e}")
1491
+
1492
+ async def _check_and_load_conversation(self) -> None:
1493
+ """Check if conversation exists and load it if it does."""
1494
+ if await self.conversation_manager.exists():
1495
+ self._load_conversation()
1496
+
1497
+ def _load_conversation(self) -> None:
1498
+ """Load conversation from persistent storage."""
1499
+
1500
+ # Use conversation service for restoration (run async)
1501
+ async def _do_load() -> None:
1502
+ (
1503
+ success,
1504
+ error_msg,
1505
+ restored_type,
1506
+ ) = await self.conversation_service.restore_conversation(
1507
+ self.agent_manager, self.deps.usage_manager
1508
+ )
1509
+
1510
+ if not success and error_msg:
1511
+ self.mount_hint(error_msg)
1512
+ elif success and restored_type:
1513
+ # Update the current mode to match restored conversation
1514
+ self.mode = restored_type
1515
+
1516
+ self.run_worker(_do_load(), exclusive=False)
1517
+
1518
+ @work
1519
+ async def _check_and_show_onboarding(self) -> None:
1520
+ """Check if onboarding should be shown and display modal if needed."""
1521
+ config_manager = get_config_manager()
1522
+ config = await config_manager.load()
1523
+
1524
+ # Only show onboarding if it hasn't been shown before
1525
+ if config.shown_onboarding_popup is None:
1526
+ # Show the onboarding modal
1527
+ await self.app.push_screen_wait(OnboardingModal())
1528
+
1529
+ # Mark as shown in config with current timestamp
1530
+ config.shown_onboarding_popup = datetime.now(timezone.utc)
1531
+ await config_manager.save(config)