shotgun-sh 0.1.14__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic; review the changed files below for more details.

Files changed (143)
  1. shotgun/agents/agent_manager.py +715 -75
  2. shotgun/agents/common.py +80 -75
  3. shotgun/agents/config/constants.py +21 -10
  4. shotgun/agents/config/manager.py +322 -97
  5. shotgun/agents/config/models.py +114 -84
  6. shotgun/agents/config/provider.py +232 -88
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +471 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +57 -19
  14. shotgun/agents/export.py +6 -7
  15. shotgun/agents/history/compaction.py +10 -5
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +129 -12
  18. shotgun/agents/history/token_counting/__init__.py +31 -0
  19. shotgun/agents/history/token_counting/anthropic.py +127 -0
  20. shotgun/agents/history/token_counting/base.py +78 -0
  21. shotgun/agents/history/token_counting/openai.py +90 -0
  22. shotgun/agents/history/token_counting/sentencepiece_counter.py +127 -0
  23. shotgun/agents/history/token_counting/tokenizer_cache.py +92 -0
  24. shotgun/agents/history/token_counting/utils.py +144 -0
  25. shotgun/agents/history/token_estimation.py +12 -12
  26. shotgun/agents/llm.py +62 -0
  27. shotgun/agents/models.py +59 -4
  28. shotgun/agents/plan.py +6 -7
  29. shotgun/agents/research.py +7 -8
  30. shotgun/agents/specify.py +6 -7
  31. shotgun/agents/tasks.py +6 -7
  32. shotgun/agents/tools/__init__.py +0 -2
  33. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  34. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  35. shotgun/agents/tools/codebase/file_read.py +11 -2
  36. shotgun/agents/tools/codebase/query_graph.py +6 -0
  37. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  38. shotgun/agents/tools/file_management.py +82 -16
  39. shotgun/agents/tools/registry.py +217 -0
  40. shotgun/agents/tools/web_search/__init__.py +55 -16
  41. shotgun/agents/tools/web_search/anthropic.py +76 -51
  42. shotgun/agents/tools/web_search/gemini.py +50 -27
  43. shotgun/agents/tools/web_search/openai.py +26 -17
  44. shotgun/agents/tools/web_search/utils.py +2 -2
  45. shotgun/agents/usage_manager.py +164 -0
  46. shotgun/api_endpoints.py +15 -0
  47. shotgun/cli/clear.py +53 -0
  48. shotgun/cli/compact.py +186 -0
  49. shotgun/cli/config.py +41 -67
  50. shotgun/cli/context.py +111 -0
  51. shotgun/cli/export.py +1 -1
  52. shotgun/cli/feedback.py +50 -0
  53. shotgun/cli/models.py +3 -2
  54. shotgun/cli/plan.py +1 -1
  55. shotgun/cli/research.py +1 -1
  56. shotgun/cli/specify.py +1 -1
  57. shotgun/cli/tasks.py +1 -1
  58. shotgun/cli/update.py +16 -2
  59. shotgun/codebase/core/change_detector.py +5 -3
  60. shotgun/codebase/core/code_retrieval.py +4 -2
  61. shotgun/codebase/core/ingestor.py +57 -16
  62. shotgun/codebase/core/manager.py +20 -7
  63. shotgun/codebase/core/nl_query.py +1 -1
  64. shotgun/codebase/models.py +4 -4
  65. shotgun/exceptions.py +32 -0
  66. shotgun/llm_proxy/__init__.py +19 -0
  67. shotgun/llm_proxy/clients.py +44 -0
  68. shotgun/llm_proxy/constants.py +15 -0
  69. shotgun/logging_config.py +18 -27
  70. shotgun/main.py +91 -12
  71. shotgun/posthog_telemetry.py +81 -10
  72. shotgun/prompts/agents/export.j2 +18 -1
  73. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  74. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  75. shotgun/prompts/agents/plan.j2 +1 -1
  76. shotgun/prompts/agents/research.j2 +1 -1
  77. shotgun/prompts/agents/specify.j2 +270 -3
  78. shotgun/prompts/agents/state/system_state.j2 +4 -0
  79. shotgun/prompts/agents/tasks.j2 +1 -1
  80. shotgun/prompts/loader.py +2 -2
  81. shotgun/prompts/tools/web_search.j2 +14 -0
  82. shotgun/sentry_telemetry.py +27 -18
  83. shotgun/settings.py +238 -0
  84. shotgun/shotgun_web/__init__.py +19 -0
  85. shotgun/shotgun_web/client.py +138 -0
  86. shotgun/shotgun_web/constants.py +21 -0
  87. shotgun/shotgun_web/models.py +47 -0
  88. shotgun/telemetry.py +24 -36
  89. shotgun/tui/app.py +251 -23
  90. shotgun/tui/commands/__init__.py +1 -1
  91. shotgun/tui/components/context_indicator.py +179 -0
  92. shotgun/tui/components/mode_indicator.py +70 -0
  93. shotgun/tui/components/status_bar.py +48 -0
  94. shotgun/tui/containers.py +91 -0
  95. shotgun/tui/dependencies.py +39 -0
  96. shotgun/tui/protocols.py +45 -0
  97. shotgun/tui/screens/chat/__init__.py +5 -0
  98. shotgun/tui/screens/chat/chat.tcss +54 -0
  99. shotgun/tui/screens/chat/chat_screen.py +1234 -0
  100. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  101. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  102. shotgun/tui/screens/chat/help_text.py +40 -0
  103. shotgun/tui/screens/chat/prompt_history.py +48 -0
  104. shotgun/tui/screens/chat.tcss +11 -0
  105. shotgun/tui/screens/chat_screen/command_providers.py +226 -11
  106. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  107. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  108. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  109. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  110. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  111. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  112. shotgun/tui/screens/confirmation_dialog.py +151 -0
  113. shotgun/tui/screens/feedback.py +193 -0
  114. shotgun/tui/screens/github_issue.py +102 -0
  115. shotgun/tui/screens/model_picker.py +352 -0
  116. shotgun/tui/screens/onboarding.py +431 -0
  117. shotgun/tui/screens/pipx_migration.py +153 -0
  118. shotgun/tui/screens/provider_config.py +156 -39
  119. shotgun/tui/screens/shotgun_auth.py +295 -0
  120. shotgun/tui/screens/welcome.py +198 -0
  121. shotgun/tui/services/__init__.py +5 -0
  122. shotgun/tui/services/conversation_service.py +184 -0
  123. shotgun/tui/state/__init__.py +7 -0
  124. shotgun/tui/state/processing_state.py +185 -0
  125. shotgun/tui/utils/mode_progress.py +14 -7
  126. shotgun/tui/widgets/__init__.py +5 -0
  127. shotgun/tui/widgets/widget_coordinator.py +262 -0
  128. shotgun/utils/datetime_utils.py +77 -0
  129. shotgun/utils/env_utils.py +13 -0
  130. shotgun/utils/file_system_utils.py +22 -2
  131. shotgun/utils/marketing.py +110 -0
  132. shotgun/utils/update_checker.py +69 -14
  133. shotgun_sh-0.2.11.dist-info/METADATA +130 -0
  134. shotgun_sh-0.2.11.dist-info/RECORD +194 -0
  135. {shotgun_sh-0.1.14.dist-info → shotgun_sh-0.2.11.dist-info}/entry_points.txt +1 -0
  136. {shotgun_sh-0.1.14.dist-info → shotgun_sh-0.2.11.dist-info}/licenses/LICENSE +1 -1
  137. shotgun/agents/history/token_counting.py +0 -429
  138. shotgun/agents/tools/user_interaction.py +0 -37
  139. shotgun/tui/screens/chat.py +0 -797
  140. shotgun/tui/screens/chat_screen/history.py +0 -350
  141. shotgun_sh-0.1.14.dist-info/METADATA +0 -466
  142. shotgun_sh-0.1.14.dist-info/RECORD +0 -133
  143. {shotgun_sh-0.1.14.dist-info → shotgun_sh-0.2.11.dist-info}/WHEEL +0 -0
@@ -0,0 +1,1234 @@
1
+ """Main chat screen implementation."""
2
+
3
+ import asyncio
4
+ import logging
5
+ from datetime import datetime, timezone
6
+ from pathlib import Path
7
+ from typing import cast
8
+
9
+ from pydantic_ai.messages import (
10
+ ModelMessage,
11
+ ModelRequest,
12
+ ModelResponse,
13
+ TextPart,
14
+ ToolReturnPart,
15
+ UserPromptPart,
16
+ )
17
+ from textual import events, on, work
18
+ from textual.app import ComposeResult
19
+ from textual.command import CommandPalette
20
+ from textual.containers import Container, Grid
21
+ from textual.keys import Keys
22
+ from textual.reactive import reactive
23
+ from textual.screen import Screen
24
+ from textual.widgets import Static
25
+
26
+ from shotgun.agents.agent_manager import (
27
+ AgentManager,
28
+ ClarifyingQuestionsMessage,
29
+ CompactionCompletedMessage,
30
+ CompactionStartedMessage,
31
+ MessageHistoryUpdated,
32
+ ModelConfigUpdated,
33
+ PartialResponseMessage,
34
+ )
35
+ from shotgun.agents.config import get_config_manager
36
+ from shotgun.agents.config.models import MODEL_SPECS
37
+ from shotgun.agents.conversation_manager import ConversationManager
38
+ from shotgun.agents.history.compaction import apply_persistent_compaction
39
+ from shotgun.agents.history.token_estimation import estimate_tokens_from_messages
40
+ from shotgun.agents.models import (
41
+ AgentDeps,
42
+ AgentType,
43
+ FileOperationTracker,
44
+ )
45
+ from shotgun.codebase.core.manager import (
46
+ CodebaseAlreadyIndexedError,
47
+ CodebaseGraphManager,
48
+ )
49
+ from shotgun.codebase.models import IndexProgress, ProgressPhase
50
+ from shotgun.exceptions import ContextSizeLimitExceeded
51
+ from shotgun.posthog_telemetry import track_event
52
+ from shotgun.sdk.codebase import CodebaseSDK
53
+ from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
54
+ from shotgun.tui.commands import CommandHandler
55
+ from shotgun.tui.components.context_indicator import ContextIndicator
56
+ from shotgun.tui.components.mode_indicator import ModeIndicator
57
+ from shotgun.tui.components.prompt_input import PromptInput
58
+ from shotgun.tui.components.spinner import Spinner
59
+ from shotgun.tui.components.status_bar import StatusBar
60
+ from shotgun.tui.screens.chat.codebase_index_prompt_screen import (
61
+ CodebaseIndexPromptScreen,
62
+ )
63
+ from shotgun.tui.screens.chat.codebase_index_selection import CodebaseIndexSelection
64
+ from shotgun.tui.screens.chat.help_text import (
65
+ help_text_empty_dir,
66
+ help_text_with_codebase,
67
+ )
68
+ from shotgun.tui.screens.chat.prompt_history import PromptHistory
69
+ from shotgun.tui.screens.chat_screen.command_providers import (
70
+ DeleteCodebasePaletteProvider,
71
+ UnifiedCommandProvider,
72
+ )
73
+ from shotgun.tui.screens.chat_screen.hint_message import HintMessage
74
+ from shotgun.tui.screens.chat_screen.history import ChatHistory
75
+ from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
76
+ from shotgun.tui.screens.onboarding import OnboardingModal
77
+ from shotgun.tui.services.conversation_service import ConversationService
78
+ from shotgun.tui.state.processing_state import ProcessingStateManager
79
+ from shotgun.tui.utils.mode_progress import PlaceholderHints
80
+ from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
81
+ from shotgun.utils import get_shotgun_home
82
+ from shotgun.utils.marketing import MarketingManager
83
+
84
+ logger = logging.getLogger(__name__)
85
+
86
+
87
+ class ChatScreen(Screen[None]):
88
+ CSS_PATH = "chat.tcss"
89
+
90
+ BINDINGS = [
91
+ ("ctrl+p", "command_palette", "Command Palette"),
92
+ ("shift+tab", "toggle_mode", "Toggle mode"),
93
+ ("ctrl+u", "show_usage", "Show usage"),
94
+ ]
95
+
96
+ COMMANDS = {
97
+ UnifiedCommandProvider,
98
+ }
99
+
100
+ value = reactive("")
101
+ mode = reactive(AgentType.RESEARCH)
102
+ history: PromptHistory = PromptHistory()
103
+ messages = reactive(list[ModelMessage | HintMessage]())
104
+ indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
105
+ partial_message: reactive[ModelMessage | None] = reactive(None)
106
+
107
+ # Q&A mode state (for structured output clarifying questions)
108
+ qa_mode = reactive(False)
109
+ qa_questions: list[str] = []
110
+ qa_current_index = reactive(0)
111
+ qa_answers: list[str] = []
112
+
113
+ # Working state - keep reactive for Textual watchers
114
+ working = reactive(False)
115
+
116
+ def __init__(
117
+ self,
118
+ agent_manager: AgentManager,
119
+ conversation_manager: ConversationManager,
120
+ conversation_service: ConversationService,
121
+ widget_coordinator: WidgetCoordinator,
122
+ processing_state: ProcessingStateManager,
123
+ command_handler: CommandHandler,
124
+ placeholder_hints: PlaceholderHints,
125
+ codebase_sdk: CodebaseSDK,
126
+ deps: AgentDeps,
127
+ continue_session: bool = False,
128
+ force_reindex: bool = False,
129
+ ) -> None:
130
+ """Initialize the ChatScreen.
131
+
132
+ All dependencies must be provided via dependency injection.
133
+ No objects are created in the constructor.
134
+
135
+ Args:
136
+ agent_manager: AgentManager instance for managing agent interactions
137
+ conversation_manager: ConversationManager for conversation persistence
138
+ conversation_service: ConversationService for conversation save/load/restore
139
+ widget_coordinator: WidgetCoordinator for centralized widget updates
140
+ processing_state: ProcessingStateManager for managing processing state
141
+ command_handler: CommandHandler for handling slash commands
142
+ placeholder_hints: PlaceholderHints for providing input hints
143
+ codebase_sdk: CodebaseSDK for codebase indexing operations
144
+ deps: AgentDeps configuration for agent dependencies
145
+ continue_session: Whether to continue a previous session
146
+ force_reindex: Whether to force reindexing of codebases
147
+ """
148
+ super().__init__()
149
+
150
+ # All dependencies are now required and injected
151
+ self.deps = deps
152
+ self.codebase_sdk = codebase_sdk
153
+ self.agent_manager = agent_manager
154
+ self.command_handler = command_handler
155
+ self.placeholder_hints = placeholder_hints
156
+ self.conversation_manager = conversation_manager
157
+ self.conversation_service = conversation_service
158
+ self.widget_coordinator = widget_coordinator
159
+ self.processing_state = processing_state
160
+ self.continue_session = continue_session
161
+ self.force_reindex = force_reindex
162
+
163
+ def on_mount(self) -> None:
164
+ # Use widget coordinator to focus input
165
+ self.widget_coordinator.update_prompt_input(focus=True)
166
+ # Hide spinner initially
167
+ self.query_one("#spinner").display = False
168
+
169
+ # Bind spinner to processing state manager
170
+ self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))
171
+
172
+ # Load conversation history if --continue flag was provided
173
+ # Use call_later to handle async exists() check
174
+ if self.continue_session:
175
+ self.call_later(self._check_and_load_conversation)
176
+
177
+ self.call_later(self.check_if_codebase_is_indexed)
178
+ # Initial update of context indicator
179
+ self.update_context_indicator()
180
+
181
+ # Show onboarding popup if not shown before
182
+ self.call_later(self._check_and_show_onboarding)
183
+
184
+ async def on_key(self, event: events.Key) -> None:
185
+ """Handle key presses for cancellation."""
186
+ # If escape is pressed during Q&A mode, exit Q&A
187
+ if event.key in (Keys.Escape, Keys.ControlC) and self.qa_mode:
188
+ self._exit_qa_mode()
189
+ # Re-enable the input
190
+ self.widget_coordinator.update_prompt_input(focus=True)
191
+ # Prevent the event from propagating (don't quit the app)
192
+ event.stop()
193
+ return
194
+
195
+ # If escape or ctrl+c is pressed while agent is working, cancel the operation
196
+ if event.key in (Keys.Escape, Keys.ControlC):
197
+ if self.processing_state.cancel_current_operation(cancel_key=event.key):
198
+ # Show cancellation message
199
+ self.mount_hint("⚠️ Cancelling operation...")
200
+ # Re-enable the input
201
+ self.widget_coordinator.update_prompt_input(focus=True)
202
+ # Prevent the event from propagating (don't quit the app)
203
+ event.stop()
204
+
205
+ @work
206
+ async def check_if_codebase_is_indexed(self) -> None:
207
+ cur_dir = Path.cwd().resolve()
208
+ is_empty = all(
209
+ dir.is_dir() and dir.name in ["__pycache__", ".git", ".shotgun"]
210
+ for dir in cur_dir.iterdir()
211
+ )
212
+ if is_empty or self.continue_session:
213
+ return
214
+
215
+ # If force_reindex is True, delete any existing graphs for this directory
216
+ if self.force_reindex:
217
+ accessible_graphs = (
218
+ await self.codebase_sdk.list_codebases_for_directory()
219
+ ).graphs
220
+ for graph in accessible_graphs:
221
+ try:
222
+ await self.codebase_sdk.delete_codebase(graph.graph_id)
223
+ logger.info(
224
+ f"Deleted existing graph {graph.graph_id} due to --force-reindex"
225
+ )
226
+ except Exception as e:
227
+ logger.warning(
228
+ f"Failed to delete graph {graph.graph_id} during force reindex: {e}"
229
+ )
230
+
231
+ # Check if the current directory has any accessible codebases
232
+ accessible_graphs = (
233
+ await self.codebase_sdk.list_codebases_for_directory()
234
+ ).graphs
235
+ if accessible_graphs:
236
+ self.mount_hint(help_text_with_codebase(already_indexed=True))
237
+ return
238
+
239
+ # Ask user if they want to index the current directory
240
+ should_index = await self.app.push_screen_wait(CodebaseIndexPromptScreen())
241
+ if not should_index:
242
+ self.mount_hint(help_text_empty_dir())
243
+ return
244
+
245
+ self.mount_hint(help_text_with_codebase(already_indexed=False))
246
+
247
+ # Auto-index the current directory with its name
248
+ cwd_name = cur_dir.name
249
+ selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
250
+ self.call_later(lambda: self.index_codebase(selection))
251
+
252
+ def watch_mode(self, new_mode: AgentType) -> None:
253
+ """React to mode changes by updating the agent manager."""
254
+
255
+ if self.is_mounted:
256
+ self.agent_manager.set_agent(new_mode)
257
+ # Use widget coordinator for all widget updates
258
+ self.widget_coordinator.update_for_mode_change(new_mode)
259
+
260
+ def watch_working(self, is_working: bool) -> None:
261
+ """Show or hide the spinner based on working state."""
262
+ logger.debug(f"[WATCH] watch_working called - is_working={is_working}")
263
+ if self.is_mounted:
264
+ # Use widget coordinator for all widget updates
265
+ self.widget_coordinator.update_for_processing_state(is_working)
266
+
267
+ def watch_qa_mode(self, qa_mode_active: bool) -> None:
268
+ """Update UI when Q&A mode state changes."""
269
+ if self.is_mounted:
270
+ # Use widget coordinator for all widget updates
271
+ self.widget_coordinator.update_for_qa_mode(qa_mode_active)
272
+
273
+ def watch_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
274
+ """Update the chat history when messages change."""
275
+ if self.is_mounted:
276
+ # Use widget coordinator for all widget updates
277
+ self.widget_coordinator.update_messages(messages)
278
+
279
+ def action_toggle_mode(self) -> None:
280
+ # Prevent mode switching during Q&A
281
+ if self.qa_mode:
282
+ self.notify(
283
+ "Cannot switch modes while answering questions",
284
+ severity="warning",
285
+ timeout=3,
286
+ )
287
+ return
288
+
289
+ modes = [
290
+ AgentType.RESEARCH,
291
+ AgentType.SPECIFY,
292
+ AgentType.PLAN,
293
+ AgentType.TASKS,
294
+ AgentType.EXPORT,
295
+ ]
296
+ self.mode = modes[(modes.index(self.mode) + 1) % len(modes)]
297
+ self.agent_manager.set_agent(self.mode)
298
+ # Re-focus input after mode change
299
+ self.call_later(lambda: self.widget_coordinator.update_prompt_input(focus=True))
300
+
301
+ def action_show_usage(self) -> None:
302
+ usage_hint = self.agent_manager.get_usage_hint()
303
+ logger.info(f"Usage hint: {usage_hint}")
304
+ if usage_hint:
305
+ self.mount_hint(usage_hint)
306
+ else:
307
+ self.notify("No usage hint available", severity="error")
308
+
309
+ async def action_show_context(self) -> None:
310
+ context_hint = await self.agent_manager.get_context_hint()
311
+ if context_hint:
312
+ self.mount_hint(context_hint)
313
+ else:
314
+ self.notify("No context analysis available", severity="error")
315
+
316
+ def action_view_onboarding(self) -> None:
317
+ """Show the onboarding modal."""
318
+ self.app.push_screen(OnboardingModal())
319
+
320
+ @work
321
+ async def action_compact_conversation(self) -> None:
322
+ """Compact the conversation history to reduce size."""
323
+ logger.debug(f"[COMPACT] Starting compaction - working={self.working}")
324
+
325
+ try:
326
+ # Show spinner and enable ESC cancellation
327
+ from textual.worker import get_current_worker
328
+
329
+ self.processing_state.start_processing("Compacting Conversation...")
330
+ self.processing_state.bind_worker(get_current_worker())
331
+ logger.debug(f"[COMPACT] Processing started - working={self.working}")
332
+
333
+ # Get current message count and tokens
334
+ original_count = len(self.agent_manager.message_history)
335
+ original_tokens = await estimate_tokens_from_messages(
336
+ self.agent_manager.message_history, self.deps.llm_model
337
+ )
338
+
339
+ # Log compaction start
340
+ logger.info(
341
+ f"Starting conversation compaction - {original_count} messages, {original_tokens} tokens"
342
+ )
343
+
344
+ # Post compaction started event
345
+ self.agent_manager.post_message(CompactionStartedMessage())
346
+ logger.debug("[COMPACT] Posted CompactionStartedMessage")
347
+
348
+ # Apply compaction with force=True to bypass threshold checks
349
+ compacted_messages = await apply_persistent_compaction(
350
+ self.agent_manager.message_history, self.deps, force=True
351
+ )
352
+
353
+ logger.debug(
354
+ f"[COMPACT] Compacted messages: count={len(compacted_messages)}, "
355
+ f"last_message_type={type(compacted_messages[-1]).__name__ if compacted_messages else 'None'}"
356
+ )
357
+
358
+ # Check last response usage
359
+ last_response = next(
360
+ (
361
+ msg
362
+ for msg in reversed(compacted_messages)
363
+ if isinstance(msg, ModelResponse)
364
+ ),
365
+ None,
366
+ )
367
+ if last_response:
368
+ logger.debug(
369
+ f"[COMPACT] Last response has usage: {last_response.usage is not None}, "
370
+ f"usage={last_response.usage if last_response.usage else 'None'}"
371
+ )
372
+ else:
373
+ logger.warning(
374
+ "[COMPACT] No ModelResponse found in compacted messages!"
375
+ )
376
+
377
+ # Update agent manager's message history
378
+ self.agent_manager.message_history = compacted_messages
379
+ logger.debug("[COMPACT] Updated agent_manager.message_history")
380
+
381
+ # Calculate after metrics
382
+ compacted_count = len(compacted_messages)
383
+ compacted_tokens = await estimate_tokens_from_messages(
384
+ compacted_messages, self.deps.llm_model
385
+ )
386
+
387
+ # Calculate reductions
388
+ message_reduction = (
389
+ ((original_count - compacted_count) / original_count) * 100
390
+ if original_count > 0
391
+ else 0
392
+ )
393
+ token_reduction = (
394
+ ((original_tokens - compacted_tokens) / original_tokens) * 100
395
+ if original_tokens > 0
396
+ else 0
397
+ )
398
+
399
+ # Save to conversation file
400
+ conversation_file = get_shotgun_home() / "conversation.json"
401
+ manager = ConversationManager(conversation_file)
402
+ conversation = await manager.load()
403
+
404
+ if conversation:
405
+ conversation.set_agent_messages(compacted_messages)
406
+ await manager.save(conversation)
407
+
408
+ # Post compaction completed event
409
+ self.agent_manager.post_message(CompactionCompletedMessage())
410
+
411
+ # Post message history updated event
412
+ self.agent_manager.post_message(
413
+ MessageHistoryUpdated(
414
+ messages=self.agent_manager.ui_message_history.copy(),
415
+ agent_type=self.agent_manager._current_agent_type,
416
+ file_operations=None,
417
+ )
418
+ )
419
+ logger.debug("[COMPACT] Posted MessageHistoryUpdated event")
420
+
421
+ # Force immediate context indicator update
422
+ logger.debug("[COMPACT] Calling update_context_indicator()")
423
+ self.update_context_indicator()
424
+
425
+ # Log compaction completion
426
+ logger.info(
427
+ f"Compaction completed: {original_count} → {compacted_count} messages "
428
+ f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
429
+ )
430
+
431
+ # Add persistent hint message with stats
432
+ self.mount_hint(
433
+ f"✓ Compacted conversation: {original_count} → {compacted_count} messages "
434
+ f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
435
+ )
436
+
437
+ except Exception as e:
438
+ logger.error(f"Failed to compact conversation: {e}", exc_info=True)
439
+ self.notify(f"Failed to compact: {e}", severity="error")
440
+ finally:
441
+ # Hide spinner
442
+ self.processing_state.stop_processing()
443
+ logger.debug(f"[COMPACT] Processing stopped - working={self.working}")
444
+
445
+ @work
446
+ async def action_clear_conversation(self) -> None:
447
+ """Clear the conversation history."""
448
+ # Show confirmation dialog
449
+ should_clear = await self.app.push_screen_wait(
450
+ ConfirmationDialog(
451
+ title="Clear conversation?",
452
+ message="This will permanently delete your entire conversation history. "
453
+ "All messages, context, and progress will be lost. "
454
+ "This action cannot be undone.",
455
+ confirm_label="Clear",
456
+ cancel_label="Keep",
457
+ confirm_variant="warning",
458
+ danger=True,
459
+ )
460
+ )
461
+
462
+ if not should_clear:
463
+ return # User cancelled
464
+
465
+ try:
466
+ # Clear message histories
467
+ self.agent_manager.message_history = []
468
+ self.agent_manager.ui_message_history = []
469
+
470
+ # Use conversation service to clear conversation
471
+ await self.conversation_service.clear_conversation()
472
+
473
+ # Post message history updated event to refresh UI
474
+ self.agent_manager.post_message(
475
+ MessageHistoryUpdated(
476
+ messages=[],
477
+ agent_type=self.agent_manager._current_agent_type,
478
+ file_operations=None,
479
+ )
480
+ )
481
+
482
+ # Show persistent success message
483
+ self.mount_hint("✓ Conversation cleared - Starting fresh!")
484
+
485
+ except Exception as e:
486
+ logger.error(f"Failed to clear conversation: {e}", exc_info=True)
487
+ self.notify(f"Failed to clear: {e}", severity="error")
488
+
489
+ @work(exclusive=False)
490
+ async def update_context_indicator(self) -> None:
491
+ """Update the context indicator with current usage data."""
492
+ logger.debug("[CONTEXT] update_context_indicator called")
493
+ try:
494
+ logger.debug(
495
+ f"[CONTEXT] Getting context analysis - "
496
+ f"message_history_count={len(self.agent_manager.message_history)}"
497
+ )
498
+ analysis = await self.agent_manager.get_context_analysis()
499
+
500
+ if analysis:
501
+ logger.debug(
502
+ f"[CONTEXT] Analysis received - "
503
+ f"agent_context_tokens={analysis.agent_context_tokens}, "
504
+ f"max_usable_tokens={analysis.max_usable_tokens}, "
505
+ f"percentage={round((analysis.agent_context_tokens / analysis.max_usable_tokens) * 100, 1) if analysis.max_usable_tokens > 0 else 0}%"
506
+ )
507
+ else:
508
+ logger.warning("[CONTEXT] Analysis is None!")
509
+
510
+ model_name = self.deps.llm_model.name
511
+ # Use widget coordinator for context indicator update
512
+ self.widget_coordinator.update_context_indicator(analysis, model_name)
513
+ except Exception as e:
514
+ logger.error(
515
+ f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
516
+ )
517
+
518
+ @work(exclusive=False)
519
+ async def update_context_indicator_with_messages(
520
+ self,
521
+ agent_messages: list[ModelMessage],
522
+ ui_messages: list[ModelMessage | HintMessage],
523
+ ) -> None:
524
+ """Update the context indicator with specific message sets (for streaming updates).
525
+
526
+ Args:
527
+ agent_messages: Agent message history including streaming messages (for token counting)
528
+ ui_messages: UI message history including hints and streaming messages
529
+ """
530
+ try:
531
+ from shotgun.agents.context_analyzer.analyzer import ContextAnalyzer
532
+
533
+ analyzer = ContextAnalyzer(self.deps.llm_model)
534
+ # Analyze the combined message histories for accurate progressive token counts
535
+ analysis = await analyzer.analyze_conversation(agent_messages, ui_messages)
536
+
537
+ if analysis:
538
+ model_name = self.deps.llm_model.name
539
+ self.widget_coordinator.update_context_indicator(analysis, model_name)
540
+ except Exception as e:
541
+ logger.error(
542
+ f"Failed to update context indicator with streaming messages: {e}",
543
+ exc_info=True,
544
+ )
545
+
546
+ def compose(self) -> ComposeResult:
547
+ """Create child widgets for the app."""
548
+ with Container(id="window"):
549
+ yield self.agent_manager
550
+ yield ChatHistory()
551
+ with Container(id="footer"):
552
+ yield Spinner(
553
+ text="Processing...",
554
+ id="spinner",
555
+ classes="" if self.working else "hidden",
556
+ )
557
+ yield StatusBar(working=self.working)
558
+ yield PromptInput(
559
+ text=self.value,
560
+ highlight_cursor_line=False,
561
+ id="prompt-input",
562
+ placeholder=self._placeholder_for_mode(self.mode),
563
+ )
564
+ with Grid():
565
+ yield ModeIndicator(mode=self.mode)
566
+ with Container(id="right-footer-indicators"):
567
+ yield ContextIndicator(id="context-indicator")
568
+ yield Static("", id="indexing-job-display")
569
+
570
+ def mount_hint(self, markdown: str) -> None:
571
+ hint = HintMessage(message=markdown)
572
+ self.agent_manager.add_hint_message(hint)
573
+
574
+ @on(PartialResponseMessage)
575
+ def handle_partial_response(self, event: PartialResponseMessage) -> None:
576
+ self.partial_message = event.message
577
+
578
+ # Filter event.messages to exclude ModelRequest with only ToolReturnPart
579
+ # These are intermediate tool results that would render as empty (UserQuestionWidget
580
+ # filters out ToolReturnPart in format_prompt_parts), causing user messages to disappear
581
+ filtered_event_messages: list[ModelMessage] = []
582
+ for msg in event.messages:
583
+ if isinstance(msg, ModelRequest):
584
+ # Check if this ModelRequest has any user-visible parts
585
+ has_user_content = any(
586
+ not isinstance(part, ToolReturnPart) for part in msg.parts
587
+ )
588
+ if has_user_content:
589
+ filtered_event_messages.append(msg)
590
+ # Skip ModelRequest with only ToolReturnPart
591
+ else:
592
+ # Keep all ModelResponse and other message types
593
+ filtered_event_messages.append(msg)
594
+
595
+ # Build new message list combining existing messages with new streaming content
596
+ new_message_list = self.messages + cast(
597
+ list[ModelMessage | HintMessage], filtered_event_messages
598
+ )
599
+
600
+ # Use widget coordinator to set partial response
601
+ self.widget_coordinator.set_partial_response(
602
+ self.partial_message, new_message_list
603
+ )
604
+
605
+ # Update context indicator with full message history including streaming messages
606
+ # Combine existing agent history with new streaming messages for accurate token count
607
+ combined_agent_history = self.agent_manager.message_history + event.messages
608
+ self.update_context_indicator_with_messages(
609
+ combined_agent_history, new_message_list
610
+ )
611
+
612
+ def _clear_partial_response(self) -> None:
613
+ # Use widget coordinator to clear partial response
614
+ self.widget_coordinator.set_partial_response(None, self.messages)
615
+
616
+ def _exit_qa_mode(self) -> None:
617
+ """Exit Q&A mode and clean up state."""
618
+ # Track cancellation event
619
+ track_event(
620
+ "qa_mode_cancelled",
621
+ {
622
+ "questions_total": len(self.qa_questions),
623
+ "questions_answered": len(self.qa_answers),
624
+ },
625
+ )
626
+
627
+ # Clear Q&A state
628
+ self.qa_mode = False
629
+ self.qa_questions = []
630
+ self.qa_answers = []
631
+ self.qa_current_index = 0
632
+
633
+ # Show cancellation message
634
+ self.mount_hint("⚠️ Q&A cancelled - You can continue the conversation.")
635
+
636
+ @on(ClarifyingQuestionsMessage)
637
+ def handle_clarifying_questions(self, event: ClarifyingQuestionsMessage) -> None:
638
+ """Handle clarifying questions from agent structured output.
639
+
640
+ Note: Hints are now added synchronously in agent_manager.run() before this
641
+ handler is called, so we only need to set up Q&A mode state here.
642
+ """
643
+ # Clear any streaming partial response (removes final_result JSON)
644
+ self._clear_partial_response()
645
+
646
+ # Enter Q&A mode
647
+ self.qa_mode = True
648
+ self.qa_questions = event.questions
649
+ self.qa_current_index = 0
650
+ self.qa_answers = []
651
+
652
    @on(MessageHistoryUpdated)
    async def handle_message_history_updated(
        self, event: MessageHistoryUpdated
    ) -> None:
        """Handle message history updates from the agent manager.

        Clears any streaming partial response, syncs the local message list,
        refreshes the prompt placeholder / mode / context indicators, surfaces
        a "files modified" hint when the run touched files, and finally lets
        MarketingManager display any pending marketing messages.
        """
        self._clear_partial_response()
        self.messages = event.messages

        # Use widget coordinator to refresh placeholder and mode indicator
        self.widget_coordinator.update_prompt_input(
            placeholder=self._placeholder_for_mode(self.mode)
        )
        self.widget_coordinator.refresh_mode_indicator()

        # Update context indicator
        self.update_context_indicator()

        # If there are file operations, add a message showing the modified files
        # Skip if hint was already added by agent_manager (e.g., in QA mode)
        if event.file_operations:
            # Check if file operation hint already exists in recent messages
            # (only the last 5 are scanned to avoid re-hinting old operations).
            file_hint_exists = any(
                isinstance(msg, HintMessage)
                and (
                    msg.message.startswith("📝 Modified:")
                    or msg.message.startswith("📁 Modified")
                )
                for msg in event.messages[-5:]  # Check last 5 messages
            )

            if not file_hint_exists:
                chat_history = self.query_one(ChatHistory)
                if chat_history.vertical_tail:
                    tracker = FileOperationTracker(operations=event.file_operations)
                    display_path = tracker.get_display_path()

                    if display_path:
                        # Create a simple markdown message with the file path
                        # The terminal emulator will make this clickable automatically
                        path_obj = Path(display_path)

                        if len(event.file_operations) == 1:
                            message = f"📝 Modified: `{display_path}`"
                        else:
                            # Count distinct files; a single file may appear in
                            # several operations.
                            num_files = len(
                                {op.file_path for op in event.file_operations}
                            )
                            if path_obj.is_dir():
                                message = f"📁 Modified {num_files} files in: `{display_path}`"
                            else:
                                # Common path is a file, show parent directory
                                message = f"📁 Modified {num_files} files in: `{path_obj.parent}`"

                        self.mount_hint(message)

        # Check and display any marketing messages
        # NOTE: local import — presumably avoids a circular import with the app
        # module; confirm before hoisting to the top of the file.
        from shotgun.tui.app import ShotgunApp

        app = cast(ShotgunApp, self.app)
        await MarketingManager.check_and_display_messages(
            app.config_manager, event.file_operations, self.mount_hint
        )
714
+
715
+ @on(CompactionStartedMessage)
716
+ def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
717
+ """Update spinner text when compaction starts."""
718
+ # Use widget coordinator to update spinner text
719
+ self.widget_coordinator.update_spinner_text("Compacting Conversation...")
720
+
721
+ @on(CompactionCompletedMessage)
722
+ def handle_compaction_completed(self, event: CompactionCompletedMessage) -> None:
723
+ """Reset spinner text when compaction completes."""
724
+ # Use widget coordinator to update spinner text
725
+ self.widget_coordinator.update_spinner_text("Processing...")
726
+
727
    async def handle_model_selected(self, result: ModelConfigUpdated | None) -> None:
        """Handle model selection from ModelPickerScreen.

        Called as a callback when the ModelPickerScreen is dismissed. Updates
        both this screen's and the agent manager's model config, refreshes the
        context indicator, emits telemetry, and confirms the switch to the
        user. Failures are logged and surfaced as a hint rather than raised.

        Args:
            result: ModelConfigUpdated if a model was selected, None if cancelled
        """
        if result is None:
            return

        try:
            # Update the model configuration in dependencies
            self.deps.llm_model = result.model_config

            # Update the agent manager's model configuration
            self.agent_manager.deps.llm_model = result.model_config

            # Get current analysis and update context indicator via coordinator
            analysis = await self.agent_manager.get_context_analysis()
            self.widget_coordinator.update_context_indicator(analysis, result.new_model)

            # Get model display name for user feedback
            model_spec = MODEL_SPECS.get(result.new_model)
            model_display = (
                model_spec.short_name if model_spec else str(result.new_model)
            )

            # Format provider information
            # NOTE(review): compares key_provider to the str "shotgun" but
            # later calls result.key_provider.value — assumes a str-comparable
            # enum (e.g. StrEnum); confirm against the KeyProvider definition.
            key_method = (
                "Shotgun Account" if result.key_provider == "shotgun" else "BYOK"
            )
            provider_display = result.provider.value.title()

            # Track model switch in telemetry
            track_event(
                "model_switched",
                {
                    "old_model": str(result.old_model) if result.old_model else None,
                    "new_model": str(result.new_model),
                    "provider": result.provider.value,
                    "key_provider": result.key_provider.value,
                },
            )

            # Show confirmation to user with provider info
            self.agent_manager.add_hint_message(
                HintMessage(
                    message=f"✓ Switched to {model_display} ({provider_display}, {key_method})"
                )
            )

        except Exception as e:
            # Boundary handler: log and show a hint instead of crashing the UI.
            logger.error(f"Failed to handle model selection: {e}")
            self.agent_manager.add_hint_message(
                HintMessage(message=f"⚠ Failed to update model configuration: {e}")
            )
784
+
785
    @on(PromptInput.Submitted)
    async def handle_submit(self, message: PromptInput.Submitted) -> None:
        """Route a submitted prompt: Q&A answer, slash command, or agent run.

        Branches, in order:
        1. Empty input  -> clear the input field and do nothing.
        2. Q&A mode     -> record the answer, show the next question or send
                           the collected answers back to the agent.
        3. Command      -> execute via command_handler and echo the response.
        4. Otherwise    -> append to history and run the agent with the text.
        """
        text = message.text.strip()

        # If empty text, just clear input and return
        if not text:
            self.widget_coordinator.update_prompt_input(clear=True)
            self.value = ""
            return

        # Handle Q&A mode (from structured output clarifying questions)
        if self.qa_mode and self.qa_questions:
            # Collect answer
            self.qa_answers.append(text)

            # Show answer (unnumbered for a single question, numbered otherwise)
            if len(self.qa_questions) == 1:
                self.agent_manager.add_hint_message(
                    HintMessage(message=f"**A:** {text}")
                )
            else:
                q_num = self.qa_current_index + 1
                self.agent_manager.add_hint_message(
                    HintMessage(message=f"**A{q_num}:** {text}")
                )

            # Move to next or finish
            self.qa_current_index += 1

            if self.qa_current_index < len(self.qa_questions):
                # Show next question
                next_q = self.qa_questions[self.qa_current_index]
                next_q_num = self.qa_current_index + 1
                self.agent_manager.add_hint_message(
                    HintMessage(message=f"**Q{next_q_num}:** {next_q}")
                )
            else:
                # All answered - format and send back
                if len(self.qa_questions) == 1:
                    # Single question - just send the answer
                    formatted_qa = f"Q: {self.qa_questions[0]}\nA: {self.qa_answers[0]}"
                else:
                    # Multiple questions - format all Q&A pairs
                    formatted_qa = "\n\n".join(
                        f"Q{i + 1}: {q}\nA{i + 1}: {a}"
                        for i, (q, a) in enumerate(
                            zip(self.qa_questions, self.qa_answers, strict=True)
                        )
                    )

                # Exit Q&A mode
                self.qa_mode = False
                self.qa_questions = []
                self.qa_answers = []
                self.qa_current_index = 0

                # Send answers back to agent
                self.run_agent(formatted_qa)

            # Clear input
            self.widget_coordinator.update_prompt_input(clear=True)
            self.value = ""
            return

        # Check if it's a command
        if self.command_handler.is_command(text):
            # NOTE(review): `success` is never inspected — the response string
            # is shown regardless; confirm that is intentional.
            success, response = self.command_handler.handle_command(text)

            # Add the command to history (raw, unstripped text)
            self.history.append(message.text)

            # Display the command in chat history
            user_message = ModelRequest(parts=[UserPromptPart(content=text)])
            self.messages = self.messages + [user_message]

            # Display the response (help text or error message)
            response_message = ModelResponse(parts=[TextPart(content=response)])
            self.messages = self.messages + [response_message]

            # Clear the input
            self.widget_coordinator.update_prompt_input(clear=True)
            self.value = ""
            return

        # Not a command, process as normal
        self.history.append(message.text)

        # Add user message to agent_manager's history BEFORE running the agent
        # This ensures immediate visual feedback AND proper deduplication
        user_message = ModelRequest.user_text_prompt(text)
        self.agent_manager.ui_message_history.append(user_message)
        self.messages = self.agent_manager.ui_message_history.copy()

        # Clear the input
        self.value = ""
        self.run_agent(text)  # Use stripped text

        self.widget_coordinator.update_prompt_input(clear=True)
883
+
884
+ def _placeholder_for_mode(self, mode: AgentType, force_new: bool = False) -> str:
885
+ """Return the placeholder text appropriate for the current mode.
886
+
887
+ Args:
888
+ mode: The current agent mode.
889
+ force_new: If True, force selection of a new random hint.
890
+
891
+ Returns:
892
+ Dynamic placeholder hint based on mode and progress.
893
+ """
894
+ return self.placeholder_hints.get_placeholder_for_mode(mode)
895
+
896
+ def index_codebase_command(self) -> None:
897
+ # Simplified: always index current working directory with its name
898
+ cur_dir = Path.cwd().resolve()
899
+ cwd_name = cur_dir.name
900
+ selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
901
+ self.call_later(lambda: self.index_codebase(selection))
902
+
903
+ def delete_codebase_command(self) -> None:
904
+ self.app.push_screen(
905
+ CommandPalette(
906
+ providers=[DeleteCodebasePaletteProvider],
907
+ placeholder="Select a codebase to delete…",
908
+ )
909
+ )
910
+
911
+ def delete_codebase_from_palette(self, graph_id: str) -> None:
912
+ stack = getattr(self.app, "screen_stack", None)
913
+ if stack and isinstance(stack[-1], CommandPalette):
914
+ self.app.pop_screen()
915
+
916
+ self.call_later(lambda: self.delete_codebase(graph_id))
917
+
918
+ @work
919
+ async def delete_codebase(self, graph_id: str) -> None:
920
+ try:
921
+ await self.codebase_sdk.delete_codebase(graph_id)
922
+ self.notify(f"Deleted codebase: {graph_id}", severity="information")
923
+ except CodebaseNotFoundError as exc:
924
+ self.notify(str(exc), severity="error")
925
+ except Exception as exc: # pragma: no cover - defensive UI path
926
+ self.notify(f"Failed to delete codebase: {exc}", severity="error")
927
+
928
+ def _is_kuzu_corruption_error(self, exception: Exception) -> bool:
929
+ """Check if error is related to kuzu database corruption.
930
+
931
+ Args:
932
+ exception: The exception to check
933
+
934
+ Returns:
935
+ True if the error indicates kuzu database corruption
936
+ """
937
+ error_str = str(exception).lower()
938
+ error_indicators = [
939
+ "not a directory",
940
+ "errno 20",
941
+ "corrupted",
942
+ ".kuzu",
943
+ "ioexception",
944
+ "unordered_map", # C++ STL map errors from kuzu
945
+ "key not found", # unordered_map::at errors
946
+ "std::exception", # Generic C++ exceptions from kuzu
947
+ ]
948
+ return any(indicator in error_str for indicator in error_indicators)
949
+
950
+ @work
951
+ async def index_codebase(self, selection: CodebaseIndexSelection) -> None:
952
+ label = self.query_one("#indexing-job-display", Static)
953
+ label.update(
954
+ f"[$foreground-muted]Indexing codebase: [bold $text-accent]{selection.name}[/][/]"
955
+ )
956
+ label.refresh()
957
+
958
+ def create_progress_bar(percentage: float, width: int = 20) -> str:
959
+ """Create a visual progress bar using Unicode block characters."""
960
+ filled = int((percentage / 100) * width)
961
+ empty = width - filled
962
+ return "▓" * filled + "░" * empty
963
+
964
+ # Spinner animation frames
965
+ spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
966
+
967
+ # Progress state (shared between timer and progress callback)
968
+ progress_state: dict[str, int | float] = {
969
+ "frame_index": 0,
970
+ "percentage": 0.0,
971
+ }
972
+
973
+ def update_progress_display() -> None:
974
+ """Update progress bar on timer - runs every 100ms."""
975
+ # Advance spinner frame
976
+ frame_idx = int(progress_state["frame_index"])
977
+ progress_state["frame_index"] = (frame_idx + 1) % len(spinner_frames)
978
+ spinner = spinner_frames[frame_idx]
979
+
980
+ # Get current state
981
+ pct = float(progress_state["percentage"])
982
+ bar = create_progress_bar(pct)
983
+
984
+ # Update label
985
+ label.update(
986
+ f"[$foreground-muted]Indexing codebase: {spinner} {bar} {pct:.0f}%[/]"
987
+ )
988
+
989
+ def progress_callback(progress_info: IndexProgress) -> None:
990
+ """Update progress state (timer renders it independently)."""
991
+ # Calculate overall percentage (0-95%, reserve 95-100% for finalization)
992
+ if progress_info.phase == ProgressPhase.STRUCTURE:
993
+ # Phase 1: 0-10%, always show 5% while running, 10% when complete
994
+ overall_pct = 10.0 if progress_info.phase_complete else 5.0
995
+ elif progress_info.phase == ProgressPhase.DEFINITIONS:
996
+ # Phase 2: 10-80% based on files processed
997
+ if progress_info.total and progress_info.total > 0:
998
+ phase_pct = (progress_info.current / progress_info.total) * 70.0
999
+ overall_pct = 10.0 + phase_pct
1000
+ else:
1001
+ overall_pct = 10.0
1002
+ elif progress_info.phase == ProgressPhase.RELATIONSHIPS:
1003
+ # Phase 3: 80-95% based on relationships processed (cap at 95%)
1004
+ if progress_info.total and progress_info.total > 0:
1005
+ phase_pct = (progress_info.current / progress_info.total) * 15.0
1006
+ overall_pct = 80.0 + phase_pct
1007
+ else:
1008
+ overall_pct = 80.0
1009
+ else:
1010
+ overall_pct = 0.0
1011
+
1012
+ # Update shared state (timer will render it)
1013
+ progress_state["percentage"] = overall_pct
1014
+
1015
+ # Start progress animation timer (10 fps = 100ms interval)
1016
+ progress_timer = self.set_interval(0.1, update_progress_display)
1017
+
1018
+ # Retry logic for handling kuzu corruption
1019
+ max_retries = 3
1020
+
1021
+ for attempt in range(max_retries):
1022
+ try:
1023
+ # Clean up corrupted DBs before retry (skip on first attempt)
1024
+ if attempt > 0:
1025
+ logger.info(
1026
+ f"Retry attempt {attempt + 1}/{max_retries} - cleaning up corrupted databases"
1027
+ )
1028
+ manager = CodebaseGraphManager(
1029
+ self.codebase_sdk.service.storage_dir
1030
+ )
1031
+ cleaned = await manager.cleanup_corrupted_databases()
1032
+ logger.info(f"Cleaned up {len(cleaned)} corrupted database(s)")
1033
+ self.notify(
1034
+ f"Retrying indexing after cleanup (attempt {attempt + 1}/{max_retries})...",
1035
+ severity="information",
1036
+ )
1037
+
1038
+ # Pass the current working directory as the indexed_from_cwd
1039
+ logger.debug(
1040
+ f"Starting indexing - repo_path: {selection.repo_path}, "
1041
+ f"name: {selection.name}, cwd: {Path.cwd().resolve()}"
1042
+ )
1043
+ result = await self.codebase_sdk.index_codebase(
1044
+ selection.repo_path,
1045
+ selection.name,
1046
+ indexed_from_cwd=str(Path.cwd().resolve()),
1047
+ progress_callback=progress_callback,
1048
+ )
1049
+
1050
+ # Success! Stop progress animation
1051
+ progress_timer.stop()
1052
+
1053
+ # Show 100% completion after indexing finishes
1054
+ final_bar = create_progress_bar(100.0)
1055
+ label.update(
1056
+ f"[$foreground-muted]Indexing codebase: {final_bar} 100%[/]"
1057
+ )
1058
+ label.refresh()
1059
+
1060
+ logger.info(
1061
+ f"Successfully indexed codebase '{result.name}' (ID: {result.graph_id})"
1062
+ )
1063
+ self.notify(
1064
+ f"Indexed codebase '{result.name}' (ID: {result.graph_id})",
1065
+ severity="information",
1066
+ timeout=8,
1067
+ )
1068
+ break # Success - exit retry loop
1069
+
1070
+ except CodebaseAlreadyIndexedError as exc:
1071
+ progress_timer.stop()
1072
+ logger.warning(f"Codebase already indexed: {exc}")
1073
+ self.notify(str(exc), severity="warning")
1074
+ return
1075
+ except InvalidPathError as exc:
1076
+ progress_timer.stop()
1077
+ logger.error(f"Invalid path error: {exc}")
1078
+ self.notify(str(exc), severity="error")
1079
+ return
1080
+
1081
+ except Exception as exc: # pragma: no cover - defensive UI path
1082
+ # Check if this is a kuzu corruption error and we have retries left
1083
+ if attempt < max_retries - 1 and self._is_kuzu_corruption_error(exc):
1084
+ logger.warning(
1085
+ f"Kuzu corruption detected on attempt {attempt + 1}/{max_retries}: {exc}. "
1086
+ f"Will retry after cleanup..."
1087
+ )
1088
+ # Exponential backoff: 1s, 2s
1089
+ await asyncio.sleep(2**attempt)
1090
+ continue
1091
+
1092
+ # Either final retry failed OR not a corruption error - show error
1093
+ logger.exception(
1094
+ f"Failed to index codebase after {attempt + 1} attempts - "
1095
+ f"repo_path: {selection.repo_path}, name: {selection.name}, error: {exc}"
1096
+ )
1097
+ self.notify(
1098
+ f"Failed to index codebase after {attempt + 1} attempts: {exc}",
1099
+ severity="error",
1100
+ timeout=30, # Keep error visible for 30 seconds
1101
+ )
1102
+ break
1103
+
1104
+ # Always stop the progress timer and clean up label
1105
+ progress_timer.stop()
1106
+ label.update("")
1107
+ label.refresh()
1108
+
1109
+ @work
1110
+ async def run_agent(self, message: str) -> None:
1111
+ prompt = None
1112
+
1113
+ # Start processing with spinner
1114
+ from textual.worker import get_current_worker
1115
+
1116
+ self.processing_state.start_processing("Processing...")
1117
+ self.processing_state.bind_worker(get_current_worker())
1118
+
1119
+ # Start context indicator animation immediately
1120
+ self.widget_coordinator.set_context_streaming(True)
1121
+
1122
+ prompt = message
1123
+
1124
+ try:
1125
+ await self.agent_manager.run(
1126
+ prompt=prompt,
1127
+ )
1128
+ except asyncio.CancelledError:
1129
+ # Handle cancellation gracefully - DO NOT re-raise
1130
+ self.mount_hint("⚠️ Operation cancelled by user")
1131
+ except ContextSizeLimitExceeded as e:
1132
+ # User-friendly error with actionable options
1133
+ hint = (
1134
+ f"⚠️ **Context too large for {e.model_name}**\n\n"
1135
+ f"Your conversation history exceeds this model's limit ({e.max_tokens:,} tokens).\n\n"
1136
+ f"**Choose an action:**\n\n"
1137
+ f"1. Switch to a larger model (`Ctrl+P` → Change Model)\n"
1138
+ f"2. Switch to a larger model, compact (`/compact`), then switch back to {e.model_name}\n"
1139
+ f"3. Clear conversation (`/clear`)\n"
1140
+ )
1141
+
1142
+ self.mount_hint(hint)
1143
+
1144
+ # Log for debugging (won't send to Sentry due to ErrorNotPickedUpBySentry)
1145
+ logger.info(
1146
+ "Context size limit exceeded",
1147
+ extra={
1148
+ "max_tokens": e.max_tokens,
1149
+ "model_name": e.model_name,
1150
+ },
1151
+ )
1152
+ except Exception as e:
1153
+ # Log with full stack trace to shotgun.log
1154
+ logger.exception(
1155
+ "Agent run failed",
1156
+ extra={
1157
+ "agent_mode": self.mode.value,
1158
+ "error_type": type(e).__name__,
1159
+ },
1160
+ )
1161
+
1162
+ # Determine user-friendly message based on error type
1163
+ error_name = type(e).__name__
1164
+ error_message = str(e)
1165
+
1166
+ if "APIStatusError" in error_name and "overload" in error_message.lower():
1167
+ hint = "⚠️ The AI service is temporarily overloaded. Please wait a moment and try again."
1168
+ elif "APIStatusError" in error_name and "rate" in error_message.lower():
1169
+ hint = "⚠️ Rate limit reached. Please wait before trying again."
1170
+ elif "APIStatusError" in error_name:
1171
+ hint = f"⚠️ AI service error: {error_message}"
1172
+ else:
1173
+ hint = f"⚠️ An error occurred: {error_message}\n\nCheck logs at ~/.shotgun-sh/logs/shotgun.log"
1174
+
1175
+ self.mount_hint(hint)
1176
+ finally:
1177
+ self.processing_state.stop_processing()
1178
+ # Stop context indicator animation
1179
+ self.widget_coordinator.set_context_streaming(False)
1180
+
1181
+ # Save conversation after each interaction
1182
+ self._save_conversation()
1183
+
1184
+ self.widget_coordinator.update_prompt_input(focus=True)
1185
+
1186
+ def _save_conversation(self) -> None:
1187
+ """Save the current conversation to persistent storage."""
1188
+ # Use conversation service for saving (run async in background)
1189
+ # Use exclusive=True to prevent concurrent saves that can cause file contention
1190
+ self.run_worker(
1191
+ self.conversation_service.save_conversation(self.agent_manager),
1192
+ exclusive=True,
1193
+ )
1194
+
1195
+ async def _check_and_load_conversation(self) -> None:
1196
+ """Check if conversation exists and load it if it does."""
1197
+ if await self.conversation_manager.exists():
1198
+ self._load_conversation()
1199
+
1200
+ def _load_conversation(self) -> None:
1201
+ """Load conversation from persistent storage."""
1202
+
1203
+ # Use conversation service for restoration (run async)
1204
+ async def _do_load() -> None:
1205
+ (
1206
+ success,
1207
+ error_msg,
1208
+ restored_type,
1209
+ ) = await self.conversation_service.restore_conversation(
1210
+ self.agent_manager, self.deps.usage_manager
1211
+ )
1212
+
1213
+ if not success and error_msg:
1214
+ self.mount_hint(error_msg)
1215
+ elif success and restored_type:
1216
+ # Update the current mode to match restored conversation
1217
+ self.mode = restored_type
1218
+
1219
+ self.run_worker(_do_load(), exclusive=False)
1220
+
1221
+ @work
1222
+ async def _check_and_show_onboarding(self) -> None:
1223
+ """Check if onboarding should be shown and display modal if needed."""
1224
+ config_manager = get_config_manager()
1225
+ config = await config_manager.load()
1226
+
1227
+ # Only show onboarding if it hasn't been shown before
1228
+ if config.shown_onboarding_popup is None:
1229
+ # Show the onboarding modal
1230
+ await self.app.push_screen_wait(OnboardingModal())
1231
+
1232
+ # Mark as shown in config with current timestamp
1233
+ config.shown_onboarding_popup = datetime.now(timezone.utc)
1234
+ await config_manager.save(config)