shotgun-sh 0.2.3.dev2__py3-none-any.whl → 0.2.11.dev1__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic.

Files changed (107)
  1. shotgun/agents/agent_manager.py +524 -58
  2. shotgun/agents/common.py +62 -62
  3. shotgun/agents/config/constants.py +0 -6
  4. shotgun/agents/config/manager.py +14 -3
  5. shotgun/agents/config/models.py +16 -0
  6. shotgun/agents/config/provider.py +68 -13
  7. shotgun/agents/context_analyzer/__init__.py +28 -0
  8. shotgun/agents/context_analyzer/analyzer.py +493 -0
  9. shotgun/agents/context_analyzer/constants.py +9 -0
  10. shotgun/agents/context_analyzer/formatter.py +115 -0
  11. shotgun/agents/context_analyzer/models.py +212 -0
  12. shotgun/agents/conversation_history.py +125 -2
  13. shotgun/agents/conversation_manager.py +24 -2
  14. shotgun/agents/export.py +4 -5
  15. shotgun/agents/history/compaction.py +9 -4
  16. shotgun/agents/history/context_extraction.py +93 -6
  17. shotgun/agents/history/history_processors.py +14 -2
  18. shotgun/agents/history/token_counting/anthropic.py +32 -10
  19. shotgun/agents/models.py +50 -2
  20. shotgun/agents/plan.py +4 -5
  21. shotgun/agents/research.py +4 -5
  22. shotgun/agents/specify.py +4 -5
  23. shotgun/agents/tasks.py +4 -5
  24. shotgun/agents/tools/__init__.py +0 -2
  25. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  26. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  27. shotgun/agents/tools/codebase/file_read.py +6 -0
  28. shotgun/agents/tools/codebase/query_graph.py +6 -0
  29. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  30. shotgun/agents/tools/file_management.py +71 -9
  31. shotgun/agents/tools/registry.py +217 -0
  32. shotgun/agents/tools/web_search/__init__.py +24 -12
  33. shotgun/agents/tools/web_search/anthropic.py +24 -3
  34. shotgun/agents/tools/web_search/gemini.py +22 -10
  35. shotgun/agents/tools/web_search/openai.py +21 -12
  36. shotgun/api_endpoints.py +7 -3
  37. shotgun/build_constants.py +1 -1
  38. shotgun/cli/clear.py +52 -0
  39. shotgun/cli/compact.py +186 -0
  40. shotgun/cli/context.py +111 -0
  41. shotgun/cli/models.py +1 -0
  42. shotgun/cli/update.py +16 -2
  43. shotgun/codebase/core/manager.py +10 -1
  44. shotgun/llm_proxy/__init__.py +5 -2
  45. shotgun/llm_proxy/clients.py +12 -7
  46. shotgun/logging_config.py +8 -10
  47. shotgun/main.py +70 -10
  48. shotgun/posthog_telemetry.py +9 -3
  49. shotgun/prompts/agents/export.j2 +18 -1
  50. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
  51. shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
  52. shotgun/prompts/agents/plan.j2 +1 -1
  53. shotgun/prompts/agents/research.j2 +1 -1
  54. shotgun/prompts/agents/specify.j2 +270 -3
  55. shotgun/prompts/agents/state/system_state.j2 +4 -0
  56. shotgun/prompts/agents/tasks.j2 +1 -1
  57. shotgun/prompts/loader.py +2 -2
  58. shotgun/prompts/tools/web_search.j2 +14 -0
  59. shotgun/sentry_telemetry.py +4 -15
  60. shotgun/settings.py +238 -0
  61. shotgun/telemetry.py +15 -32
  62. shotgun/tui/app.py +203 -9
  63. shotgun/tui/commands/__init__.py +1 -1
  64. shotgun/tui/components/context_indicator.py +136 -0
  65. shotgun/tui/components/mode_indicator.py +70 -0
  66. shotgun/tui/components/status_bar.py +48 -0
  67. shotgun/tui/containers.py +93 -0
  68. shotgun/tui/dependencies.py +39 -0
  69. shotgun/tui/protocols.py +45 -0
  70. shotgun/tui/screens/chat/__init__.py +5 -0
  71. shotgun/tui/screens/chat/chat.tcss +54 -0
  72. shotgun/tui/screens/chat/chat_screen.py +1110 -0
  73. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
  74. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  75. shotgun/tui/screens/chat/help_text.py +39 -0
  76. shotgun/tui/screens/chat/prompt_history.py +48 -0
  77. shotgun/tui/screens/chat.tcss +11 -0
  78. shotgun/tui/screens/chat_screen/command_providers.py +68 -2
  79. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  80. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  81. shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
  82. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  83. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  84. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  85. shotgun/tui/screens/confirmation_dialog.py +151 -0
  86. shotgun/tui/screens/model_picker.py +30 -6
  87. shotgun/tui/screens/pipx_migration.py +153 -0
  88. shotgun/tui/screens/welcome.py +24 -5
  89. shotgun/tui/services/__init__.py +5 -0
  90. shotgun/tui/services/conversation_service.py +182 -0
  91. shotgun/tui/state/__init__.py +7 -0
  92. shotgun/tui/state/processing_state.py +185 -0
  93. shotgun/tui/widgets/__init__.py +5 -0
  94. shotgun/tui/widgets/widget_coordinator.py +247 -0
  95. shotgun/utils/datetime_utils.py +77 -0
  96. shotgun/utils/file_system_utils.py +3 -2
  97. shotgun/utils/update_checker.py +69 -14
  98. shotgun_sh-0.2.11.dev1.dist-info/METADATA +129 -0
  99. shotgun_sh-0.2.11.dev1.dist-info/RECORD +190 -0
  100. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev1.dist-info}/entry_points.txt +1 -0
  101. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev1.dist-info}/licenses/LICENSE +1 -1
  102. shotgun/agents/tools/user_interaction.py +0 -37
  103. shotgun/tui/screens/chat.py +0 -804
  104. shotgun/tui/screens/chat_screen/history.py +0 -352
  105. shotgun_sh-0.2.3.dev2.dist-info/METADATA +0 -467
  106. shotgun_sh-0.2.3.dev2.dist-info/RECORD +0 -154
  107. {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev1.dist-info}/WHEEL +0 -0
shotgun/tui/screens/chat/chat_screen.py (new file)
@@ -0,0 +1,1110 @@
+ """Main chat screen implementation."""
+
+ import asyncio
+ import logging
+ from pathlib import Path
+ from typing import cast
+
+ from pydantic_ai.messages import (
+     ModelMessage,
+     ModelRequest,
+     ModelResponse,
+     TextPart,
+     ToolReturnPart,
+     UserPromptPart,
+ )
+ from textual import events, on, work
+ from textual.app import ComposeResult
+ from textual.command import CommandPalette
+ from textual.containers import Container, Grid
+ from textual.keys import Keys
+ from textual.reactive import reactive
+ from textual.screen import Screen
+ from textual.widgets import Static
+
+ from shotgun.agents.agent_manager import (
+     AgentManager,
+     ClarifyingQuestionsMessage,
+     CompactionCompletedMessage,
+     CompactionStartedMessage,
+     MessageHistoryUpdated,
+     ModelConfigUpdated,
+     PartialResponseMessage,
+ )
+ from shotgun.agents.config.models import MODEL_SPECS
+ from shotgun.agents.conversation_manager import ConversationManager
+ from shotgun.agents.history.compaction import apply_persistent_compaction
+ from shotgun.agents.history.token_estimation import estimate_tokens_from_messages
+ from shotgun.agents.models import (
+     AgentDeps,
+     AgentType,
+     FileOperationTracker,
+ )
+ from shotgun.codebase.core.manager import (
+     CodebaseAlreadyIndexedError,
+     CodebaseGraphManager,
+ )
+ from shotgun.codebase.models import IndexProgress, ProgressPhase
+ from shotgun.posthog_telemetry import track_event
+ from shotgun.sdk.codebase import CodebaseSDK
+ from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
+ from shotgun.tui.commands import CommandHandler
+ from shotgun.tui.components.context_indicator import ContextIndicator
+ from shotgun.tui.components.mode_indicator import ModeIndicator
+ from shotgun.tui.components.prompt_input import PromptInput
+ from shotgun.tui.components.spinner import Spinner
+ from shotgun.tui.components.status_bar import StatusBar
+ from shotgun.tui.screens.chat.codebase_index_prompt_screen import (
+     CodebaseIndexPromptScreen,
+ )
+ from shotgun.tui.screens.chat.codebase_index_selection import CodebaseIndexSelection
+ from shotgun.tui.screens.chat.help_text import (
+     help_text_empty_dir,
+     help_text_with_codebase,
+ )
+ from shotgun.tui.screens.chat.prompt_history import PromptHistory
+ from shotgun.tui.screens.chat_screen.command_providers import (
+     DeleteCodebasePaletteProvider,
+     UnifiedCommandProvider,
+ )
+ from shotgun.tui.screens.chat_screen.hint_message import HintMessage
+ from shotgun.tui.screens.chat_screen.history import ChatHistory
+ from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
+ from shotgun.tui.services.conversation_service import ConversationService
+ from shotgun.tui.state.processing_state import ProcessingStateManager
+ from shotgun.tui.utils.mode_progress import PlaceholderHints
+ from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
+ from shotgun.utils import get_shotgun_home
+
+ logger = logging.getLogger(__name__)
+
+
+ class ChatScreen(Screen[None]):
+     CSS_PATH = "chat.tcss"
+
+     BINDINGS = [
+         ("ctrl+p", "command_palette", "Command Palette"),
+         ("shift+tab", "toggle_mode", "Toggle mode"),
+         ("ctrl+u", "show_usage", "Show usage"),
+     ]
+
+     COMMANDS = {
+         UnifiedCommandProvider,
+     }
+
+     value = reactive("")
+     mode = reactive(AgentType.RESEARCH)
+     history: PromptHistory = PromptHistory()
+     messages = reactive(list[ModelMessage | HintMessage]())
+     indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
+     partial_message: reactive[ModelMessage | None] = reactive(None)
+
+     # Q&A mode state (for structured output clarifying questions)
+     qa_mode = reactive(False)
+     qa_questions: list[str] = []
+     qa_current_index = reactive(0)
+     qa_answers: list[str] = []
+
+     # Working state - keep reactive for Textual watchers
+     working = reactive(False)
+
+     def __init__(
+         self,
+         agent_manager: AgentManager,
+         conversation_manager: ConversationManager,
+         conversation_service: ConversationService,
+         widget_coordinator: WidgetCoordinator,
+         processing_state: ProcessingStateManager,
+         command_handler: CommandHandler,
+         placeholder_hints: PlaceholderHints,
+         codebase_sdk: CodebaseSDK,
+         deps: AgentDeps,
+         continue_session: bool = False,
+         force_reindex: bool = False,
+     ) -> None:
+         """Initialize the ChatScreen.
+
+         All dependencies must be provided via dependency injection.
+         No objects are created in the constructor.
+
+         Args:
+             agent_manager: AgentManager instance for managing agent interactions
+             conversation_manager: ConversationManager for conversation persistence
+             conversation_service: ConversationService for conversation save/load/restore
+             widget_coordinator: WidgetCoordinator for centralized widget updates
+             processing_state: ProcessingStateManager for managing processing state
+             command_handler: CommandHandler for handling slash commands
+             placeholder_hints: PlaceholderHints for providing input hints
+             codebase_sdk: CodebaseSDK for codebase indexing operations
+             deps: AgentDeps configuration for agent dependencies
+             continue_session: Whether to continue a previous session
+             force_reindex: Whether to force reindexing of codebases
+         """
+         super().__init__()
+
+         # All dependencies are now required and injected
+         self.deps = deps
+         self.codebase_sdk = codebase_sdk
+         self.agent_manager = agent_manager
+         self.command_handler = command_handler
+         self.placeholder_hints = placeholder_hints
+         self.conversation_manager = conversation_manager
+         self.conversation_service = conversation_service
+         self.widget_coordinator = widget_coordinator
+         self.processing_state = processing_state
+         self.continue_session = continue_session
+         self.force_reindex = force_reindex
+
+     def on_mount(self) -> None:
+         # Use widget coordinator to focus input
+         self.widget_coordinator.update_prompt_input(focus=True)
+         # Hide spinner initially
+         self.query_one("#spinner").display = False
+
+         # Bind spinner to processing state manager
+         self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))
+
+         # Load conversation history if --continue flag was provided
+         if self.continue_session and self.conversation_manager.exists():
+             self._load_conversation()
+
+         self.call_later(self.check_if_codebase_is_indexed)
+         # Initial update of context indicator
+         self.update_context_indicator()
+
+     async def on_key(self, event: events.Key) -> None:
+         """Handle key presses for cancellation."""
+         # If escape is pressed during Q&A mode, exit Q&A
+         if event.key in (Keys.Escape, Keys.ControlC) and self.qa_mode:
+             self._exit_qa_mode()
+             # Re-enable the input
+             self.widget_coordinator.update_prompt_input(focus=True)
+             # Prevent the event from propagating (don't quit the app)
+             event.stop()
+             return
+
+         # If escape or ctrl+c is pressed while agent is working, cancel the operation
+         if event.key in (Keys.Escape, Keys.ControlC):
+             if self.processing_state.cancel_current_operation(cancel_key=event.key):
+                 # Show cancellation message
+                 self.mount_hint("⚠️ Cancelling operation...")
+                 # Re-enable the input
+                 self.widget_coordinator.update_prompt_input(focus=True)
+                 # Prevent the event from propagating (don't quit the app)
+                 event.stop()
+
+     @work
+     async def check_if_codebase_is_indexed(self) -> None:
+         cur_dir = Path.cwd().resolve()
+         is_empty = all(
+             dir.is_dir() and dir.name in ["__pycache__", ".git", ".shotgun"]
+             for dir in cur_dir.iterdir()
+         )
+         if is_empty or self.continue_session:
+             return
+
+         # If force_reindex is True, delete any existing graphs for this directory
+         if self.force_reindex:
+             accessible_graphs = (
+                 await self.codebase_sdk.list_codebases_for_directory()
+             ).graphs
+             for graph in accessible_graphs:
+                 try:
+                     await self.codebase_sdk.delete_codebase(graph.graph_id)
+                     logger.info(
+                         f"Deleted existing graph {graph.graph_id} due to --force-reindex"
+                     )
+                 except Exception as e:
+                     logger.warning(
+                         f"Failed to delete graph {graph.graph_id} during force reindex: {e}"
+                     )
+
+         # Check if the current directory has any accessible codebases
+         accessible_graphs = (
+             await self.codebase_sdk.list_codebases_for_directory()
+         ).graphs
+         if accessible_graphs:
+             self.mount_hint(help_text_with_codebase(already_indexed=True))
+             return
+
+         # Ask user if they want to index the current directory
+         should_index = await self.app.push_screen_wait(CodebaseIndexPromptScreen())
+         if not should_index:
+             self.mount_hint(help_text_empty_dir())
+             return
+
+         self.mount_hint(help_text_with_codebase(already_indexed=False))
+
+         # Auto-index the current directory with its name
+         cwd_name = cur_dir.name
+         selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
+         self.call_later(lambda: self.index_codebase(selection))
+
+     def watch_mode(self, new_mode: AgentType) -> None:
+         """React to mode changes by updating the agent manager."""
+
+         if self.is_mounted:
+             self.agent_manager.set_agent(new_mode)
+             # Use widget coordinator for all widget updates
+             self.widget_coordinator.update_for_mode_change(new_mode)
+
+     def watch_working(self, is_working: bool) -> None:
+         """Show or hide the spinner based on working state."""
+         logger.debug(f"[WATCH] watch_working called - is_working={is_working}")
+         if self.is_mounted:
+             # Use widget coordinator for all widget updates
+             self.widget_coordinator.update_for_processing_state(is_working)
+
+     def watch_qa_mode(self, qa_mode_active: bool) -> None:
+         """Update UI when Q&A mode state changes."""
+         if self.is_mounted:
+             # Use widget coordinator for all widget updates
+             self.widget_coordinator.update_for_qa_mode(qa_mode_active)
+
+     def watch_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
+         """Update the chat history when messages change."""
+         if self.is_mounted:
+             # Use widget coordinator for all widget updates
+             self.widget_coordinator.update_messages(messages)
+
+     def action_toggle_mode(self) -> None:
+         # Prevent mode switching during Q&A
+         if self.qa_mode:
+             self.notify(
+                 "Cannot switch modes while answering questions",
+                 severity="warning",
+                 timeout=3,
+             )
+             return
+
+         modes = [
+             AgentType.RESEARCH,
+             AgentType.SPECIFY,
+             AgentType.PLAN,
+             AgentType.TASKS,
+             AgentType.EXPORT,
+         ]
+         self.mode = modes[(modes.index(self.mode) + 1) % len(modes)]
+         self.agent_manager.set_agent(self.mode)
+         # Re-focus input after mode change
+         self.call_later(lambda: self.widget_coordinator.update_prompt_input(focus=True))
+
+     def action_show_usage(self) -> None:
+         usage_hint = self.agent_manager.get_usage_hint()
+         logger.info(f"Usage hint: {usage_hint}")
+         if usage_hint:
+             self.mount_hint(usage_hint)
+         else:
+             self.notify("No usage hint available", severity="error")
+
+     async def action_show_context(self) -> None:
+         context_hint = await self.agent_manager.get_context_hint()
+         if context_hint:
+             self.mount_hint(context_hint)
+         else:
+             self.notify("No context analysis available", severity="error")
+
+     @work
+     async def action_compact_conversation(self) -> None:
+         """Compact the conversation history to reduce size."""
+         logger.debug(f"[COMPACT] Starting compaction - working={self.working}")
+
+         try:
+             # Show spinner and enable ESC cancellation
+             from textual.worker import get_current_worker
+
+             self.processing_state.start_processing("Compacting Conversation...")
+             self.processing_state.bind_worker(get_current_worker())
+             logger.debug(f"[COMPACT] Processing started - working={self.working}")
+
+             # Get current message count and tokens
+             original_count = len(self.agent_manager.message_history)
+             original_tokens = await estimate_tokens_from_messages(
+                 self.agent_manager.message_history, self.deps.llm_model
+             )
+
+             # Log compaction start
+             logger.info(
+                 f"Starting conversation compaction - {original_count} messages, {original_tokens} tokens"
+             )
+
+             # Post compaction started event
+             self.agent_manager.post_message(CompactionStartedMessage())
+             logger.debug("[COMPACT] Posted CompactionStartedMessage")
+
+             # Apply compaction with force=True to bypass threshold checks
+             compacted_messages = await apply_persistent_compaction(
+                 self.agent_manager.message_history, self.deps, force=True
+             )
+
+             logger.debug(
+                 f"[COMPACT] Compacted messages: count={len(compacted_messages)}, "
+                 f"last_message_type={type(compacted_messages[-1]).__name__ if compacted_messages else 'None'}"
+             )
+
+             # Check last response usage
+             last_response = next(
+                 (
+                     msg
+                     for msg in reversed(compacted_messages)
+                     if isinstance(msg, ModelResponse)
+                 ),
+                 None,
+             )
+             if last_response:
+                 logger.debug(
+                     f"[COMPACT] Last response has usage: {last_response.usage is not None}, "
+                     f"usage={last_response.usage if last_response.usage else 'None'}"
+                 )
+             else:
+                 logger.warning(
+                     "[COMPACT] No ModelResponse found in compacted messages!"
+                 )
+
+             # Update agent manager's message history
+             self.agent_manager.message_history = compacted_messages
+             logger.debug("[COMPACT] Updated agent_manager.message_history")
+
+             # Calculate after metrics
+             compacted_count = len(compacted_messages)
+             compacted_tokens = await estimate_tokens_from_messages(
+                 compacted_messages, self.deps.llm_model
+             )
+
+             # Calculate reductions
+             message_reduction = (
+                 ((original_count - compacted_count) / original_count) * 100
+                 if original_count > 0
+                 else 0
+             )
+             token_reduction = (
+                 ((original_tokens - compacted_tokens) / original_tokens) * 100
+                 if original_tokens > 0
+                 else 0
+             )
+
+             # Save to conversation file
+             conversation_file = get_shotgun_home() / "conversation.json"
+             manager = ConversationManager(conversation_file)
+             conversation = manager.load()
+
+             if conversation:
+                 conversation.set_agent_messages(compacted_messages)
+                 manager.save(conversation)
+
+             # Post compaction completed event
+             self.agent_manager.post_message(CompactionCompletedMessage())
+
+             # Post message history updated event
+             self.agent_manager.post_message(
+                 MessageHistoryUpdated(
+                     messages=self.agent_manager.ui_message_history.copy(),
+                     agent_type=self.agent_manager._current_agent_type,
+                     file_operations=None,
+                 )
+             )
+             logger.debug("[COMPACT] Posted MessageHistoryUpdated event")
+
+             # Force immediate context indicator update
+             logger.debug("[COMPACT] Calling update_context_indicator()")
+             self.update_context_indicator()
+
+             # Log compaction completion
+             logger.info(
+                 f"Compaction completed: {original_count} → {compacted_count} messages "
+                 f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
+             )
+
+             # Add persistent hint message with stats
+             self.mount_hint(
+                 f"✓ Compacted conversation: {original_count} → {compacted_count} messages "
+                 f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
+             )
+
+         except Exception as e:
+             logger.error(f"Failed to compact conversation: {e}", exc_info=True)
+             self.notify(f"Failed to compact: {e}", severity="error")
+         finally:
+             # Hide spinner
+             self.processing_state.stop_processing()
+             logger.debug(f"[COMPACT] Processing stopped - working={self.working}")
+
+     @work
+     async def action_clear_conversation(self) -> None:
+         """Clear the conversation history."""
+         # Show confirmation dialog
+         should_clear = await self.app.push_screen_wait(
+             ConfirmationDialog(
+                 title="Clear conversation?",
+                 message="This will permanently delete your entire conversation history. "
+                 "All messages, context, and progress will be lost. "
+                 "This action cannot be undone.",
+                 confirm_label="Clear",
+                 cancel_label="Keep",
+                 confirm_variant="warning",
+                 danger=True,
+             )
+         )
+
+         if not should_clear:
+             return  # User cancelled
+
+         try:
+             # Clear message histories
+             self.agent_manager.message_history = []
+             self.agent_manager.ui_message_history = []
+
+             # Use conversation service to clear conversation
+             self.conversation_service.clear_conversation()
+
+             # Post message history updated event to refresh UI
+             self.agent_manager.post_message(
+                 MessageHistoryUpdated(
+                     messages=[],
+                     agent_type=self.agent_manager._current_agent_type,
+                     file_operations=None,
+                 )
+             )
+
+             # Show persistent success message
+             self.mount_hint("✓ Conversation cleared - Starting fresh!")
+
+         except Exception as e:
+             logger.error(f"Failed to clear conversation: {e}", exc_info=True)
+             self.notify(f"Failed to clear: {e}", severity="error")
+
+     @work(exclusive=False)
+     async def update_context_indicator(self) -> None:
+         """Update the context indicator with current usage data."""
+         logger.debug("[CONTEXT] update_context_indicator called")
+         try:
+             logger.debug(
+                 f"[CONTEXT] Getting context analysis - "
+                 f"message_history_count={len(self.agent_manager.message_history)}"
+             )
+             analysis = await self.agent_manager.get_context_analysis()
+
+             if analysis:
+                 logger.debug(
+                     f"[CONTEXT] Analysis received - "
+                     f"agent_context_tokens={analysis.agent_context_tokens}, "
+                     f"max_usable_tokens={analysis.max_usable_tokens}, "
+                     f"percentage={round((analysis.agent_context_tokens / analysis.max_usable_tokens) * 100, 1) if analysis.max_usable_tokens > 0 else 0}%"
+                 )
+             else:
+                 logger.warning("[CONTEXT] Analysis is None!")
+
+             model_name = self.deps.llm_model.name
+             # Use widget coordinator for context indicator update
+             self.widget_coordinator.update_context_indicator(analysis, model_name)
+         except Exception as e:
+             logger.error(
+                 f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
+             )
+
+     def compose(self) -> ComposeResult:
+         """Create child widgets for the app."""
+         with Container(id="window"):
+             yield self.agent_manager
+             yield ChatHistory()
+             with Container(id="footer"):
+                 yield Spinner(
+                     text="Processing...",
+                     id="spinner",
+                     classes="" if self.working else "hidden",
+                 )
+                 yield StatusBar(working=self.working)
+                 yield PromptInput(
+                     text=self.value,
+                     highlight_cursor_line=False,
+                     id="prompt-input",
+                     placeholder=self._placeholder_for_mode(self.mode),
+                 )
+                 with Grid():
+                     yield ModeIndicator(mode=self.mode)
+                     with Container(id="right-footer-indicators"):
+                         yield ContextIndicator(id="context-indicator")
+                         yield Static("", id="indexing-job-display")
+
+     def mount_hint(self, markdown: str) -> None:
+         hint = HintMessage(message=markdown)
+         self.agent_manager.add_hint_message(hint)
+
+     @on(PartialResponseMessage)
+     def handle_partial_response(self, event: PartialResponseMessage) -> None:
+         self.partial_message = event.message
+
+         # Filter event.messages to exclude ModelRequest with only ToolReturnPart
+         # These are intermediate tool results that would render as empty (UserQuestionWidget
+         # filters out ToolReturnPart in format_prompt_parts), causing user messages to disappear
+         filtered_event_messages: list[ModelMessage] = []
+         for msg in event.messages:
+             if isinstance(msg, ModelRequest):
+                 # Check if this ModelRequest has any user-visible parts
+                 has_user_content = any(
+                     not isinstance(part, ToolReturnPart) for part in msg.parts
+                 )
+                 if has_user_content:
+                     filtered_event_messages.append(msg)
+                 # Skip ModelRequest with only ToolReturnPart
+             else:
+                 # Keep all ModelResponse and other message types
+                 filtered_event_messages.append(msg)
+
+         # Build new message list
+         new_message_list = self.messages + cast(
+             list[ModelMessage | HintMessage], filtered_event_messages
+         )
+
+         # Use widget coordinator to set partial response
+         self.widget_coordinator.set_partial_response(
+             self.partial_message, new_message_list
+         )
+
+     def _clear_partial_response(self) -> None:
+         # Use widget coordinator to clear partial response
+         self.widget_coordinator.set_partial_response(None, self.messages)
+
+     def _exit_qa_mode(self) -> None:
+         """Exit Q&A mode and clean up state."""
+         # Track cancellation event
+         track_event(
+             "qa_mode_cancelled",
+             {
+                 "questions_total": len(self.qa_questions),
+                 "questions_answered": len(self.qa_answers),
+             },
+         )
+
+         # Clear Q&A state
+         self.qa_mode = False
+         self.qa_questions = []
+         self.qa_answers = []
+         self.qa_current_index = 0
+
+         # Show cancellation message
+         self.mount_hint("⚠️ Q&A cancelled - You can continue the conversation.")
+
+     @on(ClarifyingQuestionsMessage)
+     def handle_clarifying_questions(self, event: ClarifyingQuestionsMessage) -> None:
+         """Handle clarifying questions from agent structured output.
+
+         Note: Hints are now added synchronously in agent_manager.run() before this
+         handler is called, so we only need to set up Q&A mode state here.
+         """
+         # Clear any streaming partial response (removes final_result JSON)
+         self._clear_partial_response()
+
+         # Enter Q&A mode
+         self.qa_mode = True
+         self.qa_questions = event.questions
+         self.qa_current_index = 0
+         self.qa_answers = []
+
+     @on(MessageHistoryUpdated)
+     def handle_message_history_updated(self, event: MessageHistoryUpdated) -> None:
+         """Handle message history updates from the agent manager."""
+         self._clear_partial_response()
+         self.messages = event.messages
+
+         # Use widget coordinator to refresh placeholder and mode indicator
+         self.widget_coordinator.update_prompt_input(
+             placeholder=self._placeholder_for_mode(self.mode)
+         )
+         self.widget_coordinator.refresh_mode_indicator()
+
+         # Update context indicator
+         self.update_context_indicator()
+
+         # If there are file operations, add a message showing the modified files
+         if event.file_operations:
+             chat_history = self.query_one(ChatHistory)
+             if chat_history.vertical_tail:
+                 tracker = FileOperationTracker(operations=event.file_operations)
+                 display_path = tracker.get_display_path()
+
+                 if display_path:
+                     # Create a simple markdown message with the file path
+                     # The terminal emulator will make this clickable automatically
+                     path_obj = Path(display_path)
+
+                     if len(event.file_operations) == 1:
+                         message = f"📝 Modified: `{display_path}`"
+                     else:
+                         num_files = len({op.file_path for op in event.file_operations})
+                         if path_obj.is_dir():
+                             message = (
+                                 f"📁 Modified {num_files} files in: `{display_path}`"
+                             )
+                         else:
+                             # Common path is a file, show parent directory
+                             message = (
+                                 f"📁 Modified {num_files} files in: `{path_obj.parent}`"
+                             )
+
+                     self.mount_hint(message)
+
+     @on(CompactionStartedMessage)
+     def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
+         """Update spinner text when compaction starts."""
+         # Use widget coordinator to update spinner text
+         self.widget_coordinator.update_spinner_text("Compacting Conversation...")
+
+     @on(CompactionCompletedMessage)
+     def handle_compaction_completed(self, event: CompactionCompletedMessage) -> None:
+         """Reset spinner text when compaction completes."""
+         # Use widget coordinator to update spinner text
+         self.widget_coordinator.update_spinner_text("Processing...")
+
+     async def handle_model_selected(self, result: ModelConfigUpdated | None) -> None:
+         """Handle model selection from ModelPickerScreen.
+
+         Called as a callback when the ModelPickerScreen is dismissed.
+
+         Args:
+             result: ModelConfigUpdated if a model was selected, None if cancelled
+         """
+         if result is None:
+             return
+
+         try:
+             # Update the model configuration in dependencies
+             self.deps.llm_model = result.model_config
+
+             # Update the agent manager's model configuration
+             self.agent_manager.deps.llm_model = result.model_config
+
+             # Get current analysis and update context indicator via coordinator
+             analysis = await self.agent_manager.get_context_analysis()
+             self.widget_coordinator.update_context_indicator(analysis, result.new_model)
+
+             # Get model display name for user feedback
+             model_spec = MODEL_SPECS.get(result.new_model)
+             model_display = (
+                 model_spec.short_name if model_spec else str(result.new_model)
+             )
+
+             # Format provider information
+             key_method = (
+                 "Shotgun Account" if result.key_provider == "shotgun" else "BYOK"
+             )
+             provider_display = result.provider.value.title()
+
+             # Track model switch in telemetry
+             track_event(
+                 "model_switched",
+                 {
+                     "old_model": str(result.old_model) if result.old_model else None,
+                     "new_model": str(result.new_model),
+                     "provider": result.provider.value,
+                     "key_provider": result.key_provider.value,
+                 },
+             )
+
+             # Show confirmation to user with provider info
+             self.agent_manager.add_hint_message(
+                 HintMessage(
+                     message=f"✓ Switched to {model_display} ({provider_display}, {key_method})"
+                 )
+             )
+
+         except Exception as e:
+             logger.error(f"Failed to handle model selection: {e}")
+             self.agent_manager.add_hint_message(
+                 HintMessage(message=f"⚠ Failed to update model configuration: {e}")
+             )
+
+     @on(PromptInput.Submitted)
+     async def handle_submit(self, message: PromptInput.Submitted) -> None:
+         text = message.text.strip()
+
+         # If empty text, just clear input and return
+         if not text:
+             self.widget_coordinator.update_prompt_input(clear=True)
+             self.value = ""
+             return
+
+         # Handle Q&A mode (from structured output clarifying questions)
+         if self.qa_mode and self.qa_questions:
+             # Collect answer
+             self.qa_answers.append(text)
+
+             # Show answer
+             if len(self.qa_questions) == 1:
+                 self.agent_manager.add_hint_message(
+                     HintMessage(message=f"**A:** {text}")
+                 )
+             else:
+                 q_num = self.qa_current_index + 1
+                 self.agent_manager.add_hint_message(
+                     HintMessage(message=f"**A{q_num}:** {text}")
+                 )
+
+             # Move to next or finish
+             self.qa_current_index += 1
+
+             if self.qa_current_index < len(self.qa_questions):
+                 # Show next question
+                 next_q = self.qa_questions[self.qa_current_index]
+                 next_q_num = self.qa_current_index + 1
+                 self.agent_manager.add_hint_message(
+                     HintMessage(message=f"**Q{next_q_num}:** {next_q}")
+                 )
+             else:
+                 # All answered - format and send back
+                 if len(self.qa_questions) == 1:
+                     # Single question - just send the answer
+                     formatted_qa = f"Q: {self.qa_questions[0]}\nA: {self.qa_answers[0]}"
+                 else:
+                     # Multiple questions - format all Q&A pairs
+                     formatted_qa = "\n\n".join(
+                         f"Q{i + 1}: {q}\nA{i + 1}: {a}"
+                         for i, (q, a) in enumerate(
+                             zip(self.qa_questions, self.qa_answers, strict=True)
+                         )
+                     )
+
+                 # Exit Q&A mode
+                 self.qa_mode = False
+                 self.qa_questions = []
+                 self.qa_answers = []
+                 self.qa_current_index = 0
+
+                 # Send answers back to agent
+                 self.run_agent(formatted_qa)
+
+             # Clear input
+             self.widget_coordinator.update_prompt_input(clear=True)
+             self.value = ""
+             return
+
+         # Check if it's a command
+         if self.command_handler.is_command(text):
+             success, response = self.command_handler.handle_command(text)
+
+             # Add the command to history
+             self.history.append(message.text)
+
+             # Display the command in chat history
+             user_message = ModelRequest(parts=[UserPromptPart(content=text)])
+             self.messages = self.messages + [user_message]
+
+             # Display the response (help text or error message)
+             response_message = ModelResponse(parts=[TextPart(content=response)])
+             self.messages = self.messages + [response_message]
+
+             # Clear the input
+             self.widget_coordinator.update_prompt_input(clear=True)
+             self.value = ""
+             return
+
+         # Not a command, process as normal
+         self.history.append(message.text)
+
+         # Add user message to agent_manager's history BEFORE running the agent
+         # This ensures immediate visual feedback AND proper deduplication
+         user_message = ModelRequest.user_text_prompt(text)
+         self.agent_manager.ui_message_history.append(user_message)
+         self.messages = self.agent_manager.ui_message_history.copy()
+
+         # Clear the input
+         self.value = ""
+         self.run_agent(text)  # Use stripped text
+
+         self.widget_coordinator.update_prompt_input(clear=True)
+
+     def _placeholder_for_mode(self, mode: AgentType, force_new: bool = False) -> str:
+         """Return the placeholder text appropriate for the current mode.
+
+         Args:
+             mode: The current agent mode.
+             force_new: If True, force selection of a new random hint.
+
+         Returns:
+             Dynamic placeholder hint based on mode and progress.
+         """
+         return self.placeholder_hints.get_placeholder_for_mode(mode)
+
+     def index_codebase_command(self) -> None:
+         # Simplified: always index current working directory with its name
+         cur_dir = Path.cwd().resolve()
+         cwd_name = cur_dir.name
+         selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
+         self.call_later(lambda: self.index_codebase(selection))
+
+     def delete_codebase_command(self) -> None:
+         self.app.push_screen(
+             CommandPalette(
+                 providers=[DeleteCodebasePaletteProvider],
+                 placeholder="Select a codebase to delete…",
+             )
+         )
+
+     def delete_codebase_from_palette(self, graph_id: str) -> None:
+         stack = getattr(self.app, "screen_stack", None)
+         if stack and isinstance(stack[-1], CommandPalette):
+             self.app.pop_screen()
+
+         self.call_later(lambda: self.delete_codebase(graph_id))
+
+     @work
+     async def delete_codebase(self, graph_id: str) -> None:
+         try:
+             await self.codebase_sdk.delete_codebase(graph_id)
+             self.notify(f"Deleted codebase: {graph_id}", severity="information")
+         except CodebaseNotFoundError as exc:
+             self.notify(str(exc), severity="error")
+         except Exception as exc:  # pragma: no cover - defensive UI path
+             self.notify(f"Failed to delete codebase: {exc}", severity="error")
+
+     def _is_kuzu_corruption_error(self, exception: Exception) -> bool:
+         """Check if error is related to kuzu database corruption.
+
+         Args:
+             exception: The exception to check
+
+         Returns:
+             True if the error indicates kuzu database corruption
+         """
+         error_str = str(exception).lower()
+         error_indicators = [
+             "not a directory",
+             "errno 20",
+             "corrupted",
+             ".kuzu",
+             "ioexception",
+             "unordered_map",  # C++ STL map errors from kuzu
+             "key not found",  # unordered_map::at errors
+             "std::exception",  # Generic C++ exceptions from kuzu
+         ]
+         return any(indicator in error_str for indicator in error_indicators)
+
+     @work
+     async def index_codebase(self, selection: CodebaseIndexSelection) -> None:
+         label = self.query_one("#indexing-job-display", Static)
+         label.update(
+             f"[$foreground-muted]Indexing codebase: [bold $text-accent]{selection.name}[/][/]"
+         )
+         label.refresh()
+
+         def create_progress_bar(percentage: float, width: int = 20) -> str:
+             """Create a visual progress bar using Unicode block characters."""
+             filled = int((percentage / 100) * width)
+             empty = width - filled
+             return "▓" * filled + "░" * empty
+
+         # Spinner animation frames
+         spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+
+         # Progress state (shared between timer and progress callback)
+         progress_state: dict[str, int | float] = {
+             "frame_index": 0,
+             "percentage": 0.0,
+         }
+
+         def update_progress_display() -> None:
+             """Update progress bar on timer - runs every 100ms."""
+             # Advance spinner frame
+             frame_idx = int(progress_state["frame_index"])
+             progress_state["frame_index"] = (frame_idx + 1) % len(spinner_frames)
+             spinner = spinner_frames[frame_idx]
+
+             # Get current state
+             pct = float(progress_state["percentage"])
+             bar = create_progress_bar(pct)
+
+             # Update label
+             label.update(
+                 f"[$foreground-muted]Indexing codebase: {spinner} {bar} {pct:.0f}%[/]"
+             )
+
+         def progress_callback(progress_info: IndexProgress) -> None:
+             """Update progress state (timer renders it independently)."""
+             # Calculate overall percentage (0-95%, reserve 95-100% for finalization)
+             if progress_info.phase == ProgressPhase.STRUCTURE:
+                 # Phase 1: 0-10%, always show 5% while running, 10% when complete
+                 overall_pct = 10.0 if progress_info.phase_complete else 5.0
+             elif progress_info.phase == ProgressPhase.DEFINITIONS:
+                 # Phase 2: 10-80% based on files processed
+                 if progress_info.total and progress_info.total > 0:
+                     phase_pct = (progress_info.current / progress_info.total) * 70.0
+                     overall_pct = 10.0 + phase_pct
+                 else:
+                     overall_pct = 10.0
+             elif progress_info.phase == ProgressPhase.RELATIONSHIPS:
+                 # Phase 3: 80-95% based on relationships processed (cap at 95%)
+                 if progress_info.total and progress_info.total > 0:
+                     phase_pct = (progress_info.current / progress_info.total) * 15.0
+                     overall_pct = 80.0 + phase_pct
+                 else:
+                     overall_pct = 80.0
+             else:
+                 overall_pct = 0.0
+
+             # Update shared state (timer will render it)
+             progress_state["percentage"] = overall_pct
+
+         # Start progress animation timer (10 fps = 100ms interval)
+         progress_timer = self.set_interval(0.1, update_progress_display)
+
+         # Retry logic for handling kuzu corruption
+         max_retries = 3
+
+         for attempt in range(max_retries):
+             try:
+                 # Clean up corrupted DBs before retry (skip on first attempt)
+                 if attempt > 0:
+                     logger.info(
+                         f"Retry attempt {attempt + 1}/{max_retries} - cleaning up corrupted databases"
+                     )
+                     manager = CodebaseGraphManager(
+                         self.codebase_sdk.service.storage_dir
+                     )
+                     cleaned = await manager.cleanup_corrupted_databases()
+                     logger.info(f"Cleaned up {len(cleaned)} corrupted database(s)")
+                     self.notify(
+                         f"Retrying indexing after cleanup (attempt {attempt + 1}/{max_retries})...",
+                         severity="information",
+                     )
+
+                 # Pass the current working directory as the indexed_from_cwd
+                 logger.debug(
+                     f"Starting indexing - repo_path: {selection.repo_path}, "
+                     f"name: {selection.name}, cwd: {Path.cwd().resolve()}"
+                 )
+                 result = await self.codebase_sdk.index_codebase(
+                     selection.repo_path,
+                     selection.name,
+                     indexed_from_cwd=str(Path.cwd().resolve()),
+                     progress_callback=progress_callback,
+                 )
+
+                 # Success! Stop progress animation
+                 progress_timer.stop()
+
+                 # Show 100% completion after indexing finishes
+                 final_bar = create_progress_bar(100.0)
+                 label.update(
+                     f"[$foreground-muted]Indexing codebase: {final_bar} 100%[/]"
+                 )
+                 label.refresh()
+
+                 logger.info(
+                     f"Successfully indexed codebase '{result.name}' (ID: {result.graph_id})"
+                 )
+                 self.notify(
+                     f"Indexed codebase '{result.name}' (ID: {result.graph_id})",
+                     severity="information",
+                     timeout=8,
+                 )
+                 break  # Success - exit retry loop
+
+             except CodebaseAlreadyIndexedError as exc:
+                 progress_timer.stop()
+                 logger.warning(f"Codebase already indexed: {exc}")
+                 self.notify(str(exc), severity="warning")
+                 return
+             except InvalidPathError as exc:
+                 progress_timer.stop()
+                 logger.error(f"Invalid path error: {exc}")
+                 self.notify(str(exc), severity="error")
+                 return
+
+             except Exception as exc:  # pragma: no cover - defensive UI path
+                 # Check if this is a kuzu corruption error and we have retries left
+                 if attempt < max_retries - 1 and self._is_kuzu_corruption_error(exc):
+                     logger.warning(
+                         f"Kuzu corruption detected on attempt {attempt + 1}/{max_retries}: {exc}. "
+                         f"Will retry after cleanup..."
+                     )
+                     # Exponential backoff: 1s, 2s
+                     await asyncio.sleep(2**attempt)
+                     continue
+
+                 # Either final retry failed OR not a corruption error - show error
+                 logger.exception(
+                     f"Failed to index codebase after {attempt + 1} attempts - "
+                     f"repo_path: {selection.repo_path}, name: {selection.name}, error: {exc}"
+                 )
+                 self.notify(
+                     f"Failed to index codebase after {attempt + 1} attempts: {exc}",
+                     severity="error",
+                     timeout=30,  # Keep error visible for 30 seconds
+                 )
+                 break
+
+         # Always stop the progress timer and clean up label
+         progress_timer.stop()
+         label.update("")
+         label.refresh()
+
+     @work
+     async def run_agent(self, message: str) -> None:
+         prompt = None
+
+         # Start processing with spinner
+         from textual.worker import get_current_worker
+
+         self.processing_state.start_processing("Processing...")
+         self.processing_state.bind_worker(get_current_worker())
+
+         prompt = message
+
+         try:
+             await self.agent_manager.run(
+                 prompt=prompt,
+             )
+         except asyncio.CancelledError:
+             # Handle cancellation gracefully - DO NOT re-raise
+             self.mount_hint("⚠️ Operation cancelled by user")
+         except Exception as e:
+             # Log with full stack trace to shotgun.log
+             logger.exception(
+                 "Agent run failed",
+                 extra={
+                     "agent_mode": self.mode.value,
+                     "error_type": type(e).__name__,
+                 },
+             )
+
+             # Determine user-friendly message based on error type
+             error_name = type(e).__name__
+             error_message = str(e)
+
+             if "APIStatusError" in error_name and "overload" in error_message.lower():
+                 hint = "⚠️ The AI service is temporarily overloaded. Please wait a moment and try again."
+             elif "APIStatusError" in error_name and "rate" in error_message.lower():
+                 hint = "⚠️ Rate limit reached. Please wait before trying again."
+             elif "APIStatusError" in error_name:
+                 hint = f"⚠️ AI service error: {error_message}"
+             else:
+                 hint = f"⚠️ An error occurred: {error_message}\n\nCheck logs at ~/.shotgun-sh/logs/shotgun.log"
+
+             self.mount_hint(hint)
+         finally:
+             self.processing_state.stop_processing()
+
+             # Save conversation after each interaction
+             self._save_conversation()
+
+             self.widget_coordinator.update_prompt_input(focus=True)
+
+     def _save_conversation(self) -> None:
+         """Save the current conversation to persistent storage."""
+         # Use conversation service for saving
+         self.conversation_service.save_conversation(self.agent_manager)
+
+     def _load_conversation(self) -> None:
+         """Load conversation from persistent storage."""
+         # Use conversation service for restoration
+         success, error_msg, restored_type = (
+             self.conversation_service.restore_conversation(
+                 self.agent_manager, self.deps.usage_manager
+             )
+         )
+
+         if not success and error_msg:
+             self.mount_hint(error_msg)
+         elif success and restored_type:
+             # Update the current mode to match restored conversation
+             self.mode = restored_type