shotgun-sh 0.2.8.dev2__py3-none-any.whl → 0.2.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- shotgun/agents/agent_manager.py +354 -46
- shotgun/agents/common.py +14 -8
- shotgun/agents/config/constants.py +0 -6
- shotgun/agents/config/manager.py +66 -35
- shotgun/agents/config/models.py +41 -1
- shotgun/agents/config/provider.py +33 -5
- shotgun/agents/context_analyzer/__init__.py +28 -0
- shotgun/agents/context_analyzer/analyzer.py +471 -0
- shotgun/agents/context_analyzer/constants.py +9 -0
- shotgun/agents/context_analyzer/formatter.py +115 -0
- shotgun/agents/context_analyzer/models.py +212 -0
- shotgun/agents/conversation_history.py +2 -0
- shotgun/agents/conversation_manager.py +35 -19
- shotgun/agents/export.py +2 -2
- shotgun/agents/history/compaction.py +9 -4
- shotgun/agents/history/history_processors.py +113 -5
- shotgun/agents/history/token_counting/anthropic.py +17 -1
- shotgun/agents/history/token_counting/base.py +14 -3
- shotgun/agents/history/token_counting/openai.py +11 -1
- shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
- shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
- shotgun/agents/history/token_counting/utils.py +0 -3
- shotgun/agents/plan.py +2 -2
- shotgun/agents/research.py +3 -3
- shotgun/agents/specify.py +2 -2
- shotgun/agents/tasks.py +2 -2
- shotgun/agents/tools/codebase/codebase_shell.py +6 -0
- shotgun/agents/tools/codebase/directory_lister.py +6 -0
- shotgun/agents/tools/codebase/file_read.py +11 -2
- shotgun/agents/tools/codebase/query_graph.py +6 -0
- shotgun/agents/tools/codebase/retrieve_code.py +6 -0
- shotgun/agents/tools/file_management.py +27 -7
- shotgun/agents/tools/registry.py +217 -0
- shotgun/agents/tools/web_search/__init__.py +8 -8
- shotgun/agents/tools/web_search/anthropic.py +8 -2
- shotgun/agents/tools/web_search/gemini.py +7 -1
- shotgun/agents/tools/web_search/openai.py +7 -1
- shotgun/agents/tools/web_search/utils.py +2 -2
- shotgun/agents/usage_manager.py +16 -11
- shotgun/api_endpoints.py +7 -3
- shotgun/build_constants.py +3 -3
- shotgun/cli/clear.py +53 -0
- shotgun/cli/compact.py +186 -0
- shotgun/cli/config.py +8 -5
- shotgun/cli/context.py +111 -0
- shotgun/cli/export.py +1 -1
- shotgun/cli/feedback.py +4 -2
- shotgun/cli/models.py +1 -0
- shotgun/cli/plan.py +1 -1
- shotgun/cli/research.py +1 -1
- shotgun/cli/specify.py +1 -1
- shotgun/cli/tasks.py +1 -1
- shotgun/cli/update.py +16 -2
- shotgun/codebase/core/change_detector.py +5 -3
- shotgun/codebase/core/code_retrieval.py +4 -2
- shotgun/codebase/core/ingestor.py +10 -8
- shotgun/codebase/core/manager.py +13 -4
- shotgun/codebase/core/nl_query.py +1 -1
- shotgun/exceptions.py +32 -0
- shotgun/logging_config.py +18 -27
- shotgun/main.py +73 -11
- shotgun/posthog_telemetry.py +37 -28
- shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +3 -2
- shotgun/sentry_telemetry.py +163 -16
- shotgun/settings.py +238 -0
- shotgun/telemetry.py +10 -33
- shotgun/tui/app.py +243 -43
- shotgun/tui/commands/__init__.py +1 -1
- shotgun/tui/components/context_indicator.py +179 -0
- shotgun/tui/components/mode_indicator.py +70 -0
- shotgun/tui/components/status_bar.py +48 -0
- shotgun/tui/containers.py +91 -0
- shotgun/tui/dependencies.py +39 -0
- shotgun/tui/protocols.py +45 -0
- shotgun/tui/screens/chat/__init__.py +5 -0
- shotgun/tui/screens/chat/chat.tcss +54 -0
- shotgun/tui/screens/chat/chat_screen.py +1254 -0
- shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
- shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
- shotgun/tui/screens/chat/help_text.py +40 -0
- shotgun/tui/screens/chat/prompt_history.py +48 -0
- shotgun/tui/screens/chat.tcss +11 -0
- shotgun/tui/screens/chat_screen/command_providers.py +78 -2
- shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
- shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
- shotgun/tui/screens/chat_screen/history/chat_history.py +115 -0
- shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
- shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
- shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
- shotgun/tui/screens/confirmation_dialog.py +151 -0
- shotgun/tui/screens/feedback.py +4 -4
- shotgun/tui/screens/github_issue.py +102 -0
- shotgun/tui/screens/model_picker.py +49 -24
- shotgun/tui/screens/onboarding.py +431 -0
- shotgun/tui/screens/pipx_migration.py +153 -0
- shotgun/tui/screens/provider_config.py +50 -27
- shotgun/tui/screens/shotgun_auth.py +2 -2
- shotgun/tui/screens/welcome.py +14 -11
- shotgun/tui/services/__init__.py +5 -0
- shotgun/tui/services/conversation_service.py +184 -0
- shotgun/tui/state/__init__.py +7 -0
- shotgun/tui/state/processing_state.py +185 -0
- shotgun/tui/utils/mode_progress.py +14 -7
- shotgun/tui/widgets/__init__.py +5 -0
- shotgun/tui/widgets/widget_coordinator.py +263 -0
- shotgun/utils/file_system_utils.py +22 -2
- shotgun/utils/marketing.py +110 -0
- shotgun/utils/update_checker.py +69 -14
- shotgun_sh-0.2.17.dist-info/METADATA +465 -0
- shotgun_sh-0.2.17.dist-info/RECORD +194 -0
- {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.2.17.dist-info}/entry_points.txt +1 -0
- {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.2.17.dist-info}/licenses/LICENSE +1 -1
- shotgun/tui/screens/chat.py +0 -996
- shotgun/tui/screens/chat_screen/history.py +0 -335
- shotgun_sh-0.2.8.dev2.dist-info/METADATA +0 -126
- shotgun_sh-0.2.8.dev2.dist-info/RECORD +0 -155
- {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.2.17.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1254 @@
|
|
|
1
|
+
"""Main chat screen implementation."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import logging
|
|
5
|
+
import time
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import cast
|
|
9
|
+
|
|
10
|
+
from pydantic_ai.messages import (
|
|
11
|
+
ModelMessage,
|
|
12
|
+
ModelRequest,
|
|
13
|
+
ModelResponse,
|
|
14
|
+
TextPart,
|
|
15
|
+
ToolCallPart,
|
|
16
|
+
ToolReturnPart,
|
|
17
|
+
UserPromptPart,
|
|
18
|
+
)
|
|
19
|
+
from textual import events, on, work
|
|
20
|
+
from textual.app import ComposeResult
|
|
21
|
+
from textual.command import CommandPalette
|
|
22
|
+
from textual.containers import Container, Grid
|
|
23
|
+
from textual.keys import Keys
|
|
24
|
+
from textual.reactive import reactive
|
|
25
|
+
from textual.screen import Screen
|
|
26
|
+
from textual.widgets import Static
|
|
27
|
+
|
|
28
|
+
from shotgun.agents.agent_manager import (
|
|
29
|
+
AgentManager,
|
|
30
|
+
ClarifyingQuestionsMessage,
|
|
31
|
+
CompactionCompletedMessage,
|
|
32
|
+
CompactionStartedMessage,
|
|
33
|
+
MessageHistoryUpdated,
|
|
34
|
+
ModelConfigUpdated,
|
|
35
|
+
PartialResponseMessage,
|
|
36
|
+
)
|
|
37
|
+
from shotgun.agents.config import get_config_manager
|
|
38
|
+
from shotgun.agents.config.models import MODEL_SPECS
|
|
39
|
+
from shotgun.agents.conversation_manager import ConversationManager
|
|
40
|
+
from shotgun.agents.history.compaction import apply_persistent_compaction
|
|
41
|
+
from shotgun.agents.history.token_estimation import estimate_tokens_from_messages
|
|
42
|
+
from shotgun.agents.models import (
|
|
43
|
+
AgentDeps,
|
|
44
|
+
AgentType,
|
|
45
|
+
FileOperationTracker,
|
|
46
|
+
)
|
|
47
|
+
from shotgun.codebase.core.manager import (
|
|
48
|
+
CodebaseAlreadyIndexedError,
|
|
49
|
+
CodebaseGraphManager,
|
|
50
|
+
)
|
|
51
|
+
from shotgun.codebase.models import IndexProgress, ProgressPhase
|
|
52
|
+
from shotgun.exceptions import ContextSizeLimitExceeded
|
|
53
|
+
from shotgun.posthog_telemetry import track_event
|
|
54
|
+
from shotgun.sdk.codebase import CodebaseSDK
|
|
55
|
+
from shotgun.sdk.exceptions import CodebaseNotFoundError, InvalidPathError
|
|
56
|
+
from shotgun.tui.commands import CommandHandler
|
|
57
|
+
from shotgun.tui.components.context_indicator import ContextIndicator
|
|
58
|
+
from shotgun.tui.components.mode_indicator import ModeIndicator
|
|
59
|
+
from shotgun.tui.components.prompt_input import PromptInput
|
|
60
|
+
from shotgun.tui.components.spinner import Spinner
|
|
61
|
+
from shotgun.tui.components.status_bar import StatusBar
|
|
62
|
+
from shotgun.tui.screens.chat.codebase_index_prompt_screen import (
|
|
63
|
+
CodebaseIndexPromptScreen,
|
|
64
|
+
)
|
|
65
|
+
from shotgun.tui.screens.chat.codebase_index_selection import CodebaseIndexSelection
|
|
66
|
+
from shotgun.tui.screens.chat.help_text import (
|
|
67
|
+
help_text_empty_dir,
|
|
68
|
+
help_text_with_codebase,
|
|
69
|
+
)
|
|
70
|
+
from shotgun.tui.screens.chat.prompt_history import PromptHistory
|
|
71
|
+
from shotgun.tui.screens.chat_screen.command_providers import (
|
|
72
|
+
DeleteCodebasePaletteProvider,
|
|
73
|
+
UnifiedCommandProvider,
|
|
74
|
+
)
|
|
75
|
+
from shotgun.tui.screens.chat_screen.hint_message import HintMessage
|
|
76
|
+
from shotgun.tui.screens.chat_screen.history import ChatHistory
|
|
77
|
+
from shotgun.tui.screens.confirmation_dialog import ConfirmationDialog
|
|
78
|
+
from shotgun.tui.screens.onboarding import OnboardingModal
|
|
79
|
+
from shotgun.tui.services.conversation_service import ConversationService
|
|
80
|
+
from shotgun.tui.state.processing_state import ProcessingStateManager
|
|
81
|
+
from shotgun.tui.utils.mode_progress import PlaceholderHints
|
|
82
|
+
from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
|
|
83
|
+
from shotgun.utils import get_shotgun_home
|
|
84
|
+
from shotgun.utils.marketing import MarketingManager
|
|
85
|
+
|
|
86
|
+
logger = logging.getLogger(__name__)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class ChatScreen(Screen[None]):
    """Main chat screen: hosts the chat history, prompt input, and agent status widgets."""

    CSS_PATH = "chat.tcss"

    # Key bindings surfaced in the Textual footer / command palette.
    BINDINGS = [
        ("ctrl+p", "command_palette", "Command Palette"),
        ("shift+tab", "toggle_mode", "Toggle mode"),
        ("ctrl+u", "show_usage", "Show usage"),
    ]

    COMMANDS = {
        UnifiedCommandProvider,
    }

    # Current prompt-input text.
    value = reactive("")
    # Active agent mode; watch_mode propagates changes to the agent manager.
    mode = reactive(AgentType.RESEARCH)
    # NOTE(review): class-level mutable — shared across all ChatScreen
    # instances; fine for a single-screen app, confirm if multiple screens
    # are ever created.
    history: PromptHistory = PromptHistory()
    messages = reactive(list[ModelMessage | HintMessage]())
    indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)

    # Q&A mode state (for structured output clarifying questions)
    qa_mode = reactive(False)
    qa_questions: list[str] = []
    qa_current_index = reactive(0)
    qa_answers: list[str] = []

    # Working state - keep reactive for Textual watchers
    working = reactive(False)

    # Throttle context indicator updates (in seconds)
    _last_context_update: float = 0.0
    _context_update_throttle: float = 5.0  # 5 seconds
|
|
120
|
+
|
|
121
|
+
def __init__(
|
|
122
|
+
self,
|
|
123
|
+
agent_manager: AgentManager,
|
|
124
|
+
conversation_manager: ConversationManager,
|
|
125
|
+
conversation_service: ConversationService,
|
|
126
|
+
widget_coordinator: WidgetCoordinator,
|
|
127
|
+
processing_state: ProcessingStateManager,
|
|
128
|
+
command_handler: CommandHandler,
|
|
129
|
+
placeholder_hints: PlaceholderHints,
|
|
130
|
+
codebase_sdk: CodebaseSDK,
|
|
131
|
+
deps: AgentDeps,
|
|
132
|
+
continue_session: bool = False,
|
|
133
|
+
force_reindex: bool = False,
|
|
134
|
+
) -> None:
|
|
135
|
+
"""Initialize the ChatScreen.
|
|
136
|
+
|
|
137
|
+
All dependencies must be provided via dependency injection.
|
|
138
|
+
No objects are created in the constructor.
|
|
139
|
+
|
|
140
|
+
Args:
|
|
141
|
+
agent_manager: AgentManager instance for managing agent interactions
|
|
142
|
+
conversation_manager: ConversationManager for conversation persistence
|
|
143
|
+
conversation_service: ConversationService for conversation save/load/restore
|
|
144
|
+
widget_coordinator: WidgetCoordinator for centralized widget updates
|
|
145
|
+
processing_state: ProcessingStateManager for managing processing state
|
|
146
|
+
command_handler: CommandHandler for handling slash commands
|
|
147
|
+
placeholder_hints: PlaceholderHints for providing input hints
|
|
148
|
+
codebase_sdk: CodebaseSDK for codebase indexing operations
|
|
149
|
+
deps: AgentDeps configuration for agent dependencies
|
|
150
|
+
continue_session: Whether to continue a previous session
|
|
151
|
+
force_reindex: Whether to force reindexing of codebases
|
|
152
|
+
"""
|
|
153
|
+
super().__init__()
|
|
154
|
+
|
|
155
|
+
# All dependencies are now required and injected
|
|
156
|
+
self.deps = deps
|
|
157
|
+
self.codebase_sdk = codebase_sdk
|
|
158
|
+
self.agent_manager = agent_manager
|
|
159
|
+
self.command_handler = command_handler
|
|
160
|
+
self.placeholder_hints = placeholder_hints
|
|
161
|
+
self.conversation_manager = conversation_manager
|
|
162
|
+
self.conversation_service = conversation_service
|
|
163
|
+
self.widget_coordinator = widget_coordinator
|
|
164
|
+
self.processing_state = processing_state
|
|
165
|
+
self.continue_session = continue_session
|
|
166
|
+
self.force_reindex = force_reindex
|
|
167
|
+
|
|
168
|
+
def on_mount(self) -> None:
|
|
169
|
+
# Use widget coordinator to focus input
|
|
170
|
+
self.widget_coordinator.update_prompt_input(focus=True)
|
|
171
|
+
# Hide spinner initially
|
|
172
|
+
self.query_one("#spinner").display = False
|
|
173
|
+
|
|
174
|
+
# Bind spinner to processing state manager
|
|
175
|
+
self.processing_state.bind_spinner(self.query_one("#spinner", Spinner))
|
|
176
|
+
|
|
177
|
+
# Load conversation history if --continue flag was provided
|
|
178
|
+
# Use call_later to handle async exists() check
|
|
179
|
+
if self.continue_session:
|
|
180
|
+
self.call_later(self._check_and_load_conversation)
|
|
181
|
+
|
|
182
|
+
self.call_later(self.check_if_codebase_is_indexed)
|
|
183
|
+
# Initial update of context indicator
|
|
184
|
+
self.update_context_indicator()
|
|
185
|
+
|
|
186
|
+
# Show onboarding popup if not shown before
|
|
187
|
+
self.call_later(self._check_and_show_onboarding)
|
|
188
|
+
|
|
189
|
+
async def on_key(self, event: events.Key) -> None:
|
|
190
|
+
"""Handle key presses for cancellation."""
|
|
191
|
+
# If escape is pressed during Q&A mode, exit Q&A
|
|
192
|
+
if event.key in (Keys.Escape, Keys.ControlC) and self.qa_mode:
|
|
193
|
+
self._exit_qa_mode()
|
|
194
|
+
# Re-enable the input
|
|
195
|
+
self.widget_coordinator.update_prompt_input(focus=True)
|
|
196
|
+
# Prevent the event from propagating (don't quit the app)
|
|
197
|
+
event.stop()
|
|
198
|
+
return
|
|
199
|
+
|
|
200
|
+
# If escape or ctrl+c is pressed while agent is working, cancel the operation
|
|
201
|
+
if event.key in (Keys.Escape, Keys.ControlC):
|
|
202
|
+
if self.processing_state.cancel_current_operation(cancel_key=event.key):
|
|
203
|
+
# Show cancellation message
|
|
204
|
+
self.mount_hint("⚠️ Cancelling operation...")
|
|
205
|
+
# Re-enable the input
|
|
206
|
+
self.widget_coordinator.update_prompt_input(focus=True)
|
|
207
|
+
# Prevent the event from propagating (don't quit the app)
|
|
208
|
+
event.stop()
|
|
209
|
+
|
|
210
|
+
@work
|
|
211
|
+
async def check_if_codebase_is_indexed(self) -> None:
|
|
212
|
+
cur_dir = Path.cwd().resolve()
|
|
213
|
+
is_empty = all(
|
|
214
|
+
dir.is_dir() and dir.name in ["__pycache__", ".git", ".shotgun"]
|
|
215
|
+
for dir in cur_dir.iterdir()
|
|
216
|
+
)
|
|
217
|
+
if is_empty or self.continue_session:
|
|
218
|
+
return
|
|
219
|
+
|
|
220
|
+
# If force_reindex is True, delete any existing graphs for this directory
|
|
221
|
+
if self.force_reindex:
|
|
222
|
+
accessible_graphs = (
|
|
223
|
+
await self.codebase_sdk.list_codebases_for_directory()
|
|
224
|
+
).graphs
|
|
225
|
+
for graph in accessible_graphs:
|
|
226
|
+
try:
|
|
227
|
+
await self.codebase_sdk.delete_codebase(graph.graph_id)
|
|
228
|
+
logger.info(
|
|
229
|
+
f"Deleted existing graph {graph.graph_id} due to --force-reindex"
|
|
230
|
+
)
|
|
231
|
+
except Exception as e:
|
|
232
|
+
logger.warning(
|
|
233
|
+
f"Failed to delete graph {graph.graph_id} during force reindex: {e}"
|
|
234
|
+
)
|
|
235
|
+
|
|
236
|
+
# Check if the current directory has any accessible codebases
|
|
237
|
+
accessible_graphs = (
|
|
238
|
+
await self.codebase_sdk.list_codebases_for_directory()
|
|
239
|
+
).graphs
|
|
240
|
+
if accessible_graphs:
|
|
241
|
+
self.mount_hint(help_text_with_codebase(already_indexed=True))
|
|
242
|
+
return
|
|
243
|
+
|
|
244
|
+
# Ask user if they want to index the current directory
|
|
245
|
+
should_index = await self.app.push_screen_wait(CodebaseIndexPromptScreen())
|
|
246
|
+
if not should_index:
|
|
247
|
+
self.mount_hint(help_text_empty_dir())
|
|
248
|
+
return
|
|
249
|
+
|
|
250
|
+
self.mount_hint(help_text_with_codebase(already_indexed=False))
|
|
251
|
+
|
|
252
|
+
# Auto-index the current directory with its name
|
|
253
|
+
cwd_name = cur_dir.name
|
|
254
|
+
selection = CodebaseIndexSelection(repo_path=cur_dir, name=cwd_name)
|
|
255
|
+
self.call_later(lambda: self.index_codebase(selection))
|
|
256
|
+
|
|
257
|
+
def watch_mode(self, new_mode: AgentType) -> None:
|
|
258
|
+
"""React to mode changes by updating the agent manager."""
|
|
259
|
+
|
|
260
|
+
if self.is_mounted:
|
|
261
|
+
self.agent_manager.set_agent(new_mode)
|
|
262
|
+
# Use widget coordinator for all widget updates
|
|
263
|
+
self.widget_coordinator.update_for_mode_change(new_mode)
|
|
264
|
+
|
|
265
|
+
def watch_working(self, is_working: bool) -> None:
|
|
266
|
+
"""Show or hide the spinner based on working state."""
|
|
267
|
+
logger.debug(f"[WATCH] watch_working called - is_working={is_working}")
|
|
268
|
+
if self.is_mounted:
|
|
269
|
+
# Use widget coordinator for all widget updates
|
|
270
|
+
self.widget_coordinator.update_for_processing_state(is_working)
|
|
271
|
+
|
|
272
|
+
def watch_qa_mode(self, qa_mode_active: bool) -> None:
|
|
273
|
+
"""Update UI when Q&A mode state changes."""
|
|
274
|
+
if self.is_mounted:
|
|
275
|
+
# Use widget coordinator for all widget updates
|
|
276
|
+
self.widget_coordinator.update_for_qa_mode(qa_mode_active)
|
|
277
|
+
|
|
278
|
+
def watch_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
|
|
279
|
+
"""Update the chat history when messages change."""
|
|
280
|
+
if self.is_mounted:
|
|
281
|
+
# Use widget coordinator for all widget updates
|
|
282
|
+
self.widget_coordinator.update_messages(messages)
|
|
283
|
+
|
|
284
|
+
def action_toggle_mode(self) -> None:
|
|
285
|
+
# Prevent mode switching during Q&A
|
|
286
|
+
if self.qa_mode:
|
|
287
|
+
self.notify(
|
|
288
|
+
"Cannot switch modes while answering questions",
|
|
289
|
+
severity="warning",
|
|
290
|
+
timeout=3,
|
|
291
|
+
)
|
|
292
|
+
return
|
|
293
|
+
|
|
294
|
+
modes = [
|
|
295
|
+
AgentType.RESEARCH,
|
|
296
|
+
AgentType.SPECIFY,
|
|
297
|
+
AgentType.PLAN,
|
|
298
|
+
AgentType.TASKS,
|
|
299
|
+
AgentType.EXPORT,
|
|
300
|
+
]
|
|
301
|
+
self.mode = modes[(modes.index(self.mode) + 1) % len(modes)]
|
|
302
|
+
self.agent_manager.set_agent(self.mode)
|
|
303
|
+
# Re-focus input after mode change
|
|
304
|
+
self.call_later(lambda: self.widget_coordinator.update_prompt_input(focus=True))
|
|
305
|
+
|
|
306
|
+
def action_show_usage(self) -> None:
|
|
307
|
+
usage_hint = self.agent_manager.get_usage_hint()
|
|
308
|
+
logger.info(f"Usage hint: {usage_hint}")
|
|
309
|
+
if usage_hint:
|
|
310
|
+
self.mount_hint(usage_hint)
|
|
311
|
+
else:
|
|
312
|
+
self.notify("No usage hint available", severity="error")
|
|
313
|
+
|
|
314
|
+
async def action_show_context(self) -> None:
|
|
315
|
+
context_hint = await self.agent_manager.get_context_hint()
|
|
316
|
+
if context_hint:
|
|
317
|
+
self.mount_hint(context_hint)
|
|
318
|
+
else:
|
|
319
|
+
self.notify("No context analysis available", severity="error")
|
|
320
|
+
|
|
321
|
+
    def action_view_onboarding(self) -> None:
        """Show the onboarding modal on demand (outside the first-run flow)."""
        self.app.push_screen(OnboardingModal())
|
|
324
|
+
|
|
325
|
+
@work
|
|
326
|
+
async def action_compact_conversation(self) -> None:
|
|
327
|
+
"""Compact the conversation history to reduce size."""
|
|
328
|
+
logger.debug(f"[COMPACT] Starting compaction - working={self.working}")
|
|
329
|
+
|
|
330
|
+
try:
|
|
331
|
+
# Show spinner and enable ESC cancellation
|
|
332
|
+
from textual.worker import get_current_worker
|
|
333
|
+
|
|
334
|
+
self.processing_state.start_processing("Compacting Conversation...")
|
|
335
|
+
self.processing_state.bind_worker(get_current_worker())
|
|
336
|
+
logger.debug(f"[COMPACT] Processing started - working={self.working}")
|
|
337
|
+
|
|
338
|
+
# Get current message count and tokens
|
|
339
|
+
original_count = len(self.agent_manager.message_history)
|
|
340
|
+
original_tokens = await estimate_tokens_from_messages(
|
|
341
|
+
self.agent_manager.message_history, self.deps.llm_model
|
|
342
|
+
)
|
|
343
|
+
|
|
344
|
+
# Log compaction start
|
|
345
|
+
logger.info(
|
|
346
|
+
f"Starting conversation compaction - {original_count} messages, {original_tokens} tokens"
|
|
347
|
+
)
|
|
348
|
+
|
|
349
|
+
# Post compaction started event
|
|
350
|
+
self.agent_manager.post_message(CompactionStartedMessage())
|
|
351
|
+
logger.debug("[COMPACT] Posted CompactionStartedMessage")
|
|
352
|
+
|
|
353
|
+
# Apply compaction with force=True to bypass threshold checks
|
|
354
|
+
compacted_messages = await apply_persistent_compaction(
|
|
355
|
+
self.agent_manager.message_history, self.deps, force=True
|
|
356
|
+
)
|
|
357
|
+
|
|
358
|
+
logger.debug(
|
|
359
|
+
f"[COMPACT] Compacted messages: count={len(compacted_messages)}, "
|
|
360
|
+
f"last_message_type={type(compacted_messages[-1]).__name__ if compacted_messages else 'None'}"
|
|
361
|
+
)
|
|
362
|
+
|
|
363
|
+
# Check last response usage
|
|
364
|
+
last_response = next(
|
|
365
|
+
(
|
|
366
|
+
msg
|
|
367
|
+
for msg in reversed(compacted_messages)
|
|
368
|
+
if isinstance(msg, ModelResponse)
|
|
369
|
+
),
|
|
370
|
+
None,
|
|
371
|
+
)
|
|
372
|
+
if last_response:
|
|
373
|
+
logger.debug(
|
|
374
|
+
f"[COMPACT] Last response has usage: {last_response.usage is not None}, "
|
|
375
|
+
f"usage={last_response.usage if last_response.usage else 'None'}"
|
|
376
|
+
)
|
|
377
|
+
else:
|
|
378
|
+
logger.warning(
|
|
379
|
+
"[COMPACT] No ModelResponse found in compacted messages!"
|
|
380
|
+
)
|
|
381
|
+
|
|
382
|
+
# Update agent manager's message history
|
|
383
|
+
self.agent_manager.message_history = compacted_messages
|
|
384
|
+
logger.debug("[COMPACT] Updated agent_manager.message_history")
|
|
385
|
+
|
|
386
|
+
# Calculate after metrics
|
|
387
|
+
compacted_count = len(compacted_messages)
|
|
388
|
+
compacted_tokens = await estimate_tokens_from_messages(
|
|
389
|
+
compacted_messages, self.deps.llm_model
|
|
390
|
+
)
|
|
391
|
+
|
|
392
|
+
# Calculate reductions
|
|
393
|
+
message_reduction = (
|
|
394
|
+
((original_count - compacted_count) / original_count) * 100
|
|
395
|
+
if original_count > 0
|
|
396
|
+
else 0
|
|
397
|
+
)
|
|
398
|
+
token_reduction = (
|
|
399
|
+
((original_tokens - compacted_tokens) / original_tokens) * 100
|
|
400
|
+
if original_tokens > 0
|
|
401
|
+
else 0
|
|
402
|
+
)
|
|
403
|
+
|
|
404
|
+
# Save to conversation file
|
|
405
|
+
conversation_file = get_shotgun_home() / "conversation.json"
|
|
406
|
+
manager = ConversationManager(conversation_file)
|
|
407
|
+
conversation = await manager.load()
|
|
408
|
+
|
|
409
|
+
if conversation:
|
|
410
|
+
conversation.set_agent_messages(compacted_messages)
|
|
411
|
+
await manager.save(conversation)
|
|
412
|
+
|
|
413
|
+
# Post compaction completed event
|
|
414
|
+
self.agent_manager.post_message(CompactionCompletedMessage())
|
|
415
|
+
|
|
416
|
+
# Post message history updated event
|
|
417
|
+
self.agent_manager.post_message(
|
|
418
|
+
MessageHistoryUpdated(
|
|
419
|
+
messages=self.agent_manager.ui_message_history.copy(),
|
|
420
|
+
agent_type=self.agent_manager._current_agent_type,
|
|
421
|
+
file_operations=None,
|
|
422
|
+
)
|
|
423
|
+
)
|
|
424
|
+
logger.debug("[COMPACT] Posted MessageHistoryUpdated event")
|
|
425
|
+
|
|
426
|
+
# Force immediate context indicator update
|
|
427
|
+
logger.debug("[COMPACT] Calling update_context_indicator()")
|
|
428
|
+
self.update_context_indicator()
|
|
429
|
+
|
|
430
|
+
# Log compaction completion
|
|
431
|
+
logger.info(
|
|
432
|
+
f"Compaction completed: {original_count} → {compacted_count} messages "
|
|
433
|
+
f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
|
|
434
|
+
)
|
|
435
|
+
|
|
436
|
+
# Add persistent hint message with stats
|
|
437
|
+
self.mount_hint(
|
|
438
|
+
f"✓ Compacted conversation: {original_count} → {compacted_count} messages "
|
|
439
|
+
f"({message_reduction:.0f}% message reduction, {token_reduction:.0f}% token reduction)"
|
|
440
|
+
)
|
|
441
|
+
|
|
442
|
+
except Exception as e:
|
|
443
|
+
logger.error(f"Failed to compact conversation: {e}", exc_info=True)
|
|
444
|
+
self.notify(f"Failed to compact: {e}", severity="error")
|
|
445
|
+
finally:
|
|
446
|
+
# Hide spinner
|
|
447
|
+
self.processing_state.stop_processing()
|
|
448
|
+
logger.debug(f"[COMPACT] Processing stopped - working={self.working}")
|
|
449
|
+
|
|
450
|
+
@work
|
|
451
|
+
async def action_clear_conversation(self) -> None:
|
|
452
|
+
"""Clear the conversation history."""
|
|
453
|
+
# Show confirmation dialog
|
|
454
|
+
should_clear = await self.app.push_screen_wait(
|
|
455
|
+
ConfirmationDialog(
|
|
456
|
+
title="Clear conversation?",
|
|
457
|
+
message="This will permanently delete your entire conversation history. "
|
|
458
|
+
"All messages, context, and progress will be lost. "
|
|
459
|
+
"This action cannot be undone.",
|
|
460
|
+
confirm_label="Clear",
|
|
461
|
+
cancel_label="Keep",
|
|
462
|
+
confirm_variant="warning",
|
|
463
|
+
danger=True,
|
|
464
|
+
)
|
|
465
|
+
)
|
|
466
|
+
|
|
467
|
+
if not should_clear:
|
|
468
|
+
return # User cancelled
|
|
469
|
+
|
|
470
|
+
try:
|
|
471
|
+
# Clear message histories
|
|
472
|
+
self.agent_manager.message_history = []
|
|
473
|
+
self.agent_manager.ui_message_history = []
|
|
474
|
+
|
|
475
|
+
# Use conversation service to clear conversation
|
|
476
|
+
await self.conversation_service.clear_conversation()
|
|
477
|
+
|
|
478
|
+
# Post message history updated event to refresh UI
|
|
479
|
+
self.agent_manager.post_message(
|
|
480
|
+
MessageHistoryUpdated(
|
|
481
|
+
messages=[],
|
|
482
|
+
agent_type=self.agent_manager._current_agent_type,
|
|
483
|
+
file_operations=None,
|
|
484
|
+
)
|
|
485
|
+
)
|
|
486
|
+
|
|
487
|
+
# Show persistent success message
|
|
488
|
+
self.mount_hint("✓ Conversation cleared - Starting fresh!")
|
|
489
|
+
|
|
490
|
+
except Exception as e:
|
|
491
|
+
logger.error(f"Failed to clear conversation: {e}", exc_info=True)
|
|
492
|
+
self.notify(f"Failed to clear: {e}", severity="error")
|
|
493
|
+
|
|
494
|
+
@work(exclusive=False)
|
|
495
|
+
async def update_context_indicator(self) -> None:
|
|
496
|
+
"""Update the context indicator with current usage data."""
|
|
497
|
+
logger.debug("[CONTEXT] update_context_indicator called")
|
|
498
|
+
try:
|
|
499
|
+
logger.debug(
|
|
500
|
+
f"[CONTEXT] Getting context analysis - "
|
|
501
|
+
f"message_history_count={len(self.agent_manager.message_history)}"
|
|
502
|
+
)
|
|
503
|
+
analysis = await self.agent_manager.get_context_analysis()
|
|
504
|
+
|
|
505
|
+
if analysis:
|
|
506
|
+
logger.debug(
|
|
507
|
+
f"[CONTEXT] Analysis received - "
|
|
508
|
+
f"agent_context_tokens={analysis.agent_context_tokens}, "
|
|
509
|
+
f"max_usable_tokens={analysis.max_usable_tokens}, "
|
|
510
|
+
f"percentage={round((analysis.agent_context_tokens / analysis.max_usable_tokens) * 100, 1) if analysis.max_usable_tokens > 0 else 0}%"
|
|
511
|
+
)
|
|
512
|
+
else:
|
|
513
|
+
logger.warning("[CONTEXT] Analysis is None!")
|
|
514
|
+
|
|
515
|
+
model_name = self.deps.llm_model.name
|
|
516
|
+
# Use widget coordinator for context indicator update
|
|
517
|
+
self.widget_coordinator.update_context_indicator(analysis, model_name)
|
|
518
|
+
except Exception as e:
|
|
519
|
+
logger.error(
|
|
520
|
+
f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
|
|
521
|
+
)
|
|
522
|
+
|
|
523
|
+
@work(exclusive=False)
|
|
524
|
+
async def update_context_indicator_with_messages(
|
|
525
|
+
self,
|
|
526
|
+
agent_messages: list[ModelMessage],
|
|
527
|
+
ui_messages: list[ModelMessage | HintMessage],
|
|
528
|
+
) -> None:
|
|
529
|
+
"""Update the context indicator with specific message sets (for streaming updates).
|
|
530
|
+
|
|
531
|
+
Args:
|
|
532
|
+
agent_messages: Agent message history including streaming messages (for token counting)
|
|
533
|
+
ui_messages: UI message history including hints and streaming messages
|
|
534
|
+
"""
|
|
535
|
+
try:
|
|
536
|
+
from shotgun.agents.context_analyzer.analyzer import ContextAnalyzer
|
|
537
|
+
|
|
538
|
+
analyzer = ContextAnalyzer(self.deps.llm_model)
|
|
539
|
+
# Analyze the combined message histories for accurate progressive token counts
|
|
540
|
+
analysis = await analyzer.analyze_conversation(agent_messages, ui_messages)
|
|
541
|
+
|
|
542
|
+
if analysis:
|
|
543
|
+
model_name = self.deps.llm_model.name
|
|
544
|
+
self.widget_coordinator.update_context_indicator(analysis, model_name)
|
|
545
|
+
except Exception as e:
|
|
546
|
+
logger.error(
|
|
547
|
+
f"Failed to update context indicator with streaming messages: {e}",
|
|
548
|
+
exc_info=True,
|
|
549
|
+
)
|
|
550
|
+
|
|
551
|
+
    def compose(self) -> ComposeResult:
        """Create child widgets for the app."""
        with Container(id="window"):
            # The agent manager is yielded as a widget so it can post
            # messages (CompactionStartedMessage, MessageHistoryUpdated, …)
            # into the Textual message pump.
            yield self.agent_manager
            yield ChatHistory()
            with Container(id="footer"):
                # Hidden unless `working` is already True at compose time.
                yield Spinner(
                    text="Processing...",
                    id="spinner",
                    classes="" if self.working else "hidden",
                )
                yield StatusBar(working=self.working)
                yield PromptInput(
                    text=self.value,
                    highlight_cursor_line=False,
                    id="prompt-input",
                    placeholder=self._placeholder_for_mode(self.mode),
                )
                # Bottom row: mode on the left, context/indexing on the right.
                with Grid():
                    yield ModeIndicator(mode=self.mode)
                    with Container(id="right-footer-indicators"):
                        yield ContextIndicator(id="context-indicator")
                        yield Static("", id="indexing-job-display")
|
|
574
|
+
|
|
575
|
+
def mount_hint(self, markdown: str) -> None:
|
|
576
|
+
hint = HintMessage(message=markdown)
|
|
577
|
+
self.agent_manager.add_hint_message(hint)
|
|
578
|
+
|
|
579
|
+
@on(PartialResponseMessage)
def handle_partial_response(self, event: PartialResponseMessage) -> None:
    """Render a streaming partial response and throttle context updates.

    Args:
        event: Carries the in-flight partial message plus the streaming
            message history accumulated so far.
    """
    # Filter event.messages to exclude ModelRequest with only ToolReturnPart
    # These are intermediate tool results that would render as empty (UserQuestionWidget
    # filters out ToolReturnPart in format_prompt_parts), causing user messages to disappear
    filtered_event_messages: list[ModelMessage] = []
    for msg in event.messages:
        if isinstance(msg, ModelRequest):
            # Check if this ModelRequest has any user-visible parts
            has_user_content = any(
                not isinstance(part, ToolReturnPart) for part in msg.parts
            )
            if has_user_content:
                filtered_event_messages.append(msg)
            # Skip ModelRequest with only ToolReturnPart
        else:
            # Keep all ModelResponse and other message types
            filtered_event_messages.append(msg)

    # Build new message list combining existing messages with new streaming content
    new_message_list = self.messages + cast(
        list[ModelMessage | HintMessage], filtered_event_messages
    )

    # Use widget coordinator to set partial response
    self.widget_coordinator.set_partial_response(event.message, new_message_list)

    # Skip context updates for file write operations (they don't add to input context)
    has_file_write = any(
        isinstance(msg, ModelResponse)
        and any(
            isinstance(part, ToolCallPart)
            and part.tool_name in ("write_file", "append_file")
            for part in msg.parts
        )
        for msg in event.messages
    )

    if has_file_write:
        return  # Skip context update for file writes

    # Throttle context indicator updates to improve performance during streaming
    # Only update at most once per 5 seconds to avoid excessive token calculations
    current_time = time.time()
    if current_time - self._last_context_update >= self._context_update_throttle:
        self._last_context_update = current_time
        # Update context indicator with full message history including streaming messages
        # Combine existing agent history with new streaming messages for accurate token count
        combined_agent_history = self.agent_manager.message_history + event.messages
        self.update_context_indicator_with_messages(
            combined_agent_history, new_message_list
        )
|
|
631
|
+
|
|
632
|
+
def _clear_partial_response(self) -> None:
    """Remove any in-flight streaming response from the chat view."""
    # Passing None tells the coordinator there is nothing partial to show.
    current_messages = self.messages
    self.widget_coordinator.set_partial_response(None, current_messages)
|
|
635
|
+
|
|
636
|
+
def _exit_qa_mode(self) -> None:
    """Exit Q&A mode and clean up state."""
    # Record the cancellation for analytics before the state is wiped.
    total_questions = len(self.qa_questions)
    answered = len(self.qa_answers)
    track_event(
        "qa_mode_cancelled",
        {
            "questions_total": total_questions,
            "questions_answered": answered,
        },
    )

    # Reset all Q&A bookkeeping.
    self.qa_mode = False
    self.qa_questions = []
    self.qa_answers = []
    self.qa_current_index = 0

    # Let the user know the conversation can continue normally.
    self.mount_hint("⚠️ Q&A cancelled - You can continue the conversation.")
|
|
655
|
+
|
|
656
|
+
@on(ClarifyingQuestionsMessage)
def handle_clarifying_questions(self, event: ClarifyingQuestionsMessage) -> None:
    """Handle clarifying questions from agent structured output.

    Note: Hints are now added synchronously in agent_manager.run() before this
    handler is called, so we only need to set up Q&A mode state here.
    """
    # Drop the streamed partial response (it contains the final_result JSON).
    self._clear_partial_response()

    # Switch into Q&A mode with a fresh answer slate.
    self.qa_questions = event.questions
    self.qa_answers = []
    self.qa_current_index = 0
    self.qa_mode = True
|
|
671
|
+
|
|
672
|
+
@on(MessageHistoryUpdated)
async def handle_message_history_updated(
    self, event: MessageHistoryUpdated
) -> None:
    """Handle message history updates from the agent manager.

    Refreshes the chat view, placeholder, mode/context indicators, surfaces
    a "Modified" hint for file operations (deduplicated against recent
    hints), and finally checks for marketing messages to display.
    """
    self._clear_partial_response()
    self.messages = event.messages

    # Use widget coordinator to refresh placeholder and mode indicator
    self.widget_coordinator.update_prompt_input(
        placeholder=self._placeholder_for_mode(self.mode)
    )
    self.widget_coordinator.refresh_mode_indicator()

    # Update context indicator
    self.update_context_indicator()

    # If there are file operations, add a message showing the modified files
    # Skip if hint was already added by agent_manager (e.g., in QA mode)
    if event.file_operations:
        # Check if file operation hint already exists in recent messages
        file_hint_exists = any(
            isinstance(msg, HintMessage)
            and (
                msg.message.startswith("📝 Modified:")
                or msg.message.startswith("📁 Modified")
            )
            for msg in event.messages[-5:]  # Check last 5 messages
        )

        if not file_hint_exists:
            chat_history = self.query_one(ChatHistory)
            if chat_history.vertical_tail:
                tracker = FileOperationTracker(operations=event.file_operations)
                display_path = tracker.get_display_path()

                if display_path:
                    # Create a simple markdown message with the file path
                    # The terminal emulator will make this clickable automatically
                    path_obj = Path(display_path)

                    if len(event.file_operations) == 1:
                        message = f"📝 Modified: `{display_path}`"
                    else:
                        # Count distinct files (one file may have several ops).
                        num_files = len(
                            {op.file_path for op in event.file_operations}
                        )
                        if path_obj.is_dir():
                            message = f"📁 Modified {num_files} files in: `{display_path}`"
                        else:
                            # Common path is a file, show parent directory
                            message = f"📁 Modified {num_files} files in: `{path_obj.parent}`"

                    self.mount_hint(message)

    # Check and display any marketing messages
    # Local import avoids a circular import with the app module.
    from shotgun.tui.app import ShotgunApp

    app = cast(ShotgunApp, self.app)
    await MarketingManager.check_and_display_messages(
        app.config_manager, event.file_operations, self.mount_hint
    )
|
|
734
|
+
|
|
735
|
+
@on(CompactionStartedMessage)
def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
    """Swap the spinner label while conversation compaction is running."""
    # The coordinator owns the spinner widget; we only change its text.
    self.widget_coordinator.update_spinner_text("Compacting Conversation...")
|
|
740
|
+
|
|
741
|
+
@on(CompactionCompletedMessage)
def handle_compaction_completed(self, event: CompactionCompletedMessage) -> None:
    """Restore the default spinner label once compaction has finished."""
    # Revert to the generic label used for normal agent runs.
    self.widget_coordinator.update_spinner_text("Processing...")
|
|
746
|
+
|
|
747
|
+
async def handle_model_selected(self, result: ModelConfigUpdated | None) -> None:
    """Handle model selection from ModelPickerScreen.

    Called as a callback when the ModelPickerScreen is dismissed.

    Args:
        result: ModelConfigUpdated if a model was selected, None if cancelled
    """
    if result is None:
        return

    try:
        # Update the model configuration in dependencies
        self.deps.llm_model = result.model_config

        # Update the agent manager's model configuration
        self.agent_manager.deps.llm_model = result.model_config

        # Get current analysis and update context indicator via coordinator
        analysis = await self.agent_manager.get_context_analysis()
        self.widget_coordinator.update_context_indicator(analysis, result.new_model)

        # Get model display name for user feedback
        model_spec = MODEL_SPECS.get(result.new_model)
        model_display = (
            model_spec.short_name if model_spec else str(result.new_model)
        )

        # Format provider information
        key_method = (
            "Shotgun Account" if result.key_provider == "shotgun" else "BYOK"
        )
        provider_display = result.provider.value.title()

        # Track model switch in telemetry
        track_event(
            "model_switched",
            {
                "old_model": str(result.old_model) if result.old_model else None,
                "new_model": str(result.new_model),
                "provider": result.provider.value,
                "key_provider": result.key_provider.value,
            },
        )

        # Show confirmation to user with provider info
        self.agent_manager.add_hint_message(
            HintMessage(
                message=f"✓ Switched to {model_display} ({provider_display}, {key_method})"
            )
        )

    except Exception as e:
        # Surface the failure in the chat rather than crashing the screen.
        logger.error(f"Failed to handle model selection: {e}")
        self.agent_manager.add_hint_message(
            HintMessage(message=f"⚠ Failed to update model configuration: {e}")
        )
|
|
804
|
+
|
|
805
|
+
@on(PromptInput.Submitted)
async def handle_submit(self, message: PromptInput.Submitted) -> None:
    """Route submitted prompt text.

    Order of handling: empty input → Q&A-mode answer collection →
    slash command → normal agent run.
    """
    text = message.text.strip()

    # If empty text, just clear input and return
    if not text:
        self.widget_coordinator.update_prompt_input(clear=True)
        self.value = ""
        return

    # Handle Q&A mode (from structured output clarifying questions)
    if self.qa_mode and self.qa_questions:
        # Collect answer
        self.qa_answers.append(text)

        # Show answer
        if len(self.qa_questions) == 1:
            self.agent_manager.add_hint_message(
                HintMessage(message=f"**A:** {text}")
            )
        else:
            q_num = self.qa_current_index + 1
            self.agent_manager.add_hint_message(
                HintMessage(message=f"**A{q_num}:** {text}")
            )

        # Move to next or finish
        self.qa_current_index += 1

        if self.qa_current_index < len(self.qa_questions):
            # Show next question
            next_q = self.qa_questions[self.qa_current_index]
            next_q_num = self.qa_current_index + 1
            self.agent_manager.add_hint_message(
                HintMessage(message=f"**Q{next_q_num}:** {next_q}")
            )
        else:
            # All answered - format and send back
            if len(self.qa_questions) == 1:
                # Single question - just send the answer
                formatted_qa = f"Q: {self.qa_questions[0]}\nA: {self.qa_answers[0]}"
            else:
                # Multiple questions - format all Q&A pairs
                formatted_qa = "\n\n".join(
                    f"Q{i + 1}: {q}\nA{i + 1}: {a}"
                    for i, (q, a) in enumerate(
                        zip(self.qa_questions, self.qa_answers, strict=True)
                    )
                )

            # Exit Q&A mode
            self.qa_mode = False
            self.qa_questions = []
            self.qa_answers = []
            self.qa_current_index = 0

            # Send answers back to agent
            self.run_agent(formatted_qa)

        # Clear input
        self.widget_coordinator.update_prompt_input(clear=True)
        self.value = ""
        return

    # Check if it's a command
    if self.command_handler.is_command(text):
        success, response = self.command_handler.handle_command(text)

        # Add the command to history
        self.history.append(message.text)

        # Display the command in chat history
        user_message = ModelRequest(parts=[UserPromptPart(content=text)])
        self.messages = self.messages + [user_message]

        # Display the response (help text or error message)
        response_message = ModelResponse(parts=[TextPart(content=response)])
        self.messages = self.messages + [response_message]

        # Clear the input
        self.widget_coordinator.update_prompt_input(clear=True)
        self.value = ""
        return

    # Not a command, process as normal
    self.history.append(message.text)

    # Add user message to agent_manager's history BEFORE running the agent
    # This ensures immediate visual feedback AND proper deduplication
    user_message = ModelRequest.user_text_prompt(text)
    self.agent_manager.ui_message_history.append(user_message)
    self.messages = self.agent_manager.ui_message_history.copy()

    # Clear the input
    self.value = ""
    self.run_agent(text)  # Use stripped text

    self.widget_coordinator.update_prompt_input(clear=True)
|
|
903
|
+
|
|
904
|
+
def _placeholder_for_mode(self, mode: AgentType, force_new: bool = False) -> str:
    """Return the placeholder text appropriate for the current mode.

    Args:
        mode: The current agent mode.
        force_new: If True, force selection of a new random hint.
            NOTE(review): currently unused — the delegate call below does
            not receive it; confirm whether get_placeholder_for_mode
            should take it.

    Returns:
        Dynamic placeholder hint based on mode and progress.
    """
    return self.placeholder_hints.get_placeholder_for_mode(mode)
|
|
915
|
+
|
|
916
|
+
def index_codebase_command(self) -> None:
    """Index the current working directory under its own directory name."""
    # Simplified: always index current working directory with its name.
    repo = Path.cwd().resolve()
    selection = CodebaseIndexSelection(repo_path=repo, name=repo.name)
    self.call_later(lambda: self.index_codebase(selection))
|
|
922
|
+
|
|
923
|
+
def delete_codebase_command(self) -> None:
    """Open a command palette listing codebases that can be deleted."""
    palette = CommandPalette(
        providers=[DeleteCodebasePaletteProvider],
        placeholder="Select a codebase to delete…",
    )
    self.app.push_screen(palette)
|
|
930
|
+
|
|
931
|
+
def delete_codebase_from_palette(self, graph_id: str) -> None:
    """Close the palette (if it is still on top) and schedule the deletion."""
    # The palette may already have dismissed itself; only pop when it is
    # actually the top-most screen.
    screens = getattr(self.app, "screen_stack", None)
    if screens and isinstance(screens[-1], CommandPalette):
        self.app.pop_screen()

    self.call_later(lambda: self.delete_codebase(graph_id))
|
|
937
|
+
|
|
938
|
+
@work
async def delete_codebase(self, graph_id: str) -> None:
    """Delete an indexed codebase and surface the outcome as a notification."""
    try:
        await self.codebase_sdk.delete_codebase(graph_id)
        self.notify(f"Deleted codebase: {graph_id}", severity="information")
    except CodebaseNotFoundError as err:
        # Known failure mode: the id no longer exists.
        self.notify(str(err), severity="error")
    except Exception as err:  # pragma: no cover - defensive UI path
        self.notify(f"Failed to delete codebase: {err}", severity="error")
|
|
947
|
+
|
|
948
|
+
def _is_kuzu_corruption_error(self, exception: Exception) -> bool:
|
|
949
|
+
"""Check if error is related to kuzu database corruption.
|
|
950
|
+
|
|
951
|
+
Args:
|
|
952
|
+
exception: The exception to check
|
|
953
|
+
|
|
954
|
+
Returns:
|
|
955
|
+
True if the error indicates kuzu database corruption
|
|
956
|
+
"""
|
|
957
|
+
error_str = str(exception).lower()
|
|
958
|
+
error_indicators = [
|
|
959
|
+
"not a directory",
|
|
960
|
+
"errno 20",
|
|
961
|
+
"corrupted",
|
|
962
|
+
".kuzu",
|
|
963
|
+
"ioexception",
|
|
964
|
+
"unordered_map", # C++ STL map errors from kuzu
|
|
965
|
+
"key not found", # unordered_map::at errors
|
|
966
|
+
"std::exception", # Generic C++ exceptions from kuzu
|
|
967
|
+
]
|
|
968
|
+
return any(indicator in error_str for indicator in error_indicators)
|
|
969
|
+
|
|
970
|
+
@work
async def index_codebase(self, selection: CodebaseIndexSelection) -> None:
    """Index the selected codebase with animated footer progress.

    Retries up to three times when the failure looks like kuzu database
    corruption (see _is_kuzu_corruption_error), cleaning up corrupted
    databases between attempts with exponential backoff.
    """
    label = self.query_one("#indexing-job-display", Static)
    label.update(
        f"[$foreground-muted]Indexing codebase: [bold $text-accent]{selection.name}[/][/]"
    )
    label.refresh()

    def create_progress_bar(percentage: float, width: int = 20) -> str:
        """Create a visual progress bar using Unicode block characters."""
        filled = int((percentage / 100) * width)
        empty = width - filled
        return "▓" * filled + "░" * empty

    # Spinner animation frames
    spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Progress state (shared between timer and progress callback)
    progress_state: dict[str, int | float] = {
        "frame_index": 0,
        "percentage": 0.0,
    }

    def update_progress_display() -> None:
        """Update progress bar on timer - runs every 100ms."""
        # Advance spinner frame
        frame_idx = int(progress_state["frame_index"])
        progress_state["frame_index"] = (frame_idx + 1) % len(spinner_frames)
        spinner = spinner_frames[frame_idx]

        # Get current state
        pct = float(progress_state["percentage"])
        bar = create_progress_bar(pct)

        # Update label
        label.update(
            f"[$foreground-muted]Indexing codebase: {spinner} {bar} {pct:.0f}%[/]"
        )

    def progress_callback(progress_info: IndexProgress) -> None:
        """Update progress state (timer renders it independently)."""
        # Calculate overall percentage (0-95%, reserve 95-100% for finalization)
        if progress_info.phase == ProgressPhase.STRUCTURE:
            # Phase 1: 0-10%, always show 5% while running, 10% when complete
            overall_pct = 10.0 if progress_info.phase_complete else 5.0
        elif progress_info.phase == ProgressPhase.DEFINITIONS:
            # Phase 2: 10-80% based on files processed
            if progress_info.total and progress_info.total > 0:
                phase_pct = (progress_info.current / progress_info.total) * 70.0
                overall_pct = 10.0 + phase_pct
            else:
                overall_pct = 10.0
        elif progress_info.phase == ProgressPhase.RELATIONSHIPS:
            # Phase 3: 80-95% based on relationships processed (cap at 95%)
            if progress_info.total and progress_info.total > 0:
                phase_pct = (progress_info.current / progress_info.total) * 15.0
                overall_pct = 80.0 + phase_pct
            else:
                overall_pct = 80.0
        else:
            overall_pct = 0.0

        # Update shared state (timer will render it)
        progress_state["percentage"] = overall_pct

    # Start progress animation timer (10 fps = 100ms interval)
    progress_timer = self.set_interval(0.1, update_progress_display)

    # Retry logic for handling kuzu corruption
    max_retries = 3

    for attempt in range(max_retries):
        try:
            # Clean up corrupted DBs before retry (skip on first attempt)
            if attempt > 0:
                logger.info(
                    f"Retry attempt {attempt + 1}/{max_retries} - cleaning up corrupted databases"
                )
                manager = CodebaseGraphManager(
                    self.codebase_sdk.service.storage_dir
                )
                cleaned = await manager.cleanup_corrupted_databases()
                logger.info(f"Cleaned up {len(cleaned)} corrupted database(s)")
                self.notify(
                    f"Retrying indexing after cleanup (attempt {attempt + 1}/{max_retries})...",
                    severity="information",
                )

            # Pass the current working directory as the indexed_from_cwd
            logger.debug(
                f"Starting indexing - repo_path: {selection.repo_path}, "
                f"name: {selection.name}, cwd: {Path.cwd().resolve()}"
            )
            result = await self.codebase_sdk.index_codebase(
                selection.repo_path,
                selection.name,
                indexed_from_cwd=str(Path.cwd().resolve()),
                progress_callback=progress_callback,
            )

            # Success! Stop progress animation
            progress_timer.stop()

            # Show 100% completion after indexing finishes
            final_bar = create_progress_bar(100.0)
            label.update(
                f"[$foreground-muted]Indexing codebase: {final_bar} 100%[/]"
            )
            label.refresh()

            logger.info(
                f"Successfully indexed codebase '{result.name}' (ID: {result.graph_id})"
            )
            self.notify(
                f"Indexed codebase '{result.name}' (ID: {result.graph_id})",
                severity="information",
                timeout=8,
            )
            break  # Success - exit retry loop

        except CodebaseAlreadyIndexedError as exc:
            progress_timer.stop()
            logger.warning(f"Codebase already indexed: {exc}")
            self.notify(str(exc), severity="warning")
            # NOTE(review): returning here skips the label cleanup at the
            # bottom of this method, so the "Indexing codebase" footer text
            # stays visible — confirm this is intended.
            return
        except InvalidPathError as exc:
            progress_timer.stop()
            logger.error(f"Invalid path error: {exc}")
            self.notify(str(exc), severity="error")
            # NOTE(review): same early return as above — footer label is
            # not cleared on this path either.
            return

        except Exception as exc:  # pragma: no cover - defensive UI path
            # Check if this is a kuzu corruption error and we have retries left
            if attempt < max_retries - 1 and self._is_kuzu_corruption_error(exc):
                logger.warning(
                    f"Kuzu corruption detected on attempt {attempt + 1}/{max_retries}: {exc}. "
                    f"Will retry after cleanup..."
                )
                # Exponential backoff: 1s, 2s
                await asyncio.sleep(2**attempt)
                continue

            # Either final retry failed OR not a corruption error - show error
            logger.exception(
                f"Failed to index codebase after {attempt + 1} attempts - "
                f"repo_path: {selection.repo_path}, name: {selection.name}, error: {exc}"
            )
            self.notify(
                f"Failed to index codebase after {attempt + 1} attempts: {exc}",
                severity="error",
                timeout=30,  # Keep error visible for 30 seconds
            )
            break

    # Always stop the progress timer and clean up label
    progress_timer.stop()
    label.update("")
    label.refresh()
|
|
1128
|
+
|
|
1129
|
+
@work
async def run_agent(self, message: str) -> None:
    """Run the agent with *message* in a background worker.

    Manages the spinner/processing state and context-indicator animation
    around the run, converts failures into user-facing hints, and always
    saves the conversation and refocuses the input afterwards.

    Args:
        message: The prompt text to send to the agent.
    """
    # Start processing with spinner
    from textual.worker import get_current_worker

    self.processing_state.start_processing("Processing...")
    self.processing_state.bind_worker(get_current_worker())

    # Start context indicator animation immediately
    self.widget_coordinator.set_context_streaming(True)

    try:
        # Pass the message straight through as the prompt (the previous
        # `prompt = None; prompt = message` indirection was redundant).
        await self.agent_manager.run(
            prompt=message,
        )
    except asyncio.CancelledError:
        # Handle cancellation gracefully - DO NOT re-raise
        self.mount_hint("⚠️ Operation cancelled by user")
    except ContextSizeLimitExceeded as e:
        # User-friendly error with actionable options
        hint = (
            f"⚠️ **Context too large for {e.model_name}**\n\n"
            f"Your conversation history exceeds this model's limit ({e.max_tokens:,} tokens).\n\n"
            f"**Choose an action:**\n\n"
            f"1. Switch to a larger model (`Ctrl+P` → Change Model)\n"
            f"2. Switch to a larger model, compact (`/compact`), then switch back to {e.model_name}\n"
            f"3. Clear conversation (`/clear`)\n"
        )

        self.mount_hint(hint)

        # Log for debugging (won't send to Sentry due to ErrorNotPickedUpBySentry)
        logger.info(
            "Context size limit exceeded",
            extra={
                "max_tokens": e.max_tokens,
                "model_name": e.model_name,
            },
        )
    except Exception as e:
        # Log with full stack trace to shotgun.log
        logger.exception(
            "Agent run failed",
            extra={
                "agent_mode": self.mode.value,
                "error_type": type(e).__name__,
            },
        )

        # Determine user-friendly message based on error type
        error_name = type(e).__name__
        error_message = str(e)

        if "APIStatusError" in error_name and "overload" in error_message.lower():
            hint = "⚠️ The AI service is temporarily overloaded. Please wait a moment and try again."
        elif "APIStatusError" in error_name and "rate" in error_message.lower():
            hint = "⚠️ Rate limit reached. Please wait before trying again."
        elif "APIStatusError" in error_name:
            hint = f"⚠️ AI service error: {error_message}"
        else:
            hint = f"⚠️ An error occurred: {error_message}\n\nCheck logs at ~/.shotgun-sh/logs/shotgun.log"

        self.mount_hint(hint)
    finally:
        self.processing_state.stop_processing()
        # Stop context indicator animation
        self.widget_coordinator.set_context_streaming(False)

        # Save conversation after each interaction
        self._save_conversation()

        self.widget_coordinator.update_prompt_input(focus=True)
|
|
1205
|
+
|
|
1206
|
+
def _save_conversation(self) -> None:
    """Persist the current conversation in a background worker."""
    # exclusive=True serialises saves so that concurrent writers cannot
    # contend over the same conversation file.
    save_coro = self.conversation_service.save_conversation(self.agent_manager)
    self.run_worker(save_coro, exclusive=True)
|
|
1214
|
+
|
|
1215
|
+
async def _check_and_load_conversation(self) -> None:
    """Restore a saved conversation when one is present on disk."""
    has_saved = await self.conversation_manager.exists()
    if has_saved:
        self._load_conversation()
|
|
1219
|
+
|
|
1220
|
+
def _load_conversation(self) -> None:
    """Load conversation from persistent storage.

    Schedules an async restore via a (non-exclusive) worker; failures are
    surfaced as a hint, successes switch the screen to the restored mode.
    """

    # Use conversation service for restoration (run async)
    async def _do_load() -> None:
        # restore_conversation returns (success, error_msg, restored_type).
        (
            success,
            error_msg,
            restored_type,
        ) = await self.conversation_service.restore_conversation(
            self.agent_manager, self.deps.usage_manager
        )

        if not success and error_msg:
            self.mount_hint(error_msg)
        elif success and restored_type:
            # Update the current mode to match restored conversation
            self.mode = restored_type

    self.run_worker(_do_load(), exclusive=False)
|
|
1240
|
+
|
|
1241
|
+
@work
async def _check_and_show_onboarding(self) -> None:
    """Check if onboarding should be shown and display modal if needed.

    The config's shown_onboarding_popup timestamp acts as a one-time flag:
    None means the modal has never been shown.
    """
    config_manager = get_config_manager()
    config = await config_manager.load()

    # Only show onboarding if it hasn't been shown before
    if config.shown_onboarding_popup is None:
        # Show the onboarding modal
        await self.app.push_screen_wait(OnboardingModal())

        # Mark as shown in config with current timestamp
        config.shown_onboarding_popup = datetime.now(timezone.utc)
        await config_manager.save(config)
|