ripperdoc 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +3 -0
- ripperdoc/__main__.py +20 -0
- ripperdoc/cli/__init__.py +1 -0
- ripperdoc/cli/cli.py +405 -0
- ripperdoc/cli/commands/__init__.py +82 -0
- ripperdoc/cli/commands/agents_cmd.py +263 -0
- ripperdoc/cli/commands/base.py +19 -0
- ripperdoc/cli/commands/clear_cmd.py +18 -0
- ripperdoc/cli/commands/compact_cmd.py +23 -0
- ripperdoc/cli/commands/config_cmd.py +31 -0
- ripperdoc/cli/commands/context_cmd.py +144 -0
- ripperdoc/cli/commands/cost_cmd.py +82 -0
- ripperdoc/cli/commands/doctor_cmd.py +221 -0
- ripperdoc/cli/commands/exit_cmd.py +19 -0
- ripperdoc/cli/commands/help_cmd.py +20 -0
- ripperdoc/cli/commands/mcp_cmd.py +70 -0
- ripperdoc/cli/commands/memory_cmd.py +202 -0
- ripperdoc/cli/commands/models_cmd.py +413 -0
- ripperdoc/cli/commands/permissions_cmd.py +302 -0
- ripperdoc/cli/commands/resume_cmd.py +98 -0
- ripperdoc/cli/commands/status_cmd.py +167 -0
- ripperdoc/cli/commands/tasks_cmd.py +278 -0
- ripperdoc/cli/commands/todos_cmd.py +69 -0
- ripperdoc/cli/commands/tools_cmd.py +19 -0
- ripperdoc/cli/ui/__init__.py +1 -0
- ripperdoc/cli/ui/context_display.py +298 -0
- ripperdoc/cli/ui/helpers.py +22 -0
- ripperdoc/cli/ui/rich_ui.py +1557 -0
- ripperdoc/cli/ui/spinner.py +49 -0
- ripperdoc/cli/ui/thinking_spinner.py +128 -0
- ripperdoc/cli/ui/tool_renderers.py +298 -0
- ripperdoc/core/__init__.py +1 -0
- ripperdoc/core/agents.py +486 -0
- ripperdoc/core/commands.py +33 -0
- ripperdoc/core/config.py +559 -0
- ripperdoc/core/default_tools.py +88 -0
- ripperdoc/core/permissions.py +252 -0
- ripperdoc/core/providers/__init__.py +47 -0
- ripperdoc/core/providers/anthropic.py +250 -0
- ripperdoc/core/providers/base.py +265 -0
- ripperdoc/core/providers/gemini.py +615 -0
- ripperdoc/core/providers/openai.py +487 -0
- ripperdoc/core/query.py +1058 -0
- ripperdoc/core/query_utils.py +622 -0
- ripperdoc/core/skills.py +295 -0
- ripperdoc/core/system_prompt.py +431 -0
- ripperdoc/core/tool.py +240 -0
- ripperdoc/sdk/__init__.py +9 -0
- ripperdoc/sdk/client.py +333 -0
- ripperdoc/tools/__init__.py +1 -0
- ripperdoc/tools/ask_user_question_tool.py +431 -0
- ripperdoc/tools/background_shell.py +389 -0
- ripperdoc/tools/bash_output_tool.py +98 -0
- ripperdoc/tools/bash_tool.py +1016 -0
- ripperdoc/tools/dynamic_mcp_tool.py +428 -0
- ripperdoc/tools/enter_plan_mode_tool.py +226 -0
- ripperdoc/tools/exit_plan_mode_tool.py +153 -0
- ripperdoc/tools/file_edit_tool.py +346 -0
- ripperdoc/tools/file_read_tool.py +203 -0
- ripperdoc/tools/file_write_tool.py +205 -0
- ripperdoc/tools/glob_tool.py +179 -0
- ripperdoc/tools/grep_tool.py +370 -0
- ripperdoc/tools/kill_bash_tool.py +136 -0
- ripperdoc/tools/ls_tool.py +471 -0
- ripperdoc/tools/mcp_tools.py +591 -0
- ripperdoc/tools/multi_edit_tool.py +456 -0
- ripperdoc/tools/notebook_edit_tool.py +386 -0
- ripperdoc/tools/skill_tool.py +205 -0
- ripperdoc/tools/task_tool.py +379 -0
- ripperdoc/tools/todo_tool.py +494 -0
- ripperdoc/tools/tool_search_tool.py +380 -0
- ripperdoc/utils/__init__.py +1 -0
- ripperdoc/utils/bash_constants.py +51 -0
- ripperdoc/utils/bash_output_utils.py +43 -0
- ripperdoc/utils/coerce.py +34 -0
- ripperdoc/utils/context_length_errors.py +252 -0
- ripperdoc/utils/exit_code_handlers.py +241 -0
- ripperdoc/utils/file_watch.py +135 -0
- ripperdoc/utils/git_utils.py +274 -0
- ripperdoc/utils/json_utils.py +27 -0
- ripperdoc/utils/log.py +176 -0
- ripperdoc/utils/mcp.py +560 -0
- ripperdoc/utils/memory.py +253 -0
- ripperdoc/utils/message_compaction.py +676 -0
- ripperdoc/utils/messages.py +519 -0
- ripperdoc/utils/output_utils.py +258 -0
- ripperdoc/utils/path_ignore.py +677 -0
- ripperdoc/utils/path_utils.py +46 -0
- ripperdoc/utils/permissions/__init__.py +27 -0
- ripperdoc/utils/permissions/path_validation_utils.py +174 -0
- ripperdoc/utils/permissions/shell_command_validation.py +552 -0
- ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
- ripperdoc/utils/prompt.py +17 -0
- ripperdoc/utils/safe_get_cwd.py +31 -0
- ripperdoc/utils/sandbox_utils.py +38 -0
- ripperdoc/utils/session_history.py +260 -0
- ripperdoc/utils/session_usage.py +117 -0
- ripperdoc/utils/shell_token_utils.py +95 -0
- ripperdoc/utils/shell_utils.py +159 -0
- ripperdoc/utils/todo.py +203 -0
- ripperdoc/utils/token_estimation.py +34 -0
- ripperdoc-0.2.6.dist-info/METADATA +193 -0
- ripperdoc-0.2.6.dist-info/RECORD +107 -0
- ripperdoc-0.2.6.dist-info/WHEEL +5 -0
- ripperdoc-0.2.6.dist-info/entry_points.txt +3 -0
- ripperdoc-0.2.6.dist-info/licenses/LICENSE +53 -0
- ripperdoc-0.2.6.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1557 @@
|
|
|
1
|
+
"""Rich-based CLI interface for Ripperdoc.
|
|
2
|
+
|
|
3
|
+
This module provides a clean, minimal terminal UI using Rich for the Ripperdoc agent.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import contextlib
|
|
8
|
+
import json
|
|
9
|
+
import sys
|
|
10
|
+
import uuid
|
|
11
|
+
import re
|
|
12
|
+
from typing import List, Dict, Any, Optional, Union, Iterable
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
from rich.console import Console
|
|
16
|
+
from rich.panel import Panel
|
|
17
|
+
from rich.markdown import Markdown
|
|
18
|
+
from rich.text import Text
|
|
19
|
+
from rich import box
|
|
20
|
+
from rich.markup import escape
|
|
21
|
+
|
|
22
|
+
from prompt_toolkit import PromptSession
|
|
23
|
+
from prompt_toolkit.completion import Completer, Completion
|
|
24
|
+
from prompt_toolkit.shortcuts.prompt import CompleteStyle
|
|
25
|
+
from prompt_toolkit.history import InMemoryHistory
|
|
26
|
+
from prompt_toolkit.key_binding import KeyBindings
|
|
27
|
+
|
|
28
|
+
from ripperdoc import __version__
|
|
29
|
+
from ripperdoc.core.config import get_global_config, provider_protocol
|
|
30
|
+
from ripperdoc.core.default_tools import get_default_tools
|
|
31
|
+
from ripperdoc.core.query import query, QueryContext
|
|
32
|
+
from ripperdoc.core.system_prompt import build_system_prompt
|
|
33
|
+
from ripperdoc.core.skills import build_skill_summary, load_all_skills
|
|
34
|
+
from ripperdoc.cli.commands import (
|
|
35
|
+
get_slash_command,
|
|
36
|
+
list_slash_commands,
|
|
37
|
+
slash_command_completions,
|
|
38
|
+
)
|
|
39
|
+
from ripperdoc.cli.ui.helpers import get_profile_for_pointer
|
|
40
|
+
from ripperdoc.core.permissions import make_permission_checker
|
|
41
|
+
from ripperdoc.cli.ui.spinner import Spinner
|
|
42
|
+
from ripperdoc.cli.ui.thinking_spinner import ThinkingSpinner
|
|
43
|
+
from ripperdoc.cli.ui.context_display import context_usage_lines
|
|
44
|
+
from ripperdoc.utils.message_compaction import (
|
|
45
|
+
compact_messages,
|
|
46
|
+
estimate_conversation_tokens,
|
|
47
|
+
estimate_used_tokens,
|
|
48
|
+
get_context_usage_status,
|
|
49
|
+
get_remaining_context_tokens,
|
|
50
|
+
resolve_auto_compact_enabled,
|
|
51
|
+
)
|
|
52
|
+
from ripperdoc.utils.token_estimation import estimate_tokens
|
|
53
|
+
from ripperdoc.utils.mcp import (
|
|
54
|
+
ensure_mcp_runtime,
|
|
55
|
+
format_mcp_instructions,
|
|
56
|
+
load_mcp_servers_async,
|
|
57
|
+
shutdown_mcp_runtime,
|
|
58
|
+
)
|
|
59
|
+
from ripperdoc.tools.mcp_tools import load_dynamic_mcp_tools_async, merge_tools_with_dynamic
|
|
60
|
+
from ripperdoc.utils.session_history import SessionHistory
|
|
61
|
+
from ripperdoc.utils.memory import build_memory_instructions
|
|
62
|
+
from ripperdoc.core.query import query_llm
|
|
63
|
+
from ripperdoc.utils.messages import (
|
|
64
|
+
UserMessage,
|
|
65
|
+
AssistantMessage,
|
|
66
|
+
ProgressMessage,
|
|
67
|
+
create_user_message,
|
|
68
|
+
create_assistant_message,
|
|
69
|
+
)
|
|
70
|
+
from ripperdoc.utils.log import enable_session_file_logging, get_logger
|
|
71
|
+
from ripperdoc.cli.ui.tool_renderers import ToolResultRendererRegistry
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
# Type alias for conversation messages
ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]

# Whimsical gerunds shown while the model is busy; one is picked for the
# thinking spinner so long waits feel less monotonous.
THINKING_WORDS: list[str] = [
    "Accomplishing",
    "Actioning",
    "Actualizing",
    "Baking",
    "Booping",
    "Brewing",
    "Calculating",
    "Cerebrating",
    "Channelling",
    "Churning",
    "Coalescing",
    "Cogitating",
    "Computing",
    "Combobulating",
    "Concocting",
    "Conjuring",
    "Considering",
    "Contemplating",
    "Cooking",
    "Crafting",
    "Creating",
    "Crunching",
    "Deciphering",
    "Deliberating",
    "Determining",
    "Discombobulating",
    "Divining",
    "Doing",
    "Effecting",
    "Elucidating",
    "Enchanting",
    "Envisioning",
    "Finagling",
    "Flibbertigibbeting",
    "Forging",
    "Forming",
    "Frolicking",
    "Generating",
    "Germinating",
    "Hatching",
    "Herding",
    "Honking",
    "Ideating",
    "Imagining",
    "Incubating",
    "Inferring",
    "Manifesting",
    "Marinating",
    "Meandering",
    "Moseying",
    "Mulling",
    "Mustering",
    "Musing",
    "Noodling",
    "Percolating",
    "Perusing",
    "Philosophising",
    "Pontificating",
    "Pondering",
    "Processing",
    "Puttering",
    "Puzzling",
    "Reticulating",
    "Ruminating",
    "Scheming",
    "Schlepping",
    "Shimmying",
    "Simmering",
    "Smooshing",
    "Spelunking",
    "Spinning",
    "Stewing",
    "Sussing",
    "Synthesizing",
    "Thinking",
    "Tinkering",
    "Transmuting",
    "Unfurling",
    "Unravelling",
    "Vibing",
    "Wandering",
    "Whirring",
    "Wibbling",
    "Wizarding",
    "Working",
    "Wrangling",
]

# Module-wide Rich console and logger shared by every UI component in this file.
console = Console()
logger = get_logger()

# Keep a small window of recent messages alongside the summary after /compact so
# the model retains immediate context.
RECENT_MESSAGES_AFTER_COMPACT = 8
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def create_welcome_panel() -> Panel:
    """Build the rounded greeting panel shown when the UI starts."""

    welcome_content = """
[bold cyan]Welcome to Ripperdoc![/bold cyan]

Ripperdoc is an AI-powered coding assistant that helps with software development tasks.
You can read files, edit code, run commands, and help with various programming tasks.

[dim]Type your questions below. Press Ctrl+C to exit.[/dim]
"""

    panel_title = f"Ripperdoc v{__version__}"
    panel = Panel(
        welcome_content,
        title=panel_title,
        border_style="cyan",
        box=box.ROUNDED,
        padding=(1, 2),
    )
    return panel
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def create_status_bar() -> Text:
    """Render the one-line status summary (app name, model, readiness)."""
    profile = get_profile_for_pointer("main")
    model_label = profile.model if profile else "Not configured"

    # Assemble the bar from (text, style) segments; None means default styling.
    segments = [
        ("Ripperdoc", "bold cyan"),
        (" • ", None),
        (model_label, "dim"),
        (" • ", None),
        ("Ready", "green"),
    ]
    bar = Text()
    for part, style in segments:
        if style is None:
            bar.append(part)
        else:
            bar.append(part, style=style)
    return bar
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
class RichUI:
|
|
211
|
+
"""Rich-based UI for Ripperdoc."""
|
|
212
|
+
|
|
213
|
+
    def __init__(
        self,
        safe_mode: bool = False,
        verbose: bool = False,
        session_id: Optional[str] = None,
        log_file_path: Optional[Path] = None,
    ):
        """Set up the interactive UI session.

        Args:
            safe_mode: When True, install a permission checker that gates tool use.
            verbose: Enable extra detail in tool-result rendering.
            session_id: Reuse an existing session id; a fresh UUID is minted otherwise.
            log_file_path: Attach logging to this file instead of creating a
                session-scoped log file.
        """
        # The UI owns a dedicated asyncio loop for all async work (MCP, queries).
        self._loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self._loop)
        self.console = console
        self.safe_mode = safe_mode
        self.verbose = verbose
        self.conversation_messages: List[ConversationMessage] = []
        self._saved_conversation: Optional[List[ConversationMessage]] = None
        self.query_context: Optional[QueryContext] = None
        self._current_tool: Optional[str] = None
        self._should_exit: bool = False
        self._query_interrupted: bool = False  # Track if query was interrupted by ESC
        self._esc_listener_active: bool = False  # Track if ESC listener is active
        self._esc_listener_paused: bool = False  # Pause ESC listener during blocking prompts
        self._stdin_fd: Optional[int] = None  # Track stdin for raw mode restoration
        self._stdin_old_settings: Optional[list] = None  # Original terminal settings
        self._stdin_in_raw_mode: bool = False  # Whether we currently own raw mode
        self.command_list = list_slash_commands()
        self._command_completions = slash_command_completions()
        self._prompt_session: Optional[PromptSession] = None
        self.project_path = Path.cwd()
        # Track a stable session identifier for the current UI run.
        self.session_id = session_id or str(uuid.uuid4())
        if log_file_path:
            self.log_file_path = log_file_path
            logger.attach_file_handler(self.log_file_path)
        else:
            self.log_file_path = enable_session_file_logging(self.project_path, self.session_id)
        logger.info(
            "[ui] Initialized Rich UI session",
            extra={
                "session_id": self.session_id,
                "project_path": str(self.project_path),
                "log_file": str(self.log_file_path),
                "safe_mode": self.safe_mode,
                "verbose": self.verbose,
            },
        )
        self._session_history = SessionHistory(self.project_path, self.session_id)
        self._permission_checker = (
            make_permission_checker(self.project_path, safe_mode) if safe_mode else None
        )
        # Keep MCP runtime alive for the whole UI session. Create it on the UI loop up front.
        try:
            self._run_async(ensure_mcp_runtime(self.project_path))
        except (OSError, RuntimeError, ConnectionError) as exc:
            # Startup continues without MCP; tools from MCP servers just won't load.
            logger.warning(
                "[ui] Failed to initialize MCP runtime at startup: %s: %s",
                type(exc).__name__, exc,
                extra={"session_id": self.session_id},
            )
|
|
270
|
+
|
|
271
|
+
    def _context_usage_lines(
        self, breakdown: Any, model_label: str, auto_compact_enabled: bool
    ) -> List[str]:
        """Thin delegate to the shared formatter in cli.ui.context_display."""
        return context_usage_lines(breakdown, model_label, auto_compact_enabled)
|
|
275
|
+
|
|
276
|
+
def _set_session(self, session_id: str) -> None:
|
|
277
|
+
"""Switch to a different session id and reset logging."""
|
|
278
|
+
self.session_id = session_id
|
|
279
|
+
self.log_file_path = enable_session_file_logging(self.project_path, self.session_id)
|
|
280
|
+
logger.info(
|
|
281
|
+
"[ui] Switched session",
|
|
282
|
+
extra={
|
|
283
|
+
"session_id": self.session_id,
|
|
284
|
+
"project_path": str(self.project_path),
|
|
285
|
+
"log_file": str(self.log_file_path),
|
|
286
|
+
},
|
|
287
|
+
)
|
|
288
|
+
self._session_history = SessionHistory(self.project_path, session_id)
|
|
289
|
+
|
|
290
|
+
def _log_message(self, message: Any) -> None:
|
|
291
|
+
"""Best-effort persistence of a message to the session log."""
|
|
292
|
+
try:
|
|
293
|
+
self._session_history.append(message)
|
|
294
|
+
except (OSError, IOError, json.JSONDecodeError) as exc:
|
|
295
|
+
# Logging failures should never interrupt the UI flow
|
|
296
|
+
logger.warning(
|
|
297
|
+
"[ui] Failed to append message to session history: %s: %s",
|
|
298
|
+
type(exc).__name__, exc,
|
|
299
|
+
extra={"session_id": self.session_id},
|
|
300
|
+
)
|
|
301
|
+
|
|
302
|
+
def _append_prompt_history(self, text: str) -> None:
|
|
303
|
+
"""Append text to the interactive prompt history."""
|
|
304
|
+
if not text or not text.strip():
|
|
305
|
+
return
|
|
306
|
+
session = self.get_prompt_session()
|
|
307
|
+
try:
|
|
308
|
+
session.history.append_string(text)
|
|
309
|
+
except (AttributeError, TypeError, ValueError) as exc:
|
|
310
|
+
logger.warning(
|
|
311
|
+
"[ui] Failed to append prompt history: %s: %s",
|
|
312
|
+
type(exc).__name__, exc,
|
|
313
|
+
extra={"session_id": self.session_id},
|
|
314
|
+
)
|
|
315
|
+
|
|
316
|
+
    def replay_conversation(self, messages: List[Dict[str, Any]]) -> None:
        """Render a conversation history in the console and seed prompt history.

        Args:
            messages: Previously persisted conversation messages. Each entry may
                expose its payload via a ``message`` or ``content`` attribute.
        """
        if not messages:
            return
        self.console.print("\n[dim]Restored conversation:[/dim]")
        for msg in messages:
            msg_type = getattr(msg, "type", "")
            # Payload may live under .message or .content depending on message kind.
            message_payload = getattr(msg, "message", None) or getattr(msg, "content", None)
            content = getattr(message_payload, "content", None) if message_payload else None
            # A user message containing a tool_result block is a tool echo, not input.
            has_tool_result = False
            if isinstance(content, list):
                for block in content:
                    block_type = getattr(block, "type", None) or (
                        block.get("type") if isinstance(block, dict) else None
                    )
                    if block_type == "tool_result":
                        has_tool_result = True
                        break
            text = self._stringify_message_content(content)
            if not text:
                continue
            if msg_type == "user" and not has_tool_result:
                self.display_message("You", text)
                self._append_prompt_history(text)
            elif msg_type == "user" and has_tool_result:
                # Tool results are part of the conversation but should not enter prompt history.
                self.display_message("Tool", text, is_tool=True, tool_type="result")
            elif msg_type == "assistant":
                self.display_message("Ripperdoc", text)
|
|
345
|
+
|
|
346
|
+
def get_default_tools(self) -> list:
|
|
347
|
+
"""Get the default set of tools."""
|
|
348
|
+
return get_default_tools()
|
|
349
|
+
|
|
350
|
+
def display_message(
|
|
351
|
+
self,
|
|
352
|
+
sender: str,
|
|
353
|
+
content: str,
|
|
354
|
+
is_tool: bool = False,
|
|
355
|
+
tool_type: Optional[str] = None,
|
|
356
|
+
tool_args: Optional[dict] = None,
|
|
357
|
+
tool_data: Any = None,
|
|
358
|
+
tool_error: bool = False,
|
|
359
|
+
) -> None:
|
|
360
|
+
"""Display a message in the conversation."""
|
|
361
|
+
if not is_tool:
|
|
362
|
+
self._print_human_or_assistant(sender, content)
|
|
363
|
+
return
|
|
364
|
+
|
|
365
|
+
if tool_type == "call":
|
|
366
|
+
self._print_tool_call(sender, content, tool_args)
|
|
367
|
+
return
|
|
368
|
+
|
|
369
|
+
if tool_type == "result":
|
|
370
|
+
self._print_tool_result(sender, content, tool_data, tool_error)
|
|
371
|
+
return
|
|
372
|
+
|
|
373
|
+
self._print_generic_tool(sender, content)
|
|
374
|
+
|
|
375
|
+
def _format_tool_args(self, tool_name: str, tool_args: Optional[dict]) -> list[str]:
|
|
376
|
+
"""Render tool arguments into concise display-friendly parts."""
|
|
377
|
+
if not tool_args:
|
|
378
|
+
return []
|
|
379
|
+
|
|
380
|
+
args_parts: list[str] = []
|
|
381
|
+
|
|
382
|
+
def _format_arg(arg_key: str, arg_value: Any) -> str:
|
|
383
|
+
if arg_key == "todos" and isinstance(arg_value, list):
|
|
384
|
+
counts = {"pending": 0, "in_progress": 0, "completed": 0}
|
|
385
|
+
for item in arg_value:
|
|
386
|
+
status = ""
|
|
387
|
+
if isinstance(item, dict):
|
|
388
|
+
status = item.get("status", "")
|
|
389
|
+
elif hasattr(item, "get"):
|
|
390
|
+
status = item.get("status", "")
|
|
391
|
+
elif hasattr(item, "status"):
|
|
392
|
+
status = getattr(item, "status")
|
|
393
|
+
if status in counts:
|
|
394
|
+
counts[status] += 1
|
|
395
|
+
total = len(arg_value)
|
|
396
|
+
return f"{arg_key}: {total} items"
|
|
397
|
+
if isinstance(arg_value, (list, dict)):
|
|
398
|
+
return f"{arg_key}: {len(arg_value)} items"
|
|
399
|
+
if isinstance(arg_value, str) and len(arg_value) > 50:
|
|
400
|
+
return f'{arg_key}: "{arg_value[:50]}..."'
|
|
401
|
+
return f"{arg_key}: {arg_value}"
|
|
402
|
+
|
|
403
|
+
if tool_name == "Bash":
|
|
404
|
+
command_value = tool_args.get("command")
|
|
405
|
+
if command_value is not None:
|
|
406
|
+
args_parts.append(_format_arg("command", command_value))
|
|
407
|
+
|
|
408
|
+
background_value = tool_args.get("run_in_background", tool_args.get("runInBackground"))
|
|
409
|
+
background_value = bool(background_value) if background_value is not None else False
|
|
410
|
+
args_parts.append(f"background: {background_value}")
|
|
411
|
+
|
|
412
|
+
sandbox_value = tool_args.get("sandbox")
|
|
413
|
+
sandbox_value = bool(sandbox_value) if sandbox_value is not None else False
|
|
414
|
+
args_parts.append(f"sandbox: {sandbox_value}")
|
|
415
|
+
|
|
416
|
+
for key, value in tool_args.items():
|
|
417
|
+
if key in {"command", "run_in_background", "runInBackground", "sandbox"}:
|
|
418
|
+
continue
|
|
419
|
+
args_parts.append(_format_arg(key, value))
|
|
420
|
+
return args_parts
|
|
421
|
+
|
|
422
|
+
# Special handling for Edit and MultiEdit tools - don't show old_string
|
|
423
|
+
if tool_name in ["Edit", "MultiEdit"]:
|
|
424
|
+
for key, value in tool_args.items():
|
|
425
|
+
if key == "new_string":
|
|
426
|
+
continue # Skip new_string for Edit/MultiEdit tools
|
|
427
|
+
if key == "old_string":
|
|
428
|
+
continue # Skip old_string for Edit/MultiEdit tools
|
|
429
|
+
# For MultiEdit, also handle edits array
|
|
430
|
+
if key == "edits" and isinstance(value, list):
|
|
431
|
+
args_parts.append(f"edits: {len(value)} operations")
|
|
432
|
+
continue
|
|
433
|
+
args_parts.append(_format_arg(key, value))
|
|
434
|
+
return args_parts
|
|
435
|
+
|
|
436
|
+
for key, value in tool_args.items():
|
|
437
|
+
args_parts.append(_format_arg(key, value))
|
|
438
|
+
return args_parts
|
|
439
|
+
|
|
440
|
+
def _print_tool_call(self, sender: str, content: str, tool_args: Optional[dict]) -> None:
|
|
441
|
+
"""Render a tool invocation line."""
|
|
442
|
+
if sender == "Task":
|
|
443
|
+
subagent = ""
|
|
444
|
+
if isinstance(tool_args, dict):
|
|
445
|
+
subagent = tool_args.get("subagent_type") or tool_args.get("subagent") or ""
|
|
446
|
+
desc = ""
|
|
447
|
+
if isinstance(tool_args, dict):
|
|
448
|
+
raw_desc = tool_args.get("description") or tool_args.get("prompt") or ""
|
|
449
|
+
desc = raw_desc if len(str(raw_desc)) <= 120 else str(raw_desc)[:117] + "..."
|
|
450
|
+
label = f"-> Launching subagent: {subagent or 'unknown'}"
|
|
451
|
+
if desc:
|
|
452
|
+
label += f" — {desc}"
|
|
453
|
+
self.console.print(f"[cyan]{escape(label)}[/cyan]")
|
|
454
|
+
return
|
|
455
|
+
|
|
456
|
+
tool_name = sender if sender != "Ripperdoc" else content
|
|
457
|
+
tool_display = f"● {tool_name}("
|
|
458
|
+
|
|
459
|
+
args_parts = self._format_tool_args(tool_name, tool_args)
|
|
460
|
+
if args_parts:
|
|
461
|
+
tool_display += ", ".join(args_parts)
|
|
462
|
+
tool_display += ")"
|
|
463
|
+
|
|
464
|
+
self.console.print(f"[dim cyan]{escape(tool_display)}[/]")
|
|
465
|
+
|
|
466
|
+
    def _print_tool_result(
        self, sender: str, content: str, tool_data: Any, tool_error: bool = False
    ) -> None:
        """Render a tool result summary using the renderer registry.

        Args:
            sender: Tool name that produced the result.
            content: Textual result payload (may be empty).
            tool_data: Structured result object or dict, if available.
            tool_error: Caller-supplied flag forcing the failure rendering path.
        """
        # Check for failure states: explicit flag, success=False, or is_error.
        failed = tool_error
        if tool_data is not None:
            if isinstance(tool_data, dict):
                failed = failed or (tool_data.get("success") is False)
            else:
                success = getattr(tool_data, "success", None)
                failed = failed or (success is False)
            failed = failed or bool(self._get_tool_field(tool_data, "is_error"))

        # Extract warning/token info
        warning_text = None
        token_estimate = None
        if tool_data is not None:
            warning_text = self._get_tool_field(tool_data, "warning")
            token_estimate = self._get_tool_field(tool_data, "token_estimate")

        # Handle failure case: a single red line, then stop.
        if failed:
            if content:
                self.console.print(f"  ⎿ [red]{escape(content)}[/red]")
            else:
                self.console.print(f"  ⎿ [red]{escape(sender)} failed[/red]")
            return

        # Display warnings and token estimates. The token estimate piggybacks on a
        # warning line when present; otherwise it only shows in verbose mode.
        if warning_text:
            self.console.print(f"  ⎿ [yellow]{escape(str(warning_text))}[/yellow]")
            if token_estimate:
                self.console.print(
                    f"     [dim]Estimated tokens: {escape(str(token_estimate))}[/dim]"
                )
        elif token_estimate and self.verbose:
            self.console.print(f"  ⎿ [dim]Estimated tokens: {escape(str(token_estimate))}[/dim]")

        # Handle empty content
        if not content:
            self.console.print("  ⎿ [dim]Tool completed[/]")
            return

        # Use renderer registry for tool-specific rendering; it reports whether it
        # handled this sender.
        registry = ToolResultRendererRegistry(
            self.console, self.verbose, self._parse_bash_output_sections
        )
        if registry.render(sender, content, tool_data):
            return

        # Fallback for unhandled tools
        self.console.print("  ⎿ [dim]Tool completed[/]")
|
|
519
|
+
|
|
520
|
+
def _print_generic_tool(self, sender: str, content: str) -> None:
|
|
521
|
+
"""Fallback rendering for miscellaneous tool messages."""
|
|
522
|
+
if sender == "Task" and isinstance(content, str) and content.startswith("[subagent:"):
|
|
523
|
+
agent_label = content.split("]", 1)[0].replace("[subagent:", "").strip()
|
|
524
|
+
summary = content.split("]", 1)[1].strip() if "]" in content else ""
|
|
525
|
+
self.console.print(f"[green]↳ Subagent {escape(agent_label)} finished[/green]")
|
|
526
|
+
if summary:
|
|
527
|
+
self.console.print(f" {summary}", markup=False)
|
|
528
|
+
return
|
|
529
|
+
self.console.print(f"[dim cyan][Tool] {escape(sender)}: {escape(content)}[/]")
|
|
530
|
+
|
|
531
|
+
def _print_human_or_assistant(self, sender: str, content: str) -> None:
|
|
532
|
+
"""Render messages from the user or assistant."""
|
|
533
|
+
if sender.lower() == "you":
|
|
534
|
+
self.console.print(f"[bold green]{escape(sender)}:[/] {escape(content)}")
|
|
535
|
+
return
|
|
536
|
+
self.console.print(Markdown(content))
|
|
537
|
+
|
|
538
|
+
def _get_tool_field(self, data: Any, key: str, default: Any = None) -> Any:
|
|
539
|
+
"""Safely fetch a field from either an object or a dict."""
|
|
540
|
+
if isinstance(data, dict):
|
|
541
|
+
return data.get(key, default)
|
|
542
|
+
return getattr(data, key, default)
|
|
543
|
+
|
|
544
|
+
def _parse_bash_output_sections(self, content: str) -> tuple[List[str], List[str]]:
|
|
545
|
+
"""Fallback parser to pull stdout/stderr sections from a text block."""
|
|
546
|
+
stdout_lines: List[str] = []
|
|
547
|
+
stderr_lines: List[str] = []
|
|
548
|
+
if not content:
|
|
549
|
+
return stdout_lines, stderr_lines
|
|
550
|
+
|
|
551
|
+
current: Optional[str] = None
|
|
552
|
+
for line in content.splitlines():
|
|
553
|
+
stripped = line.strip()
|
|
554
|
+
if stripped.startswith("stdout:"):
|
|
555
|
+
current = "stdout"
|
|
556
|
+
remainder = line.split("stdout:", 1)[1].strip()
|
|
557
|
+
if remainder:
|
|
558
|
+
stdout_lines.append(remainder)
|
|
559
|
+
continue
|
|
560
|
+
if stripped.startswith("stderr:"):
|
|
561
|
+
current = "stderr"
|
|
562
|
+
remainder = line.split("stderr:", 1)[1].strip()
|
|
563
|
+
if remainder:
|
|
564
|
+
stderr_lines.append(remainder)
|
|
565
|
+
continue
|
|
566
|
+
if stripped.startswith("exit code:"):
|
|
567
|
+
break
|
|
568
|
+
if current == "stdout":
|
|
569
|
+
stdout_lines.append(line)
|
|
570
|
+
elif current == "stderr":
|
|
571
|
+
stderr_lines.append(line)
|
|
572
|
+
|
|
573
|
+
return stdout_lines, stderr_lines
|
|
574
|
+
|
|
575
|
+
def _stringify_message_content(self, content: Any) -> str:
|
|
576
|
+
"""Extract readable text from a message content payload."""
|
|
577
|
+
if isinstance(content, str):
|
|
578
|
+
return content
|
|
579
|
+
if isinstance(content, list):
|
|
580
|
+
parts: List[str] = []
|
|
581
|
+
for block in content:
|
|
582
|
+
text = getattr(block, "text", None)
|
|
583
|
+
if text is None:
|
|
584
|
+
text = getattr(block, "thinking", None)
|
|
585
|
+
if not text and isinstance(block, dict):
|
|
586
|
+
text = block.get("text") or block.get("thinking") or block.get("data")
|
|
587
|
+
if text:
|
|
588
|
+
parts.append(str(text))
|
|
589
|
+
return "\n".join(parts)
|
|
590
|
+
return ""
|
|
591
|
+
|
|
592
|
+
def _format_reasoning_preview(self, reasoning: Any) -> str:
|
|
593
|
+
"""Best-effort stringify for reasoning/thinking traces."""
|
|
594
|
+
if reasoning is None:
|
|
595
|
+
return ""
|
|
596
|
+
if isinstance(reasoning, str):
|
|
597
|
+
preview = reasoning.strip()
|
|
598
|
+
else:
|
|
599
|
+
try:
|
|
600
|
+
preview = json.dumps(reasoning, ensure_ascii=False)
|
|
601
|
+
except (TypeError, ValueError, OverflowError):
|
|
602
|
+
preview = str(reasoning)
|
|
603
|
+
preview = preview.strip()
|
|
604
|
+
if len(preview) > 4000:
|
|
605
|
+
preview = preview[:4000] + "…"
|
|
606
|
+
return preview
|
|
607
|
+
|
|
608
|
+
def _print_reasoning(self, reasoning: Any) -> None:
|
|
609
|
+
"""Display thinking traces in a dim style."""
|
|
610
|
+
preview = self._format_reasoning_preview(reasoning)
|
|
611
|
+
if not preview:
|
|
612
|
+
return
|
|
613
|
+
# Collapse excessive blank lines to keep the thinking block compact.
|
|
614
|
+
preview = re.sub(r"\n{2,}", "\n", preview)
|
|
615
|
+
self.console.print(f"[dim]🧠 Thinking: {escape(preview)}[/]")
|
|
616
|
+
|
|
617
|
+
def _render_transcript(self, messages: List[ConversationMessage]) -> str:
|
|
618
|
+
"""Render a simple transcript for summarization."""
|
|
619
|
+
lines: List[str] = []
|
|
620
|
+
for msg in messages:
|
|
621
|
+
role = getattr(msg, "type", "") or getattr(msg, "role", "")
|
|
622
|
+
message_payload = getattr(msg, "message", None) or getattr(msg, "content", None)
|
|
623
|
+
if hasattr(message_payload, "content"):
|
|
624
|
+
message_payload = getattr(message_payload, "content")
|
|
625
|
+
text = self._stringify_message_content(message_payload)
|
|
626
|
+
if not text:
|
|
627
|
+
continue
|
|
628
|
+
label = "User" if role == "user" else "Assistant" if role == "assistant" else "Other"
|
|
629
|
+
lines.append(f"{label}: {text}")
|
|
630
|
+
return "\n".join(lines)
|
|
631
|
+
|
|
632
|
+
def _extract_assistant_text(self, assistant_message: Any) -> str:
|
|
633
|
+
"""Extract plain text from an AssistantMessage."""
|
|
634
|
+
if isinstance(assistant_message.message.content, str):
|
|
635
|
+
return assistant_message.message.content
|
|
636
|
+
if isinstance(assistant_message.message.content, list):
|
|
637
|
+
parts: List[str] = []
|
|
638
|
+
for block in assistant_message.message.content:
|
|
639
|
+
if getattr(block, "type", None) == "text" and getattr(block, "text", None):
|
|
640
|
+
parts.append(str(block.text))
|
|
641
|
+
return "\n".join(parts)
|
|
642
|
+
return ""
|
|
643
|
+
|
|
644
|
+
async def _prepare_query_context(self, user_input: str) -> tuple[str, Dict[str, str]]:
    """Load MCP servers, skills, and build system prompt.

    Loads MCP servers and dynamically-registered MCP tools for the current
    project, merges dynamic tools into the active query context, collects
    skill and memory instructions, and assembles the final system prompt.

    Returns:
        Tuple of (system_prompt, context_dict). context_dict starts empty
        and is passed through to build_system_prompt.
    """
    context: Dict[str, str] = {}
    servers = await load_mcp_servers_async(self.project_path)
    dynamic_tools = await load_dynamic_mcp_tools_async(self.project_path)

    # Merge dynamically discovered MCP tools into the existing tool list,
    # but only when a query context has already been created.
    if dynamic_tools and self.query_context:
        self.query_context.tools = merge_tools_with_dynamic(
            self.query_context.tools, dynamic_tools
        )

    logger.debug(
        "[ui] Prepared tools and MCP servers",
        extra={
            "session_id": self.session_id,
            "tool_count": len(self.query_context.tools) if self.query_context else 0,
            "mcp_servers": len(servers),
            "dynamic_tools": len(dynamic_tools),
        },
    )

    mcp_instructions = format_mcp_instructions(servers)
    skill_result = load_all_skills(self.project_path)

    # Skill load failures are non-fatal: log each and continue with the rest.
    for err in skill_result.errors:
        logger.warning(
            "[skills] Failed to load skill",
            extra={
                "path": str(err.path),
                "reason": err.reason,
                "session_id": self.session_id,
            },
        )

    skill_instructions = build_skill_summary(skill_result.skills)
    additional_instructions: List[str] = []
    if skill_instructions:
        additional_instructions.append(skill_instructions)

    memory_instructions = build_memory_instructions()
    if memory_instructions:
        additional_instructions.append(memory_instructions)

    system_prompt = build_system_prompt(
        self.query_context.tools if self.query_context else [],
        user_input,
        context,
        # None (rather than []) when there is nothing extra to append.
        additional_instructions=additional_instructions or None,
        mcp_instructions=mcp_instructions,
    )

    return system_prompt, context
|
|
700
|
+
|
|
701
|
+
def _check_and_compact_messages(
    self,
    messages: List[ConversationMessage],
    max_context_tokens: int,
    auto_compact_enabled: bool,
    protocol: str,
) -> List[ConversationMessage]:
    """Check context usage and compact if needed.

    Estimates token usage for *messages*, warns the user when above the
    warning threshold, and runs automatic compaction when the usage status
    says so. The pre-compaction conversation is stashed once in
    ``self._saved_conversation`` so it can be restored later.

    Returns:
        Possibly compacted list of messages.
    """
    used_tokens = estimate_used_tokens(messages, protocol=protocol)  # type: ignore[arg-type]
    usage_status = get_context_usage_status(
        used_tokens, max_context_tokens, auto_compact_enabled
    )

    logger.debug(
        "[ui] Context usage snapshot",
        extra={
            "session_id": self.session_id,
            "used_tokens": used_tokens,
            "max_context_tokens": max_context_tokens,
            "percent_used": round(usage_status.percent_used, 2),
            "auto_compact_enabled": auto_compact_enabled,
        },
    )

    # Surface a warning to the user before usage becomes critical.
    if usage_status.is_above_warning:
        console.print(
            f"[yellow]Context usage is {usage_status.percent_used:.1f}% "
            f"({usage_status.total_tokens}/{usage_status.max_context_tokens} tokens).[/yellow]"
        )
        if not auto_compact_enabled:
            console.print(
                "[dim]Auto-compaction is disabled; run /compact to trim history.[/dim]"
            )

    if usage_status.should_auto_compact:
        # Keep a copy so the original conversation can be restored later.
        original_messages = list(messages)
        compaction = compact_messages(messages, protocol=protocol)  # type: ignore[arg-type]
        if compaction.was_compacted:
            # Only save the first pre-compaction snapshot; later compactions
            # must not overwrite the earliest saved conversation.
            if self._saved_conversation is None:
                self._saved_conversation = original_messages  # type: ignore[assignment]
            console.print(
                f"[yellow]Auto-compacted conversation (saved ~{compaction.tokens_saved} tokens). "
                f"Estimated usage: {compaction.tokens_after}/{max_context_tokens} tokens.[/yellow]"
            )
            logger.info(
                "[ui] Auto-compacted conversation",
                extra={
                    "session_id": self.session_id,
                    "tokens_before": compaction.tokens_before,
                    "tokens_after": compaction.tokens_after,
                    "tokens_saved": compaction.tokens_saved,
                    "cleared_tool_ids": list(compaction.cleared_tool_ids),
                },
            )
            return compaction.messages  # type: ignore[return-value]

    return messages
|
|
762
|
+
|
|
763
|
+
def _handle_assistant_message(
    self,
    message: AssistantMessage,
    tool_registry: Dict[str, Dict[str, Any]],
) -> Optional[str]:
    """Handle an assistant message from the query stream.

    Prints any reasoning trace, displays text blocks, and records tool_use
    blocks in *tool_registry* keyed by tool-use id so the later tool result
    can be matched back to its call.

    Returns:
        The last tool name if a tool_use block was processed, None otherwise.
    """
    # Reasoning may arrive under different metadata keys depending on the
    # provider; take the first non-empty one.
    meta = getattr(getattr(message, "message", None), "metadata", {}) or {}
    reasoning_payload = (
        meta.get("reasoning_content")
        or meta.get("reasoning")
        or meta.get("reasoning_details")
    )
    if reasoning_payload:
        self._print_reasoning(reasoning_payload)

    last_tool_name: Optional[str] = None

    if isinstance(message.message.content, str):
        self.display_message("Ripperdoc", message.message.content)
    elif isinstance(message.message.content, list):
        for block in message.message.content:
            if hasattr(block, "type") and block.type == "text" and block.text:
                self.display_message("Ripperdoc", block.text)
            elif hasattr(block, "type") and block.type == "tool_use":
                tool_name = getattr(block, "name", "unknown tool")
                tool_args = getattr(block, "input", {})
                # Some providers use "tool_use_id", others plain "id".
                tool_use_id = getattr(block, "tool_use_id", None) or getattr(block, "id", None)

                if tool_use_id:
                    tool_registry[tool_use_id] = {
                        "name": tool_name,
                        "args": tool_args,
                        "printed": False,
                    }

                # Task (subagent) calls are shown immediately; other tool
                # calls are printed lazily when their result arrives.
                if tool_name == "Task":
                    self.display_message(
                        tool_name, "", is_tool=True, tool_type="call", tool_args=tool_args
                    )
                    if tool_use_id:
                        tool_registry[tool_use_id]["printed"] = True

                last_tool_name = tool_name

    return last_tool_name
|
|
812
|
+
|
|
813
|
+
def _handle_tool_result_message(
    self,
    message: UserMessage,
    tool_registry: Dict[str, Dict[str, Any]],
    last_tool_name: Optional[str],
) -> None:
    """Handle a user message containing tool results.

    For each tool_result block with text, print the (deferred) tool call
    header if it has not been shown yet, then display the result itself.
    Falls back to *last_tool_name* when the result cannot be matched to a
    registered tool-use id.
    """
    if not isinstance(message.message.content, list):
        return

    for block in message.message.content:
        # Results without text (or non-result blocks) are skipped entirely.
        if not (hasattr(block, "type") and block.type == "tool_result" and block.text):
            continue

        tool_name = "Tool"
        tool_data = getattr(message, "tool_use_result", None)
        is_error = bool(getattr(block, "is_error", False))
        tool_use_id = getattr(block, "tool_use_id", None)

        entry = tool_registry.get(tool_use_id) if tool_use_id else None
        if entry:
            tool_name = entry.get("name", tool_name)
            if not entry.get("printed"):
                # Print the matching tool call now (it was deferred when
                # the tool_use block arrived).
                self.display_message(
                    tool_name,
                    "",
                    is_tool=True,
                    tool_type="call",
                    tool_args=entry.get("args", {}),
                )
                entry["printed"] = True
        elif last_tool_name:
            tool_name = last_tool_name

        self.display_message(
            tool_name,
            block.text,
            is_tool=True,
            tool_type="result",
            tool_data=tool_data,
            tool_error=is_error,
        )
|
|
855
|
+
|
|
856
|
+
def _handle_progress_message(
    self,
    message: ProgressMessage,
    spinner: ThinkingSpinner,
    output_token_est: int,
) -> int:
    """Handle a progress message and update spinner.

    In verbose mode every progress line is echoed; otherwise only
    subagent progress is shown. Streaming chunks (tool_use_id == "stream")
    grow the running output-token estimate.

    Returns:
        Updated output token estimate.
    """
    if self.verbose:
        self.display_message("System", f"Progress: {message.content}", is_tool=True)
    elif message.content and isinstance(message.content, str):
        if message.content.startswith("Subagent: "):
            # Strip the "Subagent: " prefix; the label is shown separately.
            self.display_message(
                "Subagent", message.content[len("Subagent: ") :], is_tool=True
            )
        elif message.content.startswith("Subagent"):
            self.display_message("Subagent", message.content, is_tool=True)

    if message.tool_use_id == "stream":
        # Streaming text chunk: accumulate its token estimate.
        delta_tokens = estimate_tokens(message.content)
        output_token_est += delta_tokens
        spinner.update_tokens(output_token_est)
    else:
        spinner.update_tokens(output_token_est, suffix=f"Working... {message.content}")

    return output_token_est
|
|
885
|
+
|
|
886
|
+
async def process_query(self, user_input: str) -> None:
    """Process a user query and display the response.

    Orchestrates one full turn: prepares the system prompt and tools,
    appends the user message, compacts history if needed, then streams
    the model/tool responses while driving the thinking spinner and a
    permission-check callback. Updated history is stored back on
    ``self.conversation_messages`` when the stream finishes.
    """
    # Initialize or reset query context
    if not self.query_context:
        self.query_context = QueryContext(
            tools=self.get_default_tools(), safe_mode=self.safe_mode, verbose=self.verbose
        )
    else:
        # Clear any abort signal left over from a previous (interrupted) query.
        abort_controller = getattr(self.query_context, "abort_controller", None)
        if abort_controller is not None:
            abort_controller.clear()

    logger.info(
        "[ui] Starting query processing",
        extra={
            "session_id": self.session_id,
            "prompt_length": len(user_input),
            "prompt_preview": user_input[:200],
        },
    )

    try:
        # Prepare context and system prompt
        system_prompt, context = await self._prepare_query_context(user_input)

        # Create and log user message
        user_message = create_user_message(user_input)
        messages: List[ConversationMessage] = self.conversation_messages + [user_message]
        self._log_message(user_message)
        self._append_prompt_history(user_input)

        # Get model configuration
        config = get_global_config()
        model_profile = get_profile_for_pointer("main")
        max_context_tokens = get_remaining_context_tokens(
            model_profile, config.context_token_limit
        )
        auto_compact_enabled = resolve_auto_compact_enabled(config)
        # Fall back to the OpenAI protocol when no profile is configured.
        protocol = provider_protocol(model_profile.provider) if model_profile else "openai"

        # Check and potentially compact messages
        messages = self._check_and_compact_messages(
            messages, max_context_tokens, auto_compact_enabled, protocol
        )

        # Setup spinner and callbacks
        prompt_tokens_est = estimate_conversation_tokens(messages, protocol=protocol)
        spinner = ThinkingSpinner(console, prompt_tokens_est)

        def pause_ui() -> None:
            # Hooked into the query context so tools can pause the spinner.
            spinner.stop()

        def resume_ui() -> None:
            spinner.start()
            spinner.update("Thinking...")

        self.query_context.pause_ui = pause_ui
        self.query_context.resume_ui = resume_ui

        # Create permission checker with spinner control
        base_permission_checker = self._permission_checker

        async def permission_checker(tool: Any, parsed_input: Any) -> bool:
            # Stop the spinner and the raw-mode ESC listener so the
            # permission prompt can read from a cooked terminal.
            spinner.stop()
            was_paused = self._pause_interrupt_listener()
            try:
                if base_permission_checker is not None:
                    result = await base_permission_checker(tool, parsed_input)
                    allowed = result.result if hasattr(result, "result") else True
                    logger.debug(
                        "[ui] Permission check result",
                        extra={
                            "tool": getattr(tool, "name", None),
                            "allowed": allowed,
                            "session_id": self.session_id,
                        },
                    )
                    return allowed
                # No checker configured: allow by default.
                return True
            finally:
                self._resume_interrupt_listener(was_paused)
                # Wrap spinner restart in try-except to prevent exceptions
                # from discarding the permission result
                try:
                    spinner.start()
                    spinner.update("Thinking...")
                except (RuntimeError, ValueError, OSError) as exc:
                    logger.debug(
                        "[ui] Failed to restart spinner after permission check: %s: %s",
                        type(exc).__name__, exc,
                    )

        # Process query stream
        tool_registry: Dict[str, Dict[str, Any]] = {}
        last_tool_name: Optional[str] = None
        output_token_est = 0

        try:
            spinner.start()
            async for message in query(
                messages,
                system_prompt,
                context,
                self.query_context,
                permission_checker,  # type: ignore[arg-type]
            ):
                if message.type == "assistant" and isinstance(message, AssistantMessage):
                    result = self._handle_assistant_message(message, tool_registry)
                    if result:
                        last_tool_name = result

                elif message.type == "user" and isinstance(message, UserMessage):
                    self._handle_tool_result_message(message, tool_registry, last_tool_name)

                elif message.type == "progress" and isinstance(message, ProgressMessage):
                    output_token_est = self._handle_progress_message(
                        message, spinner, output_token_est
                    )

                # Every streamed message is logged and kept in history.
                self._log_message(message)
                messages.append(message)  # type: ignore[arg-type]

        except asyncio.CancelledError:
            # Re-raise cancellation to allow proper cleanup
            raise
        except (OSError, ConnectionError, RuntimeError, ValueError, KeyError, TypeError) as e:
            logger.warning(
                "[ui] Error while processing streamed query response: %s: %s",
                type(e).__name__, e,
                extra={"session_id": self.session_id},
            )
            self.display_message("System", f"Error: {str(e)}", is_tool=True)
        finally:
            try:
                spinner.stop()
            except (RuntimeError, ValueError, OSError) as exc:
                logger.warning(
                    "[ui] Failed to stop spinner: %s: %s",
                    type(exc).__name__, exc,
                    extra={"session_id": self.session_id},
                )

        # Persist the (possibly partial) conversation even after stream errors.
        self.conversation_messages = messages
        logger.info(
            "[ui] Query processing completed",
            extra={
                "session_id": self.session_id,
                "conversation_messages": len(self.conversation_messages),
                "project_path": str(self.project_path),
            },
        )

    except asyncio.CancelledError:
        # Re-raise cancellation to allow proper cleanup
        raise
    except (OSError, ConnectionError, RuntimeError, ValueError, KeyError, TypeError) as exc:
        logger.warning(
            "[ui] Error during query processing: %s: %s",
            type(exc).__name__, exc,
            extra={"session_id": self.session_id},
        )
        self.display_message("System", f"Error: {str(exc)}", is_tool=True)
|
|
1048
|
+
|
|
1049
|
+
# ─────────────────────────────────────────────────────────────────────────────
# ESC Key Interrupt Support
# ─────────────────────────────────────────────────────────────────────────────

# Keys that trigger interrupt: raw single-character keypresses read from
# stdin in raw mode by _listen_for_interrupt_key.
_INTERRUPT_KEYS = {'\x1b', '\x03'}  # ESC, Ctrl+C
|
|
1055
|
+
|
|
1056
|
+
def _pause_interrupt_listener(self) -> bool:
    """Pause ESC listener and restore cooked terminal mode if we own raw mode.

    Returns:
        The previous paused state, so the caller can hand it back to
        _resume_interrupt_listener afterwards.
    """
    prev = self._esc_listener_paused
    self._esc_listener_paused = True
    try:
        # termios is POSIX-only; on platforms without it there is no raw
        # mode to undo, so just report the previous paused state.
        import termios
    except ImportError:
        return prev

    if (
        self._stdin_fd is not None
        and self._stdin_old_settings is not None
        and self._stdin_in_raw_mode
    ):
        # Best-effort restore of the saved terminal settings; failures
        # (e.g. stdin closed) are deliberately ignored.
        with contextlib.suppress(OSError, termios.error, ValueError):
            termios.tcsetattr(self._stdin_fd, termios.TCSADRAIN, self._stdin_old_settings)
        self._stdin_in_raw_mode = False
    return prev
|
|
1074
|
+
|
|
1075
|
+
def _resume_interrupt_listener(self, previous_state: bool) -> None:
|
|
1076
|
+
"""Restore paused state to what it was before a blocking prompt."""
|
|
1077
|
+
self._esc_listener_paused = previous_state
|
|
1078
|
+
|
|
1079
|
+
async def _listen_for_interrupt_key(self) -> bool:
    """Listen for interrupt keys (ESC/Ctrl+C) during query execution.

    Uses raw terminal mode for immediate key detection without waiting
    for escape sequences to complete.

    Returns:
        True when an interrupt key was pressed; False when the listener
        is stopped, stdin is not a usable tty, or an OS error occurs.
    """
    import sys
    import select
    import termios
    import tty

    try:
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
    except (OSError, termios.error, ValueError):
        # stdin is not a tty (or is closed): nothing to listen on.
        return False

    # Publish fd/settings so _pause_interrupt_listener can restore cooked
    # mode from outside this coroutine.
    self._stdin_fd = fd
    self._stdin_old_settings = old_settings
    raw_enabled = False
    try:
        while self._esc_listener_active:
            if self._esc_listener_paused:
                # While paused (e.g. during a permission prompt) leave the
                # terminal in cooked mode and poll slowly.
                if raw_enabled:
                    with contextlib.suppress(OSError, termios.error, ValueError):
                        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
                    raw_enabled = False
                    self._stdin_in_raw_mode = False
                await asyncio.sleep(0.05)
                continue

            if not raw_enabled:
                tty.setraw(fd)
                raw_enabled = True
                self._stdin_in_raw_mode = True

            # Short sleep keeps the event loop responsive; the zero-timeout
            # select makes the stdin read non-blocking.
            await asyncio.sleep(0.02)
            if select.select([sys.stdin], [], [], 0)[0]:
                if sys.stdin.read(1) in self._INTERRUPT_KEYS:
                    return True
    except (OSError, ValueError):
        pass
    finally:
        # Always restore terminal state and clear the published fd/settings.
        self._stdin_in_raw_mode = False
        with contextlib.suppress(OSError, termios.error, ValueError):
            if raw_enabled:
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        self._stdin_fd = None
        self._stdin_old_settings = None

    return False
|
|
1130
|
+
|
|
1131
|
+
async def _cancel_task(self, task: asyncio.Task) -> None:
|
|
1132
|
+
"""Cancel a task and wait for it to finish."""
|
|
1133
|
+
if not task.done():
|
|
1134
|
+
task.cancel()
|
|
1135
|
+
with contextlib.suppress(asyncio.CancelledError):
|
|
1136
|
+
await task
|
|
1137
|
+
|
|
1138
|
+
def _trigger_abort(self) -> None:
|
|
1139
|
+
"""Signal the query to abort."""
|
|
1140
|
+
if self.query_context and hasattr(self.query_context, "abort_controller"):
|
|
1141
|
+
self.query_context.abort_controller.set()
|
|
1142
|
+
|
|
1143
|
+
async def _run_query_with_esc_interrupt(self, query_coro: Any) -> bool:
    """Run a query with ESC key interrupt support.

    Races the query coroutine against the keyboard-interrupt listener.
    Whichever finishes first wins; the loser is cancelled.

    Returns True if interrupted, False if completed normally.
    """
    self._query_interrupted = False
    self._esc_listener_active = True

    query_task = asyncio.create_task(query_coro)
    interrupt_task = asyncio.create_task(self._listen_for_interrupt_key())

    try:
        done, _ = await asyncio.wait(
            {query_task, interrupt_task},
            return_when=asyncio.FIRST_COMPLETED
        )

        # Check if interrupted
        if interrupt_task in done and interrupt_task.result():
            self._query_interrupted = True
            # Signal abort first so the query can stop cooperatively,
            # then cancel its task outright.
            self._trigger_abort()
            await self._cancel_task(query_task)
            return True

        # Query completed normally
        if query_task in done:
            await self._cancel_task(interrupt_task)
            # Retrieve the result only to surface/consume any stored
            # exception; errors here are deliberately ignored.
            with contextlib.suppress(Exception):
                query_task.result()
            return False

        return False

    finally:
        # Deactivate the listener and make sure neither task outlives us.
        self._esc_listener_active = False
        await self._cancel_task(query_task)
        await self._cancel_task(interrupt_task)
|
|
1180
|
+
|
|
1181
|
+
def _run_async(self, coro: Any) -> Any:
|
|
1182
|
+
"""Run a coroutine on the persistent event loop."""
|
|
1183
|
+
if self._loop.is_closed():
|
|
1184
|
+
self._loop = asyncio.new_event_loop()
|
|
1185
|
+
asyncio.set_event_loop(self._loop)
|
|
1186
|
+
return self._loop.run_until_complete(coro)
|
|
1187
|
+
|
|
1188
|
+
def _run_async_with_esc_interrupt(self, coro: Any) -> bool:
|
|
1189
|
+
"""Run a coroutine with ESC key interrupt support.
|
|
1190
|
+
|
|
1191
|
+
Returns True if interrupted by ESC, False if completed normally.
|
|
1192
|
+
"""
|
|
1193
|
+
if self._loop.is_closed():
|
|
1194
|
+
self._loop = asyncio.new_event_loop()
|
|
1195
|
+
asyncio.set_event_loop(self._loop)
|
|
1196
|
+
return self._loop.run_until_complete(self._run_query_with_esc_interrupt(coro))
|
|
1197
|
+
|
|
1198
|
+
def run_async(self, coro: Any) -> Any:
    """Execute *coro* on the UI event loop and return its result.

    Thin public alias for the private ``_run_async`` helper.
    """
    result = self._run_async(coro)
    return result
|
|
1201
|
+
|
|
1202
|
+
def handle_slash_command(self, user_input: str) -> bool:
    """Dispatch a "/command" input line.

    Returns True when the input was consumed (including error messages
    for empty or unknown commands), False when it is not a slash command.
    """
    if not user_input.startswith("/"):
        return False

    tokens = user_input[1:].strip().split()
    if not tokens:
        self.console.print("[red]No command provided after '/'.[/red]")
        return True

    name = tokens[0].lower()
    argument = " ".join(tokens[1:]).strip()
    cmd = get_slash_command(name)
    if cmd is None:
        self.console.print(f"[red]Unknown command: {escape(name)}[/red]")
        return True

    return cmd.handler(self, argument)
|
|
1221
|
+
|
|
1222
|
+
def get_prompt_session(self) -> PromptSession:
    """Create (or return) the prompt session with command completion.

    The session is created lazily on first use and cached on the
    instance; subsequent calls return the same PromptSession.
    """
    if self._prompt_session:
        return self._prompt_session

    class SlashCommandCompleter(Completer):
        """Autocomplete for slash commands."""

        def __init__(self, completions: List):
            # completions: iterable of (name, command) pairs where command
            # exposes a .description attribute.
            self.completions = completions

        def get_completions(self, document: Any, complete_event: Any) -> Iterable[Completion]:
            text = document.text_before_cursor
            # Only complete when the line starts with a slash.
            if not text.startswith("/"):
                return
            query = text[1:]
            for name, cmd in self.completions:
                if name.startswith(query):
                    yield Completion(
                        name,
                        # Replace the already-typed prefix after the slash.
                        start_position=-len(query),
                        display=name,
                        display_meta=cmd.description,
                    )

    self._prompt_session = PromptSession(
        completer=SlashCommandCompleter(self._command_completions),
        complete_style=CompleteStyle.COLUMN,
        complete_while_typing=True,
        history=InMemoryHistory(),
    )
    return self._prompt_session
|
|
1254
|
+
|
|
1255
|
+
def run(self) -> None:
    """Run the Rich-based interface.

    Shows the welcome/status panels, then loops reading prompts: slash
    commands are handled locally, anything else is sent to process_query
    with ESC-interrupt support. On exit, aborts any running query and
    tears down the MCP runtime and the persistent event loop.
    """
    # Display welcome panel
    console.print()
    console.print(create_welcome_panel())
    console.print()

    # Display status
    console.print(create_status_bar())
    console.print()
    console.print("[dim]Tip: type '/' then press Tab to see available commands. Press ESC to interrupt a running query.[/dim]\n")

    session = self.get_prompt_session()
    logger.info(
        "[ui] Starting interactive loop",
        extra={"session_id": self.session_id, "log_file": str(self.log_file_path)},
    )

    try:
        while not self._should_exit:
            try:
                # Get user input
                user_input = session.prompt("> ")

                if not user_input.strip():
                    continue

                # Bare "?" shows the shortcut cheat-sheet.
                if user_input.strip() == "?":
                    self._print_shortcuts()
                    console.print()
                    continue

                # Handle slash commands locally
                if user_input.startswith("/"):
                    logger.debug(
                        "[ui] Received slash command",
                        extra={"session_id": self.session_id, "command": user_input},
                    )
                    handled = self.handle_slash_command(user_input)
                    # A command such as /exit may request shutdown.
                    if self._should_exit:
                        break
                    if handled:
                        console.print()  # spacing
                        continue

                # Process the query
                logger.info(
                    "[ui] Processing interactive prompt",
                    extra={
                        "session_id": self.session_id,
                        "prompt_length": len(user_input),
                        "prompt_preview": user_input[:200],
                    },
                )
                interrupted = self._run_async_with_esc_interrupt(self.process_query(user_input))

                if interrupted:
                    console.print("\n[red]■ Conversation interrupted[/red] · [dim]Tell the model what to do differently.[/dim]")
                    logger.info(
                        "[ui] Query interrupted by ESC key",
                        extra={"session_id": self.session_id},
                    )

                console.print()  # Add spacing between interactions

            except KeyboardInterrupt:
                # Signal abort to cancel running queries
                if self.query_context:
                    abort_controller = getattr(self.query_context, "abort_controller", None)
                    if abort_controller is not None:
                        abort_controller.set()
                console.print("\n[yellow]Goodbye![/yellow]")
                break
            except EOFError:
                # Ctrl+D / closed stdin ends the session gracefully.
                console.print("\n[yellow]Goodbye![/yellow]")
                break
            except (OSError, ConnectionError, RuntimeError, ValueError, KeyError, TypeError) as e:
                console.print(f"[red]Error: {escape(str(e))}[/]")
                logger.warning(
                    "[ui] Error in interactive loop: %s: %s",
                    type(e).__name__, e,
                    extra={"session_id": self.session_id},
                )
                if self.verbose:
                    import traceback

                    console.print(traceback.format_exc())
    finally:
        # Cancel any running tasks before shutdown
        if self.query_context:
            abort_controller = getattr(self.query_context, "abort_controller", None)
            if abort_controller is not None:
                abort_controller.set()

        # Suppress async generator cleanup errors during shutdown
        original_hook = sys.unraisablehook

        def _quiet_unraisable_hook(unraisable: Any) -> None:
            # Suppress "asynchronous generator is already running" errors during shutdown
            if isinstance(unraisable.exc_value, RuntimeError):
                if "asynchronous generator is already running" in str(unraisable.exc_value):
                    return
            # Call original hook for other errors
            original_hook(unraisable)

        sys.unraisablehook = _quiet_unraisable_hook

        try:
            try:
                self._run_async(shutdown_mcp_runtime())
            except (OSError, RuntimeError, ConnectionError, asyncio.CancelledError) as exc:
                # pragma: no cover - defensive shutdown
                logger.warning(
                    "[ui] Failed to shut down MCP runtime cleanly: %s: %s",
                    type(exc).__name__, exc,
                    extra={"session_id": self.session_id},
                )
        finally:
            if not self._loop.is_closed():
                # Cancel all pending tasks
                pending = asyncio.all_tasks(self._loop)
                for task in pending:
                    task.cancel()

                # Allow cancelled tasks to clean up
                if pending:
                    try:
                        self._loop.run_until_complete(
                            asyncio.gather(*pending, return_exceptions=True)
                        )
                    except (RuntimeError, asyncio.CancelledError):
                        pass  # Ignore errors during task cancellation

                # Shutdown async generators (suppress expected errors)
                try:
                    self._loop.run_until_complete(self._loop.shutdown_asyncgens())
                except (RuntimeError, asyncio.CancelledError):
                    # Expected during forced shutdown - async generators may already be running
                    pass

                self._loop.close()
                asyncio.set_event_loop(None)
            sys.unraisablehook = original_hook
|
|
1398
|
+
|
|
1399
|
+
async def _run_manual_compact(self, custom_instructions: str) -> None:
    """Manual compaction: clear bulky tool output and summarize conversation.

    Replaces the live conversation with an LLM-generated summary plus the most
    recent turns, stashing the full history in ``self._saved_conversation`` so
    /resume can restore it.

    Args:
        custom_instructions: Extra guidance appended to the summarizer prompt
            (may be empty).
    """
    if len(self.conversation_messages) < 2:
        console.print("[yellow]Not enough conversation history to compact.[/yellow]")
        return

    profile = get_profile_for_pointer("main")
    proto = provider_protocol(profile.provider) if profile else "openai"

    history = list(self.conversation_messages)
    before_tokens = estimate_conversation_tokens(history, protocol=proto)

    # Strip bulky tool output before asking the model for a summary.
    compacted = compact_messages(history, protocol=proto)
    summary_input = compacted.messages

    progress = Spinner(console, "Summarizing conversation...", spinner="dots")
    summary = ""
    try:
        progress.start()
        summary = await self._summarize_conversation(summary_input, custom_instructions)
    except (OSError, RuntimeError, ConnectionError, ValueError, KeyError) as e:
        console.print(f"[red]Error during compaction: {escape(str(e))}[/red]")
        logger.warning(
            "[ui] Error during manual compaction: %s: %s",
            type(e).__name__, e,
            extra={"session_id": self.session_id},
        )
        return
    finally:
        # Stop the spinner on every path: success, handled error, or early return.
        progress.stop()

    if not summary:
        console.print("[red]Failed to summarize conversation for compaction.[/red]")
        return
    if not summary.strip():
        console.print("[red]Summarization returned empty content; aborting compaction.[/red]")
        return

    # Keep the full pre-compaction history so /resume can undo this.
    self._saved_conversation = history
    summary_message = create_assistant_message(
        f"Conversation summary (generated by /compact):\n{summary}"
    )
    chatter = [m for m in summary_input if getattr(m, "type", "") != "progress"]
    tail: List[ConversationMessage] = []
    if RECENT_MESSAGES_AFTER_COMPACT > 0:
        tail = chatter[-RECENT_MESSAGES_AFTER_COMPACT:]
    rebuilt = [
        create_user_message(
            "Conversation compacted. Summary plus recent turns are kept; older tool output may "
            "be cleared."
        ),
        summary_message,
    ]
    rebuilt.extend(tail)
    self.conversation_messages = rebuilt

    after_tokens = estimate_conversation_tokens(rebuilt, protocol=proto)
    saved = max(0, before_tokens - after_tokens)
    console.print(
        f"[green]✓ Conversation compacted[/green] "
        f"(saved ~{saved} tokens). Use /resume to restore full history."
    )
|
|
1467
|
+
|
|
1468
|
+
async def _summarize_conversation(
    self,
    messages: List[ConversationMessage],
    custom_instructions: str,
) -> str:
    """Summarize the given conversation using the configured model.

    Args:
        messages: Conversation to condense; only the last 40 entries are used
            so the summarization request itself stays within context limits.
        custom_instructions: Optional user-supplied guidance for the summary.

    Returns:
        The assistant's summary text, or "" when there is nothing to summarize.
    """
    # Bound the transcript to recent turns to avoid blowing the context window.
    window = messages[-40:]
    transcript = self._render_transcript(window)
    if not transcript.strip():
        return ""

    prompt = (
        "You are a helpful assistant summarizing the prior conversation. "
        "Produce a concise bullet-list summary covering key decisions, important context, "
        "commands run, files touched, and pending TODOs. Include blockers or open questions. "
        "Keep it brief."
    )
    extra = custom_instructions.strip()
    if extra:
        prompt += f"\nCustom instructions: {extra}"

    request = (
        f"Summarize the following conversation between a user and an assistant:\n\n{transcript}"
    )

    response = await query_llm(
        messages=[{"role": "user", "content": request}],  # type: ignore[list-item]
        system_prompt=prompt,
        tools=[],
        max_thinking_tokens=0,
        model="main",
    )
    return self._extract_assistant_text(response)
|
|
1501
|
+
|
|
1502
|
+
def _print_shortcuts(self) -> None:
    """Show common keyboard shortcuts and input prefixes as two columns."""
    # Disabled rows kept for reference until those features ship:
    #   "@ for file paths" / "ctrl + o for verbose output"
    #   "# to memorize" / "ctrl + v to paste images"
    #   "& for background" / "ctrl + t to show todos"
    #   "double tap esc to clear input" / "tab to toggle thinking"
    #   "ctrl + _ to undo" / "ctrl + z to suspend"
    #   "shift + enter for newline"
    rows = [
        ("? for shortcuts", "! for bash mode"),
        ("/ for commands", "shift + tab to auto-accept edits"),
    ]
    console.print("[dim]Shortcuts[/dim]")
    for left, right in rows:
        # Left column padded to a fixed width so the right column lines up.
        padded = f" {left}".ljust(32)
        console.print(f"{padded}{right if right else ''}")
|
|
1519
|
+
|
|
1520
|
+
|
|
1521
|
+
def check_onboarding_rich() -> bool:
    """Return True once onboarding is complete, running it first if needed.

    Delegates to the plain-console onboarding flow when the global config
    says onboarding has not yet been completed.
    """
    if get_global_config().has_completed_onboarding:
        return True

    # Imported lazily to avoid a circular import with the CLI module.
    from ripperdoc.cli.cli import check_onboarding

    return check_onboarding()
|
|
1532
|
+
|
|
1533
|
+
|
|
1534
|
+
def main_rich(
    safe_mode: bool = False,
    verbose: bool = False,
    session_id: Optional[str] = None,
    log_file_path: Optional[Path] = None,
) -> None:
    """Main entry point for the Rich interface.

    Args:
        safe_mode: Run with restricted permissions.
        verbose: Enable verbose output.
        session_id: Resume/attach to an existing session when given.
        log_file_path: Optional destination for log output.
    """
    # Onboarding must succeed before the UI starts.
    if not check_onboarding_rich():
        sys.exit(1)

    RichUI(
        safe_mode=safe_mode,
        verbose=verbose,
        session_id=session_id,
        log_file_path=log_file_path,
    ).run()
|
|
1554
|
+
|
|
1555
|
+
|
|
1556
|
+
# Allow launching the Rich UI by running this module directly.
if __name__ == "__main__":
    main_rich()
|