ripperdoc 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +379 -51
  3. ripperdoc/cli/commands/__init__.py +6 -0
  4. ripperdoc/cli/commands/agents_cmd.py +128 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  7. ripperdoc/cli/commands/exit_cmd.py +1 -0
  8. ripperdoc/cli/commands/memory_cmd.py +2 -1
  9. ripperdoc/cli/commands/models_cmd.py +63 -7
  10. ripperdoc/cli/commands/resume_cmd.py +5 -0
  11. ripperdoc/cli/commands/skills_cmd.py +103 -0
  12. ripperdoc/cli/commands/stats_cmd.py +244 -0
  13. ripperdoc/cli/commands/status_cmd.py +10 -0
  14. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  15. ripperdoc/cli/commands/themes_cmd.py +139 -0
  16. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  17. ripperdoc/cli/ui/helpers.py +6 -3
  18. ripperdoc/cli/ui/interrupt_handler.py +34 -0
  19. ripperdoc/cli/ui/panels.py +14 -8
  20. ripperdoc/cli/ui/rich_ui.py +737 -47
  21. ripperdoc/cli/ui/spinner.py +93 -18
  22. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  23. ripperdoc/cli/ui/tool_renderers.py +10 -9
  24. ripperdoc/cli/ui/wizard.py +24 -19
  25. ripperdoc/core/agents.py +14 -3
  26. ripperdoc/core/config.py +238 -6
  27. ripperdoc/core/default_tools.py +91 -10
  28. ripperdoc/core/hooks/events.py +4 -0
  29. ripperdoc/core/hooks/llm_callback.py +58 -0
  30. ripperdoc/core/hooks/manager.py +6 -0
  31. ripperdoc/core/permissions.py +160 -9
  32. ripperdoc/core/providers/openai.py +84 -28
  33. ripperdoc/core/query.py +489 -87
  34. ripperdoc/core/query_utils.py +17 -14
  35. ripperdoc/core/skills.py +1 -0
  36. ripperdoc/core/theme.py +298 -0
  37. ripperdoc/core/tool.py +15 -5
  38. ripperdoc/protocol/__init__.py +14 -0
  39. ripperdoc/protocol/models.py +300 -0
  40. ripperdoc/protocol/stdio.py +1453 -0
  41. ripperdoc/tools/background_shell.py +354 -139
  42. ripperdoc/tools/bash_tool.py +117 -22
  43. ripperdoc/tools/file_edit_tool.py +228 -50
  44. ripperdoc/tools/file_read_tool.py +154 -3
  45. ripperdoc/tools/file_write_tool.py +53 -11
  46. ripperdoc/tools/grep_tool.py +98 -8
  47. ripperdoc/tools/lsp_tool.py +609 -0
  48. ripperdoc/tools/multi_edit_tool.py +26 -3
  49. ripperdoc/tools/skill_tool.py +52 -1
  50. ripperdoc/tools/task_tool.py +539 -65
  51. ripperdoc/utils/conversation_compaction.py +1 -1
  52. ripperdoc/utils/file_watch.py +216 -7
  53. ripperdoc/utils/image_utils.py +125 -0
  54. ripperdoc/utils/log.py +30 -3
  55. ripperdoc/utils/lsp.py +812 -0
  56. ripperdoc/utils/mcp.py +80 -18
  57. ripperdoc/utils/message_formatting.py +7 -4
  58. ripperdoc/utils/messages.py +198 -33
  59. ripperdoc/utils/pending_messages.py +50 -0
  60. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  61. ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
  62. ripperdoc/utils/platform.py +198 -0
  63. ripperdoc/utils/session_heatmap.py +242 -0
  64. ripperdoc/utils/session_history.py +2 -2
  65. ripperdoc/utils/session_stats.py +294 -0
  66. ripperdoc/utils/shell_utils.py +8 -5
  67. ripperdoc/utils/todo.py +0 -6
  68. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
  69. ripperdoc-0.3.0.dist-info/RECORD +136 -0
  70. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
  71. ripperdoc/sdk/__init__.py +0 -9
  72. ripperdoc/sdk/client.py +0 -333
  73. ripperdoc-0.2.9.dist-info/RECORD +0 -123
  74. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
  75. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
@@ -4,8 +4,10 @@ This module provides a clean, minimal terminal UI using Rich for the Ripperdoc a
4
4
  """
5
5
 
6
6
  import asyncio
7
+ import difflib
7
8
  import json
8
9
  import sys
10
+ import time
9
11
  import uuid
10
12
  from typing import List, Dict, Any, Optional, Union, Iterable
11
13
  from pathlib import Path
@@ -15,16 +17,20 @@ from rich.markup import escape
15
17
 
16
18
  from prompt_toolkit import PromptSession
17
19
  from prompt_toolkit.completion import Completer, Completion, merge_completers
20
+ from prompt_toolkit.formatted_text import FormattedText
18
21
  from prompt_toolkit.history import InMemoryHistory
19
22
  from prompt_toolkit.key_binding import KeyBindings
20
23
  from prompt_toolkit.shortcuts.prompt import CompleteStyle
24
+ from prompt_toolkit.styles import Style
21
25
 
22
- from ripperdoc.core.config import get_global_config, provider_protocol
26
+ from ripperdoc.core.config import get_global_config, provider_protocol, model_supports_vision
23
27
  from ripperdoc.core.default_tools import get_default_tools
28
+ from ripperdoc.core.theme import get_theme_manager
24
29
  from ripperdoc.core.query import query, QueryContext
25
30
  from ripperdoc.core.system_prompt import build_system_prompt
26
31
  from ripperdoc.core.skills import build_skill_summary, load_all_skills
27
32
  from ripperdoc.core.hooks.manager import hook_manager
33
+ from ripperdoc.core.hooks.llm_callback import build_hook_llm_callback
28
34
  from ripperdoc.cli.commands import (
29
35
  get_slash_command,
30
36
  get_custom_command,
@@ -46,8 +52,6 @@ from ripperdoc.utils.conversation_compaction import (
46
52
  compact_conversation,
47
53
  CompactionResult,
48
54
  CompactionError,
49
- extract_tool_ids_from_message,
50
- get_complete_tool_pairs_tail,
51
55
  )
52
56
  from ripperdoc.utils.message_compaction import (
53
57
  estimate_conversation_tokens,
@@ -64,6 +68,8 @@ from ripperdoc.utils.mcp import (
64
68
  load_mcp_servers_async,
65
69
  shutdown_mcp_runtime,
66
70
  )
71
+ from ripperdoc.utils.lsp import shutdown_lsp_manager
72
+ from ripperdoc.tools.background_shell import shutdown_background_shell
67
73
  from ripperdoc.tools.mcp_tools import load_dynamic_mcp_tools_async, merge_tools_with_dynamic
68
74
  from ripperdoc.utils.session_history import SessionHistory
69
75
  from ripperdoc.utils.memory import build_memory_instructions
@@ -77,6 +83,7 @@ from ripperdoc.utils.log import enable_session_file_logging, get_logger
77
83
  from ripperdoc.utils.path_ignore import build_ignore_filter
78
84
  from ripperdoc.cli.ui.file_mention_completer import FileMentionCompleter
79
85
  from ripperdoc.utils.message_formatting import stringify_message_content
86
+ from ripperdoc.utils.image_utils import read_image_as_base64, is_image_file
80
87
 
81
88
 
82
89
  # Type alias for conversation messages
@@ -86,9 +93,199 @@ console = Console()
86
93
  logger = get_logger()
87
94
 
88
95
 
89
- # Legacy aliases for backward compatibility with tests
90
- _extract_tool_ids_from_message = extract_tool_ids_from_message
91
- _get_complete_tool_pairs_tail = get_complete_tool_pairs_tail
96
def _suggest_slash_commands(name: str, project_path: Optional[Path]) -> List[str]:
    """Return close matching slash commands for a mistyped name."""
    if not name:
        return []
    # dict.fromkeys dedupes while preserving first-seen order.
    candidates = dict.fromkeys(
        command_name for command_name, _cmd in slash_command_completions(project_path)
    )
    return difflib.get_close_matches(name, list(candidates), n=3, cutoff=0.6)
107
+
108
+
109
+ def _extract_image_paths(text: str) -> List[str]:
110
+ """Extract @-referenced image paths from text.
111
+
112
+ Handles cases like:
113
+ - "@image.png describe this" (space after)
114
+ - "@image.png描述这个" (no space after, Chinese text)
115
+ - "@image.png.whatIsThis" (no space, ASCII text)
116
+
117
+ Args:
118
+ text: User input text
119
+
120
+ Returns:
121
+ List of file paths (without the @ prefix)
122
+ """
123
+ import re
124
+ from pathlib import Path
125
+
126
+ result = []
127
+
128
+ # Find all @ followed by content until space or end
129
+ for match in re.finditer(r"@(\S+)", text):
130
+ candidate = match.group(1)
131
+ if not candidate:
132
+ continue
133
+
134
+ # Try to find the actual file path by progressively trimming
135
+ # First, check if the full candidate is a file
136
+ if Path(candidate).exists():
137
+ result.append(candidate)
138
+ continue
139
+
140
+ # Not a file, try to find where the file path ends
141
+ # Common file extensions
142
+ extensions = [
143
+ ".png",
144
+ ".jpg",
145
+ ".jpeg",
146
+ ".gif",
147
+ ".webp",
148
+ ".bmp",
149
+ ".svg",
150
+ ".py",
151
+ ".js",
152
+ ".ts",
153
+ ".tsx",
154
+ ".jsx",
155
+ ".vue",
156
+ ".go",
157
+ ".rs",
158
+ ".java",
159
+ ".c",
160
+ ".cpp",
161
+ ".h",
162
+ ".hpp",
163
+ ".cs",
164
+ ".php",
165
+ ".rb",
166
+ ".sh",
167
+ ".md",
168
+ ".txt",
169
+ ".json",
170
+ ".yaml",
171
+ ".yml",
172
+ ".xml",
173
+ ".html",
174
+ ".css",
175
+ ".scss",
176
+ ".sql",
177
+ ".db",
178
+ ]
179
+
180
+ found_path = None
181
+ for ext in extensions:
182
+ # Look for this extension in the candidate
183
+ if ext.lower() in candidate.lower():
184
+ # Found extension, extract path up to and including it
185
+ ext_pos = candidate.lower().find(ext.lower())
186
+ potential_path = candidate[: ext_pos + len(ext)]
187
+ if Path(potential_path).exists():
188
+ found_path = potential_path
189
+ break
190
+
191
+ # Also try to find the LAST occurrence of this extension
192
+ # For cases like "file.txt.extraText"
193
+ last_ext_pos = candidate.lower().rfind(ext.lower())
194
+ if last_ext_pos > ext_pos:
195
+ potential_path = candidate[: last_ext_pos + len(ext)]
196
+ if Path(potential_path).exists():
197
+ found_path = potential_path
198
+ break
199
+
200
+ if found_path:
201
+ result.append(found_path)
202
+ else:
203
+ # No file found, keep the original candidate
204
+ # The processing function will handle non-existent files
205
+ result.append(candidate)
206
+
207
+ return result
208
+
209
+
210
def _process_images_in_input(
    user_input: str,
    project_path: Path,
    model_pointer: str,
) -> tuple[str, List[Dict[str, Any]]]:
    """Process @ references for images in user input.

    Only image files are processed and converted to image blocks.
    Text files and non-existent files are left as-is in the text.

    Args:
        user_input: Raw user input text
        project_path: Project root path
        model_pointer: Model pointer to check vision support

    Returns:
        (processed_text, image_blocks) tuple
    """
    import re
    from ripperdoc.cli.ui.helpers import get_profile_for_pointer

    blocks: List[Dict[str, Any]] = []
    text = user_input

    # Without vision support every @ reference stays untouched in the text.
    profile = get_profile_for_pointer(model_pointer)
    if not (profile and model_supports_vision(profile)):
        return text, blocks

    for ref in _extract_image_paths(user_input):
        # Resolve relative to the project root first, then as written.
        resolved = project_path / ref
        if not resolved.exists():
            resolved = Path(ref)

        if not resolved.exists():
            # Keep the reference in text (LLM should know file doesn't exist)
            logger.debug(
                "[ui] @ referenced file not found",
                extra={"path": ref},
            )
            continue

        if not is_image_file(resolved):
            # Not an image; the LLM can Read it via the @ reference instead.
            continue

        decoded = read_image_as_base64(resolved)
        if not decoded:
            # Failed to read image, keep reference in text
            logger.warning(
                "[ui] Failed to read @ referenced image",
                extra={"path": ref},
            )
            continue

        data, mime = decoded
        blocks.append(
            {
                "type": "image",
                "source_type": "base64",
                "media_type": mime,
                "image_data": data,
            }
        )
        # Image travels as a separate block, so drop the inline reference.
        text = text.replace(f"@{ref}", "")

    # Collapse whitespace left behind by removed references.
    return re.sub(r"\s+", " ", text).strip(), blocks
92
289
 
93
290
 
94
291
  class RichUI:
@@ -101,17 +298,29 @@ class RichUI:
101
298
  show_full_thinking: Optional[bool] = None,
102
299
  session_id: Optional[str] = None,
103
300
  log_file_path: Optional[Path] = None,
301
+ allowed_tools: Optional[List[str]] = None,
302
+ custom_system_prompt: Optional[str] = None,
303
+ append_system_prompt: Optional[str] = None,
304
+ model: Optional[str] = None,
305
+ resume_messages: Optional[List[Any]] = None,
306
+ initial_query: Optional[str] = None,
104
307
  ):
105
308
  self._loop = asyncio.new_event_loop()
106
309
  asyncio.set_event_loop(self._loop)
107
310
  self.console = console
108
311
  self.yolo_mode = yolo_mode
109
312
  self.verbose = verbose
313
+ self.allowed_tools = allowed_tools
314
+ self.custom_system_prompt = custom_system_prompt
315
+ self.append_system_prompt = append_system_prompt
316
+ self.model = model or "main"
110
317
  self.conversation_messages: List[ConversationMessage] = []
111
318
  self._saved_conversation: Optional[List[ConversationMessage]] = None
112
319
  self.query_context: Optional[QueryContext] = None
113
320
  self._current_tool: Optional[str] = None
114
321
  self._should_exit: bool = False
322
+ self._last_ctrl_c_time: float = 0.0 # Track Ctrl+C timing for double-press exit
323
+ self._initial_query = initial_query # Query from piped stdin to auto-send on startup
115
324
  self.command_list = list_slash_commands()
116
325
  self._custom_command_list = list_custom_commands()
117
326
  self._prompt_session: Optional[PromptSession] = None
@@ -134,9 +343,35 @@ class RichUI:
134
343
  },
135
344
  )
136
345
  self._session_history = SessionHistory(self.project_path, self.session_id)
137
- self._permission_checker = (
138
- None if yolo_mode else make_permission_checker(self.project_path, yolo_mode=False)
139
- )
346
+ self._session_hook_contexts: List[str] = []
347
+ self._session_start_time = time.time()
348
+ self._session_end_sent = False
349
+ self._exit_reason: Optional[str] = None
350
+ self._using_tty_input = False # Track if we're using /dev/tty for input
351
+ self._thinking_mode_enabled = False # Toggle for extended thinking mode
352
+ hook_manager.set_transcript_path(str(self._session_history.path))
353
+
354
+ # Create permission checker with Rich console and PromptSession support
355
+ if not yolo_mode:
356
+ # Create a dedicated PromptSession for permission dialogs
357
+ # This provides better interrupt handling than console.input()
358
+ from prompt_toolkit import PromptSession
359
+
360
+ # Disable CPR (Cursor Position Request) to avoid warnings in terminals
361
+ # that don't support it (like some remote/CI terminals)
362
+ import os
363
+ os.environ['PROMPT_TOOLKIT_NO_CPR'] = '1'
364
+
365
+ permission_session = PromptSession()
366
+
367
+ self._permission_checker = make_permission_checker(
368
+ self.project_path,
369
+ yolo_mode=False,
370
+ console=console, # Pass console for Rich Panel rendering
371
+ prompt_session=permission_session, # Use PromptSession for input
372
+ )
373
+ else:
374
+ self._permission_checker = None
140
375
  # Build ignore filter for file completion
141
376
  from ripperdoc.utils.path_ignore import get_project_ignore_patterns
142
377
 
@@ -155,10 +390,14 @@ class RichUI:
155
390
  else:
156
391
  self.show_full_thinking = show_full_thinking
157
392
 
393
+ # Initialize theme from config
394
+ theme_manager = get_theme_manager()
395
+ theme_name = getattr(config, "theme", None) or "dark"
396
+ if not theme_manager.set_theme(theme_name):
397
+ theme_manager.set_theme("dark") # Fallback to default
398
+
158
399
  # Initialize component handlers
159
- self._message_display = MessageDisplay(
160
- self.console, self.verbose, self.show_full_thinking
161
- )
400
+ self._message_display = MessageDisplay(self.console, self.verbose, self.show_full_thinking)
162
401
  self._interrupt_handler = InterruptHandler()
163
402
  self._interrupt_handler.set_abort_callback(self._trigger_abort)
164
403
 
@@ -176,6 +415,7 @@ class RichUI:
176
415
  # Initialize hook manager with project context
177
416
  hook_manager.set_project_dir(self.project_path)
178
417
  hook_manager.set_session_id(self.session_id)
418
+ hook_manager.set_llm_callback(build_hook_llm_callback())
179
419
  logger.debug(
180
420
  "[ui] Initialized hook manager",
181
421
  extra={
@@ -183,6 +423,18 @@ class RichUI:
183
423
  "project_path": str(self.project_path),
184
424
  },
185
425
  )
426
+ self._run_session_start("startup")
427
+
428
+ # Handle resume_messages if provided (for --continue)
429
+ if resume_messages:
430
+ self.conversation_messages = list(resume_messages)
431
+ logger.info(
432
+ "[ui] Resumed conversation with messages",
433
+ extra={
434
+ "session_id": self.session_id,
435
+ "message_count": len(resume_messages),
436
+ },
437
+ )
186
438
 
187
439
  # ─────────────────────────────────────────────────────────────────────────────
188
440
  # Properties for backward compatibility with interrupt handler
@@ -200,6 +452,58 @@ class RichUI:
200
452
  def _esc_listener_paused(self, value: bool) -> None:
201
453
  self._interrupt_handler._esc_listener_paused = value
202
454
 
455
+ # ─────────────────────────────────────────────────────────────────────────────
456
+ # Thinking mode toggle
457
+ # ─────────────────────────────────────────────────────────────────────────────
458
+
459
def _supports_thinking_mode(self) -> bool:
    """Check if the current model supports extended thinking mode."""
    from ripperdoc.core.query import infer_thinking_mode
    from ripperdoc.core.config import ProviderType

    profile = get_profile_for_pointer("main")
    if not profile:
        return False
    # Anthropic supports thinking natively; other providers only when a
    # thinking mode can be inferred for them.
    return (
        profile.provider == ProviderType.ANTHROPIC
        or infer_thinking_mode(profile) is not None
    )
472
+
473
def _toggle_thinking_mode(self) -> None:
    """Toggle thinking mode on/off. Status is shown in rprompt."""
    if self._supports_thinking_mode():
        self._thinking_mode_enabled = not self._thinking_mode_enabled
        return
    self.console.print("[yellow]Current model does not support thinking mode.[/yellow]")
479
+
480
+ def _get_thinking_tokens(self) -> int:
481
+ """Get the thinking tokens budget based on current mode."""
482
+ if not self._thinking_mode_enabled:
483
+ return 0
484
+ config = get_global_config()
485
+ return config.default_thinking_tokens
486
+
487
+ def _get_prompt(self) -> str:
488
+ """Generate the input prompt."""
489
+ return "> "
490
+
491
def _get_rprompt(self) -> Union[str, FormattedText]:
    """Generate the right prompt with thinking mode status."""
    if not self._supports_thinking_mode():
        return ""
    # One styled fragment; the class names map into the prompt Style.
    style_class, label = (
        ("class:rprompt-on", "⚡ Thinking")
        if self._thinking_mode_enabled
        else ("class:rprompt-off", "Thinking: off")
    )
    return FormattedText([(style_class, label)])
506
+
203
507
  def _context_usage_lines(
204
508
  self, breakdown: Any, model_label: str, auto_compact_enabled: bool
205
509
  ) -> List[str]:
@@ -218,6 +522,69 @@ class RichUI:
218
522
  },
219
523
  )
220
524
  self._session_history = SessionHistory(self.project_path, session_id)
525
+ hook_manager.set_session_id(self.session_id)
526
+ hook_manager.set_transcript_path(str(self._session_history.path))
527
+
528
+ def _collect_hook_contexts(self, hook_result: Any) -> List[str]:
529
+ contexts: List[str] = []
530
+ system_message = getattr(hook_result, "system_message", None)
531
+ additional_context = getattr(hook_result, "additional_context", None)
532
+ if system_message:
533
+ contexts.append(str(system_message))
534
+ if additional_context:
535
+ contexts.append(str(additional_context))
536
+ return contexts
537
+
538
+ def _set_session_hook_contexts(self, hook_result: Any) -> None:
539
+ self._session_hook_contexts = self._collect_hook_contexts(hook_result)
540
+ self._session_start_time = time.time()
541
+ self._session_end_sent = False
542
+
543
def _run_session_start(self, source: str) -> None:
    """Fire the SessionStart hook synchronously; failures are logged, never raised."""
    try:
        outcome = self._run_async(hook_manager.run_session_start_async(source))
    except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
        logger.warning(
            "[ui] SessionStart hook failed: %s: %s",
            type(exc).__name__,
            exc,
            extra={"session_id": self.session_id, "source": source},
        )
    else:
        # Only adopt hook contexts when the hook actually ran.
        self._set_session_hook_contexts(outcome)
555
+
556
async def _run_session_start_async(self, source: str) -> None:
    """Fire the SessionStart hook from within the event loop; failures are logged, never raised."""
    try:
        outcome = await hook_manager.run_session_start_async(source)
    except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
        logger.warning(
            "[ui] SessionStart hook failed: %s: %s",
            type(exc).__name__,
            exc,
            extra={"session_id": self.session_id, "source": source},
        )
    else:
        # Only adopt hook contexts when the hook actually ran.
        self._set_session_hook_contexts(outcome)
568
+
569
def _run_session_end(self, reason: str) -> None:
    """Fire the SessionEnd hook at most once per session; failures are logged, never raised."""
    if self._session_end_sent:
        return
    elapsed = max(time.time() - self._session_start_time, 0.0)
    try:
        self._run_async(
            hook_manager.run_session_end_async(
                reason,
                duration_seconds=elapsed,
                message_count=len(self.conversation_messages),
            )
        )
    except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
        logger.warning(
            "[ui] SessionEnd hook failed: %s: %s",
            type(exc).__name__,
            exc,
            extra={"session_id": self.session_id, "reason": reason},
        )
    # Marked as sent even on failure so the hook never double-fires.
    self._session_end_sent = True
221
588
 
222
589
  def _log_message(self, message: Any) -> None:
223
590
  """Best-effort persistence of a message to the session log."""
@@ -278,8 +645,8 @@ class RichUI:
278
645
  self.display_message("Ripperdoc", text)
279
646
 
280
647
  def get_default_tools(self) -> list:
281
- """Get the default set of tools."""
282
- return get_default_tools()
648
+ """Get the default set of tools, filtered by allowed_tools if specified."""
649
+ return get_default_tools(allowed_tools=self.allowed_tools)
283
650
 
284
651
  def display_message(
285
652
  self,
@@ -334,7 +701,9 @@ class RichUI:
334
701
  def _print_reasoning(self, reasoning: Any) -> None:
335
702
  self._message_display.print_reasoning(reasoning)
336
703
 
337
- async def _prepare_query_context(self, user_input: str) -> tuple[str, Dict[str, str]]:
704
+ async def _prepare_query_context(
705
+ self, user_input: str, hook_instructions: Optional[List[str]] = None
706
+ ) -> tuple[str, Dict[str, str]]:
338
707
  """Load MCP servers, skills, and build system prompt.
339
708
 
340
709
  Returns:
@@ -380,14 +749,32 @@ class RichUI:
380
749
  memory_instructions = build_memory_instructions()
381
750
  if memory_instructions:
382
751
  additional_instructions.append(memory_instructions)
383
-
384
- system_prompt = build_system_prompt(
385
- self.query_context.tools if self.query_context else [],
386
- user_input,
387
- context,
388
- additional_instructions=additional_instructions or None,
389
- mcp_instructions=mcp_instructions,
390
- )
752
+ if self._session_hook_contexts:
753
+ additional_instructions.extend(self._session_hook_contexts)
754
+ if hook_instructions:
755
+ additional_instructions.extend([text for text in hook_instructions if text])
756
+
757
+ # Build system prompt based on options:
758
+ # - custom_system_prompt: replaces the default entirely
759
+ # - append_system_prompt: appends to the default system prompt
760
+ if self.custom_system_prompt:
761
+ # Complete replacement
762
+ system_prompt = self.custom_system_prompt
763
+ # Still append if both are provided
764
+ if self.append_system_prompt:
765
+ system_prompt = f"{system_prompt}\n\n{self.append_system_prompt}"
766
+ else:
767
+ # Build default with optional append
768
+ all_instructions = list(additional_instructions) if additional_instructions else []
769
+ if self.append_system_prompt:
770
+ all_instructions.append(self.append_system_prompt)
771
+ system_prompt = build_system_prompt(
772
+ self.query_context.tools if self.query_context else [],
773
+ user_input,
774
+ context,
775
+ additional_instructions=all_instructions or None,
776
+ mcp_instructions=mcp_instructions,
777
+ )
391
778
 
392
779
  return system_prompt, context
393
780
 
@@ -452,10 +839,33 @@ class RichUI:
452
839
  if usage_status.should_auto_compact:
453
840
  original_messages = list(messages)
454
841
  spinner = Spinner(self.console, "Summarizing conversation...", spinner="dots")
842
+ hook_instructions = ""
843
+ try:
844
+ hook_result = await hook_manager.run_pre_compact_async(
845
+ trigger="auto", custom_instructions=""
846
+ )
847
+ if hook_result.should_block or not hook_result.should_continue:
848
+ reason = (
849
+ hook_result.block_reason
850
+ or hook_result.stop_reason
851
+ or "Compaction blocked by hook."
852
+ )
853
+ console.print(f"[yellow]{escape(str(reason))}[/yellow]")
854
+ return messages
855
+ hook_contexts = self._collect_hook_contexts(hook_result)
856
+ if hook_contexts:
857
+ hook_instructions = "\n\n".join(hook_contexts)
858
+ except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
859
+ logger.warning(
860
+ "[ui] PreCompact hook failed: %s: %s",
861
+ type(exc).__name__,
862
+ exc,
863
+ extra={"session_id": self.session_id},
864
+ )
455
865
  try:
456
866
  spinner.start()
457
867
  result = await compact_conversation(
458
- messages, custom_instructions="", protocol=protocol
868
+ messages, custom_instructions=hook_instructions, protocol=protocol
459
869
  )
460
870
  finally:
461
871
  spinner.stop()
@@ -476,6 +886,7 @@ class RichUI:
476
886
  "tokens_saved": result.tokens_saved,
477
887
  },
478
888
  )
889
+ await self._run_session_start_async("compact")
479
890
  return result.messages
480
891
  elif isinstance(result, CompactionError):
481
892
  logger.warning(
@@ -624,21 +1035,59 @@ class RichUI:
624
1035
  output_token_est += delta_tokens
625
1036
  spinner.update_tokens(output_token_est)
626
1037
  else:
627
- spinner.update_tokens(output_token_est, suffix=f"Working... {message.content}")
1038
+ # Simplify spinner suffix for bash command progress to avoid clutter
1039
+ suffix = self._simplify_progress_suffix(message.content)
1040
+ spinner.update_tokens(output_token_est, suffix=suffix)
628
1041
 
629
1042
  return output_token_est
630
1043
 
1044
+ def _simplify_progress_suffix(self, content: Any) -> str:
1045
+ """Simplify progress message content for cleaner spinner display.
1046
+
1047
+ For bash command progress (format: "Running... (elapsed)\nstdout_preview"),
1048
+ extract only the timing information to avoid cluttering the spinner with
1049
+ multi-line stdout content that causes terminal wrapping issues.
1050
+
1051
+ Args:
1052
+ content: Progress message content (can be str or other types)
1053
+
1054
+ Returns:
1055
+ Simplified suffix string for spinner display
1056
+ """
1057
+ if not isinstance(content, str):
1058
+ return f"Working... {content}"
1059
+
1060
+ # Handle bash command progress: "Running... (10s)\nstdout..."
1061
+ if content.startswith("Running..."):
1062
+ # Extract just the "Running... (time)" part before any newline
1063
+ first_line = content.split("\n", 1)[0]
1064
+ return first_line
1065
+
1066
+ # For other progress messages, limit length to avoid terminal wrapping
1067
+ max_length = 60
1068
+ if len(content) > max_length:
1069
+ return f"Working... {content[:max_length]}..."
1070
+
1071
+ return f"Working... {content}"
1072
+
631
1073
  async def process_query(self, user_input: str) -> None:
632
1074
  """Process a user query and display the response."""
633
1075
  # Initialize or reset query context
634
1076
  if not self.query_context:
635
1077
  self.query_context = QueryContext(
636
- tools=self.get_default_tools(), yolo_mode=self.yolo_mode, verbose=self.verbose
1078
+ tools=self.get_default_tools(),
1079
+ max_thinking_tokens=self._get_thinking_tokens(),
1080
+ yolo_mode=self.yolo_mode,
1081
+ verbose=self.verbose,
1082
+ model=self.model,
637
1083
  )
638
1084
  else:
639
1085
  abort_controller = getattr(self.query_context, "abort_controller", None)
640
1086
  if abort_controller is not None:
641
1087
  abort_controller.clear()
1088
+ # Update thinking tokens in case user toggled thinking mode
1089
+ self.query_context.max_thinking_tokens = self._get_thinking_tokens()
1090
+ self.query_context.stop_hook_active = False
642
1091
 
643
1092
  logger.info(
644
1093
  "[ui] Starting query processing",
@@ -650,14 +1099,43 @@ class RichUI:
650
1099
  )
651
1100
 
652
1101
  try:
1102
+ hook_result = await hook_manager.run_user_prompt_submit_async(user_input)
1103
+ if hook_result.should_block or not hook_result.should_continue:
1104
+ reason = (
1105
+ hook_result.block_reason or hook_result.stop_reason or "Prompt blocked by hook."
1106
+ )
1107
+ self.console.print(f"[red]{escape(str(reason))}[/red]")
1108
+ return
1109
+ hook_instructions = self._collect_hook_contexts(hook_result)
1110
+
653
1111
  # Prepare context and system prompt
654
- system_prompt, context = await self._prepare_query_context(user_input)
1112
+ system_prompt, context = await self._prepare_query_context(
1113
+ user_input, hook_instructions
1114
+ )
1115
+
1116
+ # Process images in user input
1117
+ processed_input, image_blocks = _process_images_in_input(
1118
+ user_input, self.project_path, self.model
1119
+ )
655
1120
 
656
1121
  # Create and log user message
657
- user_message = create_user_message(user_input)
1122
+ if image_blocks:
1123
+ # Has images: use structured content
1124
+ content_blocks = []
1125
+ # Add images first
1126
+ for block in image_blocks:
1127
+ content_blocks.append({"type": "image", **block})
1128
+ # Add user's text input
1129
+ if processed_input:
1130
+ content_blocks.append({"type": "text", "text": processed_input})
1131
+ user_message = create_user_message(content=content_blocks)
1132
+ else:
1133
+ # No images: use plain text
1134
+ user_message = create_user_message(content=processed_input)
1135
+
658
1136
  messages: List[ConversationMessage] = self.conversation_messages + [user_message]
659
1137
  self._log_message(user_message)
660
- self._append_prompt_history(user_input)
1138
+ self._append_prompt_history(processed_input)
661
1139
 
662
1140
  # Get model configuration
663
1141
  config = get_global_config()
@@ -872,7 +1350,14 @@ class RichUI:
872
1350
  # Return the expanded content to be processed as a query
873
1351
  return expanded_content
874
1352
 
875
- self.console.print(f"[red]Unknown command: {escape(command_name)}[/red]")
1353
+ suggestions = _suggest_slash_commands(command_name, self.project_path)
1354
+ hint = ""
1355
+ if suggestions:
1356
+ hint = " [dim]Did you mean "
1357
+ hint += ", ".join(f"/{escape(s)}" for s in suggestions)
1358
+ hint += "?[/dim]"
1359
+
1360
+ self.console.print(f"[red]Unknown command: {escape(command_name)}[/red]{hint}")
876
1361
  return True
877
1362
 
878
1363
  def get_prompt_session(self) -> PromptSession:
@@ -886,7 +1371,7 @@ class RichUI:
886
1371
  def __init__(self, project_path: Path):
887
1372
  self.project_path = project_path
888
1373
 
889
- def get_completions(self, document: Any, complete_event: Any) -> Iterable[Completion]:
1374
+ def get_completions(self, document: Any, _complete_event: Any) -> Iterable[Completion]:
890
1375
  text = document.text_before_cursor
891
1376
  if not text.startswith("/"):
892
1377
  return
@@ -930,19 +1415,113 @@ class RichUI:
930
1415
 
931
1416
  @key_bindings.add("tab")
932
1417
  def _(event: Any) -> None:
933
- """Use Tab to accept the highlighted completion when visible."""
1418
+ """Toggle thinking mode when input is empty; otherwise handle completion."""
934
1419
  buf = event.current_buffer
1420
+ # If input is empty, toggle thinking mode
1421
+ if not buf.text.strip():
1422
+ from prompt_toolkit.application import run_in_terminal
1423
+
1424
+ def _toggle() -> None:
1425
+ ui_instance._toggle_thinking_mode()
1426
+
1427
+ run_in_terminal(_toggle)
1428
+ return
1429
+ # Otherwise, handle completion as usual
935
1430
  if buf.complete_state and buf.complete_state.current_completion:
936
1431
  buf.apply_completion(buf.complete_state.current_completion)
937
1432
  else:
938
1433
  buf.start_completion(select_first=True)
939
1434
 
1435
+ @key_bindings.add("escape", "enter")
1436
+ def _(event: Any) -> None:
1437
+ """Insert newline on Alt+Enter."""
1438
+ event.current_buffer.insert_text("\n")
1439
+
1440
+ # Capture self for use in Ctrl+C handler closure
1441
+ ui_instance = self
1442
+
1443
+ @key_bindings.add("c-c")
1444
+ def _(event: Any) -> None:
1445
+ """Handle Ctrl+C: first press clears input, second press exits."""
1446
+ import time as time_module
1447
+
1448
+ buf = event.current_buffer
1449
+ current_text = buf.text
1450
+ current_time = time_module.time()
1451
+
1452
+ # Check if this is a double Ctrl+C (within 1.5 seconds)
1453
+ if current_time - ui_instance._last_ctrl_c_time < 1.5:
1454
+ # Double Ctrl+C - exit
1455
+ buf.reset()
1456
+ raise KeyboardInterrupt()
1457
+
1458
+ # First Ctrl+C - save to history and clear
1459
+ ui_instance._last_ctrl_c_time = current_time
1460
+
1461
+ if current_text.strip():
1462
+ # Save current input to history before clearing
1463
+ try:
1464
+ event.app.current_buffer.history.append_string(current_text)
1465
+ except (AttributeError, TypeError, ValueError):
1466
+ pass
1467
+
1468
+ # Print hint message in clean terminal context, then clear buffer
1469
+ from prompt_toolkit.application import run_in_terminal
1470
+
1471
+ def _print_hint() -> None:
1472
+ print("\n\033[2mPress Ctrl+C again to exit, or continue typing.\033[0m")
1473
+
1474
+ run_in_terminal(_print_hint)
1475
+
1476
+ # Clear the buffer after printing
1477
+ buf.reset()
1478
+
1479
+ # If stdin is not a TTY (e.g., piped input), try to use /dev/tty for interactive input
1480
+ # This allows the user to continue interacting after processing piped content
1481
+ input_obj = None
1482
+ if not sys.stdin.isatty():
1483
+ # First check if /dev/tty exists and is accessible
1484
+ try:
1485
+ import os
1486
+
1487
+ if os.path.exists("/dev/tty"):
1488
+ from prompt_toolkit.input import create_input
1489
+
1490
+ input_obj = create_input(always_prefer_tty=True)
1491
+ self._using_tty_input = True # Mark that we're using /dev/tty
1492
+ logger.info(
1493
+ "[ui] Stdin is not a TTY, using /dev/tty for prompt input",
1494
+ extra={"session_id": self.session_id},
1495
+ )
1496
+ else:
1497
+ logger.info(
1498
+ "[ui] Stdin is not a TTY and /dev/tty not available",
1499
+ extra={"session_id": self.session_id},
1500
+ )
1501
+ except (OSError, RuntimeError, ValueError, ImportError) as exc:
1502
+ logger.warning(
1503
+ "[ui] Failed to create TTY input: %s: %s",
1504
+ type(exc).__name__,
1505
+ exc,
1506
+ extra={"session_id": self.session_id},
1507
+ )
1508
+
1509
+ prompt_style = Style.from_dict(
1510
+ {
1511
+ "rprompt-on": "fg:ansicyan bold",
1512
+ "rprompt-off": "fg:ansibrightblack",
1513
+ }
1514
+ )
940
1515
  self._prompt_session = PromptSession(
941
1516
  completer=combined_completer,
942
1517
  complete_style=CompleteStyle.COLUMN,
943
1518
  complete_while_typing=True,
944
1519
  history=InMemoryHistory(),
945
1520
  key_bindings=key_bindings,
1521
+ multiline=True,
1522
+ input=input_obj,
1523
+ style=prompt_style,
1524
+ rprompt=self._get_rprompt,
946
1525
  )
947
1526
  return self._prompt_session
948
1527
 
@@ -957,7 +1536,9 @@ class RichUI:
957
1536
  console.print(create_status_bar())
958
1537
  console.print()
959
1538
  console.print(
960
- "[dim]Tip: type '/' then press Tab to see available commands. Type '@' to mention files. Press ESC to interrupt a running query.[/dim]\n"
1539
+ "[dim]Tip: type '/' then press Tab to see available commands. Type '@' to mention files. "
1540
+ "Press Alt+Enter for newline. Press Tab to toggle thinking mode. "
1541
+ "Press ESC to interrupt.[/dim]\n"
961
1542
  )
962
1543
 
963
1544
  session = self.get_prompt_session()
@@ -966,11 +1547,36 @@ class RichUI:
966
1547
  extra={"session_id": self.session_id, "log_file": str(self.log_file_path)},
967
1548
  )
968
1549
 
1550
+ exit_reason = "other"
969
1551
  try:
1552
+ # Process initial query from piped stdin if provided
1553
+ if self._initial_query:
1554
+ console.print(f"> {self._initial_query}")
1555
+ logger.info(
1556
+ "[ui] Processing initial query from stdin",
1557
+ extra={
1558
+ "session_id": self.session_id,
1559
+ "prompt_length": len(self._initial_query),
1560
+ "prompt_preview": self._initial_query[:200],
1561
+ },
1562
+ )
1563
+ console.print() # Add spacing before response
1564
+
1565
+ # Use _run_async instead of _run_async_with_esc_interrupt for piped stdin
1566
+ # since there's no TTY for ESC key detection
1567
+ self._run_async(self.process_query(self._initial_query))
1568
+
1569
+ logger.info(
1570
+ "[ui] Initial query completed successfully",
1571
+ extra={"session_id": self.session_id},
1572
+ )
1573
+ console.print() # Add spacing after response
1574
+ self._initial_query = None # Clear after processing
1575
+
970
1576
  while not self._should_exit:
971
1577
  try:
972
- # Get user input
973
- user_input = session.prompt("> ")
1578
+ # Get user input with dynamic prompt
1579
+ user_input = session.prompt(self._get_prompt())
974
1580
 
975
1581
  if not user_input.strip():
976
1582
  continue
@@ -988,6 +1594,7 @@ class RichUI:
988
1594
  )
989
1595
  handled = self.handle_slash_command(user_input)
990
1596
  if self._should_exit:
1597
+ exit_reason = self._exit_reason or "other"
991
1598
  break
992
1599
  # If handled is a string, it's expanded custom command content
993
1600
  if isinstance(handled, str):
@@ -1006,29 +1613,49 @@ class RichUI:
1006
1613
  "prompt_preview": user_input[:200],
1007
1614
  },
1008
1615
  )
1009
- interrupted = self._run_async_with_esc_interrupt(self.process_query(user_input))
1010
1616
 
1011
- if interrupted:
1012
- console.print(
1013
- "\n[red]■ Conversation interrupted[/red] · [dim]Tell the model what to do differently.[/dim]"
1014
- )
1015
- logger.info(
1016
- "[ui] Query interrupted by ESC key",
1017
- extra={"session_id": self.session_id},
1617
+ # When using /dev/tty input, disable ESC interrupt to avoid conflicts
1618
+ if self._using_tty_input:
1619
+ self._run_async(self.process_query(user_input))
1620
+ else:
1621
+ interrupted = self._run_async_with_esc_interrupt(
1622
+ self.process_query(user_input)
1018
1623
  )
1624
+ if interrupted:
1625
+ console.print(
1626
+ "\n[red]■ Conversation interrupted[/red] · [dim]Tell the model what to do differently.[/dim]"
1627
+ )
1628
+ logger.info(
1629
+ "[ui] Query interrupted by ESC key",
1630
+ extra={"session_id": self.session_id},
1631
+ )
1019
1632
 
1020
1633
  console.print() # Add spacing between interactions
1021
1634
 
1022
1635
  except KeyboardInterrupt:
1636
+ # Handle Ctrl+C: first press during query aborts it,
1637
+ # double press exits the CLI
1638
+ current_time = time.time()
1639
+
1023
1640
  # Signal abort to cancel running queries
1024
1641
  if self.query_context:
1025
1642
  abort_controller = getattr(self.query_context, "abort_controller", None)
1026
1643
  if abort_controller is not None:
1027
1644
  abort_controller.set()
1028
- console.print("\n[yellow]Goodbye![/yellow]")
1029
- break
1645
+
1646
+ # Check if this is a double Ctrl+C (within 1.5 seconds)
1647
+ if current_time - self._last_ctrl_c_time < 1.5:
1648
+ console.print("\n[yellow]Goodbye![/yellow]")
1649
+ exit_reason = "prompt_input_exit"
1650
+ break
1651
+
1652
+ # First Ctrl+C - just abort the query and continue
1653
+ self._last_ctrl_c_time = current_time
1654
+ console.print("\n[dim]Query interrupted. Press Ctrl+C again to exit.[/dim]")
1655
+ continue
1030
1656
  except EOFError:
1031
1657
  console.print("\n[yellow]Goodbye![/yellow]")
1658
+ exit_reason = "prompt_input_exit"
1032
1659
  break
1033
1660
  except (
1034
1661
  OSError,
@@ -1056,6 +1683,8 @@ class RichUI:
1056
1683
  if abort_controller is not None:
1057
1684
  abort_controller.set()
1058
1685
 
1686
+ self._run_session_end(exit_reason)
1687
+
1059
1688
  # Suppress async generator cleanup errors during shutdown
1060
1689
  original_hook = sys.unraisablehook
1061
1690
 
@@ -1072,6 +1701,7 @@ class RichUI:
1072
1701
  try:
1073
1702
  try:
1074
1703
  self._run_async(shutdown_mcp_runtime())
1704
+ self._run_async(shutdown_lsp_manager())
1075
1705
  except (OSError, RuntimeError, ConnectionError, asyncio.CancelledError) as exc:
1076
1706
  # pragma: no cover - defensive shutdown
1077
1707
  logger.warning(
@@ -1080,6 +1710,17 @@ class RichUI:
1080
1710
  exc,
1081
1711
  extra={"session_id": self.session_id},
1082
1712
  )
1713
+
1714
+ # Shutdown background shell manager to clean up any background tasks
1715
+ try:
1716
+ shutdown_background_shell(force=True)
1717
+ except (OSError, RuntimeError) as exc:
1718
+ logger.debug(
1719
+ "[ui] Failed to shut down background shell cleanly: %s: %s",
1720
+ type(exc).__name__,
1721
+ exc,
1722
+ extra={"session_id": self.session_id},
1723
+ )
1083
1724
  finally:
1084
1725
  if not self._loop.is_closed():
1085
1726
  # Cancel all pending tasks
@@ -1121,11 +1762,42 @@ class RichUI:
1121
1762
  original_messages = list(self.conversation_messages)
1122
1763
  spinner = Spinner(self.console, "Summarizing conversation...", spinner="dots")
1123
1764
 
1765
+ hook_instructions = ""
1766
+ try:
1767
+ hook_result = await hook_manager.run_pre_compact_async(
1768
+ trigger="manual", custom_instructions=custom_instructions
1769
+ )
1770
+ if hook_result.should_block or not hook_result.should_continue:
1771
+ reason = (
1772
+ hook_result.block_reason
1773
+ or hook_result.stop_reason
1774
+ or "Compaction blocked by hook."
1775
+ )
1776
+ self.console.print(f"[yellow]{escape(str(reason))}[/yellow]")
1777
+ return
1778
+ hook_contexts = self._collect_hook_contexts(hook_result)
1779
+ if hook_contexts:
1780
+ hook_instructions = "\n\n".join(hook_contexts)
1781
+ except (OSError, RuntimeError, ConnectionError, ValueError, TypeError) as exc:
1782
+ logger.warning(
1783
+ "[ui] PreCompact hook failed: %s: %s",
1784
+ type(exc).__name__,
1785
+ exc,
1786
+ extra={"session_id": self.session_id},
1787
+ )
1788
+
1789
+ merged_instructions = custom_instructions.strip()
1790
+ if hook_instructions:
1791
+ merged_instructions = (
1792
+ f"{merged_instructions}\n\n{hook_instructions}".strip()
1793
+ if merged_instructions
1794
+ else hook_instructions
1795
+ )
1124
1796
  try:
1125
1797
  spinner.start()
1126
1798
  result = await compact_conversation(
1127
1799
  self.conversation_messages,
1128
- custom_instructions,
1800
+ merged_instructions,
1129
1801
  protocol=protocol,
1130
1802
  )
1131
1803
  except Exception as exc:
@@ -1144,6 +1816,7 @@ class RichUI:
1144
1816
  f"[green]✓ Conversation compacted[/green] "
1145
1817
  f"(saved ~{result.tokens_saved} tokens). Use /resume to restore full history."
1146
1818
  )
1819
+ await self._run_session_start_async("compact")
1147
1820
  elif isinstance(result, CompactionError):
1148
1821
  self.console.print(f"[red]{escape(result.message)}[/red]")
1149
1822
 
@@ -1171,8 +1844,19 @@ def main_rich(
1171
1844
  show_full_thinking: Optional[bool] = None,
1172
1845
  session_id: Optional[str] = None,
1173
1846
  log_file_path: Optional[Path] = None,
1847
+ allowed_tools: Optional[List[str]] = None,
1848
+ custom_system_prompt: Optional[str] = None,
1849
+ append_system_prompt: Optional[str] = None,
1850
+ model: Optional[str] = None,
1851
+ resume_messages: Optional[List[Any]] = None,
1852
+ initial_query: Optional[str] = None,
1174
1853
  ) -> None:
1175
- """Main entry point for Rich interface."""
1854
+ """Main entry point for Rich interface.
1855
+
1856
+ Args:
1857
+ initial_query: If provided, automatically send this query after starting the session.
1858
+ Used for piped stdin input (e.g., `echo "query" | ripperdoc`).
1859
+ """
1176
1860
 
1177
1861
  # Ensure onboarding is complete
1178
1862
  if not check_onboarding_rich():
@@ -1185,6 +1869,12 @@ def main_rich(
1185
1869
  show_full_thinking=show_full_thinking,
1186
1870
  session_id=session_id,
1187
1871
  log_file_path=log_file_path,
1872
+ allowed_tools=allowed_tools,
1873
+ custom_system_prompt=custom_system_prompt,
1874
+ append_system_prompt=append_system_prompt,
1875
+ model=model,
1876
+ resume_messages=resume_messages,
1877
+ initial_query=initial_query,
1188
1878
  )
1189
1879
  ui.run()
1190
1880