ripperdoc 0.2.10__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (73)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +164 -57
  3. ripperdoc/cli/commands/__init__.py +4 -0
  4. ripperdoc/cli/commands/agents_cmd.py +3 -7
  5. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  6. ripperdoc/cli/commands/memory_cmd.py +2 -1
  7. ripperdoc/cli/commands/models_cmd.py +61 -5
  8. ripperdoc/cli/commands/resume_cmd.py +1 -0
  9. ripperdoc/cli/commands/skills_cmd.py +103 -0
  10. ripperdoc/cli/commands/stats_cmd.py +4 -4
  11. ripperdoc/cli/commands/status_cmd.py +10 -0
  12. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  13. ripperdoc/cli/commands/themes_cmd.py +139 -0
  14. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  15. ripperdoc/cli/ui/helpers.py +6 -3
  16. ripperdoc/cli/ui/interrupt_listener.py +233 -0
  17. ripperdoc/cli/ui/message_display.py +7 -0
  18. ripperdoc/cli/ui/panels.py +13 -8
  19. ripperdoc/cli/ui/rich_ui.py +513 -84
  20. ripperdoc/cli/ui/spinner.py +68 -5
  21. ripperdoc/cli/ui/tool_renderers.py +10 -9
  22. ripperdoc/cli/ui/wizard.py +18 -11
  23. ripperdoc/core/agents.py +4 -0
  24. ripperdoc/core/config.py +235 -0
  25. ripperdoc/core/default_tools.py +1 -0
  26. ripperdoc/core/hooks/llm_callback.py +0 -1
  27. ripperdoc/core/hooks/manager.py +6 -0
  28. ripperdoc/core/permissions.py +123 -39
  29. ripperdoc/core/providers/openai.py +55 -9
  30. ripperdoc/core/query.py +349 -108
  31. ripperdoc/core/query_utils.py +17 -14
  32. ripperdoc/core/skills.py +1 -0
  33. ripperdoc/core/theme.py +298 -0
  34. ripperdoc/core/tool.py +8 -3
  35. ripperdoc/protocol/__init__.py +14 -0
  36. ripperdoc/protocol/models.py +300 -0
  37. ripperdoc/protocol/stdio.py +1453 -0
  38. ripperdoc/tools/background_shell.py +49 -5
  39. ripperdoc/tools/bash_tool.py +75 -9
  40. ripperdoc/tools/file_edit_tool.py +98 -29
  41. ripperdoc/tools/file_read_tool.py +139 -8
  42. ripperdoc/tools/file_write_tool.py +46 -3
  43. ripperdoc/tools/grep_tool.py +98 -8
  44. ripperdoc/tools/lsp_tool.py +9 -15
  45. ripperdoc/tools/multi_edit_tool.py +26 -3
  46. ripperdoc/tools/skill_tool.py +52 -1
  47. ripperdoc/tools/task_tool.py +33 -8
  48. ripperdoc/utils/file_watch.py +12 -6
  49. ripperdoc/utils/image_utils.py +125 -0
  50. ripperdoc/utils/log.py +30 -3
  51. ripperdoc/utils/lsp.py +9 -3
  52. ripperdoc/utils/mcp.py +80 -18
  53. ripperdoc/utils/message_formatting.py +2 -2
  54. ripperdoc/utils/messages.py +177 -32
  55. ripperdoc/utils/pending_messages.py +50 -0
  56. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  57. ripperdoc/utils/permissions/tool_permission_utils.py +9 -3
  58. ripperdoc/utils/platform.py +198 -0
  59. ripperdoc/utils/session_heatmap.py +1 -3
  60. ripperdoc/utils/session_history.py +2 -2
  61. ripperdoc/utils/session_stats.py +1 -0
  62. ripperdoc/utils/shell_utils.py +8 -5
  63. ripperdoc/utils/todo.py +0 -6
  64. {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/METADATA +49 -17
  65. ripperdoc-0.3.1.dist-info/RECORD +136 -0
  66. {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/WHEEL +1 -1
  67. ripperdoc/cli/ui/interrupt_handler.py +0 -174
  68. ripperdoc/sdk/__init__.py +0 -9
  69. ripperdoc/sdk/client.py +0 -408
  70. ripperdoc-0.2.10.dist-info/RECORD +0 -129
  71. {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/entry_points.txt +0 -0
  72. {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/licenses/LICENSE +0 -0
  73. {ripperdoc-0.2.10.dist-info → ripperdoc-0.3.1.dist-info}/top_level.txt +0 -0
ripperdoc/core/permissions.py
@@ -3,10 +3,17 @@
 from __future__ import annotations
 
 import asyncio
+import html
 from collections import defaultdict
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Any, Awaitable, Callable, Optional, Set
+from typing import Any, Awaitable, Callable, Optional, Set, TYPE_CHECKING
+
+from prompt_toolkit.filters import is_done
+from prompt_toolkit.formatted_text import HTML
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.shortcuts import choice
+from prompt_toolkit.styles import Style
 
 from ripperdoc.core.config import config_manager
 from ripperdoc.core.hooks.manager import hook_manager
@@ -14,6 +21,10 @@ from ripperdoc.core.tool import Tool
 from ripperdoc.utils.permissions import PermissionDecision, ToolRule
 from ripperdoc.utils.log import get_logger
 
+if TYPE_CHECKING:
+    from rich.console import Console
+    from prompt_toolkit import PromptSession
+
 logger = get_logger()
 
 
@@ -32,35 +43,37 @@ def _format_input_preview(parsed_input: Any, tool_name: Optional[str] = None) ->
 
     For Bash commands, shows full details for security review.
     For other tools, shows a concise preview.
+    Returns HTML-formatted text with color tags.
     """
     # For Bash tool, show full command details for security review
     if tool_name == "Bash" and hasattr(parsed_input, "command"):
-        lines = [f"Command: {getattr(parsed_input, 'command')}"]
+        command = html.escape(getattr(parsed_input, "command"))
+        lines = [f"<label>Command:</label> <value>{command}</value>"]
 
         # Add other relevant parameters
         if hasattr(parsed_input, "timeout") and parsed_input.timeout:
-            lines.append(f"Timeout: {parsed_input.timeout}ms")
+            lines.append(f"<label>Timeout:</label> <value>{parsed_input.timeout}ms</value>")
         if hasattr(parsed_input, "sandbox"):
-            lines.append(f"Sandbox: {parsed_input.sandbox}")
+            lines.append(f"<label>Sandbox:</label> <value>{parsed_input.sandbox}</value>")
         if hasattr(parsed_input, "run_in_background"):
-            lines.append(f"Background: {parsed_input.run_in_background}")
+            lines.append(f"<label>Background:</label> <value>{parsed_input.run_in_background}</value>")
        if hasattr(parsed_input, "shell_executable") and parsed_input.shell_executable:
-            lines.append(f"Shell: {parsed_input.shell_executable}")
+            lines.append(f"<label>Shell:</label> <value>{html.escape(parsed_input.shell_executable)}</value>")
 
         return "\n ".join(lines)
 
     # For other tools with commands, show concise preview
     if hasattr(parsed_input, "command"):
-        return f"command='{getattr(parsed_input, 'command')}'"
+        return f"<label>command:</label> <value>'{html.escape(getattr(parsed_input, 'command'))}'</value>"
     if hasattr(parsed_input, "file_path"):
-        return f"file='{getattr(parsed_input, 'file_path')}'"
+        return f"<label>file:</label> <value>'{html.escape(getattr(parsed_input, 'file_path'))}'</value>"
     if hasattr(parsed_input, "path"):
-        return f"path='{getattr(parsed_input, 'path')}'"
+        return f"<label>path:</label> <value>'{html.escape(getattr(parsed_input, 'path'))}'</value>"
 
     preview = str(parsed_input)
     if len(preview) > 140:
-        return preview[:137] + "..."
-    return preview
+        preview = preview[:137] + "..."
+    return f"<value>{html.escape(preview)}</value>"
 
 
 def permission_key(tool: Tool[Any, Any], parsed_input: Any) -> str:
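For reference, a minimal sketch of what the reworked preview now produces for a Bash call; the SimpleNamespace input below is a made-up stand-in, not a type from the package:

    from types import SimpleNamespace

    # Hypothetical parsed input, for illustration only.
    bash_input = SimpleNamespace(command="rm -rf build/", timeout=None, sandbox=False, run_in_background=False)
    preview = _format_input_preview(bash_input, tool_name="Bash")
    # preview is now prompt_toolkit HTML markup, roughly:
    # <label>Command:</label> <value>rm -rf build/</value>
    #  <label>Sandbox:</label> <value>False</value>
    #  <label>Background:</label> <value>False</value>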
@@ -88,17 +101,23 @@ permission_key(tool: Tool[Any, Any], parsed_input: Any) -> str:
     return tool.name
 
 
-def _render_options_prompt(prompt: str, options: list[tuple[str, str]]) -> str:
-    """Render a simple numbered prompt."""
-    border = "─" * 120
-    lines = [border, prompt, ""]
-    for idx, (_, label) in enumerate(options, start=1):
-        prefix = "" if idx == 1 else " "
-        lines.append(f"{prefix} {idx}. {label}")
-    numeric_choices = "/".join(str(i) for i in range(1, len(options) + 1))
-    shortcut_choices = "/".join(opt[0] for opt in options)
-    lines.append(f"Choice ({numeric_choices} or {shortcut_choices}): ")
-    return "\n".join(lines)
+def _permission_style() -> Style:
+    """Create the style for permission choice prompts."""
+    return Style.from_dict(
+        {
+            "frame.border": "#d4a017",  # Golden/amber border
+            "selected-option": "bold",
+            "option": "#5fd7ff",  # Cyan for unselected options
+            "title": "#ffaf00",  # Orange/amber for tool name
+            "description": "#ffffff",  # White for descriptions
+            "question": "#ffd700",  # Gold for the question
+            "label": "#87afff",  # Light blue for field labels (Command:, Sandbox:, etc.)
+            "warning": "#ff5555",  # Red for warnings
+            "yes-option": "#ffffff",  # Neutral for Yes options
+            "no-option": "#ffffff",  # Neutral for No option
+            "value": "#f8f8f2",  # Off-white for values
+        }
+    )
 
 
 def _rule_strings(rule_suggestions: Optional[Any]) -> list[str]:
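The style keys above are resolved against matching tags in prompt_toolkit HTML markup. A self-contained sketch of that pairing, independent of ripperdoc's own helpers:

    from prompt_toolkit import print_formatted_text
    from prompt_toolkit.formatted_text import HTML
    from prompt_toolkit.styles import Style

    style = Style.from_dict({"label": "#87afff", "value": "#f8f8f2"})
    # Each <label>/<value> tag is looked up as a style class while rendering.
    print_formatted_text(HTML("<label>Command:</label> <value>ls -la</value>"), style=style)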
@@ -118,9 +137,18 @@ def make_permission_checker(
     project_path: Path,
     yolo_mode: bool,
     prompt_fn: Optional[Callable[[str], str]] = None,
+    console: Optional["Console"] = None,
+    prompt_session: Optional["PromptSession"] = None,
 ) -> Callable[[Tool[Any, Any], Any], Awaitable[PermissionResult]]:
     """Create a permission checking function for the current project.
 
+    Args:
+        project_path: Path to the project directory
+        yolo_mode: If True, all tool calls are allowed without prompting
+        prompt_fn: Optional function to use for prompting (defaults to input())
+        console: Optional Rich console for rich permission dialogs
+        prompt_session: Optional PromptSession for better interrupt handling
+
     In yolo mode, all tool calls are allowed without prompting.
     """
 
@@ -130,14 +158,69 @@
     session_allowed_tools: Set[str] = set()
     session_tool_rules: dict[str, Set[str]] = defaultdict(set)
 
-    async def _prompt_user(prompt: str, options: list[tuple[str, str]]) -> str:
-        """Prompt the user without blocking the event loop."""
+    async def _prompt_user(prompt: str, options: list[tuple[str, str]], is_html: bool = False) -> str:
+        """Prompt the user with proper interrupt handling using choice().
+
+        Args:
+            prompt: The prompt text to display.
+            options: List of (value, label) tuples for choices.
+            is_html: If True, prompt is already formatted HTML and should not be escaped.
+        """
         loop = asyncio.get_running_loop()
-        responder = prompt_fn or input
 
         def _ask() -> str:
-            rendered = _render_options_prompt(prompt, options)
-            return responder(rendered)
+            try:
+                # If a custom prompt_fn is provided (e.g., for tests), use it directly
+                responder = prompt_fn or None
+                if responder is not None:
+                    # Build a simple text prompt for the prompt_fn
+                    numeric_choices = "/".join(str(i) for i in range(1, len(options) + 1))
+                    shortcut_choices = "/".join(opt[0] for opt in options)
+                    input_prompt = f"Choice ({numeric_choices} or {shortcut_choices}): "
+                    return responder(input_prompt)
+
+                # Convert options to choice() format (value, label)
+                # Labels can be HTML-formatted strings
+                choice_options = [(value, HTML(label) if "<" in label else label) for value, label in options]
+
+                # Build formatted message with prompt text
+                # Add visual separation with lines
+                if is_html:
+                    # Prompt is already HTML formatted
+                    formatted_prompt = HTML(f"\n{prompt}\n")
+                else:
+                    # Escape HTML special characters in plain text prompt
+                    formatted_prompt = HTML(f"\n{html.escape(prompt)}\n")
+
+                esc_bindings = KeyBindings()
+
+                @esc_bindings.add("escape", eager=True)
+                def _esc_to_deny(event: Any) -> None:
+                    event.app.exit(result="n", style="class:aborting")
+
+                result = choice(
+                    message=formatted_prompt,
+                    options=choice_options,
+                    style=_permission_style(),
+                    show_frame=~is_done,  # Frame disappears after selection
+                    key_bindings=esc_bindings,
+                )
+
+                # Clear the entire prompt after selection
+                # ANSI codes: ESC[F = move cursor to beginning of previous line
+                # ESC[2K = clear entire line
+                # We need to clear: blank + prompt + blank + options (each option takes 1 line)
+                # plus frame borders (top and bottom) = approximately 6-8 lines
+                for _ in range(12):  # Clear enough lines to cover the prompt
+                    print("\033[F\033[2K", end="", flush=True)
+
+                return result
+            except KeyboardInterrupt:
+                logger.debug("[permissions] KeyboardInterrupt in choice")
+                return "n"
+            except EOFError:
+                logger.debug("[permissions] EOFError in choice")
+                return "n"
 
         return await loop.run_in_executor(None, _ask)
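The post-selection cleanup in _ask relies on two standard ANSI control sequences. A tiny standalone sketch of the same technique, assuming a terminal that honors ANSI escapes:

    import sys

    def clear_previous_lines(count: int) -> None:
        # "\033[F" moves the cursor to the start of the previous line,
        # "\033[2K" erases that whole line.
        for _ in range(count):
            sys.stdout.write("\033[F\033[2K")
        sys.stdout.flush()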
 
@@ -310,23 +393,24 @@
         )
 
         input_preview = _format_input_preview(parsed_input, tool_name=tool.name)
-        prompt_lines = [
-            f"{tool.name}",
-            "",
-            f" {input_preview}",
-        ]
+        # Use inline styles for prompt_toolkit HTML formatting
+        # The style names must match keys in the _permission_style() dict
+        prompt_html = f"""<title>{html.escape(tool.name)}</title>
+
+ <description>{input_preview}</description>"""
         if decision.message:
-            prompt_lines.append(f" {decision.message}")
-        prompt_lines.append(" Do you want to proceed?")
-        prompt = "\n".join(prompt_lines)
+            # Use warning style for warning messages
+            prompt_html += f"\n <warning>{html.escape(decision.message)}</warning>"
+        prompt_html += "\n <question>Do you want to proceed?</question>"
+        prompt = prompt_html
 
         options = [
-            ("y", "Yes"),
-            ("s", "Yes, for this session"),
-            ("n", "No"),
+            ("y", "<yes-option>Yes</yes-option>"),
+            ("s", "<yes-option>Yes, for this session</yes-option>"),
+            ("n", "<no-option>No</no-option>"),
         ]
 
-        answer = (await _prompt_user(prompt, options=options)).strip().lower()
+        answer = (await _prompt_user(prompt, options=options, is_html=True)).strip().lower()
         logger.debug(
             "[permissions] User answer for permission prompt",
             extra={"answer": answer, "tool": getattr(tool, "name", None)},
ripperdoc/core/providers/openai.py
@@ -80,10 +80,18 @@ def _effort_from_tokens(max_thinking_tokens: int) -> Optional[str]:
 
 
 def _detect_openai_vendor(model_profile: ModelProfile) -> str:
-    """Best-effort vendor hint for OpenAI-compatible endpoints."""
+    """Best-effort vendor hint for OpenAI-compatible endpoints.
+
+    If thinking_mode is explicitly set to "none" or "disabled", returns "none"
+    to skip all thinking protocol handling.
+    """
     override = getattr(model_profile, "thinking_mode", None)
     if isinstance(override, str) and override.strip():
-        return override.strip().lower()
+        mode = override.strip().lower()
+        # Allow explicit disable of thinking protocol
+        if mode in ("disabled", "off"):
+            return "none"
+        return mode
     base = (model_profile.api_base or "").lower()
     name = (model_profile.model or "").lower()
     if "openrouter.ai" in base:
@@ -106,21 +114,25 @@
     extra_body: Dict[str, Any] = {}
     top_level: Dict[str, Any] = {}
     vendor = _detect_openai_vendor(model_profile)
+
+    # Skip thinking protocol if explicitly disabled
+    if vendor == "none":
+        return extra_body, top_level
+
     effort = _effort_from_tokens(max_thinking_tokens)
 
     if vendor == "deepseek":
         if max_thinking_tokens != 0:
             extra_body["thinking"] = {"type": "enabled"}
     elif vendor == "qwen":
+        # Only send enable_thinking when explicitly enabling thinking mode
+        # Some qwen-compatible APIs don't support this parameter
         if max_thinking_tokens > 0:
             extra_body["enable_thinking"] = True
-        elif max_thinking_tokens == 0:
-            extra_body["enable_thinking"] = False
     elif vendor == "openrouter":
+        # Only send reasoning when explicitly enabling thinking mode
         if max_thinking_tokens > 0:
             extra_body["reasoning"] = {"max_tokens": max_thinking_tokens}
-        elif max_thinking_tokens == 0:
-            extra_body["reasoning"] = {"effort": "none"}
     elif vendor == "gemini_openai":
         google_cfg: Dict[str, Any] = {}
         if max_thinking_tokens > 0:
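Based on the branches above, the payload this helper contributes per vendor now reduces to roughly the following; the token value is illustrative:

    # With max_thinking_tokens = 2048 (any non-zero value for deepseek):
    expected_extra_body = {
        "deepseek": {"thinking": {"type": "enabled"}},
        "qwen": {"enable_thinking": True},
        "openrouter": {"reasoning": {"max_tokens": 2048}},
    }
    # With max_thinking_tokens == 0, qwen and openrouter now send no thinking fields at all,
    # where the old code sent enable_thinking=False and {"reasoning": {"effort": "none"}}.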
@@ -250,6 +262,15 @@ class OpenAIClient(ProviderClient):
             model_profile, max_thinking_tokens
         )
 
+        logger.debug(
+            "[openai_client] Starting API request",
+            extra={
+                "model": model_profile.model,
+                "api_base": model_profile.api_base,
+                "request_timeout": request_timeout,
+            },
+        )
+
         logger.debug(
             "[openai_client] Request parameters",
             extra={
@@ -420,12 +441,13 @@ class OpenAIClient(ProviderClient):
         )
 
         if (
-            can_stream_text
+            can_stream
             and not collected_text
             and not streamed_tool_calls
             and not streamed_tool_text
+            and not stream_reasoning_text
         ):
-            logger.debug(
+            logger.warning(
                 "[openai_client] Streaming returned no content; retrying without stream",
                 extra={"model": model_profile.model},
             )
@@ -450,6 +472,30 @@ class OpenAIClient(ProviderClient):
         if not can_stream and (
             not openai_response or not getattr(openai_response, "choices", None)
         ):
+            # Check for non-standard error response (e.g., iflow returns HTTP 200 with error JSON)
+            error_msg = (
+                getattr(openai_response, "msg", None)
+                or getattr(openai_response, "message", None)
+                or getattr(openai_response, "error", None)
+            )
+            error_status = getattr(openai_response, "status", None)
+            if error_msg or error_status:
+                error_text = f"API Error: {error_msg or 'Unknown error'}"
+                if error_status:
+                    error_text = f"API Error ({error_status}): {error_msg or 'Unknown error'}"
+                logger.error(
+                    "[openai_client] Non-standard error response from API",
+                    extra={
+                        "model": model_profile.model,
+                        "error_status": error_status,
+                        "error_msg": error_msg,
+                    },
+                )
+                return ProviderResponse.create_error(
+                    error_code="api_error",
+                    error_message=error_text,
+                    duration_ms=duration_ms,
+                )
             logger.warning(
                 "[openai_client] No choices returned from OpenAI response",
                 extra={"model": model_profile.model},
@@ -532,7 +578,7 @@ class OpenAIClient(ProviderClient):
             },
         )
 
-        logger.info(
+        logger.debug(
             "[openai_client] Response received",
             extra={
                 "model": model_profile.model,