ripperdoc 0.2.9__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +379 -51
  3. ripperdoc/cli/commands/__init__.py +6 -0
  4. ripperdoc/cli/commands/agents_cmd.py +128 -5
  5. ripperdoc/cli/commands/clear_cmd.py +8 -0
  6. ripperdoc/cli/commands/doctor_cmd.py +29 -0
  7. ripperdoc/cli/commands/exit_cmd.py +1 -0
  8. ripperdoc/cli/commands/memory_cmd.py +2 -1
  9. ripperdoc/cli/commands/models_cmd.py +63 -7
  10. ripperdoc/cli/commands/resume_cmd.py +5 -0
  11. ripperdoc/cli/commands/skills_cmd.py +103 -0
  12. ripperdoc/cli/commands/stats_cmd.py +244 -0
  13. ripperdoc/cli/commands/status_cmd.py +10 -0
  14. ripperdoc/cli/commands/tasks_cmd.py +6 -3
  15. ripperdoc/cli/commands/themes_cmd.py +139 -0
  16. ripperdoc/cli/ui/file_mention_completer.py +63 -13
  17. ripperdoc/cli/ui/helpers.py +6 -3
  18. ripperdoc/cli/ui/interrupt_handler.py +34 -0
  19. ripperdoc/cli/ui/panels.py +14 -8
  20. ripperdoc/cli/ui/rich_ui.py +737 -47
  21. ripperdoc/cli/ui/spinner.py +93 -18
  22. ripperdoc/cli/ui/thinking_spinner.py +1 -2
  23. ripperdoc/cli/ui/tool_renderers.py +10 -9
  24. ripperdoc/cli/ui/wizard.py +24 -19
  25. ripperdoc/core/agents.py +14 -3
  26. ripperdoc/core/config.py +238 -6
  27. ripperdoc/core/default_tools.py +91 -10
  28. ripperdoc/core/hooks/events.py +4 -0
  29. ripperdoc/core/hooks/llm_callback.py +58 -0
  30. ripperdoc/core/hooks/manager.py +6 -0
  31. ripperdoc/core/permissions.py +160 -9
  32. ripperdoc/core/providers/openai.py +84 -28
  33. ripperdoc/core/query.py +489 -87
  34. ripperdoc/core/query_utils.py +17 -14
  35. ripperdoc/core/skills.py +1 -0
  36. ripperdoc/core/theme.py +298 -0
  37. ripperdoc/core/tool.py +15 -5
  38. ripperdoc/protocol/__init__.py +14 -0
  39. ripperdoc/protocol/models.py +300 -0
  40. ripperdoc/protocol/stdio.py +1453 -0
  41. ripperdoc/tools/background_shell.py +354 -139
  42. ripperdoc/tools/bash_tool.py +117 -22
  43. ripperdoc/tools/file_edit_tool.py +228 -50
  44. ripperdoc/tools/file_read_tool.py +154 -3
  45. ripperdoc/tools/file_write_tool.py +53 -11
  46. ripperdoc/tools/grep_tool.py +98 -8
  47. ripperdoc/tools/lsp_tool.py +609 -0
  48. ripperdoc/tools/multi_edit_tool.py +26 -3
  49. ripperdoc/tools/skill_tool.py +52 -1
  50. ripperdoc/tools/task_tool.py +539 -65
  51. ripperdoc/utils/conversation_compaction.py +1 -1
  52. ripperdoc/utils/file_watch.py +216 -7
  53. ripperdoc/utils/image_utils.py +125 -0
  54. ripperdoc/utils/log.py +30 -3
  55. ripperdoc/utils/lsp.py +812 -0
  56. ripperdoc/utils/mcp.py +80 -18
  57. ripperdoc/utils/message_formatting.py +7 -4
  58. ripperdoc/utils/messages.py +198 -33
  59. ripperdoc/utils/pending_messages.py +50 -0
  60. ripperdoc/utils/permissions/shell_command_validation.py +3 -3
  61. ripperdoc/utils/permissions/tool_permission_utils.py +180 -15
  62. ripperdoc/utils/platform.py +198 -0
  63. ripperdoc/utils/session_heatmap.py +242 -0
  64. ripperdoc/utils/session_history.py +2 -2
  65. ripperdoc/utils/session_stats.py +294 -0
  66. ripperdoc/utils/shell_utils.py +8 -5
  67. ripperdoc/utils/todo.py +0 -6
  68. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/METADATA +55 -17
  69. ripperdoc-0.3.0.dist-info/RECORD +136 -0
  70. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/WHEEL +1 -1
  71. ripperdoc/sdk/__init__.py +0 -9
  72. ripperdoc/sdk/client.py +0 -333
  73. ripperdoc-0.2.9.dist-info/RECORD +0 -123
  74. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/entry_points.txt +0 -0
  75. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {ripperdoc-0.2.9.dist-info → ripperdoc-0.3.0.dist-info}/top_level.txt +0 -0
@@ -6,13 +6,18 @@ import asyncio
6
6
  from collections import defaultdict
7
7
  from dataclasses import dataclass
8
8
  from pathlib import Path
9
- from typing import Any, Awaitable, Callable, Optional, Set
9
+ from typing import Any, Awaitable, Callable, Optional, Set, TYPE_CHECKING
10
10
 
11
11
  from ripperdoc.core.config import config_manager
12
+ from ripperdoc.core.hooks.manager import hook_manager
12
13
  from ripperdoc.core.tool import Tool
13
14
  from ripperdoc.utils.permissions import PermissionDecision, ToolRule
14
15
  from ripperdoc.utils.log import get_logger
15
16
 
17
+ if TYPE_CHECKING:
18
+ from rich.console import Console
19
+ from prompt_toolkit import PromptSession
20
+
16
21
  logger = get_logger()
17
22
 
18
23
 
@@ -88,7 +93,7 @@ def permission_key(tool: Tool[Any, Any], parsed_input: Any) -> str:
88
93
 
89
94
 
90
95
  def _render_options_prompt(prompt: str, options: list[tuple[str, str]]) -> str:
91
- """Render a simple numbered prompt."""
96
+ """Render a simple numbered prompt (fallback for non-Rich environments)."""
92
97
  border = "─" * 120
93
98
  lines = [border, prompt, ""]
94
99
  for idx, (_, label) in enumerate(options, start=1):
@@ -100,6 +105,42 @@ def _render_options_prompt(prompt: str, options: list[tuple[str, str]]) -> str:
100
105
  return "\n".join(lines)
101
106
 
102
107
 
108
+ def _render_options_prompt_rich(
109
+ console: "Console",
110
+ prompt: str,
111
+ options: list[tuple[str, str]]
112
+ ) -> None:
113
+ """Render permission dialog using Rich Panel for better visual consistency."""
114
+ from rich.panel import Panel
115
+ from rich.text import Text
116
+
117
+ # Build option lines with markup
118
+ option_lines = []
119
+ for idx, (_, label) in enumerate(options, start=1):
120
+ prefix = "[cyan]❯[/cyan]" if idx == 1 else " "
121
+ option_lines.append(f"{prefix} {idx}. {label}")
122
+
123
+ numeric_choices = "/".join(str(i) for i in range(1, len(options) + 1))
124
+ shortcut_choices = "/".join(opt[0] for opt in options)
125
+
126
+ # Build the prompt content as a markup string
127
+ markup_content = f"{prompt}\n\n" + "\n".join(option_lines) + "\n"
128
+ markup_content += f"Choice ([cyan]{numeric_choices}[/cyan] or [cyan]{shortcut_choices}[/cyan]): "
129
+
130
+ # Parse markup to create a Text object
131
+ content = Text.from_markup(markup_content)
132
+
133
+ # Render the panel
134
+ panel = Panel(
135
+ content,
136
+ title=Text.from_markup("[yellow]Permission Required[/yellow]"),
137
+ title_align="left",
138
+ border_style="yellow",
139
+ padding=(0, 1),
140
+ )
141
+ console.print(panel)
142
+
143
+
103
144
  def _rule_strings(rule_suggestions: Optional[Any]) -> list[str]:
104
145
  """Normalize rule suggestions to simple strings."""
105
146
  if not rule_suggestions:
@@ -117,9 +158,18 @@ def make_permission_checker(
117
158
  project_path: Path,
118
159
  yolo_mode: bool,
119
160
  prompt_fn: Optional[Callable[[str], str]] = None,
161
+ console: Optional["Console"] = None,
162
+ prompt_session: Optional["PromptSession"] = None,
120
163
  ) -> Callable[[Tool[Any, Any], Any], Awaitable[PermissionResult]]:
121
164
  """Create a permission checking function for the current project.
122
165
 
166
+ Args:
167
+ project_path: Path to the project directory
168
+ yolo_mode: If True, all tool calls are allowed without prompting
169
+ prompt_fn: Optional function to use for prompting (defaults to input())
170
+ console: Optional Rich console for rich permission dialogs
171
+ prompt_session: Optional PromptSession for better interrupt handling
172
+
123
173
  In yolo mode, all tool calls are allowed without prompting.
124
174
  """
125
175
 
@@ -130,13 +180,41 @@ def make_permission_checker(
130
180
  session_tool_rules: dict[str, Set[str]] = defaultdict(set)
131
181
 
132
182
  async def _prompt_user(prompt: str, options: list[tuple[str, str]]) -> str:
133
- """Prompt the user without blocking the event loop."""
183
+ """Prompt the user with proper interrupt handling."""
184
+ # Build the prompt message
185
+ if console is not None:
186
+ # Use Rich Panel for the dialog
187
+ _render_options_prompt_rich(console, prompt, options)
188
+ # Build simple prompt for the input line
189
+ numeric_choices = "/".join(str(i) for i in range(1, len(options) + 1))
190
+ shortcut_choices = "/".join(opt[0] for opt in options)
191
+ input_prompt = f"Choice ({numeric_choices} or {shortcut_choices}): "
192
+ else:
193
+ # Use plain text rendering
194
+ rendered = _render_options_prompt(prompt, options)
195
+ input_prompt = rendered
196
+
197
+ # Try to use PromptSession if available (better interrupt handling)
198
+ if prompt_session is not None:
199
+ try:
200
+ # PromptSession.prompt() can handle Ctrl+C gracefully
201
+ return await prompt_session.prompt_async(input_prompt)
202
+ except KeyboardInterrupt:
203
+ logger.debug("[permissions] KeyboardInterrupt in prompt_session")
204
+ return "n"
205
+ except EOFError:
206
+ logger.debug("[permissions] EOFError in prompt_session")
207
+ return "n"
208
+
209
+ # Fallback to simple input() via executor
134
210
  loop = asyncio.get_running_loop()
135
211
  responder = prompt_fn or input
136
212
 
137
213
  def _ask() -> str:
138
- rendered = _render_options_prompt(prompt, options)
139
- return responder(rendered)
214
+ try:
215
+ return responder(input_prompt)
216
+ except (KeyboardInterrupt, EOFError):
217
+ return "n"
140
218
 
141
219
  return await loop.run_in_executor(None, _ask)
142
220
 
@@ -148,8 +226,9 @@ def make_permission_checker(
148
226
  return PermissionResult(result=True)
149
227
 
150
228
  try:
151
- if hasattr(tool, "needs_permissions") and not tool.needs_permissions(parsed_input):
152
- return PermissionResult(result=True)
229
+ needs_permission = True
230
+ if hasattr(tool, "needs_permissions"):
231
+ needs_permission = tool.needs_permissions(parsed_input)
153
232
  except (TypeError, AttributeError, ValueError) as exc:
154
233
  # Tool implementation error - log and deny for safety
155
234
  logger.warning(
@@ -166,10 +245,25 @@ def make_permission_checker(
166
245
  )
167
246
 
168
247
  allowed_tools = set(config.allowed_tools or [])
248
+
249
+ global_config = config_manager.get_global_config()
250
+ local_config = config_manager.get_project_local_config(project_path)
251
+
169
252
  allow_rules = {
170
- "Bash": set(config.bash_allow_rules or []) | session_tool_rules.get("Bash", set())
253
+ "Bash": (
254
+ set(config.bash_allow_rules or [])
255
+ | set(global_config.user_allow_rules or [])
256
+ | set(local_config.local_allow_rules or [])
257
+ | session_tool_rules.get("Bash", set())
258
+ )
259
+ }
260
+ deny_rules = {
261
+ "Bash": (
262
+ set(config.bash_deny_rules or [])
263
+ | set(global_config.user_deny_rules or [])
264
+ | set(local_config.local_deny_rules or [])
265
+ )
171
266
  }
172
- deny_rules = {"Bash": set(config.bash_deny_rules or [])}
173
267
  allowed_working_dirs = {
174
268
  str(project_path.resolve()),
175
269
  *[str(Path(p).resolve()) for p in config.working_directories or []],
@@ -219,6 +313,22 @@ def make_permission_checker(
219
313
  rule_suggestions=[ToolRule(tool_name=tool.name, rule_content=tool.name)],
220
314
  )
221
315
 
316
+ # If tool doesn't normally require permission (e.g., read-only Bash),
317
+ # enforce deny rules but otherwise skip prompting.
318
+ if not needs_permission:
319
+ if decision.behavior == "deny":
320
+ return PermissionResult(
321
+ result=False,
322
+ message=decision.message or f"Permission denied for tool '{tool.name}'.",
323
+ decision=decision,
324
+ )
325
+ return PermissionResult(
326
+ result=True,
327
+ message=decision.message,
328
+ updated_input=decision.updated_input,
329
+ decision=decision,
330
+ )
331
+
222
332
  if decision.behavior == "allow":
223
333
  return PermissionResult(
224
334
  result=True,
@@ -235,6 +345,47 @@ def make_permission_checker(
235
345
  )
236
346
 
237
347
  # Ask/passthrough flows prompt the user.
348
+ tool_input_dict = (
349
+ parsed_input.model_dump()
350
+ if hasattr(parsed_input, "model_dump")
351
+ else dict(parsed_input)
352
+ if isinstance(parsed_input, dict)
353
+ else {}
354
+ )
355
+ try:
356
+ hook_result = await hook_manager.run_permission_request_async(
357
+ tool.name, tool_input_dict
358
+ )
359
+ if hook_result.outputs:
360
+ updated_input = hook_result.updated_input or decision.updated_input
361
+ if hook_result.should_allow:
362
+ return PermissionResult(
363
+ result=True,
364
+ message=decision.message,
365
+ updated_input=updated_input,
366
+ decision=decision,
367
+ )
368
+ if hook_result.should_block or not hook_result.should_continue:
369
+ reason = (
370
+ hook_result.block_reason
371
+ or hook_result.stop_reason
372
+ or decision.message
373
+ or f"Permission denied for tool '{tool.name}'."
374
+ )
375
+ return PermissionResult(
376
+ result=False,
377
+ message=reason,
378
+ updated_input=updated_input,
379
+ decision=decision,
380
+ )
381
+ except (RuntimeError, ValueError, TypeError, OSError) as exc:
382
+ logger.warning(
383
+ "[permissions] PermissionRequest hook failed: %s: %s",
384
+ type(exc).__name__,
385
+ exc,
386
+ extra={"tool": getattr(tool, "name", None)},
387
+ )
388
+
238
389
  input_preview = _format_input_preview(parsed_input, tool_name=tool.name)
239
390
  prompt_lines = [
240
391
  f"{tool.name}",
@@ -80,10 +80,18 @@ def _effort_from_tokens(max_thinking_tokens: int) -> Optional[str]:
80
80
 
81
81
 
82
82
  def _detect_openai_vendor(model_profile: ModelProfile) -> str:
83
- """Best-effort vendor hint for OpenAI-compatible endpoints."""
83
+ """Best-effort vendor hint for OpenAI-compatible endpoints.
84
+
85
+ If thinking_mode is explicitly set to "none" or "disabled", returns "none"
86
+ to skip all thinking protocol handling.
87
+ """
84
88
  override = getattr(model_profile, "thinking_mode", None)
85
89
  if isinstance(override, str) and override.strip():
86
- return override.strip().lower()
90
+ mode = override.strip().lower()
91
+ # Allow explicit disable of thinking protocol
92
+ if mode in ("disabled", "off"):
93
+ return "none"
94
+ return mode
87
95
  base = (model_profile.api_base or "").lower()
88
96
  name = (model_profile.model or "").lower()
89
97
  if "openrouter.ai" in base:
@@ -106,21 +114,25 @@ def _build_thinking_kwargs(
106
114
  extra_body: Dict[str, Any] = {}
107
115
  top_level: Dict[str, Any] = {}
108
116
  vendor = _detect_openai_vendor(model_profile)
117
+
118
+ # Skip thinking protocol if explicitly disabled
119
+ if vendor == "none":
120
+ return extra_body, top_level
121
+
109
122
  effort = _effort_from_tokens(max_thinking_tokens)
110
123
 
111
124
  if vendor == "deepseek":
112
125
  if max_thinking_tokens != 0:
113
126
  extra_body["thinking"] = {"type": "enabled"}
114
127
  elif vendor == "qwen":
128
+ # Only send enable_thinking when explicitly enabling thinking mode
129
+ # Some qwen-compatible APIs don't support this parameter
115
130
  if max_thinking_tokens > 0:
116
131
  extra_body["enable_thinking"] = True
117
- elif max_thinking_tokens == 0:
118
- extra_body["enable_thinking"] = False
119
132
  elif vendor == "openrouter":
133
+ # Only send reasoning when explicitly enabling thinking mode
120
134
  if max_thinking_tokens > 0:
121
135
  extra_body["reasoning"] = {"max_tokens": max_thinking_tokens}
122
- elif max_thinking_tokens == 0:
123
- extra_body["reasoning"] = {"effort": "none"}
124
136
  elif vendor == "gemini_openai":
125
137
  google_cfg: Dict[str, Any] = {}
126
138
  if max_thinking_tokens > 0:
@@ -250,6 +262,15 @@ class OpenAIClient(ProviderClient):
250
262
  model_profile, max_thinking_tokens
251
263
  )
252
264
 
265
+ logger.debug(
266
+ "[openai_client] Starting API request",
267
+ extra={
268
+ "model": model_profile.model,
269
+ "api_base": model_profile.api_base,
270
+ "request_timeout": request_timeout,
271
+ },
272
+ )
273
+
253
274
  logger.debug(
254
275
  "[openai_client] Request parameters",
255
276
  extra={
@@ -300,9 +321,10 @@ class OpenAIClient(ProviderClient):
300
321
  if getattr(chunk, "usage", None):
301
322
  streamed_usage.update(openai_usage_tokens(chunk.usage))
302
323
 
303
- if not getattr(chunk, "choices", None):
324
+ choices = getattr(chunk, "choices", None)
325
+ if not choices or len(choices) == 0:
304
326
  continue
305
- delta = getattr(chunk.choices[0], "delta", None)
327
+ delta = getattr(choices[0], "delta", None)
306
328
  if not delta:
307
329
  continue
308
330
 
@@ -419,12 +441,13 @@ class OpenAIClient(ProviderClient):
419
441
  )
420
442
 
421
443
  if (
422
- can_stream_text
444
+ can_stream
423
445
  and not collected_text
424
446
  and not streamed_tool_calls
425
447
  and not streamed_tool_text
448
+ and not stream_reasoning_text
426
449
  ):
427
- logger.debug(
450
+ logger.warning(
428
451
  "[openai_client] Streaming returned no content; retrying without stream",
429
452
  extra={"model": model_profile.model},
430
453
  )
@@ -449,6 +472,30 @@ class OpenAIClient(ProviderClient):
449
472
  if not can_stream and (
450
473
  not openai_response or not getattr(openai_response, "choices", None)
451
474
  ):
475
+ # Check for non-standard error response (e.g., iflow returns HTTP 200 with error JSON)
476
+ error_msg = (
477
+ getattr(openai_response, "msg", None)
478
+ or getattr(openai_response, "message", None)
479
+ or getattr(openai_response, "error", None)
480
+ )
481
+ error_status = getattr(openai_response, "status", None)
482
+ if error_msg or error_status:
483
+ error_text = f"API Error: {error_msg or 'Unknown error'}"
484
+ if error_status:
485
+ error_text = f"API Error ({error_status}): {error_msg or 'Unknown error'}"
486
+ logger.error(
487
+ "[openai_client] Non-standard error response from API",
488
+ extra={
489
+ "model": model_profile.model,
490
+ "error_status": error_status,
491
+ "error_msg": error_msg,
492
+ },
493
+ )
494
+ return ProviderResponse.create_error(
495
+ error_code="api_error",
496
+ error_message=error_text,
497
+ duration_ms=duration_ms,
498
+ )
452
499
  logger.warning(
453
500
  "[openai_client] No choices returned from OpenAI response",
454
501
  extra={"model": model_profile.model},
@@ -486,23 +533,32 @@ class OpenAIClient(ProviderClient):
486
533
  )
487
534
  finish_reason = "stream"
488
535
  else:
489
- choice = openai_response.choices[0]
490
- content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
491
- finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))
492
- message_obj = getattr(choice, "message", None) or choice
493
- reasoning_content = getattr(message_obj, "reasoning_content", None)
494
- if reasoning_content:
495
- response_metadata["reasoning_content"] = reasoning_content
496
- reasoning_field = getattr(message_obj, "reasoning", None)
497
- if reasoning_field:
498
- response_metadata["reasoning"] = reasoning_field
499
- if "reasoning_content" not in response_metadata and isinstance(
500
- reasoning_field, str
501
- ):
502
- response_metadata["reasoning_content"] = reasoning_field
503
- reasoning_details = getattr(message_obj, "reasoning_details", None)
504
- if reasoning_details:
505
- response_metadata["reasoning_details"] = reasoning_details
536
+ response_choices = getattr(openai_response, "choices", None)
537
+ if not response_choices or len(response_choices) == 0:
538
+ logger.warning(
539
+ "[openai_client] Empty choices in response",
540
+ extra={"model": model_profile.model},
541
+ )
542
+ content_blocks = [{"type": "text", "text": ""}]
543
+ finish_reason = "error"
544
+ else:
545
+ choice = response_choices[0]
546
+ content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
547
+ finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))
548
+ message_obj = getattr(choice, "message", None) or choice
549
+ reasoning_content = getattr(message_obj, "reasoning_content", None)
550
+ if reasoning_content:
551
+ response_metadata["reasoning_content"] = reasoning_content
552
+ reasoning_field = getattr(message_obj, "reasoning", None)
553
+ if reasoning_field:
554
+ response_metadata["reasoning"] = reasoning_field
555
+ if "reasoning_content" not in response_metadata and isinstance(
556
+ reasoning_field, str
557
+ ):
558
+ response_metadata["reasoning_content"] = reasoning_field
559
+ reasoning_details = getattr(message_obj, "reasoning_details", None)
560
+ if reasoning_details:
561
+ response_metadata["reasoning_details"] = reasoning_details
506
562
 
507
563
  if can_stream:
508
564
  if stream_reasoning_text:
@@ -522,7 +578,7 @@ class OpenAIClient(ProviderClient):
522
578
  },
523
579
  )
524
580
 
525
- logger.info(
581
+ logger.debug(
526
582
  "[openai_client] Response received",
527
583
  extra={
528
584
  "model": model_profile.model,