ripperdoc 0.2.4-py3-none-any.whl → 0.2.5-py3-none-any.whl

This diff compares two package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (75)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/__main__.py +0 -5
  3. ripperdoc/cli/cli.py +37 -16
  4. ripperdoc/cli/commands/__init__.py +2 -0
  5. ripperdoc/cli/commands/agents_cmd.py +12 -9
  6. ripperdoc/cli/commands/compact_cmd.py +7 -3
  7. ripperdoc/cli/commands/context_cmd.py +33 -13
  8. ripperdoc/cli/commands/doctor_cmd.py +27 -14
  9. ripperdoc/cli/commands/exit_cmd.py +1 -1
  10. ripperdoc/cli/commands/mcp_cmd.py +13 -8
  11. ripperdoc/cli/commands/memory_cmd.py +5 -5
  12. ripperdoc/cli/commands/models_cmd.py +47 -16
  13. ripperdoc/cli/commands/permissions_cmd.py +302 -0
  14. ripperdoc/cli/commands/resume_cmd.py +1 -2
  15. ripperdoc/cli/commands/tasks_cmd.py +24 -13
  16. ripperdoc/cli/ui/rich_ui.py +500 -406
  17. ripperdoc/cli/ui/tool_renderers.py +298 -0
  18. ripperdoc/core/agents.py +17 -9
  19. ripperdoc/core/config.py +130 -6
  20. ripperdoc/core/default_tools.py +7 -2
  21. ripperdoc/core/permissions.py +20 -14
  22. ripperdoc/core/providers/anthropic.py +107 -4
  23. ripperdoc/core/providers/base.py +33 -4
  24. ripperdoc/core/providers/gemini.py +169 -50
  25. ripperdoc/core/providers/openai.py +257 -23
  26. ripperdoc/core/query.py +294 -61
  27. ripperdoc/core/query_utils.py +50 -6
  28. ripperdoc/core/skills.py +295 -0
  29. ripperdoc/core/system_prompt.py +13 -7
  30. ripperdoc/core/tool.py +8 -6
  31. ripperdoc/sdk/client.py +14 -1
  32. ripperdoc/tools/ask_user_question_tool.py +20 -22
  33. ripperdoc/tools/background_shell.py +19 -13
  34. ripperdoc/tools/bash_tool.py +356 -209
  35. ripperdoc/tools/dynamic_mcp_tool.py +428 -0
  36. ripperdoc/tools/enter_plan_mode_tool.py +5 -2
  37. ripperdoc/tools/exit_plan_mode_tool.py +6 -3
  38. ripperdoc/tools/file_edit_tool.py +53 -10
  39. ripperdoc/tools/file_read_tool.py +17 -7
  40. ripperdoc/tools/file_write_tool.py +49 -13
  41. ripperdoc/tools/glob_tool.py +10 -9
  42. ripperdoc/tools/grep_tool.py +182 -51
  43. ripperdoc/tools/ls_tool.py +6 -6
  44. ripperdoc/tools/mcp_tools.py +106 -456
  45. ripperdoc/tools/multi_edit_tool.py +49 -9
  46. ripperdoc/tools/notebook_edit_tool.py +57 -13
  47. ripperdoc/tools/skill_tool.py +205 -0
  48. ripperdoc/tools/task_tool.py +7 -8
  49. ripperdoc/tools/todo_tool.py +12 -12
  50. ripperdoc/tools/tool_search_tool.py +5 -6
  51. ripperdoc/utils/coerce.py +34 -0
  52. ripperdoc/utils/context_length_errors.py +252 -0
  53. ripperdoc/utils/file_watch.py +5 -4
  54. ripperdoc/utils/json_utils.py +4 -4
  55. ripperdoc/utils/log.py +3 -3
  56. ripperdoc/utils/mcp.py +36 -15
  57. ripperdoc/utils/memory.py +9 -6
  58. ripperdoc/utils/message_compaction.py +16 -11
  59. ripperdoc/utils/messages.py +73 -8
  60. ripperdoc/utils/path_ignore.py +677 -0
  61. ripperdoc/utils/permissions/__init__.py +7 -1
  62. ripperdoc/utils/permissions/path_validation_utils.py +5 -3
  63. ripperdoc/utils/permissions/shell_command_validation.py +496 -18
  64. ripperdoc/utils/prompt.py +1 -1
  65. ripperdoc/utils/safe_get_cwd.py +5 -2
  66. ripperdoc/utils/session_history.py +38 -19
  67. ripperdoc/utils/todo.py +6 -2
  68. ripperdoc/utils/token_estimation.py +4 -3
  69. {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/METADATA +12 -1
  70. ripperdoc-0.2.5.dist-info/RECORD +107 -0
  71. ripperdoc-0.2.4.dist-info/RECORD +0 -99
  72. {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/WHEEL +0 -0
  73. {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/entry_points.txt +0 -0
  74. {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/licenses/LICENSE +0 -0
  75. {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/top_level.txt +0 -0
ripperdoc/core/providers/openai.py

@@ -7,6 +7,7 @@ import time
  from typing import Any, Dict, List, Optional, cast
  from uuid import uuid4

+ import openai
  from openai import AsyncOpenAI

  from ripperdoc.core.config import ModelProfile
@@ -32,6 +33,113 @@ from ripperdoc.utils.session_usage import record_usage
  logger = get_logger()


+ def _classify_openai_error(exc: Exception) -> tuple[str, str]:
+     """Classify an OpenAI exception into error code and user-friendly message."""
+     exc_type = type(exc).__name__
+     exc_msg = str(exc)
+
+     if isinstance(exc, openai.AuthenticationError):
+         return "authentication_error", f"Authentication failed: {exc_msg}"
+     if isinstance(exc, openai.PermissionDeniedError):
+         # Check for common permission denied reasons
+         if "balance" in exc_msg.lower() or "insufficient" in exc_msg.lower():
+             return "insufficient_balance", f"Insufficient balance: {exc_msg}"
+         return "permission_denied", f"Permission denied: {exc_msg}"
+     if isinstance(exc, openai.NotFoundError):
+         return "model_not_found", f"Model not found: {exc_msg}"
+     if isinstance(exc, openai.BadRequestError):
+         # Check for context length errors
+         if "context" in exc_msg.lower() or "token" in exc_msg.lower():
+             return "context_length_exceeded", f"Context length exceeded: {exc_msg}"
+         if "content" in exc_msg.lower() and "policy" in exc_msg.lower():
+             return "content_policy_violation", f"Content policy violation: {exc_msg}"
+         return "bad_request", f"Invalid request: {exc_msg}"
+     if isinstance(exc, openai.RateLimitError):
+         return "rate_limit", f"Rate limit exceeded: {exc_msg}"
+     if isinstance(exc, openai.APIConnectionError):
+         return "connection_error", f"Connection error: {exc_msg}"
+     if isinstance(exc, openai.APIStatusError):
+         return "api_error", f"API error ({exc.status_code}): {exc_msg}"
+     if isinstance(exc, asyncio.TimeoutError):
+         return "timeout", f"Request timed out: {exc_msg}"
+
+     # Generic fallback
+     return "unknown_error", f"Unexpected error ({exc_type}): {exc_msg}"
+
+
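Note: _classify_openai_error maps openai SDK exception types to stable error codes that callers can branch on, with anything unrecognized falling through to "unknown_error". (In the openai SDK, APITimeoutError subclasses APIConnectionError, so SDK-level timeouts classify as "connection_error"; the asyncio.TimeoutError branch catches asyncio.wait_for timeouts.) A minimal sketch of the fallback behavior, assuming the helper is importable from this module:

    import asyncio

    from ripperdoc.core.providers.openai import _classify_openai_error

    # The openai.* branches fire only for real SDK errors; these two inputs
    # exercise the timeout branch and the generic fallback.
    for exc in (asyncio.TimeoutError("read timed out"), ValueError("bad json")):
        code, message = _classify_openai_error(exc)
        print(code, "->", message)
    # timeout -> Request timed out: read timed out
    # unknown_error -> Unexpected error (ValueError): bad json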
+ def _effort_from_tokens(max_thinking_tokens: int) -> Optional[str]:
+     """Map a thinking token budget to a coarse effort label."""
+     if max_thinking_tokens <= 0:
+         return None
+     if max_thinking_tokens <= 1024:
+         return "low"
+     if max_thinking_tokens <= 8192:
+         return "medium"
+     return "high"
+
+
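Note: the thresholds in _effort_from_tokens split the budget into three bands; a worked example of the mapping:

    from ripperdoc.core.providers.openai import _effort_from_tokens

    assert _effort_from_tokens(0) is None         # non-positive: thinking not requested
    assert _effort_from_tokens(512) == "low"      # 1..1024
    assert _effort_from_tokens(4096) == "medium"  # 1025..8192
    assert _effort_from_tokens(32768) == "high"   # 8193 and up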
+ def _detect_openai_vendor(model_profile: ModelProfile) -> str:
+     """Best-effort vendor hint for OpenAI-compatible endpoints."""
+     override = getattr(model_profile, "thinking_mode", None)
+     if isinstance(override, str) and override.strip():
+         return override.strip().lower()
+     base = (model_profile.api_base or "").lower()
+     name = (model_profile.model or "").lower()
+     if "openrouter.ai" in base:
+         return "openrouter"
+     if "deepseek" in base or name.startswith("deepseek"):
+         return "deepseek"
+     if "dashscope" in base or "qwen" in name:
+         return "qwen"
+     if "generativelanguage.googleapis.com" in base or name.startswith("gemini"):
+         return "gemini_openai"
+     if "gpt-5" in name:
+         return "openai_reasoning"
+     return "openai"
+
+
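Note: _detect_openai_vendor honors an explicit thinking_mode override first, then matches on the base URL before the model name, so a profile routed through OpenRouter is tagged "openrouter" even when it serves a DeepSeek model. A sketch with a stand-in profile (ModelProfile's constructor is not shown in this diff, so a SimpleNamespace carries just the attributes the helper reads):

    from types import SimpleNamespace

    from ripperdoc.core.providers.openai import _detect_openai_vendor

    profile = SimpleNamespace(
        api_base="https://openrouter.ai/api/v1",
        model="deepseek/deepseek-r1",
        thinking_mode=None,
    )
    print(_detect_openai_vendor(profile))  # -> "openrouter": base URL outranks model name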
+ def _build_thinking_kwargs(
+     model_profile: ModelProfile, max_thinking_tokens: int
+ ) -> tuple[Dict[str, Any], Dict[str, Any]]:
+     """Return (extra_body, top_level_kwargs) for thinking-enabled calls."""
+     extra_body: Dict[str, Any] = {}
+     top_level: Dict[str, Any] = {}
+     vendor = _detect_openai_vendor(model_profile)
+     effort = _effort_from_tokens(max_thinking_tokens)
+
+     if vendor == "deepseek":
+         if max_thinking_tokens != 0:
+             extra_body["thinking"] = {"type": "enabled"}
+     elif vendor == "qwen":
+         if max_thinking_tokens > 0:
+             extra_body["enable_thinking"] = True
+         elif max_thinking_tokens == 0:
+             extra_body["enable_thinking"] = False
+     elif vendor == "openrouter":
+         if max_thinking_tokens > 0:
+             extra_body["reasoning"] = {"max_tokens": max_thinking_tokens}
+         elif max_thinking_tokens == 0:
+             extra_body["reasoning"] = {"effort": "none"}
+     elif vendor == "gemini_openai":
+         google_cfg: Dict[str, Any] = {}
+         if max_thinking_tokens > 0:
+             google_cfg["thinking_budget"] = max_thinking_tokens
+             google_cfg["include_thoughts"] = True
+         if google_cfg:
+             extra_body["google"] = {"thinking_config": google_cfg}
+         if effort:
+             top_level["reasoning_effort"] = effort
+             extra_body.setdefault("reasoning", {"effort": effort})
+     elif vendor == "openai_reasoning":
+         if effort:
+             extra_body["reasoning"] = {"effort": effort}
+     else:
+         if effort:
+             extra_body["reasoning"] = {"effort": effort}
+
+     return extra_body, top_level
+
+
  class OpenAIClient(ProviderClient):
      """OpenAI-compatible client with streaming and non-streaming support."""

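Note: _build_thinking_kwargs concentrates the per-vendor reasoning knobs: vendor extensions go into extra_body, and only the Gemini OpenAI-compatible endpoint also gets a top-level reasoning_effort keyword. Illustrative calls for a 4096-token budget, reusing the SimpleNamespace stand-in for ModelProfile:

    from types import SimpleNamespace

    from ripperdoc.core.providers.openai import _build_thinking_kwargs

    def profile(base: str, model: str) -> SimpleNamespace:
        # Stand-in for ModelProfile; only the attributes the helpers read.
        return SimpleNamespace(api_base=base, model=model, thinking_mode=None)

    extra, top = _build_thinking_kwargs(profile("https://api.deepseek.com", "deepseek-reasoner"), 4096)
    # extra == {"thinking": {"type": "enabled"}}; top == {}

    extra, top = _build_thinking_kwargs(profile("https://openrouter.ai/api/v1", "gpt-4o"), 4096)
    # extra == {"reasoning": {"max_tokens": 4096}}; top == {}

    extra, top = _build_thinking_kwargs(profile("", "gemini-2.0-flash"), 4096)
    # extra == {"google": {"thinking_config": {"thinking_budget": 4096, "include_thoughts": True}},
    #           "reasoning": {"effort": "medium"}}
    # top == {"reasoning_effort": "medium"}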
@@ -47,8 +155,60 @@ class OpenAIClient(ProviderClient):
          progress_callback: Optional[ProgressCallback],
          request_timeout: Optional[float],
          max_retries: int,
+         max_thinking_tokens: int,
      ) -> ProviderResponse:
          start_time = time.time()
+
+         try:
+             return await self._call_impl(
+                 model_profile=model_profile,
+                 system_prompt=system_prompt,
+                 normalized_messages=normalized_messages,
+                 tools=tools,
+                 tool_mode=tool_mode,
+                 stream=stream,
+                 progress_callback=progress_callback,
+                 request_timeout=request_timeout,
+                 max_retries=max_retries,
+                 max_thinking_tokens=max_thinking_tokens,
+                 start_time=start_time,
+             )
+         except asyncio.CancelledError:
+             raise  # Don't suppress task cancellation
+         except Exception as exc:
+             duration_ms = (time.time() - start_time) * 1000
+             error_code, error_message = _classify_openai_error(exc)
+             logger.error(
+                 "[openai_client] API call failed",
+                 extra={
+                     "model": model_profile.model,
+                     "error_code": error_code,
+                     "error_message": error_message,
+                     "duration_ms": round(duration_ms, 2),
+                 },
+             )
+             return ProviderResponse.create_error(
+                 error_code=error_code,
+                 error_message=error_message,
+                 duration_ms=duration_ms,
+             )
+
+     async def _call_impl(
+         self,
+         *,
+         model_profile: ModelProfile,
+         system_prompt: str,
+         normalized_messages: List[Dict[str, Any]],
+         tools: List[Tool[Any, Any]],
+         tool_mode: str,
+         stream: bool,
+         progress_callback: Optional[ProgressCallback],
+         request_timeout: Optional[float],
+         max_retries: int,
+         max_thinking_tokens: int,
+         start_time: float,
+     ) -> ProviderResponse:
+         """Internal implementation of call, may raise exceptions."""
          openai_tools = await build_openai_tool_schemas(tools)
          openai_messages: List[Dict[str, object]] = [
              {"role": "system", "content": system_prompt}
@@ -57,10 +217,16 @@ class OpenAIClient(ProviderClient):
          streamed_tool_calls: Dict[int, Dict[str, Optional[str]]] = {}
          streamed_tool_text: List[str] = []
          streamed_usage: Dict[str, int] = {}
+         stream_reasoning_text: List[str] = []
+         stream_reasoning_details: List[Any] = []
+         response_metadata: Dict[str, Any] = {}

          can_stream_text = stream and tool_mode == "text" and not openai_tools
          can_stream_tools = stream and tool_mode != "text" and bool(openai_tools)
          can_stream = can_stream_text or can_stream_tools
+         thinking_extra_body, thinking_top_level = _build_thinking_kwargs(
+             model_profile, max_thinking_tokens
+         )

          async with AsyncOpenAI(
              api_key=model_profile.api_key, base_url=model_profile.api_base
@@ -68,14 +234,20 @@ class OpenAIClient(ProviderClient):

              async def _stream_request() -> Dict[str, Dict[str, int]]:
                  announced_tool_indexes: set[int] = set()
+                 stream_kwargs: Dict[str, Any] = {
+                     "model": model_profile.model,
+                     "messages": cast(Any, openai_messages),
+                     "tools": openai_tools if openai_tools else None,
+                     "temperature": model_profile.temperature,
+                     "max_tokens": model_profile.max_tokens,
+                     "stream": True,
+                     "stream_options": {"include_usage": True},
+                     **thinking_top_level,
+                 }
+                 if thinking_extra_body:
+                     stream_kwargs["extra_body"] = thinking_extra_body
                  stream_coro = client.chat.completions.create(  # type: ignore[call-overload]
-                     model=model_profile.model,
-                     messages=cast(Any, openai_messages),
-                     tools=openai_tools if can_stream_tools else None,
-                     temperature=model_profile.temperature,
-                     max_tokens=model_profile.max_tokens,
-                     stream=True,
-                     stream_options={"include_usage": True},
+                     **stream_kwargs
                  )
                  stream_resp = (
                      await asyncio.wait_for(stream_coro, timeout=request_timeout)
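Note: folding the request into stream_kwargs lets thinking_top_level merge with the ordinary keyword arguments while vendor extensions travel in extra_body, which the openai SDK serializes into the outgoing JSON payload. For example, inside an async context with an AsyncOpenAI instance bound to client:

    # extra_body keys are merged into the request body by the SDK, so
    # vendor-specific fields need no first-class SDK support.
    await client.chat.completions.create(
        model="deepseek-reasoner",
        messages=[{"role": "user", "content": "hi"}],
        stream=True,
        extra_body={"thinking": {"type": "enabled"}},
    )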
@@ -105,14 +277,34 @@ class OpenAIClient(ProviderClient):
                              text_delta += text_val
                      elif isinstance(delta_content, str):
                          text_delta += delta_content
+                     delta_reasoning = getattr(delta, "reasoning_content", None) or getattr(
+                         delta, "reasoning", None
+                     )
+                     if isinstance(delta_reasoning, str):
+                         stream_reasoning_text.append(delta_reasoning)
+                     elif isinstance(delta_reasoning, list):
+                         for item in delta_reasoning:
+                             if isinstance(item, str):
+                                 stream_reasoning_text.append(item)
+                     delta_reasoning_details = getattr(delta, "reasoning_details", None)
+                     if delta_reasoning_details:
+                         if isinstance(delta_reasoning_details, list):
+                             stream_reasoning_details.extend(delta_reasoning_details)
+                         else:
+                             stream_reasoning_details.append(delta_reasoning_details)
                      if text_delta:
-                         target_collector = streamed_tool_text if can_stream_tools else collected_text
+                         target_collector = (
+                             streamed_tool_text if can_stream_tools else collected_text
+                         )
                          target_collector.append(text_delta)
                          if progress_callback:
                              try:
                                  await progress_callback(text_delta)
-                             except Exception:
-                                 logger.exception("[openai_client] Stream callback failed")
+                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
+                                 logger.warning(
+                                     "[openai_client] Stream callback failed: %s: %s",
+                                     type(cb_exc).__name__, cb_exc,
+                                 )

                      # Tool call deltas for native tool mode
                      if not can_stream_tools:
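Note: reasoning deltas are not standardized across OpenAI-compatible backends: some stream them as delta.reasoning_content (DeepSeek-style), others as delta.reasoning (string or list, as on OpenRouter), with optional structured delta.reasoning_details. The accumulation above reduces to the following, shown with stand-in deltas (real chunks come from the SDK stream):

    from types import SimpleNamespace

    deltas = [
        SimpleNamespace(reasoning_content="Let me think", reasoning=None),
        SimpleNamespace(reasoning_content=None, reasoning=[" step", " by step"]),
    ]
    parts: list[str] = []
    for delta in deltas:
        reasoning = getattr(delta, "reasoning_content", None) or getattr(delta, "reasoning", None)
        if isinstance(reasoning, str):
            parts.append(reasoning)
        elif isinstance(reasoning, list):
            parts.extend(item for item in reasoning if isinstance(item, str))
    print("".join(parts))  # -> "Let me think step by step"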
@@ -120,7 +312,9 @@ class OpenAIClient(ProviderClient):

                      for tool_delta in getattr(delta, "tool_calls", []) or []:
                          idx = getattr(tool_delta, "index", 0) or 0
-                         state = streamed_tool_calls.get(idx, {"id": None, "name": None, "arguments": ""})
+                         state = streamed_tool_calls.get(
+                             idx, {"id": None, "name": None, "arguments": ""}
+                         )

                          if getattr(tool_delta, "id", None):
                              state["id"] = tool_delta.id
@@ -136,28 +330,40 @@ class OpenAIClient(ProviderClient):
                              if progress_callback:
                                  try:
                                      await progress_callback(args_delta)
-                                 except Exception:
-                                     logger.exception("[openai_client] Stream callback failed")
+                                 except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
+                                     logger.warning(
+                                         "[openai_client] Stream callback failed: %s: %s",
+                                         type(cb_exc).__name__, cb_exc,
+                                     )

                          if idx not in announced_tool_indexes and state.get("name"):
                              announced_tool_indexes.add(idx)
                              if progress_callback:
                                  try:
                                      await progress_callback(f"[tool:{state['name']}]")
-                                 except Exception:
-                                     logger.exception("[openai_client] Stream callback failed")
+                                 except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
+                                     logger.warning(
+                                         "[openai_client] Stream callback failed: %s: %s",
+                                         type(cb_exc).__name__, cb_exc,
+                                     )

                          streamed_tool_calls[idx] = state

                  return {"usage": streamed_usage}

              async def _non_stream_request() -> Any:
+                 kwargs: Dict[str, Any] = {
+                     "model": model_profile.model,
+                     "messages": cast(Any, openai_messages),
+                     "tools": openai_tools if openai_tools else None,  # type: ignore[arg-type]
+                     "temperature": model_profile.temperature,
+                     "max_tokens": model_profile.max_tokens,
+                     **thinking_top_level,
+                 }
+                 if thinking_extra_body:
+                     kwargs["extra_body"] = thinking_extra_body
                  return await client.chat.completions.create(  # type: ignore[call-overload]
-                     model=model_profile.model,
-                     messages=cast(Any, openai_messages),
-                     tools=openai_tools if openai_tools else None,  # type: ignore[arg-type]
-                     temperature=model_profile.temperature,
-                     max_tokens=model_profile.max_tokens,
+                     **kwargs
                  )

              timeout_for_call = None if can_stream else request_timeout
@@ -185,15 +391,19 @@ class OpenAIClient(ProviderClient):
          )

          duration_ms = (time.time() - start_time) * 1000
-         usage_tokens = streamed_usage if can_stream else openai_usage_tokens(
-             getattr(openai_response, "usage", None)
+         usage_tokens = (
+             streamed_usage
+             if can_stream
+             else openai_usage_tokens(getattr(openai_response, "usage", None))
          )
          cost_usd = estimate_cost_usd(model_profile, usage_tokens)
          record_usage(
              model_profile.model, duration_ms=duration_ms, cost_usd=cost_usd, **usage_tokens
          )

-         if not can_stream and (not openai_response or not getattr(openai_response, "choices", None)):
+         if not can_stream and (
+             not openai_response or not getattr(openai_response, "choices", None)
+         ):
              logger.warning(
                  "[openai_client] No choices returned from OpenAI response",
                  extra={"model": model_profile.model},
@@ -204,6 +414,7 @@ class OpenAIClient(ProviderClient):
                  usage_tokens=usage_tokens,
                  cost_usd=cost_usd,
                  duration_ms=duration_ms,
+                 metadata=response_metadata,
              )

          content_blocks: List[Dict[str, Any]] = []
@@ -233,6 +444,28 @@ class OpenAIClient(ProviderClient):
              choice = openai_response.choices[0]
              content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
              finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))
+             message_obj = getattr(choice, "message", None) or choice
+             reasoning_content = getattr(message_obj, "reasoning_content", None)
+             if reasoning_content:
+                 response_metadata["reasoning_content"] = reasoning_content
+             reasoning_field = getattr(message_obj, "reasoning", None)
+             if reasoning_field:
+                 response_metadata["reasoning"] = reasoning_field
+             if "reasoning_content" not in response_metadata and isinstance(
+                 reasoning_field, str
+             ):
+                 response_metadata["reasoning_content"] = reasoning_field
+             reasoning_details = getattr(message_obj, "reasoning_details", None)
+             if reasoning_details:
+                 response_metadata["reasoning_details"] = reasoning_details
+
+         if can_stream:
+             if stream_reasoning_text:
+                 joined = "".join(stream_reasoning_text)
+                 response_metadata["reasoning_content"] = joined
+                 response_metadata.setdefault("reasoning", joined)
+             if stream_reasoning_details:
+                 response_metadata["reasoning_details"] = stream_reasoning_details

          logger.info(
              "[openai_client] Response received",
@@ -250,4 +483,5 @@ class OpenAIClient(ProviderClient):
              usage_tokens=usage_tokens,
              cost_usd=cost_usd,
              duration_ms=duration_ms,
+             metadata=response_metadata,
          )