ripperdoc 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/__main__.py +0 -5
- ripperdoc/cli/cli.py +37 -16
- ripperdoc/cli/commands/__init__.py +2 -0
- ripperdoc/cli/commands/agents_cmd.py +12 -9
- ripperdoc/cli/commands/compact_cmd.py +7 -3
- ripperdoc/cli/commands/context_cmd.py +35 -15
- ripperdoc/cli/commands/doctor_cmd.py +27 -14
- ripperdoc/cli/commands/exit_cmd.py +1 -1
- ripperdoc/cli/commands/mcp_cmd.py +13 -8
- ripperdoc/cli/commands/memory_cmd.py +5 -5
- ripperdoc/cli/commands/models_cmd.py +47 -16
- ripperdoc/cli/commands/permissions_cmd.py +302 -0
- ripperdoc/cli/commands/resume_cmd.py +1 -2
- ripperdoc/cli/commands/tasks_cmd.py +24 -13
- ripperdoc/cli/ui/rich_ui.py +523 -396
- ripperdoc/cli/ui/tool_renderers.py +298 -0
- ripperdoc/core/agents.py +172 -4
- ripperdoc/core/config.py +130 -6
- ripperdoc/core/default_tools.py +13 -2
- ripperdoc/core/permissions.py +20 -14
- ripperdoc/core/providers/__init__.py +31 -15
- ripperdoc/core/providers/anthropic.py +122 -8
- ripperdoc/core/providers/base.py +93 -15
- ripperdoc/core/providers/gemini.py +539 -96
- ripperdoc/core/providers/openai.py +371 -26
- ripperdoc/core/query.py +301 -62
- ripperdoc/core/query_utils.py +51 -7
- ripperdoc/core/skills.py +295 -0
- ripperdoc/core/system_prompt.py +79 -67
- ripperdoc/core/tool.py +15 -6
- ripperdoc/sdk/client.py +14 -1
- ripperdoc/tools/ask_user_question_tool.py +431 -0
- ripperdoc/tools/background_shell.py +82 -26
- ripperdoc/tools/bash_tool.py +356 -209
- ripperdoc/tools/dynamic_mcp_tool.py +428 -0
- ripperdoc/tools/enter_plan_mode_tool.py +226 -0
- ripperdoc/tools/exit_plan_mode_tool.py +153 -0
- ripperdoc/tools/file_edit_tool.py +53 -10
- ripperdoc/tools/file_read_tool.py +17 -7
- ripperdoc/tools/file_write_tool.py +49 -13
- ripperdoc/tools/glob_tool.py +10 -9
- ripperdoc/tools/grep_tool.py +182 -51
- ripperdoc/tools/ls_tool.py +6 -6
- ripperdoc/tools/mcp_tools.py +172 -413
- ripperdoc/tools/multi_edit_tool.py +49 -9
- ripperdoc/tools/notebook_edit_tool.py +57 -13
- ripperdoc/tools/skill_tool.py +205 -0
- ripperdoc/tools/task_tool.py +91 -9
- ripperdoc/tools/todo_tool.py +12 -12
- ripperdoc/tools/tool_search_tool.py +5 -6
- ripperdoc/utils/coerce.py +34 -0
- ripperdoc/utils/context_length_errors.py +252 -0
- ripperdoc/utils/file_watch.py +5 -4
- ripperdoc/utils/json_utils.py +4 -4
- ripperdoc/utils/log.py +3 -3
- ripperdoc/utils/mcp.py +82 -22
- ripperdoc/utils/memory.py +9 -6
- ripperdoc/utils/message_compaction.py +19 -16
- ripperdoc/utils/messages.py +73 -8
- ripperdoc/utils/path_ignore.py +677 -0
- ripperdoc/utils/permissions/__init__.py +7 -1
- ripperdoc/utils/permissions/path_validation_utils.py +5 -3
- ripperdoc/utils/permissions/shell_command_validation.py +496 -18
- ripperdoc/utils/prompt.py +1 -1
- ripperdoc/utils/safe_get_cwd.py +5 -2
- ripperdoc/utils/session_history.py +38 -19
- ripperdoc/utils/todo.py +6 -2
- ripperdoc/utils/token_estimation.py +34 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/METADATA +14 -1
- ripperdoc-0.2.5.dist-info/RECORD +107 -0
- ripperdoc-0.2.3.dist-info/RECORD +0 -95
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/top_level.txt +0 -0
ripperdoc/utils/messages.py
CHANGED
@@ -6,7 +6,7 @@ for communication with AI models.
 
 import json
 from typing import Any, Dict, List, Optional, Union
-from pydantic import BaseModel, ConfigDict
+from pydantic import BaseModel, ConfigDict, Field
 from uuid import uuid4
 from enum import Enum
 from ripperdoc.utils.log import get_logger
@@ -27,6 +27,9 @@ class MessageContent(BaseModel):
 
     type: str
     text: Optional[str] = None
+    thinking: Optional[str] = None
+    signature: Optional[str] = None
+    data: Optional[str] = None
     # Some providers return tool_use IDs as "id", others as "tool_use_id"
     id: Optional[str] = None
     tool_use_id: Optional[str] = None
@@ -38,6 +41,18 @@ class MessageContent(BaseModel):
 def _content_block_to_api(block: MessageContent) -> Dict[str, Any]:
     """Convert a MessageContent block to API-ready dict for tool protocols."""
     block_type = getattr(block, "type", None)
+    if block_type == "thinking":
+        return {
+            "type": "thinking",
+            "thinking": getattr(block, "thinking", None) or getattr(block, "text", None) or "",
+            "signature": getattr(block, "signature", None),
+        }
+    if block_type == "redacted_thinking":
+        return {
+            "type": "redacted_thinking",
+            "data": getattr(block, "data", None) or getattr(block, "text", None) or "",
+            "signature": getattr(block, "signature", None),
+        }
     if block_type == "tool_use":
         return {
             "type": "tool_use",
@@ -75,8 +90,11 @@ def _content_block_to_openai(block: MessageContent) -> Dict[str, Any]:
     args = getattr(block, "input", None) or {}
     try:
         args_str = json.dumps(args)
-    except
-        logger.
+    except (TypeError, ValueError) as exc:
+        logger.warning(
+            "[_content_block_to_openai] Failed to serialize tool arguments: %s: %s",
+            type(exc).__name__, exc,
+        )
         args_str = "{}"
     tool_call_id = (
         getattr(block, "id", None) or getattr(block, "tool_use_id", "") or str(uuid4())
@@ -118,6 +136,8 @@ class Message(BaseModel):
 
     role: MessageRole
     content: Union[str, List[MessageContent]]
+    reasoning: Optional[Any] = None
+    metadata: Dict[str, Any] = Field(default_factory=dict)
     uuid: str = ""
 
     def __init__(self, **data: object) -> None:
@@ -187,9 +207,12 @@ def create_user_message(
     try:
         if hasattr(tool_use_result, "model_dump"):
             tool_use_result = tool_use_result.model_dump()
-    except
+    except (AttributeError, TypeError, ValueError) as exc:
         # Fallback: keep as-is if conversion fails
-        logger.
+        logger.warning(
+            "[create_user_message] Failed to normalize tool_use_result: %s: %s",
+            type(exc).__name__, exc,
+        )
 
     message = Message(role=MessageRole.USER, content=message_content)
 
@@ -208,7 +231,11 @@ def create_user_message(
 
 
 def create_assistant_message(
-    content: Union[str, List[Dict[str, Any]]],
+    content: Union[str, List[Dict[str, Any]]],
+    cost_usd: float = 0.0,
+    duration_ms: float = 0.0,
+    reasoning: Optional[Any] = None,
+    metadata: Optional[Dict[str, Any]] = None,
 ) -> AssistantMessage:
     """Create an assistant message."""
     if isinstance(content, str):
@@ -216,7 +243,12 @@ def create_assistant_message(
     else:
         message_content = [MessageContent(**item) for item in content]
 
-    message = Message(
+    message = Message(
+        role=MessageRole.ASSISTANT,
+        content=message_content,
+        reasoning=reasoning,
+        metadata=metadata or {},
+    )
 
     return AssistantMessage(message=message, cost_usd=cost_usd, duration_ms=duration_ms)
 
@@ -264,6 +296,28 @@ def normalize_messages_for_api(
             return msg.get("content")
         return None
 
+    def _msg_metadata(msg: Any) -> Dict[str, Any]:
+        message_obj = getattr(msg, "message", None)
+        if message_obj is not None and hasattr(message_obj, "metadata"):
+            try:
+                meta = getattr(message_obj, "metadata", {}) or {}
+                meta_dict = dict(meta) if isinstance(meta, dict) else {}
+            except (TypeError, ValueError):
+                meta_dict = {}
+            reasoning_val = getattr(message_obj, "reasoning", None)
+            if reasoning_val is not None and "reasoning" not in meta_dict:
+                meta_dict["reasoning"] = reasoning_val
+            return meta_dict
+        if isinstance(msg, dict):
+            message_payload = msg.get("message")
+            if isinstance(message_payload, dict):
+                meta = message_payload.get("metadata") or {}
+                meta_dict = dict(meta) if isinstance(meta, dict) else {}
+                if "reasoning" not in meta_dict and "reasoning" in message_payload:
+                    meta_dict["reasoning"] = message_payload.get("reasoning")
+                return meta_dict
+        return {}
+
     def _block_type(block: Any) -> Optional[str]:
         if hasattr(block, "type"):
             return getattr(block, "type", None)
@@ -299,7 +353,7 @@ def normalize_messages_for_api(
         if input_data not in (None, {}):
            try:
                input_preview = json.dumps(input_data)
-            except
+            except (TypeError, ValueError):
                input_preview = str(input_data)
        tool_id = _block_attr(blk, "tool_use_id") or _block_attr(blk, "id")
        desc = "Tool call"
@@ -352,6 +406,7 @@ def normalize_messages_for_api(
 
        if msg_type == "user":
            user_content = _msg_content(msg)
+            meta = _msg_metadata(msg)
            if isinstance(user_content, list):
                if protocol == "openai":
                    # Map each block to an OpenAI-style message
@@ -362,6 +417,11 @@ def normalize_messages_for_api(
                        mapped = _content_block_to_openai(block)
                        if mapped:
                            openai_msgs.append(mapped)
+                    if meta and openai_msgs:
+                        for candidate in openai_msgs:
+                            for key in ("reasoning_content", "reasoning_details", "reasoning"):
+                                if key in meta and meta[key] is not None:
+                                    candidate[key] = meta[key]
                    normalized.extend(openai_msgs)
                    continue
                api_blocks = []
@@ -374,6 +434,7 @@ def normalize_messages_for_api(
            normalized.append({"role": "user", "content": user_content})  # type: ignore
        elif msg_type == "assistant":
            asst_content = _msg_content(msg)
+            meta = _msg_metadata(msg)
            if isinstance(asst_content, list):
                if protocol == "openai":
                    assistant_openai_msgs: List[Dict[str, Any]] = []
@@ -417,6 +478,10 @@ def normalize_messages_for_api(
                            "tool_calls": tool_calls,
                        }
                    )
+                    if meta and assistant_openai_msgs:
+                        for key in ("reasoning_content", "reasoning_details", "reasoning"):
+                            if key in meta and meta[key] is not None:
+                                assistant_openai_msgs[-1][key] = meta[key]
                    normalized.extend(assistant_openai_msgs)
                    continue
                api_blocks = []