ripperdoc 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +33 -115
- ripperdoc/cli/commands/__init__.py +70 -6
- ripperdoc/cli/commands/agents_cmd.py +6 -3
- ripperdoc/cli/commands/clear_cmd.py +1 -4
- ripperdoc/cli/commands/config_cmd.py +1 -1
- ripperdoc/cli/commands/context_cmd.py +3 -2
- ripperdoc/cli/commands/doctor_cmd.py +18 -4
- ripperdoc/cli/commands/help_cmd.py +11 -1
- ripperdoc/cli/commands/hooks_cmd.py +610 -0
- ripperdoc/cli/commands/models_cmd.py +26 -9
- ripperdoc/cli/commands/permissions_cmd.py +57 -37
- ripperdoc/cli/commands/resume_cmd.py +6 -4
- ripperdoc/cli/commands/status_cmd.py +4 -4
- ripperdoc/cli/commands/tasks_cmd.py +8 -4
- ripperdoc/cli/ui/file_mention_completer.py +64 -8
- ripperdoc/cli/ui/interrupt_handler.py +3 -4
- ripperdoc/cli/ui/message_display.py +5 -3
- ripperdoc/cli/ui/panels.py +13 -10
- ripperdoc/cli/ui/provider_options.py +247 -0
- ripperdoc/cli/ui/rich_ui.py +196 -77
- ripperdoc/cli/ui/spinner.py +25 -1
- ripperdoc/cli/ui/tool_renderers.py +8 -2
- ripperdoc/cli/ui/wizard.py +215 -0
- ripperdoc/core/agents.py +9 -3
- ripperdoc/core/config.py +49 -12
- ripperdoc/core/custom_commands.py +412 -0
- ripperdoc/core/default_tools.py +11 -2
- ripperdoc/core/hooks/__init__.py +99 -0
- ripperdoc/core/hooks/config.py +301 -0
- ripperdoc/core/hooks/events.py +535 -0
- ripperdoc/core/hooks/executor.py +496 -0
- ripperdoc/core/hooks/integration.py +344 -0
- ripperdoc/core/hooks/manager.py +745 -0
- ripperdoc/core/permissions.py +40 -8
- ripperdoc/core/providers/anthropic.py +548 -68
- ripperdoc/core/providers/gemini.py +70 -5
- ripperdoc/core/providers/openai.py +60 -5
- ripperdoc/core/query.py +140 -39
- ripperdoc/core/query_utils.py +2 -0
- ripperdoc/core/skills.py +9 -3
- ripperdoc/core/system_prompt.py +4 -2
- ripperdoc/core/tool.py +9 -5
- ripperdoc/sdk/client.py +2 -2
- ripperdoc/tools/ask_user_question_tool.py +5 -3
- ripperdoc/tools/background_shell.py +2 -1
- ripperdoc/tools/bash_output_tool.py +1 -1
- ripperdoc/tools/bash_tool.py +30 -20
- ripperdoc/tools/dynamic_mcp_tool.py +29 -8
- ripperdoc/tools/enter_plan_mode_tool.py +1 -1
- ripperdoc/tools/exit_plan_mode_tool.py +1 -1
- ripperdoc/tools/file_edit_tool.py +8 -4
- ripperdoc/tools/file_read_tool.py +9 -5
- ripperdoc/tools/file_write_tool.py +9 -5
- ripperdoc/tools/glob_tool.py +3 -2
- ripperdoc/tools/grep_tool.py +3 -2
- ripperdoc/tools/kill_bash_tool.py +1 -1
- ripperdoc/tools/ls_tool.py +1 -1
- ripperdoc/tools/mcp_tools.py +13 -10
- ripperdoc/tools/multi_edit_tool.py +8 -7
- ripperdoc/tools/notebook_edit_tool.py +7 -4
- ripperdoc/tools/skill_tool.py +1 -1
- ripperdoc/tools/task_tool.py +5 -4
- ripperdoc/tools/todo_tool.py +2 -2
- ripperdoc/tools/tool_search_tool.py +3 -2
- ripperdoc/utils/conversation_compaction.py +11 -7
- ripperdoc/utils/file_watch.py +8 -2
- ripperdoc/utils/json_utils.py +2 -1
- ripperdoc/utils/mcp.py +11 -3
- ripperdoc/utils/memory.py +4 -2
- ripperdoc/utils/message_compaction.py +21 -7
- ripperdoc/utils/message_formatting.py +11 -7
- ripperdoc/utils/messages.py +105 -66
- ripperdoc/utils/path_ignore.py +38 -12
- ripperdoc/utils/permissions/path_validation_utils.py +2 -1
- ripperdoc/utils/permissions/shell_command_validation.py +427 -91
- ripperdoc/utils/safe_get_cwd.py +2 -1
- ripperdoc/utils/session_history.py +13 -6
- ripperdoc/utils/todo.py +2 -1
- ripperdoc/utils/token_estimation.py +6 -1
- {ripperdoc-0.2.7.dist-info → ripperdoc-0.2.9.dist-info}/METADATA +24 -3
- ripperdoc-0.2.9.dist-info/RECORD +123 -0
- ripperdoc-0.2.7.dist-info/RECORD +0 -113
- {ripperdoc-0.2.7.dist-info → ripperdoc-0.2.9.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.7.dist-info → ripperdoc-0.2.9.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.7.dist-info → ripperdoc-0.2.9.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.7.dist-info → ripperdoc-0.2.9.dist-info}/top_level.txt +0 -0
ripperdoc/tools/mcp_tools.py
CHANGED
@@ -47,6 +47,7 @@ DEFAULT_MCP_WARNING_FRACTION = 0.8
 # Base class for MCP tools to reduce code duplication
 # =============================================================================
 
+
 class BaseMcpTool(Tool):  # type: ignore[type-arg]
     """Base class for MCP tools with common default implementations.
 
@@ -76,9 +77,7 @@ class BaseMcpTool(Tool):  # type: ignore[type-arg]
         runtime = await ensure_mcp_runtime()
         server_names = {s.name for s in runtime.servers}
         if server_name not in server_names:
-            return ValidationResult(
-                result=False, message=f"Unknown MCP server '{server_name}'."
-            )
+            return ValidationResult(result=False, message=f"Unknown MCP server '{server_name}'.")
         return ValidationResult(result=True)
 
 
@@ -160,7 +159,7 @@ class ListMcpServersTool(BaseMcpTool, Tool[ListMcpServersInput, ListMcpServersOu
     def input_schema(self) -> type[ListMcpServersInput]:
         return ListMcpServersInput
 
-    async def prompt(self,
+    async def prompt(self, _yolo_mode: bool = False) -> str:
         servers = await load_mcp_servers_async()
         return format_mcp_instructions(servers)
 
@@ -243,7 +242,7 @@ class ListMcpResourcesTool(BaseMcpTool, Tool[ListMcpResourcesInput, ListMcpResou
     def input_schema(self) -> type[ListMcpResourcesInput]:
         return ListMcpResourcesInput
 
-    async def prompt(self,
+    async def prompt(self, _yolo_mode: bool = False) -> str:
         return (
             "List available resources from configured MCP servers.\n"
             "Each returned resource will include all standard MCP resource fields plus a 'server' field\n"
@@ -268,7 +267,8 @@ class ListMcpResourcesTool(BaseMcpTool, Tool[ListMcpResourcesInput, ListMcpResou
         except (TypeError, ValueError) as exc:
             logger.warning(
                 "[mcp_tools] Failed to serialize MCP resources for assistant output: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
             )
             return str(output.resources)
 
@@ -314,7 +314,8 @@ class ListMcpResourcesTool(BaseMcpTool, Tool[ListMcpResourcesInput, ListMcpResou
             # pragma: no cover - runtime errors
             logger.warning(
                 "Failed to fetch resources from MCP server: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"server": server.name},
             )
             fetched = []
@@ -394,7 +395,7 @@ class ReadMcpResourceTool(BaseMcpTool, Tool[ReadMcpResourceInput, ReadMcpResourc
     def input_schema(self) -> type[ReadMcpResourceInput]:
         return ReadMcpResourceInput
 
-    async def prompt(self,
+    async def prompt(self, _yolo_mode: bool = False) -> str:
         return (
             "Reads a specific resource from an MCP server, identified by server name and resource URI.\n\n"
             "Parameters:\n"
@@ -482,7 +483,8 @@ class ReadMcpResourceTool(BaseMcpTool, Tool[ReadMcpResourceInput, ReadMcpResourc
         except (ValueError, binascii.Error) as exc:
             logger.warning(
                 "[mcp_tools] Failed to decode base64 blob content: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"server": input_data.server, "uri": input_data.uri},
             )
             raw_bytes = None
@@ -515,7 +517,8 @@ class ReadMcpResourceTool(BaseMcpTool, Tool[ReadMcpResourceInput, ReadMcpResourc
             # pragma: no cover - runtime errors
             logger.warning(
                 "Error reading MCP resource: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"server": input_data.server, "uri": input_data.uri},
             )
             content_text = f"Error reading MCP resource: {exc}"
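A pattern repeated throughout this release: `logger.warning`/`logger.debug` calls whose format strings end in "%s: %s" previously passed only `type(exc).__name__`, so the exception message itself was never interpolated; 0.2.9 adds `exc` as the second argument. The snippet below is a minimal, self-contained sketch of the corrected call; the logger name and the raised exception are illustrative, only the message string is taken from the diff above.

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("mcp_tools_example")

try:
    raise ValueError("resource payload is not JSON-serializable")
except (TypeError, ValueError) as exc:
    # With %-style lazy formatting, a placeholder/argument mismatch is not raised to the
    # caller; the logging module reports an internal "Logging error" and the intended
    # record is lost. Passing both arguments restores "<ExceptionType>: <message>".
    logger.warning(
        "[mcp_tools] Failed to serialize MCP resources for assistant output: %s: %s",
        type(exc).__name__,
        exc,
    )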
ripperdoc/tools/multi_edit_tool.py
CHANGED
@@ -149,7 +149,7 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
         ),
     ]
 
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:
         return MULTI_EDIT_DESCRIPTION
 
     def is_read_only(self) -> bool:
@@ -190,9 +190,7 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
 
         # Check if this is a file creation (first edit has empty old_string)
         is_creation = (
-            not path.exists()
-            and len(input_data.edits) > 0
-            and input_data.edits[0].old_string == ""
+            not path.exists() and len(input_data.edits) > 0 and input_data.edits[0].old_string == ""
         )
 
         # If file exists, check if it has been read before editing
@@ -350,7 +348,8 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
             # pragma: no cover - unlikely permission issue
             logger.warning(
                 "[multi_edit_tool] Error reading file before edits: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"file_path": str(file_path)},
             )
             output = MultiEditToolOutput(
@@ -408,13 +407,15 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
         except (OSError, IOError, RuntimeError) as exc:
             logger.warning(
                 "[multi_edit_tool] Failed to record file snapshot: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"file_path": str(file_path)},
             )
         except (OSError, IOError, PermissionError, UnicodeDecodeError) as exc:
             logger.warning(
                 "[multi_edit_tool] Error writing edited file: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"file_path": str(file_path)},
             )
             output = MultiEditToolOutput(
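The reflowed `is_creation` expression above keeps the same semantics: a MultiEdit call counts as file creation only when the target path does not exist and the first edit replaces an empty `old_string`. A hedged, standalone illustration of that predicate follows; the `Edit` dataclass is a simplified stand-in for the tool's input model, not the package's actual schema.

from dataclasses import dataclass
from pathlib import Path
from typing import List


@dataclass
class Edit:
    old_string: str
    new_string: str


def is_file_creation(path: Path, edits: List[Edit]) -> bool:
    # Mirrors the condition in MultiEditTool: a brand-new file is signalled by a
    # non-existent path plus a first edit whose old_string is empty.
    return not path.exists() and len(edits) > 0 and edits[0].old_string == ""


print(is_file_creation(Path("new_module.py"), [Edit(old_string="", new_string="print('hi')\n")]))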
ripperdoc/tools/notebook_edit_tool.py
CHANGED
@@ -122,7 +122,7 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
         ),
     ]
 
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:
         return NOTEBOOK_EDIT_DESCRIPTION
 
     def is_read_only(self) -> bool:
@@ -204,7 +204,8 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
         except (OSError, json.JSONDecodeError, UnicodeDecodeError) as exc:
             logger.warning(
                 "Failed to parse notebook: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"path": str(path)},
             )
             return ValidationResult(
@@ -325,7 +326,8 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
         except (OSError, IOError, RuntimeError) as exc:
             logger.warning(
                 "[notebook_edit_tool] Failed to record file snapshot: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"file_path": abs_notebook_path},
             )
 
@@ -344,7 +346,8 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
             # pragma: no cover - error path
             logger.warning(
                 "Error editing notebook: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"path": input_data.notebook_path},
             )
             output = NotebookEditOutput(
ripperdoc/tools/skill_tool.py
CHANGED
@@ -82,7 +82,7 @@ class SkillTool(Tool[SkillToolInput, SkillToolOutput]):
         ),
     ]
 
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:  # noqa: ARG002
         return (
             "Load a skill by name to read its SKILL.md content. "
             "Only call this when the skill description is clearly relevant. "
ripperdoc/tools/task_tool.py
CHANGED
@@ -69,8 +69,8 @@ class TaskTool(Tool[TaskToolInput, TaskToolOutput]):
     def input_schema(self) -> type[TaskToolInput]:
         return TaskToolInput
 
-    async def prompt(self,
-        del
+    async def prompt(self, yolo_mode: bool = False) -> str:
+        del yolo_mode
         clear_agent_cache()
         agents: AgentLoadResult = load_agent_definitions()
 
@@ -221,7 +221,7 @@ class TaskTool(Tool[TaskToolInput, TaskToolOutput]):
 
         subagent_context = QueryContext(
             tools=typed_agent_tools,
-
+            yolo_mode=context.yolo_mode,
             verbose=context.verbose,
             model=target_agent.model or "task",
         )
@@ -370,7 +370,8 @@ class TaskTool(Tool[TaskToolInput, TaskToolOutput]):
         except (TypeError, ValueError) as exc:
             logger.warning(
                 "[task_tool] Failed to serialize tool_use input: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"tool_use_input": str(inp)[:200]},
             )
             serialized = str(inp)
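Across the tool modules in this release, `prompt()` gains a `yolo_mode: bool = False` parameter. Tools whose prompt text does not depend on it discard the argument (`del yolo_mode`, an underscore-prefixed `_yolo_mode`, or a `# noqa: ARG002` marker), and TaskTool now also forwards `context.yolo_mode` into the subagent's `QueryContext`. A rough sketch of a tool that accepts and ignores the flag; the class and description constant are simplified stand-ins, not ripperdoc's real `Tool` API.

EXAMPLE_DESCRIPTION = "Does something useful."


class ExampleTool:  # stand-in for a ripperdoc Tool subclass
    async def prompt(self, yolo_mode: bool = False) -> str:
        # The parameter keeps the signature uniform across tools; this prompt does not
        # vary with it, so the argument is explicitly discarded.
        del yolo_mode
        return EXAMPLE_DESCRIPTION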
ripperdoc/tools/todo_tool.py
CHANGED
@@ -309,7 +309,7 @@ class TodoWriteTool(Tool[TodoWriteToolInput, TodoToolOutput]):
         ),
     ]
 
-    async def prompt(self,
+    async def prompt(self, _yolo_mode: bool = False) -> str:
         return TODO_WRITE_PROMPT
 
     def is_read_only(self) -> bool:
@@ -403,7 +403,7 @@ class TodoReadTool(Tool[TodoReadToolInput, TodoToolOutput]):
         ),
     ]
 
-    async def prompt(self,
+    async def prompt(self, _yolo_mode: bool = False) -> str:
         return (
             "Use TodoRead to fetch the current todo list before making progress or when you need "
             "to confirm the next action. You can request only the next actionable item or filter "
ripperdoc/tools/tool_search_tool.py
CHANGED
@@ -106,7 +106,7 @@ class ToolSearchTool(Tool[ToolSearchInput, ToolSearchOutput]):
         ),
     ]
 
-    async def prompt(self,
+    async def prompt(self, yolo_mode: bool = False) -> str:  # noqa: ARG002
         return (
             "Search for a tool by providing a short description (e.g., 'query database', 'render notebook'). "
             "Use names to activate tools you've already discovered. "
@@ -193,7 +193,8 @@ class ToolSearchTool(Tool[ToolSearchInput, ToolSearchOutput]):
             description = ""
             logger.warning(
                 "[tool_search] Failed to build tool description: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"tool_name": getattr(tool, "name", None)},
             )
         doc_text = " ".join([name, tool.user_facing_name(), description])
ripperdoc/utils/conversation_compaction.py
CHANGED
@@ -34,6 +34,7 @@ RECENT_MESSAGES_AFTER_COMPACT = 8
 # Summary Prompt Generation
 # ─────────────────────────────────────────────────────────────────────────────
 
+
 def generate_summary_prompt(additional_instructions: Optional[str] = None) -> str:
     """Generate the system prompt for conversation summarization.
 
@@ -203,9 +204,11 @@ Please continue the conversation from where we left it off without asking the us
 # Data Classes
 # ─────────────────────────────────────────────────────────────────────────────
 
+
 @dataclass
 class CompactionResult:
     """Result of a conversation compaction operation."""
+
     messages: List[ConversationMessage]
     summary_text: str
     continuation_prompt: str
@@ -219,6 +222,7 @@ class CompactionResult:
 @dataclass
 class CompactionError:
     """Error during compaction."""
+
     error_type: str  # "not_enough_messages", "empty_summary", "exception"
     message: str
     exception: Optional[Exception] = None
@@ -325,7 +329,7 @@ async def summarize_conversation(
     user_content = f"{user_prompt}\n\nHere is the conversation to summarize:\n\n{transcript}"
 
     assistant_response = await query_llm(
-        messages=[
+        messages=[create_user_message(user_content)],
         system_prompt=system_prompt,
         tools=[],
         max_thinking_tokens=0,
@@ -346,7 +350,7 @@ async def compact_conversation(
     protocol: str = "anthropic",
     tail_count: int = RECENT_MESSAGES_AFTER_COMPACT,
     attachment_provider: Optional[Callable[[], List[ConversationMessage]]] = None,
-) -> Union[CompactionResult, CompactionError]:
+) -> Union["CompactionResult", "CompactionError"]:
     """Compact a conversation by summarizing and rebuilding.
 
     This is a pure logic function with no UI dependencies.
@@ -373,16 +377,15 @@ async def compact_conversation(
     messages_for_summary = micro.messages
 
     # Summarize the conversation
-
+
     non_progress_messages = [
         m for m in messages_for_summary if getattr(m, "type", "") != "progress"
     ]
     try:
-        summary_text = await summarize_conversation(
-            non_progress_messages, custom_instructions
-        )
+        summary_text = await summarize_conversation(non_progress_messages, custom_instructions)
     except Exception as exc:
         import traceback
+
         logger.warning(
             "[compaction] Error during compaction: %s: %s\n%s",
             type(exc).__name__,
@@ -443,6 +446,7 @@ class ConversationCompactor:
     Deprecated: Use compact_conversation() function directly instead.
     This class is kept for backward compatibility.
     """
+
     # Keep CompactionResult as a nested class for backward compatibility
     CompactionResult = CompactionResult
 
@@ -462,7 +466,7 @@ class ConversationCompactor:
         custom_instructions: str,
         protocol: str = "anthropic",
         tail_count: int = RECENT_MESSAGES_AFTER_COMPACT,
-    ) -> Optional[CompactionResult]:
+    ) -> Optional["CompactionResult"]:  # type: ignore[valid-type]
         """Compact the conversation. Returns None on error."""
         result = await compact_conversation(
             messages=messages,
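Per its updated signature, `compact_conversation()` returns either a `CompactionResult` (carrying `messages`, `summary_text`, and `continuation_prompt`) or a `CompactionError` (carrying `error_type`, `message`, and optionally `exception`). The sketch below shows one plausible way to branch on that union; the keyword arguments are inferred from the hunks above and the `history` handling is illustrative, so treat it as an approximation rather than the package's documented API.

from ripperdoc.utils.conversation_compaction import (
    CompactionError,
    compact_conversation,
)


async def maybe_compact(history, instructions: str = ""):
    result = await compact_conversation(
        messages=history,
        custom_instructions=instructions,
        protocol="anthropic",
    )
    if isinstance(result, CompactionError):
        # error_type is one of "not_enough_messages", "empty_summary", "exception".
        print(f"compaction skipped: {result.error_type}: {result.message}")
        return history
    # On success, continue with the rebuilt message list; the summary stays available.
    print(f"summary length: {len(result.summary_text)}")
    return result.messages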
ripperdoc/utils/file_watch.py
CHANGED
@@ -102,10 +102,16 @@ def detect_changed_files(
 
     try:
         new_content = _read_portion(file_path, snapshot.offset, snapshot.limit)
-    except (
+    except (
+        OSError,
+        IOError,
+        UnicodeDecodeError,
+        ValueError,
+    ) as exc:  # pragma: no cover - best-effort telemetry
         logger.warning(
             "[file_watch] Failed reading changed file: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"file_path": file_path},
         )
         notices.append(
ripperdoc/utils/json_utils.py
CHANGED
@@ -21,7 +21,8 @@ def safe_parse_json(json_text: Optional[str], log_error: bool = True) -> Optiona
         if log_error:
             logger.debug(
                 "[json_utils] Failed to parse JSON: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"length": len(json_text)},
             )
         return None
ripperdoc/utils/mcp.py
CHANGED
@@ -92,7 +92,8 @@ def _ensure_str_dict(raw: object) -> Dict[str, str]:
         except (TypeError, ValueError) as exc:
             logger.warning(
                 "[mcp] Failed to coerce env/header value to string: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"key": key},
             )
             continue
@@ -365,10 +366,17 @@ class McpRuntime:
                     "capabilities": list(info.capabilities.keys()),
                 },
             )
-        except (
+        except (
+            OSError,
+            RuntimeError,
+            ConnectionError,
+            ValueError,
+            TimeoutError,
+        ) as exc:  # pragma: no cover - network/process errors
             logger.warning(
                 "Failed to connect to MCP server: %s: %s",
-                type(exc).__name__,
+                type(exc).__name__,
+                exc,
                 extra={"server": config.name},
             )
             info.status = "failed"
ripperdoc/utils/memory.py
CHANGED
@@ -48,7 +48,8 @@ def _is_path_under_directory(path: Path, directory: Path) -> bool:
     except (ValueError, OSError) as exc:
         logger.warning(
             "[memory] Failed to compare path containment: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"path": str(path), "directory": str(directory)},
         )
         return False
@@ -126,7 +127,8 @@ def _collect_files(
     except (OSError, ValueError) as exc:
         logger.warning(
             "[memory] Failed to resolve memory file path: %s: %s",
-            type(exc).__name__,
+            type(exc).__name__,
+            exc,
             extra={"path": str(resolved_path)},
         )
 
ripperdoc/utils/message_compaction.py
CHANGED
@@ -27,7 +27,7 @@ MAX_TOKENS_SOFT = 20_000
 MAX_TOKENS_HARD = 40_000
 MAX_TOOL_USES_TO_PRESERVE = 3
 IMAGE_TOKEN_COST = 2_000
-AUTO_COMPACT_BUFFER =
+AUTO_COMPACT_BUFFER = 24_000
 WARNING_THRESHOLD = 20_000
 ERROR_THRESHOLD = 20_000
 MICRO_PLACEHOLDER = "[Old tool result content cleared]"
@@ -270,7 +270,9 @@ def get_remaining_context_tokens(
     """Context window minus configured output tokens."""
     context_limit = max(get_model_context_limit(model_profile, explicit_limit), MIN_CONTEXT_TOKENS)
     try:
-        max_output_tokens =
+        max_output_tokens = (
+            int(getattr(model_profile, "max_tokens", 0) or 0) if model_profile else 0
+        )
     except (TypeError, ValueError):
         max_output_tokens = 0
     return max(MIN_CONTEXT_TOKENS, context_limit - max(0, max_output_tokens))
@@ -298,7 +300,9 @@ def get_context_usage_status(
     )
 
     tokens_left = max(effective_limit - used_tokens, 0)
-    percent_left =
+    percent_left = (
+        0.0 if effective_limit <= 0 else min(100.0, (tokens_left / effective_limit) * 100)
+    )
     percent_used = 100.0 - percent_left
 
     warning_limit = max(0, effective_limit - WARNING_THRESHOLD)
@@ -419,7 +423,9 @@ def _estimate_message_tokens(content_block: Any) -> int:
     if isinstance(content, list):
         total = 0
         for part in content:
-            part_type = getattr(part, "type", None) or (
+            part_type = getattr(part, "type", None) or (
+                part.get("type") if isinstance(part, dict) else None
+            )
             if part_type == "text":
                 text_val = getattr(part, "text", None) if hasattr(part, "text") else None
                 if text_val is None and isinstance(part, dict):
@@ -501,7 +507,9 @@ def micro_compact_messages(
         token_counts_by_tool_use_id[tool_use_id] = token_count
 
     latest_tool_use_ids = (
-        tool_use_ids_to_compact[-MAX_TOOL_USES_TO_PRESERVE:]
+        tool_use_ids_to_compact[-MAX_TOOL_USES_TO_PRESERVE:]
+        if MAX_TOOL_USES_TO_PRESERVE > 0
+        else []
     )
     total_token_count = sum(token_counts_by_tool_use_id.values())
 
@@ -525,7 +533,9 @@ def micro_compact_messages(
         messages, protocol=protocol, precomputed_total_tokens=tokens_before
     )
     status = get_context_usage_status(
-        usage_tokens,
+        usage_tokens,
+        max_context_tokens=context_limit,
+        auto_compact_enabled=resolved_auto_compact,
    )
    if not status.is_above_warning_threshold or total_tokens_removed < MAX_TOKENS_SOFT:
        ids_to_remove.clear()
@@ -571,7 +581,11 @@ def micro_compact_messages(
             new_block = content_item.model_copy()
             new_block.text = MICRO_PLACEHOLDER
         else:
-            block_dict =
+            block_dict = (
+                dict(content_item)
+                if isinstance(content_item, dict)
+                else {"type": "tool_result"}
+            )
             block_dict["text"] = MICRO_PLACEHOLDER
             block_dict["tool_use_id"] = tool_use_id
             new_block = MessageContent(**block_dict)
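The two expressions completed above define the context-budget arithmetic: the effective window is the model's context limit minus its configured maximum output tokens (floored at `MIN_CONTEXT_TOKENS`), and `percent_left` is clamped to the 0–100 range even when the limit is zero. Below is a small numeric walk-through of the same formulas with made-up constants; the real values of `MIN_CONTEXT_TOKENS` and the model profile come from the package configuration, not from this sketch.

MIN_CONTEXT_TOKENS = 16_000   # illustrative floor, not the package's constant
context_limit = 200_000       # e.g. a large-context model profile
max_output_tokens = 8_192     # model_profile.max_tokens in the real code

effective_limit = max(MIN_CONTEXT_TOKENS, context_limit - max(0, max_output_tokens))

used_tokens = 150_000
tokens_left = max(effective_limit - used_tokens, 0)
percent_left = 0.0 if effective_limit <= 0 else min(100.0, (tokens_left / effective_limit) * 100)
percent_used = 100.0 - percent_left

print(effective_limit, tokens_left, round(percent_left, 1), round(percent_used, 1))
# 191808 41808 21.8 78.2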
ripperdoc/utils/message_formatting.py
CHANGED
@@ -12,9 +12,7 @@ from ripperdoc.utils.messages import UserMessage, AssistantMessage, ProgressMess
 ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]
 
 
-def stringify_message_content(
-    content: Any, *, include_tool_details: bool = False
-) -> str:
+def stringify_message_content(content: Any, *, include_tool_details: bool = False) -> str:
     """Convert message content to plain string.
 
     Args:
@@ -128,14 +126,16 @@ def format_tool_result_detail(result_text: str, is_error: bool = False) -> str:
     return f"{prefix}: {result_preview}"
 
 
-def format_reasoning_preview(reasoning: Any) -> str:
+def format_reasoning_preview(reasoning: Any, show_full_thinking: bool = False) -> str:
     """Return a short preview of reasoning/thinking content.
 
     Args:
         reasoning: The reasoning content (string, list, or other).
+        show_full_thinking: If True, return full reasoning content without truncation.
+            If False, return a truncated preview (max 250 chars).
 
     Returns:
-        A short preview string
+        A short preview string or full reasoning content.
     """
     if reasoning is None:
         return ""
@@ -151,11 +151,15 @@ def format_reasoning_preview(reasoning: Any) -> str:
         text = "\n".join(p for p in parts if p)
     else:
         text = str(reasoning)
+
+    if show_full_thinking:
+        return text
+
     lines = text.strip().splitlines()
     if not lines:
         return ""
-    preview = lines[0][:
-    if len(lines) > 1 or len(lines[0]) >
+    preview = lines[0][:250]
+    if len(lines) > 1 or len(lines[0]) > 250:
         preview += "..."
     return preview
 
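With the change above, `format_reasoning_preview()` keeps its previous default behavior (first line only, capped at 250 characters, with "..." appended when content is cut) and returns the untruncated text when `show_full_thinking=True`. A short usage sketch under that reading of the diff; the reasoning string is invented for illustration.

from ripperdoc.utils.message_formatting import format_reasoning_preview

reasoning = "First I checked the failing test.\nThen I traced the call into the parser."

# Default: only the first line, with "..." because more content was cut.
print(format_reasoning_preview(reasoning))

# Full thinking: both lines, no truncation.
print(format_reasoning_preview(reasoning, show_full_thinking=True))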