ripperdoc 0.2.3-py3-none-any.whl → 0.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/__main__.py +0 -5
- ripperdoc/cli/cli.py +37 -16
- ripperdoc/cli/commands/__init__.py +2 -0
- ripperdoc/cli/commands/agents_cmd.py +12 -9
- ripperdoc/cli/commands/compact_cmd.py +7 -3
- ripperdoc/cli/commands/context_cmd.py +35 -15
- ripperdoc/cli/commands/doctor_cmd.py +27 -14
- ripperdoc/cli/commands/exit_cmd.py +1 -1
- ripperdoc/cli/commands/mcp_cmd.py +13 -8
- ripperdoc/cli/commands/memory_cmd.py +5 -5
- ripperdoc/cli/commands/models_cmd.py +47 -16
- ripperdoc/cli/commands/permissions_cmd.py +302 -0
- ripperdoc/cli/commands/resume_cmd.py +1 -2
- ripperdoc/cli/commands/tasks_cmd.py +24 -13
- ripperdoc/cli/ui/rich_ui.py +523 -396
- ripperdoc/cli/ui/tool_renderers.py +298 -0
- ripperdoc/core/agents.py +172 -4
- ripperdoc/core/config.py +130 -6
- ripperdoc/core/default_tools.py +13 -2
- ripperdoc/core/permissions.py +20 -14
- ripperdoc/core/providers/__init__.py +31 -15
- ripperdoc/core/providers/anthropic.py +122 -8
- ripperdoc/core/providers/base.py +93 -15
- ripperdoc/core/providers/gemini.py +539 -96
- ripperdoc/core/providers/openai.py +371 -26
- ripperdoc/core/query.py +301 -62
- ripperdoc/core/query_utils.py +51 -7
- ripperdoc/core/skills.py +295 -0
- ripperdoc/core/system_prompt.py +79 -67
- ripperdoc/core/tool.py +15 -6
- ripperdoc/sdk/client.py +14 -1
- ripperdoc/tools/ask_user_question_tool.py +431 -0
- ripperdoc/tools/background_shell.py +82 -26
- ripperdoc/tools/bash_tool.py +356 -209
- ripperdoc/tools/dynamic_mcp_tool.py +428 -0
- ripperdoc/tools/enter_plan_mode_tool.py +226 -0
- ripperdoc/tools/exit_plan_mode_tool.py +153 -0
- ripperdoc/tools/file_edit_tool.py +53 -10
- ripperdoc/tools/file_read_tool.py +17 -7
- ripperdoc/tools/file_write_tool.py +49 -13
- ripperdoc/tools/glob_tool.py +10 -9
- ripperdoc/tools/grep_tool.py +182 -51
- ripperdoc/tools/ls_tool.py +6 -6
- ripperdoc/tools/mcp_tools.py +172 -413
- ripperdoc/tools/multi_edit_tool.py +49 -9
- ripperdoc/tools/notebook_edit_tool.py +57 -13
- ripperdoc/tools/skill_tool.py +205 -0
- ripperdoc/tools/task_tool.py +91 -9
- ripperdoc/tools/todo_tool.py +12 -12
- ripperdoc/tools/tool_search_tool.py +5 -6
- ripperdoc/utils/coerce.py +34 -0
- ripperdoc/utils/context_length_errors.py +252 -0
- ripperdoc/utils/file_watch.py +5 -4
- ripperdoc/utils/json_utils.py +4 -4
- ripperdoc/utils/log.py +3 -3
- ripperdoc/utils/mcp.py +82 -22
- ripperdoc/utils/memory.py +9 -6
- ripperdoc/utils/message_compaction.py +19 -16
- ripperdoc/utils/messages.py +73 -8
- ripperdoc/utils/path_ignore.py +677 -0
- ripperdoc/utils/permissions/__init__.py +7 -1
- ripperdoc/utils/permissions/path_validation_utils.py +5 -3
- ripperdoc/utils/permissions/shell_command_validation.py +496 -18
- ripperdoc/utils/prompt.py +1 -1
- ripperdoc/utils/safe_get_cwd.py +5 -2
- ripperdoc/utils/session_history.py +38 -19
- ripperdoc/utils/todo.py +6 -2
- ripperdoc/utils/token_estimation.py +34 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/METADATA +14 -1
- ripperdoc-0.2.5.dist-info/RECORD +107 -0
- ripperdoc-0.2.3.dist-info/RECORD +0 -95
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.3.dist-info → ripperdoc-0.2.5.dist-info}/top_level.txt +0 -0
ripperdoc/tools/multi_edit_tool.py
CHANGED
@@ -4,6 +4,7 @@ Allows performing multiple exact string replacements in a single file atomically
 """
 
 import difflib
+import os
 from pathlib import Path
 from typing import AsyncGenerator, Optional, List
 from textwrap import dedent
@@ -168,6 +169,7 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
         path = Path(input_data.file_path).expanduser()
         if not path.is_absolute():
             path = Path.cwd() / path
+        resolved_path = str(path.resolve())
 
         # Ensure edits differ.
         for edit in input_data.edits:
@@ -175,6 +177,7 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
                 return ValidationResult(
                     result=False,
                     message="old_string and new_string must be different",
+                    error_code=1,
                 )
 
         # If the file exists, ensure it is not a directory.
@@ -182,8 +185,41 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
             return ValidationResult(
                 result=False,
                 message=f"Path is a directory, not a file: {path}",
+                error_code=2,
             )
 
+        # Check if this is a file creation (first edit has empty old_string)
+        is_creation = (
+            not path.exists()
+            and len(input_data.edits) > 0
+            and input_data.edits[0].old_string == ""
+        )
+
+        # If file exists, check if it has been read before editing
+        if path.exists() and not is_creation:
+            file_state_cache = getattr(context, "file_state_cache", {}) if context else {}
+            file_snapshot = file_state_cache.get(resolved_path)
+
+            if not file_snapshot:
+                return ValidationResult(
+                    result=False,
+                    message="File has not been read yet. Read it first before editing.",
+                    error_code=3,
+                )
+
+            # Check if file has been modified since it was read
+            try:
+                current_mtime = os.path.getmtime(resolved_path)
+                if current_mtime > file_snapshot.timestamp:
+                    return ValidationResult(
+                        result=False,
+                        message="File has been modified since read, either by the user or by a linter. "
+                        "Read it again before attempting to edit it.",
+                        error_code=4,
+                    )
+            except OSError:
+                pass  # File mtime check failed, proceed anyway
+
         return ValidationResult(result=True)
 
     def render_result_for_assistant(self, output: MultiEditToolOutput) -> str:
@@ -310,9 +346,11 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
         try:
             if existing:
                 original_content = file_path.read_text(encoding="utf-8")
-        except
-
-
+        except (OSError, IOError, PermissionError) as exc:
+            # pragma: no cover - unlikely permission issue
+            logger.warning(
+                "[multi_edit_tool] Error reading file before edits: %s: %s",
+                type(exc).__name__, exc,
                 extra={"file_path": str(file_path)},
             )
             output = MultiEditToolOutput(
@@ -367,14 +405,16 @@ class MultiEditTool(Tool[MultiEditToolInput, MultiEditToolOutput]):
                     updated_content,
                     getattr(context, "file_state_cache", {}),
                 )
-            except
-                logger.
-                    "[multi_edit_tool] Failed to record file snapshot",
+            except (OSError, IOError, RuntimeError) as exc:
+                logger.warning(
+                    "[multi_edit_tool] Failed to record file snapshot: %s: %s",
+                    type(exc).__name__, exc,
                     extra={"file_path": str(file_path)},
                 )
-        except
-            logger.
-                "[multi_edit_tool] Error writing edited file",
+        except (OSError, IOError, PermissionError, UnicodeDecodeError) as exc:
+            logger.warning(
+                "[multi_edit_tool] Error writing edited file: %s: %s",
+                type(exc).__name__, exc,
                 extra={"file_path": str(file_path)},
             )
             output = MultiEditToolOutput(
ripperdoc/tools/notebook_edit_tool.py
CHANGED
@@ -4,6 +4,7 @@ Allows performing insert/replace/delete operations on Jupyter notebook cells.
 """
 
 import json
+import os
 import random
 import string
 from pathlib import Path
@@ -137,39 +138,79 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
         self, input_data: NotebookEditInput, context: Optional[ToolUseContext] = None
     ) -> ValidationResult:
         path = _resolve_path(input_data.notebook_path)
+        resolved_path = str(path.resolve())
 
         if not path.exists():
-            return ValidationResult(
+            return ValidationResult(
+                result=False,
+                message="Notebook file does not exist.",
+                error_code=1,
+            )
         if path.suffix != ".ipynb":
             return ValidationResult(
                 result=False,
                 message="File must be a Jupyter notebook (.ipynb file). Use Edit for other file types.",
+                error_code=2,
             )
 
         mode = (input_data.edit_mode or "replace").lower()
         if mode not in {"replace", "insert", "delete"}:
             return ValidationResult(
-                result=False,
+                result=False,
+                message="edit_mode must be replace, insert, or delete.",
+                error_code=3,
             )
         if mode == "insert" and not input_data.cell_type:
             return ValidationResult(
                 result=False,
                 message="cell_type is required when using edit_mode=insert.",
+                error_code=4,
             )
         if mode != "insert" and not input_data.cell_id:
             return ValidationResult(
                 result=False,
                 message="cell_id must be specified when using edit_mode=replace or delete.",
+                error_code=5,
+            )
+
+        # Check if file has been read before editing
+        file_state_cache = getattr(context, "file_state_cache", {}) if context else {}
+        file_snapshot = file_state_cache.get(resolved_path)
+
+        if not file_snapshot:
+            return ValidationResult(
+                result=False,
+                message="Notebook has not been read yet. Read it first before editing.",
+                error_code=6,
             )
 
+        # Check if file has been modified since it was read
+        try:
+            current_mtime = os.path.getmtime(resolved_path)
+            if current_mtime > file_snapshot.timestamp:
+                return ValidationResult(
+                    result=False,
+                    message="Notebook has been modified since read, either by the user or by a linter. "
+                    "Read it again before attempting to edit it.",
+                    error_code=7,
+                )
+        except OSError:
+            pass  # File mtime check failed, proceed anyway
+
         # Validate notebook structure and target cell.
         try:
             raw = path.read_text(encoding="utf-8")
             nb_json = json.loads(raw)
-        except
-            logger.
+        except (OSError, json.JSONDecodeError, UnicodeDecodeError) as exc:
+            logger.warning(
+                "Failed to parse notebook: %s: %s",
+                type(exc).__name__, exc,
+                extra={"path": str(path)},
+            )
             return ValidationResult(
-                result=False,
+                result=False,
+                message="Notebook is not valid JSON.",
+                error_code=8,
             )
 
         cells = nb_json.get("cells", [])
@@ -180,7 +221,7 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
             return ValidationResult(
                 result=False,
                 message=f"Cell '{input_data.cell_id}' not found in notebook.",
-                error_code=
+                error_code=9,
             )
 
         return ValidationResult(result=True)
@@ -279,9 +320,10 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
                 json.dumps(nb_json, indent=1),
                 getattr(context, "file_state_cache", {}),
             )
-        except
-            logger.
-                "[notebook_edit_tool] Failed to record file snapshot",
+        except (OSError, IOError, RuntimeError) as exc:
+            logger.warning(
+                "[notebook_edit_tool] Failed to record file snapshot: %s: %s",
+                type(exc).__name__, exc,
                 extra={"file_path": input_data.notebook_path},
             )
 
@@ -296,10 +338,12 @@ class NotebookEditTool(Tool[NotebookEditInput, NotebookEditOutput]):
             yield ToolResult(
                 data=output, result_for_assistant=self.render_result_for_assistant(output)
             )
-        except
-
-
-
+        except (OSError, json.JSONDecodeError, ValueError, KeyError) as exc:
+            # pragma: no cover - error path
+            logger.warning(
+                "Error editing notebook: %s: %s",
+                type(exc).__name__, exc,
+                extra={"path": input_data.notebook_path},
            )
            output = NotebookEditOutput(
                new_source=new_source,
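
Both edit tools above now apply the same pre-edit guard: the target file must have an entry in the context's file_state_cache (keyed by the resolved path), and its mtime must not be newer than the cached read timestamp. Below is a minimal standalone sketch of that guard; FileSnapshot and the cache dict are illustrative stand-ins for ripperdoc's real cache entries, which (per the diff) only need a timestamp attribute.

```python
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional


@dataclass
class FileSnapshot:
    """Illustrative stand-in for a cached read record; only the timestamp is used here."""

    timestamp: float


def stale_read_error(file_path: str, cache: Dict[str, FileSnapshot]) -> Optional[str]:
    """Return an error message if the file was never read, or changed since the read."""
    resolved = str(Path(file_path).expanduser().resolve())
    snapshot = cache.get(resolved)
    if snapshot is None:
        return "File has not been read yet. Read it first before editing."
    try:
        if os.path.getmtime(resolved) > snapshot.timestamp:
            return "File has been modified since read. Read it again before editing."
    except OSError:
        pass  # mtime unavailable; the real tools also proceed in this case
    return None
```

The real validators return ValidationResult objects with per-failure error_code values (3/4 in MultiEditTool, 6/7 in NotebookEditTool) rather than plain strings.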
ripperdoc/tools/skill_tool.py
ADDED
@@ -0,0 +1,205 @@
+"""Skill loader tool.
+
+Loads SKILL.md content from .ripperdoc/skills or ~/.ripperdoc/skills so the
+assistant can pull in specialized instructions only when needed.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import AsyncGenerator, List, Optional
+
+from pydantic import BaseModel, Field
+
+from ripperdoc.core.skills import SkillDefinition, find_skill
+from ripperdoc.core.tool import (
+    Tool,
+    ToolOutput,
+    ToolResult,
+    ToolUseContext,
+    ToolUseExample,
+    ValidationResult,
+)
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
+
+
+class SkillToolInput(BaseModel):
+    """Input schema for the Skill tool."""
+
+    skill: str = Field(description='The skill name (e.g. "pdf-processing").')
+
+
+class SkillToolOutput(BaseModel):
+    """Structured output for a loaded skill."""
+
+    success: bool = True
+    skill: str
+    description: str
+    location: str
+    base_dir: str
+    path: str
+    allowed_tools: List[str] = Field(default_factory=list)
+    model: Optional[str] = None
+    max_thinking_tokens: Optional[int] = None
+    skill_type: str = "prompt"
+    disable_model_invocation: bool = False
+    content: str
+
+
+class SkillTool(Tool[SkillToolInput, SkillToolOutput]):
+    """Load a skill's instructions by name."""
+
+    def __init__(self, project_path: Optional[Path] = None, home: Optional[Path] = None) -> None:
+        self._project_path = project_path
+        self._home = home
+
+    @property
+    def name(self) -> str:
+        return "Skill"
+
+    async def description(self) -> str:
+        return (
+            "Execute a skill by name to load its SKILL.md instructions. "
+            "Use this only when the skill description clearly matches the user's request. "
+            "Skill metadata may include allowed-tools, model, or max-thinking-tokens hints."
+        )
+
+    @property
+    def input_schema(self) -> type[SkillToolInput]:
+        return SkillToolInput
+
+    def input_examples(self) -> List[ToolUseExample]:
+        return [
+            ToolUseExample(
+                description="Load PDF processing guidance",
+                example={"skill": "pdf-processing"},
+            ),
+            ToolUseExample(
+                description="Load commit message helper instructions",
+                example={"skill": "generating-commit-messages"},
+            ),
+        ]
+
+    async def prompt(self, safe_mode: bool = False) -> str:  # noqa: ARG002
+        return (
+            "Load a skill by name to read its SKILL.md content. "
+            "Only call this when the skill description is clearly relevant. "
+            "If the skill specifies allowed-tools, model, or max-thinking-tokens in frontmatter, "
+            "assume those hints apply for subsequent reasoning. "
+            "Skill files may reference additional files under the same directory; "
+            "use file tools to read them if needed."
+        )
+
+    def is_read_only(self) -> bool:
+        return True
+
+    def is_concurrency_safe(self) -> bool:
+        return False
+
+    def needs_permissions(self, input_data: Optional[SkillToolInput] = None) -> bool:  # noqa: ARG002
+        return False
+
+    async def validate_input(
+        self,
+        input_data: SkillToolInput,
+        context: Optional[ToolUseContext] = None,  # noqa: ARG002
+    ) -> ValidationResult:
+        skill_name = (input_data.skill or "").strip().lstrip("/")
+        if not skill_name:
+            return ValidationResult(
+                result=False, message="Provide a skill name to load.", error_code=1
+            )
+        skill = find_skill(skill_name, project_path=self._project_path, home=self._home)
+        if not skill:
+            return ValidationResult(
+                result=False, message=f"Unknown skill: {skill_name}", error_code=2
+            )
+        if skill.disable_model_invocation:
+            return ValidationResult(
+                result=False,
+                message=f"Skill {skill_name} is blocked by disable-model-invocation.",
+                error_code=4,
+            )
+        if skill.skill_type and skill.skill_type != "prompt":
+            return ValidationResult(
+                result=False,
+                message=f"Skill {skill_name} is not a prompt-based skill (type={skill.skill_type}).",
+                error_code=5,
+                meta={"skill_type": skill.skill_type},
+            )
+        return ValidationResult(result=True)
+
+    def _render_result(self, skill: SkillDefinition) -> str:
+        allowed = ", ".join(skill.allowed_tools) if skill.allowed_tools else "no specific limit"
+        model_hint = f"\nModel hint: {skill.model}" if skill.model else ""
+        max_tokens = (
+            f"\nMax thinking tokens hint: {skill.max_thinking_tokens}"
+            if skill.max_thinking_tokens is not None
+            else ""
+        )
+        lines = [
+            f"Skill loaded: {skill.name} ({skill.location.value})",
+            f"Description: {skill.description}",
+            f"Skill directory: {skill.base_dir}",
+            f"Allowed tools (if specified): {allowed}{model_hint}{max_tokens}",
+            "SKILL.md content:",
+            skill.content,
+        ]
+        return "\n".join(lines)
+
+    def _to_output(self, skill: SkillDefinition) -> SkillToolOutput:
+        return SkillToolOutput(
+            success=True,
+            skill=skill.name,
+            description=skill.description,
+            location=skill.location.value,
+            base_dir=str(skill.base_dir),
+            path=str(skill.path),
+            allowed_tools=list(skill.allowed_tools),
+            model=skill.model,
+            max_thinking_tokens=skill.max_thinking_tokens,
+            skill_type=skill.skill_type,
+            disable_model_invocation=skill.disable_model_invocation,
+            content=skill.content,
+        )
+
+    async def call(
+        self, input_data: SkillToolInput, context: ToolUseContext
+    ) -> AsyncGenerator[ToolOutput, None]:  # noqa: ARG002
+        skill_name = (input_data.skill or "").strip().lstrip("/")
+        skill = find_skill(skill_name, project_path=self._project_path, home=self._home)
+        if not skill:
+            error_text = (
+                f"Skill '{skill_name}' not found. Ensure it exists under "
+                "~/.ripperdoc/skills or ./.ripperdoc/skills."
+            )
+            yield ToolResult(data={"error": error_text}, result_for_assistant=error_text)
+            return
+        if skill.allowed_tools and context.tool_registry is not None:
+            # Ensure preferred tools for this skill are activated in the registry.
+            context.tool_registry.activate_tools(skill.allowed_tools)
+
+        output = self._to_output(skill)
+        yield ToolResult(data=output, result_for_assistant=self._render_result(skill))
+
+    def render_result_for_assistant(self, output: SkillToolOutput) -> str:
+        allowed = ", ".join(output.allowed_tools) if output.allowed_tools else "no specific limit"
+        model_hint = f"\nModel hint: {output.model}" if output.model else ""
+        max_tokens = (
+            f"\nMax thinking tokens hint: {output.max_thinking_tokens}"
+            if output.max_thinking_tokens is not None
+            else ""
+        )
+        return (
+            f"Skill loaded: {output.skill} ({output.location})\n"
+            f"Description: {output.description}\n"
+            f"Skill directory: {output.base_dir}\n"
+            f"Allowed tools (if specified): {allowed}{model_hint}{max_tokens}\n"
+            "SKILL.md content:\n"
+            f"{output.content}"
+        )
+
+    def render_tool_use_message(self, input_data: SkillToolInput, verbose: bool = False) -> str:  # noqa: ARG002
+        return f"Load skill '{input_data.skill}'"
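
For context on how the new Skill tool resolves names: it delegates to find_skill from ripperdoc/core/skills.py (also added in this release, +295 lines, not shown in this excerpt), searching ./.ripperdoc/skills and ~/.ripperdoc/skills. A small sketch of driving that lookup directly, using only names visible in the diff above; the on-disk layout (one SKILL.md per skill directory) is an assumption, since skills.py itself is not included here.

```python
from pathlib import Path

from ripperdoc.core.skills import find_skill

# Assumed layout: ./.ripperdoc/skills/<skill-name>/SKILL.md (defined in skills.py, not shown).
skill = find_skill("pdf-processing", project_path=Path.cwd(), home=Path.home())
if skill is None:
    print("Skill 'pdf-processing' not found under ./.ripperdoc/skills or ~/.ripperdoc/skills.")
else:
    # Fields mirror SkillToolOutput above: name, description, location, base_dir,
    # allowed_tools, model, max_thinking_tokens, skill_type, content.
    print(f"Skill loaded: {skill.name} ({skill.location.value})")
    print(skill.content)
```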
ripperdoc/tools/task_tool.py
CHANGED
@@ -10,6 +10,9 @@ from pydantic import BaseModel, Field
 from ripperdoc.core.agents import (
     AgentDefinition,
     AgentLoadResult,
+    FILE_EDIT_TOOL_NAME,
+    GREP_TOOL_NAME,
+    VIEW_TOOL_NAME,
     clear_agent_cache,
     load_agent_definitions,
     resolve_agent_tools,
@@ -70,12 +73,90 @@ class TaskTool(Tool[TaskToolInput, TaskToolOutput]):
         del safe_mode
         clear_agent_cache()
         agents: AgentLoadResult = load_agent_definitions()
-
+
+        agent_lines: List[str] = []
+        for agent in agents.active_agents:
+            properties = (
+                "Properties: access to current context; "
+                if getattr(agent, "fork_context", False)
+                else ""
+            )
+            tools_label = "All tools"
+            if getattr(agent, "tools", None):
+                tools_label = "All tools" if "*" in agent.tools else ", ".join(agent.tools)
+            agent_lines.append(
+                f"- {agent.agent_type}: {agent.when_to_use} ({properties}Tools: {tools_label})"
+            )
+
+        agent_block = "\n".join(agent_lines) or "- general-purpose (built-in)"
+
+        task_tool_name = self.name
+        file_read_tool_name = VIEW_TOOL_NAME
+        search_tool_name = GREP_TOOL_NAME
+        code_tool_name = FILE_EDIT_TOOL_NAME
+        background_fetch_tool_name = task_tool_name
+
         return (
-            "
-            "
-            "
-            f"
+            f"Launch a new agent to handle complex, multi-step tasks autonomously. \n\n"
+            f"The {task_tool_name} tool launches specialized agents (subprocesses) that autonomously handle complex tasks. Each agent type has specific capabilities and tools available to it.\n\n"
+            f"Available agent types and the tools they have access to:\n"
+            f"{agent_block}\n\n"
+            f"When using the {task_tool_name} tool, you must specify a subagent_type parameter to select which agent type to use.\n\n"
+            f"When NOT to use the {task_tool_name} tool:\n"
+            f"- If you want to read a specific file path, use the {file_read_tool_name} or {search_tool_name} tool instead of the {task_tool_name} tool, to find the match more quickly\n"
+            f'- If you are searching for a specific class definition like "class Foo", use the {search_tool_name} tool instead, to find the match more quickly\n'
+            f"- If you are searching for code within a specific file or set of 2-3 files, use the {file_read_tool_name} tool instead of the {task_tool_name} tool, to find the match more quickly\n"
+            "- Other tasks that are not related to the agent descriptions above\n"
+            "\n"
+            "\n"
+            "Usage notes:\n"
+            "- Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses\n"
+            "- When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.\n"
+            f"- You can optionally run agents in the background using the run_in_background parameter. When an agent runs in the background, you will need to use {background_fetch_tool_name} to retrieve its results once it's done. You can continue to work while background agents run - When you need their results to continue you can use {background_fetch_tool_name} in blocking mode to pause and wait for their results.\n"
+            "- Agents can be resumed using the `resume` parameter by passing the agent ID from a previous invocation. When resumed, the agent continues with its full previous context preserved. When NOT resuming, each invocation starts fresh and you should provide a detailed task description with all necessary context.\n"
+            "- When the agent is done, it will return a single message back to you along with its agent ID. You can use this ID to resume the agent later if needed for follow-up work.\n"
+            "- Provide clear, detailed prompts so the agent can work autonomously and return exactly the information you need.\n"
+            '- Agents with "access to current context" can see the full conversation history before the tool call. When using these agents, you can write concise prompts that reference earlier context (e.g., "investigate the error discussed above") instead of repeating information. The agent will receive all prior messages and understand the context.\n'
+            "- The agent's outputs should generally be trusted\n"
+            "- Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent\n"
+            "- If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.\n"
+            f'- If the user specifies that they want you to run agents "in parallel", you MUST send a single message with multiple {task_tool_name} tool use content blocks. For example, if you need to launch both a code-reviewer agent and a test-runner agent in parallel, send a single message with both tool calls.\n'
+            "\n"
+            "Example usage:\n"
+            "\n"
+            "<example_agent_descriptions>\n"
+            '"code-reviewer": use this agent after you are done writing a signficant piece of code\n'
+            '"greeting-responder": use this agent when to respond to user greetings with a friendly joke\n'
+            "</example_agent_description>\n"
+            "\n"
+            "<example>\n"
+            'user: "Please write a function that checks if a number is prime"\n'
+            "assistant: Sure let me write a function that checks if a number is prime\n"
+            f"assistant: First let me use the {code_tool_name} tool to write a function that checks if a number is prime\n"
+            f"assistant: I'm going to use the {code_tool_name} tool to write the following code:\n"
+            "<code>\n"
+            "function isPrime(n) {\n"
+            "  if (n <= 1) return false\n"
+            "  for (let i = 2; i * i <= n; i++) {\n"
+            "    if (n % i === 0) return false\n"
+            "  }\n"
+            "  return true\n"
+            "}\n"
+            "</code>\n"
+            "<commentary>\n"
+            "Since a signficant piece of code was written and the task was completed, now use the code-reviewer agent to review the code\n"
+            "</commentary>\n"
+            "assistant: Now let me use the code-reviewer agent to review the code\n"
+            f"assistant: Uses the {task_tool_name} tool to launch the code-reviewer agent \n"
+            "</example>\n"
+            "\n"
+            "<example>\n"
+            'user: "Hello"\n'
+            "<commentary>\n"
+            "Since the user is greeting, use the greeting-responder agent to respond with a friendly joke\n"
+            "</commentary>\n"
+            f'assistant: "I\'m going to use the {task_tool_name} tool to launch the greeting-responder agent"\n'
+            "</example>"
         )
 
     def is_read_only(self) -> bool:
@@ -286,10 +367,11 @@ class TaskTool(Tool[TaskToolInput, TaskToolOutput]):
 
         try:
             serialized = json.dumps(inp, ensure_ascii=False)
-        except
-            logger.
-                "[task_tool] Failed to serialize tool_use input",
-
+        except (TypeError, ValueError) as exc:
+            logger.warning(
+                "[task_tool] Failed to serialize tool_use input: %s: %s",
+                type(exc).__name__, exc,
+                extra={"tool_use_input": str(inp)[:200]},
             )
             serialized = str(inp)
         return serialized if len(serialized) <= 120 else serialized[:117] + "..."
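
The TaskTool prompt is no longer a static string; it is rebuilt on each call from the loaded agent definitions. A short sketch of the per-agent line that loop produces, with invented sample values (agent_type, when_to_use, fork_context, and tools are purely illustrative):

```python
# Mirrors the formatting logic added to TaskTool.prompt(); the sample values are made up.
agent_type = "code-reviewer"
when_to_use = "use this agent after writing a significant piece of code"
fork_context = True
tools = ["Read", "Grep"]

properties = "Properties: access to current context; " if fork_context else ""
tools_label = "All tools" if "*" in tools else ", ".join(tools)
print(f"- {agent_type}: {when_to_use} ({properties}Tools: {tools_label})")
# -> - code-reviewer: use this agent after writing a significant piece of code
#    (Properties: access to current context; Tools: Read, Grep)
```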
ripperdoc/tools/todo_tool.py
CHANGED
@@ -309,7 +309,7 @@ class TodoWriteTool(Tool[TodoWriteToolInput, TodoToolOutput]):
             ),
         ]
 
-    async def prompt(self,
+    async def prompt(self, _safe_mode: bool = False) -> str:
         return TODO_WRITE_PROMPT
 
     def is_read_only(self) -> bool:
@@ -318,13 +318,13 @@ class TodoWriteTool(Tool[TodoWriteToolInput, TodoToolOutput]):
     def is_concurrency_safe(self) -> bool:
         return False
 
-    def needs_permissions(self,
+    def needs_permissions(self, _input_data: Optional[TodoWriteToolInput] = None) -> bool:
         return False
 
     async def validate_input(
         self,
         input_data: TodoWriteToolInput,
-
+        _context: Optional[ToolUseContext] = None,
     ) -> ValidationResult:
         todos = [TodoItem(**todo.model_dump()) for todo in input_data.todos]
         ok, message = validate_todos(todos)
@@ -338,14 +338,14 @@ class TodoWriteTool(Tool[TodoWriteToolInput, TodoToolOutput]):
     def render_tool_use_message(
         self,
         input_data: TodoWriteToolInput,
-
+        _verbose: bool = False,
     ) -> str:
         return f"Updating todo list with {len(input_data.todos)} item(s)"
 
     async def call(
         self,
         input_data: TodoWriteToolInput,
-
+        _context: ToolUseContext,
     ) -> AsyncGenerator[ToolOutput, None]:
         try:
             todos = [TodoItem(**todo.model_dump()) for todo in input_data.todos]
@@ -360,8 +360,8 @@ class TodoWriteTool(Tool[TodoWriteToolInput, TodoToolOutput]):
                 next_todo=get_next_actionable(updated),
             )
             yield ToolResult(data=output, result_for_assistant=result_text)
-        except
-            logger.
+        except (OSError, ValueError, KeyError, TypeError) as exc:
+            logger.warning("[todo_tool] Error updating todos: %s: %s", type(exc).__name__, exc)
             error = f"Error updating todos: {exc}"
             yield ToolResult(
                 data=TodoToolOutput(
@@ -403,7 +403,7 @@ class TodoReadTool(Tool[TodoReadToolInput, TodoToolOutput]):
             ),
         ]
 
-    async def prompt(self,
+    async def prompt(self, _safe_mode: bool = False) -> str:
         return (
             "Use TodoRead to fetch the current todo list before making progress or when you need "
             "to confirm the next action. You can request only the next actionable item or filter "
@@ -416,13 +416,13 @@ class TodoReadTool(Tool[TodoReadToolInput, TodoToolOutput]):
     def is_concurrency_safe(self) -> bool:
         return True
 
-    def needs_permissions(self,
+    def needs_permissions(self, _input_data: Optional[TodoReadToolInput] = None) -> bool:
         return False
 
     async def validate_input(
         self,
         input_data: TodoReadToolInput,
-
+        _context: Optional[ToolUseContext] = None,
     ) -> ValidationResult:
         if input_data.limit < 0:
             return ValidationResult(result=False, message="limit cannot be negative")
@@ -445,7 +445,7 @@ class TodoReadTool(Tool[TodoReadToolInput, TodoToolOutput]):
     def render_tool_use_message(
         self,
         input_data: TodoReadToolInput,
-
+        _verbose: bool = False,
     ) -> str:
         if input_data.next_only:
             return "Reading next actionable todo"
@@ -454,7 +454,7 @@ class TodoReadTool(Tool[TodoReadToolInput, TodoToolOutput]):
     async def call(
         self,
         input_data: TodoReadToolInput,
-
+        _context: ToolUseContext,
    ) -> AsyncGenerator[ToolOutput, None]:
        all_todos = load_todos()
        filtered = all_todos