ripperdoc 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/__main__.py +0 -5
- ripperdoc/cli/cli.py +37 -16
- ripperdoc/cli/commands/__init__.py +2 -0
- ripperdoc/cli/commands/agents_cmd.py +12 -9
- ripperdoc/cli/commands/compact_cmd.py +7 -3
- ripperdoc/cli/commands/context_cmd.py +33 -13
- ripperdoc/cli/commands/doctor_cmd.py +27 -14
- ripperdoc/cli/commands/exit_cmd.py +1 -1
- ripperdoc/cli/commands/mcp_cmd.py +13 -8
- ripperdoc/cli/commands/memory_cmd.py +5 -5
- ripperdoc/cli/commands/models_cmd.py +47 -16
- ripperdoc/cli/commands/permissions_cmd.py +302 -0
- ripperdoc/cli/commands/resume_cmd.py +1 -2
- ripperdoc/cli/commands/tasks_cmd.py +24 -13
- ripperdoc/cli/ui/rich_ui.py +500 -406
- ripperdoc/cli/ui/tool_renderers.py +298 -0
- ripperdoc/core/agents.py +17 -9
- ripperdoc/core/config.py +130 -6
- ripperdoc/core/default_tools.py +7 -2
- ripperdoc/core/permissions.py +20 -14
- ripperdoc/core/providers/anthropic.py +107 -4
- ripperdoc/core/providers/base.py +33 -4
- ripperdoc/core/providers/gemini.py +169 -50
- ripperdoc/core/providers/openai.py +257 -23
- ripperdoc/core/query.py +294 -61
- ripperdoc/core/query_utils.py +50 -6
- ripperdoc/core/skills.py +295 -0
- ripperdoc/core/system_prompt.py +13 -7
- ripperdoc/core/tool.py +8 -6
- ripperdoc/sdk/client.py +14 -1
- ripperdoc/tools/ask_user_question_tool.py +20 -22
- ripperdoc/tools/background_shell.py +19 -13
- ripperdoc/tools/bash_tool.py +356 -209
- ripperdoc/tools/dynamic_mcp_tool.py +428 -0
- ripperdoc/tools/enter_plan_mode_tool.py +5 -2
- ripperdoc/tools/exit_plan_mode_tool.py +6 -3
- ripperdoc/tools/file_edit_tool.py +53 -10
- ripperdoc/tools/file_read_tool.py +17 -7
- ripperdoc/tools/file_write_tool.py +49 -13
- ripperdoc/tools/glob_tool.py +10 -9
- ripperdoc/tools/grep_tool.py +182 -51
- ripperdoc/tools/ls_tool.py +6 -6
- ripperdoc/tools/mcp_tools.py +106 -456
- ripperdoc/tools/multi_edit_tool.py +49 -9
- ripperdoc/tools/notebook_edit_tool.py +57 -13
- ripperdoc/tools/skill_tool.py +205 -0
- ripperdoc/tools/task_tool.py +7 -8
- ripperdoc/tools/todo_tool.py +12 -12
- ripperdoc/tools/tool_search_tool.py +5 -6
- ripperdoc/utils/coerce.py +34 -0
- ripperdoc/utils/context_length_errors.py +252 -0
- ripperdoc/utils/file_watch.py +5 -4
- ripperdoc/utils/json_utils.py +4 -4
- ripperdoc/utils/log.py +3 -3
- ripperdoc/utils/mcp.py +36 -15
- ripperdoc/utils/memory.py +9 -6
- ripperdoc/utils/message_compaction.py +16 -11
- ripperdoc/utils/messages.py +73 -8
- ripperdoc/utils/path_ignore.py +677 -0
- ripperdoc/utils/permissions/__init__.py +7 -1
- ripperdoc/utils/permissions/path_validation_utils.py +5 -3
- ripperdoc/utils/permissions/shell_command_validation.py +496 -18
- ripperdoc/utils/prompt.py +1 -1
- ripperdoc/utils/safe_get_cwd.py +5 -2
- ripperdoc/utils/session_history.py +38 -19
- ripperdoc/utils/todo.py +6 -2
- ripperdoc/utils/token_estimation.py +4 -3
- {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/METADATA +12 -1
- ripperdoc-0.2.5.dist-info/RECORD +107 -0
- ripperdoc-0.2.4.dist-info/RECORD +0 -99
- {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.4.dist-info → ripperdoc-0.2.5.dist-info}/top_level.txt +0 -0
@@ -18,6 +18,7 @@ from ripperdoc.core.tool import (
 )
 from ripperdoc.utils.log import get_logger
 from ripperdoc.utils.file_watch import record_snapshot
+from ripperdoc.utils.path_ignore import check_path_for_tool
 
 logger = get_logger()
 
@@ -92,18 +93,51 @@ NEVER write new files unless explicitly required by the user."""
     async def validate_input(
         self, input_data: FileWriteToolInput, context: Optional[ToolUseContext] = None
     ) -> ValidationResult:
-        # Check if file already exists (warning)
-        if os.path.exists(input_data.file_path):
-            # In safe mode, this should be handled by permissions
-            pass
-
         # Check if parent directory exists
         parent = Path(input_data.file_path).parent
         if not parent.exists():
             return ValidationResult(
-                result=False,
+                result=False,
+                message=f"Parent directory does not exist: {parent}",
+                error_code=1,
             )
 
+        file_path = os.path.abspath(input_data.file_path)
+
+        # If file doesn't exist, it's a new file - allow without reading first
+        if not os.path.exists(file_path):
+            return ValidationResult(result=True)
+
+        # File exists - check if it has been read before writing
+        file_state_cache = getattr(context, "file_state_cache", {}) if context else {}
+        file_snapshot = file_state_cache.get(file_path)
+
+        if not file_snapshot:
+            return ValidationResult(
+                result=False,
+                message="File has not been read yet. Read it first before writing to it.",
+                error_code=2,
+            )
+
+        # Check if file has been modified since it was read
+        try:
+            current_mtime = os.path.getmtime(file_path)
+            if current_mtime > file_snapshot.timestamp:
+                return ValidationResult(
+                    result=False,
+                    message="File has been modified since read, either by the user or by a linter. "
+                    "Read it again before attempting to write it.",
+                    error_code=3,
+                )
+        except OSError:
+            pass  # File mtime check failed, proceed anyway
+
+        # Check if path is ignored (warning for write operations)
+        file_path_obj = Path(file_path)
+        should_proceed, warning_msg = check_path_for_tool(file_path_obj, tool_name="Write", warn_only=True)
+        if warning_msg:
+            logger.warning("[file_write_tool] %s", warning_msg)
+
         return ValidationResult(result=True)
 
     def render_result_for_assistant(self, output: FileWriteToolOutput) -> str:
@@ -132,9 +166,10 @@ NEVER write new files unless explicitly required by the user."""
                     input_data.content,
                     getattr(context, "file_state_cache", {}),
                 )
-            except …
-                logger.…
-                    "[file_write_tool] Failed to record file snapshot",
+            except (OSError, IOError, RuntimeError) as exc:
+                logger.warning(
+                    "[file_write_tool] Failed to record file snapshot: %s: %s",
+                    type(exc).__name__, exc,
                     extra={"file_path": input_data.file_path},
                 )
 
@@ -149,10 +184,11 @@ NEVER write new files unless explicitly required by the user."""
                 data=output, result_for_assistant=self.render_result_for_assistant(output)
             )
 
-        except …
-            logger.…
-                "[file_write_tool] Error writing file",
-                …
+        except (OSError, IOError, PermissionError, UnicodeEncodeError) as e:
+            logger.warning(
+                "[file_write_tool] Error writing file: %s: %s",
+                type(e).__name__, e,
+                extra={"file_path": input_data.file_path},
            )
            error_output = FileWriteToolOutput(
                file_path=input_data.file_path,
ripperdoc/tools/glob_tool.py
CHANGED
@@ -76,7 +76,7 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
         ),
     ]
 
-    async def prompt(self, …
+    async def prompt(self, _safe_mode: bool = False) -> str:
         return GLOB_USAGE
 
     def is_read_only(self) -> bool:
@@ -85,11 +85,11 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
     def is_concurrency_safe(self) -> bool:
         return True
 
-    def needs_permissions(self, …
+    def needs_permissions(self, _input_data: Optional[GlobToolInput] = None) -> bool:
         return False
 
     async def validate_input(
-        self, …
+        self, _input_data: GlobToolInput, _context: Optional[ToolUseContext] = None
     ) -> ValidationResult:
         return ValidationResult(result=True)
 
@@ -103,7 +103,7 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
         lines.append("(Results are truncated. Consider using a more specific path or pattern.)")
         return "\n".join(lines)
 
-    def render_tool_use_message(self, input_data: GlobToolInput, …
+    def render_tool_use_message(self, input_data: GlobToolInput, _verbose: bool = False) -> str:
         """Format the tool use for display."""
         if not input_data.pattern:
             return "Glob"
@@ -123,7 +123,7 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
         except ValueError:
             relative_path = None
 
-        if …
+        if _verbose or not relative_path or str(relative_path) == ".":
             rendered_path = str(absolute_path)
         else:
             rendered_path = str(relative_path)
@@ -132,7 +132,7 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
         return f'pattern: "{input_data.pattern}"{path_fragment}'
 
     async def call(
-        self, input_data: GlobToolInput, …
+        self, input_data: GlobToolInput, _context: ToolUseContext
     ) -> AsyncGenerator[ToolOutput, None]:
         """Find files matching the pattern."""
 
@@ -166,9 +166,10 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
                 data=output, result_for_assistant=self.render_result_for_assistant(output)
             )
 
-        except …
-            logger.…
-                "[glob_tool] Error executing glob",
+        except (OSError, RuntimeError, ValueError) as e:
+            logger.warning(
+                "[glob_tool] Error executing glob: %s: %s",
+                type(e).__name__, e,
                 extra={"pattern": input_data.pattern, "path": input_data.path},
            )
            error_output = GlobToolOutput(matches=[], pattern=input_data.pattern, count=0)
ripperdoc/tools/grep_tool.py
CHANGED
@@ -4,7 +4,10 @@ Allows the AI to search for patterns in files.
 """
 
 import asyncio
-
+import re
+import shutil
+import subprocess
+from typing import AsyncGenerator, Optional, List, Tuple
 from pydantic import BaseModel, Field
 
 from ripperdoc.core.tool import (
@@ -19,6 +22,8 @@ from ripperdoc.utils.log import get_logger
 
 logger = get_logger()
 
+MAX_GREP_OUTPUT_CHARS = 20000
+
 
 GREP_USAGE = (
     "A powerful search tool built on ripgrep.\n\n"
@@ -27,11 +32,52 @@ GREP_USAGE = (
     '- Supports regex patterns (e.g., "log.*Error", "function\\s+\\w+")\n'
     '- Filter files with the glob parameter (e.g., "*.js", "**/*.tsx")\n'
     '- Output modes: "content" shows matching lines, "files_with_matches" (default) shows only file paths, "count" shows match counts\n'
+    "- Use head_limit to cap the number of returned entries (similar to piping through head -N) to avoid overwhelming output\n"
+    f"- Outputs are automatically truncated to around {MAX_GREP_OUTPUT_CHARS} characters to stay within context limits; narrow patterns for more detail\n"
     "- For open-ended searches that need multiple rounds, iterate with Glob and Grep rather than shell commands\n"
     "- Patterns are line-based; craft patterns accordingly and escape braces if needed (e.g., use `interface\\{\\}` to find `interface{}`)"
 )
 
 
+def truncate_with_ellipsis(
+    text: str, max_chars: int = MAX_GREP_OUTPUT_CHARS
+) -> Tuple[str, bool, int]:
+    """Trim long output and note how many lines were removed."""
+    if len(text) <= max_chars:
+        return text, False, 0
+
+    remaining = text[max_chars:]
+    truncated_lines = remaining.count("\n") + (1 if remaining else 0)
+    truncated_text = f"{text[:max_chars]}\n\n... [{truncated_lines} lines truncated] ..."
+    return truncated_text, True, truncated_lines
+
+
+def apply_head_limit(lines: List[str], head_limit: Optional[int]) -> Tuple[List[str], int]:
+    """Limit the number of lines returned, recording how many were omitted."""
+    if head_limit is None or head_limit <= 0:
+        return lines, 0
+    if len(lines) <= head_limit:
+        return lines, 0
+    return lines[:head_limit], len(lines) - head_limit
+
+
+def _split_globs(glob_value: str) -> List[str]:
+    """Split a glob string by whitespace and commas."""
+    if not glob_value:
+        return []
+    globs: List[str] = []
+    for token in re.split(r"\s+", glob_value.strip()):
+        if not token:
+            continue
+        globs.extend([part for part in token.split(",") if part])
+    return globs
+
+
+def _normalize_glob_for_grep(glob_pattern: str) -> str:
+    """grep --include matches basenames; drop path components to avoid mismatches like **/*.py."""
+    return glob_pattern.split("/")[-1] or glob_pattern
+
+
 class GrepToolInput(BaseModel):
     """Input schema for GrepTool."""
 
@@ -45,6 +91,10 @@ class GrepToolInput(BaseModel):
         default="files_with_matches",
         description="Output mode: 'files_with_matches', 'content', or 'count'",
     )
+    head_limit: Optional[int] = Field(
+        default=None,
+        description="Limit output to the first N results (similar to piping to head -N) to avoid huge responses.",
+    )
 
 
 class GrepMatch(BaseModel):
@@ -63,6 +113,9 @@ class GrepToolOutput(BaseModel):
     pattern: str
     total_files: int
     total_matches: int
+    output_mode: str = "files_with_matches"
+    head_limit: Optional[int] = None
+    omitted_results: int = 0
 
 
 class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
@@ -95,7 +148,7 @@ class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
         ),
     ]
 
-    async def prompt(self, …
+    async def prompt(self, _safe_mode: bool = False) -> str:
         return GREP_USAGE
 
     def is_read_only(self) -> bool:
@@ -104,72 +157,133 @@ class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
     def is_concurrency_safe(self) -> bool:
         return True
 
-    def needs_permissions(self, …
+    def needs_permissions(self, _input_data: Optional[GrepToolInput] = None) -> bool:
         return False
 
     async def validate_input(
-        self, input_data: GrepToolInput, …
+        self, input_data: GrepToolInput, _context: Optional[ToolUseContext] = None
    ) -> ValidationResult:
         valid_modes = ["files_with_matches", "content", "count"]
         if input_data.output_mode not in valid_modes:
             return ValidationResult(
                 result=False, message=f"Invalid output_mode. Must be one of: {valid_modes}"
             )
+        if input_data.head_limit is not None and input_data.head_limit <= 0:
+            return ValidationResult(result=False, message="head_limit must be positive")
         return ValidationResult(result=True)
 
     def render_result_for_assistant(self, output: GrepToolOutput) -> str:
         """Format output for the AI."""
-        if output.total_files == 0:
+        if output.total_files == 0 or output.total_matches == 0:
             return f"No matches found for pattern: {output.pattern}"
 
-
+        lines: List[str] = []
+        summary: str
 
-
-
-
-
-
-
-
+        if output.output_mode == "files_with_matches":
+            summary = f"Found {output.total_files} file(s) matching '{output.pattern}'."
+            lines = [match.file for match in output.matches if match.file]
+        elif output.output_mode == "count":
+            summary = (
+                f"Found {output.total_matches} total match(es) across {output.total_files} file(s) "
+                f"for '{output.pattern}'."
+            )
+            lines = [
+                f"{match.file}: {match.count if match.count is not None else 0}"
+                for match in output.matches
+                if match.file
+            ]
+        else:
+            summary = (
+                f"Found {output.total_matches} match(es) in {output.total_files} file(s) "
+                f"for '{output.pattern}':"
+            )
+            for match in output.matches:
+                if match.content is None:
+                    continue
+                line_number = f":{match.line_number}" if match.line_number is not None else ""
+                lines.append(f"{match.file}{line_number}: {match.content}")
+
+        if output.omitted_results:
+            lines.append(
+                f"... and {output.omitted_results} more result(s) not shown"
+                f"{' (use head_limit to control output size)' if output.head_limit else ''}"
+            )
 
-
-
+        result = summary
+        if lines:
+            result += "\n\n" + "\n".join(lines)
 
-
+        truncated_result, did_truncate, _ = truncate_with_ellipsis(result)
+        if did_truncate:
+            truncated_result += (
+                "\n(Output truncated; refine the pattern or lower head_limit for more detail.)"
+            )
+        return truncated_result
 
-    def render_tool_use_message(self, input_data: GrepToolInput, …
+    def render_tool_use_message(self, input_data: GrepToolInput, _verbose: bool = False) -> str:
         """Format the tool use for display."""
         msg = f"Grep: {input_data.pattern}"
         if input_data.glob:
             msg += f" in {input_data.glob}"
+        if input_data.head_limit:
+            msg += f" (head_limit={input_data.head_limit})"
         return msg
 
     async def call(
-        self, input_data: GrepToolInput, …
+        self, input_data: GrepToolInput, _context: ToolUseContext
     ) -> AsyncGenerator[ToolOutput, None]:
         """Search for the pattern."""
 
         try:
             search_path = input_data.path or "."
 
-
-
+            use_ripgrep = shutil.which("rg") is not None
+            pattern = input_data.pattern
+
+            if use_ripgrep:
+                cmd = ["rg", "--color", "never"]
+                if input_data.case_insensitive:
+                    cmd.append("-i")
+                if input_data.output_mode == "files_with_matches":
+                    cmd.append("-l")
+                elif input_data.output_mode == "count":
+                    cmd.append("-c")
+                else:
+                    cmd.append("-n")
+
+                for glob_pattern in _split_globs(input_data.glob or ""):
+                    cmd.extend(["--glob", glob_pattern])
+
+                if pattern.startswith("-"):
+                    cmd.extend(["-e", pattern])
+                else:
+                    cmd.append(pattern)
+
+                cmd.append(search_path)
+            else:
+                # Fallback to grep (note: grep --include matches basenames only)
+                cmd = ["grep", "-r", "--color=never", "-P"]
 
-
-
+                if input_data.case_insensitive:
+                    cmd.append("-i")
 
-
-
-
-
-
-
+                if input_data.output_mode == "files_with_matches":
+                    cmd.extend(["-l"])  # Files with matches
+                elif input_data.output_mode == "count":
+                    cmd.extend(["-c"])  # Count per file
+                else:
+                    cmd.extend(["-n"])  # Line numbers
 
-
-
+                for glob_pattern in _split_globs(input_data.glob or ""):
+                    cmd.extend(["--include", _normalize_glob_for_grep(glob_pattern)])
 
-
-
+                if pattern.startswith("-"):
+                    cmd.extend(["-e", pattern])
+                else:
+                    cmd.append(pattern)
+
+                cmd.append(search_path)
 
             # Run grep asynchronously
             process = await asyncio.create_subprocess_exec(
@@ -181,19 +295,30 @@ class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
 
             # Parse output
             matches: List[GrepMatch] = []
+            total_matches = 0
+            total_files = 0
+            omitted_results = 0
+            stdout_text = stdout.decode("utf-8", errors="ignore") if stdout else ""
+            lines = [line for line in stdout_text.split("\n") if line]
+
+            if returncode in (0, 1):  # 0 = matches found, 1 = no matches (ripgrep/grep)
+                display_lines, omitted_results = apply_head_limit(lines, input_data.head_limit)
+
+                if input_data.output_mode == "files_with_matches":
+                    total_files = len(set(lines))
+                    total_matches = len(lines)
+                    matches = [GrepMatch(file=line) for line in display_lines]
+
+                elif input_data.output_mode == "count":
+                    total_files = len(set(line.split(":", 1)[0] for line in lines if line))
+                    total_match_count = 0
+                    for line in lines:
+                        parts = line.rsplit(":", 1)
+                        if len(parts) == 2 and parts[1].isdigit():
+                            total_match_count += int(parts[1])
+                    total_matches = total_match_count
 
-
-                lines = stdout.decode("utf-8").strip().split("\n")
-
-                for line in lines:
-                    if not line:
-                        continue
-
-                    if input_data.output_mode == "files_with_matches":
-                        matches.append(GrepMatch(file=line))
-
-                    elif input_data.output_mode == "count":
-                        # Format: file:count
+                    for line in display_lines:
                         parts = line.rsplit(":", 1)
                         if len(parts) == 2:
                             matches.append(
@@ -202,8 +327,10 @@ class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
                                 )
                             )
 
-
-
+                else:  # content mode
+                    total_files = len({line.split(":", 1)[0] for line in lines if line})
+                    total_matches = len(lines)
+                    for line in display_lines:
                         parts = line.split(":", 2)
                         if len(parts) >= 3:
                             matches.append(
@@ -217,17 +344,21 @@ class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
             output = GrepToolOutput(
                 matches=matches,
                 pattern=input_data.pattern,
-                total_files=…
-                total_matches=…
+                total_files=total_files,
+                total_matches=total_matches,
+                output_mode=input_data.output_mode,
+                head_limit=input_data.head_limit,
+                omitted_results=omitted_results,
             )
 
             yield ToolResult(
                 data=output, result_for_assistant=self.render_result_for_assistant(output)
             )
 
-        except …
-            logger.…
-                "[grep_tool] Error executing grep",
+        except (OSError, RuntimeError, ValueError, subprocess.SubprocessError) as e:
+            logger.warning(
+                "[grep_tool] Error executing grep: %s: %s",
+                type(e).__name__, e,
                 extra={"pattern": input_data.pattern, "path": input_data.path},
            )
            error_output = GrepToolOutput(
ripperdoc/tools/ls_tool.py
CHANGED
@@ -119,7 +119,7 @@ def _resolve_directory_path(raw_path: str) -> Path:
        candidate = base_path / candidate
    try:
        return candidate.resolve()
-    except …
+    except (OSError, RuntimeError):
        return candidate
 
 
@@ -166,15 +166,15 @@ def _relative_path_for_display(path: Path, base_path: Path) -> str:
    resolved_path = path
    try:
        resolved_path = path.resolve()
-    except …
+    except (OSError, RuntimeError):
        pass
 
    try:
        rel_path = resolved_path.relative_to(base_path.resolve()).as_posix()
-    except …
+    except (OSError, ValueError, RuntimeError):
        try:
            rel_path = os.path.relpath(resolved_path, base_path)
-        except …
+        except (OSError, ValueError):
            rel_path = resolved_path.as_posix()
    rel_path = rel_path.replace(os.sep, "/")
 
@@ -345,7 +345,7 @@ class LSTool(Tool[LSToolInput, LSToolOutput]):
    ) -> ValidationResult:
        try:
            root_path = _resolve_directory_path(input_data.path)
-        except …
+        except (OSError, RuntimeError, ValueError):
            return ValidationResult(
                result=False, message=f"Unable to resolve path: {input_data.path}"
            )
@@ -397,7 +397,7 @@
            relative_path = (
                _relative_path_for_display(resolved_path, base_path) or resolved_path.as_posix()
            )
-        except …
+        except (OSError, RuntimeError, ValueError):
            relative_path = str(resolved_path)
 
        return relative_path