ripperdoc 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +74 -9
  3. ripperdoc/cli/commands/__init__.py +4 -0
  4. ripperdoc/cli/commands/agents_cmd.py +30 -4
  5. ripperdoc/cli/commands/context_cmd.py +11 -1
  6. ripperdoc/cli/commands/cost_cmd.py +5 -0
  7. ripperdoc/cli/commands/doctor_cmd.py +208 -0
  8. ripperdoc/cli/commands/memory_cmd.py +202 -0
  9. ripperdoc/cli/commands/models_cmd.py +61 -6
  10. ripperdoc/cli/commands/resume_cmd.py +4 -2
  11. ripperdoc/cli/commands/status_cmd.py +1 -1
  12. ripperdoc/cli/commands/tasks_cmd.py +27 -0
  13. ripperdoc/cli/ui/rich_ui.py +258 -11
  14. ripperdoc/cli/ui/thinking_spinner.py +128 -0
  15. ripperdoc/core/agents.py +14 -4
  16. ripperdoc/core/config.py +56 -3
  17. ripperdoc/core/default_tools.py +16 -2
  18. ripperdoc/core/permissions.py +19 -0
  19. ripperdoc/core/providers/__init__.py +31 -0
  20. ripperdoc/core/providers/anthropic.py +136 -0
  21. ripperdoc/core/providers/base.py +187 -0
  22. ripperdoc/core/providers/gemini.py +172 -0
  23. ripperdoc/core/providers/openai.py +142 -0
  24. ripperdoc/core/query.py +510 -386
  25. ripperdoc/core/query_utils.py +578 -0
  26. ripperdoc/core/system_prompt.py +2 -1
  27. ripperdoc/core/tool.py +16 -1
  28. ripperdoc/sdk/client.py +12 -1
  29. ripperdoc/tools/background_shell.py +63 -21
  30. ripperdoc/tools/bash_tool.py +48 -13
  31. ripperdoc/tools/file_edit_tool.py +20 -0
  32. ripperdoc/tools/file_read_tool.py +23 -0
  33. ripperdoc/tools/file_write_tool.py +20 -0
  34. ripperdoc/tools/glob_tool.py +59 -15
  35. ripperdoc/tools/grep_tool.py +7 -0
  36. ripperdoc/tools/ls_tool.py +246 -73
  37. ripperdoc/tools/mcp_tools.py +32 -10
  38. ripperdoc/tools/multi_edit_tool.py +23 -0
  39. ripperdoc/tools/notebook_edit_tool.py +18 -3
  40. ripperdoc/tools/task_tool.py +7 -0
  41. ripperdoc/tools/todo_tool.py +157 -25
  42. ripperdoc/tools/tool_search_tool.py +17 -4
  43. ripperdoc/utils/file_watch.py +134 -0
  44. ripperdoc/utils/git_utils.py +274 -0
  45. ripperdoc/utils/json_utils.py +27 -0
  46. ripperdoc/utils/log.py +129 -29
  47. ripperdoc/utils/mcp.py +71 -6
  48. ripperdoc/utils/memory.py +12 -1
  49. ripperdoc/utils/message_compaction.py +22 -5
  50. ripperdoc/utils/messages.py +72 -17
  51. ripperdoc/utils/output_utils.py +34 -9
  52. ripperdoc/utils/permissions/path_validation_utils.py +6 -0
  53. ripperdoc/utils/prompt.py +17 -0
  54. ripperdoc/utils/safe_get_cwd.py +4 -0
  55. ripperdoc/utils/session_history.py +27 -9
  56. ripperdoc/utils/session_usage.py +7 -0
  57. ripperdoc/utils/shell_utils.py +159 -0
  58. ripperdoc/utils/todo.py +2 -2
  59. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/METADATA +4 -2
  60. ripperdoc-0.2.3.dist-info/RECORD +95 -0
  61. ripperdoc-0.2.0.dist-info/RECORD +0 -81
  62. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/WHEEL +0 -0
  63. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/entry_points.txt +0 -0
  64. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/licenses/LICENSE +0 -0
  65. {ripperdoc-0.2.0.dist-info → ripperdoc-0.2.3.dist-info}/top_level.txt +0 -0
ripperdoc/tools/background_shell.py
@@ -14,6 +14,9 @@ import uuid
 from dataclasses import dataclass, field
 from typing import Any, Dict, List, Optional
 
+import atexit
+
+from ripperdoc.utils.shell_utils import build_shell_command, find_suitable_shell
 from ripperdoc.utils.log import get_logger
 
 
@@ -43,6 +46,7 @@ _tasks_lock = threading.Lock()
 _background_loop: Optional[asyncio.AbstractEventLoop] = None
 _background_thread: Optional[threading.Thread] = None
 _loop_lock = threading.Lock()
+_shutdown_registered = False
 
 
 def _ensure_background_loop() -> asyncio.AbstractEventLoop:
@@ -74,9 +78,18 @@ def _ensure_background_loop() -> asyncio.AbstractEventLoop:
 
     _background_loop = loop
     _background_thread = thread
+    _register_shutdown_hook()
     return loop
 
 
+def _register_shutdown_hook() -> None:
+    global _shutdown_registered
+    if _shutdown_registered:
+        return
+    atexit.register(shutdown_background_shell)
+    _shutdown_registered = True
+
+
 def _submit_to_background_loop(coro: Any) -> concurrent.futures.Future:
     """Run a coroutine on the background loop and return a thread-safe future."""
     loop = _ensure_background_loop()
@@ -95,7 +108,10 @@ async def _pump_stream(stream: asyncio.StreamReader, sink: List[str]) -> None:
             sink.append(text)
     except Exception as exc:
         # Best effort; ignore stream read errors to avoid leaking tasks.
-        logger.debug(f"Stream pump error for background task: {exc}")
+        logger.debug(
+            f"Stream pump error for background task: {exc}",
+            exc_info=True,
+        )
 
 
 async def _finalize_reader_tasks(reader_tasks: List[asyncio.Task], timeout: float = 1.0) -> None:
@@ -133,8 +149,11 @@ async def _monitor_task(task: BackgroundTask) -> None:
             task.exit_code = -1
     except asyncio.CancelledError:
         return
-    except Exception as exc:
-        logger.error(f"Error monitoring background task {task.id}: {exc}")
+    except Exception:
+        logger.exception(
+            "Error monitoring background task",
+            extra={"task_id": task.id, "command": task.command},
+        )
         with _tasks_lock:
             task.exit_code = -1
     finally:
@@ -147,24 +166,15 @@ async def _start_background_command(
     command: str, timeout: Optional[float] = None, shell_executable: Optional[str] = None
 ) -> str:
     """Launch a background shell command on the dedicated loop."""
-    if shell_executable:
-        process = await asyncio.create_subprocess_exec(
-            shell_executable,
-            "-c",
-            command,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-            stdin=asyncio.subprocess.DEVNULL,
-            start_new_session=False,
-        )
-    else:
-        process = await asyncio.create_subprocess_shell(
-            command,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-            stdin=asyncio.subprocess.DEVNULL,
-            start_new_session=False,
-        )
+    selected_shell = shell_executable or find_suitable_shell()
+    argv = build_shell_command(selected_shell, command)
+    process = await asyncio.create_subprocess_exec(
+        *argv,
+        stdout=asyncio.subprocess.PIPE,
+        stderr=asyncio.subprocess.PIPE,
+        stdin=asyncio.subprocess.DEVNULL,
+        start_new_session=False,
+    )
 
     task_id = f"bash_{uuid.uuid4().hex[:8]}"
     record = BackgroundTask(
@@ -289,3 +299,35 @@ def list_background_tasks() -> List[str]:
     """Return known background task ids."""
     with _tasks_lock:
         return list(_tasks.keys())
+
+
+def shutdown_background_shell() -> None:
+    """Stop background tasks/loop to avoid asyncio 'Event loop is closed' warnings."""
+    global _background_loop, _background_thread
+
+    loop = _background_loop
+    with _tasks_lock:
+        tasks = list(_tasks.values())
+        _tasks.clear()
+
+    for task in tasks:
+        try:
+            task.killed = True
+            task.process.kill()
+        except Exception:
+            pass
+        for reader in task.reader_tasks:
+            if loop and loop.is_running():
+                loop.call_soon_threadsafe(reader.cancel)
+        task.done_event.set()
+
+    if loop and loop.is_running():
+        try:
+            loop.call_soon_threadsafe(loop.stop)
+        except Exception:
+            pass
+    if _background_thread and _background_thread.is_alive():
+        _background_thread.join(timeout=2)
+
+    _background_loop = None
+    _background_thread = None
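
Both background_shell.py and bash_tool.py now build their subprocess argv through the new ripperdoc/utils/shell_utils.py helpers, whose source is not included in this diff. Below is a minimal sketch of what find_suitable_shell and build_shell_command might look like, inferred only from the call sites above and from the RIPPERDOC_SHELL / RIPPERDOC_SHELL_PATH overrides mentioned in the bash tool prompt; the candidate list and fallback order are assumptions, not the published implementation.

# Hypothetical sketch of ripperdoc/utils/shell_utils.py (not part of this diff).
import os
import shutil
from typing import List


def find_suitable_shell() -> str:
    """Pick a shell executable, honoring the documented env overrides first."""
    for env_var in ("RIPPERDOC_SHELL", "RIPPERDOC_SHELL_PATH"):
        override = os.environ.get(env_var)
        if override:
            return override
    for candidate in ("bash", "zsh", "sh"):  # assumed fallback order
        found = shutil.which(candidate)
        if found:
            return found
    raise RuntimeError("No suitable shell found on PATH")


def build_shell_command(shell_path: str, command: str) -> List[str]:
    """Return an argv list for asyncio.create_subprocess_exec(*argv)."""
    return [shell_path, "-c", command]

The contract visible at both call sites is the important part: build_shell_command returns an argv list passed to asyncio.create_subprocess_exec via *argv, replacing the earlier create_subprocess_shell path and keeping the chosen shell explicit.
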
ripperdoc/tools/bash_tool.py
@@ -49,6 +49,10 @@ from ripperdoc.utils.permissions.tool_permission_utils import (
 from ripperdoc.utils.permissions import PermissionDecision
 from ripperdoc.utils.sandbox_utils import create_sandbox_wrapper, is_sandbox_available
 from ripperdoc.utils.safe_get_cwd import get_original_cwd, safe_get_cwd
+from ripperdoc.utils.shell_utils import build_shell_command, find_suitable_shell
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
 
 
 DEFAULT_TIMEOUT_MS = get_bash_default_timeout_ms()
@@ -148,6 +152,15 @@ build projects, run tests, and interact with the file system."""
 
     async def prompt(self, safe_mode: bool = False) -> str:
         sandbox_available = is_sandbox_available()
+        try:
+            current_shell = find_suitable_shell()
+        except Exception as exc:  # pragma: no cover - defensive guard
+            current_shell = f"Unavailable ({exc})"
+
+        shell_info = (
+            f"Current shell used for execution: {current_shell}\n"
+            f"- Override via RIPPERDOC_SHELL or RIPPERDOC_SHELL_PATH env vars, or pass shellExecutable input.\n"
+        )
 
         read_only_section = ""
         if sandbox_available:
@@ -232,6 +245,8 @@ build projects, run tests, and interact with the file system."""
             f"""\
 Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.
 
+{shell_info}
+
 Before executing the command, please follow these steps:
 
 1. Directory Verification:
@@ -483,6 +498,23 @@ build projects, run tests, and interact with the file system."""
         """Execute the bash command."""
 
         effective_command, auto_background = self._detect_auto_background(input_data.command)
+        try:
+            resolved_shell = input_data.shell_executable or find_suitable_shell()
+        except Exception as exc:  # pragma: no cover - defensive guard
+            error_output = BashToolOutput(
+                stdout="",
+                stderr=f"Failed to select shell: {exc}",
+                exit_code=-1,
+                command=effective_command,
+                sandbox=bool(input_data.sandbox),
+                is_error=True,
+            )
+            yield ToolResult(
+                data=error_output,
+                result_for_assistant=self.render_result_for_assistant(error_output),
+            )
+            return
+
         timeout_ms = input_data.timeout or DEFAULT_TIMEOUT_MS
         if MAX_BASH_TIMEOUT_MS:
             timeout_ms = min(timeout_ms, MAX_BASH_TIMEOUT_MS)
@@ -516,6 +548,10 @@ build projects, run tests, and interact with the file system."""
                 final_command = wrapper.final_command
                 sandbox_cleanup = wrapper.cleanup
             except Exception as exc:
+                logger.exception(
+                    "[bash_tool] Failed to enable sandbox",
+                    extra={"command": effective_command, "error": str(exc)},
+                )
                 error_output = BashToolOutput(
                     stdout="",
                     stderr=f"Failed to enable sandbox: {exc}",
@@ -537,18 +573,9 @@ build projects, run tests, and interact with the file system."""
         should_background = False
 
         async def _spawn_process() -> asyncio.subprocess.Process:
-            if input_data.shell_executable:
-                return await asyncio.create_subprocess_exec(
-                    input_data.shell_executable,
-                    "-c",
-                    final_command,
-                    stdout=asyncio.subprocess.PIPE,
-                    stderr=asyncio.subprocess.PIPE,
-                    stdin=asyncio.subprocess.DEVNULL,
-                    start_new_session=False,
-                )
-            return await asyncio.create_subprocess_shell(
-                final_command,
+            argv = build_shell_command(resolved_shell, final_command)
+            return await asyncio.create_subprocess_exec(
+                *argv,
                 stdout=asyncio.subprocess.PIPE,
                 stderr=asyncio.subprocess.PIPE,
                 stdin=asyncio.subprocess.DEVNULL,
@@ -561,6 +588,10 @@ build projects, run tests, and interact with the file system."""
             try:
                 from ripperdoc.tools.background_shell import start_background_command
             except Exception as e:  # pragma: no cover - defensive import
+                logger.exception(
+                    "[bash_tool] Failed to import background shell runner",
+                    extra={"command": effective_command},
+                )
                 error_output = BashToolOutput(
                     stdout="",
                     stderr=f"Failed to start background task: {str(e)}",
@@ -581,7 +612,7 @@ build projects, run tests, and interact with the file system."""
                 else (timeout_seconds if timeout_seconds > 0 else None)
             )
             task_id = await start_background_command(
-                final_command, timeout=bg_timeout, shell_executable=input_data.shell_executable
+                final_command, timeout=bg_timeout, shell_executable=resolved_shell
             )
 
             output = BashToolOutput(
@@ -767,6 +798,10 @@ build projects, run tests, and interact with the file system."""
             )
 
         except Exception as e:
+            logger.exception(
+                "[bash_tool] Error executing command",
+                extra={"command": effective_command, "error": str(e)},
+            )
             error_output = BashToolOutput(
                 stdout="",
                 stderr=f"Error executing command: {str(e)}",
ripperdoc/tools/file_edit_tool.py
@@ -15,6 +15,10 @@ from ripperdoc.core.tool import (
     ToolUseExample,
     ValidationResult,
 )
+from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.file_watch import record_snapshot
+
+logger = get_logger()
 
 
 class FileEditToolInput(BaseModel):
@@ -182,6 +186,18 @@ match exactly (including whitespace and indentation)."""
             with open(input_data.file_path, "w", encoding="utf-8") as f:
                 f.write(new_content)
 
+            try:
+                record_snapshot(
+                    input_data.file_path,
+                    new_content,
+                    getattr(context, "file_state_cache", {}),
+                )
+            except Exception:
+                logger.exception(
+                    "[file_edit_tool] Failed to record file snapshot",
+                    extra={"file_path": input_data.file_path},
+                )
+
             # Generate diff for display
             import difflib
 
@@ -268,6 +284,10 @@ match exactly (including whitespace and indentation)."""
             )
 
         except Exception as e:
+            logger.exception(
+                "[file_edit_tool] Error editing file",
+                extra={"file_path": input_data.file_path, "error": str(e)},
+            )
             error_output = FileEditToolOutput(
                 file_path=input_data.file_path,
                 replacements_made=0,

ripperdoc/tools/file_read_tool.py
@@ -15,6 +15,10 @@ from ripperdoc.core.tool import (
     ToolUseExample,
     ValidationResult,
 )
+from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.file_watch import record_snapshot
+
+logger = get_logger()
 
 
 class FileReadToolInput(BaseModel):
@@ -140,6 +144,21 @@ and limit to read only a portion of the file."""
 
             content = "".join(selected_lines)
 
+            # Remember what we read so we can detect user edits later.
+            try:
+                record_snapshot(
+                    input_data.file_path,
+                    content,
+                    getattr(context, "file_state_cache", {}),
+                    offset=offset,
+                    limit=limit,
+                )
+            except Exception:
+                logger.exception(
+                    "[file_read_tool] Failed to record file snapshot",
+                    extra={"file_path": input_data.file_path},
+                )
+
             output = FileReadToolOutput(
                 content=content,
                 file_path=input_data.file_path,
@@ -153,6 +172,10 @@ and limit to read only a portion of the file."""
             )
 
         except Exception as e:
+            logger.exception(
+                "[file_read_tool] Error reading file",
+                extra={"file_path": input_data.file_path, "error": str(e)},
+            )
             # Create an error output
             error_output = FileReadToolOutput(
                 content=f"Error reading file: {str(e)}",

ripperdoc/tools/file_write_tool.py
@@ -16,6 +16,10 @@ from ripperdoc.core.tool import (
     ToolUseExample,
     ValidationResult,
 )
+from ripperdoc.utils.log import get_logger
+from ripperdoc.utils.file_watch import record_snapshot
+
+logger = get_logger()
 
 
 class FileWriteToolInput(BaseModel):
@@ -122,6 +126,18 @@ NEVER write new files unless explicitly required by the user."""
 
             bytes_written = len(input_data.content.encode("utf-8"))
 
+            try:
+                record_snapshot(
+                    input_data.file_path,
+                    input_data.content,
+                    getattr(context, "file_state_cache", {}),
+                )
+            except Exception:
+                logger.exception(
+                    "[file_write_tool] Failed to record file snapshot",
+                    extra={"file_path": input_data.file_path},
+                )
+
             output = FileWriteToolOutput(
                 file_path=input_data.file_path,
                 bytes_written=bytes_written,
@@ -134,6 +150,10 @@ NEVER write new files unless explicitly required by the user."""
             )
 
         except Exception as e:
+            logger.exception(
+                "[file_write_tool] Error writing file",
+                extra={"file_path": input_data.file_path, "error": str(e)},
+            )
             error_output = FileWriteToolOutput(
                file_path=input_data.file_path,
                bytes_written=0,
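
The three file tools above (edit, read, write) all record a snapshot through the new ripperdoc/utils/file_watch.py, which is listed in the file summary but not shown in this diff. A rough sketch consistent with the call sites follows, where file_state_cache is a per-context dict keyed by path; the FileSnapshot fields are assumptions for illustration only.

# Hypothetical sketch of record_snapshot from ripperdoc/utils/file_watch.py (not shown in this diff).
import time
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class FileSnapshot:
    content: str
    timestamp: float
    offset: Optional[int] = None
    limit: Optional[int] = None


def record_snapshot(
    file_path: str,
    content: str,
    file_state_cache: Dict[str, FileSnapshot],
    offset: Optional[int] = None,
    limit: Optional[int] = None,
) -> None:
    """Remember what a tool last saw for a path so later external edits can be detected."""
    file_state_cache[file_path] = FileSnapshot(
        content=content,
        timestamp=time.time(),
        offset=offset,
        limit=limit,
    )

Note how each caller passes getattr(context, "file_state_cache", {}), so the call degrades to a throwaway dict when the context carries no cache, and wraps record_snapshot in try/except so a snapshot failure never fails the underlying file operation.
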
ripperdoc/tools/glob_tool.py
@@ -15,17 +15,22 @@ from ripperdoc.core.tool import (
     ToolUseExample,
     ValidationResult,
 )
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
 
 
 GLOB_USAGE = (
-    "- Fast file pattern matching tool for any codebase size\n"
+    "- Fast file pattern matching tool that works with any codebase size\n"
     '- Supports glob patterns like "**/*.js" or "src/**/*.ts"\n'
-    "- Returns matching file paths sorted by modification time (newest first)\n"
-    "- Use this when you need to find files by name patterns\n"
-    "- For open-ended searches that need multiple rounds of globbing and grepping, run the searches iteratively with these tools\n"
-    "- You can call multiple tools in a single response; speculatively batch useful searches together"
+    "- Returns matching file paths sorted by modification time\n"
+    "- Use this tool when you need to find files by name patterns\n"
+    "- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Agent tool instead\n"
+    "- You have the capability to call multiple tools in a single response. It is always better to speculatively perform multiple searches as a batch that are potentially useful.\n"
 )
 
+RESULT_LIMIT = 100
+
 
 class GlobToolInput(BaseModel):
     """Input schema for GlobTool."""
@@ -42,6 +47,7 @@ class GlobToolOutput(BaseModel):
     matches: List[str]
     pattern: str
     count: int
+    truncated: bool = False
 
 
 class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
@@ -92,14 +98,38 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
         if not output.matches:
             return f"No files found matching pattern: {output.pattern}"
 
-        result = f"Found {output.count} file(s) matching '{output.pattern}':\n\n"
-        result += "\n".join(output.matches)
-
-        return result
+        lines = list(output.matches)
+        if output.truncated:
+            lines.append("(Results are truncated. Consider using a more specific path or pattern.)")
+        return "\n".join(lines)
 
     def render_tool_use_message(self, input_data: GlobToolInput, verbose: bool = False) -> str:
         """Format the tool use for display."""
-        return f"Glob: {input_data.pattern}"
+        if not input_data.pattern:
+            return "Glob"
+
+        base_path = Path.cwd()
+        rendered_path = ""
+        if input_data.path:
+            candidate_path = Path(input_data.path)
+            absolute_path = (
+                candidate_path
+                if candidate_path.is_absolute()
+                else (base_path / candidate_path).resolve()
+            )
+
+            try:
+                relative_path = absolute_path.relative_to(base_path)
+            except ValueError:
+                relative_path = None
+
+            if verbose or not relative_path or str(relative_path) == ".":
+                rendered_path = str(absolute_path)
+            else:
+                rendered_path = str(relative_path)
+
+        path_fragment = f', path: "{rendered_path}"' if rendered_path else ""
+        return f'pattern: "{input_data.pattern}"{path_fragment}'
 
     async def call(
         self, input_data: GlobToolInput, context: ToolUseContext
@@ -108,9 +138,8 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
 
         try:
             search_path = Path(input_data.path) if input_data.path else Path.cwd()
-
-            # Use glob to find matches, sorted by modification time (newest first)
-            paths = list(search_path.glob(input_data.pattern))
+            if not search_path.is_absolute():
+                search_path = (Path.cwd() / search_path).resolve()
 
             def _mtime(path: Path) -> float:
                 try:
@@ -118,15 +147,30 @@ class GlobTool(Tool[GlobToolInput, GlobToolOutput]):
                 except OSError:
                     return float("-inf")
 
-            matches = [str(p) for p in sorted(paths, key=_mtime, reverse=True)]
+            # Find matching files, sorted by modification time
+            paths = sorted(
+                (p for p in search_path.glob(input_data.pattern) if p.is_file()),
+                key=_mtime,
+            )
+
+            truncated = len(paths) > RESULT_LIMIT
+            paths = paths[:RESULT_LIMIT]
 
-            output = GlobToolOutput(matches=matches, pattern=input_data.pattern, count=len(matches))
+            matches = [str(p) for p in paths]
+
+            output = GlobToolOutput(
+                matches=matches, pattern=input_data.pattern, count=len(matches), truncated=truncated
+            )
 
             yield ToolResult(
                 data=output, result_for_assistant=self.render_result_for_assistant(output)
             )
 
         except Exception as e:
+            logger.exception(
+                "[glob_tool] Error executing glob",
+                extra={"pattern": input_data.pattern, "path": input_data.path},
+            )
            error_output = GlobToolOutput(matches=[], pattern=input_data.pattern, count=0)
 
            yield ToolResult(
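
For reference, here is a small illustrative example of how the reworked glob output renders once more than RESULT_LIMIT (100) paths match; the values below are made up, but the field names come from the GlobToolOutput model added in this diff.

# Illustrative only: constructing a truncated GlobToolOutput as the new call() path would.
from ripperdoc.tools.glob_tool import GlobToolOutput

output = GlobToolOutput(
    matches=[f"src/module_{i}.py" for i in range(100)],  # first 100 matches by mtime
    pattern="**/*.py",
    count=100,
    truncated=True,  # more than RESULT_LIMIT files matched before slicing
)

# render_result_for_assistant joins the paths and appends the truncation notice:
#   src/module_0.py
#   ...
#   src/module_99.py
#   (Results are truncated. Consider using a more specific path or pattern.)
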
ripperdoc/tools/grep_tool.py
@@ -15,6 +15,9 @@ from ripperdoc.core.tool import (
     ToolUseExample,
     ValidationResult,
 )
+from ripperdoc.utils.log import get_logger
+
+logger = get_logger()
 
 
 GREP_USAGE = (
@@ -223,6 +226,10 @@ class GrepTool(Tool[GrepToolInput, GrepToolOutput]):
             )
 
         except Exception as e:
+            logger.exception(
+                "[grep_tool] Error executing grep",
+                extra={"pattern": input_data.pattern, "path": input_data.path},
+            )
             error_output = GrepToolOutput(
                 matches=[], pattern=input_data.pattern, total_files=0, total_matches=0
             )