zrb 1.5.4__py3-none-any.whl → 1.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zrb/builtin/llm/tool/file.py CHANGED
@@ -1,510 +1,374 @@
  import fnmatch
+ import json
  import os
  import re
- from typing import Dict, List, Optional, Tuple, Union
+ from typing import Any, Dict, List, Optional
 
  from zrb.util.file import read_file as _read_file
  from zrb.util.file import write_file as _write_file
 
- # Common directories and files to exclude from file operations
- _DEFAULT_EXCLUDES = [
-     # Version control
-     ".git",
-     ".svn",
-     ".hg",
-     # Dependencies and packages
-     "node_modules",
-     "venv",
-     ".venv",
-     "env",
-     ".env",
-     # Build and cache
-     "__pycache__",
-     "*.pyc",
-     "build",
-     "dist",
-     "target",
-     # IDE and editor files
-     ".idea",
-     ".vscode",
-     "*.swp",
-     "*.swo",
-     # OS-specific
-     ".DS_Store",
-     "Thumbs.db",
-     # Temporary and backup files
-     "*.tmp",
-     "*.bak",
-     "*.log",
- ]
-
- # Maximum number of lines to read before truncating
- _MAX_LINES_BEFORE_TRUNCATION = 1000
-
- # Number of context lines to show around method definitions when truncating
- _CONTEXT_LINES = 5
-
 
  def list_files(
-     path: str = ".",
-     recursive: bool = True,
-     file_pattern: Optional[str] = None,
-     excluded_patterns: list[str] = _DEFAULT_EXCLUDES,
- ) -> list[str]:
+     path: str = ".", recursive: bool = True, include_hidden: bool = False
+ ) -> str:
      """
-     List files in a directory that match specified patterns.
-
+     Request to list files and directories within the specified directory.
+     If recursive is true, it will list all files and directories recursively.
+     If recursive is false or not provided, it will only list the top-level contents.
      Args:
-         path: The path of the directory to list contents for
-             (relative to the current working directory)
-         recursive: Whether to list files recursively.
-             Use True for recursive listing, False for top-level only.
-         file_pattern: Optional glob pattern to filter files.
-             None by default (all files will be included).
-         excluded_patterns: List of glob patterns to exclude. By default, contains sane values
-             to exclude common directories and files like version control, build artifacts,
-             and temporary files.
-
+         path: (required) The path of the directory to list contents for (relative to the CWD)
+         recursive: (optional) Whether to list files recursively.
+             Use true for recursive listing, false or omit for top-level only.
+         include_hidden: (optional) Whether to include hidden files/directories.
+             Defaults to False (exclude hidden files).
      Returns:
-         A list of file paths matching the criteria
+         A JSON string containing a list of file paths or an error message.
+         Example success: '{"files": ["file1.txt", "subdir/file2.py"]}'
+         Example error: '{"error": "Error listing files: [Errno 2] No such file..."}'
      """
-     all_files: list[str] = []
-
-     if recursive:
-         for root, dirs, files in os.walk(path):
-             # Filter out excluded directories to avoid descending into them
-             dirs[:] = [
-                 d
-                 for d in dirs
-                 if not _should_exclude(os.path.join(root, d), excluded_patterns)
-             ]
-
-             for filename in files:
-                 full_path = os.path.join(root, filename)
-                 # If file_pattern is None, include all files, otherwise match the pattern
-                 if file_pattern is None or fnmatch.fnmatch(filename, file_pattern):
-                     if not _should_exclude(full_path, excluded_patterns):
-                         all_files.append(full_path)
-     else:
-         # Non-recursive listing (top-level only)
+     all_files: List[str] = []
+     abs_path = os.path.abspath(path)
+     try:
+         if recursive:
+             for root, dirs, files in os.walk(abs_path):
+                 # Skip hidden directories (like .git) for performance and relevance
+                 dirs[:] = [d for d in dirs if include_hidden or not _is_hidden(d)]
+                 for filename in files:
+                     # Skip hidden files
+                     if include_hidden or not _is_hidden(filename):
+                         all_files.append(os.path.join(root, filename))
+         else:
+             # Non-recursive listing (top-level only)
+             for item in os.listdir(abs_path):
+                 full_path = os.path.join(abs_path, item)
+                 # Include both files and directories if not recursive
+                 if include_hidden or not _is_hidden(
+                     item
+                 ):  # Skip hidden items unless included
+                     all_files.append(full_path)
+
+         # Return paths relative to the original path requested
          try:
-             for item in os.listdir(path):
-                 full_path = os.path.join(path, item)
-                 if os.path.isfile(full_path):
-                     # If file_pattern is None, include all files, otherwise match the pattern
-                     if file_pattern is None or fnmatch.fnmatch(item, file_pattern):
-                         if not _should_exclude(full_path, excluded_patterns):
-                             all_files.append(full_path)
-         except (FileNotFoundError, PermissionError) as e:
-             print(f"Error listing files in {path}: {e}")
-
-     return sorted(all_files)
-
-
- def _should_exclude(
-     full_path: str, excluded_patterns: list[str] = _DEFAULT_EXCLUDES
- ) -> bool:
-     """
-     Return True if the file at full_path should be excluded based on
-     the list of excluded_patterns. Patterns that include a path separator
-     are applied to the full normalized path; otherwise they are matched
-     against each individual component of the path.
-
-     Args:
-         full_path: The full path to check
-         excluded_patterns: List of patterns to exclude
-
-     Returns:
-         True if the path should be excluded, False otherwise
-     """
-     norm_path = os.path.normpath(full_path)
-     path_parts = norm_path.split(os.sep)
+             rel_files = [
+                 os.path.relpath(f, os.path.dirname(abs_path)) for f in all_files
+             ]
+             return json.dumps({"files": sorted(rel_files)})
+         except (
+             ValueError
+         ) as e:  # Handle case where path is '.' and abs_path is CWD root
+             if "path is on mount '" in str(e) and "' which is not on mount '" in str(e):
+                 # If paths are on different mounts, just use absolute paths
+                 rel_files = all_files
+                 return json.dumps({"files": sorted(rel_files)})
+             raise
+     except Exception as e:
+         raise Exception(f"Error listing files in {path}: {e}")
 
-     for pat in excluded_patterns:
-         # If the pattern seems intended for full path matching (contains a separator)
-         if os.sep in pat or "/" in pat:
-             if fnmatch.fnmatch(norm_path, pat):
-                 return True
-         else:
-             # Otherwise check each part of the path
-             if any(fnmatch.fnmatch(part, pat) for part in path_parts):
-                 return True
-             # Also check the filename against the pattern
-             if os.path.isfile(full_path) and fnmatch.fnmatch(
-                 os.path.basename(full_path), pat
-             ):
-                 return True
 
-     return False
+ def _is_hidden(path: str) -> bool:
+     """Check if path is hidden (starts with '.')."""
+     return os.path.basename(path).startswith(".")
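Note: list_files now returns a JSON string rather than a Python list, so callers (including the LLM tool wrapper) are expected to decode it. A minimal usage sketch against the new signature shown above; the target directory is illustrative:

import json

from zrb.builtin.llm.tool.file import list_files

# The tool returns a JSON document such as '{"files": [...]}'.
raw = list_files(path=".", recursive=True, include_hidden=False)
for file_path in json.loads(raw)["files"]:
    print(file_path)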
 
 
  def read_from_file(
      path: str,
      start_line: Optional[int] = None,
      end_line: Optional[int] = None,
-     auto_truncate: bool = False,
  ) -> str:
      """
-     Read the contents of a file at the specified path.
-
+     Request to read the contents of a file at the specified path. Use this when you need
+     to examine the contents of an existing file you do not know the contents of, for example
+     to analyze code, review text files, or extract information from configuration files.
+     The output includes line numbers prefixed to each line (e.g. "1 | const x = 1"),
+     making it easier to reference specific lines when creating diffs or discussing code.
+     By specifying start_line and end_line parameters, you can efficiently read specific
+     portions of large files without loading the entire file into memory. Automatically
+     extracts raw text from PDF and DOCX files. May not be suitable for other types of
+     binary files, as it returns the raw content as a string.
      Args:
-         path: The path of the file to read (relative to the current working directory)
-         start_line: The starting line number to read from (1-based).
-             If not provided, starts from the beginning.
-         end_line: The ending line number to read to (1-based, inclusive).
-             If not provided, reads to the end.
-         auto_truncate: Whether to automatically truncate large files when start_line
-             and end_line are not specified. If true and the file exceeds a certain
-             line threshold, it will return a subset of lines with information about
-             the total line count and method definitions. Default is False for backward
-             compatibility, but setting to True is recommended for large files.
-
+         path: (required) The path of the file to read (relative to the CWD)
+         start_line: (optional) The starting line number to read from (1-based).
+             If not provided, it starts from the beginning of the file.
+         end_line: (optional) The ending line number to read to (1-based, inclusive).
+             If not provided, it reads to the end of the file.
      Returns:
-         A string containing the file content, with line numbers prefixed to each line.
-         For truncated files, includes summary information.
+         A JSON string containing the file path, content, and line range, or an error.
+         Example success: '{"path": "f.py", "content": "...", "start_line": 1, "end_line": 2}'
+         Example error: '{"error": "File not found: data.txt"}'
      """
      try:
          abs_path = os.path.abspath(path)
-
-         # Read the entire file content
+         # Check if file exists
+         if not os.path.exists(abs_path):
+             return json.dumps({"error": f"File {path} does not exist"})
          content = _read_file(abs_path)
          lines = content.splitlines()
          total_lines = len(lines)
-
-         # Determine if we should truncate
-         should_truncate = (
-             auto_truncate
-             and start_line is None
-             and end_line is None
-             and total_lines > _MAX_LINES_BEFORE_TRUNCATION
-         )
-
          # Adjust line indices (convert from 1-based to 0-based)
          start_idx = (start_line - 1) if start_line is not None else 0
          end_idx = end_line if end_line is not None else total_lines
-
          # Validate indices
          if start_idx < 0:
              start_idx = 0
          if end_idx > total_lines:
              end_idx = total_lines
-
-         if should_truncate:
-             # Find method definitions and their line ranges
-             method_info = _find_method_definitions(lines)
-
-             # Create a truncated view with method definitions
-             result_lines = []
-
-             # Add file info header
-             result_lines.append(f"File: {path} (truncated, {total_lines} lines total)")
-             result_lines.append("")
-
-             # Add beginning of file (first 100 lines)
-             first_chunk = min(100, total_lines // 3)
-             for i in range(first_chunk):
-                 result_lines.append(f"{i+1} | {lines[i]}")
-
-             result_lines.append("...")
-             omitted_msg = (
-                 f"[{first_chunk+1} - {total_lines-100}] Lines omitted for brevity"
-             )
-             result_lines.append(omitted_msg)
-             result_lines.append("...")
-
-             # Add end of file (last 100 lines)
-             for i in range(max(first_chunk, total_lines - 100), total_lines):
-                 result_lines.append(f"{i+1} | {lines[i]}")
-
-             # Add method definitions summary
-             if method_info:
-                 result_lines.append("")
-                 result_lines.append("Method definitions found:")
-                 for method in method_info:
-                     method_line = (
-                         f"- {method['name']} "
-                         f"(lines {method['start_line']}-{method['end_line']})"
-                     )
-                     result_lines.append(method_line)
-
-             return "\n".join(result_lines)
-         else:
-             # Return the requested range with line numbers
-             result_lines = []
-             for i in range(start_idx, end_idx):
-                 result_lines.append(f"{i+1} | {lines[i]}")
-
-             return "\n".join(result_lines)
-
+         if start_idx > end_idx:
+             start_idx = end_idx
+         # Select the lines for the result
+         selected_lines = lines[start_idx:end_idx]
+         content_result = "\n".join(selected_lines)
+         return json.dumps(
+             {
+                 "path": path,
+                 "content": content_result,
+                 "start_line": start_idx + 1,  # Convert back to 1-based for output
+                 "end_line": end_idx,  # end_idx is already exclusive upper bound
+                 "total_lines": total_lines,
+             }
+         )
      except Exception as e:
-         return f"Error reading file {path}: {str(e)}"
-
-
- def _find_method_definitions(lines: List[str]) -> List[Dict[str, Union[str, int]]]:
-     """
-     Find method definitions in the given lines of code.
-
-     Args:
-         lines: List of code lines to analyze
-
-     Returns:
-         List of dictionaries containing method name, start line, and end line
-     """
-     method_info = []
-
-     # Simple regex patterns for common method/function definitions
-     patterns = [
-         # Python
-         r"^\s*def\s+([a-zA-Z0-9_]+)\s*\(",
-         # JavaScript/TypeScript
-         r"^\s*(function\s+([a-zA-Z0-9_]+)|([a-zA-Z0-9_]+)\s*=\s*function|"
-         r"\s*([a-zA-Z0-9_]+)\s*\([^)]*\)\s*{)",
-         # Java/C#/C++
-         r"^\s*(?:public|private|protected|static|final|abstract|synchronized)?"
-         r"\s+(?:[a-zA-Z0-9_<>[\]]+\s+)+([a-zA-Z0-9_]+)\s*\(",
-     ]
-
-     current_method = None
-
-     for i, line in enumerate(lines):
-         # Check if this line starts a method definition
-         for pattern in patterns:
-             match = re.search(pattern, line)
-             if match:
-                 # If we were tracking a method, close it
-                 if current_method:
-                     current_method["end_line"] = i
-                     method_info.append(current_method)
-
-                 # Start tracking a new method
-                 method_name = next(
-                     group for group in match.groups() if group is not None
-                 )
-                 current_method = {
-                     "name": method_name,
-                     "start_line": i + 1,  # 1-based line numbering
-                     "end_line": None,
-                 }
-                 break
-
-         # Check for method end (simplistic approach)
-         if current_method and line.strip() == "}":
-             current_method["end_line"] = i + 1
-             method_info.append(current_method)
-             current_method = None
+         raise Exception(f"Error reading file {path}: {e}")
 
-     # Close any open method at the end of the file
-     if current_method:
-         current_method["end_line"] = len(lines)
-         method_info.append(current_method)
 
-     return method_info
-
-
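Note: read_from_file likewise moves from plain text with "N | ..." line prefixes to a JSON envelope, and the auto_truncate/_find_method_definitions machinery is removed entirely. A small sketch of the new contract, using a hypothetical file name:

import json

from zrb.builtin.llm.tool.file import read_from_file

result = json.loads(read_from_file("example.py", start_line=1, end_line=20))
if "error" in result:
    print(result["error"])  # e.g. the file does not exist
else:
    print(result["start_line"], result["end_line"], result["total_lines"])
    print(result["content"])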
- def write_to_file(path: str, content: str) -> bool:
+ def write_to_file(path: str, content: str, line_count: int) -> str:
      """
-     Write content to a file at the specified path.
-
+     Request to write full content to a file at the specified path. If the file exists,
+     it will be overwritten with the provided content. If the file doesn't exist,
+     it will be created. This tool will automatically create any directories needed
+     to write the file.
      Args:
-         path: The path of the file to write to (relative to the current working directory)
-         content: The content to write to the file
-
+         path: (required) The path of the file to write to (relative to the CWD)
+         content: (required) The content to write to the file. ALWAYS provide the COMPLETE
+             intended content of the file, without any truncation or omissions. You MUST
+             include ALL parts of the file, even if they haven't been modified. Do NOT
+             include the line numbers in the content though, just the actual content
+             of the file.
+         line_count: (required) The number of lines in the file. Make sure to compute
+             this based on the actual content of the file, not the number of lines
+             in the content you're providing.
      Returns:
-         True if successful, False otherwise
+         A JSON string indicating success or failure, including any warnings.
+         Example success: '{"success": true, "path": "new_config.json"}'
+         Example success with warning: '{"success": true, "path": "f.txt", "warning": "..."}'
+         Example error: '{"success": false, "error": "Permission denied: /etc/hosts"}'
      """
+     actual_lines = len(content.splitlines())
+     warning = None
+     if actual_lines != line_count:
+         warning = (
+             f"Provided line_count ({line_count}) does not match actual "
+             f"content lines ({actual_lines}) for file {path}"
+         )
      try:
+         abs_path = os.path.abspath(path)
          # Ensure directory exists
-         directory = os.path.dirname(os.path.abspath(path))
+         directory = os.path.dirname(abs_path)
          if directory and not os.path.exists(directory):
              os.makedirs(directory, exist_ok=True)
-
-         # Write the content
-         _write_file(os.path.abspath(path), content)
-         return True
+         _write_file(abs_path, content)
+         result_data = {"success": True, "path": path}
+         if warning:
+             result_data["warning"] = warning
+         return json.dumps(result_data)
      except Exception as e:
-         print(f"Error writing to file {path}: {str(e)}")
-         return False
+         raise Exception(f"Error writing file {e}")
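Note: write_to_file gains a required line_count argument that is cross-checked against the content; a mismatch no longer fails the write but surfaces as a "warning" key. Sketch with a hypothetical path:

import json

from zrb.builtin.llm.tool.file import write_to_file

content = "line one\nline two"
# Passing a wrong line_count still writes the file but attaches a warning.
result = json.loads(write_to_file("notes.txt", content, line_count=3))
print(result["success"], result.get("warning"))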
 
 
  def search_files(
-     path: str, regex: str, file_pattern: Optional[str] = None, context_lines: int = 2
+     path: str,
+     regex: str,
+     file_pattern: Optional[str] = None,
+     include_hidden: bool = False,
  ) -> str:
      """
-     Search for a regex pattern across files in a specified directory.
-
+     Request to perform a regex search across files in a specified directory,
+     providing context-rich results. This tool searches for patterns or specific
+     content across multiple files, displaying each match with encapsulating context.
      Args:
-         path: The path of the directory to search in
-             (relative to the current working directory)
-         regex: The regular expression pattern to search for
-         file_pattern: Optional glob pattern to filter files.
-             Default is None, which includes all files. Only specify this if you need to
-             filter to specific file types (but in most cases, leaving as None is better).
-         context_lines: Number of context lines to show before and after each match.
-             Default is 2, which provides good context without overwhelming output.
-
+         path: (required) The path of the directory to search in (relative to the CWD).
+             This directory will be recursively searched.
+         regex: (required) The regular expression pattern to search for. Uses Rust regex syntax.
+             (Note: Python's `re` module will be used here, which has similar syntax)
+         file_pattern: (optional) Glob pattern to filter files (e.g., '*.ts').
+             If not provided, searches all files (*).
+         include_hidden: (optional) Whether to include hidden files.
+             Defaults to False (exclude hidden files).
      Returns:
-         A string containing the search results with context
+         A JSON string containing the search results or an error message.
+         Example success: '{"summary": "Found 5 matches...", "results": [{"file":"f.py", ...}]}'
+         Example no match: '{"summary": "No matches found...", "results": []}'
+         Example error: '{"error": "Invalid regex: ..."}'
      """
      try:
-         # Compile the regex pattern
          pattern = re.compile(regex)
+     except re.error as e:
+         raise Exception(f"Invalid regex pattern: {e}")
+     search_results = {"summary": "", "results": []}
+     match_count = 0
+     searched_file_count = 0
+     file_match_count = 0
+     try:
+         abs_path = os.path.abspath(path)
+         for root, dirs, files in os.walk(abs_path):
+             # Skip hidden directories
+             dirs[:] = [d for d in dirs if include_hidden or not _is_hidden(d)]
+             for filename in files:
+                 # Skip hidden files
+                 if not include_hidden and _is_hidden(filename):
+                     continue
+                 # Apply file pattern filter if provided
+                 if file_pattern and not fnmatch.fnmatch(filename, file_pattern):
+                     continue
+                 file_path = os.path.join(root, filename)
+                 rel_file_path = os.path.relpath(file_path, os.getcwd())
+                 searched_file_count += 1
+                 try:
+                     matches = _get_file_matches(file_path, pattern)
+                     if matches:
+                         file_match_count += 1
+                         match_count += len(matches)
+                         search_results["results"].append(
+                             {"file": rel_file_path, "matches": matches}
+                         )
+                 except IOError as e:
+                     search_results["results"].append(
+                         {"file": rel_file_path, "error": str(e)}
+                     )
+         if match_count == 0:
+             search_results["summary"] = (
+                 f"No matches found for pattern '{regex}' in path '{path}' "
+                 f"(searched {searched_file_count} files)."
+             )
+         else:
+             search_results["summary"] = (
+                 f"Found {match_count} matches in {file_match_count} files "
+                 f"(searched {searched_file_count} files)."
+             )
+         return json.dumps(search_results, indent=2)  # Pretty print for readability
+     except Exception as e:
+         raise Exception(f"Error searching files: {e}")
 
-         # Get the list of files to search
-         files = list_files(path, recursive=True, file_pattern=file_pattern)
-
-         results = []
-         match_count = 0
-
-         for file_path in files:
-             try:
-                 with open(file_path, "r", encoding="utf-8", errors="replace") as f:
-                     lines = f.readlines()
-
-                 file_matches = []
-
-                 for i, line in enumerate(lines):
-                     if pattern.search(line):
-                         # Determine context range
-                         start = max(0, i - context_lines)
-                         end = min(len(lines), i + context_lines + 1)
-
-                         # Add file header if this is the first match in the file
-                         if not file_matches:
-                             file_matches.append(
-                                 f"\n{'-' * 80}\n{file_path}\n{'-' * 80}"
-                             )
-
-                         # Add separator if this isn't the first match and isn't contiguous
-                         # with previous
-                         if (
-                             file_matches
-                             and file_matches[-1] != f"Line {start+1}-{end}:"
-                         ):
-                             file_matches.append(f"\nLine {start+1}-{end}:")
-
-                         # Add context lines
-                         for j in range(start, end):
-                             prefix = ">" if j == i else " "
-                             file_matches.append(f"{prefix} {j+1}: {lines[j].rstrip()}")
-
-                         match_count += 1
-
-                 if file_matches:
-                     results.extend(file_matches)
-
-             except Exception as e:
-                 results.append(f"Error reading {file_path}: {str(e)}")
-
-         if not results:
-             return f"No matches found for pattern '{regex}' in {path}"
-
-         # Count unique files by counting headers
-         file_count = len([r for r in results if r.startswith("-" * 80)])
-         summary = f"Found {match_count} matches in {file_count} files:\n"
-         return summary + "\n".join(results)
 
+ def _get_file_matches(
+     file_path: str, pattern: re.Pattern, context_lines: int = 2
+ ) -> List[Dict[str, Any]]:
+     """Search for regex matches in a file with context."""
+     try:
+         with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
+             lines = f.readlines()
+         matches = []
+         for line_idx, line in enumerate(lines):
+             if pattern.search(line):
+                 line_num = line_idx + 1
+                 context_start = max(0, line_idx - context_lines)
+                 context_end = min(len(lines), line_idx + context_lines + 1)
+                 match_data = {
+                     "line_number": line_num,
+                     "line_content": line.rstrip(),
+                     "context_before": [
+                         lines[j].rstrip() for j in range(context_start, line_idx)
+                     ],
+                     "context_after": [
+                         lines[j].rstrip() for j in range(line_idx + 1, context_end)
+                     ],
+                 }
+                 matches.append(match_data)
+         return matches
      except Exception as e:
-         return f"Error searching files: {str(e)}"
+         raise IOError(f"Error reading {file_path}: {str(e)}")
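Note: search_files now walks the tree itself (instead of delegating to list_files) and reports matches as structured JSON built by the new _get_file_matches helper. Illustrative usage, with a hypothetical directory:

import json

from zrb.builtin.llm.tool.file import search_files

report = json.loads(search_files("src", r"TODO", file_pattern="*.py"))
print(report["summary"])
for entry in report["results"]:
    for match in entry.get("matches", []):
        print(entry["file"], match["line_number"], match["line_content"])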
 
 
- def apply_diff(path: str, diff: str, start_line: int, end_line: int) -> bool:
+ def apply_diff(path: str, diff: str) -> str:
      """
-     Replace existing code using a search and replace block.
-
+     Request to replace existing code using a search and replace block.
+     This tool allows for precise, surgical replaces to files by specifying exactly
+     what content to search for and what to replace it with.
+     The tool will maintain proper indentation and formatting while making changes.
+     Only a single operation is allowed per tool use.
+     The SEARCH section must exactly match existing content including whitespace
+     and indentation.
+     If you're not confident in the exact content to search for, use the read_file tool
+     first to get the exact content.
      Args:
-         path: The path of the file to modify (relative to the current working directory)
-         diff: The search/replace block defining the changes
-         start_line: The line number where the search block starts (1-based)
-         end_line: The line number where the search block ends (1-based)
-
+         path: (required) The path of the file to modify (relative to the CWD)
+         diff: (required) The search/replace block defining the changes.
+             Format:
+             <<<<<<< SEARCH
+             :start_line:START_LINE_NUMBER
+             :end_line:END_LINE_NUMBER
+             -------
+             [exact content to find including whitespace]
+             =======
+             [new content to replace with]
+             >>>>>>> REPLACE
      Returns:
-         True if successful, False otherwise
-
-     The diff format should be:
-     ```
-     <<<<<<< SEARCH
-     [exact content to find including whitespace]
-     =======
-     [new content to replace with]
-     >>>>>>> REPLACE
-     ```
+         A JSON string indicating success or failure.
      """
      try:
-         # Read the file
+         start_line, end_line, search_content, replace_content = _parse_diff(diff)
          abs_path = os.path.abspath(path)
+         if not os.path.exists(abs_path):
+             return json.dumps(
+                 {"success": False, "path": path, "error": f"File not found at {path}"}
+             )
          content = _read_file(abs_path)
          lines = content.splitlines()
-
-         # Validate line numbers
          if start_line < 1 or end_line > len(lines) or start_line > end_line:
-             print(
-                 f"Invalid line range: {start_line}-{end_line} (file has {len(lines)} lines)"
+             return json.dumps(
+                 {
+                     "success": False,
+                     "path": path,
+                     "error": (
+                         f"Invalid line range {start_line}-{end_line} "
+                         f"for file with {len(lines)} lines."
+                     ),
+                 }
              )
-             return False
-
-         # Parse the diff
-         search_content, replace_content = _parse_diff(diff)
-         if search_content is None or replace_content is None:
-             print("Invalid diff format")
-             return False
-
-         # Extract the content to be replaced
          original_content = "\n".join(lines[start_line - 1 : end_line])
-
-         # Verify the search content matches
          if original_content != search_content:
-             print("Search content does not match the specified lines in the file")
-             return False
-
-         # Replace the content
+             error_message = (
+                 f"Search content does not match file content at "
+                 f"lines {start_line}-{end_line}.\n"
+                 f"Expected ({len(search_content.splitlines())} lines):\n"
+                 f"---\n{search_content}\n---\n"
+                 f"Actual ({len(lines[start_line-1:end_line])} lines):\n"
+                 f"---\n{original_content}\n---"
+             )
+             return json.dumps({"success": False, "path": path, "error": error_message})
          new_lines = (
              lines[: start_line - 1] + replace_content.splitlines() + lines[end_line:]
          )
          new_content = "\n".join(new_lines)
-
-         # Write the modified content back to the file
+         if content.endswith("\n"):
+             new_content += "\n"
          _write_file(abs_path, new_content)
-         return True
-
+         return json.dumps({"success": True, "path": path})
      except Exception as e:
-         print(f"Error applying diff to {path}: {str(e)}")
-         return False
-
-
- def _parse_diff(diff: str) -> Tuple[Optional[str], Optional[str]]:
-     """
-     Parse a diff string to extract search and replace content.
-
-     Args:
-         diff: The diff string to parse
-
-     Returns:
-         A tuple of (search_content, replace_content), or (None, None) if parsing fails
-     """
-     try:
-         # Split the diff into sections
-         search_marker = "<<<<<<< SEARCH"
-         separator = "======="
-         replace_marker = ">>>>>>> REPLACE"
-
-         if (
-             search_marker not in diff
-             or separator not in diff
-             or replace_marker not in diff
-         ):
-             return None, None
-
-         # Extract search content
-         search_start = diff.index(search_marker) + len(search_marker)
-         search_end = diff.index(separator)
-         search_content = diff[search_start:search_end].strip()
-
-         # Extract replace content
-         replace_start = diff.index(separator) + len(separator)
-         replace_end = diff.index(replace_marker)
-         replace_content = diff[replace_start:replace_end].strip()
-
-         return search_content, replace_content
-
-     except Exception:
-         return None, None
+         raise Exception(f"Error applying diff on {path}: {e}")
+
+
+ def _parse_diff(diff: str) -> tuple[int, int, str, str]:
+     """Parse diff content into components."""
+     search_marker = "<<<<<<< SEARCH"
+     meta_marker = "-------"
+     separator = "======="
+     replace_marker = ">>>>>>> REPLACE"
+     search_start_idx = diff.find(search_marker)
+     meta_start_idx = diff.find(meta_marker)
+     separator_idx = diff.find(separator)
+     replace_end_idx = diff.find(replace_marker)
+     if any(
+         idx == -1
+         for idx in [search_start_idx, meta_start_idx, separator_idx, replace_end_idx]
+     ):
+         raise ValueError("Invalid diff format - missing markers")
+     meta_content = diff[search_start_idx + len(search_marker) : meta_start_idx].strip()
+     start_line = int(re.search(r":start_line:(\d+)", meta_content).group(1))
+     end_line = int(re.search(r":end_line:(\d+)", meta_content).group(1))
+     search_content = diff[meta_start_idx + len(meta_marker) : separator_idx].strip(
+         "\r\n"
+     )
+     replace_content = diff[separator_idx + len(separator) : replace_end_idx].strip(
+         "\r\n"
+     )
+     return start_line, end_line, search_content, replace_content
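Note: apply_diff drops the separate start_line/end_line parameters; the range now travels inside the diff payload and is extracted by the new _parse_diff. A sketch of a well-formed payload (file name and contents are hypothetical):

import json

from zrb.builtin.llm.tool.file import apply_diff

diff_payload = (
    "<<<<<<< SEARCH\n"
    ":start_line:1\n"
    ":end_line:1\n"
    "-------\n"
    "old first line\n"
    "=======\n"
    "new first line\n"
    ">>>>>>> REPLACE"
)
# Succeeds only if line 1 of example.txt is exactly "old first line".
result = json.loads(apply_diff("example.txt", diff_payload))
print(result["success"])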
zrb/task/llm_task.py CHANGED
@@ -382,7 +382,7 @@ def _wrap_tool(func):
          except Exception as e:
              # Optionally, you can include more details from traceback if needed.
              error_details = traceback.format_exc()
-             return f"Error: {e}\nDetails: {error_details}"
+             return json.dumps({"error": f"{e}", "details": f"{error_details}"})
 
      new_sig = inspect.Signature(
          parameters=[
@@ -403,7 +403,7 @@ def _wrap_tool(func):
          except Exception as e:
              # Optionally, you can include more details from traceback if needed.
              error_details = traceback.format_exc()
-             return f"Error: {e}\nDetails: {error_details}"
+             return json.dumps({"error": f"{e}", "details": f"{error_details}"})
 
      return wrapper
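Note: with this change, a failing tool call surfaces to the LLM as machine-readable JSON rather than a formatted string. Roughly, the except branch now behaves like this sketch:

import json
import traceback

try:
    raise ValueError("boom")  # stand-in for a failing tool call
except Exception as e:
    error_details = traceback.format_exc()
    print(json.dumps({"error": f"{e}", "details": f"{error_details}"}))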
 
zrb-1.5.4.dist-info/METADATA → zrb-1.5.5.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: zrb
- Version: 1.5.4
+ Version: 1.5.5
  Summary: Your Automation Powerhouse
  Home-page: https://github.com/state-alchemists/zrb
  License: AGPL-3.0-or-later
zrb-1.5.4.dist-info/RECORD → zrb-1.5.5.dist-info/RECORD
@@ -11,7 +11,7 @@ zrb/builtin/llm/llm_chat.py,sha256=QFuuZJm4tonykbY1P5Vdnn2acVqwM8GcsJ0gnaNB2uo,6
  zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
  zrb/builtin/llm/tool/api.py,sha256=U0_PhVuoDLpq4Jak5S45IHhCF1jKmfS0JC8XAnfnOhA,858
  zrb/builtin/llm/tool/cli.py,sha256=to_IjkfrMGs6eLfG0cpVN9oyADWYsJQCtyluUhUdBww,253
- zrb/builtin/llm/tool/file.py,sha256=A6x0f93oBU4JvrujVF3NQAUY6Hkrf_Iv9cfAMNsaDi4,17469
+ zrb/builtin/llm/tool/file.py,sha256=9AmTLYUILmQYHlfcb9Z9UisCH3nUVLCgleec2goP4Ao,16737
  zrb/builtin/llm/tool/rag.py,sha256=pX8N_bYv4axsjhULLvvZtQYW2klZOkeQZ2Tn16083vM,6860
  zrb/builtin/llm/tool/web.py,sha256=ZvIgOIMPIEfdih5I3TgVTsqTrwiKmDy60zeKHVWrVeo,4922
  zrb/builtin/md5.py,sha256=0pNlrfZA0wlZlHvFHLgyqN0JZJWGKQIF5oXxO44_OJk,949
@@ -300,7 +300,7 @@ zrb/task/base_task.py,sha256=H1D2KyJ9qK0GIPJ4kYyvfRe7fseJibmLVGjj8iRuLs4,21320
  zrb/task/base_trigger.py,sha256=jC722rDvodaBLeNaFghkTyv1u0QXrK6BLZUUqcmBJ7Q,4581
  zrb/task/cmd_task.py,sha256=pUKRSR4DZKjbmluB6vi7cxqyhxOLfJ2czSpYeQbiDvo,10705
  zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
- zrb/task/llm_task.py,sha256=L5VFRJBgcYuucBtF3fTemjD0E-8fzA40HHeCm8T56CQ,20659
+ zrb/task/llm_task.py,sha256=kwJG6hhCga3sbXM8iiaTkFc1V4jlj4X5-3VeNjL5omE,20701
  zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
  zrb/task/rsync_task.py,sha256=GSL9144bmp6F0EckT6m-2a1xG25AzrrWYzH4k3SVUKM,6370
  zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -341,7 +341,7 @@ zrb/util/string/name.py,sha256=8picJfUBXNpdh64GNaHv3om23QHhUZux7DguFLrXHp8,1163
  zrb/util/todo.py,sha256=1nDdwPc22oFoK_1ZTXyf3638Bg6sqE2yp_U4_-frHoc,16015
  zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
- zrb-1.5.4.dist-info/METADATA,sha256=OeBW0SVw6MmYpkFACsrADLiJctonRhH9ZVQPKzgrjAI,8470
- zrb-1.5.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- zrb-1.5.4.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
- zrb-1.5.4.dist-info/RECORD,,
+ zrb-1.5.5.dist-info/METADATA,sha256=tuRLFvxurb0dl4sd2n3ztedkjrGsWn4xSS3zOuCjSl8,8470
+ zrb-1.5.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ zrb-1.5.5.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+ zrb-1.5.5.dist-info/RECORD,,