zrb 1.5.3__py3-none-any.whl → 1.5.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/tool/file.py +289 -425
- zrb/task/base_task.py +6 -4
- zrb/task/llm_task.py +40 -26
- {zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/METADATA +2 -2
- {zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/RECORD +7 -7
- {zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/WHEEL +0 -0
- {zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/file.py
CHANGED
@@ -1,510 +1,374 @@
 import fnmatch
+import json
 import os
 import re
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional
 
 from zrb.util.file import read_file as _read_file
 from zrb.util.file import write_file as _write_file
 
-# Common directories and files to exclude from file operations
-_DEFAULT_EXCLUDES = [
-    # Version control
-    ".git",
-    ".svn",
-    ".hg",
-    # Dependencies and packages
-    "node_modules",
-    "venv",
-    ".venv",
-    "env",
-    ".env",
-    # Build and cache
-    "__pycache__",
-    "*.pyc",
-    "build",
-    "dist",
-    "target",
-    # IDE and editor files
-    ".idea",
-    ".vscode",
-    "*.swp",
-    "*.swo",
-    # OS-specific
-    ".DS_Store",
-    "Thumbs.db",
-    # Temporary and backup files
-    "*.tmp",
-    "*.bak",
-    "*.log",
-]
-
-# Maximum number of lines to read before truncating
-_MAX_LINES_BEFORE_TRUNCATION = 1000
-
-# Number of context lines to show around method definitions when truncating
-_CONTEXT_LINES = 5
-
 
 def list_files(
-    path: str = ".",
-
-    file_pattern: Optional[str] = None,
-    excluded_patterns: list[str] = _DEFAULT_EXCLUDES,
-) -> list[str]:
+    path: str = ".", recursive: bool = True, include_hidden: bool = False
+) -> str:
     """
-
+    Request to list files and directories within the specified directory.
+    If recursive is true, it will list all files and directories recursively.
+    If recursive is false or not provided, it will only list the top-level contents.
     Args:
-        path: The path of the directory to list contents for
-
-            None by default (all files will be included).
-        excluded_patterns: List of glob patterns to exclude. By default, contains sane values
-            to exclude common directories and files like version control, build artifacts,
-            and temporary files.
-
+        path: (required) The path of the directory to list contents for (relative to the CWD)
+        recursive: (optional) Whether to list files recursively.
+            Use true for recursive listing, false or omit for top-level only.
+        include_hidden: (optional) Whether to include hidden files/directories.
+            Defaults to False (exclude hidden files).
     Returns:
-        A list of file paths
+        A JSON string containing a list of file paths or an error message.
+        Example success: '{"files": ["file1.txt", "subdir/file2.py"]}'
+        Example error: '{"error": "Error listing files: [Errno 2] No such file..."}'
     """
-    all_files:
-
-            d
-        for
-
+    all_files: List[str] = []
+    abs_path = os.path.abspath(path)
+    try:
+        if recursive:
+            for root, dirs, files in os.walk(abs_path):
+                # Skip hidden directories (like .git) for performance and relevance
+                dirs[:] = [d for d in dirs if include_hidden or not _is_hidden(d)]
+                for filename in files:
+                    # Skip hidden files
+                    if include_hidden or not _is_hidden(filename):
+                        all_files.append(os.path.join(root, filename))
+        else:
+            # Non-recursive listing (top-level only)
+            for item in os.listdir(abs_path):
+                full_path = os.path.join(abs_path, item)
+                # Include both files and directories if not recursive
+                if include_hidden or not _is_hidden(
+                    item
+                ):  # Skip hidden items unless included
+                    all_files.append(full_path)
+
+        # Return paths relative to the original path requested
         try:
-
-    full_path: str, excluded_patterns: list[str] = _DEFAULT_EXCLUDES
-) -> bool:
-    """
-    Return True if the file at full_path should be excluded based on
-    the list of excluded_patterns. Patterns that include a path separator
-    are applied to the full normalized path; otherwise they are matched
-    against each individual component of the path.
-
-    Args:
-        full_path: The full path to check
-        excluded_patterns: List of patterns to exclude
-
-    Returns:
-        True if the path should be excluded, False otherwise
-    """
-    norm_path = os.path.normpath(full_path)
-    path_parts = norm_path.split(os.sep)
+            rel_files = [
+                os.path.relpath(f, os.path.dirname(abs_path)) for f in all_files
+            ]
+            return json.dumps({"files": sorted(rel_files)})
+        except (
+            ValueError
+        ) as e:  # Handle case where path is '.' and abs_path is CWD root
+            if "path is on mount '" in str(e) and "' which is not on mount '" in str(e):
+                # If paths are on different mounts, just use absolute paths
+                rel_files = all_files
+                return json.dumps({"files": sorted(rel_files)})
+            raise
+    except Exception as e:
+        raise Exception(f"Error listing files in {path}: {e}")
 
-    for pat in excluded_patterns:
-        # If the pattern seems intended for full path matching (contains a separator)
-        if os.sep in pat or "/" in pat:
-            if fnmatch.fnmatch(norm_path, pat):
-                return True
-        else:
-            # Otherwise check each part of the path
-            if any(fnmatch.fnmatch(part, pat) for part in path_parts):
-                return True
-            # Also check the filename against the pattern
-            if os.path.isfile(full_path) and fnmatch.fnmatch(
-                os.path.basename(full_path), pat
-            ):
-                return True
 
-
+def _is_hidden(path: str) -> bool:
+    """Check if path is hidden (starts with '.')."""
+    return os.path.basename(path).startswith(".")
 
 
 def read_from_file(
     path: str,
     start_line: Optional[int] = None,
     end_line: Optional[int] = None,
-    auto_truncate: bool = False,
 ) -> str:
     """
-
+    Request to read the contents of a file at the specified path. Use this when you need
+    to examine the contents of an existing file you do not know the contents of, for example
+    to analyze code, review text files, or extract information from configuration files.
+    The output includes line numbers prefixed to each line (e.g. "1 | const x = 1"),
+    making it easier to reference specific lines when creating diffs or discussing code.
+    By specifying start_line and end_line parameters, you can efficiently read specific
+    portions of large files without loading the entire file into memory. Automatically
+    extracts raw text from PDF and DOCX files. May not be suitable for other types of
+    binary files, as it returns the raw content as a string.
     Args:
-        path: The path of the file to read (relative to the
-        start_line: The starting line number to read from (1-based).
-            If not provided, starts from the beginning.
-        end_line: The ending line number to read to (1-based, inclusive).
-            If not provided, reads to the end.
-        auto_truncate: Whether to automatically truncate large files when start_line
-            and end_line are not specified. If true and the file exceeds a certain
-            line threshold, it will return a subset of lines with information about
-            the total line count and method definitions. Default is False for backward
-            compatibility, but setting to True is recommended for large files.
-
+        path: (required) The path of the file to read (relative to the CWD)
+        start_line: (optional) The starting line number to read from (1-based).
+            If not provided, it starts from the beginning of the file.
+        end_line: (optional) The ending line number to read to (1-based, inclusive).
+            If not provided, it reads to the end of the file.
    Returns:
-        A string containing the file content,
-
+        A JSON string containing the file path, content, and line range, or an error.
+        Example success: '{"path": "f.py", "content": "...", "start_line": 1, "end_line": 2}'
+        Example error: '{"error": "File not found: data.txt"}'
     """
     try:
         abs_path = os.path.abspath(path)
-
+        # Check if file exists
+        if not os.path.exists(abs_path):
+            return json.dumps({"error": f"File {path} does not exist"})
         content = _read_file(abs_path)
         lines = content.splitlines()
         total_lines = len(lines)
-
-        # Determine if we should truncate
-        should_truncate = (
-            auto_truncate
-            and start_line is None
-            and end_line is None
-            and total_lines > _MAX_LINES_BEFORE_TRUNCATION
-        )
-
         # Adjust line indices (convert from 1-based to 0-based)
         start_idx = (start_line - 1) if start_line is not None else 0
         end_idx = end_line if end_line is not None else total_lines
-
         # Validate indices
         if start_idx < 0:
             start_idx = 0
         if end_idx > total_lines:
             end_idx = total_lines
-
-            for i in range(first_chunk):
-                result_lines.append(f"{i+1} | {lines[i]}")
-
-            result_lines.append("...")
-            omitted_msg = (
-                f"[{first_chunk+1} - {total_lines-100}] Lines omitted for brevity"
-            )
-            result_lines.append(omitted_msg)
-            result_lines.append("...")
-
-            # Add end of file (last 100 lines)
-            for i in range(max(first_chunk, total_lines - 100), total_lines):
-                result_lines.append(f"{i+1} | {lines[i]}")
-
-            # Add method definitions summary
-            if method_info:
-                result_lines.append("")
-                result_lines.append("Method definitions found:")
-                for method in method_info:
-                    method_line = (
-                        f"- {method['name']} "
-                        f"(lines {method['start_line']}-{method['end_line']})"
-                    )
-                    result_lines.append(method_line)
-
-            return "\n".join(result_lines)
-        else:
-            # Return the requested range with line numbers
-            result_lines = []
-            for i in range(start_idx, end_idx):
-                result_lines.append(f"{i+1} | {lines[i]}")
-
-            return "\n".join(result_lines)
-
+        if start_idx > end_idx:
+            start_idx = end_idx
+        # Select the lines for the result
+        selected_lines = lines[start_idx:end_idx]
+        content_result = "\n".join(selected_lines)
+        return json.dumps(
+            {
+                "path": path,
+                "content": content_result,
+                "start_line": start_idx + 1,  # Convert back to 1-based for output
+                "end_line": end_idx,  # end_idx is already exclusive upper bound
+                "total_lines": total_lines,
+            }
+        )
     except Exception as e:
-
-def _find_method_definitions(lines: List[str]) -> List[Dict[str, Union[str, int]]]:
-    """
-    Find method definitions in the given lines of code.
-
-    Args:
-        lines: List of code lines to analyze
-
-    Returns:
-        List of dictionaries containing method name, start line, and end line
-    """
-    method_info = []
-
-    # Simple regex patterns for common method/function definitions
-    patterns = [
-        # Python
-        r"^\s*def\s+([a-zA-Z0-9_]+)\s*\(",
-        # JavaScript/TypeScript
-        r"^\s*(function\s+([a-zA-Z0-9_]+)|([a-zA-Z0-9_]+)\s*=\s*function|"
-        r"\s*([a-zA-Z0-9_]+)\s*\([^)]*\)\s*{)",
-        # Java/C#/C++
-        r"^\s*(?:public|private|protected|static|final|abstract|synchronized)?"
-        r"\s+(?:[a-zA-Z0-9_<>[\]]+\s+)+([a-zA-Z0-9_]+)\s*\(",
-    ]
-
-    current_method = None
-
-    for i, line in enumerate(lines):
-        # Check if this line starts a method definition
-        for pattern in patterns:
-            match = re.search(pattern, line)
-            if match:
-                # If we were tracking a method, close it
-                if current_method:
-                    current_method["end_line"] = i
-                    method_info.append(current_method)
-
-                # Start tracking a new method
-                method_name = next(
-                    group for group in match.groups() if group is not None
-                )
-                current_method = {
-                    "name": method_name,
-                    "start_line": i + 1,  # 1-based line numbering
-                    "end_line": None,
-                }
-                break
-
-        # Check for method end (simplistic approach)
-        if current_method and line.strip() == "}":
-            current_method["end_line"] = i + 1
-            method_info.append(current_method)
-            current_method = None
+        raise Exception(f"Error reading file {path}: {e}")
 
-    # Close any open method at the end of the file
-    if current_method:
-        current_method["end_line"] = len(lines)
-        method_info.append(current_method)
 
-
-def write_to_file(path: str, content: str) -> bool:
+def write_to_file(path: str, content: str, line_count: int) -> str:
     """
-
+    Request to write full content to a file at the specified path. If the file exists,
+    it will be overwritten with the provided content. If the file doesn't exist,
+    it will be created. This tool will automatically create any directories needed
+    to write the file.
     Args:
-        path: The path of the file to write to (relative to the
-        content: The content to write to the file
-
+        path: (required) The path of the file to write to (relative to the CWD)
+        content: (required) The content to write to the file. ALWAYS provide the COMPLETE
+            intended content of the file, without any truncation or omissions. You MUST
+            include ALL parts of the file, even if they haven't been modified. Do NOT
+            include the line numbers in the content though, just the actual content
+            of the file.
+        line_count: (required) The number of lines in the file. Make sure to compute
+            this based on the actual content of the file, not the number of lines
+            in the content you're providing.
     Returns:
-
+        A JSON string indicating success or failure, including any warnings.
+        Example success: '{"success": true, "path": "new_config.json"}'
+        Example success with warning: '{"success": true, "path": "f.txt", "warning": "..."}'
+        Example error: '{"success": false, "error": "Permission denied: /etc/hosts"}'
     """
+    actual_lines = len(content.splitlines())
+    warning = None
+    if actual_lines != line_count:
+        warning = (
+            f"Provided line_count ({line_count}) does not match actual "
+            f"content lines ({actual_lines}) for file {path}"
+        )
     try:
+        abs_path = os.path.abspath(path)
         # Ensure directory exists
-        directory = os.path.dirname(
+        directory = os.path.dirname(abs_path)
         if directory and not os.path.exists(directory):
             os.makedirs(directory, exist_ok=True)
-
+        _write_file(abs_path, content)
+        result_data = {"success": True, "path": path}
+        if warning:
+            result_data["warning"] = warning
+        return json.dumps(result_data)
     except Exception as e:
-
-        return False
+        raise Exception(f"Error writing file {e}")
 
 
 def search_files(
-    path: str,
+    path: str,
+    regex: str,
+    file_pattern: Optional[str] = None,
+    include_hidden: bool = False,
 ) -> str:
     """
-
+    Request to perform a regex search across files in a specified directory,
+    providing context-rich results. This tool searches for patterns or specific
+    content across multiple files, displaying each match with encapsulating context.
     Args:
-        path: The path of the directory to search in
-
-        regex: The regular expression pattern to search for
-
+        path: (required) The path of the directory to search in (relative to the CWD).
+            This directory will be recursively searched.
+        regex: (required) The regular expression pattern to search for. Uses Rust regex syntax.
+            (Note: Python's `re` module will be used here, which has similar syntax)
+        file_pattern: (optional) Glob pattern to filter files (e.g., '*.ts').
+            If not provided, searches all files (*).
+        include_hidden: (optional) Whether to include hidden files.
+            Defaults to False (exclude hidden files).
     Returns:
-        A string containing the search results
+        A JSON string containing the search results or an error message.
+        Example success: '{"summary": "Found 5 matches...", "results": [{"file":"f.py", ...}]}'
+        Example no match: '{"summary": "No matches found...", "results": []}'
+        Example error: '{"error": "Invalid regex: ..."}'
     """
     try:
-        # Compile the regex pattern
         pattern = re.compile(regex)
+    except re.error as e:
+        raise Exception(f"Invalid regex pattern: {e}")
+    search_results = {"summary": "", "results": []}
+    match_count = 0
+    searched_file_count = 0
+    file_match_count = 0
+    try:
+        abs_path = os.path.abspath(path)
+        for root, dirs, files in os.walk(abs_path):
+            # Skip hidden directories
+            dirs[:] = [d for d in dirs if include_hidden or not _is_hidden(d)]
+            for filename in files:
+                # Skip hidden files
+                if not include_hidden and _is_hidden(filename):
+                    continue
+                # Apply file pattern filter if provided
+                if file_pattern and not fnmatch.fnmatch(filename, file_pattern):
+                    continue
+                file_path = os.path.join(root, filename)
+                rel_file_path = os.path.relpath(file_path, os.getcwd())
+                searched_file_count += 1
+                try:
+                    matches = _get_file_matches(file_path, pattern)
+                    if matches:
+                        file_match_count += 1
+                        match_count += len(matches)
+                        search_results["results"].append(
+                            {"file": rel_file_path, "matches": matches}
+                        )
+                except IOError as e:
+                    search_results["results"].append(
+                        {"file": rel_file_path, "error": str(e)}
+                    )
+        if match_count == 0:
+            search_results["summary"] = (
+                f"No matches found for pattern '{regex}' in path '{path}' "
+                f"(searched {searched_file_count} files)."
+            )
+        else:
+            search_results["summary"] = (
+                f"Found {match_count} matches in {file_match_count} files "
+                f"(searched {searched_file_count} files)."
+            )
+        return json.dumps(search_results, indent=2)  # Pretty print for readability
+    except Exception as e:
+        raise Exception(f"Error searching files: {e}")
 
-        # Get the list of files to search
-        files = list_files(path, recursive=True, file_pattern=file_pattern)
-
-        results = []
-        match_count = 0
-
-        for file_path in files:
-            try:
-                with open(file_path, "r", encoding="utf-8", errors="replace") as f:
-                    lines = f.readlines()
-
-                file_matches = []
-
-                for i, line in enumerate(lines):
-                    if pattern.search(line):
-                        # Determine context range
-                        start = max(0, i - context_lines)
-                        end = min(len(lines), i + context_lines + 1)
-
-                        # Add file header if this is the first match in the file
-                        if not file_matches:
-                            file_matches.append(
-                                f"\n{'-' * 80}\n{file_path}\n{'-' * 80}"
-                            )
-
-                        # Add separator if this isn't the first match and isn't contiguous
-                        # with previous
-                        if (
-                            file_matches
-                            and file_matches[-1] != f"Line {start+1}-{end}:"
-                        ):
-                            file_matches.append(f"\nLine {start+1}-{end}:")
-
-                        # Add context lines
-                        for j in range(start, end):
-                            prefix = ">" if j == i else " "
-                            file_matches.append(f"{prefix} {j+1}: {lines[j].rstrip()}")
-
-                        match_count += 1
-
-                if file_matches:
-                    results.extend(file_matches)
-
-            except Exception as e:
-                results.append(f"Error reading {file_path}: {str(e)}")
-
-        if not results:
-            return f"No matches found for pattern '{regex}' in {path}"
-
-        # Count unique files by counting headers
-        file_count = len([r for r in results if r.startswith("-" * 80)])
-        summary = f"Found {match_count} matches in {file_count} files:\n"
-        return summary + "\n".join(results)
 
+def _get_file_matches(
+    file_path: str, pattern: re.Pattern, context_lines: int = 2
+) -> List[Dict[str, Any]]:
+    """Search for regex matches in a file with context."""
+    try:
+        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
+            lines = f.readlines()
+        matches = []
+        for line_idx, line in enumerate(lines):
+            if pattern.search(line):
+                line_num = line_idx + 1
+                context_start = max(0, line_idx - context_lines)
+                context_end = min(len(lines), line_idx + context_lines + 1)
+                match_data = {
+                    "line_number": line_num,
+                    "line_content": line.rstrip(),
+                    "context_before": [
+                        lines[j].rstrip() for j in range(context_start, line_idx)
+                    ],
+                    "context_after": [
+                        lines[j].rstrip() for j in range(line_idx + 1, context_end)
+                    ],
+                }
+                matches.append(match_data)
+        return matches
     except Exception as e:
-
+        raise IOError(f"Error reading {file_path}: {str(e)}")
 
 
-def apply_diff(path: str, diff: str
+def apply_diff(path: str, diff: str) -> str:
     """
-
+    Request to replace existing code using a search and replace block.
+    This tool allows for precise, surgical replaces to files by specifying exactly
+    what content to search for and what to replace it with.
+    The tool will maintain proper indentation and formatting while making changes.
+    Only a single operation is allowed per tool use.
+    The SEARCH section must exactly match existing content including whitespace
+    and indentation.
+    If you're not confident in the exact content to search for, use the read_file tool
+    first to get the exact content.
     Args:
-        path: The path of the file to modify (relative to the
-        diff: The search/replace block defining the changes
-
+        path: (required) The path of the file to modify (relative to the CWD)
+        diff: (required) The search/replace block defining the changes.
+            Format:
+            <<<<<<< SEARCH
+            :start_line:START_LINE_NUMBER
+            :end_line:END_LINE_NUMBER
+            -------
+            [exact content to find including whitespace]
+            =======
+            [new content to replace with]
+            >>>>>>> REPLACE
     Returns:
-
-    The diff format should be:
-    ```
-    <<<<<<< SEARCH
-    [exact content to find including whitespace]
-    =======
-    [new content to replace with]
-    >>>>>>> REPLACE
-    ```
+        A JSON string indicating success or failure.
     """
     try:
-
+        start_line, end_line, search_content, replace_content = _parse_diff(diff)
         abs_path = os.path.abspath(path)
+        if not os.path.exists(abs_path):
+            return json.dumps(
+                {"success": False, "path": path, "error": f"File not found at {path}"}
+            )
         content = _read_file(abs_path)
         lines = content.splitlines()
-
-        # Validate line numbers
         if start_line < 1 or end_line > len(lines) or start_line > end_line:
-
+            return json.dumps(
+                {
+                    "success": False,
+                    "path": path,
+                    "error": (
+                        f"Invalid line range {start_line}-{end_line} "
+                        f"for file with {len(lines)} lines."
+                    ),
+                }
             )
-            return False
-
-        # Parse the diff
-        search_content, replace_content = _parse_diff(diff)
-        if search_content is None or replace_content is None:
-            print("Invalid diff format")
-            return False
-
-        # Extract the content to be replaced
         original_content = "\n".join(lines[start_line - 1 : end_line])
-
-        # Verify the search content matches
        if original_content != search_content:
-
+            error_message = (
+                f"Search content does not match file content at "
+                f"lines {start_line}-{end_line}.\n"
+                f"Expected ({len(search_content.splitlines())} lines):\n"
+                f"---\n{search_content}\n---\n"
+                f"Actual ({len(lines[start_line-1:end_line])} lines):\n"
+                f"---\n{original_content}\n---"
+            )
+            return json.dumps({"success": False, "path": path, "error": error_message})
         new_lines = (
             lines[: start_line - 1] + replace_content.splitlines() + lines[end_line:]
         )
         new_content = "\n".join(new_lines)
-
+        if content.endswith("\n"):
+            new_content += "\n"
         _write_file(abs_path, new_content)
-        return True
-
+        return json.dumps({"success": True, "path": path})
     except Exception as e:
-
-        ""
-
-        search_start = diff.index(search_marker) + len(search_marker)
-        search_end = diff.index(separator)
-        search_content = diff[search_start:search_end].strip()
-
-        # Extract replace content
-        replace_start = diff.index(separator) + len(separator)
-        replace_end = diff.index(replace_marker)
-        replace_content = diff[replace_start:replace_end].strip()
-
-        return search_content, replace_content
-
-    except Exception:
-        return None, None
+        raise Exception(f"Error applying diff on {path}: {e}")
+
+
+def _parse_diff(diff: str) -> tuple[int, int, str, str]:
+    """Parse diff content into components."""
+    search_marker = "<<<<<<< SEARCH"
+    meta_marker = "-------"
+    separator = "======="
+    replace_marker = ">>>>>>> REPLACE"
+    search_start_idx = diff.find(search_marker)
+    meta_start_idx = diff.find(meta_marker)
+    separator_idx = diff.find(separator)
+    replace_end_idx = diff.find(replace_marker)
+    if any(
+        idx == -1
+        for idx in [search_start_idx, meta_start_idx, separator_idx, replace_end_idx]
+    ):
+        raise ValueError("Invalid diff format - missing markers")
+    meta_content = diff[search_start_idx + len(search_marker) : meta_start_idx].strip()
+    start_line = int(re.search(r":start_line:(\d+)", meta_content).group(1))
+    end_line = int(re.search(r":end_line:(\d+)", meta_content).group(1))
+    search_content = diff[meta_start_idx + len(meta_marker) : separator_idx].strip(
+        "\r\n"
+    )
+    replace_content = diff[separator_idx + len(separator) : replace_end_idx].strip(
+        "\r\n"
+    )
+    return start_line, end_line, search_content, replace_content
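Taken together, the 1.5.5 tools trade plain strings and booleans for JSON-string returns, and apply_diff now carries its target line range inside the SEARCH block. A minimal usage sketch of the new contract (the file name "app.py" and its contents are hypothetical; the import path follows RECORD's zrb/builtin/llm/tool/file.py entry):

import json

from zrb.builtin.llm.tool.file import apply_diff, read_from_file

# read_from_file now returns a JSON string instead of raw text
file_info = json.loads(read_from_file("app.py", start_line=3, end_line=3))
print(file_info["content"])  # assumed to be: print("hello")

# The SEARCH block embeds the 1-based line range via :start_line:/:end_line:
diff_block = """<<<<<<< SEARCH
:start_line:3
:end_line:3
-------
print("hello")
=======
print("hello, world")
>>>>>>> REPLACE"""
result = json.loads(apply_diff("app.py", diff_block))
print(result)  # e.g. {"success": True, "path": "app.py"}

Because _parse_diff matches the SEARCH content byte for byte against the file, a mismatch returns a structured error listing the expected and actual lines rather than silently failing.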
zrb/task/base_task.py
CHANGED
@@ -1,6 +1,6 @@
 import asyncio
 import os
-from collections.abc import Callable
+from collections.abc import Callable, Coroutine
 from typing import Any
 
 from zrb.attr.type import BoolAttr, fstring
@@ -64,7 +64,7 @@ class BaseTask(AnyTask):
     def __repr__(self):
         return f"<{self.__class__.__name__} name={self._name}>"
 
-    def __rshift__(self, other: AnyTask | list[AnyTask]) -> AnyTask:
+    def __rshift__(self, other: AnyTask | list[AnyTask]) -> AnyTask | list[AnyTask]:
         try:
             if isinstance(other, AnyTask):
                 other.append_upstream(self)
@@ -123,7 +123,9 @@ class BaseTask(AnyTask):
         return [task_input for task_input in inputs if task_input is not None]
 
     def __combine_inputs(
-        self,
+        self,
+        inputs: list[AnyInput],
+        other_inputs: list[AnyInput | None] | AnyInput | None,
     ):
         input_names = [task_input.name for task_input in inputs]
         if isinstance(other_inputs, AnyInput):
@@ -429,7 +431,7 @@ class BaseTask(AnyTask):
         session.get_task_status(self).reset()
         # defer this action
         ctx.log_info("Running")
-        action_coro = asyncio.create_task(
+        action_coro: Coroutine = asyncio.create_task(
            run_async(self.__exec_action_and_retry(session))
        )
        session.defer_action(self, action_coro)
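The widened __rshift__ annotation documents behavior the operator already had: it returns its right-hand operand, which may be a single task or a list. A short sketch (the task definitions are illustrative, assuming zrb's public CmdTask API; only the chaining semantics come from the diff above):

from zrb import CmdTask

build = CmdTask(name="build", cmd="echo build")
test = CmdTask(name="test", cmd="echo test")
lint = CmdTask(name="lint", cmd="echo lint")

# Registers `build` as upstream of both tasks; per the new annotation,
# the expression evaluates to the list it was given, not a single AnyTask.
downstream = build >> [test, lint]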
zrb/task/llm_task.py
CHANGED
@@ -1,4 +1,3 @@
-import copy
 import functools
 import inspect
 import json
@@ -9,6 +8,7 @@ from typing import Any
 
 from openai import APIError
 from pydantic_ai import Agent, Tool
+from pydantic_ai.mcp import MCPServer
 from pydantic_ai.messages import (
     FinalResultEvent,
     FunctionToolCallEvent,
@@ -54,9 +54,9 @@ class LLMTask(BaseTask):
             Callable[[AnySharedContext], Model | str | fstring] | Model | None
         ) = None,
         render_model: bool = True,
-        model_base_url: StrAttr = None,
+        model_base_url: StrAttr | None = None,
         render_model_base_url: bool = True,
-        model_api_key: StrAttr = None,
+        model_api_key: StrAttr | None = None,
         render_model_api_key: bool = True,
         model_settings: (
            ModelSettings | Callable[[AnySharedContext], ModelSettings] | None
@@ -68,6 +68,9 @@ class LLMTask(BaseTask):
         tools: (
             list[ToolOrCallable] | Callable[[AnySharedContext], list[ToolOrCallable]]
         ) = [],
+        mcp_servers: (
+            list[MCPServer] | Callable[[AnySharedContext], list[MCPServer]]
+        ) = [],
         conversation_history: (
             ListOfDict | Callable[[AnySharedContext], ListOfDict]
         ) = [],
@@ -127,6 +130,8 @@ class LLMTask(BaseTask):
         self._message = message
         self._tools = tools
         self._additional_tools: list[ToolOrCallable] = []
+        self._mcp_servers = mcp_servers
+        self._additional_mcp_servers: list[MCPServer] = []
         self._conversation_history = conversation_history
         self._conversation_history_reader = conversation_history_reader
         self._conversation_history_writer = conversation_history_writer
@@ -137,32 +142,36 @@ class LLMTask(BaseTask):
     def add_tool(self, tool: ToolOrCallable):
         self._additional_tools.append(tool)
 
+    def add_mcp_server(self, mcp_server: MCPServer):
+        self._additional_mcp_servers.append(mcp_server)
+
     async def _exec_action(self, ctx: AnyContext) -> Any:
         history = await self._read_conversation_history(ctx)
         user_prompt = self._get_message(ctx)
         agent = self._get_agent(ctx)
         try:
-            async with agent.
-
+            async with agent.run_mcp_servers():
+                async with agent.iter(
+                    user_prompt=user_prompt,
+                    message_history=ModelMessagesTypeAdapter.validate_python(history),
+                ) as agent_run:
+                    async for node in agent_run:
+                        # Each node represents a step in the agent's execution
+                        # Reference: https://ai.pydantic.dev/agents/#streaming
+                        try:
+                            await self._print_node(ctx, agent_run, node)
+                        except APIError as e:
+                            # Extract detailed error information from the response
+                            error_details = _extract_api_error_details(e)
+                            ctx.log_error(f"API Error: {error_details}")
+                            raise
+                        except Exception as e:
+                            ctx.log_error(f"Error processing node: {str(e)}")
+                            ctx.log_error(f"Error type: {type(e).__name__}")
+                            raise
+                new_history = json.loads(agent_run.result.all_messages_json())
+                await self._write_conversation_history(ctx, new_history)
+                return agent_run.result.data
         except Exception as e:
             ctx.log_error(f"Error in agent execution: {str(e)}")
             raise
@@ -277,10 +286,15 @@ class LLMTask(BaseTask):
             tool if isinstance(tool, Tool) else Tool(_wrap_tool(tool), takes_ctx=False)
             for tool in tools_or_callables
         ]
+        mcp_servers = list(
+            self._mcp_servers(ctx) if callable(self._mcp_servers) else self._mcp_servers
+        )
+        mcp_servers.extend(self._additional_mcp_servers)
         return Agent(
             self._get_model(ctx),
             system_prompt=self._get_system_prompt(ctx),
             tools=tools,
+            mcp_servers=mcp_servers,
             model_settings=self._get_model_settings(ctx),
             retries=3,
         )
@@ -368,7 +382,7 @@ def _wrap_tool(func):
         except Exception as e:
             # Optionally, you can include more details from traceback if needed.
             error_details = traceback.format_exc()
-            return
+            return json.dumps({"error": f"{e}", "details": f"{error_details}"})
 
     new_sig = inspect.Signature(
         parameters=[
@@ -389,7 +403,7 @@ def _wrap_tool(func):
         except Exception as e:
             # Optionally, you can include more details from traceback if needed.
             error_details = traceback.format_exc()
-            return
+            return json.dumps({"error": f"{e}", "details": f"{error_details}"})
 
     return wrapper
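The MCP support threads through LLMTask end to end: an mcp_servers constructor argument, an add_mcp_server() method mirroring add_tool(), and agent.run_mcp_servers() wrapped around the agent loop. A sketch of how a server might be registered (MCPServerStdio and the server command are assumptions based on the pinned pydantic-ai 0.0.52, not shown in this diff):

from pydantic_ai.mcp import MCPServerStdio  # assumed transport class

from zrb import LLMTask

chat = LLMTask(
    name="chat",
    message="List the files in the current directory",
)
# mcp_servers=[...] can equally be passed to the constructor;
# "my_mcp_server" is a hypothetical module name.
chat.add_mcp_server(MCPServerStdio("python", args=["-m", "my_mcp_server"]))
# During _exec_action the agent loop now runs inside
# `async with agent.run_mcp_servers():`, so registered servers are
# started and stopped around each conversation.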
{zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.5.3
+Version: 1.5.5
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
@@ -26,7 +26,7 @@ Requires-Dist: openai (>=1.70.0,<2.0.0) ; extra == "rag" or extra == "all"
 Requires-Dist: pdfplumber (>=0.11.6,<0.12.0) ; extra == "rag" or extra == "all"
 Requires-Dist: playwright (>=1.51.0,<2.0.0) ; extra == "playwright" or extra == "all"
 Requires-Dist: psutil (>=7.0.0,<8.0.0)
-Requires-Dist: pydantic-ai (>=0.0.
+Requires-Dist: pydantic-ai (>=0.0.52,<0.0.53)
 Requires-Dist: python-dotenv (>=1.1.0,<2.0.0)
 Requires-Dist: python-jose[cryptography] (>=3.4.0,<4.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
{zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/RECORD
CHANGED
@@ -11,7 +11,7 @@ zrb/builtin/llm/llm_chat.py,sha256=QFuuZJm4tonykbY1P5Vdnn2acVqwM8GcsJ0gnaNB2uo,6
 zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
 zrb/builtin/llm/tool/api.py,sha256=U0_PhVuoDLpq4Jak5S45IHhCF1jKmfS0JC8XAnfnOhA,858
 zrb/builtin/llm/tool/cli.py,sha256=to_IjkfrMGs6eLfG0cpVN9oyADWYsJQCtyluUhUdBww,253
-zrb/builtin/llm/tool/file.py,sha256=
+zrb/builtin/llm/tool/file.py,sha256=9AmTLYUILmQYHlfcb9Z9UisCH3nUVLCgleec2goP4Ao,16737
 zrb/builtin/llm/tool/rag.py,sha256=pX8N_bYv4axsjhULLvvZtQYW2klZOkeQZ2Tn16083vM,6860
 zrb/builtin/llm/tool/web.py,sha256=ZvIgOIMPIEfdih5I3TgVTsqTrwiKmDy60zeKHVWrVeo,4922
 zrb/builtin/md5.py,sha256=0pNlrfZA0wlZlHvFHLgyqN0JZJWGKQIF5oXxO44_OJk,949
@@ -296,11 +296,11 @@ zrb/session_state_logger/file_session_state_logger.py,sha256=1ue7-Bcwg4wlLn2G_7A
 zrb/session_state_logger/session_state_logger_factory.py,sha256=wXf2DVmeRmx399MFYYty6uNcPZMcf7iayHBYCLGlhfc,189
 zrb/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/task/any_task.py,sha256=9rCdKe-Sayr34Han9AsbhRxFpkbk6Rteg1DOyETulwQ,4917
-zrb/task/base_task.py,sha256=
+zrb/task/base_task.py,sha256=H1D2KyJ9qK0GIPJ4kYyvfRe7fseJibmLVGjj8iRuLs4,21320
 zrb/task/base_trigger.py,sha256=jC722rDvodaBLeNaFghkTyv1u0QXrK6BLZUUqcmBJ7Q,4581
 zrb/task/cmd_task.py,sha256=pUKRSR4DZKjbmluB6vi7cxqyhxOLfJ2czSpYeQbiDvo,10705
 zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
-zrb/task/llm_task.py,sha256=
+zrb/task/llm_task.py,sha256=kwJG6hhCga3sbXM8iiaTkFc1V4jlj4X5-3VeNjL5omE,20701
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=GSL9144bmp6F0EckT6m-2a1xG25AzrrWYzH4k3SVUKM,6370
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -341,7 +341,7 @@ zrb/util/string/name.py,sha256=8picJfUBXNpdh64GNaHv3om23QHhUZux7DguFLrXHp8,1163
 zrb/util/todo.py,sha256=1nDdwPc22oFoK_1ZTXyf3638Bg6sqE2yp_U4_-frHoc,16015
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.5.3.dist-info/METADATA,sha256=
-zrb-1.5.3.dist-info/WHEEL,sha256=
-zrb-1.5.3.dist-info/entry_points.txt,sha256=
-zrb-1.5.3.dist-info/RECORD,,
+zrb-1.5.5.dist-info/METADATA,sha256=tuRLFvxurb0dl4sd2n3ztedkjrGsWn4xSS3zOuCjSl8,8470
+zrb-1.5.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.5.5.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.5.5.dist-info/RECORD,,
{zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/WHEEL
File without changes
{zrb-1.5.3.dist-info → zrb-1.5.5.dist-info}/entry_points.txt
File without changes