zrb 1.4.3__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/llm_chat.py +8 -6
- zrb/builtin/llm/tool/file.py +471 -115
- zrb/builtin/llm/tool/web.py +2 -10
- zrb/builtin/todo.py +37 -12
- zrb/llm_config.py +31 -11
- {zrb-1.4.3.dist-info → zrb-1.5.0.dist-info}/METADATA +4 -5
- {zrb-1.4.3.dist-info → zrb-1.5.0.dist-info}/RECORD +9 -9
- {zrb-1.4.3.dist-info → zrb-1.5.0.dist-info}/WHEEL +0 -0
- {zrb-1.4.3.dist-info → zrb-1.5.0.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/llm_chat.py
CHANGED
@@ -6,10 +6,11 @@ from zrb.builtin.group import llm_group
|
|
6
6
|
from zrb.builtin.llm.tool.api import get_current_location, get_current_weather
|
7
7
|
from zrb.builtin.llm.tool.cli import run_shell_command
|
8
8
|
from zrb.builtin.llm.tool.file import (
|
9
|
+
apply_diff,
|
9
10
|
list_files,
|
10
|
-
|
11
|
-
|
12
|
-
|
11
|
+
read_from_file,
|
12
|
+
search_files,
|
13
|
+
write_to_file,
|
13
14
|
)
|
14
15
|
from zrb.builtin.llm.tool.web import (
|
15
16
|
create_search_internet_tool,
|
@@ -161,10 +162,11 @@ llm_chat: LLMTask = llm_group.add_task(
|
|
161
162
|
|
162
163
|
|
163
164
|
if LLM_ALLOW_ACCESS_LOCAL_FILE:
|
164
|
-
llm_chat.add_tool(read_all_files)
|
165
165
|
llm_chat.add_tool(list_files)
|
166
|
-
llm_chat.add_tool(
|
167
|
-
llm_chat.add_tool(
|
166
|
+
llm_chat.add_tool(read_from_file)
|
167
|
+
llm_chat.add_tool(write_to_file)
|
168
|
+
llm_chat.add_tool(search_files)
|
169
|
+
llm_chat.add_tool(apply_diff)
|
168
170
|
|
169
171
|
if LLM_ALLOW_ACCESS_SHELL:
|
170
172
|
llm_chat.add_tool(run_shell_command)
|
zrb/builtin/llm/tool/file.py
CHANGED
@@ -1,120 +1,125 @@
|
|
1
1
|
import fnmatch
|
2
2
|
import os
|
3
|
+
import re
|
4
|
+
from typing import Dict, List, Optional, Tuple, Union
|
3
5
|
|
4
|
-
from zrb.util.file import read_file
|
5
|
-
|
6
|
-
_INCLUDED_PATTERNS: list[str] = [
|
7
|
-
"*.py", # Python
|
8
|
-
"*.go", # Go
|
9
|
-
"*.rs", # Rust
|
10
|
-
"*.js", # JavaScript
|
11
|
-
"*.ts", # TypeScript
|
12
|
-
"*.java", # Java
|
13
|
-
"*.c", # C
|
14
|
-
"*.cpp", # C++
|
15
|
-
"*.cc", # Alternative C++ extension
|
16
|
-
"*.cxx", # Alternative C++ extension
|
17
|
-
"*.rb", # Ruby
|
18
|
-
"*.swift", # Swift
|
19
|
-
"*.kt", # Kotlin
|
20
|
-
"*.php", # PHP
|
21
|
-
"*.pl", # Perl / Prolog
|
22
|
-
"*.pm", # Perl module
|
23
|
-
"*.sh", # Shell
|
24
|
-
"*.bat", # Batch
|
25
|
-
"*.ps1", # PowerShell
|
26
|
-
"*.R", # R (capital)
|
27
|
-
"*.r", # R (lowercase)
|
28
|
-
"*.scala", # Scala
|
29
|
-
"*.hs", # Haskell
|
30
|
-
"*.cs", # C#
|
31
|
-
"*.fs", # F#
|
32
|
-
"*.ex", # Elixir
|
33
|
-
"*.exs", # Elixir script
|
34
|
-
"*.erl", # Erlang
|
35
|
-
"*.hrl", # Erlang header
|
36
|
-
"*.dart", # Dart
|
37
|
-
"*.m", # Objective-C / Matlab (note: conflicts may arise)
|
38
|
-
"*.mm", # Objective-C++
|
39
|
-
"*.lua", # Lua
|
40
|
-
"*.jl", # Julia
|
41
|
-
"*.groovy", # Groovy
|
42
|
-
"*.clj", # Clojure
|
43
|
-
"*.cljs", # ClojureScript
|
44
|
-
"*.cljc", # Clojure common
|
45
|
-
"*.vb", # Visual Basic
|
46
|
-
"*.f90", # Fortran
|
47
|
-
"*.f95", # Fortran
|
48
|
-
"*.adb", # Ada
|
49
|
-
"*.ads", # Ada specification
|
50
|
-
"*.pas", # Pascal
|
51
|
-
"*.pp", # Pascal
|
52
|
-
"*.ml", # OCaml
|
53
|
-
"*.mli", # OCaml interface
|
54
|
-
"*.nim", # Nim
|
55
|
-
"*.rkt", # Racket
|
56
|
-
"*.d", # D
|
57
|
-
"*.lisp", # Common Lisp
|
58
|
-
"*.lsp", # Lisp variant
|
59
|
-
"*.cl", # Common Lisp
|
60
|
-
"*.scm", # Scheme
|
61
|
-
"*.st", # Smalltalk
|
62
|
-
"*.vhd", # VHDL
|
63
|
-
"*.vhdl", # VHDL
|
64
|
-
"*.v", # Verilog
|
65
|
-
"*.asm", # Assembly
|
66
|
-
"*.s", # Assembly (alternative)
|
67
|
-
"*.sql", # SQL (if desired)
|
68
|
-
]
|
6
|
+
from zrb.util.file import read_file as _read_file
|
7
|
+
from zrb.util.file import write_file as _write_file
|
69
8
|
|
70
|
-
#
|
71
|
-
|
72
|
-
|
9
|
+
# Common directories and files to exclude from file operations
|
10
|
+
_DEFAULT_EXCLUDES = [
|
11
|
+
# Version control
|
12
|
+
".git",
|
13
|
+
".svn",
|
14
|
+
".hg",
|
15
|
+
# Dependencies and packages
|
16
|
+
"node_modules",
|
17
|
+
"venv",
|
73
18
|
".venv",
|
74
|
-
"
|
75
|
-
".
|
76
|
-
|
77
|
-
"
|
78
|
-
"
|
79
|
-
"
|
80
|
-
"
|
81
|
-
"
|
82
|
-
|
83
|
-
".
|
84
|
-
".
|
19
|
+
"env",
|
20
|
+
".env",
|
21
|
+
# Build and cache
|
22
|
+
"__pycache__",
|
23
|
+
"*.pyc",
|
24
|
+
"build",
|
25
|
+
"dist",
|
26
|
+
"target",
|
27
|
+
# IDE and editor files
|
28
|
+
".idea",
|
29
|
+
".vscode",
|
30
|
+
"*.swp",
|
31
|
+
"*.swo",
|
32
|
+
# OS-specific
|
33
|
+
".DS_Store",
|
34
|
+
"Thumbs.db",
|
35
|
+
# Temporary and backup files
|
36
|
+
"*.tmp",
|
37
|
+
"*.bak",
|
38
|
+
"*.log",
|
85
39
|
]
|
86
40
|
|
41
|
+
# Maximum number of lines to read before truncating
|
42
|
+
_MAX_LINES_BEFORE_TRUNCATION = 1000
|
43
|
+
|
44
|
+
# Number of context lines to show around method definitions when truncating
|
45
|
+
_CONTEXT_LINES = 5
|
46
|
+
|
87
47
|
|
88
48
|
def list_files(
    path: str = ".",
    recursive: bool = True,
    file_pattern: Optional[str] = None,
    excluded_patterns: Optional[list[str]] = None,
) -> list[str]:
    """
    List files in a directory that match specified patterns.

    Args:
        path: The path of the directory to list contents for
            (relative to the current working directory)
        recursive: Whether to list files recursively.
            Use True for recursive listing, False for top-level only.
        file_pattern: Optional glob pattern to filter files.
            None by default (all files will be included).
        excluded_patterns: List of glob patterns to exclude. Defaults to
            _DEFAULT_EXCLUDES, which excludes common directories and files
            like version control, build artifacts, and temporary files.

    Returns:
        A sorted list of file paths matching the criteria
    """
    # Resolve the default lazily: a shared mutable default list could be
    # mutated by a caller and silently leak state into later calls.
    if excluded_patterns is None:
        excluded_patterns = _DEFAULT_EXCLUDES
    all_files: list[str] = []

    if recursive:
        for root, dirs, files in os.walk(path):
            # Prune excluded directories in place so os.walk never descends
            # into them (much cheaper than filtering files afterwards).
            dirs[:] = [
                d
                for d in dirs
                if not _should_exclude(os.path.join(root, d), excluded_patterns)
            ]

            for filename in files:
                full_path = os.path.join(root, filename)
                # None means "include every file"; otherwise glob-match the name
                if file_pattern is None or fnmatch.fnmatch(filename, file_pattern):
                    if not _should_exclude(full_path, excluded_patterns):
                        all_files.append(full_path)
    else:
        # Non-recursive listing (top-level only)
        try:
            for item in os.listdir(path):
                full_path = os.path.join(path, item)
                if os.path.isfile(full_path):
                    # None means "include every file"; otherwise glob-match the name
                    if file_pattern is None or fnmatch.fnmatch(item, file_pattern):
                        if not _should_exclude(full_path, excluded_patterns):
                            all_files.append(full_path)
        except (FileNotFoundError, PermissionError) as e:
            print(f"Error listing files in {path}: {e}")

    return sorted(all_files)
|
107
102
|
|
108
103
|
|
109
|
-
def _should_exclude(
|
104
|
+
def _should_exclude(
|
105
|
+
full_path: str, excluded_patterns: list[str] = _DEFAULT_EXCLUDES
|
106
|
+
) -> bool:
|
110
107
|
"""
|
111
108
|
Return True if the file at full_path should be excluded based on
|
112
109
|
the list of excluded_patterns. Patterns that include a path separator
|
113
110
|
are applied to the full normalized path; otherwise they are matched
|
114
111
|
against each individual component of the path.
|
112
|
+
|
113
|
+
Args:
|
114
|
+
full_path: The full path to check
|
115
|
+
excluded_patterns: List of patterns to exclude
|
116
|
+
|
117
|
+
Returns:
|
118
|
+
True if the path should be excluded, False otherwise
|
115
119
|
"""
|
116
120
|
norm_path = os.path.normpath(full_path)
|
117
121
|
path_parts = norm_path.split(os.sep)
|
122
|
+
|
118
123
|
for pat in excluded_patterns:
|
119
124
|
# If the pattern seems intended for full path matching (contains a separator)
|
120
125
|
if os.sep in pat or "/" in pat:
|
@@ -124,31 +129,382 @@ def _should_exclude(full_path: str, excluded_patterns: list[str]) -> bool:
|
|
124
129
|
# Otherwise check each part of the path
|
125
130
|
if any(fnmatch.fnmatch(part, pat) for part in path_parts):
|
126
131
|
return True
|
132
|
+
# Also check the filename against the pattern
|
133
|
+
if os.path.isfile(full_path) and fnmatch.fnmatch(
|
134
|
+
os.path.basename(full_path), pat
|
135
|
+
):
|
136
|
+
return True
|
137
|
+
|
127
138
|
return False
|
128
139
|
|
129
140
|
|
130
|
-
def
|
131
|
-
|
132
|
-
|
141
|
+
def read_from_file(
    path: str,
    start_line: Optional[int] = None,
    end_line: Optional[int] = None,
    auto_truncate: bool = False,
) -> str:
    """
    Read the contents of a file at the specified path.

    Args:
        path: The path of the file to read (relative to the current working directory)
        start_line: The starting line number to read from (1-based).
            If not provided, starts from the beginning.
        end_line: The ending line number to read to (1-based, inclusive).
            If not provided, reads to the end.
        auto_truncate: Whether to automatically truncate large files when start_line
            and end_line are not specified. If true and the file exceeds a certain
            line threshold, it will return a subset of lines with information about
            the total line count and method definitions. Default is False for backward
            compatibility, but setting to True is recommended for large files.

    Returns:
        A string containing the file content, with line numbers prefixed to each line.
        For truncated files, includes summary information.
    """
    try:
        abs_path = os.path.abspath(path)
        all_lines = _read_file(abs_path).splitlines()
        total_lines = len(all_lines)

        # Truncation only kicks in when the caller did not request an
        # explicit line range.
        truncate = (
            auto_truncate
            and start_line is None
            and end_line is None
            and total_lines > _MAX_LINES_BEFORE_TRUNCATION
        )

        # Convert the 1-based inclusive range into 0-based half-open indices,
        # clamping out-of-range values rather than raising.
        begin = 0 if start_line is None else max(start_line - 1, 0)
        stop = total_lines if end_line is None else min(end_line, total_lines)

        if not truncate:
            # Plain read: emit the requested range, each line prefixed with
            # its 1-based number.
            return "\n".join(
                f"{idx + 1} | {all_lines[idx]}" for idx in range(begin, stop)
            )

        # Truncated view: header, first chunk, omission notice, tail, and a
        # summary of detected method definitions.
        output = [f"File: {path} (truncated, {total_lines} lines total)", ""]

        head_count = min(100, total_lines // 3)
        output.extend(f"{idx + 1} | {all_lines[idx]}" for idx in range(head_count))

        output.append("...")
        output.append(f"[{head_count + 1} - {total_lines - 100}] Lines omitted for brevity")
        output.append("...")

        # Tail: the last 100 lines (never re-emitting the head chunk).
        for idx in range(max(head_count, total_lines - 100), total_lines):
            output.append(f"{idx + 1} | {all_lines[idx]}")

        methods = _find_method_definitions(all_lines)
        if methods:
            output.append("")
            output.append("Method definitions found:")
            for entry in methods:
                output.append(
                    f"- {entry['name']} (lines {entry['start_line']}-{entry['end_line']})"
                )

        return "\n".join(output)

    except Exception as e:
        return f"Error reading file {path}: {str(e)}"
|
241
|
+
|
242
|
+
|
243
|
+
def _find_method_definitions(lines: List[str]) -> List[Dict[str, Union[str, int]]]:
|
244
|
+
"""
|
245
|
+
Find method definitions in the given lines of code.
|
246
|
+
|
247
|
+
Args:
|
248
|
+
lines: List of code lines to analyze
|
249
|
+
|
250
|
+
Returns:
|
251
|
+
List of dictionaries containing method name, start line, and end line
|
252
|
+
"""
|
253
|
+
method_info = []
|
254
|
+
|
255
|
+
# Simple regex patterns for common method/function definitions
|
256
|
+
patterns = [
|
257
|
+
# Python
|
258
|
+
r"^\s*def\s+([a-zA-Z0-9_]+)\s*\(",
|
259
|
+
# JavaScript/TypeScript
|
260
|
+
r"^\s*(function\s+([a-zA-Z0-9_]+)|([a-zA-Z0-9_]+)\s*=\s*function|"
|
261
|
+
r"\s*([a-zA-Z0-9_]+)\s*\([^)]*\)\s*{)",
|
262
|
+
# Java/C#/C++
|
263
|
+
r"^\s*(?:public|private|protected|static|final|abstract|synchronized)?"
|
264
|
+
r"\s+(?:[a-zA-Z0-9_<>[\]]+\s+)+([a-zA-Z0-9_]+)\s*\(",
|
265
|
+
]
|
266
|
+
|
267
|
+
current_method = None
|
268
|
+
|
269
|
+
for i, line in enumerate(lines):
|
270
|
+
# Check if this line starts a method definition
|
271
|
+
for pattern in patterns:
|
272
|
+
match = re.search(pattern, line)
|
273
|
+
if match:
|
274
|
+
# If we were tracking a method, close it
|
275
|
+
if current_method:
|
276
|
+
current_method["end_line"] = i
|
277
|
+
method_info.append(current_method)
|
278
|
+
|
279
|
+
# Start tracking a new method
|
280
|
+
method_name = next(
|
281
|
+
group for group in match.groups() if group is not None
|
282
|
+
)
|
283
|
+
current_method = {
|
284
|
+
"name": method_name,
|
285
|
+
"start_line": i + 1, # 1-based line numbering
|
286
|
+
"end_line": None,
|
287
|
+
}
|
288
|
+
break
|
289
|
+
|
290
|
+
# Check for method end (simplistic approach)
|
291
|
+
if current_method and line.strip() == "}":
|
292
|
+
current_method["end_line"] = i + 1
|
293
|
+
method_info.append(current_method)
|
294
|
+
current_method = None
|
295
|
+
|
296
|
+
# Close any open method at the end of the file
|
297
|
+
if current_method:
|
298
|
+
current_method["end_line"] = len(lines)
|
299
|
+
method_info.append(current_method)
|
300
|
+
|
301
|
+
return method_info
|
302
|
+
|
303
|
+
|
304
|
+
def write_to_file(path: str, content: str) -> bool:
    """
    Write content to a file at the specified path.

    Creates the parent directory first if it does not exist yet.

    Args:
        path: The path of the file to write to (relative to the current working directory)
        content: The content to write to the file

    Returns:
        True if successful, False otherwise
    """
    try:
        target = os.path.abspath(path)
        parent_dir = os.path.dirname(target)
        # Make sure the destination directory exists before writing.
        if parent_dir and not os.path.exists(parent_dir):
            os.makedirs(parent_dir, exist_ok=True)

        _write_file(target, content)
        return True
    except Exception as e:
        # Report the failure instead of raising, matching the bool contract.
        print(f"Error writing to file {path}: {str(e)}")
        return False
|
327
|
+
|
328
|
+
|
329
|
+
def search_files(
    path: str, regex: str, file_pattern: Optional[str] = None, context_lines: int = 2
) -> str:
    """
    Search for a regex pattern across files in a specified directory.

    Args:
        path: The path of the directory to search in
            (relative to the current working directory)
        regex: The regular expression pattern to search for
        file_pattern: Optional glob pattern to filter files.
            Default is None, which includes all files. Only specify this if you need to
            filter to specific file types (but in most cases, leaving as None is better).
        context_lines: Number of context lines to show before and after each match.
            Default is 2, which provides good context without overwhelming output.

    Returns:
        A string containing the search results with context, or an error message
        if the pattern is invalid or the search fails.
    """
    try:
        # Compile once up front; an invalid pattern is reported via the
        # outer except as an error string.
        pattern = re.compile(regex)

        # Reuse list_files so exclusion rules stay consistent with listing.
        files = list_files(path, recursive=True, file_pattern=file_pattern)

        results = []
        match_count = 0

        for file_path in files:
            try:
                with open(file_path, "r", encoding="utf-8", errors="replace") as f:
                    lines = f.readlines()

                file_matches = []

                for i, line in enumerate(lines):
                    if pattern.search(line):
                        # Determine context range
                        start = max(0, i - context_lines)
                        end = min(len(lines), i + context_lines + 1)

                        # Add file header if this is the first match in the file
                        if not file_matches:
                            file_matches.append(
                                f"\n{'-' * 80}\n{file_path}\n{'-' * 80}"
                            )

                        # One range header per match. (The old "contiguity"
                        # check compared against a string missing its leading
                        # newline, so it was always true — removed as dead code.)
                        file_matches.append(f"\nLine {start+1}-{end}:")

                        # Add context lines, marking the matching line with ">"
                        for j in range(start, end):
                            prefix = ">" if j == i else " "
                            file_matches.append(f"{prefix} {j+1}: {lines[j].rstrip()}")

                        match_count += 1

                if file_matches:
                    results.extend(file_matches)

            except Exception as e:
                results.append(f"Error reading {file_path}: {str(e)}")

        if not results:
            return f"No matches found for pattern '{regex}' in {path}"

        # Count unique files by counting headers. Headers begin with "\n",
        # so strip it before comparing (the old startswith("-"*80) check
        # never matched and always reported 0 files).
        file_count = len(
            [r for r in results if r.lstrip("\n").startswith("-" * 80)]
        )
        summary = f"Found {match_count} matches in {file_count} files:\n"
        return summary + "\n".join(results)

    except Exception as e:
        return f"Error searching files: {str(e)}"
|
408
|
+
|
409
|
+
|
410
|
+
def apply_diff(path: str, diff: str, start_line: int, end_line: int) -> bool:
    """
    Replace existing code using a search and replace block.

    Args:
        path: The path of the file to modify (relative to the current working directory)
        diff: The search/replace block defining the changes
        start_line: The line number where the search block starts (1-based)
        end_line: The line number where the search block ends (1-based)

    Returns:
        True if successful, False otherwise

    The diff format should be:
    ```
    <<<<<<< SEARCH
    [exact content to find including whitespace]
    =======
    [new content to replace with]
    >>>>>>> REPLACE
    ```
    """
    try:
        abs_path = os.path.abspath(path)
        file_lines = _read_file(abs_path).splitlines()

        # Reject out-of-range or inverted ranges up front.
        range_ok = 1 <= start_line <= end_line <= len(file_lines)
        if not range_ok:
            print(
                f"Invalid line range: {start_line}-{end_line} (file has {len(file_lines)} lines)"
            )
            return False

        search_content, replace_content = _parse_diff(diff)
        if search_content is None or replace_content is None:
            print("Invalid diff format")
            return False

        # The SEARCH block must match the current file content exactly.
        current_slice = "\n".join(file_lines[start_line - 1 : end_line])
        if current_slice != search_content:
            print("Search content does not match the specified lines in the file")
            return False

        # Splice the replacement between the untouched surrounding lines.
        updated = (
            file_lines[: start_line - 1]
            + replace_content.splitlines()
            + file_lines[end_line:]
        )
        _write_file(abs_path, "\n".join(updated))
        return True

    except Exception as e:
        print(f"Error applying diff to {path}: {str(e)}")
        return False
|
472
|
+
|
473
|
+
|
474
|
+
def _parse_diff(diff: str) -> Tuple[Optional[str], Optional[str]]:
|
475
|
+
"""
|
476
|
+
Parse a diff string to extract search and replace content.
|
477
|
+
|
478
|
+
Args:
|
479
|
+
diff: The diff string to parse
|
480
|
+
|
481
|
+
Returns:
|
482
|
+
A tuple of (search_content, replace_content), or (None, None) if parsing fails
|
483
|
+
"""
|
484
|
+
try:
|
485
|
+
# Split the diff into sections
|
486
|
+
search_marker = "<<<<<<< SEARCH"
|
487
|
+
separator = "======="
|
488
|
+
replace_marker = ">>>>>>> REPLACE"
|
489
|
+
|
490
|
+
if (
|
491
|
+
search_marker not in diff
|
492
|
+
or separator not in diff
|
493
|
+
or replace_marker not in diff
|
494
|
+
):
|
495
|
+
return None, None
|
496
|
+
|
497
|
+
# Extract search content
|
498
|
+
search_start = diff.index(search_marker) + len(search_marker)
|
499
|
+
search_end = diff.index(separator)
|
500
|
+
search_content = diff[search_start:search_end].strip()
|
501
|
+
|
502
|
+
# Extract replace content
|
503
|
+
replace_start = diff.index(separator) + len(separator)
|
504
|
+
replace_end = diff.index(replace_marker)
|
505
|
+
replace_content = diff[replace_start:replace_end].strip()
|
506
|
+
|
507
|
+
return search_content, replace_content
|
508
|
+
|
509
|
+
except Exception:
|
510
|
+
return None, None
|
zrb/builtin/llm/tool/web.py
CHANGED
@@ -7,16 +7,13 @@ async def open_web_page(url: str) -> str:
|
|
7
7
|
"""Get content from a web page using a headless browser."""
|
8
8
|
|
9
9
|
async def get_page_content(page_url: str):
|
10
|
+
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" # noqa
|
10
11
|
try:
|
11
12
|
from playwright.async_api import async_playwright
|
12
13
|
|
13
14
|
async with async_playwright() as p:
|
14
15
|
browser = await p.chromium.launch(headless=True)
|
15
16
|
page = await browser.new_page()
|
16
|
-
# Set user agent to mimic a regular browser
|
17
|
-
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
|
18
|
-
user_agent += "AppleWebKit/537.36 (KHTML, like Gecko) "
|
19
|
-
user_agent += "Chrome/91.0.4472.124 Safari/537.36"
|
20
17
|
await page.set_extra_http_headers({"User-Agent": user_agent})
|
21
18
|
try:
|
22
19
|
# Navigate to the URL with a timeout of 30 seconds
|
@@ -44,12 +41,7 @@ async def open_web_page(url: str) -> str:
|
|
44
41
|
except ImportError:
|
45
42
|
import requests
|
46
43
|
|
47
|
-
response = requests.get(
|
48
|
-
url,
|
49
|
-
headers={
|
50
|
-
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" # noqa
|
51
|
-
},
|
52
|
-
)
|
44
|
+
response = requests.get(url, headers={"User-Agent": user_agent})
|
53
45
|
if response.status_code != 200:
|
54
46
|
msg = f"Unable to retrieve search results. Status code: {response.status_code}"
|
55
47
|
raise Exception(msg)
|
zrb/builtin/todo.py
CHANGED
@@ -25,6 +25,18 @@ from zrb.util.todo import (
|
|
25
25
|
)
|
26
26
|
|
27
27
|
|
28
|
+
def _get_filter_input(allow_positional_parsing: bool = False) -> StrInput:
    """
    Build the shared "filter" input used by the todo tasks.

    Args:
        allow_positional_parsing: Whether the input may be supplied positionally.

    Returns:
        A StrInput configured for the visual todo filter.
    """
    return StrInput(
        name="filter",
        description="Visual filter",
        prompt="Visual Filter",
        default=TODO_VISUAL_FILTER,
        allow_empty=True,
        always_prompt=False,
        allow_positional_parsing=allow_positional_parsing,
    )
|
38
|
+
|
39
|
+
|
28
40
|
@make_task(
|
29
41
|
name="add-todo",
|
30
42
|
input=[
|
@@ -51,6 +63,7 @@ from zrb.util.todo import (
|
|
51
63
|
prompt="Task context (space separated)",
|
52
64
|
allow_empty=True,
|
53
65
|
),
|
66
|
+
_get_filter_input(),
|
54
67
|
],
|
55
68
|
description="➕ Add todo",
|
56
69
|
group=todo_group,
|
@@ -82,16 +95,22 @@ def add_todo(ctx: AnyContext):
|
|
82
95
|
)
|
83
96
|
)
|
84
97
|
save_todo_list(todo_file_path, todo_list)
|
85
|
-
return get_visual_todo_list(todo_list,
|
98
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
86
99
|
|
87
100
|
|
88
|
-
@make_task(
|
101
|
+
@make_task(
|
102
|
+
name="list-todo",
|
103
|
+
input=_get_filter_input(allow_positional_parsing=True),
|
104
|
+
description="📋 List todo",
|
105
|
+
group=todo_group,
|
106
|
+
alias="list",
|
107
|
+
)
|
89
108
|
def list_todo(ctx: AnyContext):
|
90
109
|
todo_file_path = os.path.join(TODO_DIR, "todo.txt")
|
91
110
|
todo_list: list[TodoTaskModel] = []
|
92
111
|
if os.path.isfile(todo_file_path):
|
93
112
|
todo_list = load_todo_list(todo_file_path)
|
94
|
-
return get_visual_todo_list(todo_list,
|
113
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
95
114
|
|
96
115
|
|
97
116
|
@make_task(
|
@@ -127,7 +146,10 @@ def show_todo(ctx: AnyContext):
|
|
127
146
|
|
128
147
|
@make_task(
|
129
148
|
name="complete-todo",
|
130
|
-
input=
|
149
|
+
input=[
|
150
|
+
StrInput(name="keyword", prompt="Task keyword", description="Task Keyword"),
|
151
|
+
_get_filter_input(),
|
152
|
+
],
|
131
153
|
description="✅ Complete todo",
|
132
154
|
group=todo_group,
|
133
155
|
alias="complete",
|
@@ -141,10 +163,10 @@ def complete_todo(ctx: AnyContext):
|
|
141
163
|
todo_task = select_todo_task(todo_list, ctx.input.keyword)
|
142
164
|
if todo_task is None:
|
143
165
|
ctx.log_error("Task not found")
|
144
|
-
return get_visual_todo_list(todo_list,
|
166
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
145
167
|
if todo_task.completed:
|
146
168
|
ctx.log_error("Task already completed")
|
147
|
-
return get_visual_todo_list(todo_list,
|
169
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
148
170
|
# Update todo task
|
149
171
|
todo_task = cascade_todo_task(todo_task)
|
150
172
|
if todo_task.creation_date is not None:
|
@@ -152,11 +174,12 @@ def complete_todo(ctx: AnyContext):
|
|
152
174
|
todo_task.completed = True
|
153
175
|
# Save todo list
|
154
176
|
save_todo_list(todo_file_path, todo_list)
|
155
|
-
return get_visual_todo_list(todo_list,
|
177
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
156
178
|
|
157
179
|
|
158
180
|
@make_task(
|
159
181
|
name="archive-todo",
|
182
|
+
input=_get_filter_input(),
|
160
183
|
description="📚 Archive todo",
|
161
184
|
group=todo_group,
|
162
185
|
alias="archive",
|
@@ -180,7 +203,7 @@ def archive_todo(ctx: AnyContext):
|
|
180
203
|
]
|
181
204
|
if len(new_archived_todo_list) == 0:
|
182
205
|
ctx.print("No completed task to archive")
|
183
|
-
return get_visual_todo_list(todo_list,
|
206
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
184
207
|
archive_file_path = os.path.join(TODO_DIR, "archive.txt")
|
185
208
|
if not os.path.isdir(TODO_DIR):
|
186
209
|
os.make_dirs(TODO_DIR, exist_ok=True)
|
@@ -192,7 +215,7 @@ def archive_todo(ctx: AnyContext):
|
|
192
215
|
# Save the new todo list and add the archived ones
|
193
216
|
save_todo_list(archive_file_path, archived_todo_list)
|
194
217
|
save_todo_list(todo_file_path, working_todo_list)
|
195
|
-
return get_visual_todo_list(todo_list,
|
218
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
196
219
|
|
197
220
|
|
198
221
|
@make_task(
|
@@ -216,6 +239,7 @@ def archive_todo(ctx: AnyContext):
|
|
216
239
|
description="Working stop time",
|
217
240
|
default=lambda _: _get_default_stop_work_time_str(),
|
218
241
|
),
|
242
|
+
_get_filter_input(),
|
219
243
|
],
|
220
244
|
description="🕒 Log work todo",
|
221
245
|
group=todo_group,
|
@@ -230,7 +254,7 @@ def log_todo(ctx: AnyContext):
|
|
230
254
|
todo_task = select_todo_task(todo_list, ctx.input.keyword)
|
231
255
|
if todo_task is None:
|
232
256
|
ctx.log_error("Task not found")
|
233
|
-
return get_visual_todo_list(todo_list,
|
257
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
234
258
|
# Update todo task
|
235
259
|
todo_task = cascade_todo_task(todo_task)
|
236
260
|
current_duration_str = todo_task.keyval.get("duration", "0")
|
@@ -268,7 +292,7 @@ def log_todo(ctx: AnyContext):
|
|
268
292
|
log_work_list = json.loads(read_file(log_work_path))
|
269
293
|
return "\n".join(
|
270
294
|
[
|
271
|
-
get_visual_todo_list(todo_list,
|
295
|
+
get_visual_todo_list(todo_list, filter=ctx.input.filter),
|
272
296
|
"",
|
273
297
|
get_visual_todo_card(todo_task, log_work_list),
|
274
298
|
]
|
@@ -296,6 +320,7 @@ def _get_default_stop_work_time_str() -> str:
|
|
296
320
|
default=lambda _: _get_todo_txt_content(),
|
297
321
|
allow_positional_parsing=False,
|
298
322
|
),
|
323
|
+
_get_filter_input(),
|
299
324
|
],
|
300
325
|
description="📝 Edit todo",
|
301
326
|
group=todo_group,
|
@@ -311,7 +336,7 @@ def edit_todo(ctx: AnyContext):
|
|
311
336
|
todo_file_path = os.path.join(TODO_DIR, "todo.txt")
|
312
337
|
write_file(todo_file_path, new_content)
|
313
338
|
todo_list = load_todo_list(todo_file_path)
|
314
|
-
return get_visual_todo_list(todo_list,
|
339
|
+
return get_visual_todo_list(todo_list, filter=ctx.input.filter)
|
315
340
|
|
316
341
|
|
317
342
|
def _get_todo_txt_content() -> str:
|
zrb/llm_config.py
CHANGED
@@ -7,16 +7,24 @@ from pydantic_ai.providers.openai import OpenAIProvider
|
|
7
7
|
|
8
8
|
DEFAULT_SYSTEM_PROMPT = """
|
9
9
|
You have access to tools.
|
10
|
-
Your goal is to
|
10
|
+
Your goal is to provide insightful and accurate information based on user queries.
|
11
11
|
Follow these instructions precisely:
|
12
|
-
1. ALWAYS use available tools to gather information BEFORE asking the user questions
|
13
|
-
2. For tools that require arguments: provide arguments in valid JSON format
|
12
|
+
1. ALWAYS use available tools to gather information BEFORE asking the user questions.
|
13
|
+
2. For tools that require arguments: provide arguments in valid JSON format.
|
14
14
|
3. For tools with no args: call the tool without args. Do NOT pass "" or {}.
|
15
|
-
4. NEVER pass arguments to tools that don't accept parameters
|
16
|
-
5. NEVER ask users for information obtainable through tools
|
17
|
-
6. Use tools in a logical sequence until you have sufficient information
|
18
|
-
7. If a tool call fails, check if you're passing arguments in the correct format
|
19
|
-
|
15
|
+
4. NEVER pass arguments to tools that don't accept parameters.
|
16
|
+
5. NEVER ask users for information obtainable through tools.
|
17
|
+
6. Use tools in a logical sequence until you have sufficient information.
|
18
|
+
7. If a tool call fails, check if you're passing arguments in the correct format.
|
19
|
+
Consider alternative strategies if the issue persists.
|
20
|
+
8. Only after exhausting relevant tools should you request clarification.
|
21
|
+
9. Understand the context of user queries to provide relevant and accurate responses.
|
22
|
+
10. Engage with users in a conversational manner once the necessary information is gathered.
|
23
|
+
11. Adapt to different query types or scenarios to improve flexibility and effectiveness.
|
24
|
+
""".strip()
|
25
|
+
|
26
|
+
DEFAULT_PERSONA = """
|
27
|
+
You are an expert in various fields including technology, science, history, and more.
|
20
28
|
""".strip()
|
21
29
|
|
22
30
|
|
@@ -27,6 +35,7 @@ class LLMConfig:
|
|
27
35
|
default_model_name: str | None = None,
|
28
36
|
default_base_url: str | None = None,
|
29
37
|
default_api_key: str | None = None,
|
38
|
+
default_persona: str | None = None,
|
30
39
|
default_system_prompt: str | None = None,
|
31
40
|
):
|
32
41
|
self._model_name = (
|
@@ -49,6 +58,11 @@ class LLMConfig:
|
|
49
58
|
if default_system_prompt is not None
|
50
59
|
else os.getenv("ZRB_LLM_SYSTEM_PROMPT", None)
|
51
60
|
)
|
61
|
+
self._persona = (
|
62
|
+
default_persona
|
63
|
+
if default_persona is not None
|
64
|
+
else os.getenv("ZRB_LLM_PERSONA", None)
|
65
|
+
)
|
52
66
|
self._default_provider = None
|
53
67
|
self._default_model = None
|
54
68
|
|
@@ -65,9 +79,15 @@ class LLMConfig:
|
|
65
79
|
)
|
66
80
|
|
67
81
|
def get_default_system_prompt(self) -> str:
|
68
|
-
|
69
|
-
|
70
|
-
|
82
|
+
system_prompt = (
|
83
|
+
DEFAULT_SYSTEM_PROMPT
|
84
|
+
if self._system_prompt is None
|
85
|
+
else self._system_prompt
|
86
|
+
)
|
87
|
+
persona = DEFAULT_PERSONA if self._persona is None else self._persona
|
88
|
+
if persona is not None:
|
89
|
+
return f"{persona}\n{system_prompt}"
|
90
|
+
return system_prompt
|
71
91
|
|
72
92
|
def get_default_model(self) -> Model | str | None:
|
73
93
|
if self._default_model is not None:
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: zrb
|
3
|
-
Version: 1.
|
3
|
+
Version: 1.5.0
|
4
4
|
Summary: Your Automation Powerhouse
|
5
5
|
Home-page: https://github.com/state-alchemists/zrb
|
6
6
|
License: AGPL-3.0-or-later
|
@@ -97,9 +97,10 @@ Add the following content to your zrb_init.py:
|
|
97
97
|
import os
|
98
98
|
from zrb import cli, LLMTask, CmdTask, StrInput, Group
|
99
99
|
from zrb.builtin.llm.tool.file import (
|
100
|
-
|
100
|
+
list_files, read_from_file, search_files, write_to_file
|
101
101
|
)
|
102
102
|
|
103
|
+
|
103
104
|
CURRENT_DIR = os.getcwd()
|
104
105
|
|
105
106
|
# Create a group for UML-related tasks
|
@@ -117,9 +118,7 @@ make_uml_script = uml_group.add_task(
|
|
117
118
|
f"Write the script into {CURRENT_DIR}/{{ctx.input.diagram}}.uml"
|
118
119
|
),
|
119
120
|
tools=[
|
120
|
-
list_files,
|
121
|
-
read_text_file,
|
122
|
-
write_text_file,
|
121
|
+
list_files, read_from_file, search_files, write_to_file
|
123
122
|
],
|
124
123
|
)
|
125
124
|
)
|
@@ -7,13 +7,13 @@ zrb/builtin/base64.py,sha256=1YnSwASp7OEAvQcsnHZGpJEvYoI1Z2zTIJ1bCDHfcPQ,921
|
|
7
7
|
zrb/builtin/git.py,sha256=8_qVE_2lVQEVXQ9vhiw8Tn4Prj1VZB78ZjEJJS5Ab3M,5461
|
8
8
|
zrb/builtin/git_subtree.py,sha256=7BKwOkVTWDrR0DXXQ4iJyHqeR6sV5VYRt8y_rEB0EHg,3505
|
9
9
|
zrb/builtin/group.py,sha256=-phJfVpTX3_gUwS1u8-RbZUHe-X41kxDBSmrVh4rq8E,1682
|
10
|
-
zrb/builtin/llm/llm_chat.py,sha256=
|
10
|
+
zrb/builtin/llm/llm_chat.py,sha256=QFuuZJm4tonykbY1P5Vdnn2acVqwM8GcsJ0gnaNB2uo,6182
|
11
11
|
zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
|
12
12
|
zrb/builtin/llm/tool/api.py,sha256=U0_PhVuoDLpq4Jak5S45IHhCF1jKmfS0JC8XAnfnOhA,858
|
13
13
|
zrb/builtin/llm/tool/cli.py,sha256=to_IjkfrMGs6eLfG0cpVN9oyADWYsJQCtyluUhUdBww,253
|
14
|
-
zrb/builtin/llm/tool/file.py,sha256=
|
14
|
+
zrb/builtin/llm/tool/file.py,sha256=A6x0f93oBU4JvrujVF3NQAUY6Hkrf_Iv9cfAMNsaDi4,17469
|
15
15
|
zrb/builtin/llm/tool/rag.py,sha256=pX8N_bYv4axsjhULLvvZtQYW2klZOkeQZ2Tn16083vM,6860
|
16
|
-
zrb/builtin/llm/tool/web.py,sha256=
|
16
|
+
zrb/builtin/llm/tool/web.py,sha256=ZvIgOIMPIEfdih5I3TgVTsqTrwiKmDy60zeKHVWrVeo,4922
|
17
17
|
zrb/builtin/md5.py,sha256=0pNlrfZA0wlZlHvFHLgyqN0JZJWGKQIF5oXxO44_OJk,949
|
18
18
|
zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
19
19
|
zrb/builtin/project/add/fastapp/fastapp_input.py,sha256=MKlWR_LxWhM_DcULCtLfL_IjTxpDnDBkn9KIqNmajFs,310
|
@@ -201,7 +201,7 @@ zrb/builtin/shell/autocomplete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5
|
|
201
201
|
zrb/builtin/shell/autocomplete/bash.py,sha256=-7YDVV7txgJH9mAYSYN0jmvUEeDIzWFvVNY-cY0myF8,1181
|
202
202
|
zrb/builtin/shell/autocomplete/subcmd.py,sha256=WZI6cGWJcn80zSyxOHG7sCMO3Ucix3mZf4xm_xyB_Y0,606
|
203
203
|
zrb/builtin/shell/autocomplete/zsh.py,sha256=9hlq0Wt3fhRz326mAQTypEd4_4lZdrbBx_3A-Ti3mvw,1022
|
204
|
-
zrb/builtin/todo.py,sha256=
|
204
|
+
zrb/builtin/todo.py,sha256=qxQb0EjWk5Eg4lZIOIGDQVw3wz_Bb9wzG2J38b9iCig,11463
|
205
205
|
zrb/callback/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
206
206
|
zrb/callback/any_callback.py,sha256=Yhdv5UWHAZSVzj5K2JdxcVQx8x8VX8aZJEivj3NTfZc,247
|
207
207
|
zrb/callback/callback.py,sha256=hKefB_Jd1XGjPSLQdMKDsGLHPzEGO2dqrIArLl_EmD0,848
|
@@ -237,7 +237,7 @@ zrb/input/option_input.py,sha256=TQB82ko5odgzkULEizBZi0e9TIHEbIgvdP0AR3RhA74,213
|
|
237
237
|
zrb/input/password_input.py,sha256=szBojWxSP9QJecgsgA87OIYwQrY2AQ3USIKdDZY6snU,1465
|
238
238
|
zrb/input/str_input.py,sha256=NevZHX9rf1g8eMatPyy-kUX3DglrVAQpzvVpKAzf7bA,81
|
239
239
|
zrb/input/text_input.py,sha256=shvVbc2U8Is36h23M5lcW8IEwKc9FR-4uEPZZroj3rU,3377
|
240
|
-
zrb/llm_config.py,sha256=
|
240
|
+
zrb/llm_config.py,sha256=wmb0XNeaAjb5JlWRU3G-1D1Q0XCQmjjY7SEPcN3cHeA,4512
|
241
241
|
zrb/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
242
242
|
zrb/runner/cli.py,sha256=0mT0oO_yEhc8N4nYCJNujhgLjVykZ0B-kAOFXyAvAqM,6672
|
243
243
|
zrb/runner/common_util.py,sha256=0zhZn1Jdmr194_nsL5_L-Kn9-_NDpMTI2z6_LXUQJ-U,1369
|
@@ -341,7 +341,7 @@ zrb/util/string/name.py,sha256=8picJfUBXNpdh64GNaHv3om23QHhUZux7DguFLrXHp8,1163
|
|
341
341
|
zrb/util/todo.py,sha256=1nDdwPc22oFoK_1ZTXyf3638Bg6sqE2yp_U4_-frHoc,16015
|
342
342
|
zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
343
343
|
zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
|
344
|
-
zrb-1.
|
345
|
-
zrb-1.
|
346
|
-
zrb-1.
|
347
|
-
zrb-1.
|
344
|
+
zrb-1.5.0.dist-info/METADATA,sha256=165bE0daRzv-SVfO2Oc04XO0fu6D1JP5uh1pdQWouwY,8557
|
345
|
+
zrb-1.5.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
346
|
+
zrb-1.5.0.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
|
347
|
+
zrb-1.5.0.dist-info/RECORD,,
|
File without changes
|
File without changes
|