zrb 1.9.5__py3-none-any.whl → 1.9.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/llm_ask.py +2 -2
- zrb/builtin/llm/tool/api.py +19 -9
- zrb/builtin/llm/tool/cli.py +11 -5
- zrb/builtin/llm/tool/code.py +19 -19
- zrb/builtin/llm/tool/file.py +106 -154
- zrb/builtin/llm/tool/rag.py +27 -4
- zrb/builtin/llm/tool/sub_agent.py +12 -14
- zrb/builtin/llm/tool/web.py +46 -14
- zrb/config/llm_config.py +142 -157
- zrb-1.9.7.dist-info/METADATA +250 -0
- {zrb-1.9.5.dist-info → zrb-1.9.7.dist-info}/RECORD +13 -13
- zrb-1.9.5.dist-info/METADATA +0 -245
- {zrb-1.9.5.dist-info → zrb-1.9.7.dist-info}/WHEEL +0 -0
- {zrb-1.9.5.dist-info → zrb-1.9.7.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/llm_ask.py
CHANGED
@@ -7,10 +7,10 @@ from zrb.builtin.llm.tool.cli import run_shell_command
 from zrb.builtin.llm.tool.code import analyze_repo
 from zrb.builtin.llm.tool.file import (
     analyze_file,
-    apply_diff,
     list_files,
     read_from_file,
     read_many_files,
+    replace_in_file,
     search_files,
     write_many_files,
     write_to_file,
@@ -135,7 +135,7 @@ if CFG.LLM_ALLOW_ACCESS_LOCAL_FILE:
         list_files,
         read_from_file,
         read_many_files,
-        apply_diff,
+        replace_in_file,
         write_to_file,
         write_many_files,
     )
zrb/builtin/llm/tool/api.py
CHANGED
@@ -3,11 +3,16 @@ from typing import Literal
 
 
 def get_current_location() -> str:
-    """
+    """
+    Fetches the user's current geographical location (latitude and longitude) based on their IP address.
+
+    Use this tool when the user asks "Where am I?", "What is my current location?", or has a query that requires knowing their location to be answered.
+
     Returns:
-        str: JSON string
+        str: A JSON string containing the 'lat' and 'lon' of the current location.
+            Example: '{"lat": 48.8584, "lon": 2.2945}'
     Raises:
-        requests.RequestException: If the API request fails.
+        requests.RequestException: If the API request to the location service fails.
     """
     import requests
 
@@ -24,15 +29,20 @@ def get_current_weather(
     longitude: float,
     temperature_unit: Literal["celsius", "fahrenheit"],
 ) -> str:
-    """
+    """
+    Retrieves the current weather conditions for a given geographical location.
+
+    Use this tool when the user asks about the weather. If the user does not provide a location, first use the `get_current_location` tool to determine their location.
+
     Args:
-        latitude (float):
-        longitude (float):
-        temperature_unit (Literal["celsius", "fahrenheit"]):
+        latitude (float): The latitude of the location.
+        longitude (float): The longitude of the location.
+        temperature_unit (Literal["celsius", "fahrenheit"]): The desired unit for the temperature reading.
+
     Returns:
-        str: JSON string
+        str: A JSON string containing detailed weather data, including temperature, wind speed, and weather code.
     Raises:
-        requests.RequestException: If the API request fails.
+        requests.RequestException: If the API request to the weather service fails.
     """
     import requests
 
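A minimal usage sketch of the two tools above, based only on the signatures and docstrings shown in this diff; the coordinates and printed values are illustrative, and both calls hit external web services at runtime.

import json

from zrb.builtin.llm.tool.api import get_current_location, get_current_weather

# Look up the caller's approximate position first, then feed it to the weather tool.
location = json.loads(get_current_location())  # e.g. {"lat": 48.8584, "lon": 2.2945}
weather = get_current_weather(
    latitude=location["lat"],
    longitude=location["lon"],
    temperature_unit="celsius",
)
print(weather)  # JSON string with temperature, wind speed, and weather code
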
zrb/builtin/llm/tool/cli.py
CHANGED
@@ -2,14 +2,20 @@ import subprocess
 
 
 def run_shell_command(command: str) -> str:
-    """
+    """
+    Executes a shell command on the user's local machine and returns the output.
+
+    This tool is powerful and should be used for tasks that require interacting with the command line, such as running scripts, managing system processes, or using command-line tools.
+
+    **Security Warning:** This tool executes commands with the same permissions as the user running the assistant. Before executing any command that could modify files or system state (e.g., `git`, `npm`, `pip`, `docker`), you MUST explain what the command does and ask the user for confirmation.
+
     Args:
-        command (str):
+        command (str): The exact shell command to execute.
+
     Returns:
-        str: The
+        str: The combined standard output (stdout) and standard error (stderr) from the command. If the command fails, this will contain the error message.
     Raises:
-        subprocess.CalledProcessError: If the command returns a non-zero exit code.
-        subprocess.SubprocessError: If there's an issue with subprocess execution.
+        subprocess.CalledProcessError: If the command returns a non-zero exit code, indicating an error.
     """
     try:
         output = subprocess.check_output(
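A short sketch of calling the tool above, assuming only the signature shown in this diff; the command is illustrative.

import subprocess

from zrb.builtin.llm.tool.cli import run_shell_command

try:
    # Returns the combined stdout/stderr of the command.
    output = run_shell_command("echo 'hello from zrb'")
    print(output)
except subprocess.CalledProcessError as exc:
    # Raised when the command exits with a non-zero status.
    print(f"Command failed: {exc}")
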
zrb/builtin/llm/tool/code.py
CHANGED
@@ -86,29 +86,29 @@ async def analyze_repo(
     summarization_token_limit: int = 40000,
 ) -> str:
     """
-
-
+    Performs a deep, goal-oriented analysis of a code repository or directory.
+
+    This powerful tool recursively reads all relevant files in a directory, extracts key information, and then summarizes that information in relation to a specific goal. It uses intelligent sub-agents for extraction and summarization, making it ideal for complex tasks that require a holistic understanding of a codebase.
+
     Use this tool for:
-    -
-    -
-    - code review
-    -
-    -
+    - Understanding a large or unfamiliar codebase.
+    - Generating high-level summaries of a project's architecture.
+    - Performing a preliminary code review.
+    - Creating documentation or diagrams (e.g., "Generate a Mermaid C4 diagram for this service").
+    - Answering broad questions like "How does the authentication in this project work?".
+
     Args:
-        path (str):
-        goal(str):
-        extensions(
-
-
-
-
-            the extraction assistant able to handle. Defaults to 150000
-        summarization_token_limit(Optional[int]): Max resource content char length
-            the summarization assistant able to handle. Defaults to 150000
+        path (str): The path to the directory or repository to analyze.
+        goal (str): A clear and specific description of what you want to achieve. A good goal is critical for getting a useful result. For example: "Understand the database schema by analyzing all the .sql files" or "Create a summary of all the API endpoints defined in the 'api' directory".
+        extensions (list[str], optional): A list of file extensions to include in the analysis. Defaults to a comprehensive list of common code and configuration files.
+        exclude_patterns (list[str], optional): A list of glob patterns for files and directories to exclude from the analysis. Defaults to common patterns like '.git', 'node_modules', and '.venv'.
+        extraction_token_limit (int, optional): The maximum token limit for the extraction sub-agent.
+        summarization_token_limit (int, optional): The maximum token limit for the summarization sub-agent.
+
     Returns:
-        str:
+        str: A detailed, markdown-formatted analysis and summary of the repository, tailored to the specified goal.
     Raises:
-        Exception: If an error occurs.
+        Exception: If an error occurs during the analysis.
     """
     abs_path = os.path.abspath(os.path.expanduser(path))
     file_metadatas = _get_file_metadatas(abs_path, extensions, exclude_patterns)
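A hedged sketch of how analyze_repo might be called from inside a zrb task. The full signature is not visible in this hunk; the sketch assumes that, like analyze_file later in this diff, the tool receives the task context as its first argument. The path and goal are illustrative.

from zrb.builtin.llm.tool.code import analyze_repo

async def summarize_endpoints(ctx) -> str:
    # ctx: the zrb task context (AnyContext) passed to LLM tools (assumed here).
    return await analyze_repo(
        ctx,
        path="./my_service",  # hypothetical repository path
        goal="Summarize all API endpoints defined in the 'api' directory",
    )
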
zrb/builtin/llm/tool/file.py
CHANGED
@@ -106,17 +106,22 @@ def list_files(
     include_hidden: bool = False,
     excluded_patterns: Optional[list[str]] = None,
 ) -> str:
-    """
+    """
+    Lists the files and directories within a specified path.
+
+    This is a fundamental tool for exploring the file system. Use it to discover the structure of a directory, find specific files, or get a general overview of the project layout before performing other operations.
+
     Args:
-        path (str):
-        recursive (bool):
-        include_hidden (bool):
-        excluded_patterns (
-
+        path (str, optional): The directory path to list. Defaults to the current directory (".").
+        recursive (bool, optional): If True, lists files and directories recursively. If False, lists only the top-level contents. Defaults to True.
+        include_hidden (bool, optional): If True, includes hidden files and directories (those starting with a dot). Defaults to False.
+        excluded_patterns (list[str], optional): A list of glob patterns to exclude from the listing. This is useful for ignoring irrelevant files like build artifacts or virtual environments. Defaults to a standard list of common exclusion patterns.
+
     Returns:
-        str: JSON string
+        str: A JSON string containing a list of file and directory paths relative to the input path.
+            Example: '{"files": ["src/main.py", "README.md"]}'
     Raises:
-
+        FileNotFoundError: If the specified path does not exist.
     """
     all_files: list[str] = []
     abs_path = os.path.abspath(os.path.expanduser(path))
@@ -214,19 +219,26 @@ def read_from_file(
     start_line: Optional[int] = None,
     end_line: Optional[int] = None,
 ) -> str:
-    """
-
+    """
+    Reads the content of a file, optionally from a specific start line to an end line.
+
+    This tool is essential for inspecting file contents. It can read both text and PDF files. The returned content is prefixed with line numbers, which is crucial for providing context when you need to modify the file later with the `apply_diff` tool.
+
+    Use this tool to:
+    - Examine the source code of a file.
+    - Read configuration files.
+    - Check the contents of a document.
+
     Args:
-        path (str):
-        start_line (
-
-
-            Defaults to None (end of file).
+        path (str): The path to the file to read.
+        start_line (int, optional): The 1-based line number to start reading from. If omitted, reading starts from the beginning of the file.
+        end_line (int, optional): The 1-based line number to stop reading at (inclusive). If omitted, reads to the end of the file.
+
     Returns:
-        str: JSON
-
+        str: A JSON object containing the file path, the requested content with line numbers, the start and end lines, and the total number of lines in the file.
+            Example: '{"path": "src/main.py", "content": "1: import os\n2: \n3: print(\"Hello, World!\")", "start_line": 1, "end_line": 3, "total_lines": 3}'
     Raises:
-
+        FileNotFoundError: If the specified file does not exist.
     """
     abs_path = os.path.abspath(os.path.expanduser(path))
     # Check if file exists
@@ -267,26 +279,20 @@ def read_from_file(
 def write_to_file(
     path: str,
     content: str,
-    line_count: int,
 ) -> str:
-    """
+    """
+    Writes content to a file, completely overwriting it if it exists or creating it if it doesn't.
+
+    Use this tool to create new files or to replace the entire content of existing files. This is a destructive operation, so be certain of your actions. Always read the file first to understand its contents before overwriting it, unless you are creating a new file.
+
     Args:
-        path (str):
-        content (str):
-
-        line_count (int): Number of lines in the provided content.
+        path (str): The path to the file to write to.
+        content (str): The full, complete content to be written to the file. Do not use partial content or omit any lines.
+
     Returns:
-        str: JSON
-
-        Exception: If an error occurs.
+        str: A JSON object indicating success or failure.
+            Example: '{"success": true, "path": "new_file.txt"}'
     """
-    actual_lines = len(content.splitlines())
-    warning = None
-    if actual_lines != line_count:
-        warning = (
-            f"Provided line_count ({line_count}) does not match actual "
-            f"content lines ({actual_lines}) for file {path}"
-        )
     try:
         abs_path = os.path.abspath(os.path.expanduser(path))
         # Ensure directory exists
@@ -295,8 +301,6 @@ def write_to_file(
         os.makedirs(directory, exist_ok=True)
         write_file(abs_path, content)
         result_data = {"success": True, "path": path}
-        if warning:
-            result_data["warning"] = warning
         return json.dumps(result_data)
     except (OSError, IOError) as e:
         raise OSError(f"Error writing file {path}: {e}")
@@ -310,17 +314,21 @@ def search_files(
     file_pattern: Optional[str] = None,
     include_hidden: bool = True,
 ) -> str:
-    """
+    """
+    Searches for a regular expression (regex) pattern within files in a specified directory.
+
+    This tool is invaluable for finding specific code, configuration, or text across multiple files. Use it to locate function definitions, variable assignments, error messages, or any other text pattern.
+
     Args:
-        path (str):
-        regex (str): Python
-        file_pattern (
-
-
+        path (str): The directory path to start the search from.
+        regex (str): The Python-compatible regular expression pattern to search for.
+        file_pattern (str, optional): A glob pattern to filter which files get searched (e.g., "*.py", "*.md"). If omitted, all files are searched.
+        include_hidden (bool, optional): If True, the search will include hidden files and directories. Defaults to True.
+
     Returns:
-        str: JSON
+        str: A JSON object containing a summary of the search and a list of results. Each result includes the file path and a list of matches, with each match showing the line number, line content, and a few lines of context from before and after the match.
     Raises:
-
+        ValueError: If the provided `regex` pattern is invalid.
     """
     try:
         pattern = re.compile(regex)
@@ -407,83 +415,70 @@ def _get_file_matches(
         raise RuntimeError(f"Unexpected error processing {file_path}: {e}")
 
 
-def
+def replace_in_file(
     path: str,
-
-
-    search_content: str,
-    replace_content: str,
+    old_string: str,
+    new_string: str,
 ) -> str:
-    """
-
+    """
+    Replaces the first occurrence of a string in a file.
+
+    This tool is for making targeted modifications to a file. It is a single-step operation that is generally safer and more ergonomic than `write_to_file` for small changes.
+
+    To ensure the replacement is applied correctly and to avoid ambiguity, the `old_string` parameter should be a unique, multi-line string that includes context from before and after the code you want to change.
+
     Args:
-        path (str):
-
-
-
-            line range. Must exactly match file content including whitespace/indentation,
-            excluding line numbers.
-        replace_content (str): The new content to replace the search_content with.
-            Excluding line numbers.
+        path (str): The path of the file to modify.
+        old_string (str): The exact, verbatim string to search for and replace. This should be a unique, multi-line block of text.
+        new_string (str): The new string that will replace the `old_string`.
+
     Returns:
-        str: JSON
+        str: A JSON object indicating the success or failure of the operation.
     Raises:
-
+        FileNotFoundError: If the specified file does not exist.
+        ValueError: If the `old_string` is not found in the file.
     """
     abs_path = os.path.abspath(os.path.expanduser(path))
     if not os.path.exists(abs_path):
         raise FileNotFoundError(f"File not found: {path}")
     try:
         content = read_file(abs_path)
-
-
-
-                f"Invalid line range {start_line}-{end_line} for file with {len(lines)} lines"
-            )
-        original_content = "\n".join(lines[start_line - 1 : end_line])
-        if original_content != search_content:
-            error_message = (
-                f"Search content does not match file content at "
-                f"lines {start_line}-{end_line}.\n"
-                f"Expected ({len(search_content.splitlines())} lines):\n"
-                f"---\n{search_content}\n---\n"
-                f"Actual ({len(lines[start_line-1:end_line])} lines):\n"
-                f"---\n{original_content}\n---"
-            )
-            return json.dumps({"success": False, "path": path, "error": error_message})
-        new_lines = (
-            lines[: start_line - 1] + replace_content.splitlines() + lines[end_line:]
-        )
-        new_content = "\n".join(new_lines)
-        if content.endswith("\n"):
-            new_content += "\n"
+        if old_string not in content:
+            raise ValueError(f"old_string not found in file: {path}")
+        new_content = content.replace(old_string, new_string, 1)
         write_file(abs_path, new_content)
         return json.dumps({"success": True, "path": path})
     except ValueError as e:
-        raise
+        raise e
     except (OSError, IOError) as e:
-        raise OSError(f"Error applying
+        raise OSError(f"Error applying replacement to {path}: {e}")
     except Exception as e:
-        raise RuntimeError(f"Unexpected error applying
+        raise RuntimeError(f"Unexpected error applying replacement to {path}: {e}")
 
 
 async def analyze_file(
     ctx: AnyContext, path: str, query: str, token_limit: int = 40000
 ) -> str:
-    """
-
-
-    -
-
-
+    """
+    Performs a deep, goal-oriented analysis of a single file using a sub-agent.
+
+    This tool is ideal for complex questions about a single file that go beyond simple reading or searching. It uses a specialized sub-agent to analyze the file's content in relation to a specific query.
+
+    Use this tool to:
+    - Summarize the purpose and functionality of a script or configuration file.
+    - Extract the structure of a file (e.g., "List all the function names in this Python file").
+    - Perform a detailed code review of a specific file.
+    - Answer complex questions like, "How is the 'User' class used in this file?".
+
     Args:
-        path (str):
-        query(str):
-        token_limit(
+        path (str): The path to the file to be analyzed.
+        query (str): A clear and specific question or instruction about what to analyze in the file.
+        token_limit (int, optional): The maximum token length of the file content to be passed to the analysis sub-agent.
+
     Returns:
-        str:
+        str: A detailed, markdown-formatted analysis of the file, tailored to the specified query.
     Raises:
-
+        FileNotFoundError: If the specified file does not exist.
     """
     abs_path = os.path.abspath(os.path.expanduser(path))
     if not os.path.exists(abs_path):
@@ -504,38 +499,16 @@ async def analyze_file(
 
 def read_many_files(paths: List[str]) -> str:
     """
-
+    Reads and returns the full content of multiple files at once.
 
-    This
-    several files at once. For each file path provided in the input list,
-    it reads the entire file content. The result is a JSON string
-    containing a dictionary where keys are the file paths and values are
-    the corresponding file contents.
-
-    Use this tool when you need a comprehensive view of multiple files,
-    for example, to understand how different parts of a module interact,
-    to check configurations across various files, or to gather context
-    before making widespread changes.
+    This tool is highly efficient for gathering context from several files simultaneously. Use it when you need to understand how different files in a project relate to each other, or when you need to inspect a set of related configuration or source code files.
 
     Args:
-        paths (List[str]): A list of
-            files you want to read. It is crucial to
-            provide accurate paths. Use the `list_files`
-            tool if you are unsure about the exact file
-            locations.
+        paths (List[str]): A list of paths to the files you want to read. It is crucial to provide accurate paths. Use the `list_files` tool first if you are unsure about the exact file locations.
 
     Returns:
-        str: A JSON
-
-            file. If a file cannot be read, its entry in the dictionary
-            will contain an error message.
-        Example:
-        {
-            "results": {
-                "path/to/file1.py": "...",
-                "path/to/file2.txt": "..."
-            }
-        }
+        str: A JSON object where keys are the file paths and values are their corresponding contents, prefixed with line numbers. If a file cannot be read, its value will be an error message.
+            Example: '{"results": {"src/api.py": "1: import ...", "config.yaml": "1: key: value"}}'
     """
     results = {}
     for path in paths:
@@ -552,42 +525,18 @@ def read_many_files(paths: List[str]) -> str:
 
 def write_many_files(files: Dict[str, str]) -> str:
     """
-
-
-    This function allows you to create, overwrite, or update multiple
-    files in a single operation. You provide a dictionary where each
-    key is a file path and the corresponding value is the content to be
-    written to that file. This is particularly useful for applying
-    changes across a project, such as refactoring code, updating
-    configuration files, or creating a set of new files from a template.
+    Writes content to multiple files in a single, atomic operation.
 
-
-    file does not exist, it will be created. If it already exists, its
+    This tool is for applying widespread changes to a project, such as creating a set of new files from a template, updating multiple configuration files, or performing a large-scale refactoring.
 
-
-    Therefore, it is essential to provide the full, intended content for
-    each file.
+    Each file's content is completely replaced. If a file does not exist, it will be created. If it exists, its current content will be entirely overwritten. Therefore, you must provide the full, intended content for each file.
 
     Args:
-        files (Dict[str, str]): A dictionary where keys are the file paths
-            (absolute or relative) and values are the
-            complete contents to be written to those
-            files.
+        files (Dict[str, str]): A dictionary where keys are the file paths and values are the complete contents to be written to those files.
 
     Returns:
-        str: A JSON
-
-            failed, along with the corresponding error messages.
-        Example:
-        {
-            "success": [
-                "path/to/file1.py",
-                "path/to/file2.txt"
-            ],
-            "errors": {
-                "path/to/problematic/file.py": "Error message"
-            }
-        }
+        str: A JSON object summarizing the operation, listing successfully written files and any files that failed, along with corresponding error messages.
+            Example: '{"success": ["file1.py", "file2.txt"], "errors": {}}'
     """
     success = []
     errors = {}
@@ -602,3 +551,6 @@ def write_many_files(files: Dict[str, str]) -> str:
     except Exception as e:
         errors[path] = f"Error writing file: {e}"
     return json.dumps({"success": success, "errors": errors})
+
+
+apply_diff = replace_in_file
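A usage sketch of the new replace_in_file signature introduced above; the file path and strings are illustrative. The `apply_diff = replace_in_file` alias added at the end of the module keeps older imports of `apply_diff` working.

import json

from zrb.builtin.llm.tool.file import replace_in_file

result = replace_in_file(
    path="src/config.py",  # hypothetical file
    old_string='DEBUG = True\nLOG_LEVEL = "debug"',  # unique, verbatim block to find
    new_string='DEBUG = False\nLOG_LEVEL = "info"',  # replaces the first occurrence only
)
print(json.loads(result))  # {"success": True, "path": "src/config.py"}
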
zrb/builtin/llm/tool/rag.py
CHANGED
@@ -43,10 +43,33 @@ def create_rag_from_directory(
     openai_base_url: str | None = None,
     openai_embedding_model: str | None = None,
 ):
-    """
-
-
-
+    """
+    Creates a powerful Retrieval-Augmented Generation (RAG) tool for querying a local knowledge base.
+
+    This factory function generates a tool that can perform semantic searches over a directory of documents. It automatically indexes the documents into a vector database, keeping it updated as files change. The generated tool is ideal for answering questions based on a specific set of documents, such as project documentation, research papers, or internal wikis.
+
+    The created tool will:
+    1. Monitor a specified directory for file changes.
+    2. Automatically update a vector database (ChromaDB) with the latest content.
+    3. Accept a user query, embed it, and perform a similarity search against the document vectors.
+    4. Return the most relevant document chunks that match the query.
+
+    Args:
+        tool_name (str): The name for the generated RAG tool (e.g., "search_project_docs").
+        tool_description (str): A clear description of what the generated tool does and when to use it (e.g., "Searches the project's technical documentation to answer questions.").
+        document_dir_path (str, optional): The path to the directory containing the documents to be indexed.
+        vector_db_path (str, optional): The path to store the ChromaDB vector database.
+        vector_db_collection (str, optional): The name of the collection within the vector database.
+        chunk_size (int, optional): The size of text chunks for embedding.
+        overlap (int, optional): The overlap between text chunks.
+        max_result_count (int, optional): The maximum number of search results to return.
+        file_reader (list[RAGFileReader], optional): Custom file readers for specific file types.
+        openai_api_key (str, optional): OpenAI API key for embeddings.
+        openai_base_url (str, optional): OpenAI base URL for embeddings.
+        openai_embedding_model (str, optional): The embedding model to use.
+
+    Returns:
+        Callable: An asynchronous function that serves as the RAG tool.
     """
 
     async def retrieve(query: str) -> str:
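A hedged sketch of building a RAG tool with the factory documented above; only parameters named in the new docstring are used, and the directory, tool name, and description are illustrative. Parameters not passed fall back to their defaults.

from zrb.builtin.llm.tool.rag import create_rag_from_directory

search_project_docs = create_rag_from_directory(
    tool_name="search_project_docs",
    tool_description="Searches the project's technical documentation to answer questions.",
    document_dir_path="./docs",  # hypothetical knowledge-base directory
)

# The factory returns an async callable, so the generated tool is awaited:
#     answer = await search_project_docs("How is the scheduler configured?")
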
zrb/builtin/llm/tool/sub_agent.py
CHANGED
@@ -29,25 +29,23 @@ def create_sub_agent_tool(
     mcp_servers: list["MCPServer"] = [],
 ) -> Callable[[AnyContext, str], Coroutine[Any, Any, str]]:
     """
-
+    Creates a "tool that is another AI agent," capable of handling complex, multi-step sub-tasks.
 
-    This factory
-
-
+    This powerful factory function generates a tool that, when used, spins up a temporary, specialized AI agent. This "sub-agent" has its own system prompt, tools, and context, allowing it to focus exclusively on accomplishing the task it's given without being distracted by the main conversation.
+
+    This is ideal for delegating complex tasks like analyzing a file or a repository.
 
     Args:
-        tool_name: The name
-        tool_description:
-
-
-
-
-
-        sub_agent_mcp_servers: A list of MCP servers for the sub-agent (optional).
+        tool_name (str): The name for the generated sub-agent tool.
+        tool_description (str): A clear description of the sub-agent's purpose and when to use it.
+        system_prompt (str, optional): The system prompt that will guide the sub-agent's behavior.
+        model (str | Model, optional): The language model the sub-agent will use.
+        model_settings (ModelSettings, optional): Specific settings for the sub-agent's model.
+        tools (list, optional): A list of tools that will be exclusively available to the sub-agent.
+        mcp_servers (list, optional): A list of MCP servers for the sub-agent.
 
     Returns:
-        An
-            runs the sub-agent, and returns the sub-agent's final message content.
+        Callable: An asynchronous function that serves as the sub-agent tool. When called, it runs the sub-agent with a given query and returns its final result.
     """
 
     async def run_sub_agent(ctx: AnyContext, query: str) -> str:
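A hedged sketch of creating a sub-agent tool with the factory above, using only parameters listed in the new docstring; the name, description, prompt, and delegated tools are illustrative.

from zrb.builtin.llm.tool.file import read_from_file, search_files
from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool

review_single_file = create_sub_agent_tool(
    tool_name="review_single_file",
    tool_description="Delegates a focused review of one file to a specialized sub-agent.",
    system_prompt="You are a meticulous code reviewer. Base your answer only on the file content you read.",
    tools=[read_from_file, search_files],
)

# Per the declared return type, the result is an async callable:
#     async (ctx: AnyContext, query: str) -> str
# which is meant to be registered as an LLM tool and awaited with a query.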