zrb 1.21.9__py3-none-any.whl → 1.21.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/attr/type.py +10 -7
- zrb/builtin/git.py +12 -1
- zrb/builtin/llm/chat_completion.py +287 -0
- zrb/builtin/llm/chat_session_cmd.py +90 -28
- zrb/builtin/llm/chat_trigger.py +6 -1
- zrb/builtin/llm/history.py +4 -4
- zrb/builtin/llm/tool/cli.py +25 -13
- zrb/builtin/llm/tool/code.py +9 -2
- zrb/builtin/llm/tool/file.py +42 -81
- zrb/builtin/llm/tool/note.py +36 -16
- zrb/builtin/llm/tool/search/__init__.py +1 -0
- zrb/builtin/llm/tool/search/brave.py +60 -0
- zrb/builtin/llm/tool/search/searxng.py +55 -0
- zrb/builtin/llm/tool/search/serpapi.py +55 -0
- zrb/builtin/llm/tool/sub_agent.py +30 -10
- zrb/builtin/llm/tool/web.py +12 -72
- zrb/config/config.py +108 -13
- zrb/config/default_prompt/interactive_system_prompt.md +1 -1
- zrb/config/default_prompt/summarization_prompt.md +54 -8
- zrb/config/default_prompt/system_prompt.md +1 -1
- zrb/config/llm_rate_limitter.py +24 -5
- zrb/input/option_input.py +13 -1
- zrb/task/llm/agent.py +42 -144
- zrb/task/llm/agent_runner.py +152 -0
- zrb/task/llm/config.py +7 -5
- zrb/task/llm/conversation_history.py +35 -24
- zrb/task/llm/conversation_history_model.py +4 -11
- zrb/task/llm/default_workflow/coding/workflow.md +2 -3
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_processor.py +206 -0
- zrb/task/llm/history_summarization.py +2 -179
- zrb/task/llm/print_node.py +14 -5
- zrb/task/llm/prompt.py +7 -18
- zrb/task/llm/subagent_conversation_history.py +41 -0
- zrb/task/llm/tool_confirmation_completer.py +41 -0
- zrb/task/llm/tool_wrapper.py +26 -12
- zrb/task/llm_task.py +55 -47
- zrb/util/attr.py +17 -10
- zrb/util/cli/text.py +6 -4
- zrb/util/git.py +2 -2
- zrb/util/yaml.py +1 -0
- zrb/xcom/xcom.py +10 -0
- {zrb-1.21.9.dist-info → zrb-1.21.31.dist-info}/METADATA +5 -5
- {zrb-1.21.9.dist-info → zrb-1.21.31.dist-info}/RECORD +47 -37
- zrb/task/llm/history_summarization_tool.py +0 -24
- {zrb-1.21.9.dist-info → zrb-1.21.31.dist-info}/WHEEL +0 -0
- {zrb-1.21.9.dist-info → zrb-1.21.31.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/file.py
CHANGED
@@ -2,71 +2,15 @@ import fnmatch
 import json
 import os
 import re
-import sys
-from typing import Any, Literal, Optional
+from typing import Any, Optional
 
 from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
 from zrb.config.config import CFG
 from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
+from zrb.task.llm.file_tool_model import FileReplacement, FileToRead, FileToWrite
 from zrb.util.file import read_file, read_file_with_line_numbers, write_file
 
-if sys.version_info >= (3, 12):
-    from typing import NotRequired, TypedDict
-else:
-    from typing_extensions import NotRequired, TypedDict
-
-
-class FileToRead(TypedDict):
-    """
-    Configuration for reading a file or file section.
-
-    Attributes:
-        path (str): Absolute or relative path to the file
-        start_line (int | None): Starting line number (1-based, inclusive).
-            If None, reads from beginning.
-        end_line (int | None): Ending line number (1-based, exclusive). If None, reads to end.
-    """
-
-    path: str
-    start_line: NotRequired[int | None]
-    end_line: NotRequired[int | None]
-
-
-class FileToWrite(TypedDict):
-    """
-    Configuration for writing content to a file.
-
-    Attributes:
-        path (str): Absolute or relative path where file will be written.
-        content (str): Content to write. CRITICAL: For JSON, ensure all special characters
-            in this string are properly escaped.
-        mode (str): Mode for writing:
-            'w' (overwrite, default), 'a' (append), 'x' (create exclusively).
-    """
-
-    path: str
-    content: str
-    mode: NotRequired[Literal["w", "wt", "tw", "a", "at", "ta", "x", "xt", "tx"]]
-
-
-class FileReplacement(TypedDict):
-    """
-    Configuration for a single text replacement operation in a file.
-
-    Attributes:
-        path (str): Absolute or relative path to the file
-        old_text (str): Exact text to find and replace (must match file content exactly)
-        new_text (str): New text to replace with
-        count (int): Optional. Number of occurrences to replace. Defaults to -1 (all).
-    """
-
-    path: str
-    old_text: str
-    new_text: str
-    count: NotRequired[int]
-
-
 DEFAULT_EXCLUDED_PATTERNS = [
     # Common Python artifacts
     "__pycache__",
@@ -184,7 +128,6 @@ def list_files(
         if (include_hidden or not _is_hidden(d))
         and not is_excluded(d, patterns_to_exclude)
     ]
-
    for filename in files:
        if (include_hidden or not _is_hidden(filename)) and not is_excluded(
            filename, patterns_to_exclude
@@ -237,10 +180,12 @@ def read_from_file(
     Reads content from one or more files, optionally specifying line ranges.
 
     Examples:
+        ```
         # Read entire content of a single file
         read_from_file(file={'path': 'path/to/file.txt'})
 
         # Read specific lines from a file
+        # The content will be returned with line numbers in the format: "LINE_NUMBER | line content"
         read_from_file(file={'path': 'path/to/large_file.log', 'start_line': 100, 'end_line': 150})
 
         # Read multiple files
@@ -248,12 +193,14 @@ def read_from_file(
            {'path': 'path/to/file1.txt'},
            {'path': 'path/to/file2.txt', 'start_line': 1, 'end_line': 5}
        ])
+        ```
 
     Args:
         file (FileToRead | list[FileToRead]): A single file configuration or a list of them.
 
     Returns:
         dict: Content and metadata for a single file, or a dict of results for multiple files.
+            The `content` field in the returned dictionary will have line numbers in the format: "LINE_NUMBER | line content"
     """
     is_list = isinstance(file, list)
     files = file if is_list else [file]
@@ -312,18 +259,20 @@ def write_to_file(
     Writes content to one or more files, with options for overwrite, append, or exclusive
     creation.
 
-    **CRITICAL:**
-
-
-
-
-
+    **CRITICAL - PREVENT JSON ERRORS:**
+    1. **ESCAPING:** Do NOT double-escape quotes.
+       - CORRECT: "content": "He said \"Hello\""
+       - WRONG: "content": "He said \\"Hello\\"" <-- This breaks JSON parsing!
+    2. **SIZE LIMIT:** Content MUST NOT exceed 4000 characters.
+       - Exceeding this causes truncation and EOF errors.
+       - Split larger content into multiple sequential calls (first 'w', then 'a').
 
     Examples:
+        ```
         # Overwrite 'file.txt' with initial content
         write_to_file(file={'path': 'path/to/file.txt', 'content': 'Initial content.'})
 
-        # Append a second chunk to 'file.txt' (note the newline
+        # Append a second chunk to 'file.txt' (note the newline at the beginning of the content)
         write_to_file(file={'path': 'path/to/file.txt', 'content': '\nSecond chunk.', 'mode': 'a'})
 
         # Write to multiple files
@@ -331,6 +280,7 @@ def write_to_file(
            {'path': 'path/to/file1.txt', 'content': 'Content for file 1'},
            {'path': 'path/to/file2.txt', 'content': 'Content for file 2', 'mode': 'w'}
        ])
+        ```
 
     Args:
         file (FileToWrite | list[FileToWrite]): A single file configuration or a list of them.
@@ -481,18 +431,18 @@ def replace_in_file(
     **CRITICAL INSTRUCTIONS:**
     1. **READ FIRST:** Use `read_file` to get exact content. Do not guess.
     2. **EXACT MATCH:** `old_text` must match file content EXACTLY (whitespace, newlines).
-    3. **
-
-
+    3. **ESCAPING:** Do NOT double-escape quotes in `new_text`. Use `\"`, not `\\"`.
+    4. **SIZE LIMIT:** `new_text` MUST NOT exceed 4000 chars to avoid truncation/EOF errors.
+    5. **MINIMAL CONTEXT:** Keep `old_text` small (target lines + 2-3 context lines).
+    6. **DEFAULT:** Replaces **ALL** occurrences. Set `count=1` for first occurrence only.
 
     Examples:
+        ```
         # Replace ALL occurrences
-        replace_in_file(
-            file
-
-
-        ]
-        )
+        replace_in_file(file=[
+            {'path': 'file.txt', 'old_text': 'foo', 'new_text': 'bar'},
+            {'path': 'file.txt', 'old_text': 'baz', 'new_text': 'qux'}
+        ])
 
         # Replace ONLY the first occurrence
         replace_in_file(
@@ -507,6 +457,7 @@ def replace_in_file(
                'new_text': '    def new_fn():\n        pass'
            }
        )
+        ```
 
     Args:
         file: Single replacement config or list of them.
@@ -517,7 +468,7 @@ def replace_in_file(
     # Normalize to list
     file_replacements = file if isinstance(file, list) else [file]
     # Group replacements by file path to minimize file I/O
-    replacements_by_path = {}
+    replacements_by_path: dict[str, list[FileReplacement]] = {}
     for r in file_replacements:
         path = r["path"]
         if path not in replacements_by_path:
@@ -560,11 +511,17 @@ def replace_in_file(
 
 
 async def analyze_file(
-    ctx: AnyContext, path: str, query: str,
+    ctx: AnyContext, path: str, query: str, token_threshold: int | None = None
 ) -> dict[str, Any]:
     """
     Analyzes a file using a sub-agent for complex questions.
 
+    CRITICAL: The query must contain ALL necessary context, instructions, and information.
+    The sub-agent performing the analysis does NOT share your current conversation
+    history, memory, or global context.
+    The quality of analysis depends entirely on the query. Vague queries yield poor
+    results.
+
     Example:
         analyze_file(path='src/main.py', query='Summarize the main function.')
 
@@ -573,13 +530,13 @@ async def analyze_file(
         path (str): The path to the file to analyze.
         query (str): A specific analysis query with clear guidelines and
             necessary information.
-
+        token_threshold (int | None): Max tokens.
 
     Returns:
         Analysis results.
     """
-    if
-
+    if token_threshold is None:
+        token_threshold = CFG.LLM_FILE_ANALYSIS_TOKEN_THRESHOLD
     abs_path = os.path.abspath(os.path.expanduser(path))
     if not os.path.exists(abs_path):
         raise FileNotFoundError(f"File not found: {path}")
@@ -594,12 +551,16 @@ async def analyze_file(
         ),
         system_prompt=CFG.LLM_FILE_EXTRACTOR_SYSTEM_PROMPT,
         tools=[read_from_file, search_files],
+        auto_summarize=False,
+        remember_history=False,
     )
     payload = json.dumps(
        {
            "instruction": query,
            "file_path": abs_path,
-            "file_content": llm_rate_limitter.clip_prompt(
+            "file_content": llm_rate_limitter.clip_prompt(
+                file_content, token_threshold
+            ),
        }
    )
    return await _analyze_file(ctx, payload)
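The updated docstrings above spell out the call shapes and limits for the file tools. Below is a minimal usage sketch mirroring those docstring examples, assuming the module-level functions can be imported and called directly as documented; the file path and contents are hypothetical, and the 4000-character split follows the new size-limit guidance.

```python
from zrb.builtin.llm.tool.file import replace_in_file, write_to_file

# Hypothetical oversized content; the docstring caps a single call at 4000 characters.
long_content = "line of text\n" * 1000

# Split the write into sequential calls: overwrite first ('w'), then append ('a').
write_to_file(file={"path": "notes.txt", "content": long_content[:4000], "mode": "w"})
write_to_file(file={"path": "notes.txt", "content": long_content[4000:], "mode": "a"})

# Batch replacements for one file; by default ALL occurrences are replaced,
# while count=1 limits the second replacement to the first match only.
replace_in_file(file=[
    {"path": "notes.txt", "old_text": "line of text", "new_text": "edited line"},
    {"path": "notes.txt", "old_text": "edited line", "new_text": "final line", "count": 1},
])
```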
zrb/builtin/llm/tool/note.py
CHANGED
@@ -5,10 +5,12 @@ from zrb.config.llm_context.config import llm_context_config
 
 def read_long_term_note() -> str:
     """
-
+    Retrieves the GLOBAL 🧠 Long Term Note shared across ALL sessions and projects.
+
+    Use this to recall user preferences, facts, and cross-project context.
 
     Returns:
-        str: The
+        str: The current global note content.
     """
     contexts = llm_context_config.get_notes()
     return contexts.get("/", "")
@@ -16,28 +18,39 @@ def read_long_term_note() -> str:
 
 def write_long_term_note(content: str) -> str:
     """
-
-
+    Persists CRITICAL facts to the GLOBAL 🧠 Long Term Note.
+
+    USE EAGERLY to save or update:
+    - User preferences (e.g., "I prefer Python", "No unit tests").
+    - User information (e.g., user name, user email address).
+    - Important facts (e.g., "My API key is in .env").
+    - Cross-project goals.
+    - Anything that will be useful for future interaction across projects.
+
+    WARNING: This OVERWRITES the entire Long Term Note.
 
     Args:
-        content (str): The
+        content (str): The text to strictly memorize.
 
     Returns:
-        str:
+        str: Confirmation message.
     """
     llm_context_config.write_note(content, "/")
-    return "
+    return "Global long-term note saved."
 
 
 def read_contextual_note(path: str | None = None) -> str:
     """
-
+    Retrieves LOCAL 📝 Contextual Note specific to a directory path.
+
+    Use to recall project-specific architecture, code summaries, or past decisions
+    relevant to the current working location.
 
     Args:
-        path (str | None):
+        path (str | None): Target file/dir. Defaults to current working directory (CWD).
 
     Returns:
-        str: The content
+        str: The local note content for the path.
     """
     if path is None:
         path = os.getcwd()
@@ -48,17 +61,24 @@ def read_contextual_note(path: str | None = None) -> str:
 
 def write_contextual_note(content: str, path: str | None = None) -> str:
     """
-
-
+    Persists LOCAL facts specific to a directory into 📝 Contextual Note.
+
+    USE EAGERLY to save or update:
+    - Architectural patterns for this project/directory.
+    - Summaries of large files or directories.
+    - Specific guidelines for this project.
+    - Anything related to this directory that will be useful for future interaction.
+
+    WARNING: This OVERWRITES the entire Contextual Note for a directory.
 
     Args:
-        content (str): The
-        path (str | None):
+        content (str): The text to memorize for this location.
+        path (str | None): Target file/dir. Defaults to CWD.
 
     Returns:
-        str:
+        str: Confirmation message.
     """
     if path is None:
         path = os.getcwd()
     llm_context_config.write_note(content, path)
-    return f"Contextual note saved
+    return f"Contextual note saved for: {path}"
zrb/builtin/llm/tool/search/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file makes the directory a Python package
zrb/builtin/llm/tool/search/brave.py
ADDED
@@ -0,0 +1,60 @@
+from typing import Any
+
+import requests
+
+from zrb.config.config import CFG
+
+
+def search_internet(
+    query: str,
+    page: int = 1,
+    safe_search: str | None = None,
+    language: str | None = None,
+) -> dict[str, Any]:
+    """
+    Performs an internet search using Brave Search.
+
+    Use this tool to find up-to-date information, answer questions about current events,
+    or research topics using a search engine.
+
+    Args:
+        query (str): The natural language search query (e.g., 'Soto Madura').
+            Do NOT include instructions, meta-talk, or internal reasoning.
+            Use concise terms as a human would in a search engine.
+        page (int): Search result page number. Defaults to 1.
+        safe_search (str | None): Safety setting. 'strict', 'moderate', or 'off'.
+            If None, uses the system default configuration.
+        language (str | None): Language code (e.g., 'en').
+            If None, uses the system default configuration.
+
+    Returns:
+        dict: Summary of search results (titles, links, snippets).
+    """
+    if safe_search is None:
+        safe_search = CFG.BRAVE_API_SAFE
+    if language is None:
+        language = CFG.BRAVE_API_LANG
+
+    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
+
+    response = requests.get(
+        "https://api.search.brave.com/res/v1/web/search",
+        headers={
+            "User-Agent": user_agent,
+            "Accept": "application/json",
+            "x-subscription-token": CFG.BRAVE_API_KEY,
+        },
+        params={
+            "q": query,
+            "count": "10",
+            "offset": (page - 1) * 10,
+            "safesearch": safe_search,
+            "search_lang": language,
+            "summary": "true",
+        },
+    )
+    if response.status_code != 200:
+        raise Exception(
+            f"Error: Unable to retrieve search results (status code: {response.status_code})"
+        )
+    return response.json()
zrb/builtin/llm/tool/search/searxng.py
ADDED
@@ -0,0 +1,55 @@
+from typing import Any
+
+import requests
+
+from zrb.config.config import CFG
+
+
+def search_internet(
+    query: str,
+    page: int = 1,
+    safe_search: int | None = None,
+    language: str | None = None,
+) -> dict[str, Any]:
+    """
+    Performs an internet search using SearXNG.
+
+    Use this tool to find up-to-date information, answer questions about current events,
+    or research topics using a search engine.
+
+    Args:
+        query (str): The natural language search query (e.g., 'Soto Madura').
+            Do NOT include instructions, meta-talk, or internal reasoning.
+            Use concise terms as a human would in a search engine.
+        page (int): Search result page number. Defaults to 1.
+        safe_search (int | None): Safety setting. 0 (None), 1 (Moderate), 2 (Strict).
+            If None, uses the system default configuration.
+        language (str | None): Language code (e.g., 'en').
+            If None, uses the system default configuration.
+
+    Returns:
+        dict: Summary of search results (titles, links, snippets).
+    """
+    if safe_search is None:
+        safe_search = CFG.SEARXNG_SAFE
+    if language is None:
+        language = CFG.SEARXNG_LANG
+
+    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
+
+    response = requests.get(
+        url=f"{CFG.SEARXNG_BASE_URL}/search",
+        headers={"User-Agent": user_agent},
+        params={
+            "q": query,
+            "format": "json",
+            "pageno": page,
+            "safesearch": safe_search,
+            "language": language,
+        },
+    )
+    if response.status_code != 200:
+        raise Exception(
+            f"Error: Unable to retrieve search results (status code: {response.status_code})"
+        )
+    return response.json()
zrb/builtin/llm/tool/search/serpapi.py
ADDED
@@ -0,0 +1,55 @@
+from typing import Any
+
+import requests
+
+from zrb.config.config import CFG
+
+
+def search_internet(
+    query: str,
+    page: int = 1,
+    safe_search: str | None = None,
+    language: str | None = None,
+) -> dict[str, Any]:
+    """
+    Performs an internet search using SerpApi (Google).
+
+    Use this tool to find up-to-date information, answer questions about current events,
+    or research topics using a search engine.
+
+    Args:
+        query (str): The natural language search query (e.g., 'Soto Madura').
+            Do NOT include instructions, meta-talk, or internal reasoning.
+            Use concise terms as a human would in a search engine.
+        page (int): Search result page number. Defaults to 1.
+        safe_search (str | None): Safety setting. 'active' or 'off'.
+            If None, uses the system default configuration.
+        language (str | None): Two-letter language code (e.g., 'en', 'id').
+            If None, uses the system default configuration.
+
+    Returns:
+        dict: Summary of search results (titles, links, snippets).
+    """
+    if safe_search is None:
+        safe_search = CFG.SERPAPI_SAFE
+    if language is None:
+        language = CFG.SERPAPI_LANG
+
+    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
+
+    response = requests.get(
+        "https://serpapi.com/search",
+        headers={"User-Agent": user_agent},
+        params={
+            "q": query,
+            "start": (page - 1) * 10,
+            "hl": language,
+            "safe": safe_search,
+            "api_key": CFG.SERPAPI_KEY,
+        },
+    )
+    if response.status_code != 200:
+        raise Exception(
+            f"Error: Unable to retrieve search results (status code: {response.status_code})"
+        )
+    return response.json()
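The three new search modules above expose the same `search_internet(query, page, safe_search, language)` shape and differ only in backend and in the type and values accepted for `safe_search`. A minimal, hedged sketch using the SearXNG variant follows; the query is arbitrary, and the result keys assume SearXNG's standard JSON response format.

```python
from zrb.builtin.llm.tool.search.searxng import search_internet

# Assumes SEARXNG_BASE_URL (and related settings) are configured via CFG.
results = search_internet("zrb task automation", page=1, safe_search=1, language="en")

# SearXNG's JSON response is assumed to carry a "results" list with title/url entries.
for item in results.get("results", [])[:5]:
    print(item.get("title"), "-", item.get("url"))
```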
zrb/builtin/llm/tool/sub_agent.py
CHANGED
@@ -1,14 +1,21 @@
+import json
 from collections.abc import Callable
 from textwrap import dedent
 from typing import TYPE_CHECKING, Any, Coroutine
 
 from zrb.context.any_context import AnyContext
-from zrb.task.llm.agent import create_agent_instance
+from zrb.task.llm.agent import create_agent_instance
+from zrb.task.llm.agent_runner import run_agent_iteration
 from zrb.task.llm.config import get_model, get_model_settings
 from zrb.task.llm.prompt import get_system_and_user_prompt
+from zrb.task.llm.subagent_conversation_history import (
+    get_ctx_subagent_history,
+    set_ctx_subagent_history,
+)
 
 if TYPE_CHECKING:
     from pydantic_ai import Tool
+    from pydantic_ai._agent_graph import HistoryProcessor
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
     from pydantic_ai.toolsets import AbstractToolset
@@ -25,8 +32,12 @@ def create_sub_agent_tool(
     tools: "list[ToolOrCallable]" = [],
     toolsets: list["AbstractToolset[None]"] = [],
     yolo_mode: bool | list[str] | None = None,
+    history_processors: list["HistoryProcessor"] | None = None,
     log_indent_level: int = 2,
-
+    agent_name: str | None = None,
+    auto_summarize: bool = True,
+    remember_history: bool = True,
+) -> Callable[[AnyContext, str], Coroutine[Any, Any, Any]]:
     """
     Create a tool that is another AI agent, capable of handling complex, multi-step sub-tasks.
 
@@ -52,8 +63,10 @@ def create_sub_agent_tool(
         An asynchronous function that serves as the sub-agent tool. When called, it runs the
         sub-agent with a given query and returns its final result.
     """
+    if agent_name is None:
+        agent_name = f"{tool_name}_agent"
 
-    async def run_sub_agent(ctx: AnyContext, query: str) ->
+    async def run_sub_agent(ctx: AnyContext, query: str) -> Any:
        """
        Runs the sub-agent with the given query.
        """
@@ -72,7 +85,6 @@ def create_sub_agent_tool(
            ctx=ctx,
            model_settings_attr=model_settings,
        )
-
        if system_prompt is None:
            resolved_system_prompt, query = get_system_and_user_prompt(
                ctx=ctx,
@@ -92,24 +104,32 @@ def create_sub_agent_tool(
            tools=tools,
            toolsets=toolsets,
            yolo_mode=yolo_mode,
+            history_processors=history_processors,
+            auto_summarize=auto_summarize,
        )
-
        sub_agent_run = None
        # Run the sub-agent iteration
-
+        history_list = (
+            get_ctx_subagent_history(ctx, agent_name) if remember_history else []
+        )
        sub_agent_run = await run_agent_iteration(
            ctx=ctx,
            agent=sub_agent_agent,
            user_prompt=query,
            attachments=[],
-            history_list=
+            history_list=history_list,
            log_indent_level=log_indent_level,
        )
-
        # Return the sub-agent's final message content
        if sub_agent_run and sub_agent_run.result:
            # Return the final message content
-
+            if remember_history:
+                set_ctx_subagent_history(
+                    ctx,
+                    agent_name,
+                    json.loads(sub_agent_run.result.all_messages_json()),
+                )
+            return sub_agent_run.result.output
        ctx.log_warning("Sub-agent run did not produce a result.")
        raise ValueError(f"{tool_name} not returning any result")
 
@@ -123,7 +143,7 @@ def create_sub_agent_tool(
            query (str): The query or task for the sub-agent.
 
        Returns:
-
+            Any: The final response or result from the sub-agent.
        """
    ).strip()
 
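For reference, the `analyze_file` change in `file.py` above already shows the intended call pattern for the new flags. The sketch below is a hedged illustration of wiring a sub-agent tool with those flags; only parameters visible in the diff are used, the tool name and prompt are made up, and any required arguments not shown in the diff (such as a description or model) are omitted and may need to be supplied.

```python
from zrb.builtin.llm.tool.file import read_from_file
from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool

# Build an async tool backed by its own agent; the new keyword arguments control
# per-agent conversation memory and history summarization.
summarize_file = create_sub_agent_tool(
    tool_name="summarize_file",
    system_prompt="You are a concise file summarizer.",
    tools=[read_from_file],
    auto_summarize=False,   # new flag: skip history summarization for this sub-agent
    remember_history=True,  # new flag: keep per-agent history between invocations
)

# The factory returns an async callable that is awaited with a context and a query, e.g.:
#   result = await summarize_file(ctx, "Summarize the TODOs in src/main.py")
```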