zrb 1.21.9__py3-none-any.whl → 1.21.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of zrb might be problematic.
- zrb/attr/type.py +10 -7
- zrb/builtin/git.py +12 -1
- zrb/builtin/llm/chat_completion.py +274 -0
- zrb/builtin/llm/chat_session_cmd.py +90 -28
- zrb/builtin/llm/chat_trigger.py +7 -1
- zrb/builtin/llm/history.py +4 -4
- zrb/builtin/llm/tool/code.py +4 -1
- zrb/builtin/llm/tool/file.py +36 -81
- zrb/builtin/llm/tool/note.py +36 -16
- zrb/builtin/llm/tool/sub_agent.py +30 -10
- zrb/config/config.py +108 -13
- zrb/config/default_prompt/interactive_system_prompt.md +1 -1
- zrb/config/default_prompt/summarization_prompt.md +54 -8
- zrb/config/default_prompt/system_prompt.md +1 -1
- zrb/config/llm_rate_limitter.py +24 -5
- zrb/input/option_input.py +13 -1
- zrb/task/llm/agent.py +42 -144
- zrb/task/llm/agent_runner.py +152 -0
- zrb/task/llm/config.py +7 -5
- zrb/task/llm/conversation_history.py +35 -24
- zrb/task/llm/conversation_history_model.py +4 -11
- zrb/task/llm/default_workflow/coding/workflow.md +2 -3
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_processor.py +206 -0
- zrb/task/llm/history_summarization.py +2 -179
- zrb/task/llm/print_node.py +14 -5
- zrb/task/llm/prompt.py +7 -18
- zrb/task/llm/subagent_conversation_history.py +41 -0
- zrb/task/llm/tool_wrapper.py +27 -12
- zrb/task/llm_task.py +55 -47
- zrb/util/attr.py +17 -10
- zrb/util/cli/text.py +6 -4
- zrb/util/git.py +2 -2
- zrb/util/yaml.py +1 -0
- zrb/xcom/xcom.py +10 -0
- {zrb-1.21.9.dist-info → zrb-1.21.28.dist-info}/METADATA +5 -5
- {zrb-1.21.9.dist-info → zrb-1.21.28.dist-info}/RECORD +40 -35
- zrb/task/llm/history_summarization_tool.py +0 -24
- {zrb-1.21.9.dist-info → zrb-1.21.28.dist-info}/WHEEL +0 -0
- {zrb-1.21.9.dist-info → zrb-1.21.28.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/file.py
CHANGED
@@ -2,71 +2,15 @@ import fnmatch
 import json
 import os
 import re
-import sys
-from typing import Any, Literal, Optional
+from typing import Any, Optional

 from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
 from zrb.config.config import CFG
 from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
+from zrb.task.llm.file_tool_model import FileReplacement, FileToRead, FileToWrite
 from zrb.util.file import read_file, read_file_with_line_numbers, write_file

-if sys.version_info >= (3, 12):
-    from typing import NotRequired, TypedDict
-else:
-    from typing_extensions import NotRequired, TypedDict
-
-
-class FileToRead(TypedDict):
-    """
-    Configuration for reading a file or file section.
-
-    Attributes:
-        path (str): Absolute or relative path to the file
-        start_line (int | None): Starting line number (1-based, inclusive).
-            If None, reads from beginning.
-        end_line (int | None): Ending line number (1-based, exclusive). If None, reads to end.
-    """
-
-    path: str
-    start_line: NotRequired[int | None]
-    end_line: NotRequired[int | None]
-
-
-class FileToWrite(TypedDict):
-    """
-    Configuration for writing content to a file.
-
-    Attributes:
-        path (str): Absolute or relative path where file will be written.
-        content (str): Content to write. CRITICAL: For JSON, ensure all special characters
-            in this string are properly escaped.
-        mode (str): Mode for writing:
-            'w' (overwrite, default), 'a' (append), 'x' (create exclusively).
-    """
-
-    path: str
-    content: str
-    mode: NotRequired[Literal["w", "wt", "tw", "a", "at", "ta", "x", "xt", "tx"]]
-
-
-class FileReplacement(TypedDict):
-    """
-    Configuration for a single text replacement operation in a file.
-
-    Attributes:
-        path (str): Absolute or relative path to the file
-        old_text (str): Exact text to find and replace (must match file content exactly)
-        new_text (str): New text to replace with
-        count (int): Optional. Number of occurrences to replace. Defaults to -1 (all).
-    """
-
-    path: str
-    old_text: str
-    new_text: str
-    count: NotRequired[int]
-
-
 DEFAULT_EXCLUDED_PATTERNS = [
     # Common Python artifacts
     "__pycache__",
@@ -184,7 +128,6 @@ def list_files(
             if (include_hidden or not _is_hidden(d))
             and not is_excluded(d, patterns_to_exclude)
         ]
-
         for filename in files:
             if (include_hidden or not _is_hidden(filename)) and not is_excluded(
                 filename, patterns_to_exclude
@@ -237,10 +180,12 @@ def read_from_file(
     Reads content from one or more files, optionally specifying line ranges.

     Examples:
+        ```
         # Read entire content of a single file
         read_from_file(file={'path': 'path/to/file.txt'})

         # Read specific lines from a file
+        # The content will be returned with line numbers in the format: "LINE_NUMBER | line content"
         read_from_file(file={'path': 'path/to/large_file.log', 'start_line': 100, 'end_line': 150})

         # Read multiple files
@@ -248,12 +193,14 @@ def read_from_file(
         {'path': 'path/to/file1.txt'},
         {'path': 'path/to/file2.txt', 'start_line': 1, 'end_line': 5}
     ])
+    ```

     Args:
         file (FileToRead | list[FileToRead]): A single file configuration or a list of them.

     Returns:
         dict: Content and metadata for a single file, or a dict of results for multiple files.
+            The `content` field in the returned dictionary will have line numbers in the format: "LINE_NUMBER | line content"
     """
     is_list = isinstance(file, list)
     files = file if is_list else [file]
@@ -312,18 +259,20 @@ def write_to_file(
     Writes content to one or more files, with options for overwrite, append, or exclusive
     creation.

-    **CRITICAL:**
-
-
-
-
-
+    **CRITICAL - PREVENT JSON ERRORS:**
+    1. **ESCAPING:** Do NOT double-escape quotes.
+        - CORRECT: "content": "He said \"Hello\""
+        - WRONG: "content": "He said \\"Hello\\"" <-- This breaks JSON parsing!
+    2. **SIZE LIMIT:** Content MUST NOT exceed 4000 characters.
+        - Exceeding this causes truncation and EOF errors.
+        - Split larger content into multiple sequential calls (first 'w', then 'a').

     Examples:
+        ```
         # Overwrite 'file.txt' with initial content
         write_to_file(file={'path': 'path/to/file.txt', 'content': 'Initial content.'})

-        # Append a second chunk to 'file.txt' (note the newline
+        # Append a second chunk to 'file.txt' (note the newline at the beginning of the content)
         write_to_file(file={'path': 'path/to/file.txt', 'content': '\nSecond chunk.', 'mode': 'a'})

         # Write to multiple files
@@ -331,6 +280,7 @@ def write_to_file(
         {'path': 'path/to/file1.txt', 'content': 'Content for file 1'},
         {'path': 'path/to/file2.txt', 'content': 'Content for file 2', 'mode': 'w'}
     ])
+    ```

     Args:
         file (FileToWrite | list[FileToWrite]): A single file configuration or a list of them.
@@ -481,18 +431,18 @@ def replace_in_file(
     **CRITICAL INSTRUCTIONS:**
     1. **READ FIRST:** Use `read_file` to get exact content. Do not guess.
     2. **EXACT MATCH:** `old_text` must match file content EXACTLY (whitespace, newlines).
-    3. **
-
-
+    3. **ESCAPING:** Do NOT double-escape quotes in `new_text`. Use `\"`, not `\\"`.
+    4. **SIZE LIMIT:** `new_text` MUST NOT exceed 4000 chars to avoid truncation/EOF errors.
+    5. **MINIMAL CONTEXT:** Keep `old_text` small (target lines + 2-3 context lines).
+    6. **DEFAULT:** Replaces **ALL** occurrences. Set `count=1` for first occurrence only.

     Examples:
+        ```
         # Replace ALL occurrences
-        replace_in_file(
-            file
-
-
-            ]
-        )
+        replace_in_file(file=[
+            {'path': 'file.txt', 'old_text': 'foo', 'new_text': 'bar'},
+            {'path': 'file.txt', 'old_text': 'baz', 'new_text': 'qux'}
+        ])

         # Replace ONLY the first occurrence
         replace_in_file(
@@ -507,6 +457,7 @@ def replace_in_file(
             'new_text': ' def new_fn():\n pass'
         }
     )
+    ```

     Args:
         file: Single replacement config or list of them.
@@ -517,7 +468,7 @@ def replace_in_file(
     # Normalize to list
     file_replacements = file if isinstance(file, list) else [file]
     # Group replacements by file path to minimize file I/O
-    replacements_by_path = {}
+    replacements_by_path: dict[str, list[FileReplacement]] = {}
     for r in file_replacements:
         path = r["path"]
         if path not in replacements_by_path:
@@ -560,7 +511,7 @@ def replace_in_file(


 async def analyze_file(
-    ctx: AnyContext, path: str, query: str,
+    ctx: AnyContext, path: str, query: str, token_threshold: int | None = None
 ) -> dict[str, Any]:
     """
     Analyzes a file using a sub-agent for complex questions.
@@ -573,13 +524,13 @@ async def analyze_file(
         path (str): The path to the file to analyze.
         query (str): A specific analysis query with clear guidelines and
             necessary information.
-
+        token_threshold (int | None): Max tokens.

     Returns:
         Analysis results.
     """
-    if
-
+    if token_threshold is None:
+        token_threshold = CFG.LLM_FILE_ANALYSIS_TOKEN_THRESHOLD
     abs_path = os.path.abspath(os.path.expanduser(path))
     if not os.path.exists(abs_path):
         raise FileNotFoundError(f"File not found: {path}")
@@ -594,12 +545,16 @@ async def analyze_file(
         ),
         system_prompt=CFG.LLM_FILE_EXTRACTOR_SYSTEM_PROMPT,
         tools=[read_from_file, search_files],
+        auto_summarize=False,
+        remember_history=False,
     )
     payload = json.dumps(
         {
             "instruction": query,
             "file_path": abs_path,
-            "file_content": llm_rate_limitter.clip_prompt(
+            "file_content": llm_rate_limitter.clip_prompt(
+                file_content, token_threshold
+            ),
         }
     )
     return await _analyze_file(ctx, payload)
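The three TypedDicts removed above (`FileToRead`, `FileToWrite`, `FileReplacement`) are not dropped; they are now imported from the new `zrb/task/llm/file_tool_model.py` module listed in the summary (+57 lines). That module's exact contents are not shown in this diff; the sketch below is a hypothetical reconstruction based only on the definitions removed from `file.py`:

```python
# Hypothetical sketch of zrb/task/llm/file_tool_model.py, mirroring the
# TypedDict definitions that were removed from zrb/builtin/llm/tool/file.py.
import sys
from typing import Literal

if sys.version_info >= (3, 12):
    from typing import NotRequired, TypedDict
else:
    from typing_extensions import NotRequired, TypedDict


class FileToRead(TypedDict):
    """Configuration for reading a file or file section."""

    path: str
    start_line: NotRequired[int | None]  # 1-based, inclusive; None reads from the beginning
    end_line: NotRequired[int | None]  # 1-based, exclusive; None reads to the end


class FileToWrite(TypedDict):
    """Configuration for writing content to a file."""

    path: str
    content: str
    # 'w' overwrites (default), 'a' appends, 'x' creates exclusively
    mode: NotRequired[Literal["w", "wt", "tw", "a", "at", "ta", "x", "xt", "tx"]]


class FileReplacement(TypedDict):
    """Configuration for a single text replacement operation in a file."""

    path: str
    old_text: str  # must match the file content exactly
    new_text: str
    count: NotRequired[int]  # occurrences to replace; -1 (default) means all
```

Because TypedDicts are purely structural, callers that pass plain dicts to `read_from_file`, `write_to_file`, or `replace_in_file` should be unaffected by the relocation; only the import path changes.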
zrb/builtin/llm/tool/note.py
CHANGED
@@ -5,10 +5,12 @@ from zrb.config.llm_context.config import llm_context_config

 def read_long_term_note() -> str:
     """
-
+    Retrieves the GLOBAL long-term memory shared across ALL sessions and projects.
+
+    CRITICAL: Consult this first for user preferences, facts, and cross-project context.

     Returns:
-        str: The
+        str: The current global note content.
     """
     contexts = llm_context_config.get_notes()
     return contexts.get("/", "")
@@ -16,28 +18,39 @@ def read_long_term_note():

 def write_long_term_note(content: str) -> str:
     """
-
-
+    Persists CRITICAL facts to the GLOBAL long-term memory.
+
+    USE EAGERLY to save or update:
+    - User preferences (e.g., "I prefer Python", "No unit tests").
+    - User information (e.g., user name, user email address).
+    - Important facts (e.g., "My API key is in .env").
+    - Cross-project goals.
+    - Anything that will be useful for future interaction across projects.
+
+    WARNING: This OVERWRITES the entire global note.

     Args:
-        content (str): The
+        content (str): The text to strictly memorize.

     Returns:
-        str:
+        str: Confirmation message.
     """
     llm_context_config.write_note(content, "/")
-    return "
+    return "Global long-term note saved."


 def read_contextual_note(path: str | None = None) -> str:
     """
-
+    Retrieves LOCAL memory specific to a file or directory path.
+
+    Use to recall project-specific architecture, code summaries, or past decisions
+    relevant to the current working location.

     Args:
-        path (str | None):
+        path (str | None): Target file/dir. Defaults to current working directory (CWD).

     Returns:
-        str: The content
+        str: The local note content for the path.
     """
     if path is None:
         path = os.getcwd()
@@ -48,17 +61,24 @@ def read_contextual_note(path: str | None = None) -> str:

 def write_contextual_note(content: str, path: str | None = None) -> str:
     """
-
-
+    Persists LOCAL facts specific to a file or directory.
+
+    USE EAGERLY to save or update:
+    - Architectural patterns for this project/directory.
+    - Summaries of large files or directories.
+    - Specific guidelines for this project.
+    - Anything related to this directory that will be useful for future interaction.
+
+    WARNING: This OVERWRITES the note for the specific path.

     Args:
-        content (str): The
-        path (str | None):
+        content (str): The text to memorize for this location.
+        path (str | None): Target file/dir. Defaults to CWD.

     Returns:
-        str:
+        str: Confirmation message.
     """
     if path is None:
         path = os.getcwd()
     llm_context_config.write_note(content, path)
-    return f"Contextual note saved
+    return f"Contextual note saved for: {path}"
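Taken together, the reworded docstrings describe a two-level memory API: one global note stored under the `/` path and per-path contextual notes, each read back in full and overwritten in full. A hedged usage sketch (the read-then-rewrite pattern is illustrative, not part of the package):

```python
# Illustrative only: because write_long_term_note/write_contextual_note
# overwrite the whole note, appending requires reading the note first.
from zrb.builtin.llm.tool.note import (
    read_contextual_note,
    read_long_term_note,
    write_contextual_note,
    write_long_term_note,
)

# Global note: shared across all sessions and projects.
prefs = read_long_term_note()
write_long_term_note(prefs + "\n- User prefers Python.")

# Contextual note: scoped to a file or directory (defaults to the CWD).
summary = read_contextual_note("src/")
write_contextual_note(summary + "\n- src/ follows a plugin architecture.", "src/")
```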
zrb/builtin/llm/tool/sub_agent.py
CHANGED
@@ -1,14 +1,21 @@
+import json
 from collections.abc import Callable
 from textwrap import dedent
 from typing import TYPE_CHECKING, Any, Coroutine

 from zrb.context.any_context import AnyContext
-from zrb.task.llm.agent import create_agent_instance
+from zrb.task.llm.agent import create_agent_instance
+from zrb.task.llm.agent_runner import run_agent_iteration
 from zrb.task.llm.config import get_model, get_model_settings
 from zrb.task.llm.prompt import get_system_and_user_prompt
+from zrb.task.llm.subagent_conversation_history import (
+    get_ctx_subagent_history,
+    set_ctx_subagent_history,
+)

 if TYPE_CHECKING:
     from pydantic_ai import Tool
+    from pydantic_ai._agent_graph import HistoryProcessor
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
     from pydantic_ai.toolsets import AbstractToolset
@@ -25,8 +32,12 @@ def create_sub_agent_tool(
     tools: "list[ToolOrCallable]" = [],
     toolsets: list["AbstractToolset[None]"] = [],
     yolo_mode: bool | list[str] | None = None,
+    history_processors: list["HistoryProcessor"] | None = None,
     log_indent_level: int = 2,
-
+    agent_name: str | None = None,
+    auto_summarize: bool = True,
+    remember_history: bool = True,
+) -> Callable[[AnyContext, str], Coroutine[Any, Any, Any]]:
     """
     Create a tool that is another AI agent, capable of handling complex, multi-step sub-tasks.

@@ -52,8 +63,10 @@ def create_sub_agent_tool(
     An asynchronous function that serves as the sub-agent tool. When called, it runs the
     sub-agent with a given query and returns its final result.
     """
+    if agent_name is None:
+        agent_name = f"{tool_name}_agent"

-    async def run_sub_agent(ctx: AnyContext, query: str) ->
+    async def run_sub_agent(ctx: AnyContext, query: str) -> Any:
         """
         Runs the sub-agent with the given query.
         """
@@ -72,7 +85,6 @@ def create_sub_agent_tool(
             ctx=ctx,
             model_settings_attr=model_settings,
         )
-
         if system_prompt is None:
             resolved_system_prompt, query = get_system_and_user_prompt(
                 ctx=ctx,
@@ -92,24 +104,32 @@ def create_sub_agent_tool(
             tools=tools,
             toolsets=toolsets,
             yolo_mode=yolo_mode,
+            history_processors=history_processors,
+            auto_summarize=auto_summarize,
         )
-
         sub_agent_run = None
         # Run the sub-agent iteration
-
+        history_list = (
+            get_ctx_subagent_history(ctx, agent_name) if remember_history else []
+        )
         sub_agent_run = await run_agent_iteration(
             ctx=ctx,
             agent=sub_agent_agent,
             user_prompt=query,
             attachments=[],
-            history_list=
+            history_list=history_list,
             log_indent_level=log_indent_level,
         )
-
         # Return the sub-agent's final message content
         if sub_agent_run and sub_agent_run.result:
             # Return the final message content
-
+            if remember_history:
+                set_ctx_subagent_history(
+                    ctx,
+                    agent_name,
+                    json.loads(sub_agent_run.result.all_messages_json()),
+                )
+            return sub_agent_run.result.output
         ctx.log_warning("Sub-agent run did not produce a result.")
         raise ValueError(f"{tool_name} not returning any result")

@@ -123,7 +143,7 @@ def create_sub_agent_tool(
             query (str): The query or task for the sub-agent.

         Returns:
-
+            Any: The final response or result from the sub-agent.
         """
     ).strip()

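The new parameters let a sub-agent either keep its own conversation history across calls (stored per `agent_name` via `get_ctx_subagent_history`/`set_ctx_subagent_history`) or run statelessly, which is how `analyze_file` above uses it (`auto_summarize=False, remember_history=False`). A minimal sketch of wiring a history-keeping sub-agent tool, using only parameters visible in the hunks above; the argument values are made up:

```python
# Illustrative sketch; the tool name, prompt text, and agent name are hypothetical.
from zrb.builtin.llm.tool.file import read_from_file, search_files
from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool

analyze_docs = create_sub_agent_tool(
    tool_name="analyze_docs",
    system_prompt="You answer questions about files under docs/.",
    tools=[read_from_file, search_files],
    # Newly added parameters:
    agent_name="docs_agent",  # key under which this sub-agent's history is kept
    remember_history=True,    # replay prior messages on subsequent calls
    auto_summarize=False,     # skip automatic history summarization
)
```

The returned coroutine function has the `(ctx: AnyContext, query: str)` signature shown in `run_sub_agent`, so it can be registered as a tool like any other async callable.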
zrb/config/config.py
CHANGED
@@ -28,8 +28,13 @@ class Config:
     def ENV_PREFIX(self) -> str:
         return os.getenv("_ZRB_ENV_PREFIX", "ZRB")

-    def _getenv(self, env_name: str, default: str = "") -> str:
-
+    def _getenv(self, env_name: str | list[str], default: str = "") -> str:
+        env_name_list = env_name if isinstance(env_name, list) else [env_name]
+        for env_name in env_name_list:
+            value = os.getenv(f"{self.ENV_PREFIX}_{env_name}", None)
+            if value is not None:
+                return value
+        return default

     def _get_internal_default_prompt(self, name: str) -> str:
         if name not in self.__internal_default_prompt:
@@ -60,6 +65,38 @@ class Config:
     def DEFAULT_EDITOR(self) -> str:
         return self._getenv("EDITOR", "nano")

+    @property
+    def DEFAULT_DIFF_EDIT_COMMAND_TPL(self) -> str:
+        return self._getenv("DIFF_EDIT_COMMAND", self._get_default_diff_edit_command())
+
+    def _get_default_diff_edit_command(self) -> str:
+        editor = self.DEFAULT_EDITOR
+        if editor in [
+            "code",
+            "vscode",
+            "vscodium",
+            "windsurf",
+            "cursor",
+            "zed",
+            "zeditor",
+            "agy",
+        ]:
+            return f"{editor} --wait --diff {{old}} {{new}}"
+        if editor == "emacs":
+            return 'emacs --eval \'(ediff-files "{old}" "{new}")\''
+        if editor in ["nvim", "vim"]:
+            return (
+                f"{editor} -d {{old}} {{new}} "
+                "-i NONE "
+                '-c "wincmd h | set readonly | wincmd l" '
+                '-c "highlight DiffAdd cterm=bold ctermbg=22 guibg=#005f00 | highlight DiffChange cterm=bold ctermbg=24 guibg=#005f87 | highlight DiffText ctermbg=21 guibg=#0000af | highlight DiffDelete ctermbg=52 guibg=#5f0000" '  # noqa
+                '-c "set showtabline=2 | set tabline=[Instructions]\\ :wqa(save\\ &\\ quit)\\ \\|\\ i/esc(toggle\\ edit\\ mode)" '  # noqa
+                '-c "wincmd h | setlocal statusline=OLD\\ FILE" '
+                '-c "wincmd l | setlocal statusline=%#StatusBold#NEW\\ FILE\\ :wqa(save\\ &\\ quit)\\ \\|\\ i/esc(toggle\\ edit\\ mode)" '  # noqa
+                '-c "autocmd BufWritePost * wqa"'
+            )
+        return 'vimdiff {old} {new} +"setlocal ro" +"wincmd l" +"autocmd BufWritePost <buffer> qa"'  # noqa
+
     @property
     def INIT_MODULES(self) -> list[str]:
         init_modules_str = self._getenv("INIT_MODULES", "")
@@ -287,7 +324,9 @@ class Config:
     @property
     def LLM_BUILTIN_WORKFLOW_PATHS(self) -> list[str]:
         """Get a list of additional builtin workflow paths from environment variables."""
-        builtin_workflow_paths_str = self._getenv(
+        builtin_workflow_paths_str = self._getenv(
+            ["LLM_BUILTIN_WORFKLOW_PATH", "LLM_BUILTIN_WORKFLOW_PATHS"], ""
+        )
         if builtin_workflow_paths_str != "":
             return [
                 path.strip()
@@ -306,13 +345,21 @@ class Config:
         value = self._getenv("LLM_SUMMARIZATION_PROMPT")
         return None if value == "" else value

+    @property
+    def LLM_SHOW_TOOL_CALL_RESULT(self) -> bool:
+        return to_boolean(self._getenv("LLM_SHOW_TOOL_CALL_RESULT", "false"))
+
     @property
     def LLM_MAX_REQUESTS_PER_MINUTE(self) -> int:
         """
         Maximum number of LLM requests allowed per minute.
         Default is conservative to accommodate free-tier LLM providers.
         """
-        return int(
+        return int(
+            self._getenv(
+                ["LLM_MAX_REQUEST_PER_MINUTE", "LLM_MAX_REQUESTS_PER_MINUTE"], "60"
+            )
+        )

     @property
     def LLM_MAX_TOKENS_PER_MINUTE(self) -> int:
@@ -320,22 +367,38 @@ class Config:
         Maximum number of LLM tokens allowed per minute.
         Default is conservative to accommodate free-tier LLM providers.
         """
-        return int(
+        return int(
+            self._getenv(
+                ["LLM_MAX_TOKEN_PER_MINUTE", "LLM_MAX_TOKENS_PER_MINUTE"], "100000"
+            )
+        )

     @property
     def LLM_MAX_TOKENS_PER_REQUEST(self) -> int:
         """Maximum number of tokens allowed per individual LLM request."""
-        return int(
+        return int(
+            self._getenv(
+                ["LLM_MAX_TOKEN_PER_REQUEST", "LLM_MAX_TOKENS_PER_REQUEST"], "120000"
+            )
+        )

     @property
     def LLM_MAX_TOKENS_PER_TOOL_CALL_RESULT(self) -> int:
         """Maximum number of tokens allowed per tool call result."""
-        return int(
+        return int(
+            self._getenv(
+                [
+                    "LLM_MAX_TOKEN_PER_TOOL_CALL_RESULT",
+                    "LLM_MAX_TOKENS_PER_TOOL_CALL_RESULT",
+                ],
+                str(self._get_max_threshold(0.4)),
+            )
+        )

     @property
     def LLM_THROTTLE_SLEEP(self) -> float:
         """Number of seconds to sleep when throttling is required."""
-        return float(self._getenv("LLM_THROTTLE_SLEEP", "
+        return float(self._getenv("LLM_THROTTLE_SLEEP", "5.0"))

     @property
     def LLM_YOLO_MODE(self) -> bool | list[str]:
@@ -351,19 +414,51 @@ class Config:

     @property
     def LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD(self) -> int:
-
+        threshold = int(
+            self._getenv(
+                "LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD",
+                str(self._get_max_threshold(0.6)),
+            )
+        )
+        return self._limit_token_threshold(threshold, 0.6)

     @property
     def LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_THRESHOLD(self) -> int:
-
+        threshold = int(
+            self._getenv(
+                "LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_THRESHOLD",
+                str(self._get_max_threshold(0.4)),
+            )
+        )
+        return self._limit_token_threshold(threshold, 0.4)

     @property
     def LLM_REPO_ANALYSIS_SUMMARIZATION_TOKEN_THRESHOLD(self) -> int:
-
+        threshold = int(
+            self._getenv(
+                "LLM_REPO_ANALYSIS_SUMMARIZATION_TOKEN_THRESHOLD",
+                str(self._get_max_threshold(0.4)),
+            )
+        )
+        return self._limit_token_threshold(threshold, 0.4)

     @property
-    def
-
+    def LLM_FILE_ANALYSIS_TOKEN_THRESHOLD(self) -> int:
+        threshold = int(
+            self._getenv(
+                "LLM_FILE_ANALYSIS_TOKEN_THRESHOLD", str(self._get_max_threshold(0.4))
+            )
+        )
+        return self._limit_token_threshold(threshold, 0.4)
+
+    def _limit_token_threshold(self, threshold: int, factor: float) -> int:
+        return min(threshold, self._get_max_threshold(factor))
+
+    def _get_max_threshold(self, factor: float) -> int:
+        return round(
+            factor
+            * min(self.LLM_MAX_TOKENS_PER_MINUTE, self.LLM_MAX_TOKENS_PER_REQUEST)
+        )

     @property
     def LLM_FILE_EXTRACTOR_SYSTEM_PROMPT(self) -> str:
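Two mechanics in this hunk are worth spelling out. First, `_getenv` now accepts a list of names and returns the first `ZRB_`-prefixed variable that is set, which is how the singular spellings (`LLM_MAX_TOKEN_PER_MINUTE`, `LLM_MAX_REQUEST_PER_MINUTE`, and so on) are honored alongside the plural ones. Second, the token thresholds are now derived from the rate limits rather than hard-coded. A worked sketch of the arithmetic, assuming the defaults shown above:

```python
# Worked example of the derived threshold defaults (values from the hunk above).
max_tokens_per_minute = 100_000   # LLM_MAX_TOKENS_PER_MINUTE default
max_tokens_per_request = 120_000  # LLM_MAX_TOKENS_PER_REQUEST default

def get_max_threshold(factor: float) -> int:
    # Mirrors Config._get_max_threshold
    return round(factor * min(max_tokens_per_minute, max_tokens_per_request))

print(get_max_threshold(0.4))  # 40000: tool-call-result, repo-analysis, and file-analysis defaults
print(get_max_threshold(0.6))  # 60000: history-summarization default
```

`_limit_token_threshold` then clamps any explicitly configured threshold to the same ceiling, so a value larger than the per-request token budget cannot take effect.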
zrb/config/default_prompt/interactive_system_prompt.md
CHANGED
@@ -1,4 +1,4 @@
-
+This is an interactive session. Your primary goal is to help users effectively and efficiently.

 # Core Principles
 - **Tool-Centric:** Describe what you are about to do, then call the appropriate tool.
|