zrb 1.9.7__py3-none-any.whl → 1.9.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/tool/code.py +15 -39
- zrb/builtin/llm/tool/file.py +6 -22
- zrb/config/config.py +106 -2
- zrb/config/llm_config.py +74 -38
- zrb/task/llm_task.py +2 -133
- {zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/METADATA +1 -1
- {zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/RECORD +9 -9
- {zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/WHEEL +0 -0
- {zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/code.py
CHANGED
@@ -3,40 +3,10 @@ import os
 
 from zrb.builtin.llm.tool.file import DEFAULT_EXCLUDED_PATTERNS, is_excluded
 from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
+from zrb.config.config import CFG
 from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
 
-_EXTRACT_INFO_FROM_REPO_SYSTEM_PROMPT = """
-You are an extraction info agent.
-Your goal is to help to extract relevant information to help the main assistant.
-You write your output is in markdown format containing path and relevant information.
-Extract only information that relevant to main assistant's goal.
-
-Extracted Information format (Use this as reference, extract relevant information only):
-# <file-name>
-## imports
-- <imported-package>
-- ...
-## variables
-- <variable-type> <variable-name>: <the-purpose-of-the-variable>
-- ...
-## functions
-- <function-name>:
-  - parameters: <parameters>
-  - logic/description: <what-the-function-do-and-how-it-works>
-...
-# <other-file-name>
-...
-""".strip()
-
-
-_SUMMARIZE_INFO_SYSTEM_PROMPT = """
-You are an information summarization agent.
-Your goal is to summarize information to help the main assistant.
-The summarization result should contains all necessary details
-to help main assistant achieve the goal.
-"""
-
 _DEFAULT_EXTENSIONS = [
     "py",
     "go",
@@ -82,8 +52,8 @@ async def analyze_repo(
     goal: str,
     extensions: list[str] = _DEFAULT_EXTENSIONS,
     exclude_patterns: list[str] = DEFAULT_EXCLUDED_PATTERNS,
-
-
+    extraction_token_threshold: int | None = None,
+    summarization_token_threshold: int | None = None,
 ) -> str:
     """
     Performs a deep, goal-oriented analysis of a code repository or directory.
@@ -102,14 +72,20 @@ async def analyze_repo(
         goal (str): A clear and specific description of what you want to achieve. A good goal is critical for getting a useful result. For example: "Understand the database schema by analyzing all the .sql files" or "Create a summary of all the API endpoints defined in the 'api' directory".
         extensions (list[str], optional): A list of file extensions to include in the analysis. Defaults to a comprehensive list of common code and configuration files.
         exclude_patterns (list[str], optional): A list of glob patterns for files and directories to exclude from the analysis. Defaults to common patterns like '.git', 'node_modules', and '.venv'.
-
-
+        extraction_token_threshold (int, optional): The maximum token threshold for the extraction sub-agent.
+        summarization_token_threshold (int, optional): The maximum token threshold for the summarization sub-agent.
 
     Returns:
         str: A detailed, markdown-formatted analysis and summary of the repository, tailored to the specified goal.
     Raises:
         Exception: If an error occurs during the analysis.
     """
+    if extraction_token_threshold is None:
+        extraction_token_threshold = CFG.LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_THRESHOLD
+    if summarization_token_threshold is None:
+        summarization_token_threshold = (
+            CFG.LLM_REPO_ANALYSIS_SUMMARIZATION_TOKEN_THRESHOLD
+        )
     abs_path = os.path.abspath(os.path.expanduser(path))
     file_metadatas = _get_file_metadatas(abs_path, extensions, exclude_patterns)
     ctx.print("Extraction")
@@ -117,7 +93,7 @@ async def analyze_repo(
         ctx,
         file_metadatas=file_metadatas,
         goal=goal,
-        token_limit=
+        token_limit=extraction_token_threshold,
     )
     if len(extracted_infos) == 1:
         return extracted_infos[0]
@@ -129,7 +105,7 @@ async def analyze_repo(
         ctx,
         extracted_infos=summarized_infos,
         goal=goal,
-        token_limit=
+        token_limit=summarization_token_threshold,
     )
     return summarized_infos[0]
 
@@ -167,7 +143,7 @@ async def _extract_info(
     extract = create_sub_agent_tool(
         tool_name="extract",
         tool_description="extract",
-        system_prompt=
+        system_prompt=CFG.LLM_REPO_EXTRACTOR_SYSTEM_PROMPT,
     )
     extracted_infos = []
     content_buffer = []
@@ -218,7 +194,7 @@ async def _summarize_info(
     summarize = create_sub_agent_tool(
         tool_name="extract",
         tool_description="extract",
-        system_prompt=
+        system_prompt=CFG.LLM_REPO_SUMMARIZER_SYSTEM_PROMPT,
     )
     summarized_infos = []
     content_buffer = ""
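
Net effect of these changes: the repo-analysis token thresholds and the extractor/summarizer system prompts are no longer hard-coded in this module but resolved from CFG at call time. A minimal sketch of the new defaulting behavior, assuming the call happens inside an async zrb tool and that the repository path is hypothetical:

    from zrb.builtin.llm.tool.code import analyze_repo
    from zrb.context.any_context import AnyContext

    async def summarize_endpoints(ctx: AnyContext) -> str:
        # extraction_token_threshold / summarization_token_threshold are omitted,
        # so they stay None and analyze_repo falls back to
        # CFG.LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_THRESHOLD and
        # CFG.LLM_REPO_ANALYSIS_SUMMARIZATION_TOKEN_THRESHOLD respectively.
        return await analyze_repo(
            ctx,
            path="~/projects/my-repo",  # hypothetical path
            goal="Create a summary of all the API endpoints in the 'api' directory",
        )

Because the fallback happens inside the function body rather than in the signature defaults, an environment override (see the new Config properties below) takes effect on every call without re-registering the tool.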
zrb/builtin/llm/tool/file.py
CHANGED
@@ -5,30 +5,12 @@ import re
 from typing import Any, Dict, List, Optional
 
 from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
+from zrb.config.config import CFG
 from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
 from zrb.util.file import read_file, read_file_with_line_numbers, write_file
 
-_EXTRACT_INFO_FROM_FILE_SYSTEM_PROMPT = """
-You are an extraction info agent.
-Your goal is to help to extract relevant information to help the main assistant.
-You write your output is in markdown format containing path and relevant information.
-Extract only information that relevant to main assistant's goal.
-
-Extracted Information format (Use this as reference, extract relevant information only):
-# imports
-- <imported-package>
-- ...
-# variables
-- <variable-type> <variable-name>: <the-purpose-of-the-variable>
-- ...
-# functions
-- <function-name>:
-  - parameters: <parameters>
-  - logic/description: <what-the-function-do-and-how-it-works>
-...
-...
-""".strip()
+_EXTRACT_INFO_FROM_FILE_SYSTEM_PROMPT = CFG.LLM_ANALYZE_FILE_EXTRACTOR_SYSTEM_PROMPT
 
 
 DEFAULT_EXCLUDED_PATTERNS = [
@@ -457,7 +439,7 @@ def replace_in_file(
 
 
 async def analyze_file(
-    ctx: AnyContext, path: str, query: str, token_limit: int =
+    ctx: AnyContext, path: str, query: str, token_limit: int | None = None
 ) -> str:
     """
     Performs a deep, goal-oriented analysis of a single file using a sub-agent.
@@ -480,6 +462,8 @@ async def analyze_file(
     Raises:
         FileNotFoundError: If the specified file does not exist.
     """
+    if token_limit is None:
+        token_limit = CFG.LLM_FILE_ANALYSIS_TOKEN_LIMIT
     abs_path = os.path.abspath(os.path.expanduser(path))
     if not os.path.exists(abs_path):
         raise FileNotFoundError(f"File not found: {path}")
@@ -487,7 +471,7 @@ async def analyze_file(
     _analyze_file = create_sub_agent_tool(
         tool_name="analyze_file",
         tool_description="analyze file with LLM capability",
-        system_prompt=
+        system_prompt=CFG.LLM_ANALYZE_FILE_EXTRACTOR_SYSTEM_PROMPT,
         tools=[read_from_file, search_files],
     )
     payload = json.dumps(
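
analyze_file follows the same pattern: token_limit is now `int | None = None` and resolves to CFG.LLM_FILE_ANALYSIS_TOKEN_LIMIT inside the function body. A sketch with a hypothetical call site:

    from zrb.builtin.llm.tool.file import analyze_file
    from zrb.context.any_context import AnyContext

    async def inspect_config(ctx: AnyContext) -> str:
        # token_limit is omitted, so it resolves to
        # CFG.LLM_FILE_ANALYSIS_TOKEN_LIMIT (35000 unless overridden via the
        # ZRB_LLM_FILE_ANALYSIS_TOKEN_LIMIT environment variable).
        return await analyze_file(
            ctx,
            path="zrb/config/config.py",
            query="List every LLM-related property and its default value",
        )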
zrb/config/config.py
CHANGED
@@ -19,6 +19,75 @@ Your Automation Powerhouse
 🐤 Follow us at: https://twitter.com/zarubastalchmst
 """
 
+_DEFAULT_LLM_ANALYZE_FILE_EXTRACTOR_SYSTEM_PROMPT = (
+    "You are an intelligent code and configuration analysis agent.\n"
+    "Your primary goal is to extract key information from the provided file(s) "
+    "that is directly relevant to the main assistant's objective.\n"
+    "\n"
+    "Analyze the file content and determine its type (e.g., Python script, "
+    "YAML configuration, Dockerfile, Markdown documentation).\n"
+    "Based on the file type, extract the most important information in a "
+    "structured markdown format.\n"
+    "\n"
+    "- For source code (e.g., .py, .js, .go): Extract key components like "
+    "classes, functions, important variables, and their purposes.\n"
+    "- For configuration files (e.g., .yaml, .toml, .json): Extract the main "
+    "configuration sections, keys, and their values.\n"
+    "- For infrastructure files (e.g., Dockerfile, .tf): Extract resources, "
+    "settings, and commands.\n"
+    "- For documentation (e.g., .md): Extract headings, summaries, code "
+    "blocks, and links.\n"
+    "\n"
+    "Focus on quality and relevance over quantity. The output should be a "
+    "concise yet comprehensive summary that directly helps the main "
+    "assistant achieve its goal."
+).strip()
+
+_DEFAULT_LLM_REPO_EXTRACTOR_SYSTEM_PROMPT = (
+    "You are an intelligent code and configuration analysis agent.\n"
+    "Your primary goal is to extract key information from the provided file(s) "
+    "that is directly relevant to the main assistant's objective.\n"
+    "\n"
+    "Analyze the file content and determine its type (e.g., Python script, "
+    "YAML configuration, Dockerfile, Markdown documentation).\n"
+    "Based on the file type, extract the most important information in a "
+    "structured markdown format.\n"
+    "\n"
+    "- For source code (e.g., .py, .js, .go): Extract key components like "
+    "classes, functions, important variables, and their purposes.\n"
+    "- For configuration files (e.g., .yaml, .toml, .json): Extract the main "
+    "configuration sections, keys, and their values.\n"
+    "- For infrastructure files (e.g., Dockerfile, .tf): Extract resources, "
+    "settings, and commands.\n"
+    "- For documentation (e.g., .md): Extract headings, summaries, code "
+    "blocks, and links.\n"
+    "\n"
+    "Focus on quality and relevance over quantity. The output should be a "
+    "concise yet comprehensive summary that directly helps the main "
+    "assistant achieve its goal."
+).strip()
+
+_DEFAULT_LLM_REPO_SUMMARIZER_SYSTEM_PROMPT = (
+    "You are an expert summarization and synthesis agent.\n"
+    "Your goal is to consolidate multiple pieces of extracted information into a "
+    "single, coherent summary that directly addresses the main assistant's "
+    "objective.\n"
+    "\n"
+    "Do not simply list the information you receive. Instead, perform the "
+    "following actions:\n"
+    "1. **Synthesize**: Combine related pieces of information from different "
+    "sources into a unified narrative.\n"
+    "2. **Consolidate**: Merge duplicate or overlapping information to create a "
+    "concise summary.\n"
+    "3. **Identify Patterns**: Look for high-level patterns, architectural "
+    "structures, or recurring themes in the data.\n"
+    "4. **Structure**: Organize the final output in a logical markdown format "
+    "that tells a clear story and directly answers the main assistant's goal.\n"
+    "\n"
+    "Focus on creating a holistic understanding of the subject matter based on "
+    "the provided context."
+).strip()
+
 
 class Config:
     @property
@@ -277,7 +346,7 @@ class Config:
 
     @property
     def LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD(self) -> int:
-        return int(os.getenv("ZRB_LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD", "
+        return int(os.getenv("ZRB_LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD", "20000"))
 
     @property
     def LLM_ENRICH_CONTEXT(self) -> bool:
@@ -285,7 +354,42 @@ class Config:
 
     @property
     def LLM_CONTEXT_ENRICHMENT_TOKEN_THRESHOLD(self) -> int:
-        return int(os.getenv("ZRB_LLM_CONTEXT_ENRICHMENT_TOKEN_THRESHOLD", "
+        return int(os.getenv("ZRB_LLM_CONTEXT_ENRICHMENT_TOKEN_THRESHOLD", "20000"))
+
+    @property
+    def LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_THRESHOLD(self) -> int:
+        return int(os.getenv("ZRB_LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_LIMIT", "35000"))
+
+    @property
+    def LLM_REPO_ANALYSIS_SUMMARIZATION_TOKEN_THRESHOLD(self) -> int:
+        return int(
+            os.getenv("ZRB_LLM_REPO_ANALYSIS_SUMMARIZATION_TOKEN_LIMIT", "35000")
+        )
+
+    @property
+    def LLM_FILE_ANALYSIS_TOKEN_LIMIT(self) -> int:
+        return int(os.getenv("ZRB_LLM_FILE_ANALYSIS_TOKEN_LIMIT", "35000"))
+
+    @property
+    def LLM_ANALYZE_FILE_EXTRACTOR_SYSTEM_PROMPT(self) -> str:
+        return os.getenv(
+            "ZRB_LLM_ANALYZE_FILE_EXTRACTOR_SYSTEM_PROMPT",
+            _DEFAULT_LLM_ANALYZE_FILE_EXTRACTOR_SYSTEM_PROMPT,
+        )
+
+    @property
+    def LLM_REPO_EXTRACTOR_SYSTEM_PROMPT(self) -> str:
+        return os.getenv(
+            "ZRB_LLM_REPO_EXTRACTOR_SYSTEM_PROMPT",
+            _DEFAULT_LLM_REPO_EXTRACTOR_SYSTEM_PROMPT,
+        )
+
+    @property
+    def LLM_REPO_SUMMARIZER_SYSTEM_PROMPT(self) -> str:
+        return os.getenv(
+            "ZRB_LLM_REPO_SUMMARIZER_SYSTEM_PROMPT",
+            _DEFAULT_LLM_REPO_SUMMARIZER_SYSTEM_PROMPT,
+        )
 
     @property
     def LLM_HISTORY_DIR(self) -> str:
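
Note the naming mismatch visible above: the new repo-analysis properties end in _TOKEN_THRESHOLD while the environment variables they read end in _TOKEN_LIMIT. A sketch of overriding the new knobs, assuming CFG is the module-level Config instance imported elsewhere in this diff (the values are illustrative):

    import os

    # Each property calls os.getenv on every access, so there is no cached
    # value to invalidate; the override applies to the next access.
    os.environ["ZRB_LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_LIMIT"] = "20000"
    os.environ["ZRB_LLM_FILE_ANALYSIS_TOKEN_LIMIT"] = "50000"
    os.environ["ZRB_LLM_REPO_SUMMARIZER_SYSTEM_PROMPT"] = "You are a terse summarizer."

    from zrb.config.config import CFG

    assert CFG.LLM_REPO_ANALYSIS_EXTRACTION_TOKEN_THRESHOLD == 20000
    assert CFG.LLM_FILE_ANALYSIS_TOKEN_LIMIT == 50000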
zrb/config/llm_config.py
CHANGED
@@ -8,57 +8,79 @@ if TYPE_CHECKING:
     from pydantic_ai.settings import ModelSettings
 
 
-DEFAULT_PERSONA = (
+_DEFAULT_PERSONA = (
    "You are a helpful and efficient AI agent specializing in CLI " "interaction."
 )
 
-DEFAULT_INTERACTIVE_SYSTEM_PROMPT = (
+_DEFAULT_INTERACTIVE_SYSTEM_PROMPT = (
     "This is an interactive CLI session. Your standard response format is\n"
     "GitHub-flavored Markdown. You MUST follow this thinking process:\n\n"
-    "1. **Analyze Request:** Use the `Scratchpad
-    " fully understand the user's request
-    "
-    "
+    "1. **Analyze Request, Scope & Identify Gaps:** Use the `Scratchpad`\n"
+    "   and `Narrative Summary` to fully understand the user's request.\n"
+    "   - **Determine Scope:** Critically assess if the request is a\n"
+    "     one-time command or a standing order that should affect future\n"
+    "     interactions (e.g., 'From now on...'). Resolve contextual\n"
+    "     references like 'it' or 'that' to the immediate topic.\n"
+    "   - **Identify Gaps:** Assess if you have enough information to\n"
+    "     proceed. If not, identify the missing information as an\n"
+    "     'information gap'.\n\n"
+    "2. **Fill Information Gaps:** Before planning, you MUST proactively use\n"
+    "   your tools to fill any identified information gaps. Be persistent.\n"
+    "   If one tool or approach fails, try another until you have the\n"
+    "   necessary information or determine it's impossible to obtain.\n\n"
+    "3. **Plan & Verify Pre-conditions:** Create a step-by-step plan. Before\n"
     "   executing, use read-only tools to check the current state. For\n"
     "   example, if the plan is to create a file, check if it already\n"
     "   exists. If pre-conditions are not as expected, inform the user.\n\n"
-    "
+    "4. **Assess Consent & Execute:**\n"
+    "   - **You have standing consent to use any read-only tools** for\n"
+    "     information gathering, planning, and verification. You do not\n"
+    "     need to ask for permission for these actions.\n"
     "   - If the user's last instruction was an explicit command (e.g.,\n"
     '     "create file X", "delete Y"), you have consent. Proceed with the\n'
     "     action.\n"
     '   - If the request was general (e.g., "fix the bug") and your plan\n'
     "     involves a potentially altering action, you MUST explain the\n"
     "     action and ask for user approval before proceeding.\n\n"
-    "
+    "5. **Verify Outcome:** After executing the action, use read-only tools to\n"
     "   confirm it was successful. Report the outcome to the user.\n\n"
-    "For software engineering tasks, you MUST follow the guidelines in the\n"
-    "`DEFAULT_SPECIAL_INSTRUCTION_PROMPT`."
 ).strip()
 
-DEFAULT_SYSTEM_PROMPT = (
+_DEFAULT_SYSTEM_PROMPT = (
     "This is a one-shot CLI session. Your final answer MUST be in\n"
     "GitHub-flavored Markdown. You MUST follow this thinking process:\n\n"
-    "1. **Analyze Request:** Use the `Scratchpad
-    " fully understand the user's request
-    "
-    "
+    "1. **Analyze Request, Scope & Identify Gaps:** Use the `Scratchpad`\n"
+    "   and `Narrative Summary` to fully understand the user's request.\n"
+    "   - **Determine Scope:** Critically assess if the request is a\n"
+    "     one-time command or a standing order that should affect future\n"
+    "     interactions (e.g., 'From now on...'). Resolve contextual\n"
+    "     references like 'it' or 'that' to the immediate topic.\n"
+    "   - **Identify Gaps:** Assess if you have enough information to\n"
+    "     proceed. If not, identify the missing information as an\n"
+    "     'information gap'.\n\n"
+    "2. **Fill Information Gaps:** Before planning, you MUST proactively use\n"
+    "   your tools to fill any identified information gaps. Be persistent.\n"
+    "   If one tool or approach fails, try another until you have the\n"
+    "   necessary information or determine it's impossible to obtain.\n\n"
+    "3. **Plan & Verify Pre-conditions:** Create a step-by-step plan. Before\n"
     "   executing, use read-only tools to check the current state. For\n"
     "   example, if the plan is to create a file, check if it already\n"
     "   exists. If pre-conditions are not as expected, state that and stop.\n\n"
-    "
+    "4. **Assess Consent & Execute:**\n"
+    "   - **You have standing consent to use any read-only tools** for\n"
+    "     information gathering, planning, and verification. You do not\n"
+    "     need to ask for permission for these actions.\n"
     "   - If the user's last instruction was an explicit command (e.g.,\n"
     '     "create file X", "delete Y"), you have consent. Proceed with the\n'
     "     action.\n"
     '   - If the request was general (e.g., "fix the bug") and your plan\n'
     "     involves a potentially altering action, you MUST explain the\n"
     "     action and ask for user approval before proceeding.\n\n"
-    "
+    "5. **Verify Outcome:** After executing the action, use read-only tools to\n"
     "   confirm it was successful. Report the outcome to the user.\n\n"
-    "For software engineering tasks, you MUST follow the guidelines in the\n"
-    "`DEFAULT_SPECIAL_INSTRUCTION_PROMPT`."
 ).strip()
 
-DEFAULT_SPECIAL_INSTRUCTION_PROMPT = (
+_DEFAULT_SPECIAL_INSTRUCTION_PROMPT = (
     "## Software Engineering Tasks\n"
     "When requested to perform tasks like fixing bugs, adding features,\n"
     "refactoring, or explaining code, follow this sequence:\n"
@@ -77,6 +99,10 @@ DEFAULT_SPECIAL_INSTRUCTION_PROMPT = (
     "5. **Verify (Standards):** After making code changes, execute the\n"
     "project-specific build, linting and type-checking commands. This\n"
     "ensures code quality and adherence to standards.\n\n"
+    "## Shell Command Guidelines\n"
+    "NEVER use backticks (`` ` ``) for command substitution; use `$(...)` "
+    "instead. Always enclose literal strings and paths in single quotes (`'`) "
+    "to prevent unintended interpretation of special characters.\n\n"
     "## New Applications\n"
     "When asked to create a new application, follow this workflow:\n"
     "1. **Understand Requirements:** Analyze the user's request to identify\n"
@@ -111,7 +137,7 @@ DEFAULT_SPECIAL_INSTRUCTION_PROMPT = (
 ).strip()
 
 
-DEFAULT_SUMMARIZATION_PROMPT = (
+_DEFAULT_SUMMARIZATION_PROMPT = (
     "You are a Conversation Historian. Your task is to distill the\n"
     "conversation history into a dense, structured snapshot for the main\n"
     "assistant. This snapshot is CRITICAL, as it will become the agent's\n"
@@ -143,7 +169,8 @@ DEFAULT_SUMMARIZATION_PROMPT = (
     "2. `## Scratchpad` (The new, non-truncated recent history)"
 ).strip()
 
-DEFAULT_CONTEXT_ENRICHMENT_PROMPT = (
+
+_DEFAULT_CONTEXT_ENRICHMENT_PROMPT = (
     "You are a Memory Curator. Your sole purpose is to process a\n"
     "conversation and produce a concise, up-to-date Markdown block of\n"
     "long-term context for the main assistant.\n\n"
@@ -157,22 +184,31 @@ DEFAULT_CONTEXT_ENRICHMENT_PROMPT = (
     "3. **Re-write:** Create the new 'Long-Term Context' by applying these\n"
     "   changes.\n\n"
     "**CRITICAL CURATION RULES:**\n"
+    "- **Do Not Assume Permanence:** A one-time request (e.g., 'Write it in\n"
+    "  JSON') is NOT a permanent preference. Only save preferences that are\n"
+    "  explicitly stated as long-term (e.g., 'From now on, always...').\n"
     "- **The context MUST NOT grow indefinitely.** Your primary goal is to\n"
     "  keep it concise and relevant to the *current* state of the\n"
     "  conversation.\n"
-    "- **ADD** new, stable facts (e.g., long-term user
-    "
-    "- **
-    "
+    "- **ADD** new, explicitly stable facts (e.g., long-term user\n"
+    "  preferences).\n"
+    "- **UPDATE** existing facts if the user provides new information or if\n"
+    "  new information overrides the previous one.\n"
+    "- **Your primary goal is to create a concise, relevant context.** "
+    "Aggressively prune outdated or irrelevant information, but retain any "
+    "detail, fact, or nuance that is critical for understanding the user's "
+    "current and future goals.\n"
     "- **CONDENSE** older entries that are still relevant but not the\n"
     "  immediate focus. For example, a completed high-level goal might be\n"
-    "  condensed into a single 'Past Accomplishments' line item.\n
+    "  condensed into a single 'Past Accomplishments' line item.\n"
+    "\n"
     "**A Note on Dynamic Information:**\n"
-    "Be mindful that some information is temporary
-    "
-    "
-    "
-    "
+    "Be mindful that some information is temporary and highly dynamic (e.g.,\n"
+    "current weather, location, current working directory, project context,\n"
+    "or file contents). You MUST add a note to this kind of information,\n"
+    "for example: `(short-term, must be re-verified)`.\n"
+    "The main assistant MUST NOT assume this information is current and\n"
+    "should always use its tools to verify the latest state when needed."
 ).strip()
 
 
@@ -265,7 +301,7 @@ class LLMConfig:
             return self._default_system_prompt
         if CFG.LLM_SYSTEM_PROMPT is not None:
             return CFG.LLM_SYSTEM_PROMPT
-        return DEFAULT_SYSTEM_PROMPT
+        return _DEFAULT_SYSTEM_PROMPT
 
     @property
     def default_interactive_system_prompt(self) -> str:
@@ -273,7 +309,7 @@ class LLMConfig:
             return self._default_interactive_system_prompt
         if CFG.LLM_INTERACTIVE_SYSTEM_PROMPT is not None:
             return CFG.LLM_INTERACTIVE_SYSTEM_PROMPT
-        return DEFAULT_INTERACTIVE_SYSTEM_PROMPT
+        return _DEFAULT_INTERACTIVE_SYSTEM_PROMPT
 
     @property
     def default_persona(self) -> str:
@@ -281,7 +317,7 @@ class LLMConfig:
             return self._default_persona
         if CFG.LLM_PERSONA is not None:
             return CFG.LLM_PERSONA
-        return DEFAULT_PERSONA
+        return _DEFAULT_PERSONA
 
     @property
     def default_special_instruction_prompt(self) -> str:
@@ -289,7 +325,7 @@ class LLMConfig:
             return self._default_special_instruction_prompt
         if CFG.LLM_SPECIAL_INSTRUCTION_PROMPT is not None:
             return CFG.LLM_SPECIAL_INSTRUCTION_PROMPT
-        return DEFAULT_SPECIAL_INSTRUCTION_PROMPT
+        return _DEFAULT_SPECIAL_INSTRUCTION_PROMPT
 
     @property
     def default_summarization_prompt(self) -> str:
@@ -297,7 +333,7 @@ class LLMConfig:
             return self._default_summarization_prompt
         if CFG.LLM_SUMMARIZATION_PROMPT is not None:
             return CFG.LLM_SUMMARIZATION_PROMPT
-        return DEFAULT_SUMMARIZATION_PROMPT
+        return _DEFAULT_SUMMARIZATION_PROMPT
 
     @property
     def default_context_enrichment_prompt(self) -> str:
@@ -305,7 +341,7 @@ class LLMConfig:
             return self._default_context_enrichment_prompt
         if CFG.LLM_CONTEXT_ENRICHMENT_PROMPT is not None:
             return CFG.LLM_CONTEXT_ENRICHMENT_PROMPT
-        return DEFAULT_CONTEXT_ENRICHMENT_PROMPT
+        return _DEFAULT_CONTEXT_ENRICHMENT_PROMPT
 
     @property
     def default_model(self) -> "Model | str | None":
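
The renamed _DEFAULT_* constants are now only the last stop in the three-level fallback visible in each property: an explicit instance attribute wins, then the environment-backed CFG value, then the module default. A condensed sketch of that precedence (a standalone function for illustration, not the actual property body; _DEFAULT_SYSTEM_PROMPT is a private name imported here only to make the sketch runnable):

    from zrb.config.config import CFG
    from zrb.config.llm_config import _DEFAULT_SYSTEM_PROMPT

    def resolve_default_system_prompt(instance_override: str | None) -> str:
        # Mirrors LLMConfig.default_system_prompt as shown in the diff above.
        if instance_override is not None:      # explicit per-instance override
            return instance_override
        if CFG.LLM_SYSTEM_PROMPT is not None:  # environment-backed Config value
            return CFG.LLM_SYSTEM_PROMPT
        return _DEFAULT_SYSTEM_PROMPT          # module-level default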
zrb/task/llm_task.py
CHANGED
@@ -262,7 +262,7 @@ class LLMTask(BaseTask):
             should_enrich_context_attr=self._should_enrich_context,
             render_enrich_context=self._render_enrich_context,
             context_enrichment_token_threshold_attr=self._context_enrichment_token_threshold,
-            render_context_enrichment_token_threshold=self._render_context_enrichment_token_threshold,
+            render_context_enrichment_token_threshold=self._render_context_enrichment_token_threshold,  # noqa
             model=model,
             model_settings=model_settings,
             context_enrichment_prompt=context_enrichment_prompt,
@@ -274,7 +274,7 @@ class LLMTask(BaseTask):
             conversation_summary=conversation_summary,
             should_summarize_history_attr=self._should_summarize_history,
             render_summarize_history=self._render_summarize_history,
-            history_summarization_token_threshold_attr=self._history_summarization_token_threshold,
+            history_summarization_token_threshold_attr=self._history_summarization_token_threshold,  # noqa
             render_history_summarization_token_threshold=(
                 self._render_history_summarization_token_threshold
             ),
@@ -364,134 +364,3 @@ class LLMTask(BaseTask):
         except Exception as e:
             ctx.log_error(f"Error during agent execution or history saving: {str(e)}")
             raise  # Re-raise the exception after logging
-
-
-def llm_task(
-    name: str,
-    color: int | None = None,
-    icon: str | None = None,
-    description: str | None = None,
-    cli_only: bool = False,
-    input: list[AnyInput | None] | AnyInput | None = None,
-    env: list[AnyEnv | None] | AnyEnv | None = None,
-    model: "Callable[[AnySharedContext], Model | str | fstring] | Model | None" = None,
-    render_model: bool = True,
-    model_base_url: StrAttr | None = None,
-    render_model_base_url: bool = True,
-    model_api_key: StrAttr | None = None,
-    render_model_api_key: bool = True,
-    model_settings: "ModelSettings | Callable[[AnySharedContext], ModelSettings] | None" = None,
-    agent: "Agent | Callable[[AnySharedContext], Agent] | None" = None,
-    persona: StrAttr | None = None,
-    system_prompt: StrAttr | None = None,
-    special_instruction_prompt: StrAttr | None = None,
-    message: StrAttr | None = None,
-    render_message: bool = True,
-    enrich_context: BoolAttr | None = None,
-    render_enrich_context: bool = True,
-    context_enrichment_prompt: StrAttr | None = None,
-    render_context_enrichment_prompt: bool = True,
-    context_enrichment_token_threshold: IntAttr | None = None,
-    render_context_enrichment_token_threshold: bool = True,
-    tools: (
-        list["ToolOrCallable"] | Callable[[AnySharedContext], list["ToolOrCallable"]]
-    ) = [],
-    mcp_servers: (
-        list["MCPServer"] | Callable[[AnySharedContext], list["MCPServer"]]
-    ) = [],
-    conversation_history: (
-        ConversationHistoryData
-        | Callable[[AnySharedContext], ConversationHistoryData | dict | list]
-        | dict
-        | list
-    ) = ConversationHistoryData(),
-    conversation_history_reader: (
-        Callable[[AnySharedContext], ConversationHistoryData | dict | list | None]
-        | None
-    ) = None,
-    conversation_history_writer: (
-        Callable[[AnySharedContext, ConversationHistoryData], None] | None
-    ) = None,
-    conversation_history_file: StrAttr | None = None,
-    render_history_file: bool = True,
-    summarize_history: BoolAttr | None = None,
-    render_summarize_history: bool = True,
-    summarization_prompt: StrAttr | None = None,
-    history_summarization_token_threshold: IntAttr | None = None,
-    render_history_summarization_token_threshold: bool = True,
-    rate_limitter: LLMRateLimiter | None = None,
-    execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
-    retries: int = 2,
-    retry_period: float = 0,
-    readiness_check: list[AnyTask] | AnyTask | None = None,
-    readiness_check_delay: float = 0.5,
-    readiness_check_period: float = 5,
-    readiness_failure_threshold: int = 1,
-    readiness_timeout: int = 60,
-    monitor_readiness: bool = False,
-    max_call_iteration: int = 20,
-    upstream: list[AnyTask] | AnyTask | None = None,
-    fallback: list[AnyTask] | AnyTask | None = None,
-    successor: list[AnyTask] | AnyTask | None = None,
-    conversation_context: (
-        dict[str, Any] | Callable[[AnySharedContext], dict[str, Any]] | None
-    ) = None,
-) -> LLMTask:
-    """
-    Create a new LLM task.
-    """
-    return LLMTask(
-        name=name,
-        color=color,
-        icon=icon,
-        description=description,
-        cli_only=cli_only,
-        input=input,
-        env=env,
-        model=model,
-        render_model=render_model,
-        model_base_url=model_base_url,
-        render_model_base_url=render_model_base_url,
-        model_api_key=model_api_key,
-        render_model_api_key=render_model_api_key,
-        model_settings=model_settings,
-        agent=agent,
-        persona=persona,
-        system_prompt=system_prompt,
-        special_instruction_prompt=special_instruction_prompt,
-        message=message,
-        render_message=render_message,
-        enrich_context=enrich_context,
-        render_enrich_context=render_enrich_context,
-        context_enrichment_prompt=context_enrichment_prompt,
-        render_context_enrichment_prompt=render_context_enrichment_prompt,
-        context_enrichment_token_threshold=context_enrichment_token_threshold,
-        render_context_enrichment_token_threshold=render_context_enrichment_token_threshold,
-        tools=tools,
-        mcp_servers=mcp_servers,
-        conversation_history=conversation_history,
-        conversation_history_reader=conversation_history_reader,
-        conversation_history_writer=conversation_history_writer,
-        conversation_history_file=conversation_history_file,
-        render_history_file=render_history_file,
-        summarize_history=summarize_history,
-        render_summarize_history=render_summarize_history,
-        summarization_prompt=summarization_prompt,
-        history_summarization_token_threshold=history_summarization_token_threshold,
-        render_history_summarization_token_threshold=render_history_summarization_token_threshold,
-        rate_limitter=rate_limitter,
-        execute_condition=execute_condition,
-        retries=retries,
-        retry_period=retry_period,
-        readiness_check=readiness_check,
-        readiness_check_delay=readiness_check_delay,
-        readiness_check_period=readiness_check_period,
-        readiness_failure_threshold=readiness_failure_threshold,
-        readiness_timeout=readiness_timeout,
-        monitor_readiness=monitor_readiness,
-        max_call_iteration=max_call_iteration,
-        upstream=upstream,
-        fallback=fallback,
-        successor=successor,
-        conversation_context=conversation_context,
-    )
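
Besides the two `# noqa` line-length suppressions, the substantive change here is the removal of the module-level llm_task() factory, which simply forwarded every keyword argument to the LLMTask constructor. Assuming no replacement factory was added elsewhere, a former llm_task(...) call migrates by instantiating the class directly:

    from zrb.task.llm_task import LLMTask

    # Hypothetical migration of a former llm_task(...) call; the keyword
    # arguments are exactly the ones the removed factory forwarded verbatim.
    chat = LLMTask(
        name="chat",
        description="Ask the configured default model a question",
        message="Summarize the repository structure",
    )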
{zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/RECORD
CHANGED
@@ -17,8 +17,8 @@ zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sgueP
 zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/builtin/llm/tool/api.py,sha256=OhmfLc2TwWKQYIMweGelqb5s4JF4nB-YynbSO4yb_Jk,2342
 zrb/builtin/llm/tool/cli.py,sha256=QqIil29dVOjbTxwb9Gib4KhlaJcOcto-OxEX5hHmA1s,1377
-zrb/builtin/llm/tool/code.py,sha256=
-zrb/builtin/llm/tool/file.py,sha256=
+zrb/builtin/llm/tool/code.py,sha256=GRP_IZAkeL6RIlUm407BQRF992ES57pdzPaQdC5UsJU,8218
+zrb/builtin/llm/tool/file.py,sha256=qoQh5C0RPlQcIoLJp_nT16-w3FAekj7YtIdtsjigARg,22290
 zrb/builtin/llm/tool/rag.py,sha256=wB74JV7bxs0ec77b_09Z2lPjoR1WzPUvZbuXOdb9Q9g,9675
 zrb/builtin/llm/tool/sub_agent.py,sha256=7Awa9dpXqtJAZhxyXaKeZv5oIE2N_OqXhAbNmsOG49Y,4951
 zrb/builtin/llm/tool/web.py,sha256=gQlUsmYCJOFJtNjwpjK-xk13LMvrMSpSaFHXUTnIayQ,7090
@@ -217,8 +217,8 @@ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config/config.py,sha256=
-zrb/config/llm_config.py,sha256=
+zrb/config/config.py,sha256=UpVm_IFD_bSfGS-QJoRo86xV63eGIuIwWICMaUZgR00,15268
+zrb/config/llm_config.py,sha256=-eQCD6A3rCpIrR6XtcEA-aeTX73FcyFTacXa_T0A-4o,21524
 zrb/config/llm_rate_limitter.py,sha256=0U0qm4qgCWqBjohPdwANNUzLR3joJCFYr6oW6Xpccfo,4436
 zrb/config/web_auth_config.py,sha256=_PXatQTYh2mX9H3HSYSQKp13zm1RlLyVIoeIr6KYMQ8,6279
 zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -348,7 +348,7 @@ zrb/task/llm/print_node.py,sha256=zocTKi9gZDxl2I6KNu095TmMc13Yip6SNuWYnswS680,40
 zrb/task/llm/prompt.py,sha256=qhR8qS8RgaQ23D3amaHSHnBNv_NOnFB_1uxaQNc8KFw,3417
 zrb/task/llm/tool_wrapper.py,sha256=8_bL8m_WpRf-pVKSrvQIVqT-m2sUA87a1RBQG13lhp4,6457
 zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
-zrb/task/llm_task.py,sha256=
+zrb/task/llm_task.py,sha256=Vq2kPnE40xJZtHYHjeCBv-nNFKzSCkyMJaVUNXmEmuc,15616
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=GSL9144bmp6F0EckT6m-2a1xG25AzrrWYzH4k3SVUKM,6370
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -391,7 +391,7 @@ zrb/util/todo.py,sha256=r9_KYF2-hLKMNjsp6AFK9zivykMrywd-kJ4bCwfdafI,19323
 zrb/util/todo_model.py,sha256=0SJ8aLYfJAscDOk5JsH7pXP3h1rAG91VMCS20-c2Y6A,1576
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.9.7.dist-info/METADATA,sha256=
-zrb-1.9.7.dist-info/WHEEL,sha256=
-zrb-1.9.7.dist-info/entry_points.txt,sha256=
-zrb-1.9.7.dist-info/RECORD,,
+zrb-1.9.9.dist-info/METADATA,sha256=Zcc7Rl-rD3Viu68pG9hu2gNQtCbjAaRf41JodYCXm2A,9777
+zrb-1.9.9.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.9.9.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.9.9.dist-info/RECORD,,
{zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/WHEEL
File without changes
{zrb-1.9.7.dist-info → zrb-1.9.9.dist-info}/entry_points.txt
File without changes