zrb 1.13.1__py3-none-any.whl → 1.21.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/__init__.py +2 -6
- zrb/attr/type.py +8 -8
- zrb/builtin/__init__.py +2 -0
- zrb/builtin/group.py +31 -15
- zrb/builtin/http.py +7 -8
- zrb/builtin/llm/attachment.py +40 -0
- zrb/builtin/llm/chat_session.py +130 -144
- zrb/builtin/llm/chat_session_cmd.py +226 -0
- zrb/builtin/llm/chat_trigger.py +73 -0
- zrb/builtin/llm/history.py +4 -4
- zrb/builtin/llm/llm_ask.py +218 -110
- zrb/builtin/llm/tool/api.py +74 -62
- zrb/builtin/llm/tool/cli.py +35 -16
- zrb/builtin/llm/tool/code.py +49 -47
- zrb/builtin/llm/tool/file.py +262 -251
- zrb/builtin/llm/tool/note.py +84 -0
- zrb/builtin/llm/tool/rag.py +25 -18
- zrb/builtin/llm/tool/sub_agent.py +29 -22
- zrb/builtin/llm/tool/web.py +135 -143
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
- zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
- zrb/builtin/searxng/config/settings.yml +5671 -0
- zrb/builtin/searxng/start.py +21 -0
- zrb/builtin/setup/latex/ubuntu.py +1 -0
- zrb/builtin/setup/ubuntu.py +1 -1
- zrb/builtin/shell/autocomplete/bash.py +4 -3
- zrb/builtin/shell/autocomplete/zsh.py +4 -3
- zrb/config/config.py +255 -78
- zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
- zrb/config/default_prompt/interactive_system_prompt.md +24 -30
- zrb/config/default_prompt/persona.md +1 -1
- zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
- zrb/config/default_prompt/summarization_prompt.md +8 -13
- zrb/config/default_prompt/system_prompt.md +36 -30
- zrb/config/llm_config.py +129 -24
- zrb/config/llm_context/config.py +127 -90
- zrb/config/llm_context/config_parser.py +1 -7
- zrb/config/llm_context/workflow.py +81 -0
- zrb/config/llm_rate_limitter.py +89 -45
- zrb/context/any_shared_context.py +7 -1
- zrb/context/context.py +8 -2
- zrb/context/shared_context.py +6 -8
- zrb/group/any_group.py +12 -5
- zrb/group/group.py +67 -3
- zrb/input/any_input.py +5 -1
- zrb/input/base_input.py +18 -6
- zrb/input/text_input.py +7 -24
- zrb/runner/cli.py +21 -20
- zrb/runner/common_util.py +24 -19
- zrb/runner/web_route/task_input_api_route.py +5 -5
- zrb/runner/web_route/task_session_api_route.py +1 -4
- zrb/runner/web_util/user.py +7 -3
- zrb/session/any_session.py +12 -6
- zrb/session/session.py +39 -18
- zrb/task/any_task.py +24 -3
- zrb/task/base/context.py +17 -9
- zrb/task/base/execution.py +15 -8
- zrb/task/base/lifecycle.py +8 -4
- zrb/task/base/monitoring.py +12 -7
- zrb/task/base_task.py +69 -5
- zrb/task/base_trigger.py +12 -5
- zrb/task/llm/agent.py +138 -52
- zrb/task/llm/config.py +45 -13
- zrb/task/llm/conversation_history.py +76 -6
- zrb/task/llm/conversation_history_model.py +0 -168
- zrb/task/llm/default_workflow/coding/workflow.md +41 -0
- zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
- zrb/task/llm/default_workflow/git/workflow.md +118 -0
- zrb/task/llm/default_workflow/golang/workflow.md +128 -0
- zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
- zrb/task/llm/default_workflow/java/workflow.md +146 -0
- zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
- zrb/task/llm/default_workflow/python/workflow.md +160 -0
- zrb/task/llm/default_workflow/researching/workflow.md +153 -0
- zrb/task/llm/default_workflow/rust/workflow.md +162 -0
- zrb/task/llm/default_workflow/shell/workflow.md +299 -0
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_summarization.py +22 -35
- zrb/task/llm/history_summarization_tool.py +24 -0
- zrb/task/llm/print_node.py +182 -63
- zrb/task/llm/prompt.py +213 -153
- zrb/task/llm/tool_wrapper.py +210 -53
- zrb/task/llm/workflow.py +76 -0
- zrb/task/llm_task.py +98 -47
- zrb/task/make_task.py +2 -3
- zrb/task/rsync_task.py +25 -10
- zrb/task/scheduler.py +4 -4
- zrb/util/attr.py +50 -40
- zrb/util/cli/markdown.py +12 -0
- zrb/util/cli/text.py +30 -0
- zrb/util/file.py +27 -11
- zrb/util/{llm/prompt.py → markdown.py} +2 -3
- zrb/util/string/conversion.py +1 -1
- zrb/util/truncate.py +23 -0
- zrb/util/yaml.py +204 -0
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/METADATA +40 -20
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/RECORD +102 -79
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/WHEEL +1 -1
- zrb/task/llm/default_workflow/coding.md +0 -24
- zrb/task/llm/default_workflow/copywriting.md +0 -17
- zrb/task/llm/default_workflow/researching.md +0 -18
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
import difflib
|
|
2
|
+
import os
|
|
3
|
+
import shlex
|
|
4
|
+
import subprocess
|
|
5
|
+
import tempfile
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from zrb.config.config import CFG
|
|
9
|
+
from zrb.task.llm.file_tool_model import FileReplacement
|
|
10
|
+
from zrb.util.file import read_file
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def is_single_path_replacement(param: Any) -> bool:
    """
    Check whether *param* describes replacement(s) targeting a single file.

    Accepts either one replacement dict (keys: path/old_text/new_text plus an
    optional count) or a flat list of such dicts that all share the same path.

    Args:
        param: Candidate value of any type.

    Returns:
        bool: True for a valid single-path replacement spec, False otherwise.
    """
    required = {"path", "old_text", "new_text"}
    with_count = required | {"count"}
    if isinstance(param, dict):
        keys = param.keys()
        # dict key views support set-style equality directly.
        return keys == required or keys == with_count
    if isinstance(param, list):
        current_path = None
        for item in param:
            # Only flat lists of dicts are valid. Previously a nested list
            # passed the recursive check and then crashed with TypeError on
            # item["path"]; reject non-dict elements explicitly instead.
            if not isinstance(item, dict) or not is_single_path_replacement(item):
                return False
            if current_path is not None and current_path != item["path"]:
                return False
            current_path = item["path"]
        return True
    return False
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _dict_has_exact_keys(dictionary: dict, required_keys: set) -> bool:
|
|
31
|
+
"""
|
|
32
|
+
Check if a dictionary contains exactly the specified keys.
|
|
33
|
+
More efficient for large dictionaries.
|
|
34
|
+
"""
|
|
35
|
+
if len(dictionary) != len(required_keys):
|
|
36
|
+
return False
|
|
37
|
+
return all(key in dictionary for key in required_keys)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def edit_replacement(
    replacement: list[FileReplacement] | FileReplacement,
    diff_edit_command_tpl: str | None = None,
) -> tuple[list[FileReplacement] | FileReplacement, bool]:
    """
    Let the user review/adjust an AI-proposed replacement in a diff editor.

    Args:
        replacement: One replacement dict or a list of them (same file path).
        diff_edit_command_tpl: Optional command template with {old}/{new}
            placeholders; defaults to the configured editor command.

    Returns:
        (replacement, False) when the user kept the proposal as-is, otherwise
        (optimized_replacements, True) where the list is re-derived from the
        user's final edit.
    """
    # A bare dict is treated as a one-element batch.
    batch = [replacement] if isinstance(replacement, dict) else replacement
    if not batch:
        return replacement, False
    target_path = batch[0]["path"]
    before = read_file(target_path)
    # What the file would look like if the AI's suggestion were applied.
    proposal = _apply_initial_replacements(before, batch)
    # Let the user tweak the proposal side-by-side with the original.
    after = _open_diff_editor(target_path, before, proposal, diff_edit_command_tpl)
    if after == proposal:
        # User accepted the proposal unchanged.
        return replacement, False
    # Re-derive a minimal replacement set from the user's final version.
    return _generate_optimized_replacements(target_path, before, after), True
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _apply_initial_replacements(
    content: str, replacement_list: list[FileReplacement]
) -> str:
    """Apply each replacement to *content* in order and return the result.

    A missing "count" key means "replace all occurrences" (str.replace -1).
    """
    result = content
    for item in replacement_list:
        result = result.replace(
            item["old_text"], item["new_text"], item.get("count", -1)
        )
    return result
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _open_diff_editor(
    original_path: str,
    original_content: str,
    proposed_content: str,
    diff_edit_command_tpl: str | None,
) -> str:
    """
    Open the user's diff editor on temp copies of the original and proposed
    content, then return whatever the user saved as the "new" side.

    Args:
        original_path: Path of the real file (used only for its extension so
            the editor picks up syntax highlighting).
        original_content: Current file content.
        proposed_content: AI-proposed content the user may edit.
        diff_edit_command_tpl: Command template with {old}/{new} placeholders;
            None falls back to CFG.DEFAULT_DIFF_EDIT_COMMAND_TPL.

    Returns:
        str: Content of the "new" temp file after the editor exits.
    """
    if diff_edit_command_tpl is None:
        diff_edit_command_tpl = CFG.DEFAULT_DIFF_EDIT_COMMAND_TPL
    _, extension = os.path.splitext(original_path)
    old_file_name: str | None = None
    new_file_name: str | None = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=extension) as old_file:
            old_file_name = old_file.name
            old_file.write(original_content.encode())
            old_file.flush()
        with tempfile.NamedTemporaryFile(delete=False, suffix=extension) as new_file:
            new_file_name = new_file.name
            new_file.write(proposed_content.encode())
            new_file.flush()
        diff_edit_command = diff_edit_command_tpl.format(
            old=old_file_name, new=new_file_name
        )
        subprocess.call(shlex.split(diff_edit_command))
        return read_file(new_file_name)
    finally:
        # Previously the temp files leaked if the editor command raised;
        # always clean both up, whatever happened above.
        for tmp_name in (old_file_name, new_file_name):
            if tmp_name and os.path.exists(tmp_name):
                os.remove(tmp_name)
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _generate_optimized_replacements(
    path: str, original_content: str, edited_content: str
) -> list[FileReplacement]:
    """Diff original vs edited content and build minimal unique replacements."""
    opcodes = difflib.SequenceMatcher(
        None, original_content, edited_content
    ).get_opcodes()
    result = []
    for hunk in _group_opcodes_into_hunks(opcodes):
        candidate = _create_replacement_from_hunk(
            path, original_content, edited_content, hunk
        )
        if candidate:
            result.append(candidate)
    return result
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _group_opcodes_into_hunks(opcodes, merge_threshold=200):
|
|
123
|
+
"""
|
|
124
|
+
Groups opcodes into hunks.
|
|
125
|
+
'equal' blocks smaller than merge_threshold are treated as context (glue) within a hunk.
|
|
126
|
+
"""
|
|
127
|
+
hunks = []
|
|
128
|
+
current_hunk = []
|
|
129
|
+
for tag, i1, i2, j1, j2 in opcodes:
|
|
130
|
+
if tag == "equal":
|
|
131
|
+
if i2 - i1 < merge_threshold:
|
|
132
|
+
if current_hunk:
|
|
133
|
+
current_hunk.append((tag, i1, i2, j1, j2))
|
|
134
|
+
else:
|
|
135
|
+
if current_hunk:
|
|
136
|
+
hunks.append(current_hunk)
|
|
137
|
+
current_hunk = []
|
|
138
|
+
else:
|
|
139
|
+
current_hunk.append((tag, i1, i2, j1, j2))
|
|
140
|
+
if current_hunk:
|
|
141
|
+
hunks.append(current_hunk)
|
|
142
|
+
return hunks
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _create_replacement_from_hunk(
    path: str, original_content: str, edited_content: str, hunk: list
) -> FileReplacement | None:
    """
    Convert one diff hunk into a minimal, uniquely-matchable FileReplacement.

    NOTE: mutates *hunk* in place (pops leading/trailing 'equal' opcodes).
    Returns None when the hunk contains no effective change.
    """
    # Trim leading/trailing 'equal' opcodes
    while hunk and hunk[0][0] == "equal":
        hunk.pop(0)
    while hunk and hunk[-1][0] == "equal":
        hunk.pop()
    if not hunk:
        return None
    # Determine range of modification
    i_start = hunk[0][1]  # start offset in original_content
    i_end = hunk[-1][2]  # end offset in original_content
    j_start = hunk[0][3]  # start offset in edited_content
    j_end = hunk[-1][4]  # end offset in edited_content
    base_old_text = original_content[i_start:i_end]
    base_new_text = edited_content[j_start:j_end]
    if base_old_text == base_new_text:
        return None
    # Expand context so old_text occurs exactly once and sits on word boundaries
    start, end = _expand_context_for_uniqueness(original_content, i_start, i_end)
    start, end = _expand_to_word_boundary(original_content, start, end)
    final_old_text = original_content[start:end]
    # Reconstruct new text: keep the expanded context, swap only the changed core
    prefix = original_content[start:i_start]
    suffix = original_content[i_end:end]
    final_new_text = prefix + base_new_text + suffix
    if final_old_text == final_new_text:
        return None
    return {
        "path": path,
        "old_text": final_old_text,
        "new_text": final_new_text,
        "count": 1,  # old_text was made unique above, so replace exactly one
    }
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def _expand_context_for_uniqueness(
|
|
183
|
+
content: str, start: int, end: int
|
|
184
|
+
) -> tuple[int, int]:
|
|
185
|
+
"""Expands the range [start, end] until the substring content[start:end] is unique."""
|
|
186
|
+
while content.count(content[start:end]) > 1:
|
|
187
|
+
if start == 0 and end == len(content):
|
|
188
|
+
break
|
|
189
|
+
if start > 0:
|
|
190
|
+
start -= 1
|
|
191
|
+
if end < len(content):
|
|
192
|
+
end += 1
|
|
193
|
+
return start, end
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def _expand_to_word_boundary(content: str, start: int, end: int) -> tuple[int, int]:
|
|
197
|
+
"""Expands the range [start, end] outwards to the nearest whitespace boundaries."""
|
|
198
|
+
|
|
199
|
+
def is_boundary(char):
|
|
200
|
+
return char.isspace()
|
|
201
|
+
|
|
202
|
+
while start > 0 and not is_boundary(content[start - 1]):
|
|
203
|
+
start -= 1
|
|
204
|
+
while end < len(content) and not is_boundary(content[end]):
|
|
205
|
+
end += 1
|
|
206
|
+
return start, end
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
from typing import Literal
|
|
3
|
+
|
|
4
|
+
if sys.version_info >= (3, 12):
|
|
5
|
+
from typing import NotRequired, TypedDict
|
|
6
|
+
else:
|
|
7
|
+
from typing_extensions import NotRequired, TypedDict
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class FileToRead(TypedDict):
    """
    Configuration for reading a file or a section of a file.

    Attributes:
        path (str): Absolute or relative path to the file.
        start_line (int | None): Starting line number (1-based, inclusive).
            May be omitted or None to read from the beginning.
        end_line (int | None): Ending line number (1-based, exclusive).
            May be omitted or None to read to the end.
    """

    path: str
    start_line: NotRequired[int | None]
    end_line: NotRequired[int | None]
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class FileToWrite(TypedDict):
    """
    Configuration for writing content to a file.

    Attributes:
        path (str): Absolute or relative path where the file will be written.
        content (str): Content to write. CRITICAL: For JSON, ensure all special
            characters in this string are properly escaped.
        mode (str): Optional write mode: 'w' (overwrite, the default when
            omitted), 'a' (append), or 'x' (create exclusively); each also
            accepts an explicit text marker, e.g. 'wt'.
    """

    path: str
    content: str
    mode: NotRequired[Literal["w", "wt", "tw", "a", "at", "ta", "x", "xt", "tx"]]
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class FileReplacement(TypedDict):
    """
    Configuration for a single text replacement operation in a file.

    Attributes:
        path (str): Absolute or relative path to the file.
        old_text (str): Exact text to find and replace (must match the file
            content byte-for-byte).
        new_text (str): New text to replace it with.
        count (int): Optional number of occurrences to replace. When omitted,
            defaults to -1 (replace all occurrences).
    """

    path: str
    old_text: str
    new_text: str
    count: NotRequired[int]
|
|
@@ -9,13 +9,18 @@ from zrb.context.any_context import AnyContext
|
|
|
9
9
|
from zrb.task.llm.agent import run_agent_iteration
|
|
10
10
|
from zrb.task.llm.conversation_history import (
|
|
11
11
|
count_part_in_history_list,
|
|
12
|
+
inject_conversation_history_notes,
|
|
12
13
|
replace_system_prompt_in_history,
|
|
13
14
|
)
|
|
14
15
|
from zrb.task.llm.conversation_history_model import ConversationHistory
|
|
16
|
+
from zrb.task.llm.history_summarization_tool import (
|
|
17
|
+
create_history_summarization_tool,
|
|
18
|
+
)
|
|
15
19
|
from zrb.task.llm.typing import ListOfDict
|
|
16
20
|
from zrb.util.attr import get_bool_attr, get_int_attr
|
|
17
21
|
from zrb.util.cli.style import stylize_faint
|
|
18
|
-
from zrb.util.
|
|
22
|
+
from zrb.util.markdown import make_markdown_section
|
|
23
|
+
from zrb.util.truncate import truncate_str
|
|
19
24
|
|
|
20
25
|
if TYPE_CHECKING:
|
|
21
26
|
from pydantic_ai.models import Model
|
|
@@ -88,24 +93,25 @@ async def summarize_history(
|
|
|
88
93
|
conversation_history: ConversationHistory,
|
|
89
94
|
rate_limitter: LLMRateLimiter | None = None,
|
|
90
95
|
retries: int = 3,
|
|
91
|
-
) ->
|
|
96
|
+
) -> ConversationHistory:
|
|
92
97
|
"""Runs an LLM call to update the conversation summary."""
|
|
93
98
|
from pydantic_ai import Agent
|
|
94
99
|
|
|
100
|
+
inject_conversation_history_notes(conversation_history)
|
|
95
101
|
ctx.log_info("Attempting to summarize conversation history...")
|
|
96
102
|
# Construct the user prompt for the summarization agent
|
|
97
103
|
user_prompt = "\n".join(
|
|
98
104
|
[
|
|
99
|
-
|
|
105
|
+
make_markdown_section(
|
|
100
106
|
"Past Conversation",
|
|
101
107
|
"\n".join(
|
|
102
108
|
[
|
|
103
|
-
|
|
109
|
+
make_markdown_section(
|
|
104
110
|
"Summary",
|
|
105
111
|
conversation_history.past_conversation_summary,
|
|
106
112
|
as_code=True,
|
|
107
113
|
),
|
|
108
|
-
|
|
114
|
+
make_markdown_section(
|
|
109
115
|
"Last Transcript",
|
|
110
116
|
conversation_history.past_conversation_transcript,
|
|
111
117
|
as_code=True,
|
|
@@ -113,56 +119,37 @@ async def summarize_history(
|
|
|
113
119
|
]
|
|
114
120
|
),
|
|
115
121
|
),
|
|
116
|
-
|
|
122
|
+
make_markdown_section(
|
|
117
123
|
"Recent Conversation (JSON)",
|
|
118
|
-
json.dumps(conversation_history.history),
|
|
124
|
+
json.dumps(truncate_str(conversation_history.history, 1000)),
|
|
119
125
|
as_code=True,
|
|
120
126
|
),
|
|
121
|
-
make_prompt_section(
|
|
122
|
-
"Notes",
|
|
123
|
-
"\n".join(
|
|
124
|
-
[
|
|
125
|
-
make_prompt_section(
|
|
126
|
-
"Long Term",
|
|
127
|
-
conversation_history.long_term_note,
|
|
128
|
-
as_code=True,
|
|
129
|
-
),
|
|
130
|
-
make_prompt_section(
|
|
131
|
-
"Contextual",
|
|
132
|
-
conversation_history.contextual_note,
|
|
133
|
-
as_code=True,
|
|
134
|
-
),
|
|
135
|
-
]
|
|
136
|
-
),
|
|
137
|
-
),
|
|
138
127
|
]
|
|
139
128
|
)
|
|
140
|
-
|
|
129
|
+
summarize = create_history_summarization_tool(conversation_history)
|
|
130
|
+
summarization_agent = Agent[None, str](
|
|
141
131
|
model=model,
|
|
132
|
+
output_type=summarize,
|
|
142
133
|
system_prompt=system_prompt,
|
|
143
134
|
model_settings=settings,
|
|
144
135
|
retries=retries,
|
|
145
|
-
tools=[
|
|
146
|
-
conversation_history.write_past_conversation_summary,
|
|
147
|
-
conversation_history.write_past_conversation_transcript,
|
|
148
|
-
conversation_history.read_long_term_note,
|
|
149
|
-
conversation_history.write_long_term_note,
|
|
150
|
-
conversation_history.read_contextual_note,
|
|
151
|
-
conversation_history.write_contextual_note,
|
|
152
|
-
],
|
|
153
136
|
)
|
|
154
137
|
try:
|
|
155
|
-
ctx.print(stylize_faint("📝
|
|
138
|
+
ctx.print(stylize_faint(" 📝 Rollup Conversation"), plain=True)
|
|
156
139
|
summary_run = await run_agent_iteration(
|
|
157
140
|
ctx=ctx,
|
|
158
141
|
agent=summarization_agent,
|
|
159
142
|
user_prompt=user_prompt,
|
|
143
|
+
attachments=[],
|
|
160
144
|
history_list=[],
|
|
161
145
|
rate_limitter=rate_limitter,
|
|
146
|
+
log_indent_level=2,
|
|
162
147
|
)
|
|
163
148
|
if summary_run and summary_run.result and summary_run.result.output:
|
|
164
149
|
usage = summary_run.result.usage()
|
|
165
|
-
ctx.print(
|
|
150
|
+
ctx.print(
|
|
151
|
+
stylize_faint(f" 📝 Rollup Conversation Token: {usage}"), plain=True
|
|
152
|
+
)
|
|
166
153
|
ctx.print(plain=True)
|
|
167
154
|
ctx.log_info("History summarized and updated.")
|
|
168
155
|
else:
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from typing import Callable
|
|
2
|
+
|
|
3
|
+
from zrb.task.llm.conversation_history_model import ConversationHistory
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def create_history_summarization_tool(
    conversation_history: ConversationHistory,
) -> Callable[[str, str], str]:
    """
    Build a tool function that persists a summarized conversation state.

    The returned callable closes over *conversation_history* and overwrites
    its summary and transcript fields when invoked.
    """

    # NOTE(review): the inner docstring is presumably surfaced to the LLM as
    # the tool description — keep its wording intact when editing.
    def update_conversation_memory(
        past_conversation_summary: str,
        past_conversation_transcript: str,
    ) -> str:
        """
        Update the conversation memory including summary and transcript.
        - past_conversation_summary: A concise narrative that integrates the
          previous summary with the recent conversation.
        - past_conversation_transcript: MUST be ONLY the last 4 (four) turns
          of the conversation.
        """
        conversation_history.past_conversation_summary = past_conversation_summary
        conversation_history.past_conversation_transcript = past_conversation_transcript
        return "Conversation memory updated"

    return update_conversation_memory
|
zrb/task/llm/print_node.py
CHANGED
|
@@ -1,12 +1,16 @@
|
|
|
1
|
+
import json
|
|
1
2
|
from collections.abc import Callable
|
|
2
3
|
from typing import Any
|
|
3
4
|
|
|
4
5
|
from zrb.util.cli.style import stylize_faint
|
|
5
6
|
|
|
6
7
|
|
|
7
|
-
async def print_node(
|
|
8
|
+
async def print_node(
|
|
9
|
+
print_func: Callable, agent_run: Any, node: Any, log_indent_level: int = 0
|
|
10
|
+
):
|
|
8
11
|
"""Prints the details of an agent execution node using a provided print function."""
|
|
9
12
|
from pydantic_ai import Agent
|
|
13
|
+
from pydantic_ai.exceptions import UnexpectedModelBehavior
|
|
10
14
|
from pydantic_ai.messages import (
|
|
11
15
|
FinalResultEvent,
|
|
12
16
|
FunctionToolCallEvent,
|
|
@@ -18,76 +22,191 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
|
|
|
18
22
|
ToolCallPartDelta,
|
|
19
23
|
)
|
|
20
24
|
|
|
25
|
+
meta = getattr(node, "id", None) or getattr(node, "request_id", None)
|
|
21
26
|
if Agent.is_user_prompt_node(node):
|
|
22
|
-
print_func(
|
|
27
|
+
print_func(_format_header("🔠 Receiving input...", log_indent_level))
|
|
23
28
|
elif Agent.is_model_request_node(node):
|
|
24
29
|
# A model request node => We can stream tokens from the model's request
|
|
25
|
-
print_func(
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
async
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
event.delta,
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
30
|
+
print_func(_format_header("🧠 Processing...", log_indent_level))
|
|
31
|
+
# Reference: https://ai.pydantic.dev/agents/#streaming-all-events-and-output
|
|
32
|
+
try:
|
|
33
|
+
async with node.stream(agent_run.ctx) as request_stream:
|
|
34
|
+
is_streaming = False
|
|
35
|
+
async for event in request_stream:
|
|
36
|
+
if isinstance(event, PartStartEvent) and event.part:
|
|
37
|
+
if is_streaming:
|
|
38
|
+
print_func("")
|
|
39
|
+
content = _get_event_part_content(event)
|
|
40
|
+
print_func(_format_content(content, log_indent_level), end="")
|
|
41
|
+
is_streaming = True
|
|
42
|
+
elif isinstance(event, PartDeltaEvent):
|
|
43
|
+
if isinstance(event.delta, TextPartDelta):
|
|
44
|
+
content_delta = event.delta.content_delta
|
|
45
|
+
print_func(
|
|
46
|
+
_format_stream_content(content_delta, log_indent_level),
|
|
47
|
+
end="",
|
|
48
|
+
)
|
|
49
|
+
elif isinstance(event.delta, ThinkingPartDelta):
|
|
50
|
+
content_delta = event.delta.content_delta
|
|
51
|
+
print_func(
|
|
52
|
+
_format_stream_content(content_delta, log_indent_level),
|
|
53
|
+
end="",
|
|
54
|
+
)
|
|
55
|
+
elif isinstance(event.delta, ToolCallPartDelta):
|
|
56
|
+
args_delta = event.delta.args_delta
|
|
57
|
+
if isinstance(args_delta, dict):
|
|
58
|
+
args_delta = json.dumps(args_delta)
|
|
59
|
+
print_func(
|
|
60
|
+
_format_stream_content(args_delta, log_indent_level),
|
|
61
|
+
end="",
|
|
62
|
+
)
|
|
63
|
+
is_streaming = True
|
|
64
|
+
elif isinstance(event, FinalResultEvent) and event.tool_name:
|
|
65
|
+
if is_streaming:
|
|
66
|
+
print_func("")
|
|
67
|
+
tool_name = event.tool_name
|
|
45
68
|
print_func(
|
|
46
|
-
|
|
47
|
-
|
|
69
|
+
_format_content(
|
|
70
|
+
f"Result: tool_name={tool_name}", log_indent_level
|
|
71
|
+
)
|
|
48
72
|
)
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
73
|
+
is_streaming = False
|
|
74
|
+
if is_streaming:
|
|
75
|
+
print_func("")
|
|
76
|
+
except UnexpectedModelBehavior as e:
|
|
77
|
+
print_func("") # ensure newline consistency
|
|
78
|
+
print_func(
|
|
79
|
+
_format_content(
|
|
80
|
+
(
|
|
81
|
+
f"🟡 Unexpected Model Behavior: {e}. "
|
|
82
|
+
f"Cause: {e.__cause__}. Node.Id: {meta}"
|
|
83
|
+
),
|
|
84
|
+
log_indent_level,
|
|
85
|
+
)
|
|
86
|
+
)
|
|
59
87
|
elif Agent.is_call_tools_node(node):
|
|
60
88
|
# A handle-response node => The model returned some data, potentially calls a tool
|
|
61
|
-
print_func(
|
|
62
|
-
|
|
63
|
-
async
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
event.part.
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
# Handle dummy property if present (from our schema sanitization)
|
|
74
|
-
if (
|
|
75
|
-
isinstance(event.part.args, dict)
|
|
76
|
-
and "_dummy" in event.part.args
|
|
77
|
-
):
|
|
78
|
-
del event.part.args["_dummy"]
|
|
79
|
-
print_func(
|
|
80
|
-
stylize_faint(
|
|
81
|
-
f" {event.part.tool_call_id} | "
|
|
82
|
-
f"Call {event.part.tool_name} {event.part.args}"
|
|
89
|
+
print_func(_format_header("🧰 Calling Tool...", log_indent_level))
|
|
90
|
+
try:
|
|
91
|
+
async with node.stream(agent_run.ctx) as handle_stream:
|
|
92
|
+
async for event in handle_stream:
|
|
93
|
+
if isinstance(event, FunctionToolCallEvent):
|
|
94
|
+
args = _get_event_part_args(event)
|
|
95
|
+
call_id = event.part.tool_call_id
|
|
96
|
+
tool_name = event.part.tool_name
|
|
97
|
+
print_func(
|
|
98
|
+
_format_content(
|
|
99
|
+
f"{call_id} | Call {tool_name} {args}", log_indent_level
|
|
100
|
+
)
|
|
83
101
|
)
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
102
|
+
elif (
|
|
103
|
+
isinstance(event, FunctionToolResultEvent)
|
|
104
|
+
and event.tool_call_id
|
|
105
|
+
):
|
|
106
|
+
call_id = event.tool_call_id
|
|
107
|
+
result_content = event.result.content
|
|
108
|
+
print_func(
|
|
109
|
+
_format_content(
|
|
110
|
+
f"{call_id} | {result_content}", log_indent_level
|
|
111
|
+
)
|
|
89
112
|
)
|
|
90
|
-
|
|
113
|
+
except UnexpectedModelBehavior as e:
|
|
114
|
+
print_func("") # ensure newline consistency
|
|
115
|
+
print_func(
|
|
116
|
+
_format_content(
|
|
117
|
+
(
|
|
118
|
+
f"🟡 Unexpected Model Behavior: {e}. "
|
|
119
|
+
f"Cause: {e.__cause__}. Node.Id: {meta}"
|
|
120
|
+
),
|
|
121
|
+
log_indent_level,
|
|
122
|
+
)
|
|
123
|
+
)
|
|
91
124
|
elif Agent.is_end_node(node):
|
|
92
125
|
# Once an End node is reached, the agent run is complete
|
|
93
|
-
print_func(
|
|
126
|
+
print_func(_format_header("✅ Completed...", log_indent_level))
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _format_header(text: str | None, log_indent_level: int = 0) -> str:
    """Format a section header line: base indent only, no extra inset."""
    return _format(
        text,
        base_indent=2,
        first_indent=0,
        indent=0,
        log_indent_level=log_indent_level,
    )
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def _format_content(text: str | None, log_indent_level: int = 0) -> str:
    """Format a body line, inset 3 spaces past the header on every line."""
    return _format(
        text,
        base_indent=2,
        first_indent=3,
        indent=3,
        log_indent_level=log_indent_level,
    )
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def _format_stream_content(text: str | None, log_indent_level: int = 0) -> str:
    """Format a streamed delta: continuation indent only, no first-line prefix."""
    return _format(
        text,
        base_indent=2,
        indent=3,
        log_indent_level=log_indent_level,
        is_stream=True,
    )
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def _format(
    text: str | None,
    base_indent: int = 0,
    first_indent: int = 0,
    indent: int = 0,
    log_indent_level: int = 0,
    is_stream: bool = False,
) -> str:
    """Indent *text* for nested log output and render it in faint style.

    Continuation lines receive base_indent * (log_indent_level + 1) + indent
    spaces; non-stream output additionally gets a first-line prefix of
    base_indent * (log_indent_level + 1) + first_indent spaces.
    """
    body = text or ""
    depth = base_indent * (log_indent_level + 1)
    continuation_prefix = " " * (depth + indent)
    body = body.replace("\n", "\n" + continuation_prefix)
    if is_stream:
        # Streamed deltas already follow previous output; no first-line prefix.
        return stylize_faint(body)
    head_prefix = " " * (depth + first_indent)
    return stylize_faint(head_prefix + body)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def _get_event_part_args(event: Any) -> Any:
    """
    Normalize a tool-call event's arguments for display.

    Returns a dict with long string values truncated when the args parse as a
    JSON object (or already are a dict); otherwise returns the raw args.
    """
    # Handle empty arguments across different providers
    if event.part.args == "" or event.part.args is None:
        return {}
    if isinstance(event.part.args, str):
        # Some providers might send "null" or "{}" as a string
        if event.part.args.strip() in ["null", "{}"]:
            return {}
        try:
            obj = json.loads(event.part.args)
            if isinstance(obj, dict):
                return _truncate_kwargs(obj)
        except json.JSONDecodeError:
            pass
    # Args already provided as a dict: truncate values for display
    if isinstance(event.part.args, dict):
        return _truncate_kwargs(event.part.args)
    # Fallback: non-dict JSON (e.g. a list) or an unparseable string
    return event.part.args
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def _truncate_kwargs(kwargs: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *kwargs* with long string values shortened for display."""
    return {name: _truncate_arg(value) for name, value in kwargs.items()}
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def _truncate_arg(arg: str, length: int = 19) -> str:
|
|
202
|
+
if isinstance(arg, str) and len(arg) > length:
|
|
203
|
+
return f"{arg[:length-4]} ..."
|
|
204
|
+
return arg
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def _get_event_part_content(event: Any) -> str:
|
|
208
|
+
if not hasattr(event, "part"):
|
|
209
|
+
return f"{event}"
|
|
210
|
+
if not hasattr(event.part, "content"):
|
|
211
|
+
return f"{event.part}"
|
|
212
|
+
return getattr(event.part, "content")
|