zrb 1.9.17__py3-none-any.whl → 1.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/__init__.py +2 -2
- zrb/builtin/llm/history.py +3 -5
- zrb/builtin/llm/tool/cli.py +17 -13
- zrb/builtin/llm/tool/file.py +2 -2
- zrb/builtin/llm/tool/sub_agent.py +3 -5
- zrb/config/config.py +8 -12
- zrb/config/llm_config.py +132 -107
- zrb/config/llm_rate_limitter.py +13 -2
- zrb/task/llm/conversation_history.py +128 -0
- zrb/task/llm/conversation_history_model.py +438 -0
- zrb/task/llm/history_summarization.py +76 -26
- zrb/task/llm/prompt.py +106 -14
- zrb/task/llm_task.py +53 -92
- zrb/util/llm/prompt.py +18 -0
- {zrb-1.9.17.dist-info → zrb-1.10.1.dist-info}/METADATA +1 -1
- {zrb-1.9.17.dist-info → zrb-1.10.1.dist-info}/RECORD +18 -18
- zrb/task/llm/context.py +0 -58
- zrb/task/llm/context_enrichment.py +0 -172
- zrb/task/llm/history.py +0 -233
- {zrb-1.9.17.dist-info → zrb-1.10.1.dist-info}/WHEEL +0 -0
- {zrb-1.9.17.dist-info → zrb-1.10.1.dist-info}/entry_points.txt +0 -0
zrb/task/llm/history_summarization.py CHANGED

@@ -7,13 +7,15 @@ from zrb.config.llm_config import llm_config
 from zrb.config.llm_rate_limitter import LLMRateLimiter, llm_rate_limitter
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.agent import run_agent_iteration
-from zrb.task.llm.
+from zrb.task.llm.conversation_history import (
     count_part_in_history_list,
-
+    replace_system_prompt_in_history,
 )
+from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.task.llm.typing import ListOfDict
 from zrb.util.attr import get_bool_attr, get_int_attr
 from zrb.util.cli.style import stylize_faint
+from zrb.util.llm.prompt import make_prompt_section
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model

@@ -82,9 +84,8 @@
     ctx: AnyContext,
     model: "Model | str | None",
     settings: "ModelSettings | None",
-
-
-    history_list: ListOfDict,
+    system_prompt: str,
+    conversation_history: ConversationHistory,
     rate_limitter: LLMRateLimiter | None = None,
     retries: int = 3,
 ) -> str:

@@ -93,16 +94,65 @@
 
     ctx.log_info("Attempting to summarize conversation history...")
     # Construct the user prompt for the summarization agent
-    user_prompt =
-
+    user_prompt = "\n".join(
+        [
+            make_prompt_section(
+                "Past Conversation",
+                "\n".join(
+                    [
+                        make_prompt_section(
+                            "Summary",
+                            conversation_history.past_conversation_summary,
+                            as_code=True,
+                        ),
+                        make_prompt_section(
+                            "Last Transcript",
+                            conversation_history.past_conversation_transcript,
+                            as_code=True,
+                        ),
+                    ]
+                ),
+            ),
+            make_prompt_section(
+                "Recent Conversation (JSON)",
+                json.dumps(conversation_history.history),
+                as_code=True,
+            ),
+            make_prompt_section(
+                "Notes",
+                "\n".join(
+                    [
+                        make_prompt_section(
+                            "Long Term",
+                            conversation_history.long_term_note,
+                            as_code=True,
+                        ),
+                        make_prompt_section(
+                            "Contextual",
+                            conversation_history.contextual_note,
+                            as_code=True,
+                        ),
+                    ]
+                ),
+            ),
+        ]
     )
     summarization_agent = Agent(
         model=model,
-        system_prompt=
+        system_prompt=system_prompt,
         model_settings=settings,
         retries=retries,
+        tools=[
+            conversation_history.write_past_conversation_summary,
+            conversation_history.write_past_conversation_transcript,
+            conversation_history.read_contextual_note,
+            conversation_history.write_contextual_note,
+            conversation_history.replace_in_contextual_note,
+            conversation_history.read_long_term_note,
+            conversation_history.write_long_term_note,
+            conversation_history.replace_in_long_term_note,
+        ],
     )
-
     try:
         ctx.print(stylize_faint("📝 Summarize"), plain=True)
         summary_run = await run_agent_iteration(

@@ -113,27 +163,22 @@
             rate_limitter=rate_limitter,
         )
         if summary_run and summary_run.result and summary_run.result.output:
-            new_summary = str(summary_run.result.output)
             usage = summary_run.result.usage()
             ctx.print(stylize_faint(f"📝 Summarization Token: {usage}"), plain=True)
             ctx.print(plain=True)
             ctx.log_info("History summarized and updated.")
-            ctx.log_info(f"New conversation summary:\n{new_summary}")
-            return new_summary
         else:
             ctx.log_warning("History summarization failed or returned no data.")
     except BaseException as e:
         ctx.log_warning(f"Error during history summarization: {e}")
         traceback.print_exc()
-
     # Return the original summary if summarization fails
-    return
+    return conversation_history
 
 
 async def maybe_summarize_history(
     ctx: AnyContext,
-
-    conversation_summary: str,
+    conversation_history: ConversationHistory,
     should_summarize_history_attr: BoolAttr | None,
     render_summarize_history: bool,
     history_summarization_token_threshold_attr: IntAttr | None,

@@ -142,26 +187,31 @@
     model_settings: "ModelSettings | None",
     summarization_prompt: str,
     rate_limitter: LLMRateLimiter | None = None,
-) ->
+) -> ConversationHistory:
     """Summarizes history and updates context if enabled and threshold met."""
-
+    shorten_history = replace_system_prompt_in_history(conversation_history.history)
     if should_summarize_history(
         ctx,
-
+        shorten_history,
         should_summarize_history_attr,
         render_summarize_history,
         history_summarization_token_threshold_attr,
         render_history_summarization_token_threshold,
     ):
-
+        original_history = conversation_history.history
+        conversation_history.history = shorten_history
+        conversation_history = await summarize_history(
            ctx=ctx,
            model=model,
            settings=model_settings,
-
-
-            history_list=shorten_history_list,
+            system_prompt=summarization_prompt,
+            conversation_history=conversation_history,
            rate_limitter=rate_limitter,
        )
-
-
-
+        conversation_history.history = original_history
+        if (
+            conversation_history.past_conversation_summary != ""
+            and conversation_history.past_conversation_transcript != ""
+        ):
+            conversation_history.history = []
+    return conversation_history
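Note: the summarization user prompt above is assembled from nested make_prompt_section calls (the helper added in zrb/util/llm/prompt.py, shown further down). A standalone sketch of the layout this nesting produces; the sample values are invented, only two of the four sections are shown, and it assumes zrb >= 1.10.1 is installed:

from zrb.util.llm.prompt import make_prompt_section

# Invented sample data standing in for the ConversationHistory fields.
user_prompt = "\n".join(
    [
        make_prompt_section(
            "Past Conversation",
            "\n".join(
                [
                    make_prompt_section("Summary", "User is renaming modules.", as_code=True),
                    make_prompt_section("Last Transcript", "user: continue", as_code=True),
                ]
            ),
        ),
        make_prompt_section("Recent Conversation (JSON)", '[{"role": "user"}]', as_code=True),
    ]
)
print(user_prompt)
# The inner "# Summary" / "# Last Transcript" headers are demoted one level,
# so they render as "## Summary" and "## Last Transcript" under
# "# Past Conversation".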
zrb/task/llm/prompt.py CHANGED

@@ -1,7 +1,15 @@
+import os
+import platform
+import re
+from datetime import datetime, timezone
+
 from zrb.attr.type import StrAttr
 from zrb.config.llm_config import llm_config as llm_config
 from zrb.context.any_context import AnyContext
+from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.util.attr import get_attr, get_str_attr
+from zrb.util.file import read_dir, read_file_with_line_numbers
+from zrb.util.llm.prompt import make_prompt_section
 
 
 def get_persona(

@@ -52,26 +60,110 @@ def get_special_instruction_prompt(
     return llm_config.default_special_instruction_prompt
 
 
-def
+def get_system_and_user_prompt(
     ctx: AnyContext,
-
-
-
-
+    user_message: str,
+    persona_attr: StrAttr | None = None,
+    system_prompt_attr: StrAttr | None = None,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    conversation_history: ConversationHistory | None = None,
+) -> tuple[str, str]:
     """Combines persona, base system prompt, and special instructions."""
     persona = get_persona(ctx, persona_attr)
     base_system_prompt = get_base_system_prompt(ctx, system_prompt_attr)
     special_instruction = get_special_instruction_prompt(
         ctx, special_instruction_prompt_attr
     )
-
-
-
-
-
-
-
-
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    conversation_context, new_user_message = extract_conversation_context(user_message)
+    new_system_prompt = "\n".join(
+        [
+            make_prompt_section("Persona", persona),
+            make_prompt_section("System Prompt", base_system_prompt),
+            make_prompt_section("Special Instruction", special_instruction),
+            make_prompt_section(
+                "Past Conversation",
+                "\n".join(
+                    [
+                        make_prompt_section(
+                            "Summary",
+                            conversation_history.past_conversation_summary,
+                            as_code=True,
+                        ),
+                        make_prompt_section(
+                            "Last Transcript",
+                            conversation_history.past_conversation_transcript,
+                            as_code=True,
+                        ),
+                    ]
+                ),
+            ),
+            make_prompt_section(
+                "Notes",
+                "\n".join(
+                    [
+                        make_prompt_section(
+                            "Long Term",
+                            conversation_history.long_term_note,
+                            as_code=True,
+                        ),
+                        make_prompt_section(
+                            "Contextual",
+                            conversation_history.contextual_note,
+                            as_code=True,
+                        ),
+                    ]
+                ),
+            ),
+            make_prompt_section("Conversation Context", conversation_context),
+        ]
+    )
+    return new_system_prompt, new_user_message
+
+
+def extract_conversation_context(user_message: str) -> tuple[str, str]:
+    modified_user_message = user_message
+    # Match "@" + any non-space/comma sequence that contains at least one "/"
+    pattern = r"(?<!\w)@(?=[^,\s]*/)([^,\s]+)"
+    potential_resource_path = re.findall(pattern, user_message)
+    apendixes = []
+    for ref in potential_resource_path:
+        resource_path = os.path.abspath(os.path.expanduser(ref))
+        print("RESOURCE PATH", resource_path)
+        if os.path.isfile(resource_path):
+            content = read_file_with_line_numbers(resource_path)
+            apendixes.append(
+                make_prompt_section(
+                    f"`{ref}` (file path: `{resource_path}`)", content, as_code=True
+                )
+            )
+            # Remove the '@' from the modified user message for valid file paths
+            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
+        elif os.path.isdir(resource_path):
+            content = read_dir(resource_path)
+            apendixes.append(
+                make_prompt_section(
+                    f"`{ref}` (directory path: `{resource_path}`)",
+                    content,
+                    as_code=True,
+                )
+            )
+            # Remove the '@' from the modified user message for valid directory paths
+            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
+    conversation_context = "\n".join(
+        [
+            make_prompt_section(
+                "Current Time", datetime.now(timezone.utc).astimezone().isoformat()
+            ),
+            make_prompt_section("Current Working Directory", os.getcwd()),
+            make_prompt_section("Current OS", platform.system()),
+            make_prompt_section("OS Version", platform.version()),
+            make_prompt_section("Python Version", platform.python_version()),
+            make_prompt_section("Apendixes", "\n".join(apendixes)),
+        ]
+    )
+    return conversation_context, modified_user_message
 
 
 def get_user_message(

@@ -85,7 +177,7 @@ def get_user_message(
     )
 
 
-def
+def get_summarization_system_prompt(
     ctx: AnyContext,
     summarization_prompt_attr: StrAttr | None,
 ) -> str:
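Aside: the @-reference pattern introduced in extract_conversation_context can be checked in isolation. A minimal, standalone sketch (the sample messages are invented):

import re

# Matches "@" followed by a non-space/comma run that contains at least one "/".
pattern = r"(?<!\w)@(?=[^,\s]*/)([^,\s]+)"
print(re.findall(pattern, "Summarize @src/main.py and @~/notes/, then ping me"))
# ['src/main.py', '~/notes/']
print(re.findall(pattern, "mail me at user@host.com/inbox"))
# [] (the (?<!\w) lookbehind skips "@" inside words, such as e-mail addresses)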
zrb/task/llm_task.py CHANGED

@@ -15,19 +15,17 @@ from zrb.task.llm.config import (
     get_model,
     get_model_settings,
 )
-from zrb.task.llm.
-from zrb.task.llm.context_enrichment import maybe_enrich_context
-from zrb.task.llm.history import (
-    ConversationHistoryData,
+from zrb.task.llm.conversation_history import (
     ListOfDict,
     read_conversation_history,
     write_conversation_history,
 )
+from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.task.llm.history_summarization import maybe_summarize_history
 from zrb.task.llm.prompt import (
-    get_combined_system_prompt,
     get_context_enrichment_prompt,
-
+    get_summarization_system_prompt,
+    get_system_and_user_prompt,
     get_user_message,
 )
 from zrb.util.cli.style import stylize_faint

@@ -85,17 +83,17 @@ class LLMTask(BaseTask):
             list["MCPServer"] | Callable[[AnySharedContext], list["MCPServer"]]
         ) = [],
         conversation_history: (
-
-            | Callable[[AnySharedContext],
+            ConversationHistory
+            | Callable[[AnySharedContext], ConversationHistory | dict | list]
             | dict
             | list
-        ) =
+        ) = ConversationHistory(),
         conversation_history_reader: (
-            Callable[[AnySharedContext],
+            Callable[[AnySharedContext], ConversationHistory | dict | list | None]
             | None
         ) = None,
         conversation_history_writer: (
-            Callable[[AnySharedContext,
+            Callable[[AnySharedContext, ConversationHistory], None] | None
         ) = None,
         conversation_history_file: StrAttr | None = None,
         render_history_file: bool = True,

@@ -226,55 +224,57 @@ class LLMTask(BaseTask):
             model_api_key_attr=self._model_api_key,
             render_model_api_key=self._render_model_api_key,
         )
-
-            ctx=ctx,
-            context_enrichment_prompt_attr=self._context_enrichment_prompt,
-        )
-        summarization_prompt = get_summarization_prompt(
+        summarization_prompt = get_summarization_system_prompt(
             ctx=ctx,
             summarization_prompt_attr=self._summarization_prompt,
         )
         user_message = get_user_message(ctx, self._message, self._render_message)
-        # Get the combined system prompt using the new getter
-        system_prompt = get_combined_system_prompt(
-            ctx=ctx,
-            persona_attr=self._persona,
-            system_prompt_attr=self._system_prompt,
-            special_instruction_prompt_attr=self._special_instruction_prompt,
-        )
         # 1. Prepare initial state (read history from previous session)
-
+        conversation_history = await read_conversation_history(
             ctx=ctx,
             conversation_history_reader=self._conversation_history_reader,
             conversation_history_file_attr=self._conversation_history_file,
             render_history_file=self._render_history_file,
             conversation_history_attr=self._conversation_history,
         )
-
-
-
-
-
-
+        conversation_history.fetch_newest_notes()
+        # 2. Get system prompt and user prompt
+        system_prompt, user_message = get_system_and_user_prompt(
+            ctx=ctx,
+            user_message=user_message,
+            persona_attr=self._persona,
+            system_prompt_attr=self._system_prompt,
+            special_instruction_prompt_attr=self._special_instruction_prompt,
+            conversation_history=conversation_history,
+        )
+        # 3. Get the agent instance
+        agent = get_agent(
             ctx=ctx,
-
-            long_term_context=long_term_context,
-            should_enrich_context_attr=self._should_enrich_context,
-            render_enrich_context=self._render_enrich_context,
-            context_enrichment_token_threshold_attr=self._context_enrichment_token_threshold,
-            render_context_enrichment_token_threshold=self._render_context_enrichment_token_threshold,  # noqa
+            agent_attr=self._agent,
             model=model,
+            system_prompt=system_prompt,
             model_settings=model_settings,
-
-
+            tools_attr=self._tools,
+            additional_tools=self._additional_tools,
+            mcp_servers_attr=self._mcp_servers,
+            additional_mcp_servers=self._additional_mcp_servers,
         )
-
+        # 4. Run the agent iteration and save the results/history
+        result = await self._execute_agent(
+            ctx,
+            agent,
+            user_message,
+            conversation_history,
+        )
+        # 5. Summarize
+        conversation_history = await maybe_summarize_history(
             ctx=ctx,
-
-            conversation_summary=conversation_summary,
+            conversation_history=conversation_history,
             should_summarize_history_attr=self._should_summarize_history,
             render_summarize_history=self._render_summarize_history,
-            history_summarization_token_threshold_attr=
+            history_summarization_token_threshold_attr=(
+                self._history_summarization_token_threshold
+            ),
             render_history_summarization_token_threshold=(
                 self._render_history_summarization_token_threshold
             ),

@@ -283,50 +283,22 @@
             summarization_prompt=summarization_prompt,
             rate_limitter=self._rate_limitter,
         )
-
-
-        final_user_prompt, system_info = extract_default_context(user_message)
-        context_parts = [
-            f"## System Information\n{json.dumps(system_info, indent=2)}",
-        ]
-        if new_long_term_context:
-            context_parts.append(new_long_term_context)
-        if new_conversation_summary:
-            context_parts.append(new_conversation_summary)
-
-        final_system_prompt = "\n\n".join(
-            [system_prompt, "# Context", "\n\n---\n\n".join(context_parts)]
-        )
-        # 4. Get the agent instance
-        agent = get_agent(
+        # 6. Write conversation history
+        await write_conversation_history(
             ctx=ctx,
-
-
-
-
-            tools_attr=self._tools,
-            additional_tools=self._additional_tools,
-            mcp_servers_attr=self._mcp_servers,
-            additional_mcp_servers=self._additional_mcp_servers,
-        )
-        # 5. Run the agent iteration and save the results/history
-        return await self._run_agent_and_save_history(
-            ctx,
-            agent,
-            final_user_prompt,
-            new_history_list,
-            new_long_term_context,
-            new_conversation_summary,
+            history_data=conversation_history,
+            conversation_history_writer=self._conversation_history_writer,
+            conversation_history_file_attr=self._conversation_history_file,
+            render_history_file=self._render_history_file,
         )
+        return result
 
-    async def
+    async def _execute_agent(
         self,
         ctx: AnyContext,
         agent: "Agent",
         user_prompt: str,
-
-        long_term_context: str,
-        conversation_summary: str,
+        conversation_history: ConversationHistory,
     ) -> Any:
         """Executes the agent, processes results, and saves history."""
         try:

@@ -334,23 +306,12 @@ class LLMTask(BaseTask):
                 ctx=ctx,
                 agent=agent,
                 user_prompt=user_prompt,
-                history_list=
+                history_list=conversation_history.history,
                 rate_limitter=self._rate_limitter,
             )
             if agent_run and agent_run.result:
                 new_history_list = json.loads(agent_run.result.all_messages_json())
-
-                    long_term_context=long_term_context,
-                    conversation_summary=conversation_summary,
-                    history=new_history_list,
-                )
-                await write_conversation_history(
-                    ctx=ctx,
-                    history_data=data_to_write,
-                    conversation_history_writer=self._conversation_history_writer,
-                    conversation_history_file_attr=self._conversation_history_file,
-                    render_history_file=self._render_history_file,
-                )
+                conversation_history.history = new_history_list
                 xcom_usage_key = f"{self.name}-usage"
                 if xcom_usage_key not in ctx.xcom:
                     ctx.xcom[xcom_usage_key] = Xcom([])
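The widened constructor types above mean existing callers can keep passing plain dicts or lists for conversation_history, or adopt the new model and hooks. A hedged sketch of the wiring (reader/writer parameter names come from the signature in the diff; the name/message arguments follow LLMTask usage from earlier zrb releases and are assumptions here):

from zrb.task.llm.conversation_history_model import ConversationHistory
from zrb.task.llm_task import LLMTask

def load_history(shared_ctx) -> ConversationHistory | dict | list | None:
    # Returning None presumably falls back to conversation_history_file / the attribute.
    return None

def save_history(shared_ctx, history: ConversationHistory) -> None:
    # Persist anywhere; without a writer, zrb writes conversation_history_file.
    print("messages recorded:", len(history.history))

chat = LLMTask(
    name="chat",
    message="Hello",
    conversation_history=ConversationHistory(),  # a dict or list also works
    conversation_history_reader=load_history,
    conversation_history_writer=save_history,
)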
zrb/util/llm/prompt.py ADDED

@@ -0,0 +1,18 @@
+import re
+
+
+def _demote_markdown_headers(md: str) -> str:
+    def demote(match):
+        hashes = match.group(1)
+        return "#" + hashes + match.group(2)  # add one `#`
+
+    # Replace headers at the beginning of a line
+    return re.sub(r"^(#{1,6})(\s)", demote, md, flags=re.MULTILINE)
+
+
+def make_prompt_section(header: str, content: str, as_code: bool = False) -> str:
+    if content.strip() == "":
+        return ""
+    if as_code:
+        return f"# {header}\n````\n{content.strip()}\n````\n"
+    return f"# {header}\n{_demote_markdown_headers(content.strip())}\n"
{zrb-1.9.17.dist-info → zrb-1.10.1.dist-info}/RECORD CHANGED

@@ -1,4 +1,4 @@
-zrb/__init__.py,sha256=
+zrb/__init__.py,sha256=GSVTmbO6guFI2oW5gGxQvn7ELOMa5-qPxfyQMv9H07M,5119
 zrb/__main__.py,sha256=9SXH9MK4PVyU9lkEyHxiIUABbcsV2wseP94HmlqTR4M,2657
 zrb/attr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/attr/type.py,sha256=4TV5gPYMMrKh5V-yB6iRYKCbsXAH_AvGXMsjxKLHcUs,568

@@ -10,17 +10,17 @@ zrb/builtin/group.py,sha256=t008xLM4_fgbjfZrPoi_fQAnSHIo6MOiQSCHBO4GDYU,2379
 zrb/builtin/http.py,sha256=sLqEczuSxGYXWzyJR6frGOHkPTviu4BeyroUr3-ZuAI,4322
 zrb/builtin/jwt.py,sha256=3M5uaQhJZbKQLjTUft1OwPz_JxtmK-xtkjxWjciOQho,2859
 zrb/builtin/llm/chat_session.py,sha256=0R04DpBr_LGfNJbXIQ_4XQSxL7kY2M3U-bbu5lsXZ54,8542
-zrb/builtin/llm/history.py,sha256=
+zrb/builtin/llm/history.py,sha256=LDOrL0p7r_AHLa5L8Dp7bHNsOALugmJd7OguXRWGnm4,3087
 zrb/builtin/llm/input.py,sha256=Nw-26uTWp2QhUgKJcP_IMHmtk-b542CCSQ_vCOjhvhM,877
 zrb/builtin/llm/llm_ask.py,sha256=oozfQwa1i2PnXV4qWbn60Pmd3fS0kgmhYCbfKlhr25o,4549
 zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
 zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/builtin/llm/tool/api.py,sha256=OhmfLc2TwWKQYIMweGelqb5s4JF4nB-YynbSO4yb_Jk,2342
-zrb/builtin/llm/tool/cli.py,sha256=
+zrb/builtin/llm/tool/cli.py,sha256=dUWZrW2X5J_lONuzR__6-SbewSdi28E3RRuksjd4mWo,1234
 zrb/builtin/llm/tool/code.py,sha256=GRP_IZAkeL6RIlUm407BQRF992ES57pdzPaQdC5UsJU,8218
-zrb/builtin/llm/tool/file.py,sha256=
+zrb/builtin/llm/tool/file.py,sha256=vUpkHPJHszdFKWjsh5Ma8_WGFwZMcm1nlJ-rGCIA_tI,22290
 zrb/builtin/llm/tool/rag.py,sha256=wB74JV7bxs0ec77b_09Z2lPjoR1WzPUvZbuXOdb9Q9g,9675
-zrb/builtin/llm/tool/sub_agent.py,sha256=
+zrb/builtin/llm/tool/sub_agent.py,sha256=UWBLiuCK6FT8Ku0yPfSxd_k67h_Pme1K7d2VSABacjQ,4855
 zrb/builtin/llm/tool/web.py,sha256=gQlUsmYCJOFJtNjwpjK-xk13LMvrMSpSaFHXUTnIayQ,7090
 zrb/builtin/md5.py,sha256=690RV2LbW7wQeTFxY-lmmqTSVEEZv3XZbjEUW1Q3XpE,1480
 zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -217,9 +217,9 @@ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config/config.py,sha256=
-zrb/config/llm_config.py,sha256=
-zrb/config/llm_rate_limitter.py,sha256=
+zrb/config/config.py,sha256=vUmJWQHRgmMVL1FmoukjQe9J6vCuyzqBIg4FncREGmw,15108
+zrb/config/llm_config.py,sha256=nsJWHToreHjJ84H-qR7qEbhTKsy0O_NhGbsb5kT1538,20818
+zrb/config/llm_rate_limitter.py,sha256=P4vR7qxwiGwjlKx2kHcfdIxwGbJB98vdN-UQEH-Q2WU,4894
 zrb/config/web_auth_config.py,sha256=_PXatQTYh2mX9H3HSYSQKp13zm1RlLyVIoeIr6KYMQ8,6279
 zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/content_transformer/any_content_transformer.py,sha256=v8ZUbcix1GGeDQwB6OKX_1TjpY__ksxWVeqibwa_iZA,850

@@ -339,16 +339,15 @@ zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
 zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/task/llm/agent.py,sha256=BZHbz-YXgSdm1tTwGMR_maqcd3yMFGSdzLyDjuxT_XI,6702
 zrb/task/llm/config.py,sha256=TlyH925_fboIlK2Ixf34tynmenqs9s9rfsnPs4jff78,3490
-zrb/task/llm/
-zrb/task/llm/
+zrb/task/llm/conversation_history.py,sha256=B_PDWYL_q66s0xwWBzMSomqPN6u3gkXlIeXBD5A0Apg,4416
+zrb/task/llm/conversation_history_model.py,sha256=Zbz7w0M2FeOklWG4AVz8C2y2AZ87aNMZ5I0YbBanO0Y,16312
 zrb/task/llm/error.py,sha256=QR-nIohS6pBpC_16cWR-fw7Mevo1sNYAiXMBsh_CJDE,4157
-zrb/task/llm/
-zrb/task/llm/history_summarization.py,sha256=V0G1BiISnxxmD8040PrvT0_dfqGE7zbLtk74KUpuqig,6050
+zrb/task/llm/history_summarization.py,sha256=vY2_iLULgSNTaqW1xJqOhI8oOH3vNEsZn_yNcx6jYX8,8104
 zrb/task/llm/print_node.py,sha256=zocTKi9gZDxl2I6KNu095TmMc13Yip6SNuWYnswS680,4060
-zrb/task/llm/prompt.py,sha256=
+zrb/task/llm/prompt.py,sha256=ARyJ2Q0G6N7I2X2KZfLQW45yj9nmzl5SfX90rrY7TVU,7417
 zrb/task/llm/tool_wrapper.py,sha256=8_bL8m_WpRf-pVKSrvQIVqT-m2sUA87a1RBQG13lhp4,6457
 zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
-zrb/task/llm_task.py,sha256=
+zrb/task/llm_task.py,sha256=TTYb9FYqZX_OIgDE6q5Z9IVuM6NcsKFeCVIi6ovQDE8,13712
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=WfqNSaicJgYWpunNU34eYxXDqHDHOftuDHyWJKjqwg0,6365
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806

@@ -383,6 +382,7 @@ zrb/util/git_subtree.py,sha256=AyQWCWEi2EIzEpYXRnYN55157KMUql0WHj70QNw5PHU,4612
 zrb/util/git_subtree_model.py,sha256=P_gJ0zhOAc3gFM6sYcjc0Ack9dFBt75TI5fXdE0q320,871
 zrb/util/group.py,sha256=T82yr3qg9I5k10VPXkMyrIRIqyfzadSH813bqzwKEPI,4718
 zrb/util/init_path.py,sha256=9eN7CkWNGhDBpjTQs2j9YHVMzui7Y8DEb1WP4aTPzeo,659
+zrb/util/llm/prompt.py,sha256=tJEGV2X7v13b1PXUzRXzu1e1HnY6d9JLtqbUGiZqHoo,573
 zrb/util/load.py,sha256=DK0KYSlu48HCoGPqnW1IxnE3pHrZSPCstfz8Fjyqqv8,2140
 zrb/util/run.py,sha256=vu-mcSWDP_WuuvIKqM_--Gk3WkABO1oTXiHmBRTvVQk,546
 zrb/util/string/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -393,7 +393,7 @@ zrb/util/todo.py,sha256=r9_KYF2-hLKMNjsp6AFK9zivykMrywd-kJ4bCwfdafI,19323
 zrb/util/todo_model.py,sha256=hhzAX-uFl5rsg7iVX1ULlJOfBtblwQ_ieNUxBWfc-Os,1670
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.
-zrb-1.
-zrb-1.
-zrb-1.
+zrb-1.10.1.dist-info/METADATA,sha256=ggHQjxCiRomL7Z9iErAHhnQF6Hf71MZi0wwukGvetME,9778
+zrb-1.10.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.10.1.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.10.1.dist-info/RECORD,,