zrb 1.6.0__py3-none-any.whl → 1.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/llm_config.py +8 -5
- zrb/task/llm/agent.py +1 -2
- zrb/task/llm/context.py +1 -3
- zrb/task/llm/context_enrichment.py +21 -15
- zrb/task/llm/history.py +32 -63
- zrb/task/llm/history_summarization.py +19 -19
- zrb/task/llm/prompt.py +0 -27
- zrb/task/llm_task.py +22 -18
- {zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/METADATA +1 -1
- {zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/RECORD +12 -12
- {zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/WHEEL +0 -0
- {zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/entry_points.txt +0 -0
zrb/llm_config.py
CHANGED
@@ -26,9 +26,10 @@ You are an expert in various fields including technology, science, history, and
 
 # Concise summarization focused on preserving critical context for continuity.
 DEFAULT_SUMMARIZATION_PROMPT = """
-You are a summarization assistant.
-
-
+You are a summarization assistant.
+Your goal is to help main assistant to continue the conversation by creating an updated,
+concise summary integrating the previous summary (if any) with the new conversation history.
+Preserve ALL critical context needed for the main assistant
 to continue the task effectively. This includes key facts, decisions, tool usage
 results, and essential background. Do not omit details that would force the main
 assistant to re-gather information.
@@ -37,11 +38,13 @@ Output *only* the updated summary text.
 
 DEFAULT_CONTEXT_ENRICHMENT_PROMPT = """
 You are an information extraction assistant.
+Your goal is to help main assistant to continue the conversation by extracting
+important informations.
 Analyze the conversation history and current context to extract key facts like
 user_name, user_roles, preferences, goals, etc.
 Return only a JSON object containing a single key "response", whose value is
-another JSON object with these details.
-If
+another JSON object with these details (i.e., {"response": {"context_name": "value"}}).
+If no context can be extracted, return {"response": {}}.
 """.strip()
 
 DEFAULT_SPECIAL_INSTRUCTION_PROMPT = ""  # Default to empty
zrb/task/llm/agent.py
CHANGED
@@ -2,14 +2,12 @@ from collections.abc import Callable
 from typing import TYPE_CHECKING, Any
 
 if TYPE_CHECKING:
-    from openai import APIError
     from pydantic_ai import Agent, Tool
     from pydantic_ai.agent import AgentRun
     from pydantic_ai.mcp import MCPServer
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
 else:
-    APIError = Any
     Agent = Any
     Tool = Any
     AgentRun = Any
@@ -130,6 +128,7 @@ async def run_agent_iteration(
     Raises:
         Exception: If any error occurs during agent execution.
     """
+    from openai import APIError
     from pydantic_ai.messages import ModelMessagesTypeAdapter
 
     async with agent.run_mcp_servers():
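Moving `from openai import APIError` out of the `TYPE_CHECKING` block and into `run_agent_iteration` defers the dependency to call time: importing `zrb.task.llm.agent` no longer requires `openai` to be importable. A minimal sketch of the same lazy-import pattern (the function below is illustrative, not part of zrb):

def call_with_provider_errors_mapped(fn):
    # Lazy import: 'openai' is only needed when this function actually runs,
    # so modules importing this one never pay the import cost up front.
    from openai import APIError

    try:
        return fn()
    except APIError as exc:
        raise RuntimeError(f"LLM provider error: {exc}") from exc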
zrb/task/llm/context.py
CHANGED
@@ -73,9 +73,7 @@ def get_conversation_context(
     Retrieves the conversation context.
     If a value in the context dict is callable, it executes it with ctx.
     """
-    raw_context = get_attr(
-        ctx, conversation_context_attr, {}, auto_render=False
-    )  # Context usually shouldn't be rendered
+    raw_context = get_attr(ctx, conversation_context_attr, {}, auto_render=False)
     if not isinstance(raw_context, dict):
         ctx.log_warning(
             f"Conversation context resolved to type {type(raw_context)}, "
zrb/task/llm/context_enrichment.py
CHANGED
@@ -1,6 +1,5 @@
 import json
 import traceback
-from textwrap import dedent
 from typing import TYPE_CHECKING, Any
 
 from pydantic import BaseModel
@@ -9,8 +8,13 @@ from zrb.attr.type import BoolAttr, IntAttr
 from zrb.context.any_context import AnyContext
 from zrb.llm_config import llm_config
 from zrb.task.llm.agent import run_agent_iteration
+from zrb.task.llm.history import (
+    count_part_in_history_list,
+    replace_system_prompt_in_history_list,
+)
 from zrb.task.llm.typing import ListOfDict
 from zrb.util.attr import get_bool_attr, get_int_attr
+from zrb.util.cli.style import stylize_faint
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
@@ -47,15 +51,13 @@ async def enrich_context(
         context_json = json.dumps(conversation_context)
         history_json = json.dumps(history_list)
         # The user prompt will now contain the dynamic data
-        user_prompt_data =
-
-
-
-
-
-
-            """
-        ).strip()
+        user_prompt_data = "\n".join(
+            [
+                "Extract context from the following conversation info",
+                f"Existing Context: {context_json}",
+                f"Conversation History: {history_json}",
+            ]
+        )
     except Exception as e:
         ctx.log_warning(f"Error formatting context/history for enrichment: {e}")
         return conversation_context  # Return original context if formatting fails
@@ -72,6 +74,7 @@ async def enrich_context(
         )
 
     try:
+        ctx.print(stylize_faint("[Context Enrichment Triggered]"), plain=True)
         enrichment_run = await run_agent_iteration(
             ctx=ctx,
             agent=enrichment_agent,
@@ -80,6 +83,8 @@ async def enrich_context(
         )
         if enrichment_run and enrichment_run.result.output:
             response = enrichment_run.result.output.response
+            usage = enrichment_run.result.usage()
+            ctx.print(stylize_faint(f"[Token Usage] {usage}"), plain=True)
             if response:
                 conversation_context.update(response)
                 ctx.log_info("Context enriched based on history.")
@@ -127,15 +132,15 @@ def should_enrich_context(
     """
     Determines if context enrichment should occur based on history, threshold, and config.
     """
-
-    if
+    history_part_count = count_part_in_history_list(history_list)
+    if history_part_count == 0:
         return False
     enrichment_threshold = get_context_enrichment_threshold(
         ctx,
         context_enrichment_threshold_attr,
         render_context_enrichment_threshold,
     )
-    if enrichment_threshold == -1 or enrichment_threshold >
+    if enrichment_threshold == -1 or enrichment_threshold > history_part_count:
         return False
     return get_bool_attr(
         ctx,
@@ -158,9 +163,10 @@ async def maybe_enrich_context(
     context_enrichment_prompt: str,
 ) -> dict[str, Any]:
     """Enriches context based on history if enabled and threshold met."""
+    shorten_history_list = replace_system_prompt_in_history_list(history_list)
     if should_enrich_context(
         ctx,
-
+        shorten_history_list,
         should_enrich_context_attr,
         render_enrich_context,
         context_enrichment_threshold_attr,
@@ -174,6 +180,6 @@ async def maybe_enrich_context(
             prompt=context_enrichment_prompt,
         ),
         conversation_context=conversation_context,
-        history_list=
+        history_list=shorten_history_list,
     )
     return conversation_context
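The updated enrichment prompt pins the output to a single JSON shape, {"response": {...}}, which is what `enrichment_run.result.output.response` reads back. A minimal sketch of validating that shape with pydantic (the `EnrichmentResult` model name is illustrative; the diff does not show zrb's actual schema class):

import json
from typing import Any

from pydantic import BaseModel


class EnrichmentResult(BaseModel):
    # Mirrors the shape the prompt asks for: {"response": {"context_name": "value"}}
    response: dict[str, Any]


raw = '{"response": {"user_name": "Alice", "goal": "refactor the history module"}}'
result = EnrichmentResult.model_validate(json.loads(raw))
assert result.response["user_name"] == "Alice"

# When nothing can be extracted, the prompt requires {"response": {}}:
assert EnrichmentResult.model_validate({"response": {}}).response == {}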
zrb/task/llm/history.py
CHANGED
@@ -1,6 +1,7 @@
 import json
 import os
 from collections.abc import Callable
+from copy import deepcopy
 from typing import Any, Optional
 
 from pydantic import BaseModel
@@ -177,66 +178,34 @@ async def write_conversation_history(
     write_file(history_file, history_data.model_dump_json(indent=2))
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # Merge history context from loaded data without overwriting existing keys
-    for key, value in history_data.context.items():
-        if key not in conversation_context:
-            conversation_context[key] = value
-    # Return the CLEANED history list
-    return cleaned_history_list, conversation_context
-
-
-def remove_context_from_interaction_history(
-    interaction: dict[str, Any],
-) -> dict[str, Any]:
-    try:
-        cleaned_interaction = json.loads(json.dumps(interaction))
-    except Exception:
-        # Fallback to shallow copy if not JSON serializable (less safe)
-        cleaned_interaction = interaction.copy()
-    if "parts" in cleaned_interaction and isinstance(
-        cleaned_interaction["parts"], list
-    ):
-        for part in cleaned_interaction["parts"]:
-            is_user_prompt = part.get("part_kind") == "user-prompt"
-            has_str_content = isinstance(part.get("content"), str)
-            if is_user_prompt and has_str_content:
-                content = part["content"]
-                user_message_marker = "# User Message\n"
-                marker_index = content.find(user_message_marker)
-                if marker_index != -1:
-                    # Extract message after the marker and strip whitespace
-                    start_index = marker_index + len(user_message_marker)
-                    part["content"] = content[start_index:].strip()
-                # else: If marker not found, leave content as is (old format/error)
-    return cleaned_interaction
+def replace_system_prompt_in_history_list(
+    history_list: ListOfDict, replacement: str = "<main LLM system prompt>"
+) -> ListOfDict:
+    """
+    Returns a new history list where any part with part_kind 'system-prompt'
+    has its 'content' replaced with the given replacement string.
+    Args:
+        history: List of history items (each item is a dict with a 'parts' list).
+        replacement: The string to use in place of system-prompt content.
+
+    Returns:
+        A deep-copied list of history items with system-prompt content replaced.
+    """
+    new_history = deepcopy(history_list)
+    for item in new_history:
+        parts = item.get("parts", [])
+        for part in parts:
+            if part.get("part_kind") == "system-prompt":
+                part["content"] = replacement
+    return new_history
+
+
+def count_part_in_history_list(history_list: ListOfDict) -> int:
+    """Calculates the total number of 'parts' in a history list."""
+    history_part_len = 0
+    for history in history_list:
+        if "parts" in history:
+            history_part_len += len(history["parts"])
+        else:
+            history_part_len += 1
    return history_part_len
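The two new helpers are easy to exercise directly. A small usage sketch on a toy pydantic-ai-style history list (the sample messages are illustrative):

from zrb.task.llm.history import (
    count_part_in_history_list,
    replace_system_prompt_in_history_list,
)

history = [
    {
        "parts": [
            {"part_kind": "system-prompt", "content": "You are a helpful assistant."},
            {"part_kind": "user-prompt", "content": "Hi, I am Alice."},
        ]
    },
    {"parts": [{"part_kind": "text", "content": "Hello, Alice!"}]},
]

assert count_part_in_history_list(history) == 3
shortened = replace_system_prompt_in_history_list(history)
assert shortened[0]["parts"][0]["content"] == "<main LLM system prompt>"
# deepcopy keeps the original history untouched:
assert history[0]["parts"][0]["content"] == "You are a helpful assistant."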
zrb/task/llm/history_summarization.py
CHANGED
@@ -7,8 +7,13 @@ from zrb.attr.type import BoolAttr, IntAttr
 from zrb.context.any_context import AnyContext
 from zrb.llm_config import llm_config
 from zrb.task.llm.agent import run_agent_iteration
+from zrb.task.llm.history import (
+    count_part_in_history_list,
+    replace_system_prompt_in_history_list,
+)
 from zrb.task.llm.typing import ListOfDict
 from zrb.util.attr import get_bool_attr, get_int_attr
+from zrb.util.cli.style import stylize_faint
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
@@ -18,17 +23,6 @@ else:
     ModelSettings = Any
 
 
-def get_history_part_len(history_list: ListOfDict) -> int:
-    """Calculates the total number of 'parts' in a history list."""
-    history_part_len = 0
-    for history in history_list:
-        if "parts" in history:
-            history_part_len += len(history["parts"])
-        else:
-            history_part_len += 1
-    return history_part_len
-
-
 def get_history_summarization_threshold(
     ctx: AnyContext,
     history_summarization_threshold_attr: IntAttr | None,
@@ -60,15 +54,15 @@ def should_summarize_history(
     render_history_summarization_threshold: bool,
 ) -> bool:
     """Determines if history summarization should occur based on length and config."""
-
-    if
+    history_part_count = count_part_in_history_list(history_list)
+    if history_part_count == 0:
         return False
     summarization_threshold = get_history_summarization_threshold(
         ctx,
         history_summarization_threshold_attr,
         render_history_summarization_threshold,
     )
-    if summarization_threshold == -1 or summarization_threshold >
+    if summarization_threshold == -1 or summarization_threshold > history_part_count:
         return False
     return get_bool_attr(
         ctx,
@@ -111,15 +105,18 @@ async def summarize_history(
     try:
         context_json = json.dumps(conversation_context)
         history_to_summarize_json = json.dumps(history_list)
-        summarization_user_prompt = (
-
-
+        summarization_user_prompt = "\n".join(
+            [
+                f"Current Context: {context_json}",
+                f"Conversation History to Summarize: {history_to_summarize_json}",
+            ]
         )
     except Exception as e:
         ctx.log_warning(f"Error formatting context/history for summarization: {e}")
         return conversation_context  # Return original context if formatting fails
 
     try:
+        ctx.print(stylize_faint("[Summarization Triggered]"), plain=True)
         summary_run = await run_agent_iteration(
             ctx=ctx,
             agent=summarization_agent,
@@ -128,6 +125,8 @@ async def summarize_history(
         )
         if summary_run and summary_run.result.output:
             summary_text = str(summary_run.result.output)
+            usage = summary_run.result.usage()
+            ctx.print(stylize_faint(f"[Token Usage] {usage}"), plain=True)
             # Update context with the new summary
             conversation_context["history_summary"] = summary_text
             ctx.log_info("History summarized and added/updated in context.")
@@ -153,9 +152,10 @@ async def maybe_summarize_history(
     summarization_prompt: str,
 ) -> tuple[ListOfDict, dict[str, Any]]:
     """Summarizes history and updates context if enabled and threshold met."""
+    shorten_history_list = replace_system_prompt_in_history_list(history_list)
     if should_summarize_history(
         ctx,
-
+        shorten_history_list,
         should_summarize_history_attr,
         render_summarize_history,
         history_summarization_threshold_attr,
@@ -170,7 +170,7 @@ async def maybe_summarize_history(
             prompt=summarization_prompt,
         ),
         conversation_context=conversation_context,
-        history_list=
+        history_list=shorten_history_list,  # Pass the full list for context
     )
     # Truncate the history list after summarization
     return [], updated_context
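Both the summarization and enrichment gates now follow the same rule: an empty history or a threshold of -1 disables the feature, and it triggers only once the part count reaches the threshold. A standalone restatement of just that rule (names are illustrative; the real functions additionally consult `get_bool_attr`):

def threshold_reached(part_count: int, threshold: int) -> bool:
    # -1 disables the feature entirely; otherwise trigger once the
    # history contains at least `threshold` parts.
    if part_count == 0:
        return False
    if threshold == -1 or threshold > part_count:
        return False
    return True


assert threshold_reached(part_count=12, threshold=10)
assert not threshold_reached(part_count=5, threshold=10)
assert not threshold_reached(part_count=5, threshold=-1)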
zrb/task/llm/prompt.py
CHANGED
@@ -1,11 +1,6 @@
-import json
-from textwrap import dedent
-from typing import Any
-
 from zrb.attr.type import StrAttr
 from zrb.context.any_context import AnyContext
 from zrb.llm_config import llm_config as default_llm_config
-from zrb.task.llm.context import extract_default_context
 from zrb.util.attr import get_attr, get_str_attr
 
 
@@ -127,25 +122,3 @@ def get_context_enrichment_prompt(
     if context_enrichment_prompt is not None:
         return context_enrichment_prompt
     return default_llm_config.get_default_context_enrichment_prompt()
-
-
-def build_user_prompt(
-    ctx: AnyContext,
-    message_attr: StrAttr | None,
-    conversation_context: dict[str, Any],
-) -> str:
-    """Constructs the final user prompt including context."""
-    original_user_message = get_user_message(ctx, message_attr)
-    # Combine default context, conversation context (potentially enriched/summarized)
-    modified_user_message, default_context = extract_default_context(
-        original_user_message
-    )
-    enriched_context = {**default_context, **conversation_context}
-    return dedent(
-        f"""
-        # Context
-        {json.dumps(enriched_context)}
-        # User Message
-        {modified_user_message}
-        """
-    ).strip()
zrb/task/llm_task.py
CHANGED
@@ -22,26 +22,24 @@ from zrb.input.any_input import AnyInput
 from zrb.task.any_task import AnyTask
 from zrb.task.base_task import BaseTask
 from zrb.task.llm.agent import get_agent, run_agent_iteration
-
-# No longer need llm_config here
 from zrb.task.llm.config import (
     get_model,
     get_model_settings,
 )
-from zrb.task.llm.context import get_conversation_context
+from zrb.task.llm.context import extract_default_context, get_conversation_context
 from zrb.task.llm.context_enrichment import maybe_enrich_context
 from zrb.task.llm.history import (
     ConversationHistoryData,
     ListOfDict,
-
+    read_conversation_history,
     write_conversation_history,
 )
 from zrb.task.llm.history_summarization import maybe_summarize_history
 from zrb.task.llm.prompt import (
-    build_user_prompt,
     get_combined_system_prompt,
     get_context_enrichment_prompt,
     get_summarization_prompt,
+    get_user_message,
 )
 from zrb.util.cli.style import stylize_faint
 from zrb.xcom.xcom import Xcom
@@ -240,6 +238,7 @@ class LLMTask(BaseTask):
             summarization_prompt_attr=self._summarization_prompt,
             render_summarization_prompt=self._render_summarization_prompt,
         )
+        user_message = get_user_message(ctx, self._message)
         # Get the combined system prompt using the new getter
         system_prompt = get_combined_system_prompt(
             ctx=ctx,
@@ -250,17 +249,19 @@ class LLMTask(BaseTask):
             special_instruction_prompt_attr=self._special_instruction_prompt,
             render_special_instruction_prompt=self._render_special_instruction_prompt,
         )
-        # 1. Prepare initial state (read history
-
+        # 1. Prepare initial state (read history from previous session)
+        conversation_history = await read_conversation_history(
             ctx=ctx,
             conversation_history_reader=self._conversation_history_reader,
             conversation_history_file_attr=self._conversation_history_file,
             render_history_file=self._render_history_file,
             conversation_history_attr=self._conversation_history,
-            conversation_context_getter=lambda c: get_conversation_context(
-                c, self._conversation_context
-            ),
         )
+        history_list = conversation_history.history
+        conversation_context = {
+            **conversation_history.context,
+            **get_conversation_context(ctx, self._conversation_context),
+        }
         # 2. Enrich context (optional)
         conversation_context = await maybe_enrich_context(
             ctx=ctx,
@@ -289,18 +290,21 @@ class LLMTask(BaseTask):
             model_settings=model_settings,
             summarization_prompt=summarization_prompt,
         )
-        # 4. Build the final user prompt
-
-
-
+        # 4. Build the final user prompt and system prompt
+        final_user_prompt, default_context = extract_default_context(user_message)
+        final_system_prompt = "\n".join(
+            [
+                system_prompt,
+                "# Context",
+                json.dumps({**default_context, **conversation_context}),
+            ]
         )
         # 5. Get the agent instance
         agent = get_agent(
             ctx=ctx,
             agent_attr=self._agent,
             model=model,
-            system_prompt=
+            system_prompt=final_system_prompt,
             model_settings=model_settings,
             tools_attr=self._tools,
             additional_tools=self._additional_tools,
@@ -309,7 +313,7 @@ class LLMTask(BaseTask):
         )
         # 6. Run the agent iteration and save the results/history
         return await self._run_agent_and_save_history(
-            ctx, agent,
+            ctx, agent, final_user_prompt, history_list, conversation_context
         )
 
     async def _run_agent_and_save_history(
@@ -346,7 +350,7 @@ class LLMTask(BaseTask):
                 ctx.xcom[xcom_usage_key] = Xcom([])
             usage = agent_run.result.usage()
             ctx.xcom.get(xcom_usage_key).push(usage)
-            ctx.print(stylize_faint(f"[
+            ctx.print(stylize_faint(f"[Token Usage] {usage}"), plain=True)
             return agent_run.result.output
         else:
             ctx.log_warning("Agent run did not produce a result.")
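The net effect of the llm_task.py changes is that context now rides in the system prompt rather than being wrapped into the user message by the removed `build_user_prompt`. A minimal sketch of the resulting composition, using the same `"\n".join` construction as the new code (the sample values are illustrative):

import json

system_prompt = "You are a helpful assistant."
default_context = {"os": "linux"}
conversation_context = {"user_name": "Alice"}

final_system_prompt = "\n".join(
    [
        system_prompt,
        "# Context",
        json.dumps({**default_context, **conversation_context}),
    ]
)
print(final_system_prompt)
# You are a helpful assistant.
# # Context
# {"os": "linux", "user_name": "Alice"}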
{zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/RECORD
CHANGED
@@ -245,7 +245,7 @@ zrb/input/option_input.py,sha256=TQB82ko5odgzkULEizBZi0e9TIHEbIgvdP0AR3RhA74,213
 zrb/input/password_input.py,sha256=szBojWxSP9QJecgsgA87OIYwQrY2AQ3USIKdDZY6snU,1465
 zrb/input/str_input.py,sha256=NevZHX9rf1g8eMatPyy-kUX3DglrVAQpzvVpKAzf7bA,81
 zrb/input/text_input.py,sha256=NdceGgtbFFZkAoWOBW4xarjM-N1oePTStBJb8RbOqLw,3499
-zrb/llm_config.py,sha256=
+zrb/llm_config.py,sha256=TAGha1PAiS-vvxM3UyAlIK_0mh3qe5clOfRKbQhGUF8,9477
 zrb/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/runner/cli.py,sha256=Fh1p5qwa8sK2IlHbPoBHWLBA7rplDMlQpZfmbMq6gy8,6835
 zrb/runner/common_util.py,sha256=JDMcwvQ8cxnv9kQrAoKVLA40Q1omfv-u5_d5MvvwHeE,1373
@@ -317,18 +317,18 @@ zrb/task/base_trigger.py,sha256=WSGcmBcGAZw8EzUXfmCjqJQkz8GEmi1RzogpF6A1V4s,6902
 zrb/task/cmd_task.py,sha256=3JFkWZEhyrQAwbQJs2pgICBmkohUR9T-hjXw82JyNtA,10720
 zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
 zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-zrb/task/llm/agent.py,sha256=
+zrb/task/llm/agent.py,sha256=6wGSsw03GdY_fj12CsJh7wxB6BnE13N8RYXaWfbiUsk,5451
 zrb/task/llm/config.py,sha256=R6mkbm4d5ecN4KjjZaXbqNq9-8bXfdUGl_BML8hUWqY,3205
-zrb/task/llm/context.py,sha256=
-zrb/task/llm/context_enrichment.py,sha256=
+zrb/task/llm/context.py,sha256=U9a8lxa2ikz6my0Sd5vpO763legHrMHyvBjbrqNmv0Y,3838
+zrb/task/llm/context_enrichment.py,sha256=dyUYhgozHZBQo-pDSoBkrlt39hOxS0i2IzDUKpPVF5U,6458
 zrb/task/llm/error.py,sha256=27DQXSG8SH1-XuvXFdZQKzP39wZDWmd_YnSTz6DJKKI,3690
-zrb/task/llm/history.py,sha256=
-zrb/task/llm/history_summarization.py,sha256=
+zrb/task/llm/history.py,sha256=3WMXoi7RquxosXQf3iv2_BCeF8iKtY1f407pR71xERs,7745
+zrb/task/llm/history_summarization.py,sha256=UTyw7JF6cDmOJhuQOYVumF65QIqJHc3eQOa8p9Buwgw,6444
 zrb/task/llm/print_node.py,sha256=bpISOUxSH_JBLR-4Nq6-iLrzNWFagrKFX6u8ogYYMw8,4395
-zrb/task/llm/prompt.py,sha256=
+zrb/task/llm/prompt.py,sha256=56qabihh9ZaDF-QJkOW2EUJ4_hhLsRZxzkZ8nAHrrz0,3911
 zrb/task/llm/tool_wrapper.py,sha256=Xygd4VCY3ykjVv63pqlTI16ZG41ySkp683_5VTnL-Zo,6481
 zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
-zrb/task/llm_task.py,sha256=
+zrb/task/llm_task.py,sha256=Bj_EJRv2qrCw6bgt7aHbaZwS5u0kc1R6NlqLkRw-0SE,15516
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=GSL9144bmp6F0EckT6m-2a1xG25AzrrWYzH4k3SVUKM,6370
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -369,7 +369,7 @@ zrb/util/string/name.py,sha256=SXEfxJ1-tDOzHqmSV8kvepRVyMqs2XdV_vyoh_9XUu0,1584
 zrb/util/todo.py,sha256=VGISej2KQZERpornK-8X7bysp4JydMrMUTnG8B0-liI,20708
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.6.
-zrb-1.6.
-zrb-1.6.
-zrb-1.6.
+zrb-1.6.1.dist-info/METADATA,sha256=Mvuy3HzhIukGZ6QTM3X9RbFhQz6UFXhdngsC53dqSYQ,8385
+zrb-1.6.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.6.1.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.6.1.dist-info/RECORD,,
{zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/WHEEL
File without changes
{zrb-1.6.0.dist-info → zrb-1.6.1.dist-info}/entry_points.txt
File without changes