zrb 1.5.11__py3-none-any.whl → 1.5.12__py3-none-any.whl
This diff compares the published contents of two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the registry.
- zrb/builtin/llm/llm_chat.py +1 -1
- zrb/builtin/llm/tool/__init__.py +0 -0
- zrb/builtin/llm/tool/sub_agent.py +125 -0
- zrb/builtin/llm/tool/web.py +0 -2
- zrb/config.py +0 -3
- zrb/llm_config.py +16 -2
- zrb/task/base_task.py +20 -0
- zrb/task/llm/agent.py +5 -8
- zrb/task/llm/context.py +17 -8
- zrb/task/llm/context_enrichment.py +52 -13
- zrb/task/llm/history_summarization.py +3 -5
- zrb/task/llm/prompt.py +7 -4
- zrb/task/llm/tool_wrapper.py +115 -53
- zrb/task/llm_task.py +16 -1
- zrb/util/attr.py +84 -1
- zrb/util/cli/style.py +147 -0
- zrb/util/cli/subcommand.py +22 -1
- zrb/util/cmd/command.py +18 -0
- zrb/util/cmd/remote.py +15 -0
- zrb/util/codemod/modification_mode.py +4 -0
- zrb/util/codemod/modify_class.py +72 -0
- zrb/util/codemod/modify_class_parent.py +68 -0
- zrb/util/codemod/modify_class_property.py +67 -0
- zrb/util/codemod/modify_dict.py +62 -0
- zrb/util/codemod/modify_function.py +75 -3
- zrb/util/codemod/modify_function_call.py +72 -0
- zrb/util/codemod/modify_method.py +77 -0
- zrb/util/codemod/modify_module.py +10 -0
- zrb/util/cron.py +37 -3
- zrb/util/file.py +32 -0
- zrb/util/git.py +113 -0
- zrb/util/git_subtree.py +58 -0
- zrb/util/group.py +64 -2
- zrb/util/load.py +29 -0
- zrb/util/run.py +9 -0
- zrb/util/string/conversion.py +86 -0
- zrb/util/string/format.py +20 -0
- zrb/util/string/name.py +12 -0
- zrb/util/todo.py +165 -4
- {zrb-1.5.11.dist-info → zrb-1.5.12.dist-info}/METADATA +3 -3
- {zrb-1.5.11.dist-info → zrb-1.5.12.dist-info}/RECORD +43 -41
- {zrb-1.5.11.dist-info → zrb-1.5.12.dist-info}/WHEEL +0 -0
- {zrb-1.5.11.dist-info → zrb-1.5.12.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/llm_chat.py
CHANGED
@@ -93,7 +93,7 @@ llm_chat: LLMTask = llm_group.add_task(
         ),
         conversation_history_reader=read_chat_conversation,
         conversation_history_writer=write_chat_conversation,
-        description="Chat with LLM",
+        description="💬 Chat with LLM",
         system_prompt=lambda ctx: (
             None if ctx.input.system_prompt.strip() == "" else ctx.input.system_prompt
         ),
zrb/builtin/llm/tool/__init__.py
File without changes
zrb/builtin/llm/tool/sub_agent.py
ADDED
@@ -0,0 +1,125 @@
+import json
+from collections.abc import Callable
+from textwrap import dedent
+
+from pydantic_ai import Tool
+from pydantic_ai.mcp import MCPServer
+from pydantic_ai.models import Model
+from pydantic_ai.settings import ModelSettings
+
+from zrb.context.any_context import AnyContext
+from zrb.task.llm.agent import create_agent_instance, run_agent_iteration
+from zrb.task.llm.config import get_model, get_model_settings
+from zrb.task.llm.prompt import get_combined_system_prompt
+
+ToolOrCallable = Tool | Callable
+
+
+def create_sub_agent_tool(
+    tool_name: str,
+    tool_description: str,
+    sub_agent_system_prompt: str | None = None,  # Make optional
+    sub_agent_model: str | Model | None = None,
+    sub_agent_model_settings: ModelSettings | None = None,
+    sub_agent_tools: list[ToolOrCallable] = [],
+    sub_agent_mcp_servers: list[MCPServer] = [],
+) -> Callable[[AnyContext, str], str]:
+    """
+    Create an LLM "sub-agent" tool function for use by a main LLM agent.
+
+    This factory configures and returns an async function that, when called
+    by the main agent, instantiates and runs a sub-agent (the sub-agent)
+    with a given query and returns the sub-agent's final response.
+
+    Args:
+        tool_name: The name of the tool for the main agent.
+        tool_description: The description of the tool for the main agent.
+        sub_agent_system_prompt: The system prompt for the sub-agent.
+        sub_agent_model: The model for the sub-agent (optional).
+        sub_agent_model_settings: Model settings for the sub-agent (optional).
+        sub_agent_tools: A list of tools (Tool instances or callables) for the
+            sub-agent (optional).
+        sub_agent_mcp_servers: A list of MCP servers for the sub-agent (optional).
+
+    Returns:
+        An async callable function that takes a context and a query string,
+        runs the sub-agent, and returns the sub-agent's final message content.
+    """
+
+    async def run_sub_agent(ctx: AnyContext, query: str) -> str:
+        """
+        Runs the sub-agent with the given query.
+        """
+        # Resolve parameters, falling back to llm_config defaults if None
+        resolved_model = get_model(
+            ctx=ctx,
+            model_attr=sub_agent_model,
+            render_model=True,  # Assuming we always want to render model string attributes
+            model_base_url_attr=None,
+            # Sub-agent tool doesn't have separate base_url/api_key params
+            render_model_base_url=False,
+            model_api_key_attr=None,
+            render_model_api_key=False,
+        )
+        resolved_model_settings = get_model_settings(
+            ctx=ctx,
+            model_settings_attr=sub_agent_model_settings,
+        )
+
+        if sub_agent_system_prompt is None:
+            resolved_system_prompt = get_combined_system_prompt(
+                ctx=ctx,
+                persona_attr=None,
+                render_persona=False,
+                system_prompt_attr=None,
+                render_system_prompt=False,
+                special_instruction_prompt_attr=None,
+                render_special_instruction_prompt=False,
+            )
+        else:
+            resolved_system_prompt = sub_agent_system_prompt
+
+        # Create the sub-agent instance
+        sub_agent_agent = create_agent_instance(
+            ctx=ctx,
+            model=resolved_model,
+            system_prompt=resolved_system_prompt,
+            model_settings=resolved_model_settings,
+            tools_attr=sub_agent_tools,  # Pass tools from factory closure
+            additional_tools=[],  # No additional tools added after factory creation
+            mcp_servers_attr=sub_agent_mcp_servers,  # Pass servers from factory closure
+            additional_mcp_servers=[],  # No additional servers added after factory creation
+        )
+
+        # Run the sub-agent iteration
+        # Start with an empty history for the sub-agent
+        sub_agent_run = await run_agent_iteration(
+            ctx=ctx,
+            agent=sub_agent_agent,
+            user_prompt=query,
+            history_list=[],  # Start with empty history for the sub-agent
+        )
+
+        # Return the sub-agent's final message content
+        if sub_agent_run and sub_agent_run.result:
+            # Return the final message content as a string
+            return json.dumps({"result": sub_agent_run.result.output})
+        else:
+            ctx.log_warning("Sub-agent run did not produce a result.")
+            return "Sub-agent failed to produce a result."
+
+    # Set the name and docstring for the callable function
+    run_sub_agent.__name__ = tool_name
+    run_sub_agent.__doc__ = dedent(
+        f"""
+        {tool_description}
+
+        Args:
+            query (str): The query or task for the sub-agent.
+
+        Returns:
+            str: The final response or result from the sub-agent.
+        """
+    ).strip()
+
+    return run_sub_agent
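For orientation, here is a minimal sketch of how this factory might be wired up. The `read_notes` helper, the tool name, and the `tools=[...]` hand-off to an `LLMTask` are illustrative assumptions, not code from this release:

```python
from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool


def read_notes(path: str) -> str:
    """Hypothetical helper tool handed to the sub-agent."""
    with open(path) as f:
        return f.read()


# The factory returns an async callable with signature (ctx, query) -> str,
# named and documented via tool_name/tool_description so a main agent can call it.
summarize_notes = create_sub_agent_tool(
    tool_name="summarize_notes",
    tool_description="Summarize the content of a local note file.",
    sub_agent_tools=[read_notes],
)
# e.g. LLMTask(name="chat", ..., tools=[summarize_notes])  # assumed usage
```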
zrb/builtin/llm/tool/web.py
CHANGED
zrb/config.py
CHANGED
@@ -80,9 +80,6 @@ WEB_AUTH_REFRESH_TOKEN_EXPIRE_MINUTES = int(
 LLM_HISTORY_DIR = os.getenv(
     "ZRB_LLM_HISTORY_DIR", os.path.expanduser(os.path.join("~", ".zrb-llm-history"))
 )
-LLM_HISTORY_FILE = os.getenv(
-    "ZRB_LLM_HISTORY_FILE", os.path.join(LLM_HISTORY_DIR, "history.json")
-)
 LLM_ALLOW_ACCESS_LOCAL_FILE = to_boolean(os.getenv("ZRB_LLM_ACCESS_LOCAL_FILE", "1"))
 LLM_ALLOW_ACCESS_SHELL = to_boolean(os.getenv("ZRB_LLM_ACCESS_SHELL", "1"))
 LLM_ALLOW_ACCESS_INTERNET = to_boolean(os.getenv("ZRB_LLM_ACCESS_INTERNET", "1"))
zrb/llm_config.py
CHANGED
@@ -60,6 +60,7 @@ class LLMConfig:
         default_summarize_history: bool | None = None,
         default_history_summarization_threshold: int | None = None,
         default_enrich_context: bool | None = None,
+        default_context_enrichment_threshold: int | None = None,
     ):
         self._default_model_name = (
             default_model_name
@@ -104,7 +105,7 @@ class LLMConfig:
         self._default_summarize_history = (
             default_summarize_history
             if default_summarize_history is not None
-            else os.getenv("ZRB_LLM_SUMMARIZE_HISTORY", "true")
+            else to_boolean(os.getenv("ZRB_LLM_SUMMARIZE_HISTORY", "true"))
         )
         self._default_history_summarization_threshold = (
             default_history_summarization_threshold
@@ -114,7 +115,12 @@ class LLMConfig:
         self._default_enrich_context = (
             default_enrich_context
             if default_enrich_context is not None
-            else to_boolean(os.getenv("ZRB_LLM_ENRICH_CONTEXT", "
+            else to_boolean(os.getenv("ZRB_LLM_ENRICH_CONTEXT", "true"))
+        )
+        self._default_context_enrichment_threshold = (
+            default_context_enrichment_threshold
+            if default_context_enrichment_threshold is not None
+            else int(os.getenv("ZRB_LLM_CONTEXT_ENRICHMENT_THRESHOLD", "5"))
         )
         self._default_provider = None
         self._default_model = None
@@ -186,6 +192,9 @@ class LLMConfig:
     def get_default_enrich_context(self) -> bool:
         return self._default_enrich_context
 
+    def get_default_context_enrichment_threshold(self) -> int:
+        return self._default_context_enrichment_threshold
+
     def set_default_persona(self, persona: str):
         self._default_persona = persona
 
@@ -227,5 +236,10 @@ class LLMConfig:
     def set_default_enrich_context(self, enrich_context: bool):
         self._default_enrich_context = enrich_context
 
+    def set_default_context_enrichment_threshold(
+        self, context_enrichment_threshold: int
+    ):
+        self._default_context_enrichment_threshold = context_enrichment_threshold
+
 
 llm_config = LLMConfig()
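The `to_boolean` wrapper on `ZRB_LLM_SUMMARIZE_HISTORY` is a real fix, not a style change: `os.getenv` returns a string, and any non-empty string is truthy, so the old code treated `ZRB_LLM_SUMMARIZE_HISTORY=false` as enabled. A small sketch of the pitfall; this `to_boolean` is a simplified stand-in for `zrb.util.string.conversion.to_boolean`, whose exact accepted values may differ:

```python
import os


def to_boolean(value: str) -> bool:
    # Simplified stand-in for zrb.util.string.conversion.to_boolean
    if value.lower() in ("true", "1", "yes", "y", "on"):
        return True
    if value.lower() in ("false", "0", "no", "n", "off"):
        return False
    raise ValueError(f"Cannot infer boolean value from: {value}")


os.environ["ZRB_LLM_SUMMARIZE_HISTORY"] = "false"
raw = os.getenv("ZRB_LLM_SUMMARIZE_HISTORY", "true")
print(bool(raw))        # True  -- any non-empty string; the old, buggy reading
print(to_boolean(raw))  # False -- the corrected reading
```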
zrb/task/base_task.py
CHANGED
@@ -23,6 +23,26 @@ from zrb.task.base.operators import handle_lshift, handle_rshift
 
 
 class BaseTask(AnyTask):
+    """
+    Implements a concrete task class `BaseTask` derived from the abstract base class `AnyTask`.
+
+    This class serves as a robust and flexible task implementation that can be tailored for
+    various execution scenarios within the Zrb framework. It supports functionalities such as:
+
+    - **Task Definition and Initialization:** Setting up task attributes like `name`, `color`,
+      `icon`, `description`, `cli_only`, `inputs`, `envs`, `action`, among others.
+    - **Dependency Management:** Managing task dependencies using properties and methods to
+      append upstreams, fallbacks, readiness checks, and successors, ensuring tasks are executed
+      in the correct order and conditions.
+    - **Execution Control:** Contains methods for both synchronous (`run`) and asynchronous
+      execution (`async_run`), alongside internal task lifecycle methods (`exec_root_tasks`,
+      `exec_chain`, `exec`).
+    - **Readiness and Monitoring:** Supports readiness checks, retry mechanisms, and monitoring
+      before task execution to ensure the task is executed under proper conditions.
+    - **Operator Overloading:** Implements operators to handle task chaining and dependencies
+      conveniently.
+    """
+
     def __init__(
         self,
         name: str,
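As a quick illustration of the operator overloading the docstring mentions (backed by `handle_lshift`/`handle_rshift` imported above), dependent tasks can be chained with shift operators. A minimal sketch; the exact `>>` semantics and the `action` callback shape are assumptions based on the docstring, not on code shown in this diff:

```python
from zrb.task.base_task import BaseTask

# Hypothetical tasks; each action receives the shared context object
prepare = BaseTask(name="prepare", action=lambda ctx: ctx.print("preparing"))
build = BaseTask(name="build", action=lambda ctx: ctx.print("building"))
test = BaseTask(name="test", action=lambda ctx: ctx.print("testing"))

# The overloaded right-shift chains dependencies: prepare runs upstream
# of build, and build upstream of test (handled by handle_rshift)
prepare >> build >> test
```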
zrb/task/llm/agent.py
CHANGED
@@ -1,4 +1,3 @@
-import inspect
 from collections.abc import Callable
 
 from openai import APIError
@@ -32,6 +31,7 @@ def create_agent_instance(
     additional_mcp_servers: list[MCPServer],
 ) -> Agent:
     """Creates a new Agent instance with configured tools and servers."""
+    # Get tools
     tools_or_callables = list(tools_attr(ctx) if callable(tools_attr) else tools_attr)
     tools_or_callables.extend(additional_tools)
     tools = []
@@ -39,17 +39,14 @@ def create_agent_instance(
         if isinstance(tool_or_callable, Tool):
             tools.append(tool_or_callable)
         else:
-            #
-
-
-            wrapped_tool = wrap_tool(tool_or_callable)
-            tools.append(Tool(wrapped_tool, takes_ctx=takes_ctx))
-
+            # Pass ctx to wrap_tool
+            tools.append(wrap_tool(tool_or_callable, ctx))
+    # Get MCP Servers
     mcp_servers = list(
         mcp_servers_attr(ctx) if callable(mcp_servers_attr) else mcp_servers_attr
     )
     mcp_servers.extend(additional_mcp_servers)
-
+    # Return Agent
     return Agent(
         model=model,
         system_prompt=system_prompt,
zrb/task/llm/context.py
CHANGED
@@ -12,13 +12,19 @@ from zrb.util.attr import get_attr
 from zrb.util.file import read_dir, read_file_with_line_numbers
 
 
-def get_default_context(user_message: str) -> dict[str, Any]:
-    """
-
+def extract_default_context(user_message: str) -> tuple[str, dict[str, Any]]:
+    """
+    Return modified user message and default context including time, OS, and file references.
+    """
+    modified_user_message = user_message
+    # Match "@" + any non-space/comma sequence that contains at least one "/"
+    pattern = r"(?<!\w)@(?=[^,\s]*/)([^,\s]+)"
+    potential_resource_path = re.findall(pattern, user_message)
     current_references = []
 
-    for ref in
+    for ref in potential_resource_path:
         resource_path = os.path.abspath(os.path.expanduser(ref))
+        print("RESOURCE PATH", resource_path)
         if os.path.isfile(resource_path):
             content = read_file_with_line_numbers(resource_path)
             current_references.append(
@@ -30,6 +36,8 @@ def get_default_context(user_message: str) -> dict[str, Any]:
                     "content": content,
                 }
             )
+            # Remove the '@' from the modified user message for valid file paths
+            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
         elif os.path.isdir(resource_path):
             content = read_dir(resource_path)
             current_references.append(
@@ -40,8 +48,10 @@ def get_default_context(user_message: str) -> dict[str, Any]:
                     "content": content,
                 }
             )
+            # Remove the '@' from the modified user message for valid directory paths
+            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
 
-
+    context = {
         "current_time": datetime.datetime.now().isoformat(),
         "current_working_directory": os.getcwd(),
         "current_os": platform.system(),
@@ -50,6 +60,8 @@ def get_default_context(user_message: str) -> dict[str, Any]:
         "current_references": current_references,
     }
 
+    return modified_user_message, context
+
 
 def get_conversation_context(
     ctx: AnyContext,
@@ -90,6 +102,3 @@ def get_conversation_context(
         else:
             processed_context[key] = value
     return processed_context
-
-
-# Context enrichment functions moved to context_enrichment.py
zrb/task/llm/context_enrichment.py
CHANGED
@@ -8,12 +8,12 @@ from pydantic_ai import Agent
 from pydantic_ai.models import Model
 from pydantic_ai.settings import ModelSettings
 
-from zrb.attr.type import BoolAttr
+from zrb.attr.type import BoolAttr, IntAttr
 from zrb.context.any_context import AnyContext
 from zrb.llm_config import llm_config
 from zrb.task.llm.agent import run_agent_iteration
 from zrb.task.llm.typing import ListOfDict
-from zrb.util.attr import get_bool_attr
+from zrb.util.attr import get_bool_attr, get_int_attr
 
 
 class EnrichmentConfig(BaseModel):
@@ -62,7 +62,7 @@ async def enrich_context(
         mcp_servers=[],
         model_settings=config.settings,
         retries=config.retries,
-
+        output_type=EnrichmentResult,
     )
 
     try:
@@ -72,8 +72,8 @@ async def enrich_context(
            user_prompt=user_prompt_data,  # Pass the formatted data as user prompt
            history_list=[],  # Enrichment agent doesn't need prior history itself
        )
-        if enrichment_run and enrichment_run.result.
-            response = enrichment_run.result.
+        if enrichment_run and enrichment_run.result.output:
+            response = enrichment_run.result.output.response
            if response:
                conversation_context.update(response)
                ctx.log_info("Context enriched based on history.")
@@ -88,21 +88,53 @@ async def enrich_context(
     return conversation_context
 
 
+def get_context_enrichment_threshold(
+    ctx: AnyContext,
+    context_enrichment_threshold_attr: IntAttr | None,
+    render_context_enrichment_threshold: bool,
+) -> int:
+    """Gets the context enrichment threshold, handling defaults and errors."""
+    try:
+        return get_int_attr(
+            ctx,
+            context_enrichment_threshold_attr,
+            # Use llm_config default if attribute is None
+            llm_config.get_default_context_enrichment_threshold(),
+            auto_render=render_context_enrichment_threshold,
+        )
+    except ValueError as e:
+        ctx.log_warning(
+            f"Could not convert context_enrichment_threshold to int: {e}. "
+            "Defaulting to -1 (no threshold)."
+        )
+        return -1
+
+
 def should_enrich_context(
     ctx: AnyContext,
     history_list: ListOfDict,
     should_enrich_context_attr: BoolAttr | None,  # Allow None
     render_enrich_context: bool,
+    context_enrichment_threshold_attr: IntAttr | None,
+    render_context_enrichment_threshold: bool,
 ) -> bool:
-    """
-    if
+    """
+    Determines if context enrichment should occur based on history, threshold, and config.
+    """
+    history_len = len(history_list)
+    if history_len == 0:
+        return False
+    enrichment_threshold = get_context_enrichment_threshold(
+        ctx,
+        context_enrichment_threshold_attr,
+        render_context_enrichment_threshold,
+    )
+    if enrichment_threshold == -1 or enrichment_threshold > history_len:
         return False
-    # Use llm_config default if attribute is None
-    default_value = llm_config.get_default_enrich_context()
     return get_bool_attr(
         ctx,
         should_enrich_context_attr,
-
+        llm_config.get_default_enrich_context(),
         auto_render=render_enrich_context,
     )
 
@@ -111,15 +143,22 @@ async def maybe_enrich_context(
     ctx: AnyContext,
     history_list: ListOfDict,
     conversation_context: dict[str, Any],
-    should_enrich_context_attr: BoolAttr | None,
+    should_enrich_context_attr: BoolAttr | None,
     render_enrich_context: bool,
+    context_enrichment_threshold_attr: IntAttr | None,
+    render_context_enrichment_threshold: bool,
     model: str | Model | None,
     model_settings: ModelSettings | None,
     context_enrichment_prompt: str,
 ) -> dict[str, Any]:
-    """Enriches context based on history if enabled."""
+    """Enriches context based on history if enabled and threshold met."""
     if should_enrich_context(
-        ctx,
+        ctx,
+        history_list,
+        should_enrich_context_attr,
+        render_enrich_context,
+        context_enrichment_threshold_attr,
+        render_context_enrichment_threshold,
     ):
         # Use the enrich_context function now defined in this file
         return await enrich_context(
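Taken together, the guards mean enrichment runs only when the history is non-empty and at least as long as the threshold, while a threshold of `-1` (also the fallback on a bad value) disables enrichment outright. A pure-Python restatement of the decision; the function name is mine, the logic mirrors `should_enrich_context` above:

```python
def should_run_enrichment(history_len: int, threshold: int, enabled: bool) -> bool:
    if history_len == 0:          # empty history never triggers enrichment
        return False
    if threshold == -1 or threshold > history_len:
        return False              # -1 disables; short histories wait
    return enabled                # otherwise defer to the configured flag


assert should_run_enrichment(history_len=0, threshold=5, enabled=True) is False
assert should_run_enrichment(history_len=4, threshold=5, enabled=True) is False
assert should_run_enrichment(history_len=6, threshold=5, enabled=True) is True
assert should_run_enrichment(history_len=6, threshold=-1, enabled=True) is False
```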
zrb/task/llm/history_summarization.py
CHANGED
@@ -64,9 +64,7 @@ def should_summarize_history(
         history_summarization_threshold_attr,
         render_history_summarization_threshold,
     )
-    if summarization_threshold == -1:
-        return False
-    if summarization_threshold > history_part_len:
+    if summarization_threshold == -1 or summarization_threshold > history_part_len:
         return False
     return get_bool_attr(
         ctx,
@@ -122,8 +120,8 @@ async def summarize_history(
            user_prompt=summarization_user_prompt,
            history_list=[],  # Summarization agent doesn't need prior history
        )
-        if summary_run and summary_run.result.
-            summary_text = str(summary_run.result.
+        if summary_run and summary_run.result.output:
+            summary_text = str(summary_run.result.output)
            # Update context with the new summary
            conversation_context["history_summary"] = summary_text
            ctx.log_info("History summarized and added/updated in context.")
zrb/task/llm/prompt.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any
 from zrb.attr.type import StrAttr
 from zrb.context.any_context import AnyContext
 from zrb.llm_config import llm_config as default_llm_config
-from zrb.task.llm.context import
+from zrb.task.llm.context import extract_default_context
 from zrb.util.attr import get_attr, get_str_attr
 
 
@@ -135,14 +135,17 @@ def build_user_prompt(
     conversation_context: dict[str, Any],
 ) -> str:
     """Constructs the final user prompt including context."""
-
+    original_user_message = get_user_message(ctx, message_attr)
     # Combine default context, conversation context (potentially enriched/summarized)
-
+    modified_user_message, default_context = extract_default_context(
+        original_user_message
+    )
+    enriched_context = {**default_context, **conversation_context}
     return dedent(
         f"""
         # Context
         {json.dumps(enriched_context)}
         # User Message
-        {
+        {modified_user_message}
         """
     ).strip()
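Putting the pieces together, the final prompt embeds the merged context as JSON above the de-referenced user message. A hand-built approximation of the output; the context values below are illustrative, not produced by the package:

```python
import json
from textwrap import dedent

enriched_context = {
    "current_os": "Linux",
    "current_references": [
        {"reference_type": "file", "path": "/home/user/src/main.py"},
    ],
}
modified_user_message = "Please review src/main.py"  # leading '@' already stripped

prompt = dedent(
    f"""
    # Context
    {json.dumps(enriched_context)}
    # User Message
    {modified_user_message}
    """
).strip()
print(prompt)
```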