zrb 1.5.7__py3-none-any.whl → 1.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,173 @@
1
+ import json
2
+ from typing import Any
3
+
4
+ from pydantic import BaseModel
5
+ from pydantic_ai import Agent
6
+ from pydantic_ai.models import Model
7
+ from pydantic_ai.settings import ModelSettings
8
+
9
+ from zrb.attr.type import BoolAttr, IntAttr
10
+ from zrb.context.any_context import AnyContext
11
+ from zrb.llm_config import llm_config
12
+ from zrb.task.llm.agent import run_agent_iteration
13
+ from zrb.task.llm.typing import ListOfDict
14
+ from zrb.util.attr import get_bool_attr, get_int_attr
15
+
16
+
17
def get_history_part_len(history_list: ListOfDict) -> int:
    """Count the total number of message 'parts' in a history list.

    Entries carrying a "parts" key contribute the length of that list;
    all other entries count as a single part.
    """
    return sum(
        len(entry["parts"]) if "parts" in entry else 1
        for entry in history_list
    )
26
+
27
+
28
def get_history_summarization_threshold(
    ctx: AnyContext,
    history_summarization_threshold_attr: IntAttr | None,
    render_history_summarization_threshold: bool,
) -> int:
    """Resolve the history summarization threshold for this context.

    Falls back to the llm_config default when the attribute is None and
    returns -1 (summarization disabled) when the value cannot be coerced
    to an int.
    """
    try:
        threshold = get_int_attr(
            ctx,
            history_summarization_threshold_attr,
            # None attribute -> use the llm_config default
            llm_config.get_default_history_summarization_threshold(),
            auto_render=render_history_summarization_threshold,
        )
    except ValueError as exc:
        ctx.log_warning(
            f"Could not convert history_summarization_threshold to int: {exc}. "
            "Defaulting to -1 (no threshold)."
        )
        return -1
    return threshold
48
+
49
+
50
def should_summarize_history(
    ctx: AnyContext,
    history_list: ListOfDict,
    should_summarize_history_attr: BoolAttr | None,  # None -> llm_config default
    render_summarize_history: bool,
    history_summarization_threshold_attr: IntAttr | None,  # None -> llm_config default
    render_history_summarization_threshold: bool,
) -> bool:
    """Decide whether the conversation history is due for summarization.

    Requires a non-empty history whose part count has reached the
    configured threshold; the final word belongs to the boolean
    "summarize history" attribute (or its llm_config default).
    """
    part_count = get_history_part_len(history_list)
    if part_count == 0:
        # Nothing to summarize.
        return False
    threshold = get_history_summarization_threshold(
        ctx,
        history_summarization_threshold_attr,
        render_history_summarization_threshold,
    )
    # -1 disables the trigger entirely; otherwise the history must have
    # at least `threshold` parts.
    if threshold == -1 or threshold > part_count:
        return False
    return get_bool_attr(
        ctx,
        should_summarize_history_attr,
        llm_config.get_default_summarize_history(),
        auto_render=render_summarize_history,
    )
78
+
79
+
80
class SummarizationConfig(BaseModel):
    """Configuration for the LLM call that summarizes conversation history."""

    # Allow non-pydantic field types (e.g. pydantic_ai Model instances).
    model_config = {"arbitrary_types_allowed": True}
    # Model used for the summarization call; None defers to the agent default.
    model: Model | str | None = None
    # Optional model settings for the summarization call.
    settings: ModelSettings | None = None
    # System prompt for the summarization agent (required).
    prompt: str
    # Number of retries for the summarization agent.
    retries: int = 1
86
+
87
+
88
async def summarize_history(
    ctx: AnyContext,
    config: SummarizationConfig,
    conversation_context: dict[str, Any],
    history_list: ListOfDict,
) -> dict[str, Any]:
    """Run an LLM call to summarize history and update the context.

    On success the summary is stored under the "history_summary" key of
    `conversation_context`; on any failure the context is returned
    unchanged (best effort, never raises).
    """
    ctx.log_info("Attempting to summarize conversation history...")

    agent = Agent(
        model=config.model,
        system_prompt=config.prompt,
        tools=[],  # Summarization needs no tools
        mcp_servers=[],
        model_settings=config.settings,
        retries=config.retries,
    )

    # Serialize context and history into the summarization prompt.
    try:
        user_prompt = "\n\n".join(
            [
                f"# Current Context\n{json.dumps(conversation_context)}",
                f"# Conversation History to Summarize\n{json.dumps(history_list)}",
            ]
        )
    except Exception as exc:
        ctx.log_warning(f"Error formatting context/history for summarization: {exc}")
        # Formatting failed: keep the original context untouched.
        return conversation_context

    try:
        summary_run = await run_agent_iteration(
            ctx=ctx,
            agent=agent,
            user_prompt=user_prompt,
            history_list=[],  # The summarizer starts from a clean history
        )
        if summary_run and summary_run.result.data:
            summary_text = str(summary_run.result.data)
            # Store (or refresh) the summary in the context.
            conversation_context["history_summary"] = summary_text
            ctx.log_info("History summarized and added/updated in context.")
            ctx.log_info(f"Conversation summary: {summary_text}")
        else:
            ctx.log_warning("History summarization failed or returned no data.")
    except Exception as exc:
        ctx.log_warning(f"Error during history summarization: {exc}")

    return conversation_context
137
+
138
+
139
async def maybe_summarize_history(
    ctx: AnyContext,
    history_list: ListOfDict,
    conversation_context: dict[str, Any],
    should_summarize_history_attr: BoolAttr | None,  # None -> llm_config default
    render_summarize_history: bool,
    history_summarization_threshold_attr: IntAttr | None,  # None -> llm_config default
    render_history_summarization_threshold: bool,
    model: str | Model | None,
    model_settings: ModelSettings | None,
    summarization_prompt: str,
) -> tuple[ListOfDict, dict[str, Any]]:
    """Summarize and truncate the history when enabled and the threshold is met.

    Returns a (history_list, conversation_context) pair: unchanged when
    summarization is skipped, otherwise an empty history plus the
    context updated by `summarize_history`.
    """
    if not should_summarize_history(
        ctx,
        history_list,
        should_summarize_history_attr,
        render_summarize_history,
        history_summarization_threshold_attr,
        render_history_summarization_threshold,
    ):
        # Summarization disabled or not yet due: pass everything through.
        return history_list, conversation_context

    summarization_config = SummarizationConfig(
        model=model,
        settings=model_settings,
        prompt=summarization_prompt,
    )
    updated_context = await summarize_history(
        ctx=ctx,
        config=summarization_config,
        conversation_context=conversation_context,
        history_list=history_list,  # The full list feeds the summary
    )
    # The summary now lives in the context, so the raw history is dropped.
    return [], updated_context
zrb/task/llm/prompt.py ADDED
@@ -0,0 +1,87 @@
1
+ import json
2
+ from textwrap import dedent
3
+ from typing import Any
4
+
5
+ from zrb.attr.type import StrAttr
6
+ from zrb.context.any_context import AnyContext
7
+ from zrb.llm_config import llm_config as default_llm_config
8
+ from zrb.task.llm.context import get_default_context # Updated import
9
+ from zrb.util.attr import get_attr, get_str_attr
10
+
11
+
12
def get_system_prompt(
    ctx: AnyContext,
    system_prompt_attr: StrAttr | None,
    render_system_prompt: bool,
) -> str:
    """Resolve the system prompt, falling back to the llm_config default."""
    prompt = get_attr(
        ctx,
        system_prompt_attr,
        None,
        auto_render=render_system_prompt,
    )
    if prompt is None:
        # No explicit prompt configured: use the global default.
        return default_llm_config.get_default_system_prompt()
    return prompt
27
+
28
+
29
def get_user_message(
    ctx: AnyContext,
    message_attr: StrAttr | None,
) -> str:
    """Resolve the rendered user message, defaulting to a greeting."""
    fallback_message = "How are you?"
    return get_str_attr(ctx, message_attr, fallback_message, auto_render=True)
35
+
36
+
37
def get_summarization_prompt(
    ctx: AnyContext,
    summarization_prompt_attr: StrAttr | None,
    render_summarization_prompt: bool,
) -> str:
    """Resolve the summarization prompt, falling back to the llm_config default."""
    prompt = get_attr(
        ctx,
        summarization_prompt_attr,
        None,
        auto_render=render_summarization_prompt,
    )
    if prompt is None:
        # No explicit prompt configured: use the global default.
        return default_llm_config.get_default_summarization_prompt()
    return prompt
52
+
53
+
54
def get_context_enrichment_prompt(
    ctx: AnyContext,
    context_enrichment_prompt_attr: StrAttr | None,
    render_context_enrichment_prompt: bool,
) -> str:
    """Resolve the context enrichment prompt, falling back to the llm_config default."""
    prompt = get_attr(
        ctx,
        context_enrichment_prompt_attr,
        None,
        auto_render=render_context_enrichment_prompt,
    )
    if prompt is None:
        # No explicit prompt configured: use the global default.
        return default_llm_config.get_default_context_enrichment_prompt()
    return prompt
69
+
70
+
71
def build_user_prompt(
    ctx: AnyContext,
    message_attr: StrAttr | None,
    conversation_context: dict[str, Any],
) -> str:
    """Assemble the final user prompt: a JSON context block plus the message."""
    user_message = get_user_message(ctx, message_attr)
    # Merge default context with the conversation context; on key
    # collisions the conversation context wins.
    merged_context = {**get_default_context(user_message), **conversation_context}
    prompt_body = dedent(
        f"""
        # Context
        {json.dumps(merged_context)}
        # User Message
        {user_message}
        """
    )
    return prompt_body.strip()
zrb/task/llm/typing.py ADDED
@@ -0,0 +1,3 @@
1
+ from typing import Any
2
+
3
# Type alias for a list of dicts; used throughout the llm task code to
# carry conversation history entries.
ListOfDict = list[dict[str, Any]]