zrb 1.8.15__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registry.
@@ -19,11 +19,11 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
 
     if Agent.is_user_prompt_node(node):
         # A user prompt node => The user has provided input
-        print_func(stylize_faint(f">> UserPromptNode: {node.user_prompt}"))
+        print_func(stylize_faint(f" >> UserPromptNode: {node.user_prompt}"))
     elif Agent.is_model_request_node(node):
         # A model request node => We can stream tokens from the model's request
         print_func(
-            stylize_faint(">> ModelRequestNode: streaming partial request tokens")
+            stylize_faint(" >> ModelRequestNode: streaming partial request tokens")
         )
         async with node.stream(agent_run.ctx) as request_stream:
             is_streaming = False
@@ -33,7 +33,7 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
                         print_func("")
                     print_func(
                         stylize_faint(
-                            f"[Request] Starting part {event.index}: {event.part!r}"
+                            f" [Request] Starting part {event.index}: {event.part!r}"
                         ),
                     )
                     is_streaming = False
@@ -53,7 +53,7 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
                     if is_streaming:
                         print_func("")
                     print_func(
-                        stylize_faint(f"[Result] tool_name={event.tool_name}"),
+                        stylize_faint(f" [Result] tool_name={event.tool_name}"),
                     )
                     is_streaming = False
             if is_streaming:
@@ -61,7 +61,9 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
     elif Agent.is_call_tools_node(node):
         # A handle-response node => The model returned some data, potentially calls a tool
         print_func(
-            stylize_faint(">> CallToolsNode: streaming partial response & tool usage")
+            stylize_faint(
+                " >> CallToolsNode: streaming partial response & tool usage"
+            )
         )
         async with node.stream(agent_run.ctx) as handle_stream:
             async for event in handle_stream:
@@ -82,16 +84,16 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
                         del event.part.args["_dummy"]
                     print_func(
                         stylize_faint(
-                            f"[Tools] The LLM calls tool={event.part.tool_name!r} with args={event.part.args} (tool_call_id={event.part.tool_call_id!r})"  # noqa
+                            f" [Tools] The LLM calls tool={event.part.tool_name!r} with args={event.part.args} (tool_call_id={event.part.tool_call_id!r})"  # noqa
                         )
                     )
                 elif isinstance(event, FunctionToolResultEvent):
                     print_func(
                         stylize_faint(
-                            f"[Tools] Tool call {event.tool_call_id!r} returned => {event.result.content}"  # noqa
+                            f" [Tools] Tool call {event.tool_call_id!r} returned => {event.result.content}"  # noqa
                         )
                     )
     elif Agent.is_end_node(node):
         # Once an End node is reached, the agent run is complete
-        print_func(stylize_faint("[End of Response]"))
+        print_func(stylize_faint(" [End of Response]"))
         # print_func(stylize_faint(f"{agent_run.result.data}"))
zrb/task/llm/prompt.py CHANGED
@@ -7,14 +7,13 @@ from zrb.util.attr import get_attr, get_str_attr
 def get_persona(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
-    render_persona: bool,
 ) -> str:
     """Gets the persona, prioritizing task-specific, then default."""
     persona = get_attr(
         ctx,
         persona_attr,
         None,
-        auto_render=render_persona,
+        auto_render=False,
     )
     if persona is not None:
         return persona
@@ -24,14 +23,13 @@ def get_persona(
 def get_base_system_prompt(
     ctx: AnyContext,
     system_prompt_attr: StrAttr | None,
-    render_system_prompt: bool,
 ) -> str:
     """Gets the base system prompt, prioritizing task-specific, then default."""
     system_prompt = get_attr(
         ctx,
         system_prompt_attr,
         None,
-        auto_render=render_system_prompt,
+        auto_render=False,
     )
     if system_prompt is not None:
         return system_prompt
@@ -41,14 +39,13 @@ def get_base_system_prompt(
 def get_special_instruction_prompt(
     ctx: AnyContext,
     special_instruction_prompt_attr: StrAttr | None,
-    render_special_instruction_prompt: bool,
 ) -> str:
     """Gets the special instruction prompt, prioritizing task-specific, then default."""
     special_instruction = get_attr(
         ctx,
         special_instruction_prompt_attr,
         None,
-        auto_render=render_special_instruction_prompt,
+        auto_render=False,
     )
     if special_instruction is not None:
         return special_instruction
@@ -58,19 +55,14 @@ def get_special_instruction_prompt(
 def get_combined_system_prompt(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
-    render_persona: bool,
     system_prompt_attr: StrAttr | None,
-    render_system_prompt: bool,
     special_instruction_prompt_attr: StrAttr | None,
-    render_special_instruction_prompt: bool,
 ) -> str:
     """Combines persona, base system prompt, and special instructions."""
-    persona = get_persona(ctx, persona_attr, render_persona)
-    base_system_prompt = get_base_system_prompt(
-        ctx, system_prompt_attr, render_system_prompt
-    )
+    persona = get_persona(ctx, persona_attr)
+    base_system_prompt = get_base_system_prompt(ctx, system_prompt_attr)
     special_instruction = get_special_instruction_prompt(
-        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
+        ctx, special_instruction_prompt_attr
     )
     parts = []
     if persona:
@@ -85,22 +77,24 @@ def get_combined_system_prompt(
 def get_user_message(
     ctx: AnyContext,
     message_attr: StrAttr | None,
+    render_user_message: bool,
 ) -> str:
     """Gets the user message, rendering and providing a default."""
-    return get_str_attr(ctx, message_attr, "How are you?", auto_render=True)
+    return get_str_attr(
+        ctx, message_attr, "How are you?", auto_render=render_user_message
+    )
 
 
 def get_summarization_prompt(
     ctx: AnyContext,
     summarization_prompt_attr: StrAttr | None,
-    render_summarization_prompt: bool,
 ) -> str:
     """Gets the summarization prompt, rendering if configured and handling defaults."""
     summarization_prompt = get_attr(
         ctx,
         summarization_prompt_attr,
         None,
-        auto_render=render_summarization_prompt,
+        auto_render=False,
     )
     if summarization_prompt is not None:
         return summarization_prompt
@@ -110,14 +104,13 @@ def get_summarization_prompt(
 def get_context_enrichment_prompt(
     ctx: AnyContext,
     context_enrichment_prompt_attr: StrAttr | None,
-    render_context_enrichment_prompt: bool,
 ) -> str:
     """Gets the context enrichment prompt, rendering if configured and handling defaults."""
     context_enrichment_prompt = get_attr(
         ctx,
         context_enrichment_prompt_attr,
         None,
-        auto_render=render_context_enrichment_prompt,
+        auto_render=False,
    )
     if context_enrichment_prompt is not None:
         return context_enrichment_prompt
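
Note: across zrb/task/llm/prompt.py, the per-prompt render flags are gone. The persona, system prompt, special instruction, summarization, and context-enrichment prompts are now always fetched with auto_render=False and used verbatim, while only the user message keeps a render switch (the new render_user_message parameter). A sketch of the resulting call sites, grounded in the signatures above:

    persona = get_persona(ctx, persona_attr)
    base_system_prompt = get_base_system_prompt(ctx, system_prompt_attr)
    special_instruction = get_special_instruction_prompt(ctx, special_instruction_prompt_attr)
    user_message = get_user_message(ctx, message_attr, render_user_message)  # still renderable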
@@ -3,7 +3,7 @@ import inspect
 import traceback
 import typing
 from collections.abc import Callable
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING
 
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.error import ToolExecutionError
@@ -11,11 +11,9 @@ from zrb.util.run import run_async
 
 if TYPE_CHECKING:
     from pydantic_ai import Tool
-else:
-    Tool = Any
 
 
-def wrap_tool(func: Callable, ctx: AnyContext) -> Tool:
+def wrap_tool(func: Callable, ctx: AnyContext) -> "Tool":
     """Wraps a tool function to handle exceptions and context propagation."""
     from pydantic_ai import RunContext, Tool
 
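
Note: the runtime fallback (Tool = Any) is no longer needed because the return annotation is now a string, which type checkers resolve while the interpreter never evaluates it. A minimal runnable sketch of the pattern, with the stdlib decimal module standing in for pydantic_ai:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from decimal import Decimal  # visible to type checkers only

    def make_decimal(raw: str) -> "Decimal":
        # The quoted annotation is never evaluated at runtime, so the real
        # import can be deferred until the function body actually runs.
        from decimal import Decimal

        return Decimal(raw)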
zrb/task/llm_task.py CHANGED
@@ -2,18 +2,6 @@ import json
 from collections.abc import Callable
 from typing import TYPE_CHECKING, Any
 
-if TYPE_CHECKING:
-    from pydantic_ai import Agent, Tool
-    from pydantic_ai.mcp import MCPServer
-    from pydantic_ai.models import Model
-    from pydantic_ai.settings import ModelSettings
-else:
-    Agent = Any
-    Tool = Any
-    MCPServer = Any
-    Model = Any
-    ModelSettings = Any
-
 from zrb.attr.type import BoolAttr, IntAttr, StrAttr, fstring
 from zrb.context.any_context import AnyContext
 from zrb.context.any_shared_context import AnySharedContext
@@ -27,7 +15,7 @@ from zrb.task.llm.config import (
     get_model,
     get_model_settings,
 )
-from zrb.task.llm.context import extract_default_context, get_conversation_context
+from zrb.task.llm.context import extract_default_context
 from zrb.task.llm.context_enrichment import maybe_enrich_context
 from zrb.task.llm.history import (
     ConversationHistoryData,
@@ -46,6 +34,11 @@ from zrb.util.cli.style import stylize_faint
 from zrb.xcom.xcom import Xcom
 
 if TYPE_CHECKING:
+    from pydantic_ai import Agent, Tool
+    from pydantic_ai.mcp import MCPServer
+    from pydantic_ai.models import Model
+    from pydantic_ai.settings import ModelSettings
+
     ToolOrCallable = Tool | Callable
 else:
     ToolOrCallable = Any
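
Note: unlike the imports above it, the ToolOrCallable alias keeps its runtime Any fallback because the alias is an assignment that executes at import time. A minimal reproduction of the pattern, with decimal again standing in for the heavy dependency:

    from collections.abc import Callable
    from typing import TYPE_CHECKING, Any

    if TYPE_CHECKING:
        from decimal import Decimal

        NumberOrCallable = Decimal | Callable  # only type checkers see this
    else:
        NumberOrCallable = Any  # cheap placeholder the interpreter can evaluate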
@@ -62,7 +55,7 @@ class LLMTask(BaseTask):
         input: list[AnyInput | None] | AnyInput | None = None,
         env: list[AnyEnv | None] | AnyEnv | None = None,
         model: (
-            Callable[[AnySharedContext], Model | str | fstring] | Model | None
+            "Callable[[AnySharedContext], Model | str | fstring] | Model | None"
         ) = None,
         render_model: bool = True,
         model_base_url: StrAttr | None = None,
@@ -70,24 +63,20 @@ class LLMTask(BaseTask):
         model_api_key: StrAttr | None = None,
         render_model_api_key: bool = True,
         model_settings: (
-            ModelSettings | Callable[[AnySharedContext], ModelSettings] | None
+            "ModelSettings | Callable[[AnySharedContext], ModelSettings] | None"
         ) = None,
-        agent: Agent | Callable[[AnySharedContext], Agent] | None = None,
+        agent: "Agent | Callable[[AnySharedContext], Agent] | None" = None,
         persona: StrAttr | None = None,
-        render_persona: bool = True,
         system_prompt: StrAttr | None = None,
-        render_system_prompt: bool = True,
         special_instruction_prompt: StrAttr | None = None,
-        render_special_instruction_prompt: bool = True,
         message: StrAttr | None = None,
-        summarization_prompt: StrAttr | None = None,
-        render_summarization_prompt: bool = True,
+        render_message: bool = True,
         enrich_context: BoolAttr | None = None,
         render_enrich_context: bool = True,
         context_enrichment_prompt: StrAttr | None = None,
         render_context_enrichment_prompt: bool = True,
-        context_enrichment_threshold: IntAttr | None = None,
-        render_context_enrichment_threshold: bool = True,
+        context_enrichment_token_threshold: IntAttr | None = None,
+        render_context_enrichment_token_threshold: bool = True,
         tools: (
             list["ToolOrCallable"]
             | Callable[[AnySharedContext], list["ToolOrCallable"]]
@@ -112,8 +101,9 @@ class LLMTask(BaseTask):
         render_history_file: bool = True,
         summarize_history: BoolAttr | None = None,
         render_summarize_history: bool = True,
-        history_summarization_threshold: IntAttr | None = None,
-        render_history_summarization_threshold: bool = True,
+        summarization_prompt: StrAttr | None = None,
+        history_summarization_token_threshold: IntAttr | None = None,
+        render_history_summarization_token_threshold: bool = True,
         rate_limitter: LLMRateLimiter | None = None,
         execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
         retries: int = 2,
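
Note: taken together, these signature changes rename the threshold arguments to *_token_threshold, drop most render_* flags, and add render_message. A hypothetical before/after for calling code (argument values are illustrative):

    # zrb 1.8.x
    task = LLMTask(
        name="chat",
        message="Hello",
        render_persona=True,  # removed in 1.9.x
        context_enrichment_threshold=3000,
        history_summarization_threshold=3000,
    )

    # zrb 1.9.x
    task = LLMTask(
        name="chat",
        message="Hello",
        render_message=True,  # new flag for the user message
        context_enrichment_token_threshold=3000,
        history_summarization_token_threshold=3000,
    )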
@@ -162,20 +152,19 @@ class LLMTask(BaseTask):
         self._model_settings = model_settings
         self._agent = agent
         self._persona = persona
-        self._render_persona = render_persona
         self._system_prompt = system_prompt
-        self._render_system_prompt = render_system_prompt
         self._special_instruction_prompt = special_instruction_prompt
-        self._render_special_instruction_prompt = render_special_instruction_prompt
         self._message = message
+        self._render_message = render_message
         self._summarization_prompt = summarization_prompt
-        self._render_summarization_prompt = render_summarization_prompt
         self._should_enrich_context = enrich_context
         self._render_enrich_context = render_enrich_context
         self._context_enrichment_prompt = context_enrichment_prompt
         self._render_context_enrichment_prompt = render_context_enrichment_prompt
-        self._context_enrichment_threshold = context_enrichment_threshold
-        self._render_context_enrichment_threshold = render_context_enrichment_threshold
+        self._context_enrichment_token_threshold = context_enrichment_token_threshold
+        self._render_context_enrichment_token_threshold = (
+            render_context_enrichment_token_threshold
+        )
         self._tools = tools
         self._rate_limitter = rate_limitter
         self._additional_tools: list["ToolOrCallable"] = []
@@ -188,9 +177,11 @@ class LLMTask(BaseTask):
         self._render_history_file = render_history_file
         self._should_summarize_history = summarize_history
         self._render_summarize_history = render_summarize_history
-        self._history_summarization_threshold = history_summarization_threshold
-        self._render_history_summarization_threshold = (
-            render_history_summarization_threshold
+        self._history_summarization_token_threshold = (
+            history_summarization_token_threshold
+        )
+        self._render_history_summarization_token_threshold = (
+            render_history_summarization_token_threshold
         )
         self._max_call_iteration = max_call_iteration
         self._conversation_context = conversation_context
@@ -202,24 +193,26 @@ class LLMTask(BaseTask):
             for single_tool in tool:
                 self._additional_tools.append(single_tool)
 
-    def add_mcp_server(self, *mcp_server: MCPServer):
+    def add_mcp_server(self, *mcp_server: "MCPServer"):
         self.append_mcp_server(*mcp_server)
 
-    def append_mcp_server(self, *mcp_server: MCPServer):
+    def append_mcp_server(self, *mcp_server: "MCPServer"):
         for single_mcp_server in mcp_server:
             self._additional_mcp_servers.append(single_mcp_server)
 
     def set_should_enrich_context(self, enrich_context: bool):
         self._should_enrich_context = enrich_context
 
-    def set_context_enrichment_threshold(self, enrichment_threshold: int):
-        self._context_enrichment_threshold = enrichment_threshold
+    def set_context_enrichment_token_threshold(self, enrichment_token_threshold: int):
+        self._context_enrichment_token_threshold = enrichment_token_threshold
 
     def set_should_summarize_history(self, summarize_history: bool):
         self._should_summarize_history = summarize_history
 
-    def set_history_summarization_threshold(self, summarization_threshold: int):
-        self._history_summarization_threshold = summarization_threshold
+    def set_history_summarization_token_threshold(
+        self, summarization_token_threshold: int
+    ):
+        self._history_summarization_token_threshold = summarization_token_threshold
 
     async def _exec_action(self, ctx: AnyContext) -> Any:
         # Get dependent configurations first
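
Note: the public setters follow the same rename, so imperative configuration needs a one-line change (assuming an existing LLMTask instance named task):

    task.set_context_enrichment_token_threshold(3000)  # was set_context_enrichment_threshold
    task.set_history_summarization_token_threshold(3000)  # was set_history_summarization_threshold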
@@ -236,77 +229,75 @@ class LLMTask(BaseTask):
         context_enrichment_prompt = get_context_enrichment_prompt(
             ctx=ctx,
             context_enrichment_prompt_attr=self._context_enrichment_prompt,
-            render_context_enrichment_prompt=self._render_context_enrichment_prompt,
         )
         summarization_prompt = get_summarization_prompt(
             ctx=ctx,
             summarization_prompt_attr=self._summarization_prompt,
-            render_summarization_prompt=self._render_summarization_prompt,
         )
-        user_message = get_user_message(ctx, self._message)
+        user_message = get_user_message(ctx, self._message, self._render_message)
         # Get the combined system prompt using the new getter
         system_prompt = get_combined_system_prompt(
             ctx=ctx,
             persona_attr=self._persona,
-            render_persona=self._render_persona,
             system_prompt_attr=self._system_prompt,
-            render_system_prompt=self._render_system_prompt,
             special_instruction_prompt_attr=self._special_instruction_prompt,
-            render_special_instruction_prompt=self._render_special_instruction_prompt,
         )
         # 1. Prepare initial state (read history from previous session)
-        conversation_history = await read_conversation_history(
+        history_data = await read_conversation_history(
             ctx=ctx,
             conversation_history_reader=self._conversation_history_reader,
             conversation_history_file_attr=self._conversation_history_file,
             render_history_file=self._render_history_file,
             conversation_history_attr=self._conversation_history,
         )
-        history_list = conversation_history.history
-        conversation_context = {
-            **conversation_history.context,
-            **get_conversation_context(ctx, self._conversation_context),
-        }
-        # 2. Enrich context (optional)
-        conversation_context = await maybe_enrich_context(
+        history_list = history_data.history
+        long_term_context = history_data.long_term_context
+        conversation_summary = history_data.conversation_summary
+
+        # 2. Enrich context and summarize history sequentially
+        new_long_term_context = await maybe_enrich_context(
             ctx=ctx,
             history_list=history_list,
-            conversation_context=conversation_context,
+            long_term_context=long_term_context,
             should_enrich_context_attr=self._should_enrich_context,
             render_enrich_context=self._render_enrich_context,
-            context_enrichment_threshold_attr=self._context_enrichment_threshold,
-            render_context_enrichment_threshold=self._render_context_enrichment_threshold,
+            context_enrichment_token_threshold_attr=self._context_enrichment_token_threshold,
+            render_context_enrichment_token_threshold=self._render_context_enrichment_token_threshold,
             model=model,
             model_settings=model_settings,
             context_enrichment_prompt=context_enrichment_prompt,
             rate_limitter=self._rate_limitter,
         )
-        # 3. Summarize history (optional, modifies history_list and context)
-        history_list, conversation_context = await maybe_summarize_history(
+        new_history_list, new_conversation_summary = await maybe_summarize_history(
             ctx=ctx,
             history_list=history_list,
-            conversation_context=conversation_context,
+            conversation_summary=conversation_summary,
             should_summarize_history_attr=self._should_summarize_history,
             render_summarize_history=self._render_summarize_history,
-            history_summarization_threshold_attr=self._history_summarization_threshold,
-            render_history_summarization_threshold=(
-                self._render_history_summarization_threshold
+            history_summarization_token_threshold_attr=self._history_summarization_token_threshold,
+            render_history_summarization_token_threshold=(
+                self._render_history_summarization_token_threshold
             ),
             model=model,
             model_settings=model_settings,
             summarization_prompt=summarization_prompt,
             rate_limitter=self._rate_limitter,
         )
-        # 4. Build the final user prompt and system prompt
-        final_user_prompt, default_context = extract_default_context(user_message)
-        final_system_prompt = "\n".join(
-            [
-                system_prompt,
-                "# Context",
-                json.dumps({**default_context, **conversation_context}),
-            ]
+
+        # 3. Build the final user prompt and system prompt
+        final_user_prompt, system_info = extract_default_context(user_message)
+        context_parts = [
+            f"## System Information\n{json.dumps(system_info, indent=2)}",
+        ]
+        if new_long_term_context:
+            context_parts.append(new_long_term_context)
+        if new_conversation_summary:
+            context_parts.append(new_conversation_summary)
+
+        final_system_prompt = "\n\n".join(
+            [system_prompt, "# Context", "\n\n---\n\n".join(context_parts)]
         )
-        # 5. Get the agent instance
+        # 4. Get the agent instance
         agent = get_agent(
             ctx=ctx,
             agent_attr=self._agent,
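
Note: the system prompt is no longer a single JSON dump of the merged context dict; it is now assembled from named sections separated by horizontal rules. A standalone sketch of the new assembly with illustrative values:

    import json

    system_prompt = "You are a helpful assistant."
    system_info = {"os": "linux"}  # whatever extract_default_context() returns
    new_long_term_context = "## Long-Term Context\nThe user prefers concise answers."
    new_conversation_summary = "## Conversation Summary\nWe discussed zrb tasks."

    context_parts = [f"## System Information\n{json.dumps(system_info, indent=2)}"]
    if new_long_term_context:
        context_parts.append(new_long_term_context)
    if new_conversation_summary:
        context_parts.append(new_conversation_summary)

    final_system_prompt = "\n\n".join(
        [system_prompt, "# Context", "\n\n---\n\n".join(context_parts)]
    )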
@@ -318,18 +309,24 @@ class LLMTask(BaseTask):
             mcp_servers_attr=self._mcp_servers,
             additional_mcp_servers=self._additional_mcp_servers,
         )
-        # 6. Run the agent iteration and save the results/history
+        # 5. Run the agent iteration and save the results/history
         return await self._run_agent_and_save_history(
-            ctx, agent, final_user_prompt, history_list, conversation_context
+            ctx,
+            agent,
+            final_user_prompt,
+            new_history_list,
+            new_long_term_context,
+            new_conversation_summary,
         )
 
     async def _run_agent_and_save_history(
         self,
         ctx: AnyContext,
-        agent: Agent,
+        agent: "Agent",
         user_prompt: str,
         history_list: ListOfDict,
-        conversation_context: dict[str, Any],
+        long_term_context: str,
+        conversation_summary: str,
     ) -> Any:
         """Executes the agent, processes results, and saves history."""
         try:
@@ -343,7 +340,8 @@ class LLMTask(BaseTask):
             if agent_run and agent_run.result:
                 new_history_list = json.loads(agent_run.result.all_messages_json())
                 data_to_write = ConversationHistoryData(
-                    context=conversation_context,  # Save the final context state
+                    long_term_context=long_term_context,
+                    conversation_summary=conversation_summary,
                     history=new_history_list,
                 )
                 await write_conversation_history(
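
Note: persisted history now carries two explicit string fields instead of a free-form context dict. A sketch of the new payload; the field names come from the diff, while the history item shape is whatever pydantic-ai's all_messages_json() produces (shown here as a placeholder):

    data_to_write = ConversationHistoryData(
        long_term_context="## Long-Term Context\n...",
        conversation_summary="## Conversation Summary\n...",
        history=[{"parts": ["..."]}],  # placeholder for serialized messages
    )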
@@ -358,7 +356,7 @@ class LLMTask(BaseTask):
                 ctx.xcom[xcom_usage_key] = Xcom([])
                 usage = agent_run.result.usage()
                 ctx.xcom[xcom_usage_key].push(usage)
-                ctx.print(stylize_faint(f"[Token Usage] {usage}"), plain=True)
+                ctx.print(stylize_faint(f" Token: {usage}"), plain=True)
                 return agent_run.result.output
             else:
                 ctx.log_warning("Agent run did not produce a result.")
@@ -366,3 +364,134 @@ class LLMTask(BaseTask):
         except Exception as e:
             ctx.log_error(f"Error during agent execution or history saving: {str(e)}")
             raise  # Re-raise the exception after logging
+
+
+def llm_task(
+    name: str,
+    color: int | None = None,
+    icon: str | None = None,
+    description: str | None = None,
+    cli_only: bool = False,
+    input: list[AnyInput | None] | AnyInput | None = None,
+    env: list[AnyEnv | None] | AnyEnv | None = None,
+    model: "Callable[[AnySharedContext], Model | str | fstring] | Model | None" = None,
+    render_model: bool = True,
+    model_base_url: StrAttr | None = None,
+    render_model_base_url: bool = True,
+    model_api_key: StrAttr | None = None,
+    render_model_api_key: bool = True,
+    model_settings: "ModelSettings | Callable[[AnySharedContext], ModelSettings] | None" = None,
+    agent: "Agent | Callable[[AnySharedContext], Agent] | None" = None,
+    persona: StrAttr | None = None,
+    system_prompt: StrAttr | None = None,
+    special_instruction_prompt: StrAttr | None = None,
+    message: StrAttr | None = None,
+    render_message: bool = True,
+    enrich_context: BoolAttr | None = None,
+    render_enrich_context: bool = True,
+    context_enrichment_prompt: StrAttr | None = None,
+    render_context_enrichment_prompt: bool = True,
+    context_enrichment_token_threshold: IntAttr | None = None,
+    render_context_enrichment_token_threshold: bool = True,
+    tools: (
+        list["ToolOrCallable"] | Callable[[AnySharedContext], list["ToolOrCallable"]]
+    ) = [],
+    mcp_servers: (
+        list["MCPServer"] | Callable[[AnySharedContext], list["MCPServer"]]
+    ) = [],
+    conversation_history: (
+        ConversationHistoryData
+        | Callable[[AnySharedContext], ConversationHistoryData | dict | list]
+        | dict
+        | list
+    ) = ConversationHistoryData(),
+    conversation_history_reader: (
+        Callable[[AnySharedContext], ConversationHistoryData | dict | list | None]
+        | None
+    ) = None,
+    conversation_history_writer: (
+        Callable[[AnySharedContext, ConversationHistoryData], None] | None
+    ) = None,
+    conversation_history_file: StrAttr | None = None,
+    render_history_file: bool = True,
+    summarize_history: BoolAttr | None = None,
+    render_summarize_history: bool = True,
+    summarization_prompt: StrAttr | None = None,
+    history_summarization_token_threshold: IntAttr | None = None,
+    render_history_summarization_token_threshold: bool = True,
+    rate_limitter: LLMRateLimiter | None = None,
+    execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
+    retries: int = 2,
+    retry_period: float = 0,
+    readiness_check: list[AnyTask] | AnyTask | None = None,
+    readiness_check_delay: float = 0.5,
+    readiness_check_period: float = 5,
+    readiness_failure_threshold: int = 1,
+    readiness_timeout: int = 60,
+    monitor_readiness: bool = False,
+    max_call_iteration: int = 20,
+    upstream: list[AnyTask] | AnyTask | None = None,
+    fallback: list[AnyTask] | AnyTask | None = None,
+    successor: list[AnyTask] | AnyTask | None = None,
+    conversation_context: (
+        dict[str, Any] | Callable[[AnySharedContext], dict[str, Any]] | None
+    ) = None,
+) -> LLMTask:
+    """
+    Create a new LLM task.
+    """
+    return LLMTask(
+        name=name,
+        color=color,
+        icon=icon,
+        description=description,
+        cli_only=cli_only,
+        input=input,
+        env=env,
+        model=model,
+        render_model=render_model,
+        model_base_url=model_base_url,
+        render_model_base_url=render_model_base_url,
+        model_api_key=model_api_key,
+        render_model_api_key=render_model_api_key,
+        model_settings=model_settings,
+        agent=agent,
+        persona=persona,
+        system_prompt=system_prompt,
+        special_instruction_prompt=special_instruction_prompt,
+        message=message,
+        render_message=render_message,
+        enrich_context=enrich_context,
+        render_enrich_context=render_enrich_context,
+        context_enrichment_prompt=context_enrichment_prompt,
+        render_context_enrichment_prompt=render_context_enrichment_prompt,
+        context_enrichment_token_threshold=context_enrichment_token_threshold,
+        render_context_enrichment_token_threshold=render_context_enrichment_token_threshold,
+        tools=tools,
+        mcp_servers=mcp_servers,
+        conversation_history=conversation_history,
+        conversation_history_reader=conversation_history_reader,
+        conversation_history_writer=conversation_history_writer,
+        conversation_history_file=conversation_history_file,
+        render_history_file=render_history_file,
+        summarize_history=summarize_history,
+        render_summarize_history=render_summarize_history,
+        summarization_prompt=summarization_prompt,
+        history_summarization_token_threshold=history_summarization_token_threshold,
+        render_history_summarization_token_threshold=render_history_summarization_token_threshold,
+        rate_limitter=rate_limitter,
+        execute_condition=execute_condition,
+        retries=retries,
+        retry_period=retry_period,
+        readiness_check=readiness_check,
+        readiness_check_delay=readiness_check_delay,
+        readiness_check_period=readiness_check_period,
+        readiness_failure_threshold=readiness_failure_threshold,
+        readiness_timeout=readiness_timeout,
+        monitor_readiness=monitor_readiness,
+        max_call_iteration=max_call_iteration,
+        upstream=upstream,
+        fallback=fallback,
+        successor=successor,
+        conversation_context=conversation_context,
+    )
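
Note: llm_task() is a new convenience factory that forwards every argument to the LLMTask constructor unchanged. Hypothetical usage (values illustrative; the import assumes the factory is re-exported at the package root):

    from zrb import llm_task

    chat = llm_task(
        name="chat",
        message="Summarize the latest deployment logs",
        summarize_history=True,
        history_summarization_token_threshold=3000,
    )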
zrb/util/file.py CHANGED
@@ -31,9 +31,10 @@ def _read_text_file_content(file_path: str) -> str:
 
 
 def _read_pdf_file_content(file_path: str) -> str:
-    from pdfplumber.pdf import PDF, open
+    import pdfplumber
+    from pdfplumber.pdf import PDF
 
-    with open(file_path) as pdf:
+    with pdfplumber.open(file_path) as pdf:
         pdf: PDF
         return "\n".join(
             page.extract_text() for page in pdf.pages if page.extract_text()
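
Note: this appears to be a genuine bug fix rather than a refactor: pdfplumber exposes open at the package level (pdfplumber.open, an alias of PDF.open), not in the pdfplumber.pdf module, so the old import could not have worked. A minimal standalone version of the corrected helper:

    import pdfplumber

    def read_pdf_text(file_path: str) -> str:
        # pdfplumber.open returns a PDF whose pages expose extract_text()
        with pdfplumber.open(file_path) as pdf:
            return "\n".join(
                page.extract_text() for page in pdf.pages if page.extract_text()
            )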
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.8.15
+Version: 1.9.1
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later