zrb 1.5.7__py3-none-any.whl → 1.5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/tool/rag.py +4 -3
- zrb/llm_config.py +38 -0
- zrb/task/any_task.py +22 -6
- zrb/task/base/__init__.py +0 -0
- zrb/task/base/context.py +108 -0
- zrb/task/base/dependencies.py +57 -0
- zrb/task/base/execution.py +274 -0
- zrb/task/base/lifecycle.py +182 -0
- zrb/task/base/monitoring.py +134 -0
- zrb/task/base/operators.py +41 -0
- zrb/task/base_task.py +76 -382
- zrb/task/cmd_task.py +2 -1
- zrb/task/llm/agent.py +141 -0
- zrb/task/llm/config.py +83 -0
- zrb/task/llm/context.py +95 -0
- zrb/task/llm/{context_enricher.py → context_enrichment.py} +55 -6
- zrb/task/llm/history.py +153 -3
- zrb/task/llm/history_summarization.py +173 -0
- zrb/task/llm/prompt.py +87 -0
- zrb/task/llm/typing.py +3 -0
- zrb/task/llm_task.py +140 -323
- {zrb-1.5.7.dist-info → zrb-1.5.9.dist-info}/METADATA +2 -2
- {zrb-1.5.7.dist-info → zrb-1.5.9.dist-info}/RECORD +25 -15
- zrb/task/llm/agent_runner.py +0 -53
- zrb/task/llm/default_context.py +0 -45
- zrb/task/llm/history_summarizer.py +0 -71
- {zrb-1.5.7.dist-info → zrb-1.5.9.dist-info}/WHEEL +0 -0
- {zrb-1.5.7.dist-info → zrb-1.5.9.dist-info}/entry_points.txt +0 -0
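
Migration note: this release moves `LLMTask`'s helper logic out of `zrb/task/llm_task.py` into dedicated modules under `zrb/task/llm/` and `zrb/task/base/`, deleting `agent_runner.py`, `default_context.py`, and `history_summarizer.py`. Code that imported those internal modules directly needs a new import path; a minimal sketch, using only module paths and names visible in this diff (these look like internal modules, so the paths may change again):

# zrb 1.5.7 (module deleted in 1.5.9):
from zrb.task.llm.agent_runner import run_agent_iteration

# zrb 1.5.9 (the import used by the new zrb/task/llm_task.py):
from zrb.task.llm.agent import get_agent, run_agent_iteration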
zrb/task/llm_task.py
CHANGED
@@ -1,7 +1,5 @@
-import inspect
 import json
 from collections.abc import Callable
-from textwrap import dedent
 from typing import Any

 from pydantic_ai import Agent, Tool
@@ -14,22 +12,33 @@ from zrb.context.any_context import AnyContext
 from zrb.context.any_shared_context import AnySharedContext
 from zrb.env.any_env import AnyEnv
 from zrb.input.any_input import AnyInput
-from zrb.llm_config import LLMConfig
-from zrb.llm_config import llm_config as default_llm_config
 from zrb.task.any_task import AnyTask
 from zrb.task.base_task import BaseTask
-from zrb.task.llm.
-
-
-from zrb.task.llm.
-
-
-
-from zrb.
-from zrb.
+from zrb.task.llm.agent import get_agent, run_agent_iteration
+
+# No longer need llm_config here
+from zrb.task.llm.config import (
+    get_model,
+    get_model_settings,
+)
+from zrb.task.llm.context import get_conversation_context
+from zrb.task.llm.context_enrichment import maybe_enrich_context
+from zrb.task.llm.history import (
+    ConversationHistoryData,
+    ListOfDict,
+    prepare_initial_state,
+    write_conversation_history,
+)
+from zrb.task.llm.history_summarization import maybe_summarize_history
+from zrb.task.llm.prompt import (
+    build_user_prompt,
+    get_context_enrichment_prompt,
+    get_summarization_prompt,
+    get_system_prompt,
+)
+from zrb.util.cli.style import stylize_faint
+from zrb.xcom.xcom import Xcom

-# ListOfDict moved to history.py
-# Removed old ConversationHistoryData type alias
 ToolOrCallable = Tool | Callable


@@ -60,7 +69,7 @@ class LLMTask(BaseTask):
         message: StrAttr | None = None,
         summarization_prompt: StrAttr | None = None,
         render_summarization_prompt: bool = True,
-        enrich_context: BoolAttr =
+        enrich_context: BoolAttr | None = None,  # Default to None
         render_enrich_context: bool = True,
         context_enrichment_prompt: StrAttr | None = None,
         render_context_enrichment_prompt: bool = True,
@@ -90,9 +99,9 @@
         ) = None,
         conversation_history_file: StrAttr | None = None,
         render_history_file: bool = True,
-        summarize_history: BoolAttr =
+        summarize_history: BoolAttr | None = None,  # Default to None
         render_summarize_history: bool = True,
-        history_summarization_threshold: IntAttr =
+        history_summarization_threshold: IntAttr | None = None,  # Default to None
         render_history_summarization_threshold: bool = True,
         execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
         retries: int = 2,
@@ -183,323 +192,131 @@ class LLMTask(BaseTask):
         self._history_summarization_threshold = summarization_threshold

     async def _exec_action(self, ctx: AnyContext) -> Any:
-
-
-
-
-
-
-
-
-
-
-        # Enrich context based on history (if enabled)
-        if self._get_should_enrich_context(ctx, history_list):
-            conversation_context = await enrich_context(
-                ctx=ctx,
-                config=EnrichmentConfig(
-                    model=self._get_model(ctx),
-                    settings=self._get_model_settings(ctx),
-                    prompt=self._get_context_enrichment_prompt(ctx),
-                ),
-                conversation_context=conversation_context,
-                history_list=history_list,
-            )
-        # Get history handling parameters
-        if self._get_should_summarize_history(ctx, history_list):
-            ctx.log_info("Summarize previous conversation")
-            # Summarize the part to be removed and update context
-            conversation_context = await summarize_history(
-                ctx=ctx,
-                config=SummarizationConfig(
-                    model=self._get_model(ctx),
-                    settings=self._get_model_settings(ctx),
-                    prompt=self._get_summarization_prompt(ctx),
-                ),
-                conversation_context=conversation_context,
-                history_list=history_list,  # Pass the full list for context
-            )
-            # Truncate the history list after summarization
-            history_list = []
-        # Construct user prompt
-        user_prompt = self._get_user_prompt(ctx, conversation_context)
-        # Create and run agent
-        agent = self._get_agent(ctx)
-        try:
-            agent_run = await run_agent_iteration(
-                ctx=ctx,
-                agent=agent,
-                user_prompt=user_prompt,
-                history_list=history_list,
-            )
-            if agent_run:
-                new_history_list = json.loads(agent_run.result.all_messages_json())
-                data_to_write = ConversationHistoryData(
-                    context=conversation_context,
-                    history=new_history_list,
-                )
-                await self._write_conversation_history(
-                    ctx, data_to_write
-                )  # Pass the model instance
-                return agent_run.result.data
-        except Exception as e:
-            ctx.log_error(f"Error in agent execution: {str(e)}")
-            raise
-
-    async def _write_conversation_history(
-        self, ctx: AnyContext, history_data: ConversationHistoryData
-    ):
-        # Expects the model instance
-        if self._conversation_history_writer is not None:
-            # Pass the model instance directly to the writer
-            await run_async(self._conversation_history_writer(ctx, history_data))
-        history_file = self._get_history_file(ctx)
-        if history_file != "":
-            # Use model_dump_json for serialization
-            write_file(history_file, history_data.model_dump_json(indent=2))
-
-    def _get_model_settings(self, ctx: AnyContext) -> ModelSettings | None:
-        if callable(self._model_settings):
-            return self._model_settings(ctx)
-        return self._model_settings
-
-    def _get_agent(self, ctx: AnyContext) -> Agent:
-        if isinstance(self._agent, Agent):
-            return self._agent
-        if callable(self._agent):
-            return self._agent(ctx)
-        tools_or_callables = list(
-            self._tools(ctx) if callable(self._tools) else self._tools
-        )
-        tools_or_callables.extend(self._additional_tools)
-        tools = []
-        for tool_or_callable in tools_or_callables:
-            if isinstance(tool_or_callable, Tool):
-                tools.append(tool_or_callable)
-            else:
-                # Inspect original callable for 'ctx' parameter
-                # This ctx refer to pydantic AI's ctx, not task ctx.
-                original_sig = inspect.signature(tool_or_callable)
-                takes_ctx = "ctx" in original_sig.parameters
-                wrapped_tool = wrap_tool(tool_or_callable)
-                tools.append(Tool(wrapped_tool, takes_ctx=takes_ctx))
-        mcp_servers = list(
-            self._mcp_servers(ctx) if callable(self._mcp_servers) else self._mcp_servers
-        )
-        mcp_servers.extend(self._additional_mcp_servers)
-        return Agent(
-            self._get_model(ctx),
-            system_prompt=self._get_system_prompt(ctx),
-            tools=tools,
-            mcp_servers=mcp_servers,
-            model_settings=self._get_model_settings(ctx),
-            retries=3,
-        )
-
-    def _get_model(self, ctx: AnyContext) -> str | Model | None:
-        model = get_attr(ctx, self._model, None, auto_render=self._render_model)
-        if model is None:
-            return default_llm_config.get_default_model()
-        if isinstance(model, str):
-            model_base_url = self._get_model_base_url(ctx)
-            model_api_key = self._get_model_api_key(ctx)
-            llm_config = LLMConfig(
-                default_model_name=model,
-                default_base_url=model_base_url,
-                default_api_key=model_api_key,
-            )
-            if model_base_url is None and model_api_key is None:
-                default_model_provider = default_llm_config.get_default_model_provider()
-                if default_model_provider is not None:
-                    llm_config.set_default_provider(default_model_provider)
-            return llm_config.get_default_model()
-        raise ValueError(f"Invalid model: {model}")
-
-    def _get_model_base_url(self, ctx: AnyContext) -> str | None:
-        base_url = get_attr(
-            ctx, self._model_base_url, None, auto_render=self._render_model_base_url
+        # Get dependent configurations first
+        model_settings = get_model_settings(ctx, self._model_settings)
+        model = get_model(
+            ctx=ctx,
+            model_attr=self._model,
+            render_model=self._render_model,
+            model_base_url_attr=self._model_base_url,
+            render_model_base_url=self._render_model_base_url,
+            model_api_key_attr=self._model_api_key,
+            render_model_api_key=self._render_model_api_key,
         )
-
-
-
-
-    def _get_model_api_key(self, ctx: AnyContext) -> str | None:
-        api_key = get_attr(
-            ctx, self._model_api_key, None, auto_render=self._render_model_api_key
+        context_enrichment_prompt = get_context_enrichment_prompt(
+            ctx=ctx,
+            context_enrichment_prompt_attr=self._context_enrichment_prompt,
+            render_context_enrichment_prompt=self._render_context_enrichment_prompt,
         )
-
-
-
-
-    def _get_system_prompt(self, ctx: AnyContext) -> str:
-        system_prompt = get_attr(
-            ctx,
-            self._system_prompt,
-            None,
-            auto_render=self._render_system_prompt,
+        summarization_prompt = get_summarization_prompt(
+            ctx=ctx,
+            summarization_prompt_attr=self._summarization_prompt,
+            render_summarization_prompt=self._render_summarization_prompt,
         )
-
-
-
-
-    def _get_user_prompt(
-        self, ctx: AnyContext, conversation_context: dict[str, Any]
-    ) -> str:
-        user_message = self._get_user_message(ctx)
-        enriched_context = {**get_default_context(user_message), **conversation_context}
-        return dedent(
-            f"""
-            # Context
-            {json.dumps(enriched_context)}
-            # User Message
-            {user_message}
-            """.strip()
+        system_prompt = get_system_prompt(
+            ctx=ctx,
+            system_prompt_attr=self._system_prompt,
+            render_system_prompt=self._render_system_prompt,
         )
-
-
-
-
-
-
-
-
-
-
+        # 1. Prepare initial state (read history, get initial context)
+        history_list, conversation_context = await prepare_initial_state(
+            ctx=ctx,
+            conversation_history_reader=self._conversation_history_reader,
+            conversation_history_file_attr=self._conversation_history_file,
+            render_history_file=self._render_history_file,
+            conversation_history_attr=self._conversation_history,
+            conversation_context_getter=lambda c: get_conversation_context(
+                c, self._conversation_context
+            ),
         )
-
-
-
-
-
-
-
-
-
-
-            ctx,
-            self._should_enrich_context,
-            True,  # Default to True if not specified
-            auto_render=self._render_enrich_context,
+        # 2. Enrich context (optional)
+        conversation_context = await maybe_enrich_context(
+            ctx=ctx,
+            history_list=history_list,
+            conversation_context=conversation_context,
+            should_enrich_context_attr=self._should_enrich_context,
+            render_enrich_context=self._render_enrich_context,
+            model=model,
+            model_settings=model_settings,
+            context_enrichment_prompt=context_enrichment_prompt,
        )
-
-
-
-
-
-
-
+        # 3. Summarize history (optional, modifies history_list and context)
+        history_list, conversation_context = await maybe_summarize_history(
+            ctx=ctx,
+            history_list=history_list,
+            conversation_context=conversation_context,
+            should_summarize_history_attr=self._should_summarize_history,
+            render_summarize_history=self._render_summarize_history,
+            history_summarization_threshold_attr=self._history_summarization_threshold,
+            render_history_summarization_threshold=(
+                self._render_history_summarization_threshold
+            ),
+            model=model,
+            model_settings=model_settings,
+            summarization_prompt=summarization_prompt,
         )
-
-
-        return default_llm_config.get_default_context_enrichment_prompt()
-
-    async def _read_conversation_history(
-        self, ctx: AnyContext
-    ) -> ConversationHistoryData:  # Returns the model instance
-        """Reads conversation history from reader, file, or attribute, with validation."""
-        history_file = self._get_history_file(ctx)
-        # Priority 1 & 2: Reader and File (handled by ConversationHistoryData)
-        history_data = await ConversationHistoryData.read_from_sources(
+        # 4. Build the final user prompt
+        user_prompt = build_user_prompt(
             ctx=ctx,
-
-
+            message_attr=self._message,
+            conversation_context=conversation_context,
         )
-
-
-
-
-
-
-
-
-
-
-
-        )
-        if raw_data_attr is None:
-            raw_data_attr = self._conversation_history
-        if raw_data_attr:
-            history_data = ConversationHistoryData.parse_and_validate(
-                ctx, raw_data_attr, "attribute"
-            )
-            if history_data:
-                return history_data
-        # Fallback: Return default value
-        return ConversationHistoryData()
-
-    def _get_history_file(self, ctx: AnyContext) -> str:
-        return get_str_attr(
-            ctx,
-            self._conversation_history_file,
-            "",
-            auto_render=self._render_history_file,
+        # 5. Get the agent instance
+        agent = get_agent(
+            ctx=ctx,
+            agent_attr=self._agent,
+            model=model,
+            system_prompt=system_prompt,
+            model_settings=model_settings,
+            tools_attr=self._tools,
+            additional_tools=self._additional_tools,
+            mcp_servers_attr=self._mcp_servers,
+            additional_mcp_servers=self._additional_mcp_servers,
         )
-
-
-
-    ) -> bool:
-        history_len = len(history_list)
-        if history_len == 0:
-            return False
-        summarization_threshold = self._get_history_summarization_threshold(ctx)
-        if summarization_threshold == -1:
-            return False
-        if summarization_threshold > history_len:
-            return False
-        return get_bool_attr(
-            ctx,
-            self._should_summarize_history,
-            False,
-            auto_render=self._render_summarize_history,
+        # 6. Run the agent iteration and save the results/history
+        return await self._run_agent_and_save_history(
+            ctx, agent, user_prompt, history_list, conversation_context
         )

-    def
-
+    async def _run_agent_and_save_history(
+        self,
+        ctx: AnyContext,
+        agent: Agent,
+        user_prompt: str,
+        history_list: ListOfDict,
+        conversation_context: dict[str, Any],
+    ) -> Any:
+        """Executes the agent, processes results, and saves history."""
         try:
-
-            ctx,
-
-
-
-            )
-        except ValueError as e:
-            ctx.log_warning(
-                f"Could not convert history_summarization_threshold to int: {e}. "
-                "Defaulting to -1 (no threshold)."
-            )
-            return -1
-
-    def _get_conversation_context(self, ctx: AnyContext) -> dict[str, Any]:
-        """
-        Retrieves the conversation context.
-        If a value in the context dict is callable, it executes it with ctx.
-        """
-        raw_context = get_attr(
-            ctx, self._conversation_context, {}, auto_render=False
-        )  # Context usually shouldn't be rendered
-        if not isinstance(raw_context, dict):
-            ctx.log_warning(
-                f"Conversation context resolved to type {type(raw_context)}, "
-                "expected dict. Returning empty context."
+            agent_run = await run_agent_iteration(
+                ctx=ctx,
+                agent=agent,
+                user_prompt=user_prompt,
+                history_list=history_list,
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if agent_run:
+                new_history_list = json.loads(agent_run.result.all_messages_json())
+                data_to_write = ConversationHistoryData(
+                    context=conversation_context,  # Save the final context state
+                    history=new_history_list,
+                )
+                await write_conversation_history(
+                    ctx=ctx,
+                    history_data=data_to_write,
+                    conversation_history_writer=self._conversation_history_writer,
+                    conversation_history_file_attr=self._conversation_history_file,
+                    render_history_file=self._render_history_file,
+                )
+                xcom_usage_key = f"{self.name}-usage"
+                if xcom_usage_key not in ctx.xcom:
+                    ctx.xcom[xcom_usage_key] = Xcom([])
+                usage = agent_run.result.usage()
+                ctx.xcom.get(xcom_usage_key).push(usage)
+                ctx.print(stylize_faint(f"[USAGE] {usage}"))
+                return agent_run.result.data
             else:
-
-
+                ctx.log_warning("Agent run did not produce a result.")
+                return None  # Or handle as appropriate
+        except Exception as e:
+            ctx.log_error(f"Error during agent execution or history saving: {str(e)}")
+            raise  # Re-raise the exception after logging
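
A user-visible addition in the new `_run_agent_and_save_history` above: after each run, the task pushes the pydantic-ai usage report into XCom under the key `f"{self.name}-usage"`. A minimal sketch of reading it from another task's action; the key format, `ctx.xcom.get`, and `ctx.print` come from this diff, while the surrounding function and the task name "chat" are hypothetical:

from zrb.context.any_context import AnyContext


def report_llm_usage(ctx: AnyContext) -> None:
    # An LLMTask named "chat" pushes one usage entry per run into
    # ctx.xcom["chat-usage"] (key format: f"{task.name}-usage").
    usage_xcom = ctx.xcom.get("chat-usage")  # "chat" is a hypothetical task name
    if usage_xcom is not None:
        ctx.print(f"LLM usage so far: {usage_xcom}")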
{zrb-1.5.7.dist-info → zrb-1.5.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.5.7
+Version: 1.5.9
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
@@ -26,7 +26,7 @@ Requires-Dist: openai (>=1.70.0,<2.0.0) ; extra == "rag" or extra == "all"
 Requires-Dist: pdfplumber (>=0.11.6,<0.12.0) ; extra == "rag" or extra == "all"
 Requires-Dist: playwright (>=1.51.0,<2.0.0) ; extra == "playwright" or extra == "all"
 Requires-Dist: psutil (>=7.0.0,<8.0.0)
-Requires-Dist: pydantic-ai (>=0.
+Requires-Dist: pydantic-ai (>=0.1.2,<0.2.0)
 Requires-Dist: python-dotenv (>=1.1.0,<2.0.0)
 Requires-Dist: python-jose[cryptography] (>=3.4.0,<4.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
{zrb-1.5.7.dist-info → zrb-1.5.9.dist-info}/RECORD
CHANGED
@@ -14,7 +14,7 @@ zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sgueP
 zrb/builtin/llm/tool/api.py,sha256=yR9I0ZsI96OeQl9pgwORMASVuXsAL0a89D_iPS4C8Dc,1699
 zrb/builtin/llm/tool/cli.py,sha256=_CNEmEc6K2Z0i9ppYeM7jGpqaEdT3uxaWQatmxP3jKE,858
 zrb/builtin/llm/tool/file.py,sha256=ecFBmbMaqCIfqNGTmvd-Yt3vLtKuQ15KbjXlnIMWdho,19269
-zrb/builtin/llm/tool/rag.py,sha256=
+zrb/builtin/llm/tool/rag.py,sha256=45t0o88l7F62oq2P61NnC1hsZJ4h72dZsVQfcsOIUc8,7521
 zrb/builtin/llm/tool/web.py,sha256=4qzom9xX-JxztIaTWneNfyTRlgweHIxzC1uSEAxJ00A,5507
 zrb/builtin/md5.py,sha256=0pNlrfZA0wlZlHvFHLgyqN0JZJWGKQIF5oXxO44_OJk,949
 zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -239,7 +239,7 @@ zrb/input/option_input.py,sha256=TQB82ko5odgzkULEizBZi0e9TIHEbIgvdP0AR3RhA74,213
 zrb/input/password_input.py,sha256=szBojWxSP9QJecgsgA87OIYwQrY2AQ3USIKdDZY6snU,1465
 zrb/input/str_input.py,sha256=NevZHX9rf1g8eMatPyy-kUX3DglrVAQpzvVpKAzf7bA,81
 zrb/input/text_input.py,sha256=shvVbc2U8Is36h23M5lcW8IEwKc9FR-4uEPZZroj3rU,3377
-zrb/llm_config.py,sha256=
+zrb/llm_config.py,sha256=Zfmv1sv1KbfUxB_jOBRYKbbsajenEwQ1jsB3-Fk5DUM,9173
 zrb/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/runner/cli.py,sha256=0mT0oO_yEhc8N4nYCJNujhgLjVykZ0B-kAOFXyAvAqM,6672
 zrb/runner/common_util.py,sha256=0zhZn1Jdmr194_nsL5_L-Kn9-_NDpMTI2z6_LXUQJ-U,1369
@@ -297,21 +297,31 @@ zrb/session_state_logger/any_session_state_logger.py,sha256=OEP7RQD6sPSJP0OY8oDK
 zrb/session_state_logger/file_session_state_logger.py,sha256=1ue7-Bcwg4wlLn2G_7ARR4Rij2zUISj_Y56VBQsCaMQ,3666
 zrb/session_state_logger/session_state_logger_factory.py,sha256=wXf2DVmeRmx399MFYYty6uNcPZMcf7iayHBYCLGlhfc,189
 zrb/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-zrb/task/any_task.py,sha256=
-zrb/task/
+zrb/task/any_task.py,sha256=zklUjkLRQ62TEvfnOUUYfXChj8Zk4igee3w8V3_rN08,5846
+zrb/task/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+zrb/task/base/context.py,sha256=73k3fKwup0AJwTTLpay0f_-axJextaxgbTem4w4Bmas,3670
+zrb/task/base/dependencies.py,sha256=Kcxhn7z4OU9Fc_y-cD1Sc96wgNXs0VDoi8r5cJMu0oY,1952
+zrb/task/base/execution.py,sha256=lB6cfivk-EM6sZSaPjYs_ufb7jb-A2jLJNhBupwBFgI,11101
+zrb/task/base/lifecycle.py,sha256=3p3mDxE97oizmh9PnF54ud9eoI_PmCXI_VsikIs-VQ8,7293
+zrb/task/base/monitoring.py,sha256=UAOEcPiYNtZR4FFxzWCosuOEFE_P3c4GT5vAhQmohqI,5663
+zrb/task/base/operators.py,sha256=uAMFqpZJsPnCrojgOl1FUDXTS15mtOa_IqiAXltyYRU,1576
+zrb/task/base_task.py,sha256=jgo9WKMMppSWvXiWCaPnm42KH6NgbFZKYfJfzRCZKeA,8222
 zrb/task/base_trigger.py,sha256=jC722rDvodaBLeNaFghkTyv1u0QXrK6BLZUUqcmBJ7Q,4581
-zrb/task/cmd_task.py,sha256=
+zrb/task/cmd_task.py,sha256=f1OWajOBmdtx2QcXBr_8s6o82Fp4UTLqCXJqp2gxwzU,10750
 zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
 zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-zrb/task/llm/
-zrb/task/llm/
-zrb/task/llm/
+zrb/task/llm/agent.py,sha256=2u1zlX41oBzMKozXWXD3gDEOzOsHNsFpedRcJXbUNHI,5105
+zrb/task/llm/config.py,sha256=fXasnGb_DVTJIeY--HZ8bf1jd7iCUNttfUDJB5PvHRk,3071
+zrb/task/llm/context.py,sha256=JAI1DqqiXlDkyL4aEXVyeutU8K5YfdSsWMbzx1rxMZU,3281
+zrb/task/llm/context_enrichment.py,sha256=zz2hmJZgXRa6354eTz8fAz3NclGgwD5VciWzD_mFTEM,4641
 zrb/task/llm/error.py,sha256=YOwnEdFMtqOlaiA83tDHpC6uh2_9r5NeS-inrlb5a8E,3622
-zrb/task/llm/history.py,sha256=
-zrb/task/llm/
+zrb/task/llm/history.py,sha256=LnrJdXLyo2qz-bNCwLorhoqGmgSiPTUU0bzY63w67-E,9257
+zrb/task/llm/history_summarization.py,sha256=UaeepcIVMTxJTwqy3V22rpeBXXN04KLvEzOsFtWmyDM,6259
 zrb/task/llm/print_node.py,sha256=Dkb0xFyEXpNRKFRCM4Md0lfg6K3nI0t8yH3Abh20PjE,4430
+zrb/task/llm/prompt.py,sha256=PmzeSeUBoqZqqHox1Kq8FkikvmB2lNA3hus-pybXAQg,2719
 zrb/task/llm/tool_wrapper.py,sha256=gZgoxcuOCgAVDPnLqfJ3ps57ZCVQi7q68z_KnS5Mx1U,3350
-zrb/task/
+zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
+zrb/task/llm_task.py,sha256=ZfIRryhpS3NsczIUVZBl-f2XWzJkCcvBFmU95v0q4t0,13808
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=GSL9144bmp6F0EckT6m-2a1xG25AzrrWYzH4k3SVUKM,6370
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -352,7 +362,7 @@ zrb/util/string/name.py,sha256=8picJfUBXNpdh64GNaHv3om23QHhUZux7DguFLrXHp8,1163
 zrb/util/todo.py,sha256=1nDdwPc22oFoK_1ZTXyf3638Bg6sqE2yp_U4_-frHoc,16015
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.5.
-zrb-1.5.
-zrb-1.5.
-zrb-1.5.
+zrb-1.5.9.dist-info/METADATA,sha256=jAqyRUhFM5nG5wTG_YW1QaL6nIlo1g2N95EoVACbWaU,8468
+zrb-1.5.9.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.5.9.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.5.9.dist-info/RECORD,,
zrb/task/llm/agent_runner.py
DELETED
@@ -1,53 +0,0 @@
-from typing import Any
-
-from openai import APIError
-from pydantic_ai import Agent
-from pydantic_ai.messages import ModelMessagesTypeAdapter
-
-from zrb.context.any_context import AnyContext
-from zrb.task.llm.error import extract_api_error_details
-from zrb.task.llm.history import ListOfDict
-from zrb.task.llm.print_node import print_node
-
-
-async def run_agent_iteration(
-    ctx: AnyContext,
-    agent: Agent,
-    user_prompt: str,
-    history_list: ListOfDict,
-) -> Any:
-    """
-    Runs a single iteration of the agent execution loop.
-
-    Args:
-        ctx: The task context.
-        agent: The Pydantic AI agent instance.
-        user_prompt: The user's input prompt.
-        history_list: The current conversation history.
-
-    Returns:
-        The agent run result object.
-
-    Raises:
-        Exception: If any error occurs during agent execution.
-    """
-    async with agent.run_mcp_servers():
-        async with agent.iter(
-            user_prompt=user_prompt,
-            message_history=ModelMessagesTypeAdapter.validate_python(history_list),
-        ) as agent_run:
-            async for node in agent_run:
-                # Each node represents a step in the agent's execution
-                # Reference: https://ai.pydantic.dev/agents/#streaming
-                try:
-                    await print_node(ctx.print, agent_run, node)
-                except APIError as e:
-                    # Extract detailed error information from the response
-                    error_details = extract_api_error_details(e)
-                    ctx.log_error(f"API Error: {error_details}")
-                    raise
-                except Exception as e:
-                    ctx.log_error(f"Error processing node: {str(e)}")
-                    ctx.log_error(f"Error type: {type(e).__name__}")
-                    raise
-    return agent_run
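
The deleted `run_agent_iteration` above is not gone: the new `zrb/task/llm_task.py` imports a function of the same name from `zrb/task/llm/agent.py` and calls it with the same keyword arguments, so call sites keep the shape shown in this fragment (assumes `ctx`, `agent`, `user_prompt`, and `history_list` already exist, as in the diff):

agent_run = await run_agent_iteration(
    ctx=ctx,
    agent=agent,
    user_prompt=user_prompt,
    history_list=history_list,
)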