agno-2.1.3-py3-none-any.whl → agno-2.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1779 -577
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/async_postgres/async_postgres.py +1668 -0
- agno/db/async_postgres/schemas.py +124 -0
- agno/db/async_postgres/utils.py +289 -0
- agno/db/base.py +237 -2
- agno/db/dynamo/dynamo.py +10 -8
- agno/db/dynamo/schemas.py +1 -10
- agno/db/dynamo/utils.py +2 -2
- agno/db/firestore/firestore.py +2 -2
- agno/db/firestore/utils.py +4 -2
- agno/db/gcs_json/gcs_json_db.py +2 -2
- agno/db/in_memory/in_memory_db.py +2 -2
- agno/db/json/json_db.py +2 -2
- agno/db/migrations/v1_to_v2.py +30 -13
- agno/db/mongo/mongo.py +18 -6
- agno/db/mysql/mysql.py +35 -13
- agno/db/postgres/postgres.py +29 -6
- agno/db/redis/redis.py +2 -2
- agno/db/singlestore/singlestore.py +2 -2
- agno/db/sqlite/sqlite.py +34 -12
- agno/db/sqlite/utils.py +8 -3
- agno/eval/accuracy.py +50 -43
- agno/eval/performance.py +6 -3
- agno/eval/reliability.py +6 -3
- agno/eval/utils.py +33 -16
- agno/exceptions.py +8 -2
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/knowledge.py +260 -46
- agno/knowledge/reader/pdf_reader.py +4 -6
- agno/knowledge/reader/reader_factory.py +2 -3
- agno/memory/manager.py +241 -33
- agno/models/anthropic/claude.py +37 -0
- agno/os/app.py +15 -10
- agno/os/interfaces/a2a/router.py +3 -5
- agno/os/interfaces/agui/router.py +4 -1
- agno/os/interfaces/agui/utils.py +33 -6
- agno/os/interfaces/slack/router.py +2 -4
- agno/os/mcp.py +98 -41
- agno/os/router.py +23 -0
- agno/os/routers/evals/evals.py +52 -20
- agno/os/routers/evals/utils.py +14 -14
- agno/os/routers/knowledge/knowledge.py +130 -9
- agno/os/routers/knowledge/schemas.py +57 -0
- agno/os/routers/memory/memory.py +116 -44
- agno/os/routers/metrics/metrics.py +16 -6
- agno/os/routers/session/session.py +65 -22
- agno/os/schema.py +38 -0
- agno/os/utils.py +69 -13
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/session/workflow.py +69 -1
- agno/team/team.py +934 -241
- agno/tools/function.py +36 -18
- agno/tools/google_drive.py +270 -0
- agno/tools/googlesheets.py +20 -5
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/scrapegraph.py +1 -1
- agno/utils/models/claude.py +3 -1
- agno/utils/print_response/workflow.py +112 -12
- agno/utils/streamlit.py +1 -1
- agno/vectordb/base.py +22 -1
- agno/vectordb/cassandra/cassandra.py +9 -0
- agno/vectordb/chroma/chromadb.py +26 -6
- agno/vectordb/clickhouse/clickhousedb.py +9 -1
- agno/vectordb/couchbase/couchbase.py +11 -0
- agno/vectordb/lancedb/lance_db.py +20 -0
- agno/vectordb/langchaindb/langchaindb.py +11 -0
- agno/vectordb/lightrag/lightrag.py +9 -0
- agno/vectordb/llamaindex/llamaindexdb.py +15 -1
- agno/vectordb/milvus/milvus.py +23 -0
- agno/vectordb/mongodb/mongodb.py +22 -0
- agno/vectordb/pgvector/pgvector.py +19 -0
- agno/vectordb/pineconedb/pineconedb.py +35 -4
- agno/vectordb/qdrant/qdrant.py +24 -0
- agno/vectordb/singlestore/singlestore.py +25 -17
- agno/vectordb/surrealdb/surrealdb.py +18 -1
- agno/vectordb/upstashdb/upstashdb.py +26 -1
- agno/vectordb/weaviate/weaviate.py +18 -0
- agno/workflow/condition.py +29 -0
- agno/workflow/loop.py +29 -0
- agno/workflow/parallel.py +141 -113
- agno/workflow/router.py +29 -0
- agno/workflow/step.py +146 -25
- agno/workflow/steps.py +29 -0
- agno/workflow/types.py +26 -1
- agno/workflow/workflow.py +507 -22
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/METADATA +100 -41
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/RECORD +94 -86
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/WHEEL +0 -0
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/top_level.txt +0 -0
agno/workflow/loop.py
CHANGED
@@ -13,6 +13,7 @@ from agno.run.workflow import (
     WorkflowRunOutput,
     WorkflowRunOutputEvent,
 )
+from agno.session.workflow import WorkflowSession
 from agno.utils.log import log_debug, logger
 from agno.workflow.step import Step
 from agno.workflow.types import StepInput, StepOutput, StepType
@@ -132,6 +133,9 @@ class Loop:
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> StepOutput:
         """Execute loop steps with iteration control - mirrors workflow execution logic"""
         # Use workflow logger for loop orchestration
@@ -157,6 +161,9 @@ class Loop:
                     workflow_run_response=workflow_run_response,
                     store_executor_outputs=store_executor_outputs,
                     session_state=session_state,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
                 )

                 # Handle both single StepOutput and List[StepOutput] (from Loop/Condition steps)
@@ -220,11 +227,15 @@ class Loop:
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
         """Execute loop steps with streaming support - mirrors workflow execution logic"""
         log_debug(f"Loop Start: {self.name}", center=True, symbol="=")
@@ -292,11 +303,15 @@ class Loop:
                     session_id=session_id,
                     user_id=user_id,
                     stream_intermediate_steps=stream_intermediate_steps,
+                    stream_executor_events=stream_executor_events,
                     workflow_run_response=workflow_run_response,
                     step_index=composite_step_index,
                     store_executor_outputs=store_executor_outputs,
                     session_state=session_state,
                     parent_step_id=loop_step_id,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    workflow_session=workflow_session,
+                    num_history_runs=num_history_runs,
                 ):
                     if isinstance(event, StepOutput):
                         step_outputs_for_iteration.append(event)
@@ -410,6 +425,9 @@ class Loop:
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> StepOutput:
         """Execute loop steps asynchronously with iteration control - mirrors workflow execution logic"""
         # Use workflow logger for async loop orchestration
@@ -437,6 +455,9 @@ class Loop:
                     workflow_run_response=workflow_run_response,
                     store_executor_outputs=store_executor_outputs,
                     session_state=session_state,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
                 )

                 # Handle both single StepOutput and List[StepOutput] (from Loop/Condition steps)
@@ -503,11 +524,15 @@ class Loop:
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
         """Execute loop steps with async streaming support - mirrors workflow execution logic"""
         log_debug(f"Loop Start: {self.name}", center=True, symbol="=")
@@ -575,11 +600,15 @@ class Loop:
                     session_id=session_id,
                     user_id=user_id,
                     stream_intermediate_steps=stream_intermediate_steps,
+                    stream_executor_events=stream_executor_events,
                     workflow_run_response=workflow_run_response,
                     step_index=composite_step_index,
                     store_executor_outputs=store_executor_outputs,
                     session_state=session_state,
                     parent_step_id=loop_step_id,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
                 ):
                     if isinstance(event, StepOutput):
                         step_outputs_for_iteration.append(event)
agno/workflow/parallel.py
CHANGED
@@ -14,6 +14,7 @@ from agno.run.workflow import (
     WorkflowRunOutput,
     WorkflowRunOutputEvent,
 )
+from agno.session.workflow import WorkflowSession
 from agno.utils.log import log_debug, logger
 from agno.utils.merge_dict import merge_parallel_session_states
 from agno.workflow.condition import Condition
@@ -200,6 +201,9 @@ class Parallel:
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> StepOutput:
         """Execute all steps in parallel and return aggregated result"""
         # Use workflow logger for parallel orchestration
@@ -228,6 +232,9 @@ class Parallel:
                 user_id=user_id,
                 workflow_run_response=workflow_run_response,
                 store_executor_outputs=store_executor_outputs,
+                workflow_session=workflow_session,
+                add_workflow_history_to_steps=add_workflow_history_to_steps,
+                num_history_runs=num_history_runs,
                 session_state=step_session_state,
             )  # type: ignore[union-attr]
             return idx, step_result, step_session_state
@@ -310,11 +317,15 @@ class Parallel:
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
         """Execute all steps in parallel with streaming support"""
         log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
@@ -345,14 +356,20 @@ class Parallel:
             parent_step_id=parent_step_id,
         )

+        import queue
+
+        event_queue = queue.Queue()  # type: ignore
+        step_results = []
+        modified_session_states = []
+
         def execute_step_stream_with_index(step_with_index):
-            """Execute a single step with streaming and
+            """Execute a single step with streaming and put events in queue immediately"""
             idx, step = step_with_index
             # Use the individual session_state copy for this step
             step_session_state = session_state_copies[idx]

             try:
-
+                step_outputs = []

                 # If step_index is None or integer (main step): create (step_index, sub_index)
                 # If step_index is tuple (child step): all parallel sub-steps get same index
@@ -369,84 +386,87 @@ class Parallel:
                     session_id=session_id,
                     user_id=user_id,
                     stream_intermediate_steps=stream_intermediate_steps,
+                    stream_executor_events=stream_executor_events,
                     workflow_run_response=workflow_run_response,
                     step_index=sub_step_index,
                     store_executor_outputs=store_executor_outputs,
                     session_state=step_session_state,
                     parent_step_id=parallel_step_id,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
                 ):
-
-
+                    # Put event immediately in queue
+                    event_queue.put(("event", idx, event))
+                    if isinstance(event, StepOutput):
+                        step_outputs.append(event)
+
+                # Signal completion for this step
+                event_queue.put(("complete", idx, step_outputs, step_session_state))
+                return idx, step_outputs, step_session_state
             except Exception as exc:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} streaming failed: {exc}")
-                return (
-                    idx,
-                    [
-                        StepOutput(
-                            step_name=parallel_step_name,
-                            content=f"Step {parallel_step_name} failed: {str(exc)}",
-                            success=False,
-                            error=str(exc),
-                        )
-                    ],
-                    step_session_state,
+                error_event = StepOutput(
+                    step_name=parallel_step_name,
+                    content=f"Step {parallel_step_name} failed: {str(exc)}",
+                    success=False,
+                    error=str(exc),
                 )
+                event_queue.put(("event", idx, error_event))
+                event_queue.put(("complete", idx, [error_event], step_session_state))
+                return idx, [error_event], step_session_state

-        #
+        # Submit all parallel tasks
         indexed_steps = list(enumerate(self.steps))
-        all_events_with_indices = []
-        step_results = []
-        modified_session_states = []

         with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
-            # Submit all tasks
-            future_to_index = {
-                executor.submit(execute_step_stream_with_index, indexed_step): indexed_step[0]
-                for indexed_step in indexed_steps
-            }
+            # Submit all tasks
+            futures = [executor.submit(execute_step_stream_with_index, indexed_step) for indexed_step in indexed_steps]

-            #
-
+            # Process events from queue as they arrive
+            completed_steps = 0
+            total_steps = len(self.steps)
+
+            while completed_steps < total_steps:
                 try:
-
-
-
+                    message_type, step_idx, *data = event_queue.get(timeout=1.0)
+
+                    if message_type == "event":
+                        event = data[0]
+                        # Yield events immediately as they arrive (except StepOutputs)
+                        if not isinstance(event, StepOutput):
+                            yield event

-
-
-                    if step_outputs:
+                    elif message_type == "complete":
+                        step_outputs, step_session_state = data
                         step_results.extend(step_outputs)
+                        modified_session_states.append(step_session_state)
+                        completed_steps += 1
+
+                        step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
+                        log_debug(f"Parallel step {step_name} streaming completed")
+
+                except queue.Empty:
+                    for i, future in enumerate(futures):
+                        if future.done() and future.exception():
+                            logger.error(f"Parallel step {i} failed: {future.exception()}")
+                            if completed_steps < total_steps:
+                                completed_steps += 1
                 except Exception as e:
-
-                    step_name = getattr(self.steps[index], "name", f"step_{index}")
-                    logger.error(f"Parallel step {step_name} streaming failed: {e}")
-                    error_event = StepOutput(
-                        step_name=step_name,
-                        content=f"Step {step_name} failed: {str(e)}",
-                        success=False,
-                        error=str(e),
-                    )
-                    all_events_with_indices.append((index, [error_event]))
-                    step_results.append(error_event)
+                    logger.error(f"Error processing parallel step events: {e}")
+                    completed_steps += 1

-
-
+            for future in futures:
+                try:
+                    future.result()
                 except Exception as e:
-
-
+                    logger.error(f"Future completion error: {e}")

         # Merge all session_state changes back into the original session_state
         if session_state is not None:
             merge_parallel_session_states(session_state, modified_session_states)

-        # Sort events by original index to preserve order
-        all_events_with_indices.sort(key=lambda x: x[0])
-
-        # Yield all collected streaming events in order (but not final StepOutputs)
-        for _, events in all_events_with_indices:
-            for event in events:
-                # Only yield non-StepOutput events during streaming to avoid duplication
-                if not isinstance(event, StepOutput):
-                    yield event
-
         # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
         flattened_step_results: List[StepOutput] = []
         for result in step_results:
@@ -473,7 +493,7 @@ class Parallel:
             step_name=self.name,
             step_index=step_index,
             parallel_step_count=len(self.steps),
-            step_results=
+            step_results=flattened_step_results,
             step_id=parallel_step_id,
             parent_step_id=parent_step_id,
         )
@@ -486,6 +506,9 @@ class Parallel:
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> StepOutput:
         """Execute all steps in parallel using asyncio and return aggregated result"""
         # Use workflow logger for async parallel orchestration
@@ -514,6 +537,9 @@ class Parallel:
                 user_id=user_id,
                 workflow_run_response=workflow_run_response,
                 store_executor_outputs=store_executor_outputs,
+                workflow_session=workflow_session,
+                add_workflow_history_to_steps=add_workflow_history_to_steps,
+                num_history_runs=num_history_runs,
                 session_state=step_session_state,
             )  # type: ignore[union-attr]
             return idx, inner_step_result, step_session_state
@@ -597,11 +623,15 @@ class Parallel:
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
         session_state: Optional[Dict[str, Any]] = None,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
         """Execute all steps in parallel with async streaming support"""
         log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
@@ -632,14 +662,20 @@ class Parallel:
             parent_step_id=parent_step_id,
         )

+        import asyncio
+
+        event_queue = asyncio.Queue()  # type: ignore
+        step_results = []
+        modified_session_states = []
+
         async def execute_step_stream_async_with_index(step_with_index):
-            """Execute a single step with async streaming and
+            """Execute a single step with async streaming and yield events immediately"""
             idx, step = step_with_index
             # Use the individual session_state copy for this step
             step_session_state = session_state_copies[idx]

             try:
-
+                step_outputs = []

                 # If step_index is None or integer (main step): create (step_index, sub_index)
                 # If step_index is tuple (child step): all parallel sub-steps get same index
@@ -656,83 +692,75 @@ class Parallel:
                     session_id=session_id,
                     user_id=user_id,
                     stream_intermediate_steps=stream_intermediate_steps,
+                    stream_executor_events=stream_executor_events,
                     workflow_run_response=workflow_run_response,
                     step_index=sub_step_index,
                     store_executor_outputs=store_executor_outputs,
                     session_state=step_session_state,
                     parent_step_id=parallel_step_id,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
                 ):  # type: ignore[union-attr]
-
-
+                    # Yield events immediately to the queue
+                    await event_queue.put(("event", idx, event))
+                    if isinstance(event, StepOutput):
+                        step_outputs.append(event)
+
+                # Signal completion for this step
+                await event_queue.put(("complete", idx, step_outputs, step_session_state))
+                return idx, step_outputs, step_session_state
             except Exception as e:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} async streaming failed: {e}")
-                return (
-                    idx,
-                    [
-                        StepOutput(
-                            step_name=parallel_step_name,
-                            content=f"Step {parallel_step_name} failed: {str(e)}",
-                            success=False,
-                            error=str(e),
-                        )
-                    ],
-                    step_session_state,
+                error_event = StepOutput(
+                    step_name=parallel_step_name,
+                    content=f"Step {parallel_step_name} failed: {str(e)}",
+                    success=False,
+                    error=str(e),
                 )
+                await event_queue.put(("event", idx, error_event))
+                await event_queue.put(("complete", idx, [error_event], step_session_state))
+                return idx, [error_event], step_session_state

-        #
+        # Start all parallel tasks
         indexed_steps = list(enumerate(self.steps))
-
-
-
+        tasks = [
+            asyncio.create_task(execute_step_stream_async_with_index(indexed_step)) for indexed_step in indexed_steps
+        ]

-        #
-
+        # Process events as they arrive and track completion
+        completed_steps = 0
+        total_steps = len(self.steps)

-
-
+        while completed_steps < total_steps:
+            try:
+                message_type, step_idx, *data = await event_queue.get()

-
-
-
-
-                logger.error(f"Parallel step {step_name} async streaming failed: {result}")
-                error_event = StepOutput(
-                    step_name=step_name,
-                    content=f"Step {step_name} failed: {str(result)}",
-                    success=False,
-                    error=str(result),
-                )
-                all_events_with_indices.append((i, [error_event]))
-                step_results.append(error_event)
-                modified_session_states.append(session_state_copies[i])
-            else:
-                index, events, modified_session_state = result  # type: ignore[misc]
-                all_events_with_indices.append((index, events))
-                modified_session_states.append(modified_session_state)
+                if message_type == "event":
+                    event = data[0]
+                    if not isinstance(event, StepOutput):
+                        yield event

-
-
-                if step_outputs:
+                elif message_type == "complete":
+                    step_outputs, step_session_state = data
                     step_results.extend(step_outputs)
+                    modified_session_states.append(step_session_state)
+                    completed_steps += 1

-
-
+                    step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
+                    log_debug(f"Parallel step {step_name} async streaming completed")
+
+            except Exception as e:
+                logger.error(f"Error processing parallel step events: {e}")
+                completed_steps += 1
+
+        await asyncio.gather(*tasks, return_exceptions=True)

         # Merge all session_state changes back into the original session_state
         if session_state is not None:
             merge_parallel_session_states(session_state, modified_session_states)

-        # Sort events by original index to preserve order
-        all_events_with_indices.sort(key=lambda x: x[0])
-
-        # Yield all collected streaming events in order (but not final StepOutputs)
-        for _, events in all_events_with_indices:
-            for event in events:
-                # Only yield non-StepOutput events during streaming to avoid duplication
-                if not isinstance(event, StepOutput):
-                    yield event
-
         # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
         flattened_step_results: List[StepOutput] = []
         for result in step_results:
@@ -759,7 +787,7 @@ class Parallel:
             step_name=self.name,
             step_index=step_index,
             parallel_step_count=len(self.steps),
-            step_results=
+            step_results=flattened_step_results,
             step_id=parallel_step_id,
             parent_step_id=parent_step_id,
         )
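
The streaming refactor here is the substantive change in this file: previously each parallel step buffered its events, and only after all futures resolved were events sorted by step index and replayed. Now each worker pushes ("event", idx, event) tuples into a shared queue the moment they occur, plus a final ("complete", ...) sentinel, while the consumer loop yields events as they arrive and exits once every step has signalled completion. A minimal standalone sketch of that queue fan-in (not agno's code; names are illustrative):

```python
import queue
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Iterator, List, Sequence


def stream_parallel(tasks: Sequence[Callable[[], List[Any]]]) -> Iterator[Any]:
    """Yield events from all tasks as they arrive, not in task order."""
    event_queue: "queue.Queue[tuple]" = queue.Queue()

    def run(idx: int, task: Callable[[], List[Any]]) -> None:
        try:
            for event in task():
                event_queue.put(("event", idx, event))  # forward immediately
        except Exception as exc:  # errors become events, never lost
            event_queue.put(("event", idx, exc))
        finally:
            event_queue.put(("complete", idx))  # completion sentinel

    with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
        for idx, task in enumerate(tasks):
            executor.submit(run, idx, task)

        # Consume until every worker has posted its sentinel.
        remaining = len(tasks)
        while remaining:
            kind, _idx, *payload = event_queue.get()
            if kind == "event":
                yield payload[0]
            else:  # "complete"
                remaining -= 1


if __name__ == "__main__":
    def slow() -> List[Any]:
        return ["slow-1", "slow-2"]

    def fast() -> List[Any]:
        return ["fast-1"]

    for e in stream_parallel([slow, fast]):
        print(e)
```

The trade-off is ordering: the old code preserved step order at the cost of buffering everything until the slowest step finished, while the queue makes the stream real-time but interleaves events by arrival.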
agno/workflow/router.py
CHANGED
@@ -11,6 +11,7 @@ from agno.run.workflow import (
     WorkflowRunOutput,
     WorkflowRunOutputEvent,
 )
+from agno.session.workflow import WorkflowSession
 from agno.utils.log import log_debug, logger
 from agno.workflow.step import Step
 from agno.workflow.types import StepInput, StepOutput, StepType
@@ -150,6 +151,9 @@ class Router:
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         session_state: Optional[Dict[str, Any]] = None,
         store_executor_outputs: bool = True,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> StepOutput:
         """Execute the router and its selected steps with sequential chaining"""
         log_debug(f"Router Start: {self.name}", center=True, symbol="-")
@@ -184,6 +188,9 @@ class Router:
                 workflow_run_response=workflow_run_response,
                 store_executor_outputs=store_executor_outputs,
                 session_state=session_state,
+                workflow_session=workflow_session,
+                add_workflow_history_to_steps=add_workflow_history_to_steps,
+                num_history_runs=num_history_runs,
             )

             # Handle both single StepOutput and List[StepOutput]
@@ -239,10 +246,14 @@ class Router:
         user_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
         """Execute the router with streaming support"""
         log_debug(f"Router Start: {self.name}", center=True, symbol="-")
@@ -300,11 +311,15 @@ class Router:
                 session_id=session_id,
                 user_id=user_id,
                 stream_intermediate_steps=stream_intermediate_steps,
+                stream_executor_events=stream_executor_events,
                 workflow_run_response=workflow_run_response,
                 step_index=step_index,
                 store_executor_outputs=store_executor_outputs,
                 session_state=session_state,
                 parent_step_id=router_step_id,
+                workflow_session=workflow_session,
+                add_workflow_history_to_steps=add_workflow_history_to_steps,
+                num_history_runs=num_history_runs,
             ):
                 if isinstance(event, StepOutput):
                     step_outputs_for_step.append(event)
@@ -386,6 +401,9 @@ class Router:
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         session_state: Optional[Dict[str, Any]] = None,
         store_executor_outputs: bool = True,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> StepOutput:
         """Async execute the router and its selected steps with sequential chaining"""
         log_debug(f"Router Start: {self.name}", center=True, symbol="-")
@@ -421,6 +439,9 @@ class Router:
                 workflow_run_response=workflow_run_response,
                 store_executor_outputs=store_executor_outputs,
                 session_state=session_state,
+                workflow_session=workflow_session,
+                add_workflow_history_to_steps=add_workflow_history_to_steps,
+                num_history_runs=num_history_runs,
             )
             # Handle both single StepOutput and List[StepOutput]
             if isinstance(step_output, list):
@@ -478,10 +499,14 @@ class Router:
         user_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
     ) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
         """Async execute the router with streaming support"""
         log_debug(f"Router Start: {self.name}", center=True, symbol="-")
@@ -541,11 +566,15 @@ class Router:
                 session_id=session_id,
                 user_id=user_id,
                 stream_intermediate_steps=stream_intermediate_steps,
+                stream_executor_events=stream_executor_events,
                 workflow_run_response=workflow_run_response,
                 step_index=step_index,
                 store_executor_outputs=store_executor_outputs,
                 session_state=session_state,
                 parent_step_id=router_step_id,
+                workflow_session=workflow_session,
+                add_workflow_history_to_steps=add_workflow_history_to_steps,
+                num_history_runs=num_history_runs,
             ):
                 if isinstance(event, StepOutput):
                     step_outputs_for_step.append(event)