agno 2.1.0__py3-none-any.whl → 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +13 -1
- agno/db/base.py +8 -4
- agno/db/dynamo/dynamo.py +69 -17
- agno/db/firestore/firestore.py +68 -29
- agno/db/gcs_json/gcs_json_db.py +68 -17
- agno/db/in_memory/in_memory_db.py +83 -14
- agno/db/json/json_db.py +79 -15
- agno/db/mongo/mongo.py +92 -74
- agno/db/mysql/mysql.py +17 -3
- agno/db/postgres/postgres.py +21 -3
- agno/db/redis/redis.py +38 -11
- agno/db/singlestore/singlestore.py +14 -3
- agno/db/sqlite/sqlite.py +34 -46
- agno/db/utils.py +50 -22
- agno/knowledge/knowledge.py +6 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +294 -0
- agno/knowledge/reader/pdf_reader.py +28 -52
- agno/knowledge/reader/reader_factory.py +12 -0
- agno/memory/manager.py +12 -4
- agno/models/anthropic/claude.py +4 -1
- agno/models/aws/bedrock.py +52 -112
- agno/models/openai/responses.py +1 -1
- agno/os/app.py +24 -30
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +252 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/agui.py +21 -5
- agno/os/interfaces/agui/router.py +12 -0
- agno/os/interfaces/base.py +4 -2
- agno/os/interfaces/slack/slack.py +13 -8
- agno/os/interfaces/whatsapp/whatsapp.py +12 -5
- agno/os/mcp.py +1 -1
- agno/os/router.py +39 -9
- agno/os/routers/memory/memory.py +5 -3
- agno/os/routers/memory/schemas.py +1 -0
- agno/os/utils.py +36 -10
- agno/run/base.py +2 -13
- agno/team/team.py +13 -1
- agno/tools/mcp.py +46 -1
- agno/utils/merge_dict.py +22 -1
- agno/utils/serialize.py +32 -0
- agno/utils/streamlit.py +1 -1
- agno/workflow/parallel.py +90 -14
- agno/workflow/step.py +30 -27
- agno/workflow/types.py +4 -6
- agno/workflow/workflow.py +5 -3
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/METADATA +16 -14
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/RECORD +53 -47
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/WHEEL +0 -0
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/top_level.txt +0 -0
agno/workflow/parallel.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 import asyncio
 from concurrent.futures import ThreadPoolExecutor, as_completed
+from copy import deepcopy
 from dataclasses import dataclass
 from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
 from uuid import uuid4
@@ -13,6 +14,7 @@ from agno.run.workflow import (
     WorkflowRunOutput,
     WorkflowRunOutputEvent,
 )
+from agno.utils.merge_dict import merge_parallel_session_states
 from agno.utils.log import log_debug, logger
 from agno.workflow.condition import Condition
 from agno.workflow.step import Step
@@ -205,9 +207,20 @@ class Parallel:
 
         self._prepare_steps()
 
+        # Create individual session_state copies for each step to prevent race conditions
+        session_state_copies = []
+        for _ in range(len(self.steps)):
+            if session_state is not None:
+                session_state_copies.append(deepcopy(session_state))
+            else:
+                session_state_copies.append({})
+
         def execute_step_with_index(step_with_index):
             """Execute a single step and preserve its original index"""
             idx, step = step_with_index
+            # Use the individual session_state copy for this step
+            step_session_state = session_state_copies[idx]
+
             try:
                 step_result = step.execute(
                     step_input,
@@ -215,9 +228,9 @@
                     user_id=user_id,
                     workflow_run_response=workflow_run_response,
                     store_executor_outputs=store_executor_outputs,
-                    session_state=
+                    session_state=step_session_state,
                 )  # type: ignore[union-attr]
-                return idx, step_result
+                return idx, step_result, step_session_state
             except Exception as exc:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} failed: {exc}")
@@ -229,6 +242,7 @@
                         success=False,
                         error=str(exc),
                     ),
+                    step_session_state,
                 )
 
         # Use index to preserve order
@@ -241,12 +255,14 @@
                 for indexed_step in indexed_steps
             }
 
-            # Collect results
+            # Collect results and modified session_state copies
             results_with_indices = []
+            modified_session_states = []
             for future in as_completed(future_to_index):
                 try:
-                    index, result = future.result()
+                    index, result, modified_session_state = future.result()
                     results_with_indices.append((index, result))
+                    modified_session_states.append(modified_session_state)
                     step_name = getattr(self.steps[index], "name", f"step_{index}")
                     log_debug(f"Parallel step {step_name} completed")
                 except Exception as e:
@@ -265,6 +281,9 @@
                         )
                     )
 
+        if session_state is not None:
+            merge_parallel_session_states(session_state, modified_session_states)
+
         # Sort by original index to preserve order
         results_with_indices.sort(key=lambda x: x[0])
         results = [result for _, result in results_with_indices]
@@ -304,6 +323,14 @@
 
         self._prepare_steps()
 
+        # Create individual session_state copies for each step to prevent race conditions
+        session_state_copies = []
+        for _ in range(len(self.steps)):
+            if session_state is not None:
+                session_state_copies.append(deepcopy(session_state))
+            else:
+                session_state_copies.append({})
+
         if stream_intermediate_steps and workflow_run_response:
             # Yield parallel step started event
             yield ParallelExecutionStartedEvent(
@@ -321,6 +348,9 @@
         def execute_step_stream_with_index(step_with_index):
             """Execute a single step with streaming and preserve its original index"""
             idx, step = step_with_index
+            # Use the individual session_state copy for this step
+            step_session_state = session_state_copies[idx]
+
             try:
                 step_events = []
 
@@ -342,11 +372,11 @@
                     workflow_run_response=workflow_run_response,
                     step_index=sub_step_index,
                     store_executor_outputs=store_executor_outputs,
-                    session_state=
+                    session_state=step_session_state,
                     parent_step_id=parallel_step_id,
                 ):
                     step_events.append(event)
-                return idx, step_events
+                return idx, step_events, step_session_state
             except Exception as exc:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} streaming failed: {exc}")
@@ -360,12 +390,14 @@
                             error=str(exc),
                         )
                     ],
+                    step_session_state,
                 )
 
         # Use index to preserve order
         indexed_steps = list(enumerate(self.steps))
         all_events_with_indices = []
         step_results = []
+        modified_session_states = []
 
         with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
             # Submit all tasks with their original indices
@@ -374,11 +406,12 @@
                 for indexed_step in indexed_steps
             }
 
-            # Collect results
+            # Collect results and modified session_state copies
             for future in as_completed(future_to_index):
                 try:
-                    index, events = future.result()
+                    index, events, modified_session_state = future.result()
                     all_events_with_indices.append((index, events))
+                    modified_session_states.append(modified_session_state)
 
                     # Extract StepOutput from events for the final result
                     step_outputs = [event for event in events if isinstance(event, StepOutput)]
@@ -400,6 +433,10 @@
                     all_events_with_indices.append((index, [error_event]))
                     step_results.append(error_event)
 
+        # Merge all session_state changes back into the original session_state
+        if session_state is not None:
+            merge_parallel_session_states(session_state, modified_session_states)
+
         # Sort events by original index to preserve order
         all_events_with_indices.sort(key=lambda x: x[0])
 
@@ -456,9 +493,20 @@
 
         self._prepare_steps()
 
+        # Create individual session_state copies for each step to prevent race conditions
+        session_state_copies = []
+        for _ in range(len(self.steps)):
+            if session_state is not None:
+                session_state_copies.append(deepcopy(session_state))
+            else:
+                session_state_copies.append({})
+
         async def execute_step_async_with_index(step_with_index):
             """Execute a single step asynchronously and preserve its original index"""
             idx, step = step_with_index
+            # Use the individual session_state copy for this step
+            step_session_state = session_state_copies[idx]
+
             try:
                 inner_step_result = await step.aexecute(
                     step_input,
@@ -466,9 +514,9 @@
                     user_id=user_id,
                     workflow_run_response=workflow_run_response,
                     store_executor_outputs=store_executor_outputs,
-                    session_state=
+                    session_state=step_session_state,
                 )  # type: ignore[union-attr]
-                return idx, inner_step_result
+                return idx, inner_step_result, step_session_state
             except Exception as exc:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} failed: {exc}")
@@ -480,6 +528,7 @@
                         success=False,
                         error=str(exc),
                     ),
+                    step_session_state,
                 )
 
         # Use index to preserve order
@@ -493,6 +542,7 @@
 
         # Process results and handle exceptions, preserving order
         processed_results_with_indices = []
+        modified_session_states = []
         for i, result in enumerate(results_with_indices):
             if isinstance(result, Exception):
                 step_name = getattr(self.steps[i], "name", f"step_{i}")
@@ -508,12 +558,19 @@
                         ),
                     )
                 )
+                # Still collect the session state copy for failed steps
+                modified_session_states.append(session_state_copies[i])
             else:
-                index, step_result = result  # type: ignore[misc]
+                index, step_result, modified_session_state = result  # type: ignore[misc]
                 processed_results_with_indices.append((index, step_result))
+                modified_session_states.append(modified_session_state)
                 step_name = getattr(self.steps[index], "name", f"step_{index}")
                 log_debug(f"Parallel step {step_name} completed")
 
+        # Smart merge all session_state changes back into the original session_state
+        if session_state is not None:
+            merge_parallel_session_states(session_state, modified_session_states)
+
         # Sort by original index to preserve order
         processed_results_with_indices.sort(key=lambda x: x[0])
         results = [result for _, result in processed_results_with_indices]
@@ -553,6 +610,14 @@
 
         self._prepare_steps()
 
+        # Create individual session_state copies for each step to prevent race conditions
+        session_state_copies = []
+        for _ in range(len(self.steps)):
+            if session_state is not None:
+                session_state_copies.append(deepcopy(session_state))
+            else:
+                session_state_copies.append({})
+
         if stream_intermediate_steps and workflow_run_response:
             # Yield parallel step started event
             yield ParallelExecutionStartedEvent(
@@ -570,6 +635,9 @@
         async def execute_step_stream_async_with_index(step_with_index):
             """Execute a single step with async streaming and preserve its original index"""
             idx, step = step_with_index
+            # Use the individual session_state copy for this step
+            step_session_state = session_state_copies[idx]
+
             try:
                 step_events = []
 
@@ -591,11 +659,11 @@
                     workflow_run_response=workflow_run_response,
                     step_index=sub_step_index,
                     store_executor_outputs=store_executor_outputs,
-                    session_state=
+                    session_state=step_session_state,
                     parent_step_id=parallel_step_id,
                 ):  # type: ignore[union-attr]
                     step_events.append(event)
-                return idx, step_events
+                return idx, step_events, step_session_state
             except Exception as e:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} async streaming failed: {e}")
@@ -609,12 +677,14 @@
                             error=str(e),
                         )
                     ],
+                    step_session_state,
                 )
 
         # Use index to preserve order
         indexed_steps = list(enumerate(self.steps))
         all_events_with_indices = []
         step_results = []
+        modified_session_states = []
 
         # Create tasks for all steps with their indices
         tasks = [execute_step_stream_async_with_index(indexed_step) for indexed_step in indexed_steps]
@@ -635,9 +705,11 @@
                 )
                 all_events_with_indices.append((i, [error_event]))
                 step_results.append(error_event)
+                modified_session_states.append(session_state_copies[i])
             else:
-                index, events = result  # type: ignore[misc]
+                index, events, modified_session_state = result  # type: ignore[misc]
                 all_events_with_indices.append((index, events))
+                modified_session_states.append(modified_session_state)
 
                 # Extract StepOutput from events for the final result
                 step_outputs = [event for event in events if isinstance(event, StepOutput)]
@@ -647,6 +719,10 @@
                 step_name = getattr(self.steps[index], "name", f"step_{index}")
                 log_debug(f"Parallel step {step_name} async streaming completed")
 
+        # Merge all session_state changes back into the original session_state
+        if session_state is not None:
+            merge_parallel_session_states(session_state, modified_session_states)
+
         # Sort events by original index to preserve order
         all_events_with_indices.sort(key=lambda x: x[0])
 
```
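In plain terms, the parallel.py change gives every parallel branch its own deepcopy of session_state, threads that copy through the step call and its return value, and merges the modified copies back into the caller's dict with merge_parallel_session_states once all branches complete. A minimal, self-contained sketch of that copy-then-merge pattern follows; the merge helper here is a simplified stand-in, not agno's actual merge logic:

```python
# Sketch (not the agno implementation) of the pattern introduced in parallel.py:
# each branch mutates a private deep copy, and copies are merged back at the end.
from concurrent.futures import ThreadPoolExecutor, as_completed
from copy import deepcopy
from typing import Any, Callable, Dict, List


def merge_parallel_states(original: Dict[str, Any], modified_copies: List[Dict[str, Any]]) -> None:
    # Simplified stand-in for agno.utils.merge_dict.merge_parallel_session_states:
    # here, later copies simply win on key conflicts.
    for state_copy in modified_copies:
        original.update(state_copy)


def run_parallel(steps: List[Callable[[Dict[str, Any]], None]], session_state: Dict[str, Any]) -> None:
    # One private deep copy per step so no branch mutates the shared dict concurrently.
    state_copies = [deepcopy(session_state) for _ in steps]
    with ThreadPoolExecutor(max_workers=len(steps)) as executor:
        futures = {executor.submit(step, state_copies[i]): i for i, step in enumerate(steps)}
        for future in as_completed(futures):
            future.result()  # re-raise any exception from the branch
    # Merge the per-branch changes back once everything has finished.
    merge_parallel_states(session_state, state_copies)


if __name__ == "__main__":
    state: Dict[str, Any] = {"counter": 0}
    run_parallel([lambda s: s.update(a=1), lambda s: s.update(b=2)], state)
    print(state)  # {'counter': 0, 'a': 1, 'b': 2}
```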
agno/workflow/step.py
CHANGED
```diff
@@ -211,6 +211,8 @@ class Step:
         if step_input.previous_step_outputs:
             step_input.previous_step_content = step_input.get_last_step_content()
 
+        session_state_copy = copy(session_state) if session_state is not None else {}
+
         # Execute with retries
         for attempt in range(self.max_retries + 1):
             try:
@@ -223,7 +225,6 @@
                     if inspect.isgeneratorfunction(self.active_executor):
                         content = ""
                         final_response = None
-                        session_state_copy = copy(session_state) if session_state else None
                         try:
                             for chunk in self._call_custom_function(
                                 self.active_executor, step_input, session_state_copy
@@ -244,7 +245,7 @@
                             final_response = e.value
 
                         # Merge session_state changes back
-                        if
+                        if session_state is not None:
                             merge_dictionaries(session_state, session_state_copy)
 
                         if final_response is not None:
@@ -253,11 +254,10 @@
                             response = StepOutput(content=content)
                     else:
                         # Execute function with signature inspection for session_state support
-                        session_state_copy = copy(session_state) if session_state else None
                         result = self._call_custom_function(self.active_executor, step_input, session_state_copy)  # type: ignore
 
                         # Merge session_state changes back
-                        if
+                        if session_state is not None:
                             merge_dictionaries(session_state, session_state_copy)
 
                         # If function returns StepOutput, use it directly
@@ -292,7 +292,6 @@
                     if isinstance(self.active_executor, Team):
                         kwargs["store_member_responses"] = True
 
-                    session_state_copy = copy(session_state)
                     response = self.active_executor.run(  # type: ignore
                         input=message,  # type: ignore
                         images=images,
@@ -305,8 +304,9 @@
                         **kwargs,
                     )
 
-
-
+                    if session_state is not None:
+                        # Update workflow session state
+                        merge_dictionaries(session_state, session_state_copy)  # type: ignore
 
                     if store_executor_outputs and workflow_run_response is not None:
                         self._store_executor_response(workflow_run_response, response)  # type: ignore
@@ -365,6 +365,9 @@
         if step_input.previous_step_outputs:
             step_input.previous_step_content = step_input.get_last_step_content()
 
+        # Create session_state copy once to avoid duplication
+        session_state_copy = copy(session_state) if session_state is not None else {}
+
         # Emit StepStartedEvent
         if stream_intermediate_steps and workflow_run_response:
             yield StepStartedEvent(
@@ -395,7 +398,6 @@
                     if inspect.isgeneratorfunction(self.active_executor):
                         log_debug("Function returned iterable, streaming events")
                         content = ""
-                        session_state_copy = copy(session_state) if session_state else None
                         try:
                             iterator = self._call_custom_function(self.active_executor, step_input, session_state_copy)  # type: ignore
                             for event in iterator:  # type: ignore
@@ -414,7 +416,7 @@
                                     yield event  # type: ignore[misc]
 
                         # Merge session_state changes back
-                        if
+                        if session_state is not None:
                             merge_dictionaries(session_state, session_state_copy)
 
                         if not final_response:
@@ -424,11 +426,10 @@
                             final_response = e.value
 
                     else:
-                        session_state_copy = copy(session_state) if session_state else None
                         result = self._call_custom_function(self.active_executor, step_input, session_state_copy)  # type: ignore
 
                         # Merge session_state changes back
-                        if
+                        if session_state is not None:
                             merge_dictionaries(session_state, session_state_copy)
 
                         if isinstance(result, StepOutput):
@@ -462,7 +463,6 @@
                     if isinstance(self.active_executor, Team):
                         kwargs["store_member_responses"] = True
 
-                    session_state_copy = copy(session_state)
                     response_stream = self.active_executor.run(  # type: ignore[call-overload, misc]
                         input=message,
                         images=images,
@@ -493,8 +493,9 @@
                             break
                         yield event  # type: ignore[misc]
 
-
-
+                    if session_state is not None:
+                        # Update workflow session state
+                        merge_dictionaries(session_state, session_state_copy)  # type: ignore
 
                     if store_executor_outputs and workflow_run_response is not None:
                         self._store_executor_response(workflow_run_response, active_executor_run_response)  # type: ignore
@@ -565,6 +566,9 @@
         if step_input.previous_step_outputs:
             step_input.previous_step_content = step_input.get_last_step_content()
 
+        # Create session_state copy once to avoid duplication
+        session_state_copy = copy(session_state) if session_state is not None else {}
+
         # Execute with retries
         for attempt in range(self.max_retries + 1):
             try:
@@ -576,7 +580,6 @@
                 ):
                     content = ""
                     final_response = None
-                    session_state_copy = copy(session_state) if session_state else None
                     try:
                         if inspect.isgeneratorfunction(self.active_executor):
                             iterator = self._call_custom_function(
@@ -615,7 +618,7 @@
                         final_response = e.value
 
                     # Merge session_state changes back
-                    if
+                    if session_state is not None:
                         merge_dictionaries(session_state, session_state_copy)
 
                     if final_response is not None:
@@ -623,7 +626,6 @@
                     else:
                         response = StepOutput(content=content)
                 else:
-                    session_state_copy = copy(session_state) if session_state else None
                     if inspect.iscoroutinefunction(self.active_executor):
                         result = await self._acall_custom_function(
                             self.active_executor, step_input, session_state_copy
@@ -632,7 +634,7 @@
                         result = self._call_custom_function(self.active_executor, step_input, session_state_copy)  # type: ignore
 
                     # Merge session_state changes back
-                    if
+                    if session_state is not None:
                         merge_dictionaries(session_state, session_state_copy)
 
                     # If function returns StepOutput, use it directly
@@ -668,7 +670,6 @@
                     if isinstance(self.active_executor, Team):
                         kwargs["store_member_responses"] = True
 
-                    session_state_copy = copy(session_state)
                     response = await self.active_executor.arun(  # type: ignore
                         input=message,  # type: ignore
                         images=images,
@@ -681,8 +682,9 @@
                         **kwargs,
                     )
 
-
-
+                    if session_state is not None:
+                        # Update workflow session state
+                        merge_dictionaries(session_state, session_state_copy)  # type: ignore
 
                     if store_executor_outputs and workflow_run_response is not None:
                         self._store_executor_response(workflow_run_response, response)  # type: ignore
@@ -728,6 +730,9 @@
         if step_input.previous_step_outputs:
             step_input.previous_step_content = step_input.get_last_step_content()
 
+        # Create session_state copy once to avoid duplication
+        session_state_copy = copy(session_state) if session_state is not None else {}
+
         if stream_intermediate_steps and workflow_run_response:
             # Emit StepStartedEvent
             yield StepStartedEvent(
@@ -751,8 +756,6 @@
                     log_debug(f"Executing async function executor for step: {self.name}")
                     import inspect
 
-                    session_state_copy = copy(session_state) if session_state else None
-
                     # Check if the function is an async generator
                     if inspect.isasyncgenfunction(self.active_executor):
                         content = ""
@@ -812,7 +815,7 @@
                             final_response = StepOutput(content=str(result))
 
                         # Merge session_state changes back
-                        if
+                        if session_state is not None:
                             merge_dictionaries(session_state, session_state_copy)
                 else:
                     # For agents and teams, prepare message with context
@@ -840,7 +843,6 @@
                     if isinstance(self.active_executor, Team):
                         kwargs["store_member_responses"] = True
 
-                    session_state_copy = copy(session_state)
                     response_stream = self.active_executor.arun(  # type: ignore
                         input=message,
                         images=images,
@@ -871,8 +873,9 @@
                             break
                         yield event  # type: ignore[misc]
 
-
-
+                    if session_state is not None:
+                        # Update workflow session state
+                        merge_dictionaries(session_state, session_state_copy)  # type: ignore
 
                     if store_executor_outputs and workflow_run_response is not None:
                         self._store_executor_response(workflow_run_response, active_executor_run_response)  # type: ignore
```
agno/workflow/types.py
CHANGED
```diff
@@ -1,3 +1,4 @@
+import json
 from dataclasses import dataclass
 from enum import Enum
 from typing import Any, Dict, List, Optional, Union
@@ -8,6 +9,7 @@ from pydantic import BaseModel
 from agno.media import Audio, File, Image, Video
 from agno.models.metrics import Metrics
 from agno.utils.log import log_warning
+from agno.utils.serialize import json_serializer
 
 
 @dataclass
@@ -443,9 +445,7 @@ class WebSocketHandler:
             else:
                 data = {"type": "message", "content": str(event)}
 
-
-
-            await self.websocket.send_text(self.format_sse_event(json.dumps(data)))
+            await self.websocket.send_text(self.format_sse_event(json.dumps(data, default=json_serializer)))
 
         except Exception as e:
             log_warning(f"Failed to handle WebSocket event: {e}")
@@ -466,9 +466,7 @@ class WebSocketHandler:
             return
 
         try:
-
-
-            await self.websocket.send_text(self.format_sse_event(json.dumps(data)))
+            await self.websocket.send_text(self.format_sse_event(json.dumps(data, default=json_serializer)))
         except Exception as e:
             log_warning(f"Failed to send WebSocket dict: {e}")
 
```
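The types.py change passes default=json_serializer to json.dumps so WebSocket payloads containing values that are not natively JSON-serializable no longer fail. The serializer below is a hypothetical stand-in for agno.utils.serialize.json_serializer (added in agno/utils/serialize.py in this release, not shown in this diff); it only illustrates how the default= hook works:

```python
# Sketch of why json.dumps needs a default= hook: values such as datetimes or
# UUIDs raise TypeError on their own. json_serializer here is a hypothetical
# stand-in for agno.utils.serialize.json_serializer.
import json
from datetime import datetime, timezone
from uuid import UUID, uuid4


def json_serializer(obj):
    # Fall back to ISO strings for datetimes and str() for everything else.
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, UUID):
        return str(obj)
    return str(obj)


payload = {"type": "message", "run_id": uuid4(), "created_at": datetime.now(timezone.utc)}
print(json.dumps(payload, default=json_serializer))
```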
agno/workflow/workflow.py
CHANGED
```diff
@@ -875,10 +875,10 @@ class Workflow:
             return func(**call_kwargs)
         except TypeError as e:
             # If signature inspection fails, fall back to original method
-            logger.
-            f"
+            logger.error(
+                f"Function signature inspection failed: {e}. Falling back to original calling convention."
             )
-            return func(**
+            return func(**kwargs)
 
     def _execute(
         self,
@@ -2158,6 +2158,7 @@
             additional_data=additional_data,
             user_id=user_id,
             session_id=session_id,
+            session_state=session_state,
             audio=audio,
             images=images,
             videos=videos,
@@ -2176,6 +2177,7 @@
             additional_data=additional_data,
             user_id=user_id,
             session_id=session_id,
+            session_state=session_state,
             audio=audio,
             images=images,
             videos=videos,
```