agno 2.3.7__py3-none-any.whl → 2.3.9__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- agno/agent/agent.py +391 -335
- agno/db/mongo/async_mongo.py +0 -24
- agno/db/mongo/mongo.py +0 -16
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2888 -0
- agno/db/mysql/mysql.py +17 -27
- agno/db/mysql/utils.py +139 -6
- agno/db/postgres/async_postgres.py +10 -26
- agno/db/postgres/postgres.py +7 -25
- agno/db/redis/redis.py +0 -4
- agno/db/schemas/evals.py +1 -0
- agno/db/singlestore/singlestore.py +5 -12
- agno/db/sqlite/async_sqlite.py +2 -26
- agno/db/sqlite/sqlite.py +0 -20
- agno/eval/__init__.py +10 -0
- agno/eval/agent_as_judge.py +860 -0
- agno/eval/base.py +29 -0
- agno/eval/utils.py +2 -1
- agno/exceptions.py +7 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/knowledge.py +1142 -176
- agno/media.py +22 -6
- agno/models/aws/claude.py +8 -7
- agno/models/base.py +160 -11
- agno/models/deepseek/deepseek.py +67 -0
- agno/models/google/gemini.py +65 -11
- agno/models/google/utils.py +22 -0
- agno/models/message.py +2 -0
- agno/models/openai/chat.py +4 -0
- agno/models/openai/responses.py +3 -2
- agno/os/app.py +64 -74
- agno/os/interfaces/a2a/router.py +3 -4
- agno/os/interfaces/a2a/utils.py +1 -1
- agno/os/interfaces/agui/router.py +2 -0
- agno/os/middleware/jwt.py +8 -6
- agno/os/router.py +3 -1607
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +581 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +26 -6
- agno/os/routers/evals/schemas.py +34 -2
- agno/os/routers/evals/utils.py +101 -20
- agno/os/routers/knowledge/knowledge.py +1 -1
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +496 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +545 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +1 -559
- agno/os/utils.py +139 -2
- agno/team/team.py +159 -100
- agno/tools/file_generation.py +12 -6
- agno/tools/firecrawl.py +15 -7
- agno/tools/workflow.py +8 -1
- agno/utils/hooks.py +64 -5
- agno/utils/http.py +2 -2
- agno/utils/media.py +11 -1
- agno/utils/print_response/agent.py +8 -0
- agno/utils/print_response/team.py +8 -0
- agno/vectordb/pgvector/pgvector.py +88 -51
- agno/workflow/parallel.py +11 -5
- agno/workflow/step.py +17 -5
- agno/workflow/types.py +38 -2
- agno/workflow/workflow.py +12 -4
- {agno-2.3.7.dist-info → agno-2.3.9.dist-info}/METADATA +8 -3
- {agno-2.3.7.dist-info → agno-2.3.9.dist-info}/RECORD +70 -58
- agno/tools/memori.py +0 -339
- {agno-2.3.7.dist-info → agno-2.3.9.dist-info}/WHEEL +0 -0
- {agno-2.3.7.dist-info → agno-2.3.9.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.7.dist-info → agno-2.3.9.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import time
 import warnings
 from asyncio import CancelledError, create_task
 from collections import ChainMap, deque
@@ -8,6 +9,7 @@ from inspect import iscoroutinefunction
 from os import getenv
 from textwrap import dedent
 from typing import (
+    TYPE_CHECKING,
     Any,
     AsyncIterator,
     Callable,
@@ -29,16 +31,17 @@ from uuid import uuid4

 from pydantic import BaseModel

+if TYPE_CHECKING:
+    from agno.eval.base import BaseEval
+
 from agno.compression.manager import CompressionManager
 from agno.culture.manager import CultureManager
 from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
 from agno.db.schemas.culture import CulturalKnowledge
 from agno.exceptions import (
     InputCheckError,
-    ModelProviderError,
     OutputCheckError,
     RunCancelledException,
-    StopAgentRun,
 )
 from agno.filters import FilterExpr
 from agno.guardrails import BaseGuardrail
@@ -132,7 +135,13 @@ from agno.utils.events import (
     create_tool_call_started_event,
     handle_event,
 )
-from agno.utils.hooks import
+from agno.utils.hooks import (
+    copy_args_for_background,
+    filter_hook_args,
+    normalize_post_hooks,
+    normalize_pre_hooks,
+    should_run_hook_in_background,
+)
 from agno.utils.knowledge import get_agentic_or_user_search_filters
 from agno.utils.log import (
     log_debug,
@@ -271,9 +280,9 @@ class Agent:

     # --- Agent Hooks ---
     # Functions called right after agent-session is loaded, before processing starts
-    pre_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail]]] = None
+    pre_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, "BaseEval"]]] = None
     # Functions called after output is generated but before the response is returned
-    post_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail]]] = None
+    post_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, "BaseEval"]]] = None
     # If True, run hooks as FastAPI background tasks (non-blocking). Set by AgentOS.
     _run_hooks_in_background: Optional[bool] = None

@@ -309,6 +318,8 @@ class Agent:
     system_message: Optional[Union[str, Callable, Message]] = None
     # Role for the system message
     system_message_role: str = "system"
+    # Provide the introduction as the first message from the Agent
+    introduction: Optional[str] = None
     # Set to False to skip context building
     build_context: bool = True

@@ -437,7 +448,6 @@ class Agent:
         model: Optional[Union[Model, str]] = None,
         name: Optional[str] = None,
         id: Optional[str] = None,
-        introduction: Optional[str] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
@@ -477,8 +487,8 @@
         tool_call_limit: Optional[int] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
         tool_hooks: Optional[List[Callable]] = None,
-        pre_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail]]] = None,
-        post_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail]]] = None,
+        pre_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, "BaseEval"]]] = None,
+        post_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, "BaseEval"]]] = None,
         reasoning: bool = False,
         reasoning_model: Optional[Union[Model, str]] = None,
         reasoning_agent: Optional[Agent] = None,
@@ -491,6 +501,7 @@
         send_media_to_model: bool = True,
         system_message: Optional[Union[str, Callable, Message]] = None,
         system_message_role: str = "system",
+        introduction: Optional[str] = None,
         build_context: bool = True,
         description: Optional[str] = None,
         instructions: Optional[Union[str, List[str], Callable]] = None,
@@ -558,8 +569,12 @@
         self.enable_user_memories = enable_user_memories
         self.add_memories_to_context = add_memories_to_context

-        self.session_summary_manager = session_summary_manager
         self.enable_session_summaries = enable_session_summaries
+
+        if session_summary_manager is not None:
+            self.session_summary_manager = session_summary_manager
+            self.enable_session_summaries = True
+
         self.add_session_summary_to_context = add_session_summary_to_context

         # Context compression settings
@@ -1086,7 +1101,12 @@
         # Start memory creation on a separate thread (runs concurrently with the main execution loop)
         memory_future = None
         # 4. Start memory creation in background thread if memory manager is enabled and agentic memory is disabled
-        if
+        if (
+            run_messages.user_message is not None
+            and self.memory_manager is not None
+            and self.enable_user_memories
+            and not self.enable_agentic_memory
+        ):
             log_debug("Starting memory creation in background thread.")
             memory_future = self.background_executor.submit(
                 self._make_memories, run_messages=run_messages, user_id=user_id
@@ -1175,7 +1195,7 @@
         wait_for_open_threads(memory_future=memory_future, cultural_knowledge_future=cultural_knowledge_future)

         # 12. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             session.upsert_run(run=run_response)
             try:
@@ -1305,7 +1325,12 @@
         # Start memory creation on a separate thread (runs concurrently with the main execution loop)
         memory_future = None
         # 4. Start memory creation in background thread if memory manager is enabled and agentic memory is disabled
-        if
+        if (
+            run_messages.user_message is not None
+            and self.memory_manager is not None
+            and self.enable_user_memories
+            and not self.enable_agentic_memory
+        ):
             log_debug("Starting memory creation in background thread.")
             memory_future = self.background_executor.submit(
                 self._make_memories, run_messages=run_messages, user_id=user_id
@@ -1450,7 +1475,7 @@
         )

         # 9. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             session.upsert_run(run=run_response)

@@ -1546,11 +1571,11 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         run_context: Optional[RunContext] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -1574,11 +1599,11 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         run_context: Optional[RunContext] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -1603,11 +1628,11 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         run_context: Optional[RunContext] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -1626,8 +1651,8 @@
                 "`run` method is not supported with an async database. Please use `arun` method instead."
             )

-        #
-        run_id = str(uuid4())
+        # Set the id for the run and register it immediately for cancellation tracking
+        run_id = run_id or str(uuid4())
         register_run(run_id)

         if (add_history_to_context or self.add_history_to_context) and not self.db and not self.team_id:
@@ -1654,9 +1679,9 @@
         # Normalise hook & guardails
         if not self._hooks_normalised:
             if self.pre_hooks:
-                self.pre_hooks =
+                self.pre_hooks = normalize_pre_hooks(self.pre_hooks)  # type: ignore
             if self.post_hooks:
-                self.post_hooks =
+                self.post_hooks = normalize_post_hooks(self.post_hooks)  # type: ignore
             self._hooks_normalised = True

         session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
@@ -1710,84 +1735,90 @@
             # output_schema parameter takes priority, even if run_context was provided
             run_context.output_schema = output_schema

-        #
-
-        self._resolve_run_dependencies(run_context=run_context)
-
-        add_dependencies = (
-            add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
-        )
-        add_session_state = (
-            add_session_state_to_context
-            if add_session_state_to_context is not None
-            else self.add_session_state_to_context
-        )
-        add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
+        # Set up retry logic
+        num_attempts = self.retries + 1

-
-
-
+        for attempt in range(num_attempts):
+            if attempt > 0:
+                log_debug(f"Retrying Agent run {run_id}. Attempt {attempt + 1} of {num_attempts}...")

-
-
-
+            try:
+                # Resolve dependencies
+                if run_context.dependencies is not None:
+                    self._resolve_run_dependencies(run_context=run_context)
+
+                add_dependencies = (
+                    add_dependencies_to_context
+                    if add_dependencies_to_context is not None
+                    else self.add_dependencies_to_context
+                )
+                add_session_state = (
+                    add_session_state_to_context
+                    if add_session_state_to_context is not None
+                    else self.add_session_state_to_context
+                )
+                add_history = (
+                    add_history_to_context if add_history_to_context is not None else self.add_history_to_context
+                )

-
-
-
-                "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-            stream_events = stream_events or stream_intermediate_steps
+                # When filters are passed manually
+                if self.knowledge_filters or knowledge_filters:
+                    run_context.knowledge_filters = self._get_effective_filters(knowledge_filters)

-
-
-
+                # Use stream override value when necessary
+                if stream is None:
+                    stream = False if self.stream is None else self.stream

-
-
+                # Considering both stream_events and stream_intermediate_steps (deprecated)
+                if stream_intermediate_steps is not None:
+                    warnings.warn(
+                        "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
+                        DeprecationWarning,
+                        stacklevel=2,
+                    )
+                    stream_events = stream_events or stream_intermediate_steps

-
-
+                # Can't stream events if streaming is disabled
+                if stream is False:
+                    stream_events = False

-
-
-        self.model = cast(Model, self.model)
+                if stream_events is None:
+                    stream_events = False if self.stream_events is None else self.stream_events

-
-
-            merge_dictionaries(metadata, self.metadata)
+                self.stream = self.stream or stream
+                self.stream_events = self.stream_events or stream_events

-
-
-
-
-
-            user_id=user_id,
-            agent_name=self.name,
-            metadata=run_context.metadata,
-            session_state=run_context.session_state,
-            input=run_input,
-        )
+                # Prepare arguments for the model
+                response_format = (
+                    self._get_response_format(run_context=run_context) if self.parser_model is None else None
+                )
+                self.model = cast(Model, self.model)

-
-
+                # Merge agent metadata with run metadata
+                if self.metadata is not None and metadata is not None:
+                    merge_dictionaries(metadata, self.metadata)

-
-
-
+                # Create a new run_response for this attempt
+                run_response = RunOutput(
+                    run_id=run_id,
+                    session_id=session_id,
+                    agent_id=self.id,
+                    user_id=user_id,
+                    agent_name=self.name,
+                    metadata=run_context.metadata,
+                    session_state=run_context.session_state,
+                    input=run_input,
+                )

-
-
+                run_response.model = self.model.id if self.model is not None else None
+                run_response.model_provider = self.model.provider if self.model is not None else None

-
-
+                # Start the run metrics timer, to calculate the run duration
+                run_response.metrics = Metrics()
+                run_response.metrics.start_timer()

-
+                yield_run_output = yield_run_output or yield_run_response  # For backwards compatibility

-        for attempt in range(num_attempts):
-            try:
                 if stream:
                     response_iterator = self._run_stream(
                         run_response=run_response,
@@ -1823,19 +1854,6 @@
             except (InputCheckError, OutputCheckError) as e:
                 log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                 raise e
-            except ModelProviderError as e:
-                log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
-                if isinstance(e, StopAgentRun):
-                    raise e
-                last_exception = e
-                if attempt < num_attempts - 1:  # Don't sleep on the last attempt
-                    if self.exponential_backoff:
-                        delay = 2**attempt * self.delay_between_retries
-                    else:
-                        delay = self.delay_between_retries
-                    import time
-
-                    time.sleep(delay)
             except KeyboardInterrupt:
                 run_response.content = "Operation cancelled by user"
                 run_response.status = RunStatus.cancelled
@@ -1849,20 +1867,27 @@
                     )
                 else:
                     return run_response
+            except Exception as e:
+                # Check if this is the last attempt
+                if attempt < num_attempts - 1:
+                    # Calculate delay with exponential backoff if enabled
+                    if self.exponential_backoff:
+                        delay = self.delay_between_retries * (2**attempt)
+                    else:
+                        delay = self.delay_between_retries

-
-
-
-
-
-
-
+                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
+                    time.sleep(delay)
+                    continue
+                else:
+                    # Final attempt failed - re-raise the exception
+                    log_error(f"All {num_attempts} attempts failed. Final error: {str(e)}")
+                    if stream:
+                        return generator_wrapper(create_run_error_event(run_response, error=str(e)))  # type: ignore
+                    raise e

-
-
-        if stream:
-            return generator_wrapper(create_run_error_event(run_response, error=str(last_exception)))  # type: ignore
-        raise Exception(f"Failed after {num_attempts} attempts.")
+        # If we get here, all retries failed (shouldn't happen with current logic)
+        raise Exception(f"Failed after {num_attempts} attempts.")

     async def _arun(
         self,
@@ -1981,7 +2006,12 @@

         # 7. Start memory creation as a background task (runs concurrently with the main execution)
         memory_task = None
-        if
+        if (
+            run_messages.user_message is not None
+            and self.memory_manager is not None
+            and self.enable_user_memories
+            and not self.enable_agentic_memory
+        ):
             log_debug("Starting memory creation in background task.")
             memory_task = create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))

@@ -2071,7 +2101,7 @@
         await await_for_open_threads(memory_task=memory_task, cultural_knowledge_task=cultural_knowledge_task)

         # 15. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             agent_session.upsert_run(run=run_response)
             try:
@@ -2261,7 +2291,12 @@

         # 7. Start memory creation as a background task (runs concurrently with the main execution)
         memory_task = None
-        if
+        if (
+            run_messages.user_message is not None
+            and self.memory_manager is not None
+            and self.enable_user_memories
+            and not self.enable_agentic_memory
+        ):
             log_debug("Starting memory creation in background task.")
             memory_task = create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))

@@ -2397,7 +2432,7 @@
             yield item

         # 12. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             agent_session.upsert_run(run=run_response)

@@ -2515,13 +2550,13 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         run_context: Optional[RunContext] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
         stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -2542,13 +2577,13 @@
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
         run_context: Optional[RunContext] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
         stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -2571,13 +2606,13 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         run_context: Optional[RunContext] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
         stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -2592,8 +2627,8 @@
     ) -> Union[RunOutput, AsyncIterator[RunOutputEvent]]:
         """Async Run the Agent and return the response."""

-        #
-        run_id = str(uuid4())
+        # Set the id for the run and register it immediately for cancellation tracking
+        run_id = run_id or str(uuid4())
         register_run(run_id)

         if (add_history_to_context or self.add_history_to_context) and not self.db and not self.team_id:
@@ -2620,9 +2655,9 @@
         # Normalise hooks & guardails
         if not self._hooks_normalised:
             if self.pre_hooks:
-                self.pre_hooks =
+                self.pre_hooks = normalize_pre_hooks(self.pre_hooks, async_mode=True)  # type: ignore
             if self.post_hooks:
-                self.post_hooks =
+                self.post_hooks = normalize_post_hooks(self.post_hooks, async_mode=True)  # type: ignore
             self._hooks_normalised = True

         # Initialize session
@@ -2714,9 +2749,6 @@
         # Prepare arguments for the model (must be after run_context is fully initialized)
         response_format = self._get_response_format(run_context=run_context) if self.parser_model is None else None

-        # If no retries are set, use the agent's default retries
-        retries = retries if retries is not None else self.retries
-
         # Create a new run_response for this attempt
         run_response = RunOutput(
             run_id=run_id,
@@ -2736,12 +2768,15 @@
         run_response.metrics = Metrics()
         run_response.metrics.start_timer()

-        last_exception = None
-        num_attempts = retries + 1
-
         yield_run_output = yield_run_output or yield_run_response  # For backwards compatibility

+        # Set up retry logic
+        num_attempts = self.retries + 1
+
         for attempt in range(num_attempts):
+            if attempt > 0:
+                log_debug(f"Retrying Agent run {run_id}. Attempt {attempt + 1} of {num_attempts}...")
+
             try:
                 # Pass the new run_response to _arun
                 if stream:
@@ -2774,23 +2809,9 @@
                         background_tasks=background_tasks,
                         **kwargs,
                     )
-
             except (InputCheckError, OutputCheckError) as e:
                 log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                 raise e
-            except ModelProviderError as e:
-                log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
-                if isinstance(e, StopAgentRun):
-                    raise e
-                last_exception = e
-                if attempt < num_attempts - 1:  # Don't sleep on the last attempt
-                    if self.exponential_backoff:
-                        delay = 2**attempt * self.delay_between_retries
-                    else:
-                        delay = self.delay_between_retries
-                    import time
-
-                    time.sleep(delay)
             except KeyboardInterrupt:
                 run_response.content = "Operation cancelled by user"
                 run_response.status = RunStatus.cancelled
@@ -2804,20 +2825,27 @@
                     )
                 else:
                     return run_response
+            except Exception as e:
+                # Check if this is the last attempt
+                if attempt < num_attempts - 1:
+                    # Calculate delay with exponential backoff if enabled
+                    if self.exponential_backoff:
+                        delay = self.delay_between_retries * (2**attempt)
+                    else:
+                        delay = self.delay_between_retries

-
-
-
-
-
+                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
+                    time.sleep(delay)
+                    continue
+                else:
+                    # Final attempt failed - re-raise the exception
+                    log_error(f"All {num_attempts} attempts failed. Final error: {str(e)}")
+                    if stream:
+                        return async_generator_wrapper(create_run_error_event(run_response, error=str(e)))  # type: ignore
+                    raise e

-
-
-            raise last_exception
-        else:
-            if stream:
-                return async_generator_wrapper(create_run_error_event(run_response, error=str(last_exception)))  # type: ignore
-            raise Exception(f"Failed after {num_attempts} attempts.")
+        # If we get here, all retries failed
+        raise Exception(f"Failed after {num_attempts} attempts.")

     @overload
     def continue_run(
@@ -2832,7 +2860,6 @@
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
@@ -2853,7 +2880,6 @@
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
@@ -2874,7 +2900,6 @@
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
         run_context: Optional[RunContext] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
@@ -2893,7 +2918,6 @@
             user_id: The user id to continue the run for.
             session_id: The session id to continue the run for.
             run_context: The run context to use for the run.
-            retries: The number of retries to continue the run for.
             knowledge_filters: The knowledge filters to use for the run.
             dependencies: The dependencies to use for the run.
             metadata: The metadata to use for the run.
@@ -2948,125 +2972,130 @@
             dependencies=dependencies,
         )

-        # Resolve
-
-        self._resolve_run_dependencies(run_context=run_context)
-
-        # When filters are passed manually
-        if self.knowledge_filters or run_context.knowledge_filters or knowledge_filters:
-            run_context.knowledge_filters = self._get_effective_filters(knowledge_filters)
-
-        # Merge agent metadata with run metadata
-        run_context.metadata = metadata
-        if self.metadata is not None:
-            if run_context.metadata is None:
-                run_context.metadata = self.metadata
-            else:
-                merge_dictionaries(run_context.metadata, self.metadata)
-
-        # If no retries are set, use the agent's default retries
-        retries = retries if retries is not None else self.retries
-
-        # Use stream override value when necessary
-        if stream is None:
-            stream = False if self.stream is None else self.stream
-
-        # Considering both stream_events and stream_intermediate_steps (deprecated)
-        if stream_intermediate_steps is not None:
-            warnings.warn(
-                "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-            stream_events = stream_events or stream_intermediate_steps
-
-        # Can't stream events if streaming is disabled
-        if stream is False:
-            stream_events = False
-
-        if stream_events is None:
-            stream_events = False if self.stream_events is None else self.stream_events
-
-        # Can't stream events if streaming is disabled
-        if stream is False:
-            stream_events = False
-
-        self.stream = self.stream or stream
-        self.stream_events = self.stream_events or stream_events
+        # Resolve retry parameters
+        num_attempts = self.retries + 1

-
-
-
-            input = run_response.messages or []
-        elif run_id is not None:
-            # The run is continued from a run_id, one of requirements or updated_tool (deprecated) is required.
-            if updated_tools is None and requirements is None:
-                raise ValueError("To continue a run from a given run_id, the requirements parameter must be provided.")
+        for attempt in range(num_attempts):
+            if attempt > 0:
+                log_debug(f"Retrying Agent continue_run {run_id}. Attempt {attempt + 1} of {num_attempts}...")

-
-
-
-
+            try:
+                # Resolve dependencies
+                if run_context.dependencies is not None:
+                    self._resolve_run_dependencies(run_context=run_context)
+
+                # When filters are passed manually
+                if self.knowledge_filters or run_context.knowledge_filters or knowledge_filters:
+                    run_context.knowledge_filters = self._get_effective_filters(knowledge_filters)
+
+                # Merge agent metadata with run metadata
+                run_context.metadata = metadata
+                if self.metadata is not None:
+                    if run_context.metadata is None:
+                        run_context.metadata = self.metadata
+                    else:
+                        merge_dictionaries(run_context.metadata, self.metadata)
+
+                # Use stream override value when necessary
+                if stream is None:
+                    stream = False if self.stream is None else self.stream
+
+                # Considering both stream_events and stream_intermediate_steps (deprecated)
+                if stream_intermediate_steps is not None:
+                    warnings.warn(
+                        "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
+                        DeprecationWarning,
+                        stacklevel=2,
+                    )
+                    stream_events = stream_events or stream_intermediate_steps
+
+                # Can't stream events if streaming is disabled
+                if stream is False:
+                    stream_events = False
+
+                if stream_events is None:
+                    stream_events = False if self.stream_events is None else self.stream_events
+
+                # Can't stream events if streaming is disabled
+                if stream is False:
+                    stream_events = False
+
+                self.stream = self.stream or stream
+                self.stream_events = self.stream_events or stream_events
+
+                # Run can be continued from previous run response or from passed run_response context
+                if run_response is not None:
+                    # The run is continued from a provided run_response. This contains the updated tools.
+                    input = run_response.messages or []
+                elif run_id is not None:
+                    # The run is continued from a run_id, one of requirements or updated_tool (deprecated) is required.
+                    if updated_tools is None and requirements is None:
+                        raise ValueError(
+                            "To continue a run from a given run_id, the requirements parameter must be provided."
+                        )

-
+                    runs = agent_session.runs
+                    run_response = next((r for r in runs if r.run_id == run_id), None)  # type: ignore
+                    if run_response is None:
+                        raise RuntimeError(f"No runs found for run ID {run_id}")

-
-        if updated_tools is not None:
-            warnings.warn(
-                "The 'updated_tools' parameter is deprecated and will be removed in future versions. Use 'requirements' instead.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-            run_response.tools = updated_tools
+                    input = run_response.messages or []

-
-
-
-
-
-
-
+                # If we have updated_tools, set them in the run_response
+                if updated_tools is not None:
+                    warnings.warn(
+                        "The 'updated_tools' parameter is deprecated and will be removed in future versions. Use 'requirements' instead.",
+                        DeprecationWarning,
+                        stacklevel=2,
+                    )
+                    run_response.tools = updated_tools
+
+                # If we have requirements, get the updated tools and set them in the run_response
+                elif requirements is not None:
+                    run_response.requirements = requirements
+                    updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
+                    if updated_tools and run_response.tools:
+                        updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
+                        run_response.tools = [
+                            updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools
+                        ]
+                    else:
+                        run_response.tools = updated_tools
                 else:
-            run_response
-        else:
-            raise ValueError("Either run_response or run_id must be provided.")
+                    raise ValueError("Either run_response or run_id must be provided.")

-
-
-
-
+                # Prepare arguments for the model
+                self._set_default_model()
+                response_format = self._get_response_format(run_context=run_context)
+                self.model = cast(Model, self.model)

-
-
-
-
-
-
+                processed_tools = self.get_tools(
+                    run_response=run_response,
+                    run_context=run_context,
+                    session=agent_session,
+                    user_id=user_id,
+                )

-
-
-
-
-
-
-
+                _tools = self._determine_tools_for_model(
+                    model=self.model,
+                    processed_tools=processed_tools,
+                    run_response=run_response,
+                    run_context=run_context,
+                    session=agent_session,
+                )

-
-        num_attempts = retries + 1
-        for attempt in range(num_attempts):
-            run_response = cast(RunOutput, run_response)
+                run_response = cast(RunOutput, run_response)

-
+                log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

-
-
-
-
+                # Prepare run messages
+                run_messages = self._get_continue_run_messages(
+                    input=input,
+                )

-
-
+                # Reset the run state
+                run_response.status = RunStatus.running

-            try:
                 if stream:
                     response_iterator = self._continue_run_stream(
                         run_response=run_response,
@@ -3097,42 +3126,34 @@
                         **kwargs,
                     )
                     return response
-            except
-
-
-
-
-
+            except KeyboardInterrupt:
+                if stream:
+                    return generator_wrapper(  # type: ignore
+                        create_run_cancelled_event(run_response, "Operation cancelled by user")  # type: ignore
+                    )
+                else:
+                    run_response.content = "Operation cancelled by user"  # type: ignore
+                    run_response.status = RunStatus.cancelled  # type: ignore
+                    return run_response  # type: ignore
+            except Exception as e:
+                # Check if this is the last attempt
+                if attempt < num_attempts - 1:
+                    # Calculate delay with exponential backoff if enabled
                     if self.exponential_backoff:
-                        delay = 2**attempt
+                        delay = self.delay_between_retries * (2**attempt)
                     else:
                         delay = self.delay_between_retries
-                    import time

+                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                     time.sleep(delay)
-
-            if stream:
-                return generator_wrapper(  # type: ignore
-                    create_run_cancelled_event(run_response, "Operation cancelled by user")
-                )
+                    continue
                 else:
-
-
-
+                    # Final attempt failed - re-raise the exception
+                    log_error(f"All {num_attempts} attempts failed. Final error: {str(e)}")
+                    raise

         # If we get here, all retries failed
-
-        log_error(
-            f"Failed after {num_attempts} attempts. Last error using {last_exception.model_name}({last_exception.model_id})"
-        )
-
-        if stream:
-            return generator_wrapper(create_run_error_event(run_response, error=str(last_exception)))  # type: ignore
-        raise last_exception
-        else:
-            if stream:
-                return generator_wrapper(create_run_error_event(run_response, error=str(last_exception)))  # type: ignore
-            raise Exception(f"Failed after {num_attempts} attempts.")
+        raise Exception(f"Failed after {num_attempts} attempts.")

     def _continue_run(
         self,
@@ -3217,7 +3238,7 @@
         raise_if_cancelled(run_response.run_id)  # type: ignore

         # 7. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             session.upsert_run(run=run_response)

@@ -3353,7 +3374,7 @@
         raise_if_cancelled(run_response.run_id)  # type: ignore

         # 4. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             session.upsert_run(run=run_response)

@@ -3448,7 +3469,6 @@
         requirements: Optional[List[RunRequirement]] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
@@ -3469,7 +3489,6 @@
         requirements: Optional[List[RunRequirement]] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
@@ -3490,7 +3509,6 @@
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
         run_context: Optional[RunContext] = None,
-        retries: Optional[int] = None,
         knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
@@ -3510,7 +3528,6 @@
             user_id: The user id to continue the run for.
             session_id: The session id to continue the run for.
             run_context: The run context to use for the run.
-            retries: The number of retries to continue the run for.
             knowledge_filters: The knowledge filters to use for the run.
             dependencies: The dependencies to use for continuing the run.
             metadata: The metadata to use for continuing the run.
@@ -3548,9 +3565,6 @@

         dependencies = dependencies if dependencies is not None else self.dependencies

-        # If no retries are set, use the agent's default retries
-        retries = retries if retries is not None else self.retries
-
         # Use stream override value when necessary
         if stream is None:
             stream = False if self.stream is None else self.stream
@@ -3590,6 +3604,9 @@
         else:
             merge_dictionaries(metadata, self.metadata)

+        # Resolve retry parameters
+        num_attempts = self.retries + 1
+
         # Prepare arguments for the model
         response_format = self._get_response_format(run_context=run_context)
         self.model = cast(Model, self.model)
@@ -3605,9 +3622,10 @@
             metadata=metadata,
         )

-        last_exception = None
-        num_attempts = retries + 1
         for attempt in range(num_attempts):
+            if attempt > 0:
+                log_debug(f"Retrying Agent acontinue_run {run_id}. Attempt {attempt + 1} of {num_attempts}...")
+
             try:
                 if stream:
                     return self._acontinue_run_stream(
@@ -3639,19 +3657,6 @@
                         background_tasks=background_tasks,
                         **kwargs,
                     )
-            except ModelProviderError as e:
-                log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
-                if isinstance(e, StopAgentRun):
-                    raise e
-                last_exception = e
-                if attempt < num_attempts - 1:  # Don't sleep on the last attempt
-                    if self.exponential_backoff:
-                        delay = 2**attempt * self.delay_between_retries
-                    else:
-                        delay = self.delay_between_retries
-                    import time
-
-                    time.sleep(delay)
             except KeyboardInterrupt:
                 run_response = cast(RunOutput, run_response)
                 if stream:
@@ -3662,19 +3667,25 @@
                     run_response.content = "Operation cancelled by user"
                     run_response.status = RunStatus.cancelled
                     return run_response
+            except Exception as e:
+                # Check if this is the last attempt
+                if attempt < num_attempts - 1:
+                    # Calculate delay with exponential backoff if enabled
+                    if self.exponential_backoff:
+                        delay = self.delay_between_retries * (2**attempt)
+                    else:
+                        delay = self.delay_between_retries
+
+                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
+                    time.sleep(delay)
+                    continue
+                else:
+                    # Final attempt failed - re-raise the exception
+                    log_error(f"All {num_attempts} attempts failed. Final error: {str(e)}")
+                    raise

         # If we get here, all retries failed
-
-        log_error(
-            f"Failed after {num_attempts} attempts. Last error using {last_exception.model_name}({last_exception.model_id})"
-        )
-        if stream:
-            return async_generator_wrapper(create_run_error_event(run_response, error=str(last_exception)))  # type: ignore
-        raise last_exception
-        else:
-            if stream:
-                return async_generator_wrapper(create_run_error_event(run_response, error=str(last_exception)))  # type: ignore
-            raise Exception(f"Failed after {num_attempts} attempts.")
+        raise Exception(f"Failed after {num_attempts} attempts.")

     async def _acontinue_run(
         self,
@@ -3853,7 +3864,7 @@
         raise_if_cancelled(run_response.run_id)  # type: ignore

         # 13. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             agent_session.upsert_run(run=run_response)

@@ -4126,7 +4137,7 @@
         raise_if_cancelled(run_response.run_id)  # type: ignore

         # 9. Create session summary
-        if self.session_summary_manager is not None:
+        if self.session_summary_manager is not None and self.enable_session_summaries:
             # Upsert the RunOutput to Agent Session before creating the session summary
             agent_session.upsert_run(run=run_response)

@@ -5667,7 +5678,12 @@
         user_message_str = (
             run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
         )
-        if
+        if (
+            user_message_str is not None
+            and user_message_str.strip() != ""
+            and self.memory_manager is not None
+            and self.enable_user_memories
+        ):
             log_debug("Managing user memories")
             self.memory_manager.create_user_memories(  # type: ignore
                 message=user_message_str,
@@ -5695,7 +5711,7 @@
                 for msg in parsed_messages
                 if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
             ]
-            if len(non_empty_messages) > 0 and self.memory_manager is not None:
+            if len(non_empty_messages) > 0 and self.memory_manager is not None and self.enable_user_memories:
                 self.memory_manager.create_user_memories(messages=non_empty_messages, user_id=user_id, agent_id=self.id)  # type: ignore
             else:
                 log_warning("Unable to add messages to memory")
@@ -5708,7 +5724,12 @@
         user_message_str = (
             run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
         )
-        if
+        if (
+            user_message_str is not None
+            and user_message_str.strip() != ""
+            and self.memory_manager is not None
+            and self.enable_user_memories
+        ):
             log_debug("Managing user memories")
             await self.memory_manager.acreate_user_memories(  # type: ignore
                 message=user_message_str,
@@ -5736,7 +5757,7 @@
                 for msg in parsed_messages
                 if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
             ]
-            if len(non_empty_messages) > 0 and self.memory_manager is not None:
+            if len(non_empty_messages) > 0 and self.memory_manager is not None and self.enable_user_memories:
                 await self.memory_manager.acreate_user_memories(  # type: ignore
                     messages=non_empty_messages, user_id=user_id, agent_id=self.id
                 )
@@ -6349,6 +6370,20 @@
                 metadata=self.metadata,
                 created_at=int(time()),
             )
+            if self.introduction is not None:
+                agent_session.upsert_run(
+                    RunOutput(
+                        run_id=str(uuid4()),
+                        session_id=session_id,
+                        agent_id=self.id,
+                        agent_name=self.name,
+                        user_id=user_id,
+                        content=self.introduction,
+                        messages=[
+                            Message(role=self.model.assistant_message_role, content=self.introduction)  # type: ignore
+                        ],
+                    )
+                )

             if self.cache_session:
                 self._cached_session = agent_session
@@ -6392,6 +6427,20 @@
                 metadata=self.metadata,
                 created_at=int(time()),
             )
+            if self.introduction is not None:
+                agent_session.upsert_run(
+                    RunOutput(
+                        run_id=str(uuid4()),
+                        session_id=session_id,
+                        agent_id=self.id,
+                        agent_name=self.name,
+                        user_id=user_id,
+                        content=self.introduction,
+                        messages=[
+                            Message(role=self.model.assistant_message_role, content=self.introduction)  # type: ignore
+                        ],
+                    )
+                )

             if self.cache_session:
                 self._cached_session = agent_session
@@ -7068,12 +7117,13 @@
             Optional[List[UserMemory]]: The user memories.
         """
         if self.memory_manager is None:
-
+            self._set_memory_manager()
+
         user_id = user_id if user_id is not None else self.user_id
         if user_id is None:
             user_id = "default"

-        return self.memory_manager.get_user_memories(user_id=user_id)
+        return self.memory_manager.get_user_memories(user_id=user_id)  # type: ignore

     async def aget_user_memories(self, user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
         """Get the user memories for the given user ID.
@@ -7084,12 +7134,13 @@
             Optional[List[UserMemory]]: The user memories.
         """
         if self.memory_manager is None:
-
+            self._set_memory_manager()
+
         user_id = user_id if user_id is not None else self.user_id
         if user_id is None:
             user_id = "default"

-        return await self.memory_manager.aget_user_memories(user_id=user_id)
+        return await self.memory_manager.aget_user_memories(user_id=user_id)  # type: ignore

     def get_culture_knowledge(self) -> Optional[List[CulturalKnowledge]]:
         """Get the cultural knowledge the agent has access to
@@ -10248,9 +10299,8 @@
         """
         retrieval_timer = Timer()
         retrieval_timer.start()
-        dependencies = run_context.dependencies if run_context else None
         docs_from_knowledge = await self.aget_relevant_docs_from_knowledge(
-            query=query, filters=knowledge_filters,
+            query=query, filters=knowledge_filters, run_context=run_context
         )
         if docs_from_knowledge is not None:
             references = MessageReferences(
@@ -10547,6 +10597,7 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
@@ -10596,6 +10647,7 @@
             session_id=session_id,
             session_state=session_state,
             user_id=user_id,
+            run_id=run_id,
            audio=audio,
             images=images,
             videos=videos,
@@ -10624,6 +10676,7 @@
             session_id=session_id,
             session_state=session_state,
             user_id=user_id,
+            run_id=run_id,
             audio=audio,
             images=images,
             videos=videos,
@@ -10651,6 +10704,7 @@
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
+        run_id: Optional[str] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
@@ -10694,6 +10748,7 @@
             session_id=session_id,
             session_state=session_state,
             user_id=user_id,
+            run_id=run_id,
             audio=audio,
             images=images,
             videos=videos,
@@ -10721,6 +10776,7 @@
             session_id=session_id,
             session_state=session_state,
             user_id=user_id,
+            run_id=run_id,
             audio=audio,
             images=images,
             videos=videos,