aip-agents-binary 0.5.25__py3-none-macosx_13_0_arm64.whl → 0.6.8__py3-none-macosx_13_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/agent/__init__.py +44 -4
- aip_agents/agent/base_langgraph_agent.py +163 -74
- aip_agents/agent/base_langgraph_agent.pyi +3 -2
- aip_agents/agent/langgraph_memory_enhancer_agent.py +368 -34
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +3 -2
- aip_agents/agent/langgraph_react_agent.py +329 -22
- aip_agents/agent/langgraph_react_agent.pyi +41 -2
- aip_agents/examples/hello_world_ptc.py +49 -0
- aip_agents/examples/hello_world_ptc.pyi +5 -0
- aip_agents/examples/hello_world_ptc_custom_tools.py +83 -0
- aip_agents/examples/hello_world_ptc_custom_tools.pyi +7 -0
- aip_agents/examples/hello_world_tool_output_client.py +9 -0
- aip_agents/examples/tools/multiply_tool.py +43 -0
- aip_agents/examples/tools/multiply_tool.pyi +18 -0
- aip_agents/guardrails/engines/base.py +6 -6
- aip_agents/mcp/client/__init__.py +38 -2
- aip_agents/mcp/client/connection_manager.py +36 -1
- aip_agents/mcp/client/connection_manager.pyi +3 -0
- aip_agents/mcp/client/persistent_session.py +318 -68
- aip_agents/mcp/client/persistent_session.pyi +9 -0
- aip_agents/mcp/client/transports.py +37 -2
- aip_agents/mcp/client/transports.pyi +9 -0
- aip_agents/memory/adapters/base_adapter.py +98 -0
- aip_agents/memory/adapters/base_adapter.pyi +25 -0
- aip_agents/ptc/__init__.py +87 -0
- aip_agents/ptc/__init__.pyi +14 -0
- aip_agents/ptc/custom_tools.py +473 -0
- aip_agents/ptc/custom_tools.pyi +184 -0
- aip_agents/ptc/custom_tools_payload.py +400 -0
- aip_agents/ptc/custom_tools_payload.pyi +31 -0
- aip_agents/ptc/custom_tools_templates/__init__.py +1 -0
- aip_agents/ptc/custom_tools_templates/__init__.pyi +0 -0
- aip_agents/ptc/custom_tools_templates/custom_build_function.py.template +23 -0
- aip_agents/ptc/custom_tools_templates/custom_init.py.template +15 -0
- aip_agents/ptc/custom_tools_templates/custom_invoke.py.template +60 -0
- aip_agents/ptc/custom_tools_templates/custom_registry.py.template +87 -0
- aip_agents/ptc/custom_tools_templates/custom_sources_init.py.template +7 -0
- aip_agents/ptc/custom_tools_templates/custom_wrapper.py.template +19 -0
- aip_agents/ptc/doc_gen.py +122 -0
- aip_agents/ptc/doc_gen.pyi +40 -0
- aip_agents/ptc/exceptions.py +57 -0
- aip_agents/ptc/exceptions.pyi +37 -0
- aip_agents/ptc/executor.py +261 -0
- aip_agents/ptc/executor.pyi +99 -0
- aip_agents/ptc/mcp/__init__.py +45 -0
- aip_agents/ptc/mcp/__init__.pyi +7 -0
- aip_agents/ptc/mcp/sandbox_bridge.py +668 -0
- aip_agents/ptc/mcp/sandbox_bridge.pyi +47 -0
- aip_agents/ptc/mcp/templates/__init__.py +1 -0
- aip_agents/ptc/mcp/templates/__init__.pyi +0 -0
- aip_agents/ptc/mcp/templates/mcp_client.py.template +239 -0
- aip_agents/ptc/naming.py +196 -0
- aip_agents/ptc/naming.pyi +85 -0
- aip_agents/ptc/payload.py +26 -0
- aip_agents/ptc/payload.pyi +15 -0
- aip_agents/ptc/prompt_builder.py +673 -0
- aip_agents/ptc/prompt_builder.pyi +59 -0
- aip_agents/ptc/ptc_helper.py +16 -0
- aip_agents/ptc/ptc_helper.pyi +1 -0
- aip_agents/ptc/sandbox_bridge.py +256 -0
- aip_agents/ptc/sandbox_bridge.pyi +38 -0
- aip_agents/ptc/template_utils.py +33 -0
- aip_agents/ptc/template_utils.pyi +13 -0
- aip_agents/ptc/templates/__init__.py +1 -0
- aip_agents/ptc/templates/__init__.pyi +0 -0
- aip_agents/ptc/templates/ptc_helper.py.template +134 -0
- aip_agents/ptc/tool_def_helpers.py +101 -0
- aip_agents/ptc/tool_def_helpers.pyi +38 -0
- aip_agents/ptc/tool_enrichment.py +163 -0
- aip_agents/ptc/tool_enrichment.pyi +60 -0
- aip_agents/sandbox/__init__.py +43 -0
- aip_agents/sandbox/__init__.pyi +5 -0
- aip_agents/sandbox/defaults.py +205 -0
- aip_agents/sandbox/defaults.pyi +30 -0
- aip_agents/sandbox/e2b_runtime.py +295 -0
- aip_agents/sandbox/e2b_runtime.pyi +57 -0
- aip_agents/sandbox/template_builder.py +131 -0
- aip_agents/sandbox/template_builder.pyi +36 -0
- aip_agents/sandbox/types.py +24 -0
- aip_agents/sandbox/types.pyi +14 -0
- aip_agents/sandbox/validation.py +50 -0
- aip_agents/sandbox/validation.pyi +20 -0
- aip_agents/sentry/sentry.py +29 -8
- aip_agents/sentry/sentry.pyi +3 -2
- aip_agents/tools/__init__.py +13 -2
- aip_agents/tools/__init__.pyi +3 -1
- aip_agents/tools/browser_use/browser_use_tool.py +8 -0
- aip_agents/tools/browser_use/streaming.py +2 -0
- aip_agents/tools/date_range_tool.py +554 -0
- aip_agents/tools/date_range_tool.pyi +21 -0
- aip_agents/tools/execute_ptc_code.py +357 -0
- aip_agents/tools/execute_ptc_code.pyi +90 -0
- aip_agents/tools/memory_search/__init__.py +8 -1
- aip_agents/tools/memory_search/__init__.pyi +3 -3
- aip_agents/tools/memory_search/mem0.py +114 -1
- aip_agents/tools/memory_search/mem0.pyi +11 -1
- aip_agents/tools/memory_search/schema.py +33 -0
- aip_agents/tools/memory_search/schema.pyi +10 -0
- aip_agents/tools/memory_search_tool.py +8 -0
- aip_agents/tools/memory_search_tool.pyi +2 -2
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +26 -1
- aip_agents/utils/langgraph/tool_output_management.py +80 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +37 -0
- {aip_agents_binary-0.5.25.dist-info → aip_agents_binary-0.6.8.dist-info}/METADATA +9 -19
- {aip_agents_binary-0.5.25.dist-info → aip_agents_binary-0.6.8.dist-info}/RECORD +107 -41
- {aip_agents_binary-0.5.25.dist-info → aip_agents_binary-0.6.8.dist-info}/WHEEL +1 -1
- aip_agents/examples/demo_memory_recall.py +0 -401
- aip_agents/examples/demo_memory_recall.pyi +0 -58
- {aip_agents_binary-0.5.25.dist-info → aip_agents_binary-0.6.8.dist-info}/top_level.txt +0 -0
aip_agents/agent/__init__.py
CHANGED

@@ -6,13 +6,25 @@ Author:
 Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
 """

+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
 from aip_agents.agent.base_agent import BaseAgent
+
+if TYPE_CHECKING:
+    from aip_agents.agent.google_adk_agent import GoogleADKAgent
+    from aip_agents.agent.langflow_agent import LangflowAgent
 from aip_agents.agent.base_langgraph_agent import BaseLangGraphAgent
-from aip_agents.agent.google_adk_agent import GoogleADKAgent
 from aip_agents.agent.interface import AgentInterface
-from aip_agents.agent.
-
-
+from aip_agents.agent.langgraph_memory_enhancer_agent import (
+    LangGraphMemoryEnhancerAgent,
+)
+from aip_agents.agent.langgraph_react_agent import (
+    LangChainAgent,
+    LangGraphAgent,
+    LangGraphReactAgent,
+)

 __all__ = [
     "AgentInterface",

@@ -25,3 +37,31 @@ __all__ = [
     "LangflowAgent",
     "LangGraphMemoryEnhancerAgent",
 ]
+
+
+def __getattr__(name: str) -> Any:
+    """Lazy import of heavy agent implementations.
+
+    This avoids importing heavy dependencies (Google ADK, etc.)
+    when they are not needed.
+
+    Args:
+        name: Attribute name to import.
+
+    Returns:
+        The requested class.
+
+    Raises:
+        AttributeError: If attribute is not found.
+    """
+    if name == "GoogleADKAgent":
+        from aip_agents.agent.google_adk_agent import (
+            GoogleADKAgent as _GoogleADKAgent,
+        )
+
+        return _GoogleADKAgent
+    elif name == "LangflowAgent":
+        from aip_agents.agent.langflow_agent import LangflowAgent as _LangflowAgent
+
+        return _LangflowAgent
+    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
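The hunk above replaces eager imports of GoogleADKAgent and LangflowAgent with a TYPE_CHECKING block plus a module-level __getattr__ (PEP 562), so the heavy Google ADK dependency is only paid for when the attribute is actually accessed. A minimal, self-contained sketch of the same pattern (module and class names below are illustrative, not from this package; the stdlib "decimal" stands in for a heavy optional dependency):

# lazy_mod.py — PEP 562 lazy re-export, the pattern the hunk above adopts.
from typing import Any

__all__ = ["Decimal"]


def __getattr__(name: str) -> Any:
    """Import the heavy class only on first attribute access."""
    if name == "Decimal":
        from decimal import Decimal as _Decimal  # deferred until requested

        return _Decimal
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Importing lazy_mod is cheap; accessing lazy_mod.Decimal triggers the deferred import, and the TYPE_CHECKING imports keep static type checkers aware of the real types without the runtime cost.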
aip_agents/agent/base_langgraph_agent.py
CHANGED

@@ -21,13 +21,13 @@ from concurrent.futures import Future
 from contextlib import suppress
 from contextvars import ContextVar
 from dataclasses import dataclass
-from typing import Annotated, Any
+from typing import Annotated, Any, cast

 from a2a.types import AgentCard
 from aiostream import stream as astream
-from gllm_core.event import EventEmitter
-from gllm_core.event.handler import StreamEventHandler
-from gllm_core.schema import Chunk
+from gllm_core.event import EventEmitter  # type: ignore[import-untyped]
+from gllm_core.event.handler import StreamEventHandler  # type: ignore[import-untyped]
+from gllm_core.schema import Chunk  # type: ignore[import-untyped]
 from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
 from langchain_core.tools import BaseTool
 from langgraph.graph import StateGraph

@@ -197,6 +197,7 @@ class BaseLangGraphAgent(BaseAgent):
         self.enable_a2a_token_streaming = enable_a2a_token_streaming
         self.event_emitter = event_emitter
         self.checkpointer = checkpointer
+        self.tool_output_manager = None

         self._mem0_client: Any | None = None
         self.memory: BaseMemory | None = None

@@ -384,10 +385,13 @@ class BaseLangGraphAgent(BaseAgent):
         try:
             user_id = override_user_id or self.memory_agent_id
             if hasattr(self.memory, MemoryMethod.SEARCH):
-                results = self.memory.search(
+                results = self.memory.search(  # type: ignore[attr-defined]
+                    query=query,
+                    user_id=user_id,
+                    limit=self.memory_retrieval_limit,
+                )
                 return results if isinstance(results, list) else []
-
-            return []
+            return []
         except Exception as e:  # noqa: BLE001
             logger.debug(f"Memory: search ignored error: {e}")
             return []

@@ -415,7 +419,11 @@ class BaseLangGraphAgent(BaseAgent):
             future = save_async(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id)
             self._watch_memory_future(future, user_id)
         elif hasattr(self.memory, MemoryMethod.SAVE_INTERACTION):
-            self.memory.save_interaction(
+            self.memory.save_interaction(  # type: ignore[attr-defined]
+                user_text=str(user_text),
+                ai_text=str(ai_text),
+                user_id=user_id,
+            )
         else:
             logger.warning(
                 "Memory: save_interaction method NOT available on memory adapter "

@@ -447,6 +455,14 @@ class BaseLangGraphAgent(BaseAgent):

         future.add_done_callback(_log_completion)

+    def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
+        """Return True when interaction should be saved to memory.
+
+        Subclasses can override this to skip persistence for specific response types.
+        """
+        del final_state
+        return True
+
     def _resolve_and_validate_tools(self) -> list[BaseTool]:
         """Resolve and validate regular tools for LangGraph usage.

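The new _should_save_interaction hook above gives subclasses a single point to veto memory persistence per response. A hypothetical override sketch (the "needs_clarification" state key is illustrative, not part of the package):

from typing import Any

from aip_agents.agent import BaseLangGraphAgent


class NoClarificationMemoryAgent(BaseLangGraphAgent):
    """Hypothetical subclass that skips persisting clarification turns."""

    def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
        # Skip memory writes when the graph ended on a clarification request;
        # otherwise fall back to the base behavior of always saving.
        if final_state and final_state.get("needs_clarification"):
            return False
        return True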
@@ -560,7 +576,11 @@ class BaseLangGraphAgent(BaseAgent):
             return

         try:
-            tool
+            set_tool_config = getattr(tool, "set_tool_config", None)
+            if callable(set_tool_config):
+                set_tool_config(tool_config_data)
+            else:
+                raise AttributeError("set_tool_config not available")
             logger.info(f"Agent '{self.name}': Configured tool '{tool.name}' with agent defaults: {tool_config_data}")
         except Exception as e:
             logger.warning(f"Agent '{self.name}': Failed to configure tool '{tool.name}': {e}")

@@ -598,7 +618,7 @@ class BaseLangGraphAgent(BaseAgent):
         self._sanitize_tool_names()
         try:
             if self.state_schema:
-                graph_builder = StateGraph(self.state_schema)
+                graph_builder: StateGraph = StateGraph(self.state_schema)
             else:

                 class DefaultAgentState(TypedDict):

@@ -715,7 +735,7 @@ class BaseLangGraphAgent(BaseAgent):
             return None
         last_item = list_state[-1]
         if isinstance(last_item, AIMessage) and getattr(last_item, "content", None) is not None:
-            output_content = last_item.content
+            output_content = self._normalize_event_content(last_item.content)
         elif isinstance(last_item, str):
             output_content = last_item
         return output_content

@@ -886,7 +906,12 @@ class BaseLangGraphAgent(BaseAgent):
         formatted_output = self._format_graph_output(final_state_result)

         try:
-            self.
+            if self._should_save_interaction(final_state_result):
+                self._memory_save_interaction(
+                    user_text=query,
+                    ai_text=formatted_output,
+                    memory_user_id=memory_user_id,
+                )
         except Exception:
             pass

@@ -995,7 +1020,7 @@ class BaseLangGraphAgent(BaseAgent):
         key = self.thread_id_key or "thread_id"
         return configurable.get(key)

-    def _process_langgraph_event(self, event:
+    def _process_langgraph_event(self, event: Any) -> str | dict[str, Any] | A2AEvent | None:
         """Process a single LangGraph streaming event.

         Args:

@@ -1045,6 +1070,36 @@ class BaseLangGraphAgent(BaseAgent):

         return True

+    @staticmethod
+    def _normalize_usage_metadata(usage: Any | None) -> dict[str, Any] | None:
+        """Normalize usage metadata to a dictionary when possible.
+
+        Args:
+            usage: Usage metadata from LangChain messages.
+
+        Returns:
+            A dictionary copy when usage is available, otherwise None.
+        """
+        if usage is None:
+            return None
+        if isinstance(usage, dict):
+            return dict(usage)
+        return cast(dict[str, Any], usage)
+
+    @staticmethod
+    def _normalize_event_content(content: Any) -> str:
+        """Normalize event content to a string payload.
+
+        Args:
+            content: Raw content payload from LangChain/LangGraph.
+
+        Returns:
+            String representation suitable for A2A events.
+        """
+        if isinstance(content, str):
+            return content
+        return json.dumps(content)
+
     async def _stream_with_langgraph(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
         """Handle streaming for LangChain models using LangGraph's native streaming.

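The two static helpers added above exist because LangChain message content is not always a string: AIMessage.content may also be a list of content blocks. A standalone sketch of the same normalization, runnable without the package:

import json
from typing import Any


def normalize_event_content(content: Any) -> str:
    """Standalone mirror of the _normalize_event_content helper in the hunk above."""
    if isinstance(content, str):
        return content
    return json.dumps(content)


print(normalize_event_content("hello"))                           # -> hello
print(normalize_event_content([{"type": "text", "text": "hi"}]))  # -> JSON-encoded list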
@@ -1135,9 +1190,13 @@ class BaseLangGraphAgent(BaseAgent):
         logger.info(f"Agent '{self.name}': Initializing MCP tools with persistent sessions.")

         # Add timeout for initialization to prevent hanging
-
+        mcp_client = self.mcp_client
+        if mcp_client is None:
+            return
+
+        await asyncio.wait_for(mcp_client.initialize(), timeout=30.0)

-        mcp_tools = await
+        mcp_tools = await mcp_client.get_tools()

         if not mcp_tools:
             logger.warning(f"Agent '{self.name}': No MCP tools retrieved from configured servers.")

@@ -1169,7 +1228,7 @@ class BaseLangGraphAgent(BaseAgent):
                 logger.warning(f"Agent '{self.name}': Error during MCP client cleanup: {e}")
                 # Don't re-raise - cleanup should be best-effort

-    async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[
+    async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[A2AEvent, None]:
         """Asynchronously streams the agent's response in A2A format.

         Args:

@@ -1190,7 +1249,7 @@ class BaseLangGraphAgent(BaseAgent):
         task_id: str | None = None,
         context_id: str | None = None,
         **kwargs: Any,
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Stream agent response as SSE-compatible chunks.

         This method wraps arun_a2a_stream and transforms output to the normalized

@@ -1222,7 +1281,8 @@ class BaseLangGraphAgent(BaseAgent):
         pii_mapping = kwargs.get("pii_mapping")
         transformer = SSEChunkTransformer(task_id=task_id, context_id=context_id, pii_mapping=pii_mapping)
         try:
-
+            stream = self.arun_a2a_stream(query, **kwargs)
+            async for chunk in transformer.transform_stream(stream):
                 yield chunk
         except Exception as e:
             # Lazy import to support optional guardrails dependency

@@ -1358,14 +1418,20 @@ class BaseLangGraphAgent(BaseAgent):
         Returns:
             A2AEvent with TOOL_CALL event type and structured tool information.
         """
-        tool_calls_details = [
-
-
-
-
-
-
-
+        tool_calls_details: list[dict[str, Any]] = []
+        manager = getattr(self, "tool_output_manager", None)
+        thread_id = _THREAD_ID_CVAR.get()
+        for tool_call in message.tool_calls:
+            args = tool_call["args"]
+            if manager and thread_id and isinstance(args, dict):
+                args = manager.rewrite_args_with_latest_reference(args, thread_id)
+            tool_calls_details.append(
+                {
+                    "id": tool_call.get("id"),
+                    "name": tool_call["name"],
+                    "args": args,
+                }
+            )
         tool_names = [details["name"] for details in tool_calls_details]

         event = self._create_a2a_event(

@@ -1374,7 +1440,7 @@ class BaseLangGraphAgent(BaseAgent):
             tool_info={"tool_calls": tool_calls_details, "status": "running"},
             metadata={"status": Status.RUNNING},
             is_final=False,
-            step_usage=message.usage_metadata,
+            step_usage=self._normalize_usage_metadata(message.usage_metadata),
         )

         self._record_emitted_tool_calls(tool_calls_details)

@@ -1594,8 +1660,9 @@ class BaseLangGraphAgent(BaseAgent):
         """
         is_final_response = self._is_final_response(message)
         metadata = self._build_metadata_for_final_response(is_final_response, state)
+        raw_content = message.content
         content = deanonymize_final_response_content(
-            content=
+            content=raw_content if isinstance(raw_content, str) else json.dumps(raw_content),
             is_final_response=is_final_response,
             metadata=metadata,
         )

@@ -1605,7 +1672,7 @@ class BaseLangGraphAgent(BaseAgent):
             tool_info=None,
             metadata=metadata,
             is_final=is_final_response,
-            step_usage=message.usage_metadata,
+            step_usage=self._normalize_usage_metadata(message.usage_metadata),
         )
         return event, is_final_response

@@ -1882,7 +1949,7 @@ class BaseLangGraphAgent(BaseAgent):
         """
         current_thread_id: str | None = None
         try:
-            configurable = config.get("configurable", {})
+            configurable = config.get("configurable", {})
             thread_key = self.thread_id_key or "thread_id"
             current_thread_id = str(configurable.get(thread_key)) if configurable.get(thread_key) else None
         except Exception:

@@ -1964,7 +2031,7 @@ class BaseLangGraphAgent(BaseAgent):
             )
             return events, is_final, updated_message_count

-    async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[
+    async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[A2AEvent, None]:
         """Internal implementation of arun_a2a_stream without MCP handling.

         Args:

@@ -2031,7 +2098,7 @@ class BaseLangGraphAgent(BaseAgent):
             enable_token_streaming=self.enable_a2a_token_streaming,
         )

-    async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[
+    async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Handle the main streaming process including initial status and event processing.

         Args:

@@ -2048,7 +2115,7 @@ class BaseLangGraphAgent(BaseAgent):
             self._log_streaming_event_debug("process_stream_item", event)
             yield event

-    def _create_initial_status_event(self) ->
+    def _create_initial_status_event(self) -> A2AEvent:
         """Create and setup the initial status event."""
         initial_status_event = self._create_a2a_event(
             event_type=A2AStreamEventType.STATUS_UPDATE, content=DefaultStepMessages.EN.value

@@ -2065,7 +2132,7 @@ class BaseLangGraphAgent(BaseAgent):

         return initial_status_event

-    async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[
+    async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Process individual streaming items from the LangGraph execution.

         Handles the core streaming logic by iterating through items produced by

@@ -2098,9 +2165,7 @@ class BaseLangGraphAgent(BaseAgent):
         async for event in self._create_graph_stream_events(enhanced_input, context):
             yield event

-    async def _process_a2a_streaming_with_tokens(
-        self, context: "_StreamingContext"
-    ) -> AsyncGenerator[dict[str, Any], None]:
+    async def _process_a2a_streaming_with_tokens(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Process A2A streaming with token streaming support using aiostream.

         Supports both LM Invoker and LangChain models by detecting the appropriate

@@ -2133,6 +2198,9 @@ class BaseLangGraphAgent(BaseAgent):
         token_stream, enhanced_input = self._create_token_stream(context)
         graph_stream = self._create_graph_stream_events(enhanced_input, context)

+        if token_stream is None:
+            raise RuntimeError(f"Agent '{self.name}': Token stream not available for LM invoker.")
+
         merged = astream.merge(token_stream, graph_stream)
         async with merged.stream() as merged_stream:
             async for event in merged_stream:

@@ -2148,7 +2216,7 @@ class BaseLangGraphAgent(BaseAgent):
             logger.error(f"Agent '{self.name}': Error during A2A token streaming: {e}")
             raise

-    async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[
+    async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[A2AEvent, None]:
         """Generate A2A events from LM Invoker token stream.

         Uses StreamEventHandler to capture tokens emitted by LM Invoker.

@@ -2160,6 +2228,8 @@ class BaseLangGraphAgent(BaseAgent):
             RuntimeError: If no StreamEventHandler is found in event_emitter.
         """
         stream_handler = self._get_stream_handler()
+        if stream_handler is None:
+            raise RuntimeError(f"Agent '{self.name}': StreamEventHandler is required for token streaming.")

         try:
             async for event in stream_handler.stream():

@@ -2175,7 +2245,7 @@ class BaseLangGraphAgent(BaseAgent):
     def _create_token_stream(
         self,
         context: "_StreamingContext",
-    ) -> tuple[AsyncGenerator[
+    ) -> tuple[AsyncGenerator[A2AEvent, None] | None, dict[str, Any]]:
         """Create appropriate token stream and enhanced input for the active model backend.

         Args:

@@ -2197,7 +2267,7 @@ class BaseLangGraphAgent(BaseAgent):

     async def _create_graph_stream_events(
         self, enhanced_input: dict[str, Any], context: "_StreamingContext"
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Generate A2A events from graph execution.

         Args:

@@ -2216,8 +2286,9 @@ class BaseLangGraphAgent(BaseAgent):
         async for item in graph_execution:
             stream_mode, stream_data = item

-            if stream_mode == StreamMode.MESSAGES:
-
+            if stream_mode == StreamMode.MESSAGES.value:
+                message_data = cast(tuple[Any, dict[str, Any]], stream_data)
+                async for token_event in self._process_message_stream_item(message_data):
                     yield token_event
                 continue

@@ -2236,10 +2307,10 @@ class BaseLangGraphAgent(BaseAgent):
         Returns:
             List of stream modes to use for graph execution.
         """
-        stream_modes = [StreamMode.VALUES, StreamMode.CUSTOM]
+        stream_modes = [StreamMode.VALUES.value, StreamMode.CUSTOM.value]

         if context.enable_token_streaming and not self._has_lm_invoker():
-            stream_modes.append(StreamMode.MESSAGES)
+            stream_modes.append(StreamMode.MESSAGES.value)

         return stream_modes

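The StreamMode changes above switch enum comparisons and the stream-mode list to plain strings (.value), since langgraph's astream() takes string mode names and, when given a list of modes, yields (mode, payload) tuples whose first element is a string. A minimal runnable sketch of that behavior (graph and state here are illustrative, not from this package):

import asyncio
from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    text: str


def shout(state: State) -> State:
    return {"text": state["text"].upper()}


builder = StateGraph(State)
builder.add_node("shout", shout)
builder.add_edge(START, "shout")
builder.add_edge("shout", END)
graph = builder.compile()


async def main() -> None:
    # With a list of modes, each chunk is a (mode, payload) tuple; mode is a str,
    # so comparing it to an enum member (instead of the enum's .value) never matches.
    async for mode, payload in graph.astream({"text": "hi"}, stream_mode=["values", "updates"]):
        print(mode, payload)


asyncio.run(main())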
@@ -2249,7 +2320,7 @@ class BaseLangGraphAgent(BaseAgent):
         stream_mode: str,
         stream_data: Any,
         context: "_StreamingContext",
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Process a single graph stream item and yield A2A events.

         Args:

@@ -2261,10 +2332,12 @@ class BaseLangGraphAgent(BaseAgent):
         Yields:
             A2A events generated from the stream item.
         """
-        context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES else context.final_state
+        context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES.value else context.final_state

+        pending_artifacts = context.pending_artifacts if context.pending_artifacts is not None else []
+        seen_artifact_hashes = context.seen_artifact_hashes if context.seen_artifact_hashes is not None else set()
         events, is_final, context.processed_message_count = self._handle_stream_item(
-            item,
+            item, pending_artifacts, seen_artifact_hashes, context.processed_message_count
         )

         if is_final:

@@ -2277,7 +2350,7 @@ class BaseLangGraphAgent(BaseAgent):

     async def _process_message_stream_item(
         self, message_data: tuple[Any, dict[str, Any]]
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Process message stream items to extract token events.

         The "messages" stream mode yields tuples of (AIMessageChunk, metadata).

@@ -2314,9 +2387,7 @@ class BaseLangGraphAgent(BaseAgent):
         except Exception as e:
             logger.error(f"Agent '{self.name}': Error processing message stream item: {e}")

-    def _update_final_response_for_streaming(
-        self, context: "_StreamingContext", event: dict[str, Any]
-    ) -> dict[str, Any]:
+    def _update_final_response_for_streaming(self, context: "_StreamingContext", event: A2AEvent) -> A2AEvent:
         """Update final response events with appropriate streaming configuration.

         For FINAL_RESPONSE events, this method updates the metadata and optionally clears

@@ -2330,13 +2401,17 @@ class BaseLangGraphAgent(BaseAgent):
             The processed event dictionary with updated metadata and content
         """
         if event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
-            event
+            metadata = event.get("metadata")
+            if not isinstance(metadata, dict):
+                metadata = {}
+                event["metadata"] = metadata
+            metadata[MetadataFieldKeys.TOKEN_STREAMING] = False
             if context.enable_token_streaming:
                 event["content"] = ""
-
+                metadata[MetadataFieldKeys.TOKEN_STREAMING] = True
         return event

-    def _convert_raw_token_to_a2a_event(self, raw_event: str) ->
+    def _convert_raw_token_to_a2a_event(self, raw_event: str) -> A2AEvent | None:
         """Parse raw token event into A2A event.

         Args:

@@ -2359,7 +2434,7 @@ class BaseLangGraphAgent(BaseAgent):
             logger.debug(f"Agent '{self.name}': Error parsing token event: {e}")
             return None

-    def _capture_final_content_if_needed(self, context: "_StreamingContext", event:
+    def _capture_final_content_if_needed(self, context: "_StreamingContext", event: A2AEvent) -> None:
         """Capture final content from A2A events for memory persistence.

         Monitors A2A events for final response content and triggers early memory

@@ -2380,7 +2455,7 @@ class BaseLangGraphAgent(BaseAgent):
                 and isinstance(context.last_final_content, str)
                 and context.last_final_content
             )
-            if should_save_early:
+            if should_save_early and self._should_save_interaction(context.final_state):
                 try:
                     logger.info(
                         "Agent '%s': A2A persisting memory early (len=%d) for user_id='%s'",

@@ -2399,6 +2474,8 @@ class BaseLangGraphAgent(BaseAgent):
                     context.saved_memory = True
                 except Exception:
                     pass
+            elif should_save_early:
+                context.saved_memory = True
         except Exception:
             pass

@@ -2430,14 +2507,19 @@ class BaseLangGraphAgent(BaseAgent):
                 )
             except Exception:
                 pass
-            self.
-
-
-
+            if self._should_save_interaction(context.final_state):
+                self._memory_save_interaction(
+                    user_text=context.original_query,
+                    ai_text=final_text,
+                    memory_user_id=context.memory_user_id,
+                )
+                context.saved_memory = True
+            else:
+                context.saved_memory = True
         except Exception:
             pass

-    async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[
+    async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Ensure final completion events are yielded if not already done.

         Args:

@@ -2448,7 +2530,9 @@ class BaseLangGraphAgent(BaseAgent):
             dict[str, Any]: The final completion event.
         """
         if not context.final_event_yielded:
-
+            pending_artifacts = context.pending_artifacts if context.pending_artifacts is not None else []
+            final_state = context.final_state or {}
+            completion_event = self._create_completion_event(pending_artifacts, final_state)
             self._log_streaming_event_debug("final_completion", completion_event)
             yield completion_event

@@ -2456,7 +2540,7 @@ class BaseLangGraphAgent(BaseAgent):
         self,
         context: "_StreamingContext",
         error: Exception,
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Handle streaming errors gracefully.

         Provides error handling for the A2A streaming process, ensuring errors

@@ -2473,11 +2557,14 @@ class BaseLangGraphAgent(BaseAgent):
             error message, optionally including any pending artifacts.
         """
         logger.error(f"Error in agent stream: {error}", exc_info=True)
-        error_event =
-
-
-
-
+        error_event = self._create_a2a_event(
+            event_type=A2AStreamEventType.ERROR,
+            content=f"Error: {str(error)}",
+            metadata={"status": "failed"},
+            artifacts=context.pending_artifacts,
+            is_final=True,
+        )
+        error_event["status"] = "failed"
         self._log_streaming_event_debug("error_event", error_event)
         yield error_event

@@ -2533,7 +2620,7 @@ class BaseLangGraphAgent(BaseAgent):

         return metadata

-    def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]):
+    def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]) -> A2AEvent:
         """Helper to create the completion event with artifacts and references if available.

         This method is used to create the completion event with artifacts and references if available.

@@ -2587,7 +2674,7 @@ class BaseLangGraphAgent(BaseAgent):
         else:
             return tool_name[:4]

-    def _generate_tool_call_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
+    def _generate_tool_call_step_id(self, tool_info: dict[str, Any] | None, counter: int) -> str:
         """Generate step_id for tool call events.

         Args:

@@ -2623,7 +2710,7 @@ class BaseLangGraphAgent(BaseAgent):

         return f"{category}_{combined_name}_parent_{counter:03d}"

-    def _generate_tool_result_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
+    def _generate_tool_result_step_id(self, tool_info: dict[str, Any] | None, counter: int) -> str:
         """Generate step_id for tool result events.

         Args:

@@ -2749,7 +2836,7 @@ class BaseLangGraphAgent(BaseAgent):
     def _create_a2a_event(  # noqa: PLR0913
         self,
         event_type: A2AStreamEventType,
-        content:
+        content: Any,
         metadata: dict[str, Any] | None = None,
         tool_info: dict[str, Any] | None = None,
         thinking_and_activity_info: dict[str, Any] | None = None,

@@ -2787,9 +2874,11 @@ class BaseLangGraphAgent(BaseAgent):
         # Inject cumulative time since the first STATUS_UPDATE for this thread
         # Do not set cumulative time here; server executor enforces it for all SSE events

+        normalized_content = self._normalize_event_content(content)
+
         event = {
             "event_type": event_type,
-            "content":
+            "content": normalized_content,
             "metadata": enriched_metadata,
             "tool_info": tool_info,
             "is_final": is_final,

@@ -2803,7 +2892,7 @@ class BaseLangGraphAgent(BaseAgent):
         event["thinking_and_activity_info"] = thinking_and_activity_info

         try:
-            content_preview =
+            content_preview = normalized_content
             logger.info(
                 "A2A emitting event: type=%s step_id=%s final=%s preview=%s",
                 getattr(event_type, "value", event_type),