aip-agents-binary 0.5.21__py3-none-macosx_13_0_arm64.whl → 0.6.8__py3-none-macosx_13_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/agent/__init__.py +44 -4
- aip_agents/agent/base_langgraph_agent.py +169 -74
- aip_agents/agent/base_langgraph_agent.pyi +3 -2
- aip_agents/agent/langgraph_memory_enhancer_agent.py +368 -34
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +3 -2
- aip_agents/agent/langgraph_react_agent.py +424 -35
- aip_agents/agent/langgraph_react_agent.pyi +46 -2
- aip_agents/examples/{hello_world_langgraph_bosa_twitter.py → hello_world_langgraph_gl_connector_twitter.py} +10 -7
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
- aip_agents/examples/hello_world_ptc.py +49 -0
- aip_agents/examples/hello_world_ptc.pyi +5 -0
- aip_agents/examples/hello_world_ptc_custom_tools.py +83 -0
- aip_agents/examples/hello_world_ptc_custom_tools.pyi +7 -0
- aip_agents/examples/hello_world_sentry.py +2 -2
- aip_agents/examples/hello_world_tool_output_client.py +9 -0
- aip_agents/examples/tools/multiply_tool.py +43 -0
- aip_agents/examples/tools/multiply_tool.pyi +18 -0
- aip_agents/guardrails/__init__.py +83 -0
- aip_agents/guardrails/__init__.pyi +6 -0
- aip_agents/guardrails/engines/__init__.py +69 -0
- aip_agents/guardrails/engines/__init__.pyi +4 -0
- aip_agents/guardrails/engines/base.py +90 -0
- aip_agents/guardrails/engines/base.pyi +61 -0
- aip_agents/guardrails/engines/nemo.py +101 -0
- aip_agents/guardrails/engines/nemo.pyi +46 -0
- aip_agents/guardrails/engines/phrase_matcher.py +113 -0
- aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
- aip_agents/guardrails/exceptions.py +39 -0
- aip_agents/guardrails/exceptions.pyi +23 -0
- aip_agents/guardrails/manager.py +163 -0
- aip_agents/guardrails/manager.pyi +42 -0
- aip_agents/guardrails/middleware.py +199 -0
- aip_agents/guardrails/middleware.pyi +87 -0
- aip_agents/guardrails/schemas.py +63 -0
- aip_agents/guardrails/schemas.pyi +43 -0
- aip_agents/guardrails/utils.py +45 -0
- aip_agents/guardrails/utils.pyi +19 -0
- aip_agents/mcp/client/__init__.py +38 -2
- aip_agents/mcp/client/connection_manager.py +36 -1
- aip_agents/mcp/client/connection_manager.pyi +3 -0
- aip_agents/mcp/client/persistent_session.py +318 -65
- aip_agents/mcp/client/persistent_session.pyi +9 -0
- aip_agents/mcp/client/transports.py +52 -4
- aip_agents/mcp/client/transports.pyi +9 -0
- aip_agents/memory/adapters/base_adapter.py +98 -0
- aip_agents/memory/adapters/base_adapter.pyi +25 -0
- aip_agents/middleware/base.py +8 -0
- aip_agents/middleware/base.pyi +4 -0
- aip_agents/middleware/manager.py +22 -0
- aip_agents/middleware/manager.pyi +4 -0
- aip_agents/ptc/__init__.py +87 -0
- aip_agents/ptc/__init__.pyi +14 -0
- aip_agents/ptc/custom_tools.py +473 -0
- aip_agents/ptc/custom_tools.pyi +184 -0
- aip_agents/ptc/custom_tools_payload.py +400 -0
- aip_agents/ptc/custom_tools_payload.pyi +31 -0
- aip_agents/ptc/custom_tools_templates/__init__.py +1 -0
- aip_agents/ptc/custom_tools_templates/__init__.pyi +0 -0
- aip_agents/ptc/custom_tools_templates/custom_build_function.py.template +23 -0
- aip_agents/ptc/custom_tools_templates/custom_init.py.template +15 -0
- aip_agents/ptc/custom_tools_templates/custom_invoke.py.template +60 -0
- aip_agents/ptc/custom_tools_templates/custom_registry.py.template +87 -0
- aip_agents/ptc/custom_tools_templates/custom_sources_init.py.template +7 -0
- aip_agents/ptc/custom_tools_templates/custom_wrapper.py.template +19 -0
- aip_agents/ptc/doc_gen.py +122 -0
- aip_agents/ptc/doc_gen.pyi +40 -0
- aip_agents/ptc/exceptions.py +57 -0
- aip_agents/ptc/exceptions.pyi +37 -0
- aip_agents/ptc/executor.py +261 -0
- aip_agents/ptc/executor.pyi +99 -0
- aip_agents/ptc/mcp/__init__.py +45 -0
- aip_agents/ptc/mcp/__init__.pyi +7 -0
- aip_agents/ptc/mcp/sandbox_bridge.py +668 -0
- aip_agents/ptc/mcp/sandbox_bridge.pyi +47 -0
- aip_agents/ptc/mcp/templates/__init__.py +1 -0
- aip_agents/ptc/mcp/templates/__init__.pyi +0 -0
- aip_agents/ptc/mcp/templates/mcp_client.py.template +239 -0
- aip_agents/ptc/naming.py +196 -0
- aip_agents/ptc/naming.pyi +85 -0
- aip_agents/ptc/payload.py +26 -0
- aip_agents/ptc/payload.pyi +15 -0
- aip_agents/ptc/prompt_builder.py +673 -0
- aip_agents/ptc/prompt_builder.pyi +59 -0
- aip_agents/ptc/ptc_helper.py +16 -0
- aip_agents/ptc/ptc_helper.pyi +1 -0
- aip_agents/ptc/sandbox_bridge.py +256 -0
- aip_agents/ptc/sandbox_bridge.pyi +38 -0
- aip_agents/ptc/template_utils.py +33 -0
- aip_agents/ptc/template_utils.pyi +13 -0
- aip_agents/ptc/templates/__init__.py +1 -0
- aip_agents/ptc/templates/__init__.pyi +0 -0
- aip_agents/ptc/templates/ptc_helper.py.template +134 -0
- aip_agents/ptc/tool_def_helpers.py +101 -0
- aip_agents/ptc/tool_def_helpers.pyi +38 -0
- aip_agents/ptc/tool_enrichment.py +163 -0
- aip_agents/ptc/tool_enrichment.pyi +60 -0
- aip_agents/sandbox/__init__.py +43 -0
- aip_agents/sandbox/__init__.pyi +5 -0
- aip_agents/sandbox/defaults.py +205 -0
- aip_agents/sandbox/defaults.pyi +30 -0
- aip_agents/sandbox/e2b_runtime.py +295 -0
- aip_agents/sandbox/e2b_runtime.pyi +57 -0
- aip_agents/sandbox/template_builder.py +131 -0
- aip_agents/sandbox/template_builder.pyi +36 -0
- aip_agents/sandbox/types.py +24 -0
- aip_agents/sandbox/types.pyi +14 -0
- aip_agents/sandbox/validation.py +50 -0
- aip_agents/sandbox/validation.pyi +20 -0
- aip_agents/sentry/__init__.py +1 -1
- aip_agents/sentry/sentry.py +33 -12
- aip_agents/sentry/sentry.pyi +5 -4
- aip_agents/tools/__init__.py +20 -3
- aip_agents/tools/__init__.pyi +4 -2
- aip_agents/tools/browser_use/browser_use_tool.py +8 -0
- aip_agents/tools/browser_use/streaming.py +2 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +80 -31
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +25 -9
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +6 -6
- aip_agents/tools/constants.py +24 -12
- aip_agents/tools/constants.pyi +14 -11
- aip_agents/tools/date_range_tool.py +554 -0
- aip_agents/tools/date_range_tool.pyi +21 -0
- aip_agents/tools/execute_ptc_code.py +357 -0
- aip_agents/tools/execute_ptc_code.pyi +90 -0
- aip_agents/tools/gl_connector/__init__.py +1 -1
- aip_agents/tools/gl_connector/tool.py +62 -30
- aip_agents/tools/gl_connector/tool.pyi +3 -3
- aip_agents/tools/gl_connector_tools.py +119 -0
- aip_agents/tools/gl_connector_tools.pyi +39 -0
- aip_agents/tools/memory_search/__init__.py +8 -1
- aip_agents/tools/memory_search/__init__.pyi +3 -3
- aip_agents/tools/memory_search/mem0.py +114 -1
- aip_agents/tools/memory_search/mem0.pyi +11 -1
- aip_agents/tools/memory_search/schema.py +33 -0
- aip_agents/tools/memory_search/schema.pyi +10 -0
- aip_agents/tools/memory_search_tool.py +8 -0
- aip_agents/tools/memory_search_tool.pyi +2 -2
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +26 -1
- aip_agents/utils/langgraph/tool_output_management.py +80 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +37 -0
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/METADATA +14 -22
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/RECORD +144 -58
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/WHEEL +1 -1
- aip_agents/examples/demo_memory_recall.py +0 -401
- aip_agents/examples/demo_memory_recall.pyi +0 -58
- aip_agents/examples/hello_world_langgraph_bosa_twitter.pyi +0 -5
- aip_agents/tools/bosa_tools.py +0 -105
- aip_agents/tools/bosa_tools.pyi +0 -37
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/top_level.txt +0 -0
aip_agents/agent/__init__.py
CHANGED
@@ -6,13 +6,25 @@ Author:
     Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
 """
 
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
 from aip_agents.agent.base_agent import BaseAgent
+
+if TYPE_CHECKING:
+    from aip_agents.agent.google_adk_agent import GoogleADKAgent
+    from aip_agents.agent.langflow_agent import LangflowAgent
 from aip_agents.agent.base_langgraph_agent import BaseLangGraphAgent
-from aip_agents.agent.google_adk_agent import GoogleADKAgent
 from aip_agents.agent.interface import AgentInterface
-from aip_agents.agent.
-
-
+from aip_agents.agent.langgraph_memory_enhancer_agent import (
+    LangGraphMemoryEnhancerAgent,
+)
+from aip_agents.agent.langgraph_react_agent import (
+    LangChainAgent,
+    LangGraphAgent,
+    LangGraphReactAgent,
+)
 
 __all__ = [
     "AgentInterface",
@@ -25,3 +37,31 @@ __all__ = [
     "LangflowAgent",
     "LangGraphMemoryEnhancerAgent",
 ]
+
+
+def __getattr__(name: str) -> Any:
+    """Lazy import of heavy agent implementations.
+
+    This avoids importing heavy dependencies (Google ADK, etc.)
+    when they are not needed.
+
+    Args:
+        name: Attribute name to import.
+
+    Returns:
+        The requested class.
+
+    Raises:
+        AttributeError: If attribute is not found.
+    """
+    if name == "GoogleADKAgent":
+        from aip_agents.agent.google_adk_agent import (
+            GoogleADKAgent as _GoogleADKAgent,
+        )
+
+        return _GoogleADKAgent
+    elif name == "LangflowAgent":
+        from aip_agents.agent.langflow_agent import LangflowAgent as _LangflowAgent
+
+        return _LangflowAgent
+    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
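The new module __getattr__ implements the PEP 562 lazy-import pattern: GoogleADKAgent and LangflowAgent are only imported on first attribute access, while the TYPE_CHECKING block keeps them visible to static type checkers. A minimal self-contained sketch of the same pattern, with illustrative module and class names (not from the package):

# lazy_pkg/__init__.py, a PEP 562 lazy-import sketch (illustrative names)
from __future__ import annotations

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Imported only for type checkers; no runtime cost.
    from lazy_pkg.heavy import HeavyClient

__all__ = ["HeavyClient"]


def __getattr__(name: str) -> Any:
    """Import HeavyClient on first attribute access instead of at import time."""
    if name == "HeavyClient":
        from lazy_pkg.heavy import HeavyClient as _HeavyClient

        return _HeavyClient
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")

With this shape, `import lazy_pkg` stays cheap and only `lazy_pkg.HeavyClient` triggers the real import, which is why the two heavy agents were moved out of the eager import list above.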
aip_agents/agent/base_langgraph_agent.py
CHANGED
@@ -21,13 +21,13 @@ from concurrent.futures import Future
 from contextlib import suppress
 from contextvars import ContextVar
 from dataclasses import dataclass
-from typing import Annotated, Any
+from typing import Annotated, Any, cast
 
 from a2a.types import AgentCard
 from aiostream import stream as astream
-from gllm_core.event import EventEmitter
-from gllm_core.event.handler import StreamEventHandler
-from gllm_core.schema import Chunk
+from gllm_core.event import EventEmitter  # type: ignore[import-untyped]
+from gllm_core.event.handler import StreamEventHandler  # type: ignore[import-untyped]
+from gllm_core.schema import Chunk  # type: ignore[import-untyped]
 from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
 from langchain_core.tools import BaseTool
 from langgraph.graph import StateGraph
@@ -197,6 +197,7 @@ class BaseLangGraphAgent(BaseAgent):
         self.enable_a2a_token_streaming = enable_a2a_token_streaming
         self.event_emitter = event_emitter
         self.checkpointer = checkpointer
+        self.tool_output_manager = None
 
         self._mem0_client: Any | None = None
         self.memory: BaseMemory | None = None
@@ -384,10 +385,13 @@
         try:
             user_id = override_user_id or self.memory_agent_id
             if hasattr(self.memory, MemoryMethod.SEARCH):
-                results = self.memory.search(
+                results = self.memory.search(  # type: ignore[attr-defined]
+                    query=query,
+                    user_id=user_id,
+                    limit=self.memory_retrieval_limit,
+                )
                 return results if isinstance(results, list) else []
-
-            return []
+            return []
         except Exception as e:  # noqa: BLE001
             logger.debug(f"Memory: search ignored error: {e}")
             return []
@@ -415,7 +419,11 @@
             future = save_async(user_text=str(user_text), ai_text=str(ai_text), user_id=user_id)
             self._watch_memory_future(future, user_id)
         elif hasattr(self.memory, MemoryMethod.SAVE_INTERACTION):
-            self.memory.save_interaction(
+            self.memory.save_interaction(  # type: ignore[attr-defined]
+                user_text=str(user_text),
+                ai_text=str(ai_text),
+                user_id=user_id,
+            )
         else:
             logger.warning(
                 "Memory: save_interaction method NOT available on memory adapter "
@@ -447,6 +455,14 @@
 
         future.add_done_callback(_log_completion)
 
+    def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
+        """Return True when interaction should be saved to memory.
+
+        Subclasses can override this to skip persistence for specific response types.
+        """
+        del final_state
+        return True
+
     def _resolve_and_validate_tools(self) -> list[BaseTool]:
     """Resolve and validate regular tools for LangGraph usage.
 
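_should_save_interaction is a new template-method hook that defaults to persisting every interaction; per its docstring, subclasses override it to skip persistence for specific response types. A hypothetical override sketch (the subclass name and the transient_response flag are invented for illustration):

from typing import Any

from aip_agents.agent.base_langgraph_agent import BaseLangGraphAgent


class SkipTransientAgent(BaseLangGraphAgent):  # hypothetical subclass, for illustration only
    def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
        """Skip memory writes when the final state carries an invented transient flag."""
        if final_state and final_state.get("transient_response"):
            return False
        return True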
@@ -560,7 +576,11 @@
             return
 
         try:
-            tool
+            set_tool_config = getattr(tool, "set_tool_config", None)
+            if callable(set_tool_config):
+                set_tool_config(tool_config_data)
+            else:
+                raise AttributeError("set_tool_config not available")
             logger.info(f"Agent '{self.name}': Configured tool '{tool.name}' with agent defaults: {tool_config_data}")
         except Exception as e:
             logger.warning(f"Agent '{self.name}': Failed to configure tool '{tool.name}': {e}")
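The rewritten body probes for set_tool_config with getattr and callable() instead of calling it directly, so tools lacking the method are routed into the same warning path as genuine configuration failures. The duck-typing pattern in isolation, with a stand-in tool class (names here are illustrative):

from typing import Any


class _ConfigurableTool:  # stand-in for a tool with optional config support
    name = "demo_tool"

    def set_tool_config(self, config: dict[str, Any]) -> None:
        self._config = config


def apply_tool_config(tool: Any, tool_config_data: dict[str, Any]) -> None:
    """Call set_tool_config only when the tool actually provides it."""
    set_tool_config = getattr(tool, "set_tool_config", None)
    if callable(set_tool_config):
        set_tool_config(tool_config_data)
    else:
        raise AttributeError("set_tool_config not available")


apply_tool_config(_ConfigurableTool(), {"timeout": 30})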
@@ -598,7 +618,7 @@
         self._sanitize_tool_names()
         try:
             if self.state_schema:
-                graph_builder = StateGraph(self.state_schema)
+                graph_builder: StateGraph = StateGraph(self.state_schema)
             else:
 
                 class DefaultAgentState(TypedDict):
@@ -715,7 +735,7 @@
             return None
         last_item = list_state[-1]
         if isinstance(last_item, AIMessage) and getattr(last_item, "content", None) is not None:
-            output_content = last_item.content
+            output_content = self._normalize_event_content(last_item.content)
         elif isinstance(last_item, str):
             output_content = last_item
         return output_content
@@ -886,7 +906,12 @@
         formatted_output = self._format_graph_output(final_state_result)
 
         try:
-            self.
+            if self._should_save_interaction(final_state_result):
+                self._memory_save_interaction(
+                    user_text=query,
+                    ai_text=formatted_output,
+                    memory_user_id=memory_user_id,
+                )
         except Exception:
             pass
 
@@ -995,7 +1020,7 @@
         key = self.thread_id_key or "thread_id"
         return configurable.get(key)
 
-    def _process_langgraph_event(self, event:
+    def _process_langgraph_event(self, event: Any) -> str | dict[str, Any] | A2AEvent | None:
         """Process a single LangGraph streaming event.
 
         Args:
@@ -1045,6 +1070,36 @@
 
         return True
 
+    @staticmethod
+    def _normalize_usage_metadata(usage: Any | None) -> dict[str, Any] | None:
+        """Normalize usage metadata to a dictionary when possible.
+
+        Args:
+            usage: Usage metadata from LangChain messages.
+
+        Returns:
+            A dictionary copy when usage is available, otherwise None.
+        """
+        if usage is None:
+            return None
+        if isinstance(usage, dict):
+            return dict(usage)
+        return cast(dict[str, Any], usage)
+
+    @staticmethod
+    def _normalize_event_content(content: Any) -> str:
+        """Normalize event content to a string payload.
+
+        Args:
+            content: Raw content payload from LangChain/LangGraph.
+
+        Returns:
+            String representation suitable for A2A events.
+        """
+        if isinstance(content, str):
+            return content
+        return json.dumps(content)
+
     async def _stream_with_langgraph(self, query: str, **kwargs: Any) -> AsyncGenerator[str | dict[str, Any], None]:
         """Handle streaming for LangChain models using LangGraph's native streaming.
 
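The two new static helpers centralize coercion that callers previously did ad hoc: usage metadata becomes a plain dict (or None), and non-string event content is serialized with json.dumps. Standalone versions of the same logic behave like this:

import json
from typing import Any, cast


def normalize_usage_metadata(usage: Any | None) -> dict[str, Any] | None:
    # Copy dicts defensively; pass anything dict-like through a cast.
    if usage is None:
        return None
    if isinstance(usage, dict):
        return dict(usage)
    return cast(dict[str, Any], usage)


def normalize_event_content(content: Any) -> str:
    # Strings pass through untouched; structured content is serialized.
    if isinstance(content, str):
        return content
    return json.dumps(content)


assert normalize_event_content("hello") == "hello"
assert normalize_event_content([{"type": "text", "text": "hi"}]) == '[{"type": "text", "text": "hi"}]'
assert normalize_usage_metadata(None) is None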
@@ -1135,9 +1190,13 @@
         logger.info(f"Agent '{self.name}': Initializing MCP tools with persistent sessions.")
 
         # Add timeout for initialization to prevent hanging
-
+        mcp_client = self.mcp_client
+        if mcp_client is None:
+            return
+
+        await asyncio.wait_for(mcp_client.initialize(), timeout=30.0)
 
-        mcp_tools = await
+        mcp_tools = await mcp_client.get_tools()
 
         if not mcp_tools:
             logger.warning(f"Agent '{self.name}': No MCP tools retrieved from configured servers.")
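MCP initialization now returns early when no client is configured and bounds initialize() with asyncio.wait_for, so a hung MCP server cannot stall agent startup. The timeout pattern in isolation, with a stand-in coroutine and a deliberately short bound so the demo finishes quickly (the diff itself uses timeout=30.0):

import asyncio


async def initialize() -> None:
    """Stand-in for mcp_client.initialize(); simulates a server that never answers."""
    await asyncio.sleep(3600)


async def main() -> None:
    try:
        await asyncio.wait_for(initialize(), timeout=0.1)
    except asyncio.TimeoutError:
        print("MCP initialization timed out; agent startup continues")


asyncio.run(main())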
@@ -1169,7 +1228,7 @@
             logger.warning(f"Agent '{self.name}': Error during MCP client cleanup: {e}")
             # Don't re-raise - cleanup should be best-effort
 
-    async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[
+    async def arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[A2AEvent, None]:
         """Asynchronously streams the agent's response in A2A format.
 
         Args:
@@ -1190,7 +1249,7 @@
         task_id: str | None = None,
         context_id: str | None = None,
         **kwargs: Any,
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Stream agent response as SSE-compatible chunks.
 
         This method wraps arun_a2a_stream and transforms output to the normalized
@@ -1222,9 +1281,16 @@
         pii_mapping = kwargs.get("pii_mapping")
         transformer = SSEChunkTransformer(task_id=task_id, context_id=context_id, pii_mapping=pii_mapping)
         try:
-
+            stream = self.arun_a2a_stream(query, **kwargs)
+            async for chunk in transformer.transform_stream(stream):
                 yield chunk
         except Exception as e:
+            # Lazy import to support optional guardrails dependency
+            from aip_agents.guardrails.exceptions import GuardrailViolationError
+
+            if isinstance(e, GuardrailViolationError):
+                # Re-raise guardrail violations without modification
+                raise
             logger.error(f"Agent '{self.name}': Error in arun_sse_stream: {e}", exc_info=True)
             yield SSEChunkTransformer._create_error_chunk(f"Error during streaming: {e}")
 
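arun_sse_stream now defers the guardrails import, keeping guardrails an optional dependency, and re-raises GuardrailViolationError instead of folding it into a generic error chunk. A consumer-side sketch of how a caller might distinguish violations from ordinary stream errors; the handling logic is illustrative, not from the package docs:

from typing import Any

from aip_agents.guardrails.exceptions import GuardrailViolationError


async def consume(agent: Any, query: str) -> None:
    """Drain the SSE stream, treating guardrail violations as a distinct outcome."""
    try:
        async for chunk in agent.arun_sse_stream(query):
            print(chunk)
    except GuardrailViolationError as violation:
        # The agent re-raises violations unmodified instead of emitting an error chunk.
        print(f"blocked by guardrail: {violation}")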
@@ -1352,14 +1418,20 @@
         Returns:
             A2AEvent with TOOL_CALL event type and structured tool information.
         """
-        tool_calls_details = [
-
-
-
-
-
-
-
+        tool_calls_details: list[dict[str, Any]] = []
+        manager = getattr(self, "tool_output_manager", None)
+        thread_id = _THREAD_ID_CVAR.get()
+        for tool_call in message.tool_calls:
+            args = tool_call["args"]
+            if manager and thread_id and isinstance(args, dict):
+                args = manager.rewrite_args_with_latest_reference(args, thread_id)
+            tool_calls_details.append(
+                {
+                    "id": tool_call.get("id"),
+                    "name": tool_call["name"],
+                    "args": args,
+                }
+            )
         tool_names = [details["name"] for details in tool_calls_details]
 
         event = self._create_a2a_event(
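Tool-call details are now built in an explicit loop so that, when a tool_output_manager and thread id are available, each call's args can be rewritten to reference the latest stored tool output before the event is emitted. A sketch with an invented stub manager; only the rewrite_args_with_latest_reference method name mirrors the diff:

from typing import Any


class StubOutputManager:  # invented stand-in for the package's tool output manager
    def rewrite_args_with_latest_reference(self, args: dict[str, Any], thread_id: str) -> dict[str, Any]:
        # Replace a placeholder with a reference to the latest output for this thread.
        return {k: f"ref:{thread_id}:latest" if v == "$latest" else v for k, v in args.items()}


manager = StubOutputManager()
tool_calls = [{"id": "call_1", "name": "summarize", "args": {"text": "$latest"}}]

details: list[dict[str, Any]] = []
for tool_call in tool_calls:
    args = tool_call["args"]
    if manager and isinstance(args, dict):
        args = manager.rewrite_args_with_latest_reference(args, "thread-42")
    details.append({"id": tool_call.get("id"), "name": tool_call["name"], "args": args})

print(details)  # args now reference the stored output instead of the placeholder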
@@ -1368,7 +1440,7 @@
             tool_info={"tool_calls": tool_calls_details, "status": "running"},
             metadata={"status": Status.RUNNING},
             is_final=False,
-            step_usage=message.usage_metadata,
+            step_usage=self._normalize_usage_metadata(message.usage_metadata),
         )
 
         self._record_emitted_tool_calls(tool_calls_details)
@@ -1588,8 +1660,9 @@
         """
         is_final_response = self._is_final_response(message)
         metadata = self._build_metadata_for_final_response(is_final_response, state)
+        raw_content = message.content
         content = deanonymize_final_response_content(
-            content=
+            content=raw_content if isinstance(raw_content, str) else json.dumps(raw_content),
             is_final_response=is_final_response,
             metadata=metadata,
         )
@@ -1599,7 +1672,7 @@
             tool_info=None,
             metadata=metadata,
             is_final=is_final_response,
-            step_usage=message.usage_metadata,
+            step_usage=self._normalize_usage_metadata(message.usage_metadata),
         )
         return event, is_final_response
 
@@ -1876,7 +1949,7 @@
         """
         current_thread_id: str | None = None
         try:
-            configurable = config.get("configurable", {})
+            configurable = config.get("configurable", {})
             thread_key = self.thread_id_key or "thread_id"
             current_thread_id = str(configurable.get(thread_key)) if configurable.get(thread_key) else None
         except Exception:
@@ -1958,7 +2031,7 @@
         )
         return events, is_final, updated_message_count
 
-    async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[
+    async def _arun_a2a_stream(self, query: str, **kwargs: Any) -> AsyncGenerator[A2AEvent, None]:
         """Internal implementation of arun_a2a_stream without MCP handling.
 
         Args:
@@ -2025,7 +2098,7 @@
             enable_token_streaming=self.enable_a2a_token_streaming,
         )
 
-    async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[
+    async def _handle_streaming_process(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Handle the main streaming process including initial status and event processing.
 
         Args:
@@ -2042,7 +2115,7 @@
         self._log_streaming_event_debug("process_stream_item", event)
         yield event
 
-    def _create_initial_status_event(self) ->
+    def _create_initial_status_event(self) -> A2AEvent:
         """Create and setup the initial status event."""
         initial_status_event = self._create_a2a_event(
             event_type=A2AStreamEventType.STATUS_UPDATE, content=DefaultStepMessages.EN.value
@@ -2059,7 +2132,7 @@
 
         return initial_status_event
 
-    async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[
+    async def _process_streaming_items(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Process individual streaming items from the LangGraph execution.
 
         Handles the core streaming logic by iterating through items produced by
@@ -2092,9 +2165,7 @@
         async for event in self._create_graph_stream_events(enhanced_input, context):
             yield event
 
-    async def _process_a2a_streaming_with_tokens(
-        self, context: "_StreamingContext"
-    ) -> AsyncGenerator[dict[str, Any], None]:
+    async def _process_a2a_streaming_with_tokens(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Process A2A streaming with token streaming support using aiostream.
 
         Supports both LM Invoker and LangChain models by detecting the appropriate
@@ -2127,6 +2198,9 @@
         token_stream, enhanced_input = self._create_token_stream(context)
         graph_stream = self._create_graph_stream_events(enhanced_input, context)
 
+        if token_stream is None:
+            raise RuntimeError(f"Agent '{self.name}': Token stream not available for LM invoker.")
+
         merged = astream.merge(token_stream, graph_stream)
         async with merged.stream() as merged_stream:
             async for event in merged_stream:
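Token streaming interleaves two async generators with aiostream's merge, and the new None guard fails fast instead of letting merge receive a missing stream. A minimal runnable example of the same aiostream merge/stream() usage, with toy event sources:

import asyncio

from aiostream import stream as astream


async def tokens():
    for t in ("hel", "lo"):
        await asyncio.sleep(0.01)
        yield {"kind": "token", "content": t}


async def graph_events():
    await asyncio.sleep(0.015)
    yield {"kind": "status", "content": "node finished"}


async def main() -> None:
    merged = astream.merge(tokens(), graph_events())
    async with merged.stream() as merged_stream:
        # Items arrive in completion order, interleaving both sources.
        async for event in merged_stream:
            print(event)


asyncio.run(main())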
@@ -2142,7 +2216,7 @@
             logger.error(f"Agent '{self.name}': Error during A2A token streaming: {e}")
             raise
 
-    async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[
+    async def _create_lm_invoker_token_stream(self) -> AsyncGenerator[A2AEvent, None]:
         """Generate A2A events from LM Invoker token stream.
 
         Uses StreamEventHandler to capture tokens emitted by LM Invoker.
@@ -2154,6 +2228,8 @@
             RuntimeError: If no StreamEventHandler is found in event_emitter.
         """
         stream_handler = self._get_stream_handler()
+        if stream_handler is None:
+            raise RuntimeError(f"Agent '{self.name}': StreamEventHandler is required for token streaming.")
 
         try:
             async for event in stream_handler.stream():
@@ -2169,7 +2245,7 @@
     def _create_token_stream(
         self,
         context: "_StreamingContext",
-    ) -> tuple[AsyncGenerator[
+    ) -> tuple[AsyncGenerator[A2AEvent, None] | None, dict[str, Any]]:
         """Create appropriate token stream and enhanced input for the active model backend.
 
         Args:
@@ -2191,7 +2267,7 @@
 
     async def _create_graph_stream_events(
         self, enhanced_input: dict[str, Any], context: "_StreamingContext"
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Generate A2A events from graph execution.
 
         Args:
@@ -2210,8 +2286,9 @@
         async for item in graph_execution:
             stream_mode, stream_data = item
 
-            if stream_mode == StreamMode.MESSAGES:
-
+            if stream_mode == StreamMode.MESSAGES.value:
+                message_data = cast(tuple[Any, dict[str, Any]], stream_data)
+                async for token_event in self._process_message_stream_item(message_data):
                     yield token_event
                 continue
 
@@ -2230,10 +2307,10 @@
         Returns:
             List of stream modes to use for graph execution.
         """
-        stream_modes = [StreamMode.VALUES, StreamMode.CUSTOM]
+        stream_modes = [StreamMode.VALUES.value, StreamMode.CUSTOM.value]
 
         if context.enable_token_streaming and not self._has_lm_invoker():
-            stream_modes.append(StreamMode.MESSAGES)
+            stream_modes.append(StreamMode.MESSAGES.value)
 
         return stream_modes
 
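Several comparisons switch from StreamMode enum members to their .value strings: when a stream tags each item with the mode as a plain string, comparing against a non-str enum member silently never matches. A toy illustration of the pitfall, with a local stand-in enum rather than the package's StreamMode:

from enum import Enum


class StreamMode(Enum):  # local stand-in; a plain (non-str) Enum
    VALUES = "values"
    MESSAGES = "messages"


stream_mode = "messages"  # what a stream yields as the mode tag

print(stream_mode == StreamMode.MESSAGES)        # False: str vs Enum member
print(stream_mode == StreamMode.MESSAGES.value)  # True: str vs str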
@@ -2243,7 +2320,7 @@
         stream_mode: str,
         stream_data: Any,
         context: "_StreamingContext",
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Process a single graph stream item and yield A2A events.
 
         Args:
@@ -2255,10 +2332,12 @@
         Yields:
             A2A events generated from the stream item.
         """
-        context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES else context.final_state
+        context.final_state = copy.copy(stream_data) if stream_mode == StreamMode.VALUES.value else context.final_state
 
+        pending_artifacts = context.pending_artifacts if context.pending_artifacts is not None else []
+        seen_artifact_hashes = context.seen_artifact_hashes if context.seen_artifact_hashes is not None else set()
         events, is_final, context.processed_message_count = self._handle_stream_item(
-            item,
+            item, pending_artifacts, seen_artifact_hashes, context.processed_message_count
         )
 
         if is_final:
@@ -2271,7 +2350,7 @@
 
     async def _process_message_stream_item(
         self, message_data: tuple[Any, dict[str, Any]]
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Process message stream items to extract token events.
 
         The "messages" stream mode yields tuples of (AIMessageChunk, metadata).
@@ -2308,9 +2387,7 @@
         except Exception as e:
             logger.error(f"Agent '{self.name}': Error processing message stream item: {e}")
 
-    def _update_final_response_for_streaming(
-        self, context: "_StreamingContext", event: dict[str, Any]
-    ) -> dict[str, Any]:
+    def _update_final_response_for_streaming(self, context: "_StreamingContext", event: A2AEvent) -> A2AEvent:
         """Update final response events with appropriate streaming configuration.
 
         For FINAL_RESPONSE events, this method updates the metadata and optionally clears
@@ -2324,13 +2401,17 @@
             The processed event dictionary with updated metadata and content
         """
         if event.get("event_type") == A2AStreamEventType.FINAL_RESPONSE:
-            event
+            metadata = event.get("metadata")
+            if not isinstance(metadata, dict):
+                metadata = {}
+                event["metadata"] = metadata
+            metadata[MetadataFieldKeys.TOKEN_STREAMING] = False
             if context.enable_token_streaming:
                 event["content"] = ""
-
+                metadata[MetadataFieldKeys.TOKEN_STREAMING] = True
         return event
 
-    def _convert_raw_token_to_a2a_event(self, raw_event: str) ->
+    def _convert_raw_token_to_a2a_event(self, raw_event: str) -> A2AEvent | None:
         """Parse raw token event into A2A event.
 
         Args:
@@ -2353,7 +2434,7 @@
             logger.debug(f"Agent '{self.name}': Error parsing token event: {e}")
             return None
 
-    def _capture_final_content_if_needed(self, context: "_StreamingContext", event:
+    def _capture_final_content_if_needed(self, context: "_StreamingContext", event: A2AEvent) -> None:
         """Capture final content from A2A events for memory persistence.
 
         Monitors A2A events for final response content and triggers early memory
@@ -2374,7 +2455,7 @@
                 and isinstance(context.last_final_content, str)
                 and context.last_final_content
             )
-            if should_save_early:
+            if should_save_early and self._should_save_interaction(context.final_state):
                 try:
                     logger.info(
                         "Agent '%s': A2A persisting memory early (len=%d) for user_id='%s'",
@@ -2393,6 +2474,8 @@
                     context.saved_memory = True
                 except Exception:
                     pass
+            elif should_save_early:
+                context.saved_memory = True
         except Exception:
             pass
 
@@ -2424,14 +2507,19 @@
                 )
             except Exception:
                 pass
-            self.
-
-
-
+            if self._should_save_interaction(context.final_state):
+                self._memory_save_interaction(
+                    user_text=context.original_query,
+                    ai_text=final_text,
+                    memory_user_id=context.memory_user_id,
+                )
+                context.saved_memory = True
+            else:
+                context.saved_memory = True
         except Exception:
             pass
 
-    async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[
+    async def _ensure_final_completion(self, context: "_StreamingContext") -> AsyncGenerator[A2AEvent, None]:
         """Ensure final completion events are yielded if not already done.
 
         Args:
@@ -2442,7 +2530,9 @@
             dict[str, Any]: The final completion event.
         """
         if not context.final_event_yielded:
-
+            pending_artifacts = context.pending_artifacts if context.pending_artifacts is not None else []
+            final_state = context.final_state or {}
+            completion_event = self._create_completion_event(pending_artifacts, final_state)
             self._log_streaming_event_debug("final_completion", completion_event)
             yield completion_event
 
@@ -2450,7 +2540,7 @@
         self,
         context: "_StreamingContext",
         error: Exception,
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[A2AEvent, None]:
         """Handle streaming errors gracefully.
 
         Provides error handling for the A2A streaming process, ensuring errors
@@ -2467,11 +2557,14 @@
             error message, optionally including any pending artifacts.
         """
         logger.error(f"Error in agent stream: {error}", exc_info=True)
-        error_event =
-
-
-
-
+        error_event = self._create_a2a_event(
+            event_type=A2AStreamEventType.ERROR,
+            content=f"Error: {str(error)}",
+            metadata={"status": "failed"},
+            artifacts=context.pending_artifacts,
+            is_final=True,
+        )
+        error_event["status"] = "failed"
         self._log_streaming_event_debug("error_event", error_event)
         yield error_event
 
@@ -2527,7 +2620,7 @@
 
         return metadata
 
-    def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]):
+    def _create_completion_event(self, pending_artifacts: list, final_state: dict[str, Any]) -> A2AEvent:
         """Helper to create the completion event with artifacts and references if available.
 
         This method is used to create the completion event with artifacts and references if available.
@@ -2581,7 +2674,7 @@
         else:
             return tool_name[:4]
 
-    def _generate_tool_call_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
+    def _generate_tool_call_step_id(self, tool_info: dict[str, Any] | None, counter: int) -> str:
         """Generate step_id for tool call events.
 
         Args:
@@ -2617,7 +2710,7 @@
 
         return f"{category}_{combined_name}_parent_{counter:03d}"
 
-    def _generate_tool_result_step_id(self, tool_info: dict[str, Any], counter: int) -> str:
+    def _generate_tool_result_step_id(self, tool_info: dict[str, Any] | None, counter: int) -> str:
         """Generate step_id for tool result events.
 
         Args:
@@ -2743,7 +2836,7 @@
     def _create_a2a_event(  # noqa: PLR0913
         self,
         event_type: A2AStreamEventType,
-        content:
+        content: Any,
         metadata: dict[str, Any] | None = None,
         tool_info: dict[str, Any] | None = None,
         thinking_and_activity_info: dict[str, Any] | None = None,
@@ -2781,9 +2874,11 @@
         # Inject cumulative time since the first STATUS_UPDATE for this thread
         # Do not set cumulative time here; server executor enforces it for all SSE events
 
+        normalized_content = self._normalize_event_content(content)
+
         event = {
             "event_type": event_type,
-            "content":
+            "content": normalized_content,
             "metadata": enriched_metadata,
             "tool_info": tool_info,
             "is_final": is_final,
@@ -2797,7 +2892,7 @@
         event["thinking_and_activity_info"] = thinking_and_activity_info
 
         try:
-            content_preview =
+            content_preview = normalized_content
             logger.info(
                 "A2A emitting event: type=%s step_id=%s final=%s preview=%s",
                 getattr(event_type, "value", event_type),