aip-agents-binary 0.5.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_langgraph_agent.py +2942 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_react_agent.py +2514 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/constants.py +23 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph_bosa_twitter.py +41 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/persistent_session.py +359 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/transports.py +215 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/base.py +88 -0
- aip_agents/middleware/manager.py +128 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/tools/__init__.py +33 -0
- aip_agents/tools/bosa_tools.py +105 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +257 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/constants.py +165 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/tool.py +351 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents_binary-0.5.20.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.20.dist-info/RECORD +280 -0
- aip_agents_binary-0.5.20.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.20.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
"""Utilities for assembling final response events with consistent structure.
|
|
2
|
+
|
|
3
|
+
This module wraps the lower-level ``_build_final_response_event`` helper and
|
|
4
|
+
provides a single entry point for producing final response payloads that may
|
|
5
|
+
include accumulated artifacts, custom metadata overrides, and additional
|
|
6
|
+
top-level fields.
|
|
7
|
+
|
|
8
|
+
Authors:
|
|
9
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
from dataclasses import dataclass
|
|
15
|
+
from datetime import UTC, datetime
|
|
16
|
+
from typing import Any
|
|
17
|
+
|
|
18
|
+
from aip_agents.utils.metadata_helper import Kind, MetadataFieldKeys, Status
|
|
19
|
+
|
|
20
|
+
# Metadata keys owned by the event builder itself. Caller-supplied extras
# using these keys are dropped by _normalized_metadata_extras so they cannot
# clobber the builder's kind/status/timestamp values.
_FINAL_METADATA_RESERVED_KEYS = {
    MetadataFieldKeys.KIND.value,
    MetadataFieldKeys.STATUS.value,
    "timestamp",
}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass(slots=True)
class FinalResponseMetadataOptions:
    """Container for optional metadata fields on final response events.

    Attributes left as ``None`` are omitted from the event metadata (see
    ``_metadata_overrides_from_options``); ``metadata_extra`` entries are
    merged afterwards with reserved keys filtered out.
    """

    # Identifier of the producing step; emitted under the "step_id" key.
    step_id: str | None = None
    # Identifiers of earlier steps; emitted under "previous_step_ids".
    previous_step_ids: list[str] | None = None
    # Tool details; emitted under MetadataFieldKeys.TOOL_INFO.
    tool_info: dict[str, Any] | None = None
    # Thinking/activity details; emitted under MetadataFieldKeys.THINKING_AND_ACTIVITY_INFO.
    thinking_and_activity_info: dict[str, Any] | None = None
    # Why the task finished; emitted under "completion_reason".
    completion_reason: str | None = None
    # Timeout value associated with the event; emitted under "timeout_seconds".
    timeout_seconds: float | None = None
    # Message payload; emitted under MetadataFieldKeys.MESSAGE.
    message: dict[str, Any] | None = None
    # Partial output captured before completion; emitted under "partial_result".
    partial_result: str | None = None
    # Free-form extras merged into metadata after reserved keys are dropped.
    metadata_extra: dict[str, Any] | None = None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _metadata_overrides_from_options(options: FinalResponseMetadataOptions) -> dict[str, Any]:
    """Collect the non-None option values as a metadata override mapping.

    Args:
        options: The metadata options to extract overrides from.
    """
    candidate_pairs = (
        ("step_id", options.step_id),
        ("previous_step_ids", options.previous_step_ids),
        (MetadataFieldKeys.TOOL_INFO, options.tool_info),
        (MetadataFieldKeys.THINKING_AND_ACTIVITY_INFO, options.thinking_and_activity_info),
        ("completion_reason", options.completion_reason),
        ("timeout_seconds", options.timeout_seconds),
        (MetadataFieldKeys.MESSAGE, options.message),
        ("partial_result", options.partial_result),
    )
    overrides: dict[str, Any] = {}
    for override_key, override_value in candidate_pairs:
        if override_value is not None:
            overrides[override_key] = override_value
    return overrides
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _normalized_metadata_extras(metadata_extra: dict[str, Any] | None) -> dict[str, Any]:
    """Return metadata extras with reserved keys removed and keys coerced to str.

    Args:
        metadata_extra: Additional metadata to normalize, or None.
    """
    if not metadata_extra:
        return {}
    return {
        str(raw_key): raw_value
        for raw_key, raw_value in metadata_extra.items()
        if str(raw_key) not in _FINAL_METADATA_RESERVED_KEYS
    }
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _update_optional_top_level_fields(
|
|
79
|
+
event: dict[str, Any],
|
|
80
|
+
status: str,
|
|
81
|
+
task_state: str,
|
|
82
|
+
artifacts: list[dict[str, Any]] | None,
|
|
83
|
+
) -> None:
|
|
84
|
+
"""Apply optional top-level fields when they are provided.
|
|
85
|
+
|
|
86
|
+
Args:
|
|
87
|
+
event: The event dictionary to update.
|
|
88
|
+
status: The status value to set.
|
|
89
|
+
task_state: The task state value to set.
|
|
90
|
+
artifacts: List of artifacts to include, or None.
|
|
91
|
+
"""
|
|
92
|
+
for key, value in (("status", status), ("task_state", task_state)):
|
|
93
|
+
if value:
|
|
94
|
+
event[key] = value
|
|
95
|
+
if artifacts:
|
|
96
|
+
event["artifacts"] = artifacts
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _ensure_timestamp_alignment(
|
|
100
|
+
event: dict[str, Any],
|
|
101
|
+
metadata: dict[str, Any],
|
|
102
|
+
timestamp: datetime | None,
|
|
103
|
+
) -> None:
|
|
104
|
+
"""Keep event-level and metadata timestamps synchronized.
|
|
105
|
+
|
|
106
|
+
Args:
|
|
107
|
+
event: The event dictionary containing timestamp information.
|
|
108
|
+
metadata: The metadata dictionary to update.
|
|
109
|
+
timestamp: The timestamp to align, or None.
|
|
110
|
+
"""
|
|
111
|
+
event_timestamp = event.get("timestamp")
|
|
112
|
+
if event_timestamp is not None:
|
|
113
|
+
metadata["timestamp"] = event_timestamp
|
|
114
|
+
return
|
|
115
|
+
|
|
116
|
+
metadata_timestamp = metadata.get("timestamp")
|
|
117
|
+
if metadata_timestamp is not None:
|
|
118
|
+
event["timestamp"] = metadata_timestamp
|
|
119
|
+
return
|
|
120
|
+
|
|
121
|
+
resolved_timestamp = (timestamp or datetime.now(UTC)).isoformat()
|
|
122
|
+
metadata["timestamp"] = resolved_timestamp
|
|
123
|
+
event["timestamp"] = resolved_timestamp
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def _build_final_response_event(
    *,
    content: str,
    metadata_options: FinalResponseMetadataOptions | None = None,
    event_extra: dict[str, Any] | None = None,
    timestamp: datetime | None = None,
) -> dict[str, Any]:
    """Build a standardized final_response event payload.

    Args:
        content: The human-readable message to include in the event.
        metadata_options: Optional container for per-event metadata overrides such as
            identifiers, tool info, localized messages, and completion details.
        event_extra: Additional top-level event fields to merge (e.g., task_id).
        timestamp: Explicit timestamp for the event; defaults to current UTC time.

    Returns:
        dict[str, Any]: Final response payload ready for SSE serialization.
    """
    resolved_timestamp = (timestamp or datetime.now(UTC)).isoformat()

    # Baseline metadata; option-driven overrides and extras layer on top.
    metadata: dict[str, Any] = {
        MetadataFieldKeys.KIND: Kind.FINAL_RESPONSE,
        MetadataFieldKeys.STATUS: Status.FINISHED,
        MetadataFieldKeys.TIME: 0.0,
        "timestamp": resolved_timestamp,
    }
    if metadata_options is not None:
        metadata.update(_metadata_overrides_from_options(metadata_options))
        metadata.update(_normalized_metadata_extras(metadata_options.metadata_extra))

    payload: dict[str, Any] = {
        "status": "success",
        "task_state": "completed",
        "content": content,
        "event_type": "final_response",
        "final": True,
        "metadata": metadata,
    }
    # Caller-supplied extras win over the defaults above.
    return {**payload, **(event_extra or {})}
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def assemble_final_response(
    *,
    content: str,
    artifacts: list[dict[str, Any]] | None = None,
    metadata_options: FinalResponseMetadataOptions | None = None,
    status: str = "success",
    task_state: str = "completed",
    extra_fields: dict[str, Any] | None = None,
    timestamp: datetime | None = None,
) -> dict[str, Any]:
    """Create a final response event with optional artifacts and overrides.

    Args:
        content: Human readable message for the final response.
        artifacts: Optional list of artifact dictionaries to attach.
        metadata_options: Metadata overrides passed through to the underlying builder.
        status: Top-level status string; defaults to ``"success"``.
        task_state: State string describing the task; defaults to ``"completed"``.
        extra_fields: Additional top-level fields to merge onto the event.
        timestamp: Explicit timestamp for the event. Defaults to ``datetime.now(UTC)``.

    Returns:
        dict[str, Any]: Final response event payload ready for downstream streaming.
    """
    response = _build_final_response_event(
        content=content,
        metadata_options=metadata_options,
        event_extra=extra_fields,
        timestamp=timestamp,
    )
    _update_optional_top_level_fields(response, status, task_state, artifacts)
    # extra_fields may have replaced "metadata"; realign timestamps on whatever
    # metadata dict the event actually carries.
    _ensure_timestamp_alignment(response, response.setdefault("metadata", {}), timestamp)
    return response
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
# Public API of this module; the underscore-prefixed builders stay internal.
__all__ = ["FinalResponseMetadataOptions", "assemble_final_response"]
|
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
"""Utilities for working with the shared formatter LLM invoker.
|
|
2
|
+
|
|
3
|
+
Authors:
|
|
4
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import os
|
|
11
|
+
import threading
|
|
12
|
+
from collections.abc import Awaitable, Callable
|
|
13
|
+
from typing import Any, TypeVar
|
|
14
|
+
|
|
15
|
+
from gllm_inference.builder import build_lm_invoker
|
|
16
|
+
from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker
|
|
17
|
+
|
|
18
|
+
from aip_agents.utils.logger import get_logger
|
|
19
|
+
|
|
20
|
+
# Module-level logger following the package's get_logger convention.
logger = get_logger(__name__)

# Environment variable naming the model used for formatter invocations.
FORMATTER_ENV_VAR = "DEFAULT_MODEL_FORMATTER"
# Result type of coroutines executed by FormatterLLMClient._run_coroutine_blocking.
T = TypeVar("T")
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class FormatterInvokerUnavailableError(RuntimeError):
    """Raised when no formatter LLM invoker can be resolved.

    Typically means ``DEFAULT_MODEL_FORMATTER`` is unset or a previous
    invoker initialization for the configured model failed.
    """
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class FormatterInvocationError(RuntimeError):
    """Raised when invoking the formatter LLM fails or exceeds its timeout."""
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class FormatterLLMClient:
    """Stateful helper that manages formatter invoker resolution and execution.

    Resolves the formatter model id from ``DEFAULT_MODEL_FORMATTER``, caches
    one invoker per model id, and remembers failed initializations via a
    sentinel so they are not retried on every call.
    """

    def __init__(self) -> None:
        """Initialize the formatter LLM client with caching and thread safety."""
        # Sentinel cached for model ids whose invoker construction failed,
        # so later resolutions return None without rebuilding.
        self._failed_sentinel = object()
        # Maps model id -> BaseLMInvoker, or _failed_sentinel on failure.
        self._invoker_cache: dict[str, BaseLMInvoker | object] = {}
        # Guards cache reads/writes across threads.
        self._lock = threading.Lock()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------
    def seed_default(self, default_model_id: str | None) -> None:
        """Populate ``DEFAULT_MODEL_FORMATTER`` when unset.

        Args:
            default_model_id: Preferred formatter model id to use as a fallback.
        """
        current = self._normalize_model_id(os.getenv(FORMATTER_ENV_VAR))
        if current:
            # Respect an explicitly configured formatter model.
            return

        fallback = self._normalize_model_id(default_model_id)
        if fallback:
            os.environ[FORMATTER_ENV_VAR] = fallback

    def resolve_invoker(self, *, reset_cache: bool = False) -> BaseLMInvoker | None:
        """Return the cached formatter invoker, optionally refreshing it.

        Args:
            reset_cache: When True, clear the cached invoker before resolving.

        Returns:
            BaseLMInvoker | None: Cached invoker if the formatter is configured, otherwise None.
        """
        resolved = self._normalize_model_id(os.getenv(FORMATTER_ENV_VAR))
        if not resolved:
            logger.warning("DEFAULT_MODEL_FORMATTER is not set; formatter summaries are disabled.")
            return None

        with self._lock:
            if reset_cache:
                self._invoker_cache.pop(resolved, None)

            cached = self._invoker_cache.get(resolved)
            if cached is self._failed_sentinel:
                # A previous build for this model id failed; do not retry.
                return None
            if cached is not None:
                return cached  # type: ignore[return-value]

            try:
                invoker = build_lm_invoker(model_id=resolved)
            except Exception as exc:  # pragma: no cover - best effort
                logger.warning("Failed to initialize formatter LLM invoker (%s): %s", resolved, exc)
                self._invoker_cache[resolved] = self._failed_sentinel
                return None

            self._invoker_cache[resolved] = invoker
            return invoker

    async def invoke(
        self,
        *args: Any,
        invoker: BaseLMInvoker | None = None,
        timeout: float | None = None,
        **kwargs: Any,
    ) -> Any:
        """Dispatch formatter prompts asynchronously with timeout/error handling.

        Args:
            *args: Positional arguments forwarded to the invoker.
            invoker: Explicit invoker instance to reuse instead of resolving one.
            timeout: Optional timeout (seconds) enforced with ``asyncio.timeout``.
            **kwargs: Keyword arguments forwarded to the invoker.

        Returns:
            Any: Result returned by the formatter LLM.

        Raises:
            FormatterInvokerUnavailableError: If no formatter model is configured.
            FormatterInvocationError: When the invocation fails or exceeds the timeout.
        """
        resolved = invoker or self.resolve_invoker()
        if not resolved:
            raise FormatterInvokerUnavailableError("Formatter LLM invoker is unavailable.")

        try:
            invocation = resolved.invoke(*args, **kwargs)
            if timeout is not None:
                async with asyncio.timeout(timeout):
                    return await invocation
            return await invocation
        except asyncio.CancelledError:
            # Propagate cancellation untouched so callers can unwind normally.
            raise
        except TimeoutError:
            raise FormatterInvocationError("Formatter LLM invocation timed out") from None
        except Exception as exc:  # pragma: no cover - best effort
            raise FormatterInvocationError(f"Formatter LLM invocation failed: {exc}") from exc

    def invoke_blocking(
        self,
        *args: Any,
        invoker: BaseLMInvoker | None = None,
        timeout: float | None = None,
        **kwargs: Any,
    ) -> Any:
        """Invoke the formatter LLM from synchronous contexts.

        Args:
            *args: Positional arguments forwarded to ``invoke``.
            invoker: Optional invoker to reuse.
            timeout: Optional timeout (seconds) for the async invocation.
            **kwargs: Keyword arguments forwarded to ``invoke``.

        Returns:
            Any: Result returned by the formatter LLM.
        """

        async def _runner() -> Any:
            return await self.invoke(*args, invoker=invoker, timeout=timeout, **kwargs)

        return self._run_coroutine_blocking(_runner)

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------
    @staticmethod
    def _normalize_model_id(value: str | None) -> str | None:
        """Strip whitespace and return the model id when a value was provided.

        Args:
            value: Raw model identifier pulled from the environment.

        Returns:
            str | None: Sanitized model id or None when the input was blank.
        """
        if isinstance(value, str):
            stripped = value.strip()
            if stripped:
                return stripped
        return None

    def _run_coroutine_blocking(self, factory: Callable[[], Awaitable[T]]) -> T:
        """Execute an awaitable from sync code, even when a loop is already running.

        Args:
            factory: Callable returning the coroutine to execute.

        Returns:
            T: Result of the awaited coroutine.
        """

        async def _wrapper() -> T:
            return await factory()

        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop in this thread: asyncio.run is safe to use directly.
            return asyncio.run(_wrapper())

        # A loop is already running in this thread; asyncio.run would raise,
        # so execute the coroutine on a private loop in a worker thread.
        result: dict[str, T] = {}
        error: list[BaseException] = []
        done = threading.Event()

        def _thread_runner() -> None:
            try:
                value = asyncio.run(_wrapper())
                result["value"] = value
            except Exception as exc:
                error.append(exc)
            finally:
                done.set()

        thread = threading.Thread(target=_thread_runner, daemon=True)
        thread.start()
        done.wait()
        thread.join()

        if error:
            # Re-raise the worker's exception in the caller's thread.
            raise error[0]
        return result["value"]
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
# Process-wide singleton shared by every formatter call site.
_formatter_llm_client = FormatterLLMClient()


def get_formatter_llm_client() -> FormatterLLMClient:
    """Return the shared, process-wide formatter LLM client instance."""
    return _formatter_llm_client
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def seed_formatter_llm_default(default_model_id: str | None) -> None:
    """Seed the shared formatter client's default model identifier.

    Thin convenience wrapper around the process-wide client.

    Args:
        default_model_id: Formatter model identifier to seed when missing.
    """
    client = get_formatter_llm_client()
    client.seed_default(default_model_id)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""Define __init__ for langgraph converter.

Re-exports the LangChain <-> gllm-inference conversion helpers so callers
can import them from this package directly.

Authors:
    Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
"""

from aip_agents.utils.langgraph.converter import (
    convert_gllm_tool_call_to_langchain_tool_call,
    convert_langchain_messages_to_gllm_messages,
    convert_langchain_tool_call_to_gllm_tool_call,
    convert_lm_output_to_langchain_message,
)

# Public API of this package.
__all__ = [
    "convert_gllm_tool_call_to_langchain_tool_call",
    "convert_langchain_tool_call_to_gllm_tool_call",
    "convert_lm_output_to_langchain_message",
    "convert_langchain_messages_to_gllm_messages",
]
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""Bridge utilities for converting between LangChain and LM Invoker formats.
|
|
2
|
+
|
|
3
|
+
This module provides conversion functions between LangChain's message format
|
|
4
|
+
and gllm-inference's Message format, enabling seamless integration
|
|
5
|
+
of LM Invoker with LangChain-based agents.
|
|
6
|
+
|
|
7
|
+
Authors:
|
|
8
|
+
Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from collections.abc import Sequence
|
|
12
|
+
|
|
13
|
+
from gllm_inference.schema import LMOutput, Message, ToolResult
|
|
14
|
+
from gllm_inference.schema import ToolCall as GllmToolCall
|
|
15
|
+
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage
|
|
16
|
+
from langchain_core.messages.tool import ToolCall as LangChainToolCall
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def convert_langchain_messages_to_gllm_messages(messages: Sequence[BaseMessage], instruction: str) -> list[Message]:
    """Translate LangChain messages into gllm-inference ``Message`` objects.

    The optional system ``instruction`` is emitted first; system messages
    that duplicate it are dropped. AI messages carrying tool calls become
    assistant messages of ``ToolCall`` objects, and ``ToolMessage`` results
    become user messages wrapping a ``ToolResult``.

    Args:
        messages: Sequence of LangChain BaseMessage objects to convert.
        instruction: System instruction to prepend if not already present in messages.

    Returns:
        List of Message objects containing the converted message sequence.
    """
    output: list[Message] = []

    # The system instruction, when given, always leads the sequence.
    if instruction:
        output.append(Message.system(instruction))

    for message in messages:
        if isinstance(message, SystemMessage):
            # Skip a system message that merely repeats the instruction.
            if not instruction or message.content != instruction:
                output.append(Message.system(message.content))
        elif isinstance(message, HumanMessage):
            output.append(Message.user(message.content))
        elif isinstance(message, AIMessage):
            if message.tool_calls:
                converted_calls = [convert_langchain_tool_call_to_gllm_tool_call(call) for call in message.tool_calls]
                output.append(Message.assistant(converted_calls))
            else:
                output.append(Message.assistant(message.content))
        elif isinstance(message, ToolMessage):
            # Tool results travel back to the model as user-role content.
            output.append(Message.user([ToolResult(id=message.tool_call_id, output=message.content)]))

    return output
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def convert_lm_output_to_langchain_message(response: LMOutput | str) -> AIMessage:
    """Translate LM Invoker output into a LangChain ``AIMessage``.

    Handles both structured ``LMOutput`` responses (text, tool calls,
    token usage) and bare-string responses.

    Args:
        response: The response from LM Invoker (MultimodalOutput).

    Returns:
        AIMessage containing the converted response.
    """
    if not isinstance(response, LMOutput):
        # Plain payload (lm_invoker's output_analytics is disabled).
        return AIMessage(
            content=str(response),
            response_metadata={"finish_reason": "stop"},
        )

    converted_calls = []
    if response.tool_calls:
        converted_calls = [convert_gllm_tool_call_to_langchain_tool_call(call) for call in response.tool_calls]

    usage = response.token_usage
    usage_metadata = None
    if usage:
        usage_metadata = {
            "input_tokens": usage.input_tokens,
            "output_tokens": usage.output_tokens,
            "total_tokens": usage.input_tokens + usage.output_tokens,
        }

    # Only non-tool-call replies are terminal, so mark them as stopped.
    metadata = {} if converted_calls else {"finish_reason": "stop"}

    return AIMessage(
        content=str(response.response),
        tool_calls=converted_calls,
        usage_metadata=usage_metadata,
        response_metadata=metadata,
    )
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def convert_langchain_tool_call_to_gllm_tool_call(lc_tool_call: LangChainToolCall) -> GllmToolCall:
    """Build a gllm ``ToolCall`` from a LangChain tool-call TypedDict.

    Args:
        lc_tool_call: LangChain ToolCall (TypedDict).

    Returns:
        GllmToolCall object for gllm-inference.
    """
    call_id = lc_tool_call["id"]
    call_name = lc_tool_call["name"]
    call_args = lc_tool_call["args"]
    return GllmToolCall(id=call_id, name=call_name, args=call_args)
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def convert_gllm_tool_call_to_langchain_tool_call(gllm_tool_call: GllmToolCall) -> LangChainToolCall:
    """Build a LangChain tool-call TypedDict from a gllm ``ToolCall``.

    Args:
        gllm_tool_call: GllmToolCall object from gllm-inference.

    Returns:
        LangChain ToolCall (TypedDict) with proper type annotation.
    """
    return LangChainToolCall(
        id=gllm_tool_call.id,
        name=gllm_tool_call.name,
        args=gllm_tool_call.args,
    )
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Tool managers for organizing LangGraph agent capabilities.

This package contains tool managers that convert different types of capabilities
(A2A communication, agent delegation) into unified LangChain tools for use in
LangGraph agents.

Authors:
    Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
"""

from aip_agents.utils.langgraph.tool_managers.a2a_tool_manager import A2AToolManager
from aip_agents.utils.langgraph.tool_managers.base_tool_manager import BaseLangGraphToolManager
from aip_agents.utils.langgraph.tool_managers.delegation_tool_manager import DelegationToolManager

# Public API of this package: the abstract base first, then concrete managers.
__all__ = ["BaseLangGraphToolManager", "A2AToolManager", "DelegationToolManager"]
|