aip_agents_binary-0.5.20-py3-none-any.whl
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- aip_agents/__init__.py +65 -0
- aip_agents/a2a/__init__.py +19 -0
- aip_agents/a2a/server/__init__.py +10 -0
- aip_agents/a2a/server/base_executor.py +1086 -0
- aip_agents/a2a/server/google_adk_executor.py +198 -0
- aip_agents/a2a/server/langflow_executor.py +180 -0
- aip_agents/a2a/server/langgraph_executor.py +270 -0
- aip_agents/a2a/types.py +232 -0
- aip_agents/agent/__init__.py +27 -0
- aip_agents/agent/base_agent.py +970 -0
- aip_agents/agent/base_langgraph_agent.py +2942 -0
- aip_agents/agent/google_adk_agent.py +926 -0
- aip_agents/agent/google_adk_constants.py +6 -0
- aip_agents/agent/hitl/__init__.py +24 -0
- aip_agents/agent/hitl/config.py +28 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.py +515 -0
- aip_agents/agent/hitl/manager.py +532 -0
- aip_agents/agent/hitl/models.py +18 -0
- aip_agents/agent/hitl/prompt/__init__.py +9 -0
- aip_agents/agent/hitl/prompt/base.py +42 -0
- aip_agents/agent/hitl/prompt/deferred.py +73 -0
- aip_agents/agent/hitl/registry.py +149 -0
- aip_agents/agent/interface.py +138 -0
- aip_agents/agent/interfaces.py +65 -0
- aip_agents/agent/langflow_agent.py +464 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.py +433 -0
- aip_agents/agent/langgraph_react_agent.py +2514 -0
- aip_agents/agent/system_instruction_context.py +34 -0
- aip_agents/clients/__init__.py +10 -0
- aip_agents/clients/langflow/__init__.py +10 -0
- aip_agents/clients/langflow/client.py +477 -0
- aip_agents/clients/langflow/types.py +18 -0
- aip_agents/constants.py +23 -0
- aip_agents/credentials/manager.py +132 -0
- aip_agents/examples/__init__.py +5 -0
- aip_agents/examples/compare_streaming_client.py +783 -0
- aip_agents/examples/compare_streaming_server.py +142 -0
- aip_agents/examples/demo_memory_recall.py +401 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.py +49 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.py +48 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_client.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.py +39 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.py +41 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.py +60 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.py +105 -0
- aip_agents/examples/hello_world_a2a_langchain_server.py +79 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.py +78 -0
- aip_agents/examples/hello_world_a2a_langflow_client.py +83 -0
- aip_agents/examples/hello_world_a2a_langflow_server.py +82 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.py +73 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.py +76 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.py +54 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.py +32 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.py +50 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.py +44 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.py +92 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.py +84 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.py +79 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.py +132 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.py +196 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py +244 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py +251 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py +57 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py +80 -0
- aip_agents/examples/hello_world_google_adk.py +41 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.py +34 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.py +40 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.py +44 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.py +48 -0
- aip_agents/examples/hello_world_google_adk_stream.py +44 -0
- aip_agents/examples/hello_world_langchain.py +28 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.py +15 -0
- aip_agents/examples/hello_world_langchain_mcp_http.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.py +130 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.py +42 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.py +155 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.py +34 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.py +40 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.py +30 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.py +41 -0
- aip_agents/examples/hello_world_langchain_stream.py +36 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.py +39 -0
- aip_agents/examples/hello_world_langflow_agent.py +163 -0
- aip_agents/examples/hello_world_langgraph.py +39 -0
- aip_agents/examples/hello_world_langgraph_bosa_twitter.py +41 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.py +31 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.py +34 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.py +35 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.py +50 -0
- aip_agents/examples/hello_world_langgraph_stream.py +43 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.py +37 -0
- aip_agents/examples/hello_world_model_switch_cli.py +210 -0
- aip_agents/examples/hello_world_multi_agent_adk.py +75 -0
- aip_agents/examples/hello_world_multi_agent_langchain.py +54 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.py +66 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.py +69 -0
- aip_agents/examples/hello_world_pii_logger.py +21 -0
- aip_agents/examples/hello_world_sentry.py +133 -0
- aip_agents/examples/hello_world_step_limits.py +273 -0
- aip_agents/examples/hello_world_stock_a2a_server.py +103 -0
- aip_agents/examples/hello_world_tool_output_client.py +46 -0
- aip_agents/examples/hello_world_tool_output_server.py +114 -0
- aip_agents/examples/hitl_demo.py +724 -0
- aip_agents/examples/mcp_configs/configs.py +63 -0
- aip_agents/examples/mcp_servers/common.py +76 -0
- aip_agents/examples/mcp_servers/mcp_name.py +29 -0
- aip_agents/examples/mcp_servers/mcp_server_http.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_sse.py +19 -0
- aip_agents/examples/mcp_servers/mcp_server_stdio.py +19 -0
- aip_agents/examples/mcp_servers/mcp_time.py +10 -0
- aip_agents/examples/pii_demo_langgraph_client.py +69 -0
- aip_agents/examples/pii_demo_langgraph_server.py +126 -0
- aip_agents/examples/pii_demo_multi_agent_client.py +80 -0
- aip_agents/examples/pii_demo_multi_agent_server.py +247 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +70 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +88 -0
- aip_agents/examples/tools/__init__.py +27 -0
- aip_agents/examples/tools/adk_arithmetic_tools.py +36 -0
- aip_agents/examples/tools/adk_weather_tool.py +60 -0
- aip_agents/examples/tools/data_generator_tool.py +103 -0
- aip_agents/examples/tools/data_visualization_tool.py +312 -0
- aip_agents/examples/tools/image_artifact_tool.py +136 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.py +26 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.py +88 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.py +172 -0
- aip_agents/examples/tools/langchain_weather_tool.py +48 -0
- aip_agents/examples/tools/langgraph_streaming_tool.py +130 -0
- aip_agents/examples/tools/mock_retrieval_tool.py +56 -0
- aip_agents/examples/tools/pii_demo_tools.py +189 -0
- aip_agents/examples/tools/random_chart_tool.py +142 -0
- aip_agents/examples/tools/serper_tool.py +202 -0
- aip_agents/examples/tools/stock_tools.py +82 -0
- aip_agents/examples/tools/table_generator_tool.py +167 -0
- aip_agents/examples/tools/time_tool.py +82 -0
- aip_agents/examples/tools/weather_forecast_tool.py +38 -0
- aip_agents/executor/agent_executor.py +473 -0
- aip_agents/executor/base.py +48 -0
- aip_agents/mcp/__init__.py +1 -0
- aip_agents/mcp/client/__init__.py +14 -0
- aip_agents/mcp/client/base_mcp_client.py +369 -0
- aip_agents/mcp/client/connection_manager.py +193 -0
- aip_agents/mcp/client/google_adk/__init__.py +11 -0
- aip_agents/mcp/client/google_adk/client.py +381 -0
- aip_agents/mcp/client/langchain/__init__.py +11 -0
- aip_agents/mcp/client/langchain/client.py +265 -0
- aip_agents/mcp/client/persistent_session.py +359 -0
- aip_agents/mcp/client/session_pool.py +351 -0
- aip_agents/mcp/client/transports.py +215 -0
- aip_agents/mcp/utils/__init__.py +7 -0
- aip_agents/mcp/utils/config_validator.py +139 -0
- aip_agents/memory/__init__.py +14 -0
- aip_agents/memory/adapters/__init__.py +10 -0
- aip_agents/memory/adapters/base_adapter.py +717 -0
- aip_agents/memory/adapters/mem0.py +84 -0
- aip_agents/memory/base.py +84 -0
- aip_agents/memory/constants.py +49 -0
- aip_agents/memory/factory.py +86 -0
- aip_agents/memory/guidance.py +20 -0
- aip_agents/memory/simple_memory.py +47 -0
- aip_agents/middleware/__init__.py +17 -0
- aip_agents/middleware/base.py +88 -0
- aip_agents/middleware/manager.py +128 -0
- aip_agents/middleware/todolist.py +274 -0
- aip_agents/schema/__init__.py +69 -0
- aip_agents/schema/a2a.py +56 -0
- aip_agents/schema/agent.py +111 -0
- aip_agents/schema/hitl.py +157 -0
- aip_agents/schema/langgraph.py +37 -0
- aip_agents/schema/model_id.py +97 -0
- aip_agents/schema/step_limit.py +108 -0
- aip_agents/schema/storage.py +40 -0
- aip_agents/sentry/__init__.py +11 -0
- aip_agents/sentry/sentry.py +151 -0
- aip_agents/storage/__init__.py +41 -0
- aip_agents/storage/base.py +85 -0
- aip_agents/storage/clients/__init__.py +12 -0
- aip_agents/storage/clients/minio_client.py +318 -0
- aip_agents/storage/config.py +62 -0
- aip_agents/storage/providers/__init__.py +15 -0
- aip_agents/storage/providers/base.py +106 -0
- aip_agents/storage/providers/memory.py +114 -0
- aip_agents/storage/providers/object_storage.py +214 -0
- aip_agents/tools/__init__.py +33 -0
- aip_agents/tools/bosa_tools.py +105 -0
- aip_agents/tools/browser_use/__init__.py +82 -0
- aip_agents/tools/browser_use/action_parser.py +103 -0
- aip_agents/tools/browser_use/browser_use_tool.py +1112 -0
- aip_agents/tools/browser_use/llm_config.py +120 -0
- aip_agents/tools/browser_use/minio_storage.py +198 -0
- aip_agents/tools/browser_use/schemas.py +119 -0
- aip_agents/tools/browser_use/session.py +76 -0
- aip_agents/tools/browser_use/session_errors.py +132 -0
- aip_agents/tools/browser_use/steel_session_recording.py +317 -0
- aip_agents/tools/browser_use/streaming.py +813 -0
- aip_agents/tools/browser_use/structured_data_parser.py +257 -0
- aip_agents/tools/browser_use/structured_data_recovery.py +204 -0
- aip_agents/tools/browser_use/types.py +78 -0
- aip_agents/tools/code_sandbox/__init__.py +26 -0
- aip_agents/tools/code_sandbox/constant.py +13 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +257 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +411 -0
- aip_agents/tools/constants.py +165 -0
- aip_agents/tools/document_loader/__init__.py +44 -0
- aip_agents/tools/document_loader/base_reader.py +302 -0
- aip_agents/tools/document_loader/docx_reader_tool.py +68 -0
- aip_agents/tools/document_loader/excel_reader_tool.py +171 -0
- aip_agents/tools/document_loader/pdf_reader_tool.py +79 -0
- aip_agents/tools/document_loader/pdf_splitter.py +169 -0
- aip_agents/tools/gl_connector/__init__.py +5 -0
- aip_agents/tools/gl_connector/tool.py +351 -0
- aip_agents/tools/memory_search/__init__.py +22 -0
- aip_agents/tools/memory_search/base.py +200 -0
- aip_agents/tools/memory_search/mem0.py +258 -0
- aip_agents/tools/memory_search/schema.py +48 -0
- aip_agents/tools/memory_search_tool.py +26 -0
- aip_agents/tools/time_tool.py +117 -0
- aip_agents/tools/tool_config_injector.py +300 -0
- aip_agents/tools/web_search/__init__.py +15 -0
- aip_agents/tools/web_search/serper_tool.py +187 -0
- aip_agents/types/__init__.py +70 -0
- aip_agents/types/a2a_events.py +13 -0
- aip_agents/utils/__init__.py +79 -0
- aip_agents/utils/a2a_connector.py +1757 -0
- aip_agents/utils/artifact_helpers.py +502 -0
- aip_agents/utils/constants.py +22 -0
- aip_agents/utils/datetime/__init__.py +34 -0
- aip_agents/utils/datetime/normalization.py +231 -0
- aip_agents/utils/datetime/timezone.py +206 -0
- aip_agents/utils/env_loader.py +27 -0
- aip_agents/utils/event_handler_registry.py +58 -0
- aip_agents/utils/file_prompt_utils.py +176 -0
- aip_agents/utils/final_response_builder.py +211 -0
- aip_agents/utils/formatter_llm_client.py +231 -0
- aip_agents/utils/langgraph/__init__.py +19 -0
- aip_agents/utils/langgraph/converter.py +128 -0
- aip_agents/utils/langgraph/tool_managers/__init__.py +15 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.py +99 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.py +66 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +1071 -0
- aip_agents/utils/langgraph/tool_output_management.py +967 -0
- aip_agents/utils/logger.py +195 -0
- aip_agents/utils/metadata/__init__.py +27 -0
- aip_agents/utils/metadata/activity_metadata_helper.py +407 -0
- aip_agents/utils/metadata/activity_narrative/__init__.py +35 -0
- aip_agents/utils/metadata/activity_narrative/builder.py +817 -0
- aip_agents/utils/metadata/activity_narrative/constants.py +51 -0
- aip_agents/utils/metadata/activity_narrative/context.py +49 -0
- aip_agents/utils/metadata/activity_narrative/formatters.py +230 -0
- aip_agents/utils/metadata/activity_narrative/utils.py +35 -0
- aip_agents/utils/metadata/schemas/__init__.py +16 -0
- aip_agents/utils/metadata/schemas/activity_schema.py +29 -0
- aip_agents/utils/metadata/schemas/thinking_schema.py +31 -0
- aip_agents/utils/metadata/thinking_metadata_helper.py +38 -0
- aip_agents/utils/metadata_helper.py +358 -0
- aip_agents/utils/name_preprocessor/__init__.py +17 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.py +73 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.py +100 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.py +87 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.py +48 -0
- aip_agents/utils/pii/__init__.py +25 -0
- aip_agents/utils/pii/pii_handler.py +397 -0
- aip_agents/utils/pii/pii_helper.py +207 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.py +195 -0
- aip_agents/utils/reference_helper.py +273 -0
- aip_agents/utils/sse_chunk_transformer.py +831 -0
- aip_agents/utils/step_limit_manager.py +265 -0
- aip_agents/utils/token_usage_helper.py +156 -0
- aip_agents_binary-0.5.20.dist-info/METADATA +681 -0
- aip_agents_binary-0.5.20.dist-info/RECORD +280 -0
- aip_agents_binary-0.5.20.dist-info/WHEEL +5 -0
- aip_agents_binary-0.5.20.dist-info/top_level.txt +1 -0

aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.py
@@ -0,0 +1,244 @@
+"""Three-Level Agent Hierarchy Client.
+
+This client demonstrates connecting to and using a hierarchical agent system
+that manages 3 levels of specialized agents through A2A protocol.
+
+This client tests various scenarios:
+1. Research-only tasks (Level 1 -> Level 2 Research -> Level 3 Workers)
+2. Content-only tasks (Level 1 -> Level 2 Content -> Level 3 Workers)
+3. Complex tasks requiring both research and content creation
+4. Artifact generation across the hierarchy
+
+To run this client:
+1. First start the server: python examples/three_level_agent_hierarchy_server.py
+2. Then run this client: python examples/three_level_agent_hierarchy_client.py
+
+Authors:
+    Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
+"""
+
+import asyncio
+import base64
+
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+
+from aip_agents.agent import LangGraphAgent
+from aip_agents.schema.agent import A2AClientConfig
+
+load_dotenv()
+
+
+async def main():
+    """Main function demonstrating the Three-Level Agent Hierarchy client."""
+    llm = ChatOpenAI(model="gpt-4.1", streaming=True)
+
+    # Create a simple client agent
+    client_agent = LangGraphAgent(
+        name="HierarchyTestClient",
+        instruction="You are a test client that communicates with hierarchical agent systems via A2A.",
+        model=llm,
+        tools=[],
+    )
+
+    # Discover the hierarchical coordinator agent
+    client_a2a_config = A2AClientConfig(
+        discovery_urls=["http://localhost:8888"],
+    )
+
+    print("=== Three-Level Agent Hierarchy Client ===\n")
+    print("Discovering hierarchical coordinator...")
+
+    try:
+        agent_cards = client_agent.discover_agents(client_a2a_config)
+        if not agent_cards:
+            print("No agents found. Make sure the server is running on http://localhost:9000")
+            print("Start server with: python examples/three_level_agent_hierarchy_server.py")
+            return
+
+        coordinator_card = agent_cards[0]
+        print(f"Found coordinator: {coordinator_card.name}")
+        print(f"Description: {coordinator_card.description}")
+        print(f"Skills: {len(coordinator_card.skills)} available")
+        print()
+
+        # Run test scenarios
+        await test_research_task(client_agent, coordinator_card)
+        await test_content_task(client_agent, coordinator_card)
+        await test_complex_task(client_agent, coordinator_card)
+        await test_artifact_generation(client_agent, coordinator_card)
+
+    except Exception as e:
+        print(f"Error connecting to coordinator: {e}")
+        print("Make sure the server is running: python examples/three_level_agent_hierarchy_server.py")
+
+
+async def test_research_task(client_agent, coordinator_card):
+    """Test a research-only task that flows through research specialists and workers.
+
+    Args:
+        client_agent: The client agent to send the query with.
+        coordinator_card: The agent card of the hierarchical coordinator.
+    """
+    print("--- Test 1: Research Task (Research Specialist -> Data Analysis Worker) ---")
+    query = (
+        "Research the current trends in renewable energy and create a data table showing "
+        "the top 5 technologies with their growth rates and market share."
+    )
+
+    print(f"Query: {query}\n")
+
+    result = client_agent.send_to_agent(agent_card=coordinator_card, message=query)
+    print(f"Response: {result.get('content', 'No content')}")
+
+    handle_artifacts(result, "Research Task")
+    print("\n" + "=" * 80 + "\n")
+
+
+async def test_content_task(client_agent, coordinator_card):
+    """Test a content-only task that flows through content specialists and workers.
+
+    Args:
+        client_agent: The client agent to send the query with.
+        coordinator_card: The agent card of the hierarchical coordinator.
+    """
+    print("--- Test 2: Content Creation Task (Content Specialist -> Writing + Formatting Workers) ---")
+    query = (
+        "Create an engaging blog post about the benefits of remote work, and include "
+        "a professional header image with the title 'Future of Work'."
+    )
+
+    print(f"Query: {query}\n")
+
+    result = client_agent.send_to_agent(agent_card=coordinator_card, message=query)
+    print(f"Response: {result.get('content', 'No content')}")
+
+    handle_artifacts(result, "Content Creation Task")
+    print("\n" + "=" * 80 + "\n")
+
+
+async def test_complex_task(client_agent, coordinator_card):
+    """Test a complex task requiring both research and content specialists.
+
+    Args:
+        client_agent: The client agent to send the query with.
+        coordinator_card: The agent card of the hierarchical coordinator.
+    """
+    print("--- Test 3: Complex Multi-Level Task (Both Specialists + All Workers) ---")
+    query = (
+        "Research the impact of AI on healthcare, analyze the data to create a table showing "
+        "AI applications and their adoption rates, then write a comprehensive report and "
+        "create visual elements including charts and professional graphics."
+    )
+
+    print(f"Query: {query}\n")
+
+    result = client_agent.send_to_agent(agent_card=coordinator_card, message=query)
+    print(f"Response: {result.get('content', 'No content')}")
+
+    handle_artifacts(result, "Complex Multi-Level Task")
+    print("\n" + "=" * 80 + "\n")
+
+
+async def test_artifact_generation(client_agent, coordinator_card):
+    """Test artifact generation across the hierarchy.
+
+    Args:
+        client_agent: The client agent to send the query with.
+        coordinator_card: The agent card of the hierarchical coordinator.
+    """
+    print("--- Test 4: Multi-Artifact Generation (Testing All Workers) ---")
+    query = (
+        "Create a complete analysis package: research cloud computing trends, "
+        "generate data tables with market statistics, write executive summary, "
+        "and create professional presentation graphics including charts and title slides."
+    )
+
+    print(f"Query: {query}\n")
+
+    result = client_agent.send_to_agent(agent_card=coordinator_card, message=query)
+    print(f"Response: {result.get('content', 'No content')}")
+
+    handle_artifacts(result, "Multi-Artifact Generation")
+    print("\n" + "=" * 80 + "\n")
+
+
+def handle_artifacts(result, test_name):
+    """Handle and display information about artifacts from the result.
+
+    Args:
+        result: The result from the agent containing artifacts.
+        test_name: The name of the test for display purposes.
+    """
+    artifacts = result.get("artifacts", [])
+    if artifacts:
+        print(f"\n--- {len(artifacts)} Artifacts Generated by {test_name} ---")
+        for i, artifact in enumerate(artifacts, 1):
+            print(f"Artifact {i}:")
+            print_artifact_info(artifact)
+        print("--- End Artifacts ---")
+    else:
+        print(f"No artifacts generated for {test_name}")
+
+
+def print_artifact_info(artifact: dict) -> None:
+    """Print information about an artifact.
+
+    Args:
+        artifact: Dictionary containing artifact information.
+    """
+    name = artifact.get("name", "unknown")
+    mime_type = artifact.get("mime_type", "unknown")
+    file_data = artifact.get("file_data")
+    description = artifact.get("description", "No description")
+
+    print(f" Name: {name}")
+    print(f" Type: {mime_type}")
+    print(f" Description: {description}")
+
+    if file_data:
+        try:
+            if mime_type == "text/csv":
+                decoded_content = base64.b64decode(file_data).decode("utf-8")
+                lines = decoded_content.split("\n")
+                print(f" CSV Content ({len(lines)} lines):")
+                # Show first few lines
+                MAX_PREVIEW_LINES = 3
+                for _i, line in enumerate(lines[:MAX_PREVIEW_LINES]):
+                    if line.strip():
+                        print(f" {line}")
+                if len(lines) > MAX_PREVIEW_LINES:
+                    print(f" ... and {len(lines) - MAX_PREVIEW_LINES} more lines")
+            elif mime_type.startswith("image/"):
+                file_size = len(base64.b64decode(file_data))
+                print(f" Image size: {file_size} bytes")
+                print(f" Image format: {mime_type}")
+            else:
+                data_size = len(file_data)
+                print(f" File data size: {data_size} characters (base64 encoded)")
+        except Exception as e:
+            print(f" Could not decode file data: {e}")
+    else:
+        print(" No file data available")
+    print()
+
+
+def print_hierarchy_info():
+    """Print information about the 3-level hierarchy being tested."""
+    print("=== Agent Hierarchy Structure ===")
+    print("Level 1: HierarchicalCoordinator")
+    print("├── Level 2: ResearchSpecialist")
+    print("│   ├── Level 3: WebSearchWorker")
+    print("│   └── Level 3: DataAnalysisWorker (with table_generator)")
+    print("└── Level 2: ContentSpecialist")
+    print("    ├── Level 3: WritingWorker")
+    print("    └── Level 3: FormattingWorker (with image_generator)")
+    print("\nTask Flow:")
+    print("Client -> Coordinator -> Specialist -> Worker -> Results")
+    print("=" * 50)
+    print()
+
+
+if __name__ == "__main__":
+    print_hierarchy_info()
+    asyncio.run(main())

aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.py
@@ -0,0 +1,251 @@
+#!/usr/bin/env python3
+"""Three-Level Agent Hierarchy Server.
+
+This server hosts a hierarchical agent system with three levels:
+1. Level 1 (Coordinator): Main orchestrator agent that handles complex requests
+2. Level 2 (Specialists): Domain-specific agents that handle specialized tasks
+3. Level 3 (Workers): Task-specific agents that perform atomic operations
+
+Architecture:
+    Coordinator Agent (A2A Server)
+    ├── Research Specialist Agent
+    │   ├── Web Search Worker Agent
+    │   └── Data Analysis Worker Agent
+    └── Content Specialist Agent
+        ├── Writing Worker Agent
+        └── Formatting Worker Agent
+
+Authors:
+    Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
+"""
+
+import click
+import uvicorn
+from a2a.types import AgentCapabilities, AgentCard, AgentSkill
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+
+from aip_agents.agent import LangGraphAgent
+from aip_agents.examples.tools.image_artifact_tool import ImageArtifactTool
+from aip_agents.examples.tools.table_generator_tool import TableGeneratorTool
+from aip_agents.utils.logger import get_logger
+
+logger = get_logger(__name__)
+load_dotenv()
+
+SERVER_AGENT_NAME = "HierarchicalCoordinator"
+
+
+def create_worker_agents(llm) -> tuple[LangGraphAgent, LangGraphAgent, LangGraphAgent, LangGraphAgent]:
+    """Create Level 3 worker agents that perform atomic operations.
+
+    Args:
+        llm: The language model to use for the worker agents.
+
+    Returns:
+        tuple[LangGraphAgent, LangGraphAgent, LangGraphAgent, LangGraphAgent]: Tuple of (web_search_worker, data_analysis_worker, writing_worker, formatting_worker).
+    """
+    # Research Workers
+    web_search_worker = LangGraphAgent(
+        name="WebSearchWorker",
+        instruction=(
+            "You are a web search specialist. You excel at finding information online. "
+            "When given a topic, provide comprehensive research findings with sources and key insights. "
+            "Format your responses clearly with bullet points and structured information."
+        ),
+        model=llm,
+        tools=[],
+    )
+
+    data_analysis_worker = LangGraphAgent(
+        name="DataAnalysisWorker",
+        instruction=(
+            "You are a data analysis expert. You specialize in creating tables, charts, and analyzing patterns. "
+            "Use the table_generator tool to create structured data representations. "
+            "Always provide insights and interpretations of the data you generate."
+        ),
+        model=llm,
+        tools=[TableGeneratorTool()],
+    )
+
+    # Content Workers
+    writing_worker = LangGraphAgent(
+        name="WritingWorker",
+        instruction=(
+            "You are a content writing specialist. You excel at creating engaging, well-structured content. "
+            "Focus on clarity, readability, and meeting the specific requirements provided. "
+            "Always maintain a professional yet engaging tone."
+        ),
+        model=llm,
+        tools=[],
+    )
+
+    formatting_worker = LangGraphAgent(
+        name="FormattingWorker",
+        instruction=(
+            "You are a formatting and presentation specialist. You excel at creating visual content. "
+            "Use the image_generator tool to create visual elements and graphics. "
+            "Focus on clean, professional layouts and visual appeal."
+        ),
+        model=llm,
+        tools=[ImageArtifactTool()],
+    )
+
+    return web_search_worker, data_analysis_worker, writing_worker, formatting_worker
+
+
+def create_specialist_agents(llm, worker_agents) -> tuple[LangGraphAgent, LangGraphAgent]:
+    """Create Level 2 specialist agents that coordinate worker agents.
+
+    Args:
+        llm: The language model to use for the specialist agents.
+        worker_agents: Tuple of worker agents from create_worker_agents function.
+
+    Returns:
+        tuple[LangGraphAgent, LangGraphAgent]: Tuple of (research_specialist, content_specialist).
+    """
+    web_search_worker, data_analysis_worker, writing_worker, formatting_worker = worker_agents
+
+    research_specialist = LangGraphAgent(
+        name="ResearchSpecialist",
+        instruction=(
+            "You are a research coordinator who manages research and data analysis tasks. "
+            "You delegate web research to WebSearchWorker and data analysis to DataAnalysisWorker. "
+            "For research requests, use WebSearchWorker to gather information. "
+            "For data analysis or table creation requests, use DataAnalysisWorker. "
+            "Synthesize results from your workers into comprehensive research reports."
+        ),
+        model=llm,
+        agents=[web_search_worker, data_analysis_worker],
+    )
+
+    content_specialist = LangGraphAgent(
+        name="ContentSpecialist",
+        instruction=(
+            "You are a content creation coordinator who manages writing and formatting tasks. "
+            "You delegate writing tasks to WritingWorker and visual/formatting tasks to FormattingWorker. "
+            "For text content requests, use WritingWorker to create written material. "
+            "For visual content or formatting requests, use FormattingWorker. "
+            "Ensure all content is cohesive and meets quality standards."
+        ),
+        model=llm,
+        agents=[writing_worker, formatting_worker],
+    )
+
+    return research_specialist, content_specialist
+
+
+def create_coordinator_agent(llm, specialist_agents) -> LangGraphAgent:
+    """Create Level 1 coordinator agent that orchestrates everything.
+
+    Args:
+        llm: The language model to use for the coordinator agent.
+        specialist_agents: Tuple of specialist agents from create_specialist_agents function.
+
+    Returns:
+        LangGraphAgent: The configured coordinator agent.
+    """
+    research_specialist, content_specialist = specialist_agents
+
+    coordinator = LangGraphAgent(
+        name=SERVER_AGENT_NAME,
+        instruction=(
+            "You are a hierarchical coordinator that manages complex multi-step projects. "
+            "You oversee two specialist teams:\n"
+            "- ResearchSpecialist: Handles research, data analysis, and information gathering\n"
+            "- ContentSpecialist: Handles content creation, writing, and visual formatting\n\n"
+            "For each request, analyze what needs to be done and delegate appropriately:\n"
+            "- Research tasks -> ResearchSpecialist\n"
+            "- Content creation tasks -> ContentSpecialist\n"
+            "- Complex requests may require both specialists\n\n"
+            "Always provide a comprehensive summary of the coordinated work and ensure "
+            "all deliverables meet the user's requirements."
+        ),
+        model=llm,
+        agents=[research_specialist, content_specialist],
+    )
+
+    return coordinator
+
+
+@click.command()
+@click.option("--host", "host", default="localhost", help="Host to bind the server to.")
+@click.option("--port", "port", default=8888, help="Port to bind the server to.")
+def main(host: str, port: int):
+    """Runs the Three-Level Agent Hierarchy A2A server.
+
+    Args:
+        host (str): Host to bind the server to.
+        port (int): Port to bind the server to.
+    """
+    logger.info(f"Starting {SERVER_AGENT_NAME} on http://{host}:{port}")
+
+    agent_card = AgentCard(
+        name=SERVER_AGENT_NAME,
+        description=(
+            "A hierarchical coordinator managing 3 levels of specialized agents for complex research and content tasks."
+        ),
+        url=f"http://{host}:{port}",
+        version="1.0.0",
+        defaultInputModes=["text"],
+        defaultOutputModes=["text"],
+        capabilities=AgentCapabilities(streaming=True),
+        skills=[
+            AgentSkill(
+                id="hierarchical_coordination",
+                name="Hierarchical Task Coordination",
+                description="Coordinates complex multi-step projects across 3 levels of specialized agents.",
+                examples=[
+                    "Research market trends and create a presentation",
+                    "Analyze data and write a comprehensive report",
+                    "Create educational content with visuals and data tables",
+                ],
+                tags=["coordination", "hierarchy", "multi-level", "delegation"],
+            ),
+            AgentSkill(
+                id="research_management",
+                name="Research & Analysis Management",
+                description="Manages research specialists and data analysis workers for information gathering.",
+                examples=[
+                    "Research renewable energy trends and create data tables",
+                    "Analyze market competition and generate insights",
+                    "Gather information on AI developments",
+                ],
+                tags=["research", "analysis", "data", "information"],
+            ),
+            AgentSkill(
+                id="content_management",
+                name="Content Creation Management",
+                description="Manages content specialists and formatting workers for content creation.",
+                examples=[
+                    "Create engaging blog posts with visual elements",
+                    "Develop training materials with graphics",
+                    "Write reports with professional formatting",
+                ],
+                tags=["content", "writing", "formatting", "visual"],
+            ),
+        ],
+        tags=["hierarchical", "coordination", "multi-agent", "complex-tasks"],
+    )
+
+    llm = ChatOpenAI(model="gpt-4.1", temperature=0, streaming=True)
+
+    # Build the 3-level hierarchy
+    logger.info("Creating Level 3 worker agents...")
+    worker_agents = create_worker_agents(llm)
+
+    logger.info("Creating Level 2 specialist agents...")
+    specialist_agents = create_specialist_agents(llm, worker_agents)
+
+    logger.info("Creating Level 1 coordinator agent...")
+    coordinator_agent = create_coordinator_agent(llm, specialist_agents)
+
+    app = coordinator_agent.to_a2a(agent_card=agent_card)
+
+    logger.info("A2A application configured. Starting Uvicorn server...")
+    logger.info("Hierarchy: 1 Coordinator -> 2 Specialists -> 4 Workers")
+    uvicorn.run(app, host=host, port=port)
+
+
+if __name__ == "__main__":
+    main()
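
For orientation, the two hunks above boil down to one composition pattern: LangGraphAgent instances are nested through the agents= parameter, and the root agent is exposed over A2A with to_a2a. The condensed two-level sketch below is editorial, not a file from the package; it reuses only API calls shown in the hunks above, with hypothetical names and an empty skills list.

    import uvicorn
    from a2a.types import AgentCapabilities, AgentCard
    from langchain_openai import ChatOpenAI

    from aip_agents.agent import LangGraphAgent

    llm = ChatOpenAI(model="gpt-4.1", streaming=True)

    # Leaf worker: no sub-agents; tools could be attached as in the server hunk.
    worker = LangGraphAgent(name="Worker", instruction="Handle one atomic task.", model=llm, tools=[])

    # Coordinator: delegates to the worker through the agents= parameter.
    coordinator = LangGraphAgent(
        name="Coordinator",
        instruction="Delegate work to Worker and summarize the result.",
        model=llm,
        agents=[worker],
    )

    # Serve the root agent over A2A, mirroring the to_a2a call above.
    card = AgentCard(
        name="Coordinator",
        description="Two-level demo hierarchy.",
        url="http://localhost:8888",
        version="1.0.0",
        defaultInputModes=["text"],
        defaultOutputModes=["text"],
        capabilities=AgentCapabilities(streaming=True),
        skills=[],
    )
    uvicorn.run(coordinator.to_a2a(agent_card=card), host="localhost", port=8888)

Deeper hierarchies, as in the server hunk, repeat the same agents= nesting one level further down.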

aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.py
@@ -0,0 +1,57 @@
+"""Simple A2A client demonstrating RunnableConfig tool configuration."""
+
+import asyncio
+
+from dotenv import load_dotenv
+
+from aip_agents.agent import LangGraphReactAgent
+from aip_agents.schema.agent import A2AClientConfig
+
+load_dotenv()
+
+
+async def main():
+    """Demonstrate RunnableConfig with agent defaults and runtime overrides."""
+    # Create client agent
+    client = LangGraphReactAgent(
+        name="Client", instruction="You request currency services.", model="openai/gpt-4o-mini"
+    )
+
+    # Discover server agent
+    agents = client.discover_agents(A2AClientConfig(discovery_urls=["http://localhost:8885"]))
+    server_agent = agents[0]
+
+    print("🚀 RunnableConfig Demo: Agent Defaults vs Runtime Overrides")
+    print("=" * 60)
+
+    # Test 1: Agent defaults (premium_corp from server)
+    print("📊 Test 1: Agent Defaults")
+    print("Query: Convert 100 USD to EUR")
+    async for chunk in client.astream_to_agent(server_agent, "Convert 100 USD to EUR"):
+        if chunk.get("content"):
+            print(chunk["content"], end="\n", flush=True)
+        if chunk.get("metadata"):
+            print(f"Metadata: {chunk['metadata']}", end="\n\n", flush=True)
+    print("\n")
+
+    # Test 2: Runtime override (standard_business)
+    print("📊 Test 2: Runtime Override")
+    print("Query: Convert 100 USD to EUR")
+    print("Metadata: Overriding to standard_business tenant")
+
+    metadata = {
+        "tool_configs": {"currency_exchange": {"tenant_id": "standard_business", "auth_key": "standard_key_456"}}
+    }
+
+    async for chunk in client.astream_to_agent(server_agent, "Convert 100 USD to EUR", metadata=metadata):
+        if chunk.get("content"):
+            print(chunk["content"], end="\n", flush=True)
+        if chunk.get("metadata"):
+            print(f"Metadata: {chunk['metadata']}", end="\n\n", flush=True)
+    print("\n")
+
+    print("✅ Demo completed! Notice different rates between premium_corp and standard_business.")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.py
@@ -0,0 +1,80 @@
+"""Example A2A server for a LangChainAgent Weather Service.
+
+This server instantiates a LangChainAgent with weather lookup capabilities and serves it
+via the A2A protocol using the to_a2a convenience method.
+
+To run this server:
+    python examples/a2a/langchain_server_example.py
+
+It will listen on http://localhost:8001 by default.
+
+Authors:
+    Putu Ravindra Wiguna (putu.r.wiguna@gdplabs.id)
+    Christian Trisno Sen Long Chen (christian.t.s.l.chen@gdplabs.id)
+"""
+
+import click
+import uvicorn
+from a2a.types import AgentCapabilities, AgentCard, AgentSkill
+from dotenv import load_dotenv
+
+from aip_agents.agent import LangGraphReactAgent
+from aip_agents.examples.hello_world_a2a_langchain_server import SERVER_AGENT_NAME
+from aip_agents.examples.tools.langchain_currency_exchange_tool import CurrencyExchangeTool
+from aip_agents.utils.logger import get_logger
+
+load_dotenv()
+
+logger = get_logger(__name__)
+
+
+@click.command()
+@click.option("--host", "host", default="localhost", help="Host to bind the server to.")
+@click.option("--port", "port", default=8885, help="Port to bind the server to.")
+def main(host: str, port: int):
+    """Runs the LangChain Currency Exchange A2A server.
+
+    Args:
+        host (str): Host to bind the server to.
+        port (int): Port to bind the server to.
+    """
+    logger.info(f"Starting {SERVER_AGENT_NAME} on http://{host}:{port}")
+
+    agent_card = AgentCard(
+        name=SERVER_AGENT_NAME,
+        description="A weather agent that provides weather information for cities",
+        url=f"http://{host}:{port}",
+        version="1.0.0",
+        defaultInputModes=["text"],
+        defaultOutputModes=["text"],
+        capabilities=AgentCapabilities(streaming=True),
+        skills=[
+            AgentSkill(
+                id="currency_exchange",
+                name="Currency Exchange",
+                description="Provides current currency exchange information for cities.",
+                examples=["What's the currency exchange in Tokyo?", "Get currency exchange for London"],
+                tags=["currency_exchange"],
+            )
+        ],
+        tags=["currency_exchange"],
+    )
+
+    agent = LangGraphReactAgent(
+        name="CurrencyAgent",
+        instruction="You are a currency exchange agent. Use the currency_exchange tool for conversions.",
+        model="openai/gpt-4o-mini",
+        tools=[CurrencyExchangeTool()],
+        tool_configs={"currency_exchange": {"tenant_id": "premium_corp", "auth_key": "premium_key_123"}},
+    )
+
+    app = agent.to_a2a(
+        agent_card=agent_card,
+    )
+
+    uvicorn.run(app, host=host, port=port)
+    logger.info("A2A application configured. Starting Uvicorn server...")
+
+
+if __name__ == "__main__":
+    main()
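
Taken together, the client and server hunks above show the tool-configuration flow: the server agent binds default tool_configs at construction time (tenant premium_corp), and a client can override them per request by passing metadata={"tool_configs": ...} to astream_to_agent (tenant standard_business). The CurrencyExchangeTool itself lives elsewhere in the package (aip_agents/examples/tools/langchain_currency_exchange_tool.py) and is not shown here; purely as an illustration of how a LangChain tool can receive per-request configuration, the sketch below uses the standard langchain_core pattern of an injected RunnableConfig argument. Where the settings sit inside config is an assumption, not the package's actual wiring.

    from langchain_core.runnables import RunnableConfig
    from langchain_core.tools import tool

    @tool
    def currency_exchange(amount: float, base: str, target: str, config: RunnableConfig) -> str:
        """Convert an amount between currencies using per-tenant settings."""
        # RunnableConfig-annotated parameters are hidden from the model's tool
        # schema and injected at call time by langchain_core.
        # ASSUMPTION: the lookup path below is illustrative only; the package's
        # tool_config_injector may place the settings elsewhere in the config.
        settings = (config.get("configurable") or {}).get("currency_exchange", {})
        tenant = settings.get("tenant_id", "default")
        return f"[{tenant}] converted {amount} {base} to {target}"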

aip_agents/examples/hello_world_google_adk.py
@@ -0,0 +1,41 @@
+"""Minimal example demonstrating the GoogleADKAgent with tool usage and async operation.
+
+This example shows how to create a simple calculator agent using Google's ADK
+which automatically handles tool calling and conversation flow.
+"""
+
+import asyncio
+
+from aip_agents.agent.google_adk_agent import GoogleADKAgent
+from aip_agents.examples.tools.adk_arithmetic_tools import sum_numbers
+
+
+async def google_adk_example():
+    """Demonstrates the GoogleADKAgent's arun method."""
+    agent_name = "GoogleADKCalculator"
+
+    # Create the agent with simplified instructions and our tool
+    agent = GoogleADKAgent(
+        name=agent_name,
+        instruction="""You are a calculator assistant. When asked math problems,
+        extract numbers and call sum_numbers tool to add them.
+        For multi-step problems, use multiple tool calls.""",
+        model="gemini-2.0-flash",
+        tools=[sum_numbers],
+        max_iterations=5,  # Allow multiple tool calls if needed
+    )
+
+    # Use the same query as in LangGraph example for consistency
+    query = "What is the sum of 23 and 47? And then add 10 to that, then add 5 more."
+    print(f"--- Agent: {agent_name} ---")
+    print(f"Query: {query}")
+
+    print("\nRunning arun...")
+    response = await agent.arun(query=query)
+    print(f"[arun] Final Response: {response.get('output')}")
+    print("--- End of Google ADK Example ---")
+
+
+if __name__ == "__main__":
+    # GOOGLE_API_KEY should be set in the environment.
+    asyncio.run(google_adk_example())
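
One detail worth noting in the hunk above: sum_numbers is imported from adk_arithmetic_tools and passed to the agent as a plain Python callable. Assuming GoogleADKAgent accepts any such callable with a docstring as a tool (which the import style suggests but the hunk does not prove), an extra tool would be declared the same way; the function below and the commented wiring are illustrative only.

    def multiply_numbers(a: float, b: float) -> float:
        """Multiply two numbers and return their product."""
        return a * b

    # Hypothetical wiring, mirroring the example above:
    # agent = GoogleADKAgent(name="GoogleADKCalculator", instruction="...",
    #                        model="gemini-2.0-flash", tools=[sum_numbers, multiply_numbers])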