traia-iatp 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- traia_iatp/README.md +368 -0
- traia_iatp/__init__.py +30 -0
- traia_iatp/cli/__init__.py +5 -0
- traia_iatp/cli/main.py +483 -0
- traia_iatp/client/__init__.py +10 -0
- traia_iatp/client/a2a_client.py +274 -0
- traia_iatp/client/crewai_a2a_tools.py +335 -0
- traia_iatp/client/grpc_a2a_tools.py +349 -0
- traia_iatp/client/root_path_a2a_client.py +1 -0
- traia_iatp/core/__init__.py +43 -0
- traia_iatp/core/models.py +161 -0
- traia_iatp/mcp/__init__.py +15 -0
- traia_iatp/mcp/client.py +201 -0
- traia_iatp/mcp/mcp_agent_template.py +422 -0
- traia_iatp/mcp/templates/Dockerfile.j2 +56 -0
- traia_iatp/mcp/templates/README.md.j2 +212 -0
- traia_iatp/mcp/templates/cursor-rules.md.j2 +326 -0
- traia_iatp/mcp/templates/deployment_params.json.j2 +20 -0
- traia_iatp/mcp/templates/docker-compose.yml.j2 +23 -0
- traia_iatp/mcp/templates/dockerignore.j2 +47 -0
- traia_iatp/mcp/templates/gitignore.j2 +77 -0
- traia_iatp/mcp/templates/mcp_health_check.py.j2 +150 -0
- traia_iatp/mcp/templates/pyproject.toml.j2 +26 -0
- traia_iatp/mcp/templates/run_local_docker.sh.j2 +94 -0
- traia_iatp/mcp/templates/server.py.j2 +240 -0
- traia_iatp/mcp/traia_mcp_adapter.py +381 -0
- traia_iatp/preview_diagrams.html +181 -0
- traia_iatp/registry/__init__.py +26 -0
- traia_iatp/registry/atlas_search_indexes.json +280 -0
- traia_iatp/registry/embeddings.py +298 -0
- traia_iatp/registry/iatp_search_api.py +839 -0
- traia_iatp/registry/mongodb_registry.py +771 -0
- traia_iatp/registry/readmes/ATLAS_SEARCH_INDEXES.md +252 -0
- traia_iatp/registry/readmes/ATLAS_SEARCH_SETUP.md +134 -0
- traia_iatp/registry/readmes/AUTHENTICATION_UPDATE.md +124 -0
- traia_iatp/registry/readmes/EMBEDDINGS_SETUP.md +172 -0
- traia_iatp/registry/readmes/IATP_SEARCH_API_GUIDE.md +257 -0
- traia_iatp/registry/readmes/MONGODB_X509_AUTH.md +208 -0
- traia_iatp/registry/readmes/README.md +251 -0
- traia_iatp/registry/readmes/REFACTORING_SUMMARY.md +191 -0
- traia_iatp/server/__init__.py +15 -0
- traia_iatp/server/a2a_server.py +215 -0
- traia_iatp/server/example_template_usage.py +72 -0
- traia_iatp/server/iatp_server_agent_generator.py +237 -0
- traia_iatp/server/iatp_server_template_generator.py +235 -0
- traia_iatp/server/templates/Dockerfile.j2 +49 -0
- traia_iatp/server/templates/README.md +137 -0
- traia_iatp/server/templates/README.md.j2 +425 -0
- traia_iatp/server/templates/__init__.py +1 -0
- traia_iatp/server/templates/__main__.py.j2 +450 -0
- traia_iatp/server/templates/agent.py.j2 +80 -0
- traia_iatp/server/templates/agent_config.json.j2 +22 -0
- traia_iatp/server/templates/agent_executor.py.j2 +264 -0
- traia_iatp/server/templates/docker-compose.yml.j2 +23 -0
- traia_iatp/server/templates/env.example.j2 +67 -0
- traia_iatp/server/templates/gitignore.j2 +78 -0
- traia_iatp/server/templates/grpc_server.py.j2 +218 -0
- traia_iatp/server/templates/pyproject.toml.j2 +76 -0
- traia_iatp/server/templates/run_local_docker.sh.j2 +103 -0
- traia_iatp/server/templates/server.py.j2 +190 -0
- traia_iatp/special_agencies/__init__.py +4 -0
- traia_iatp/special_agencies/registry_search_agency.py +392 -0
- traia_iatp/utils/__init__.py +10 -0
- traia_iatp/utils/docker_utils.py +251 -0
- traia_iatp/utils/general.py +64 -0
- traia_iatp/utils/iatp_utils.py +126 -0
- traia_iatp-0.1.1.dist-info/METADATA +414 -0
- traia_iatp-0.1.1.dist-info/RECORD +72 -0
- traia_iatp-0.1.1.dist-info/WHEEL +5 -0
- traia_iatp-0.1.1.dist-info/entry_points.txt +2 -0
- traia_iatp-0.1.1.dist-info/licenses/LICENSE +21 -0
- traia_iatp-0.1.1.dist-info/top_level.txt +1 -0

traia_iatp/server/templates/agent_executor.py.j2
@@ -0,0 +1,264 @@
+"""
+{{ agent_name }} - A2A Server Agent Executor Implementation
+
+This module implements the agent executor for {{ agent_name }}.
+Supports both synchronous responses and SSE streaming.
+"""
+
+import asyncio
+import json
+import logging
+import os
+from typing import AsyncGenerator, Optional, Dict, Any
+from datetime import datetime
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor
+from a2a.server.agent_execution import AgentExecutor, RequestContext
+from a2a.server.events.event_queue import EventQueue
+from a2a.types import Message, TextPart
+from a2a.utils import new_agent_text_message
+from crewai import Task
+from traia_iatp.mcp import MCPServerConfig, MCPAgentBuilder, run_with_mcp_tools, MCPServerInfo
+
+logger = logging.getLogger(__name__)
+
+# Create a thread pool for CPU-bound CrewAI operations
+executor = ThreadPoolExecutor(max_workers=10)
+
+# Get MCP server API key if required
+{% if requires_api_key %}
+# Check for API keys required by the MCP server
+MCP_API_KEY = None
+{% for api_key_name in api_keys %}
+if not MCP_API_KEY and os.getenv("{{ api_key_name }}"):
+    MCP_API_KEY = os.getenv("{{ api_key_name }}")
+    logger.info(f"Using API key from {{ api_key_name }} environment variable")
+{% endfor %}
+
+if not MCP_API_KEY:
+    logger.warning("No API key found for MCP server authentication.")
+    logger.warning("The MCP server requires one of these environment variables to be set:")
+{% for api_key_name in api_keys %}
+    logger.warning(" - {{ api_key_name }}")
+{% endfor %}
+else:
+    logger.info("MCP server API key loaded successfully")
+{% else %}
+MCP_API_KEY = None
+{% endif %}
+
+
+class CustomEvent:
+    """Custom event class for SSE streaming."""
+    def __init__(self, event_type: str, data: Dict[str, Any]):
+        self.type = event_type
+        self.data = data
+
+
+class {{ class_name }}AgentExecutor(AgentExecutor):
+    """Agent executor for {{ agent_name }}."""
+
+    def __init__(self, mcp_config: MCPServerConfig, supports_streaming: bool = False):
+        self.mcp_config = mcp_config
+        self.supports_streaming = supports_streaming
+        self.mcp_server_info = MCPServerInfo(
+            id="",  # Not needed for direct usage
+            name=mcp_config.name,
+            url=mcp_config.url,
+            description=mcp_config.description,
+            server_type=mcp_config.server_type,
+            capabilities=mcp_config.capabilities,
+            metadata=mcp_config.metadata,
+            tags=mcp_config.metadata.get("tags", [])
+        )
+
+    async def execute(self, context: RequestContext, event_queue: EventQueue) -> None:
+        """Process a request using the {{ agent_name }} capabilities."""
+        try:
+            # Get the user's request from context
+            request_text = context.get_user_input()
+            if not request_text:
+                # Send empty response with task ID if available
+                msg = new_agent_text_message("No user message provided")
+                if hasattr(context, 'task_id') and context.task_id:
+                    msg.taskId = context.task_id
+                await event_queue.enqueue_event(msg)
+                return
+
+            # Check if client requested streaming
+            stream_requested = False
+            if hasattr(context, 'configuration') and context.configuration:
+                output_mode = context.configuration.get('output_mode', '')
+                stream_requested = output_mode == 'text/event-stream'
+
+            # Execute the request
+            if stream_requested and self.supports_streaming:
+                await self._execute_streaming(context, event_queue, request_text)
+            else:
+                await self._execute_standard(context, event_queue, request_text)
+
+        except Exception as e:
+            logger.error(f"Error processing request: {e}")
+            msg = new_agent_text_message(f"Error processing request: {str(e)}")
+            if hasattr(context, 'task_id') and context.task_id:
+                msg.taskId = context.task_id
+            await event_queue.enqueue_event(msg)
+
+    async def _execute_standard(self, context: RequestContext, event_queue: EventQueue, request_text: str) -> None:
+        """Execute standard (non-streaming) request."""
+        # Get additional context if provided
+        task_context = {}
+        if hasattr(context, 'metadata'):
+            task_context = context.metadata or {}
+
+        # Create an agent for this request
+        agent = MCPAgentBuilder.create_agent(
+            role=f"{{ agent_name }} Specialist",
+            goal=f"Process the request using {self.mcp_config.name} capabilities",
+            backstory=f"You are an expert at using {self.mcp_config.name}. {self.mcp_config.description}"
+        )
+
+        # Create a task
+        task = Task(
+            description=request_text,
+            expected_output="The processed result based on the request",
+            agent=agent
+        )
+
+        # Create a wrapper function to handle the arguments properly
+        def run_crew_task():
+            # Build kwargs for run_with_mcp_tools
+            kwargs = {
+                "tasks": [task],
+                "mcp_server": self.mcp_server_info,
+                "inputs": task_context,
+                "skip_health_check": True
+            }
+
+            # Only add api_key if MCP server requires authentication
+            if self.mcp_config.metadata.get("requires_api_key", False) and MCP_API_KEY:
+                kwargs["api_key"] = MCP_API_KEY
+                logger.debug("Including API key for authenticated MCP connection")
+
+            return run_with_mcp_tools(**kwargs)
+
+        # Run CrewAI in thread pool to avoid blocking the event loop
+        loop = asyncio.get_event_loop()
+        result = await loop.run_in_executor(executor, run_crew_task)
+
+        # Send the result as agent message with task ID if available
+        msg = new_agent_text_message(str(result))
+        if hasattr(context, 'task_id') and context.task_id:
+            msg.taskId = context.task_id
+        await event_queue.enqueue_event(msg)
+
+    async def _execute_streaming(self, context: RequestContext, event_queue: EventQueue, request_text: str) -> None:
+        """Execute streaming request using SSE."""
+        try:
+            # Send initial event to indicate streaming has started
+            await event_queue.enqueue_event(
+                CustomEvent("stream_start", {"message": "Starting streaming response"})
+            )
+
+            # Stream chunks as they become available
+            chunk_count = 0
+            async for chunk in self._stream_mcp_response(request_text, context):
+                # Send each chunk as a separate SSE event
+                await event_queue.enqueue_event(
+                    CustomEvent("stream_chunk", {
+                        "chunk_id": chunk_count,
+                        "content": chunk
+                    })
+                )
+
+                # Also emit as message event for standard SSE subscribers
+                await event_queue.enqueue_event(
+                    CustomEvent("message", {
+                        "role": "agent",
+                        "content": chunk,
+                        "chunk_id": chunk_count
+                    })
+                )
+
+                chunk_count += 1
+
+                # Yield control to allow other tasks to run
+                await asyncio.sleep(0)
+
+            # Send completion event
+            await event_queue.enqueue_event(
+                CustomEvent("stream_complete", {
+                    "total_chunks": chunk_count,
+                    "message": "Streaming completed successfully"
+                })
+            )
+
+        except Exception as e:
+            logger.error(f"Error in streaming execution: {e}")
+            await event_queue.enqueue_event(
+                CustomEvent("stream_error", {
+                    "error": str(e),
+                    "message": "Streaming encountered an error"
+                })
+            )
+
+    async def _stream_mcp_response(self, request_text: str, context: RequestContext) -> AsyncGenerator[str, None]:
+        """
+        Stream responses from MCP server.
+        This is a placeholder that should be implemented based on specific MCP server capabilities.
+        """
+        # For now, simulate streaming by breaking response into chunks
+        # In real implementation, this would connect to MCP server's streaming endpoint
+
+        # Get full response first (in real implementation, this would be streamed)
+        task_context = {}
+        if hasattr(context, 'metadata'):
+            task_context = context.metadata or {}
+
+        agent = MCPAgentBuilder.create_agent(
+            role=f"{{ agent_name }} Streaming Specialist",
+            goal=f"Process the streaming request using {self.mcp_config.name} capabilities",
+            backstory=f"You are an expert at using {self.mcp_config.name} for streaming data. {self.mcp_config.description}"
+        )
+
+        task = Task(
+            description=request_text,
+            expected_output="The processed streaming result",
+            agent=agent
+        )
+
+        # For demonstration, get the full result and stream it in chunks
+        def run_streaming_task():
+            # Build kwargs for run_with_mcp_tools
+            kwargs = {
+                "tasks": [task],
+                "mcp_server": self.mcp_server_info,
+                "inputs": task_context,
+                "skip_health_check": True
+            }
+
+            # Only add api_key if MCP server requires authentication
+            if self.mcp_config.metadata.get("requires_api_key", False) and MCP_API_KEY:
+                kwargs["api_key"] = MCP_API_KEY
+                logger.debug("Including API key for authenticated MCP streaming connection")
+
+            return run_with_mcp_tools(**kwargs)
+
+        loop = asyncio.get_event_loop()
+        result = await loop.run_in_executor(executor, run_streaming_task)
+
+        # Simulate streaming by chunking the response
+        result_str = str(result)
+        chunk_size = 100  # Characters per chunk
+
+        for i in range(0, len(result_str), chunk_size):
+            chunk = result_str[i:i + chunk_size]
+            yield chunk
+            await asyncio.sleep(0.1)  # Simulate network delay
+
+    async def cancel(self, task_id: str) -> None:
+        """Cancel a running task."""
+        logger.info(f"Cancelling task: {task_id}")
+        # Implementation depends on MCP server capabilities
+        # For now, just log the cancellation request
+        pass
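
The API-key block at the top of this template is resolved at generation time: Jinja emits one guarded os.getenv lookup per candidate variable name, so at runtime the first variable that is set wins. A minimal sketch of that expansion, assuming only the jinja2 package; the API_KEY_ONE/API_KEY_TWO names are hypothetical:

# Sketch: how the {% if requires_api_key %} block above expands when the
# template is rendered. Assumes jinja2; the api_keys names are hypothetical.
from jinja2 import Template

snippet = Template(
    "{% if requires_api_key %}"
    "MCP_API_KEY = None\n"
    "{% for api_key_name in api_keys %}"
    'if not MCP_API_KEY and os.getenv("{{ api_key_name }}"):\n'
    '    MCP_API_KEY = os.getenv("{{ api_key_name }}")\n'
    "{% endfor %}"
    "{% else %}"
    "MCP_API_KEY = None\n"
    "{% endif %}"
)

# Prints one guarded lookup per candidate variable, in order of priority.
print(snippet.render(requires_api_key=True, api_keys=["API_KEY_ONE", "API_KEY_TWO"]))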

traia_iatp/server/templates/docker-compose.yml.j2
@@ -0,0 +1,23 @@
+version: '3.8'
+
+services:
+  {{ agent_id }}:
+    build: .
+    container_name: {{ agent_id }}-local
+    ports:
+      - "${PORT:-8000}:${PORT:-8000}"
+    environment:
+      - PORT=${PORT:-8000}
+      - HOST=0.0.0.0
+      - LLM_MODEL=${LLM_MODEL:-openai/gpt-4.1}
+{% for env_var in environment_variables %}
+      - {{ env_var.name }}={{ env_var.value }}
+{% endfor %}
+    volumes:
+      - ./.env:/app/.env:ro
+{% if mcp_server_url %}
+    # MCP Server configuration
+    extra_hosts:
+      - "host.docker.internal:host-gateway"  # For accessing local MCP servers
+{% endif %}
+    restart: unless-stopped

traia_iatp/server/templates/env.example.j2
@@ -0,0 +1,67 @@
+# {{ agent_name }} Environment Variables
+# Copy this file to .env and fill in your values
+
+# Server Configuration
+PORT=8000
+HOST=0.0.0.0
+
+# Language Model Configuration
+LLM_MODEL=openai/gpt-4.1
+# You can also use other models:
+# LLM_MODEL=vertex_ai/gemini-2.0-flash
+# LLM_MODEL=anthropic/claude-4-sonnet
+
+# API Keys (add as needed based on your LLM choice)
+# OPENAI_API_KEY=your-openai-key-here
+# ANTHROPIC_API_KEY=your-anthropic-key-here
+# GOOGLE_API_KEY=your-google-key-here
+# VERTEX_PROJECT_ID=your-gcp-project-id
+# VERTEX_LOCATION=us-central1
+
+# MCP Server Configuration
+MCP_SERVER_URL={{ mcp_server_url }}
+{% if requires_api_key and api_keys %}
+# MCP Server API Keys (REQUIRED - The MCP server requires authentication)
+# Set one of the following environment variables:
+{% for api_key_name in api_keys %}
+{{ api_key_name }}=your-api-key-here
+{% endfor %}
+{% endif %}
+{% if mcp_server_metadata %}
+# Additional MCP configuration
+{% for key, value in mcp_server_metadata.items() %}
+{% if key.upper() == key %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+{% endif %}
+
+# Logging
+LOG_LEVEL=INFO
+
+# MongoDB Configuration (if using registry)
+# MONGODB_USER=your-mongodb-user
+# MONGODB_PASSWORD=your-mongodb-password
+# MONGODB_HOST=your-mongodb-host
+# MONGODB_PORT=27017
+# MONGODB_DATABASE=iatp
+
+{% if auth_required %}
+# Authentication Configuration
+{% for scheme in auth_schemes %}
+# Configure {{ scheme }} authentication
+{% if scheme == "bearer" %}
+# AUTH_TOKEN=your-bearer-token
+{% elif scheme == "api_key" %}
+# API_KEY=your-api-key
+{% elif scheme == "oauth2" %}
+# OAUTH2_CLIENT_ID=your-client-id
+# OAUTH2_CLIENT_SECRET=your-client-secret
+{% endif %}
+{% endfor %}
+{% endif %}
+
+# Additional custom environment variables
+{% for env_var in environment_variables %}
+# {{ env_var.name }}={{ env_var.value }}
+{% endfor %}
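
One subtlety in this template is the {% if key.upper() == key %} guard: only metadata keys that are already all-uppercase are copied into the generated env example, which keeps non-env metadata such as tags out of the file. A small sketch of that filter, assuming jinja2; the metadata values below are hypothetical:

# Sketch of the uppercase-key filter in env.example.j2 (hypothetical metadata).
from jinja2 import Template

block = Template(
    "{% for key, value in mcp_server_metadata.items() %}"
    "{% if key.upper() == key %}"
    "{{ key }}={{ value }}\n"
    "{% endif %}"
    "{% endfor %}"
)

metadata = {"RATE_LIMIT": "100", "tags": "finance", "TIMEOUT_SECONDS": "30"}
print(block.render(mcp_server_metadata=metadata))
# Emits RATE_LIMIT=100 and TIMEOUT_SECONDS=30; "tags" is filtered out.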

traia_iatp/server/templates/gitignore.j2
@@ -0,0 +1,78 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+env/
+venv/
+.venv/
+*.egg-info/
+dist/
+build/
+.pytest_cache/
+.coverage
+htmlcov/
+.mypy_cache/
+.ruff_cache/
+
+# Virtual Environments
+bin/
+include/
+lib/
+lib64/
+pyvenv.cfg
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+.project
+.pydevproject
+
+# Environment Variables
+.env
+.env.*
+!.env.example
+
+# Logs
+*.log
+logs/
+
+# OS
+.DS_Store
+Thumbs.db
+desktop.ini
+
+# UV Package Manager
+.uv/
+uv.lock
+
+# Docker
+*.pid
+docker-compose.override.yml
+
+# {{ agent_name }} specific
+{% if additional_ignores %}
+# Custom ignores
+{% for ignore in additional_ignores %}
+{{ ignore }}
+{% endfor %}
+{% endif %}
+
+# Temporary files
+*.tmp
+*.temp
+*.bak
+.cache/
+
+# Database
+*.db
+*.sqlite
+*.sqlite3
+
+# Generated files
+generated/
+output/

traia_iatp/server/templates/grpc_server.py.j2
@@ -0,0 +1,218 @@
+"""
+{{ agency_name }} - gRPC Server Implementation
+
+This module provides a gRPC server for {{ agency_name }}.
+This is an optional component that can run alongside the HTTP/2 server for high-performance scenarios.
+"""
+
+import asyncio
+import logging
+import os
+import json
+from concurrent import futures
+from typing import AsyncIterator, Optional
+
+import grpc
+from grpc import aio
+from traia_iatp.mcp import MCPServerConfig
+from .agent_executor import {{ class_name }}AgentExecutor
+
+# Note: These would be generated from .proto files
+# from . import a2a_pb2, a2a_pb2_grpc
+
+logger = logging.getLogger(__name__)
+
+
+class {{ class_name }}GrpcService:  # (a2a_pb2_grpc.A2AServiceServicer):
+    """gRPC service implementation for {{ agency_name }}."""
+
+    def __init__(self, mcp_config: MCPServerConfig):
+        self.mcp_config = mcp_config
+        self.executor = {{ class_name }}AgentExecutor(
+            mcp_config,
+            supports_streaming=True  # gRPC always supports streaming
+        )
+
+    async def SendMessage(self, request, context):
+        """Handle unary RPC: single request, single response."""
+        try:
+            # Extract message content
+            message_content = request.message.content
+
+            # Process with executor
+            result = await self.executor.process_unary_request(message_content)
+
+            # Create response
+            # response = a2a_pb2.SendMessageResponse()
+            # response.result.content = result
+            # return response
+
+            # Placeholder return
+            return {"result": {"content": result}}
+
+        except Exception as e:
+            logger.error(f"Error in SendMessage: {e}")
+            await context.abort(
+                grpc.StatusCode.INTERNAL,
+                f"Internal error: {str(e)}"
+            )
+
+    async def StreamMessage(self, request, context):
+        """Handle server streaming RPC: single request, stream of responses."""
+        try:
+            # Extract message content
+            message_content = request.message.content
+
+            # Stream responses
+            async for chunk in self.executor.process_streaming_request(message_content):
+                # response = a2a_pb2.StreamMessageResponse()
+                # response.chunk.content = chunk
+                # yield response
+
+                # Placeholder yield
+                yield {"chunk": {"content": chunk}}
+
+        except Exception as e:
+            logger.error(f"Error in StreamMessage: {e}")
+            await context.abort(
+                grpc.StatusCode.INTERNAL,
+                f"Streaming error: {str(e)}"
+            )
+
+    async def BidirectionalStream(self, request_iterator, context):
+        """Handle bidirectional streaming RPC."""
+        try:
+            async for request in request_iterator:
+                # Process each incoming request
+                message_content = request.message.content
+
+                # Stream responses for this request
+                async for chunk in self.executor.process_streaming_request(message_content):
+                    # response = a2a_pb2.StreamMessageResponse()
+                    # response.chunk.content = chunk
+                    # yield response
+
+                    # Placeholder yield
+                    yield {"chunk": {"content": chunk}}
+
+        except Exception as e:
+            logger.error(f"Error in BidirectionalStream: {e}")
+            await context.abort(
+                grpc.StatusCode.INTERNAL,
+                f"Bidirectional streaming error: {str(e)}"
+            )
+
+    async def GetCapabilities(self, request, context):
+        """Return agent capabilities."""
+        try:
+            # capabilities = a2a_pb2.CapabilitiesResponse()
+            # capabilities.name = "{{ agency_id }}"
+            # capabilities.description = "{{ agency_description }}"
+            # capabilities.version = "{{ agency_version }}"
+            # capabilities.supports_streaming = True
+            # capabilities.supports_bidirectional = True
+            # return capabilities
+
+            # Placeholder return
+            return {
+                "name": "{{ agency_id }}",
+                "description": "{{ agency_description }}",
+                "version": "{{ agency_version }}",
+                "supports_streaming": True,
+                "supports_bidirectional": True,
+                "mcp_capabilities": self.mcp_config.capabilities
+            }
+
+        except Exception as e:
+            logger.error(f"Error in GetCapabilities: {e}")
+            await context.abort(
+                grpc.StatusCode.INTERNAL,
+                f"Error getting capabilities: {str(e)}"
+            )
+
+
+async def serve_grpc():
+    """Start the gRPC server."""
+    # Load configuration
+    config_path = "agency_config.json"
+    if os.path.exists(config_path):
+        with open(config_path, "r") as f:
+            config_data = json.load(f)
+            mcp_data = config_data.get("mcp_server", {})
+    else:
+        mcp_data = {
+            "name": "{{ mcp_server_name }}",
+            "url": "{{ mcp_server_url }}",
+            "description": "{{ mcp_server_description }}",
+            "server_type": "{{ mcp_server_type }}",
+            "capabilities": {{ mcp_server_capabilities | tojson }},
+            "metadata": {{ mcp_server_metadata | tojson }}
+        }
+
+    # Create MCP config
+    mcp_config = MCPServerConfig(
+        name=mcp_data["name"],
+        url=mcp_data["url"],
+        description=mcp_data["description"],
+        server_type=mcp_data.get("server_type", "streamable-http"),
+        capabilities=mcp_data.get("capabilities", []),
+        metadata=mcp_data.get("metadata", {})
+    )
+
+    # Create gRPC server
+    server = aio.server(
+        futures.ThreadPoolExecutor(max_workers=10),
+        options=[
+            ('grpc.max_send_message_length', 4 * 1024 * 1024),  # 4MB
+            ('grpc.max_receive_message_length', 4 * 1024 * 1024),  # 4MB
+            ('grpc.keepalive_time_ms', 10000),
+            ('grpc.keepalive_timeout_ms', 5000),
+            ('grpc.http2.max_concurrent_streams', 100),
+            ('grpc.http2.max_frame_size', 16384),
+        ]
+    )
+
+    # Add service
+    service = {{ class_name }}GrpcService(mcp_config)
+    # a2a_pb2_grpc.add_A2AServiceServicer_to_server(service, server)
+
+    # Enable reflection for debugging
+    # from grpc_reflection.v1alpha import reflection
+    # SERVICE_NAMES = (
+    #     a2a_pb2.DESCRIPTOR.services_by_name['A2AService'].full_name,
+    #     reflection.SERVICE_NAME,
+    # )
+    # reflection.enable_server_reflection(SERVICE_NAMES, server)
+
+    # Get port from environment
+    port = int(os.environ.get("GRPC_PORT", 50051))
+
+    # Add insecure port (TLS can be added later)
+    server.add_insecure_port(f'[::]:{port}')
+
+    logger.info(f"Starting {{ agency_name }} gRPC Server")
+    logger.info(f"Listening on port {port}")
+    logger.info(f"MCP Server: {{ mcp_server_name }}")
+
+    # Start server
+    await server.start()
+
+    try:
+        await server.wait_for_termination()
+    except KeyboardInterrupt:
+        logger.info("Shutting down gRPC server...")
+        await server.stop(5)
+
+
+def main():
+    """Main entry point for gRPC server."""
+    logging.basicConfig(
+        level=logging.INFO,
+        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+    )
+
+    asyncio.run(serve_grpc())
+
+
+if __name__ == "__main__":
+    main()
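
The servicer registration and response classes above are commented out because the a2a_pb2 / a2a_pb2_grpc modules are not shipped with the package; they would first have to be generated from a .proto definition. A sketch of that generation step, assuming grpcio-tools is installed and that an a2a.proto file sits next to the server (the file name and path are hypothetical):

# Sketch: generate a2a_pb2.py and a2a_pb2_grpc.py from a .proto definition.
# Assumes grpcio-tools is installed; "a2a.proto" is a hypothetical path.
from grpc_tools import protoc

protoc.main([
    "protoc",               # argv[0], ignored by protoc itself
    "-I.",                  # search the current directory for imports
    "--python_out=.",       # where to write a2a_pb2.py
    "--grpc_python_out=.",  # where to write a2a_pb2_grpc.py
    "a2a.proto",
])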