traia-iatp 0.1.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of traia-iatp might be problematic. Click here for more details.

Files changed (107) hide show
  1. traia_iatp/README.md +368 -0
  2. traia_iatp/__init__.py +54 -0
  3. traia_iatp/cli/__init__.py +5 -0
  4. traia_iatp/cli/main.py +483 -0
  5. traia_iatp/client/__init__.py +10 -0
  6. traia_iatp/client/a2a_client.py +274 -0
  7. traia_iatp/client/crewai_a2a_tools.py +335 -0
  8. traia_iatp/client/d402_a2a_client.py +293 -0
  9. traia_iatp/client/grpc_a2a_tools.py +349 -0
  10. traia_iatp/client/root_path_a2a_client.py +1 -0
  11. traia_iatp/contracts/__init__.py +12 -0
  12. traia_iatp/contracts/iatp_contracts_config.py +263 -0
  13. traia_iatp/contracts/wallet_creator.py +255 -0
  14. traia_iatp/core/__init__.py +43 -0
  15. traia_iatp/core/models.py +172 -0
  16. traia_iatp/d402/__init__.py +55 -0
  17. traia_iatp/d402/chains.py +102 -0
  18. traia_iatp/d402/client.py +150 -0
  19. traia_iatp/d402/clients/__init__.py +7 -0
  20. traia_iatp/d402/clients/base.py +218 -0
  21. traia_iatp/d402/clients/httpx.py +219 -0
  22. traia_iatp/d402/common.py +114 -0
  23. traia_iatp/d402/encoding.py +28 -0
  24. traia_iatp/d402/examples/client_example.py +197 -0
  25. traia_iatp/d402/examples/server_example.py +171 -0
  26. traia_iatp/d402/facilitator.py +453 -0
  27. traia_iatp/d402/fastapi_middleware/__init__.py +6 -0
  28. traia_iatp/d402/fastapi_middleware/middleware.py +225 -0
  29. traia_iatp/d402/fastmcp_middleware.py +147 -0
  30. traia_iatp/d402/mcp_middleware.py +434 -0
  31. traia_iatp/d402/middleware.py +193 -0
  32. traia_iatp/d402/models.py +116 -0
  33. traia_iatp/d402/networks.py +98 -0
  34. traia_iatp/d402/path.py +43 -0
  35. traia_iatp/d402/payment_introspection.py +104 -0
  36. traia_iatp/d402/payment_signing.py +178 -0
  37. traia_iatp/d402/paywall.py +119 -0
  38. traia_iatp/d402/starlette_middleware.py +326 -0
  39. traia_iatp/d402/template.py +1 -0
  40. traia_iatp/d402/types.py +300 -0
  41. traia_iatp/mcp/__init__.py +18 -0
  42. traia_iatp/mcp/client.py +201 -0
  43. traia_iatp/mcp/d402_mcp_tool_adapter.py +361 -0
  44. traia_iatp/mcp/mcp_agent_template.py +481 -0
  45. traia_iatp/mcp/templates/Dockerfile.j2 +80 -0
  46. traia_iatp/mcp/templates/README.md.j2 +310 -0
  47. traia_iatp/mcp/templates/cursor-rules.md.j2 +520 -0
  48. traia_iatp/mcp/templates/deployment_params.json.j2 +20 -0
  49. traia_iatp/mcp/templates/docker-compose.yml.j2 +32 -0
  50. traia_iatp/mcp/templates/dockerignore.j2 +47 -0
  51. traia_iatp/mcp/templates/env.example.j2 +57 -0
  52. traia_iatp/mcp/templates/gitignore.j2 +77 -0
  53. traia_iatp/mcp/templates/mcp_health_check.py.j2 +150 -0
  54. traia_iatp/mcp/templates/pyproject.toml.j2 +32 -0
  55. traia_iatp/mcp/templates/pyrightconfig.json.j2 +22 -0
  56. traia_iatp/mcp/templates/run_local_docker.sh.j2 +390 -0
  57. traia_iatp/mcp/templates/server.py.j2 +175 -0
  58. traia_iatp/mcp/traia_mcp_adapter.py +543 -0
  59. traia_iatp/preview_diagrams.html +181 -0
  60. traia_iatp/registry/__init__.py +26 -0
  61. traia_iatp/registry/atlas_search_indexes.json +280 -0
  62. traia_iatp/registry/embeddings.py +298 -0
  63. traia_iatp/registry/iatp_search_api.py +846 -0
  64. traia_iatp/registry/mongodb_registry.py +771 -0
  65. traia_iatp/registry/readmes/ATLAS_SEARCH_INDEXES.md +252 -0
  66. traia_iatp/registry/readmes/ATLAS_SEARCH_SETUP.md +134 -0
  67. traia_iatp/registry/readmes/AUTHENTICATION_UPDATE.md +124 -0
  68. traia_iatp/registry/readmes/EMBEDDINGS_SETUP.md +172 -0
  69. traia_iatp/registry/readmes/IATP_SEARCH_API_GUIDE.md +257 -0
  70. traia_iatp/registry/readmes/MONGODB_X509_AUTH.md +208 -0
  71. traia_iatp/registry/readmes/README.md +251 -0
  72. traia_iatp/registry/readmes/REFACTORING_SUMMARY.md +191 -0
  73. traia_iatp/scripts/__init__.py +2 -0
  74. traia_iatp/scripts/create_wallet.py +244 -0
  75. traia_iatp/server/__init__.py +15 -0
  76. traia_iatp/server/a2a_server.py +219 -0
  77. traia_iatp/server/example_template_usage.py +72 -0
  78. traia_iatp/server/iatp_server_agent_generator.py +237 -0
  79. traia_iatp/server/iatp_server_template_generator.py +235 -0
  80. traia_iatp/server/templates/.dockerignore.j2 +48 -0
  81. traia_iatp/server/templates/Dockerfile.j2 +49 -0
  82. traia_iatp/server/templates/README.md +137 -0
  83. traia_iatp/server/templates/README.md.j2 +425 -0
  84. traia_iatp/server/templates/__init__.py +1 -0
  85. traia_iatp/server/templates/__main__.py.j2 +565 -0
  86. traia_iatp/server/templates/agent.py.j2 +94 -0
  87. traia_iatp/server/templates/agent_config.json.j2 +22 -0
  88. traia_iatp/server/templates/agent_executor.py.j2 +279 -0
  89. traia_iatp/server/templates/docker-compose.yml.j2 +23 -0
  90. traia_iatp/server/templates/env.example.j2 +84 -0
  91. traia_iatp/server/templates/gitignore.j2 +78 -0
  92. traia_iatp/server/templates/grpc_server.py.j2 +218 -0
  93. traia_iatp/server/templates/pyproject.toml.j2 +78 -0
  94. traia_iatp/server/templates/run_local_docker.sh.j2 +103 -0
  95. traia_iatp/server/templates/server.py.j2 +243 -0
  96. traia_iatp/special_agencies/__init__.py +4 -0
  97. traia_iatp/special_agencies/registry_search_agency.py +392 -0
  98. traia_iatp/utils/__init__.py +10 -0
  99. traia_iatp/utils/docker_utils.py +251 -0
  100. traia_iatp/utils/general.py +64 -0
  101. traia_iatp/utils/iatp_utils.py +126 -0
  102. traia_iatp-0.1.29.dist-info/METADATA +423 -0
  103. traia_iatp-0.1.29.dist-info/RECORD +107 -0
  104. traia_iatp-0.1.29.dist-info/WHEEL +5 -0
  105. traia_iatp-0.1.29.dist-info/entry_points.txt +2 -0
  106. traia_iatp-0.1.29.dist-info/licenses/LICENSE +21 -0
  107. traia_iatp-0.1.29.dist-info/top_level.txt +1 -0
@@ -0,0 +1,279 @@
1
"""
{{ agent_name }} - A2A Server Agent Executor Implementation

This module implements the agent executor for {{ agent_name }}.
Supports both synchronous responses and SSE streaming.
"""

import asyncio
import json
import logging
import os
from typing import AsyncGenerator, Optional, Dict, Any
from datetime import datetime, timezone
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
from a2a.server.agent_execution import AgentExecutor, RequestContext
from a2a.server.events.event_queue import EventQueue
from a2a.types import Message, TextPart
from a2a.utils import new_agent_text_message
from crewai import Task, LLM
from traia_iatp.mcp import MCPServerConfig, MCPAgentBuilder, run_with_mcp_tools, MCPServerInfo

# Import AgentOps for operation tracking
import agentops


# Resolve the model name once so the LLM construction and the log line agree.
_LLM_MODEL_NAME = os.getenv("LLM_MODEL", "gpt-4.1-nano")

DEFAULT_LLM = LLM(
    model=_LLM_MODEL_NAME,  # Using environment variable with fallback
    temperature=float(os.getenv("LLM_MODEL_TEMPERATURE", "0.1")),
    api_key=os.getenv("OPENAI_API_KEY")
)
# FIX: datetime.utcnow() is deprecated since Python 3.12; use an aware UTC timestamp.
current_time = datetime.now(timezone.utc)

logger = logging.getLogger(__name__)

# FIX: the original line nested double quotes inside a double-quoted f-string
# (f"...{os.getenv("LLM_MODEL", ...)}"), which is a SyntaxError before Python 3.12.
# Lazy %-formatting also avoids building the string when INFO is disabled.
logger.info("Current LLM model used: %s", _LLM_MODEL_NAME)

# Create a thread pool for CPU-bound CrewAI operations
executor = ThreadPoolExecutor(max_workers=10)

# Get MCP server API key if required
{% if requires_api_key %}
# Check for API keys required by the MCP server; the first variable that is
# set wins, in the order the template lists them.
MCP_API_KEY = None
{% for api_key_name in api_keys %}
if not MCP_API_KEY and os.getenv("{{ api_key_name }}"):
    MCP_API_KEY = os.getenv("{{ api_key_name }}")
    logger.info(f"Using API key from {{ api_key_name }} environment variable")
{% endfor %}

if not MCP_API_KEY:
    logger.warning("No API key found for MCP server authentication.")
    logger.warning("The MCP server requires one of these environment variables to be set:")
    {% for api_key_name in api_keys %}
    logger.warning(" - {{ api_key_name }}")
    {% endfor %}
else:
    logger.info("MCP server API key loaded successfully")
{% else %}
MCP_API_KEY = None
{% endif %}
65
class CustomEvent:
    """Lightweight SSE event: an event-type tag plus a payload dict."""

    def __init__(self, event_type: str, data: Dict[str, Any]):
        # Store the event name and its JSON-serializable payload verbatim.
        self.type = event_type
        self.data = data
70
+
71
+
72
class {{ class_name }}AgentExecutor(AgentExecutor):
    """Agent executor for {{ agent_name }}.

    Bridges A2A requests onto a CrewAI agent that talks to the
    {{ agent_name }} MCP server. Supports a standard single-response path
    and an SSE streaming path (used when the client requests
    text/event-stream output and streaming is enabled).
    """

    def __init__(self, mcp_config: MCPServerConfig, supports_streaming: bool = False):
        self.mcp_config = mcp_config
        self.supports_streaming = supports_streaming
        # Adapter consumed by run_with_mcp_tools(); the registry id is
        # irrelevant when the server is addressed directly by URL.
        self.mcp_server_info = MCPServerInfo(
            id="",  # Not needed for direct usage
            name=mcp_config.name,
            url=mcp_config.url,
            description=mcp_config.description,
            server_type=mcp_config.server_type,
            capabilities=mcp_config.capabilities,
            metadata=mcp_config.metadata,
            tags=mcp_config.metadata.get("tags", [])
        )

    async def execute(self, context: RequestContext, event_queue: EventQueue) -> None:
        """Process a request using the {{ agent_name }} capabilities."""
        try:
            # Get the user's request from context
            request_text = context.get_user_input()
            if not request_text:
                # Send empty response with task ID if available
                msg = new_agent_text_message("No user message provided")
                if hasattr(context, 'task_id') and context.task_id:
                    msg.taskId = context.task_id
                await event_queue.enqueue_event(msg)
                return

            # Check if client requested streaming
            stream_requested = False
            if hasattr(context, 'configuration') and context.configuration:
                output_mode = context.configuration.get('output_mode', '')
                stream_requested = output_mode == 'text/event-stream'

            # Execute the request
            if stream_requested and self.supports_streaming:
                await self._execute_streaming(context, event_queue, request_text)
            else:
                await self._execute_standard(context, event_queue, request_text)

        except Exception as e:
            logger.error(f"Error processing request: {e}")
            msg = new_agent_text_message(f"Error processing request: {str(e)}")
            if hasattr(context, 'task_id') and context.task_id:
                msg.taskId = context.task_id
            await event_queue.enqueue_event(msg)

    async def _execute_standard(self, context: RequestContext, event_queue: EventQueue, request_text: str) -> None:
        """Execute standard (non-streaming) request."""
        # Get additional context if provided
        task_context = {}
        if hasattr(context, 'metadata'):
            task_context = context.metadata or {}

        # Create an agent for this request
        agent = MCPAgentBuilder.create_agent(
            role=f"{{ agent_name }} Specialist",
            goal=f"Process the request using {self.mcp_config.name} capabilities",
            backstory=f"You are an expert at using {self.mcp_config.name}. {self.mcp_config.description}",
            llm=DEFAULT_LLM
        )

        # Create a task
        task = Task(
            description=request_text,
            expected_output="The processed result based on the request",
            agent=agent
        )

        # Create a wrapper function to handle the arguments properly
        def run_crew_task():
            # Build kwargs for run_with_mcp_tools
            kwargs = {
                "tasks": [task],
                "mcp_server": self.mcp_server_info,
                "inputs": task_context,
                "skip_health_check": True
            }

            # Only add api_key if MCP server requires authentication
            if self.mcp_config.metadata.get("requires_api_key", False) and MCP_API_KEY:
                kwargs["api_key"] = MCP_API_KEY
                logger.debug("Including API key for authenticated MCP connection")

            return run_with_mcp_tools(**kwargs)

        # Run CrewAI in thread pool to avoid blocking the event loop.
        # FIX: asyncio.get_event_loop() is deprecated inside coroutines
        # (Python 3.10+); get_running_loop() is the correct call here.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(executor, run_crew_task)

        # Send the result as agent message with task ID if available
        msg = new_agent_text_message(str(result))
        if hasattr(context, 'task_id') and context.task_id:
            msg.taskId = context.task_id
        await event_queue.enqueue_event(msg)

    async def _execute_streaming(self, context: RequestContext, event_queue: EventQueue, request_text: str) -> None:
        """Execute streaming request using SSE."""
        try:
            # Send initial event to indicate streaming has started
            await event_queue.enqueue_event(
                CustomEvent("stream_start", {"message": "Starting streaming response"})
            )

            # Stream chunks as they become available
            chunk_count = 0
            async for chunk in self._stream_mcp_response(request_text, context):
                # Send each chunk as a separate SSE event
                await event_queue.enqueue_event(
                    CustomEvent("stream_chunk", {
                        "chunk_id": chunk_count,
                        "content": chunk
                    })
                )

                # Also emit as message event for standard SSE subscribers
                await event_queue.enqueue_event(
                    CustomEvent("message", {
                        "role": "agent",
                        "content": chunk,
                        "chunk_id": chunk_count
                    })
                )

                chunk_count += 1

                # Yield control to allow other tasks to run
                await asyncio.sleep(0)

            # Send completion event
            await event_queue.enqueue_event(
                CustomEvent("stream_complete", {
                    "total_chunks": chunk_count,
                    "message": "Streaming completed successfully"
                })
            )

        except Exception as e:
            logger.error(f"Error in streaming execution: {e}")
            await event_queue.enqueue_event(
                CustomEvent("stream_error", {
                    "error": str(e),
                    "message": "Streaming encountered an error"
                })
            )

    async def _stream_mcp_response(self, request_text: str, context: RequestContext) -> AsyncGenerator[str, None]:
        """
        Stream responses from MCP server.
        This is a placeholder that should be implemented based on specific MCP server capabilities.
        """
        # For now, simulate streaming by breaking response into chunks
        # In real implementation, this would connect to MCP server's streaming endpoint

        # Get full response first (in real implementation, this would be streamed)
        task_context = {}
        if hasattr(context, 'metadata'):
            task_context = context.metadata or {}

        agent = MCPAgentBuilder.create_agent(
            role=f"{{ agent_name }} Streaming Specialist",
            goal=f"Process the streaming request using {self.mcp_config.name} capabilities",
            backstory=f"You are an expert at using {self.mcp_config.name} for streaming data. {self.mcp_config.description}",
            # FIX: pass the configured model explicitly, matching
            # _execute_standard; the original omitted it only here.
            llm=DEFAULT_LLM
        )

        task = Task(
            description=request_text,
            expected_output="The processed streaming result",
            agent=agent
        )

        # For demonstration, get the full result and stream it in chunks
        def run_streaming_task():
            # Build kwargs for run_with_mcp_tools
            kwargs = {
                "tasks": [task],
                "mcp_server": self.mcp_server_info,
                "inputs": task_context,
                "skip_health_check": True
            }

            # Only add api_key if MCP server requires authentication
            if self.mcp_config.metadata.get("requires_api_key", False) and MCP_API_KEY:
                kwargs["api_key"] = MCP_API_KEY
                logger.debug("Including API key for authenticated MCP streaming connection")

            return run_with_mcp_tools(**kwargs)

        # FIX: get_running_loop() instead of deprecated get_event_loop().
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(executor, run_streaming_task)

        # Simulate streaming by chunking the response
        result_str = str(result)
        chunk_size = 100  # Characters per chunk

        for i in range(0, len(result_str), chunk_size):
            chunk = result_str[i:i + chunk_size]
            yield chunk
            await asyncio.sleep(0.1)  # Simulate network delay

    async def cancel(self, task_id: str) -> None:
        """Cancel a running task."""
        logger.info(f"Cancelling task: {task_id}")
        # Implementation depends on MCP server capabilities
        # For now, just log the cancellation request
        pass
@@ -0,0 +1,23 @@
1
+ version: '3.8'
2
+
3
+ services:
4
+ {{ agent_id }}:
5
+ build: .
6
+ container_name: {{ agent_id }}-local
7
+ ports:
8
+ - "${PORT:-8000}:${PORT:-8000}"
9
+ environment:
10
+ - PORT=${PORT:-8000}
11
+ - HOST=0.0.0.0
12
+ - LLM_MODEL=${LLM_MODEL:-openai/gpt-4.1}
13
+ {% for env_var in environment_variables %}
14
+ - {{ env_var.name }}={{ env_var.value }}
15
+ {% endfor %}
16
+ volumes:
17
+ - ./.env:/app/.env:ro
18
+ {% if mcp_server_url %}
19
+ # MCP Server configuration
20
+ extra_hosts:
21
+ - "host.docker.internal:host-gateway" # For accessing local MCP servers
22
+ {% endif %}
23
+ restart: unless-stopped
@@ -0,0 +1,84 @@
1
+ # {{ agent_name }} Environment Variables
2
+ # Copy this file to .env and fill in your values
3
+
4
+ # Server Configuration
5
+ PORT=8000
6
+ HOST=0.0.0.0
7
+
8
+ # Required ENV variables
9
+ AGENTOPS_API_KEY=your-agentops-api-key-here
10
+
11
+ # Language Model Configuration
12
+ LLM_MODEL=your-llm-model-name
13
+ LLM_MODEL_TEMPERATURE=your-llm-model-temperature
14
+ EMBEDDINGS_OPENAI_MODEL_NAME=your-embedding-model
15
+
16
+ # You can also use other models:
17
+ # LLM_MODEL=vertex_ai/gemini-2.0-flash
18
+ # LLM_MODEL=anthropic/claude-4-sonnet
19
+
20
+ # API Keys (add as needed based on your LLM choice)
21
+ # OPENAI_API_KEY=your-openai-key-here
22
+ # ANTHROPIC_API_KEY=your-anthropic-key-here
23
+ # GOOGLE_API_KEY=your-google-key-here
24
+ # VERTEX_PROJECT_ID=your-gcp-project-id
25
+ # VERTEX_LOCATION=us-central1
26
+
27
+ # MCP Server Configuration
28
+ MCP_SERVER_URL={{ mcp_server_url }}
29
+ {% if requires_api_key and api_keys %}
30
+ # MCP Server API Keys (REQUIRED - The MCP server requires authentication)
31
+ # Set one of the following environment variables:
32
+ {% for api_key_name in api_keys %}
33
+ {{ api_key_name }}=your-api-key-here
34
+ {% endfor %}
35
+ {% endif %}
36
+ {% if mcp_server_metadata %}
37
+ # Additional MCP configuration
38
+ {% for key, value in mcp_server_metadata.items() %}
39
+ {% if key.upper() == key %}
40
+ {{ key }}={{ value }}
41
+ {% endif %}
42
+ {% endfor %}
43
+ {% endif %}
44
+
45
+ # Logging
46
+ LOG_LEVEL=INFO
47
+
48
+ # MongoDB Configuration (if using registry)
49
+ # MONGODB_USER=your-mongodb-user
50
+ # MONGODB_PASSWORD=your-mongodb-password
51
+ # MONGODB_HOST=your-mongodb-host
52
+ # MONGODB_PORT=27017
53
+ # MONGODB_DATABASE=iatp
54
+
55
+ {% if auth_required %}
56
+ # Authentication Configuration
57
+ {% for scheme in auth_schemes %}
58
+ # Configure {{ scheme }} authentication
59
+ {% if scheme == "bearer" %}
60
+ # AUTH_TOKEN=your-bearer-token
61
+ {% elif scheme == "api_key" %}
62
+ # API_KEY=your-api-key
63
+ {% elif scheme == "oauth2" %}
64
+ # OAUTH2_CLIENT_ID=your-client-id
65
+ # OAUTH2_CLIENT_SECRET=your-client-secret
66
+ {% endif %}
67
+ {% endfor %}
68
+ {% endif %}
69
+
70
+ # D402 Payment Configuration (if enabled)
71
+ # UTILITY_AGENT_CONTRACT_ADDRESS= # Contract address (set by deployment)
72
+ # OPERATOR_ADDRESS= # Operator address for signing attestations
73
+ # OPERATOR_PRIVATE_KEY= # Operator private key (set by deployment)
74
+ # D402_ENABLED=false
75
+ # D402_PRICE_USD=0.01
76
+ # D402_TOKEN=USDC
77
+ # D402_TOKEN_ADDRESS=
78
+ # D402_NETWORK=sepolia
79
+ # D402_FACILITATOR_URL=http://localhost:8080
80
+
81
+ # Additional custom environment variables
82
+ {% for env_var in environment_variables %}
83
+ # {{ env_var.name }}={{ env_var.value }}
84
+ {% endfor %}
@@ -0,0 +1,78 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ env/
8
+ venv/
9
+ .venv/
10
+ *.egg-info/
11
+ dist/
12
+ build/
13
+ .pytest_cache/
14
+ .coverage
15
+ htmlcov/
16
+ .mypy_cache/
17
+ .ruff_cache/
18
+
19
+ # Virtual Environments
20
+ bin/
21
+ include/
22
+ lib/
23
+ lib64/
24
+ pyvenv.cfg
25
+
26
+ # IDE
27
+ .vscode/
28
+ .idea/
29
+ *.swp
30
+ *.swo
31
+ *~
32
+ .project
33
+ .pydevproject
34
+
35
+ # Environment Variables
36
+ .env
37
+ .env.*
38
+ !.env.example
39
+
40
+ # Logs
41
+ *.log
42
+ logs/
43
+
44
+ # OS
45
+ .DS_Store
46
+ Thumbs.db
47
+ desktop.ini
48
+
49
+ # UV Package Manager
50
+ .uv/
51
+ uv.lock
52
+
53
+ # Docker
54
+ *.pid
55
+ docker-compose.override.yml
56
+
57
+ # {{ agent_name }} specific
58
+ {% if additional_ignores %}
59
+ # Custom ignores
60
+ {% for ignore in additional_ignores %}
61
+ {{ ignore }}
62
+ {% endfor %}
63
+ {% endif %}
64
+
65
+ # Temporary files
66
+ *.tmp
67
+ *.temp
68
+ *.bak
69
+ .cache/
70
+
71
+ # Database
72
+ *.db
73
+ *.sqlite
74
+ *.sqlite3
75
+
76
+ # Generated files
77
+ generated/
78
+ output/
@@ -0,0 +1,218 @@
1
+ """
2
+ {{ agency_name }} - gRPC Server Implementation
3
+
4
+ This module provides a gRPC server for {{ agency_name }}.
5
+ This is an optional component that can run alongside the HTTP/2 server for high-performance scenarios.
6
+ """
7
+
8
+ import asyncio
9
+ import logging
10
+ import os
11
+ import json
12
+ from concurrent import futures
13
+ from typing import AsyncIterator, Optional
14
+
15
+ import grpc
16
+ from grpc import aio
17
+ from traia_iatp.mcp import MCPServerConfig
18
+ from .agent_executor import {{ class_name }}AgentExecutor
19
+
20
+ # Note: These would be generated from .proto files
21
+ # from . import a2a_pb2, a2a_pb2_grpc
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
class {{ class_name }}GrpcService: # (a2a_pb2_grpc.A2AServiceServicer):
    """gRPC service implementation for {{ agency_name }}.

    All four RPC handlers are placeholders: the generated a2a_pb2 /
    a2a_pb2_grpc modules are commented out, so responses are plain dicts
    until the .proto bindings are wired in.
    """

    def __init__(self, mcp_config: MCPServerConfig):
        # One shared executor per service; gRPC transports always
        # negotiate streaming, so streaming support is forced on.
        self.mcp_config = mcp_config
        self.executor = {{ class_name }}AgentExecutor(
            mcp_config,
            supports_streaming=True # gRPC always supports streaming
        )

    async def SendMessage(self, request, context):
        """Handle unary RPC: single request, single response."""
        try:
            # Extract message content
            message_content = request.message.content

            # Process with executor
            # NOTE(review): the generated {{ class_name }}AgentExecutor defines
            # execute()/_execute_standard(), not process_unary_request() —
            # confirm this method exists before enabling the gRPC server.
            result = await self.executor.process_unary_request(message_content)

            # Create response
            # response = a2a_pb2.SendMessageResponse()
            # response.result.content = result
            # return response

            # Placeholder return
            return {"result": {"content": result}}

        except Exception as e:
            logger.error(f"Error in SendMessage: {e}")
            # abort() raises in the caller, terminating the RPC with INTERNAL.
            await context.abort(
                grpc.StatusCode.INTERNAL,
                f"Internal error: {str(e)}"
            )

    async def StreamMessage(self, request, context):
        """Handle server streaming RPC: single request, stream of responses."""
        try:
            # Extract message content
            message_content = request.message.content

            # Stream responses
            # NOTE(review): process_streaming_request() is not defined on the
            # generated executor either — see SendMessage note.
            async for chunk in self.executor.process_streaming_request(message_content):
                # response = a2a_pb2.StreamMessageResponse()
                # response.chunk.content = chunk
                # yield response

                # Placeholder yield
                yield {"chunk": {"content": chunk}}

        except Exception as e:
            logger.error(f"Error in StreamMessage: {e}")
            await context.abort(
                grpc.StatusCode.INTERNAL,
                f"Streaming error: {str(e)}"
            )

    async def BidirectionalStream(self, request_iterator, context):
        """Handle bidirectional streaming RPC."""
        try:
            # Requests are processed sequentially; each incoming message
            # produces its own stream of response chunks.
            async for request in request_iterator:
                # Process each incoming request
                message_content = request.message.content

                # Stream responses for this request
                async for chunk in self.executor.process_streaming_request(message_content):
                    # response = a2a_pb2.StreamMessageResponse()
                    # response.chunk.content = chunk
                    # yield response

                    # Placeholder yield
                    yield {"chunk": {"content": chunk}}

        except Exception as e:
            logger.error(f"Error in BidirectionalStream: {e}")
            await context.abort(
                grpc.StatusCode.INTERNAL,
                f"Bidirectional streaming error: {str(e)}"
            )

    async def GetCapabilities(self, request, context):
        """Return agent capabilities."""
        try:
            # capabilities = a2a_pb2.CapabilitiesResponse()
            # capabilities.name = "{{ agency_id }}"
            # capabilities.description = "{{ agency_description }}"
            # capabilities.version = "{{ agency_version }}"
            # capabilities.supports_streaming = True
            # capabilities.supports_bidirectional = True
            # return capabilities

            # Placeholder return — identity fields are baked in at template
            # render time; only mcp_capabilities varies at runtime.
            return {
                "name": "{{ agency_id }}",
                "description": "{{ agency_description }}",
                "version": "{{ agency_version }}",
                "supports_streaming": True,
                "supports_bidirectional": True,
                "mcp_capabilities": self.mcp_config.capabilities
            }

        except Exception as e:
            logger.error(f"Error in GetCapabilities: {e}")
            await context.abort(
                grpc.StatusCode.INTERNAL,
                f"Error getting capabilities: {str(e)}"
            )
132
+
133
+
134
async def serve_grpc():
    """Start the gRPC server.

    Loads the MCP server configuration (from agency_config.json when
    present, otherwise from values rendered into the template), builds the
    async gRPC server, and blocks until termination or Ctrl-C.
    """
    # Load configuration; the on-disk file takes precedence over the
    # template-rendered fallback values.
    config_path = "agency_config.json"
    if os.path.exists(config_path):
        with open(config_path, "r") as f:
            config_data = json.load(f)
            mcp_data = config_data.get("mcp_server", {})
    else:
        mcp_data = {
            "name": "{{ mcp_server_name }}",
            "url": "{{ mcp_server_url }}",
            "description": "{{ mcp_server_description }}",
            "server_type": "{{ mcp_server_type }}",
            "capabilities": {{ mcp_server_capabilities | tojson }},
            "metadata": {{ mcp_server_metadata | tojson }}
        }

    # Create MCP config
    mcp_config = MCPServerConfig(
        name=mcp_data["name"],
        url=mcp_data["url"],
        description=mcp_data["description"],
        server_type=mcp_data.get("server_type", "streamable-http"),
        capabilities=mcp_data.get("capabilities", []),
        metadata=mcp_data.get("metadata", {})
    )

    # Create gRPC server; the thread pool is the migration pool for any
    # sync handlers, and the options tune HTTP/2 message/keepalive limits.
    server = aio.server(
        futures.ThreadPoolExecutor(max_workers=10),
        options=[
            ('grpc.max_send_message_length', 4 * 1024 * 1024),  # 4MB
            ('grpc.max_receive_message_length', 4 * 1024 * 1024),  # 4MB
            ('grpc.keepalive_time_ms', 10000),
            ('grpc.keepalive_timeout_ms', 5000),
            ('grpc.http2.max_concurrent_streams', 100),
            ('grpc.http2.max_frame_size', 16384),
        ]
    )

    # Add service — registration stays commented out until the a2a .proto
    # bindings are generated.
    service = {{ class_name }}GrpcService(mcp_config)
    # a2a_pb2_grpc.add_A2AServiceServicer_to_server(service, server)

    # Enable reflection for debugging
    # from grpc_reflection.v1alpha import reflection
    # SERVICE_NAMES = (
    #     a2a_pb2.DESCRIPTOR.services_by_name['A2AService'].full_name,
    #     reflection.SERVICE_NAME,
    # )
    # reflection.enable_server_reflection(SERVICE_NAMES, server)

    # Get port from environment
    port = int(os.environ.get("GRPC_PORT", 50051))

    # Add insecure port (TLS can be added later)
    server.add_insecure_port(f'[::]:{port}')

    logger.info(f"Starting {{ agency_name }} gRPC Server")
    logger.info(f"Listening on port {port}")
    logger.info(f"MCP Server: {{ mcp_server_name }}")

    # Start server
    await server.start()

    try:
        await server.wait_for_termination()
    except KeyboardInterrupt:
        # Graceful stop: give in-flight RPCs 5 seconds to finish.
        logger.info("Shutting down gRPC server...")
        await server.stop(5)
205
+
206
+
207
def main():
    """Console entry point: configure logging, then run the async gRPC server."""
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)
    asyncio.run(serve_grpc())


if __name__ == "__main__":
    main()