codetether 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. a2a_server/__init__.py +29 -0
  2. a2a_server/a2a_agent_card.py +365 -0
  3. a2a_server/a2a_errors.py +1133 -0
  4. a2a_server/a2a_executor.py +926 -0
  5. a2a_server/a2a_router.py +1033 -0
  6. a2a_server/a2a_types.py +344 -0
  7. a2a_server/agent_card.py +408 -0
  8. a2a_server/agents_server.py +271 -0
  9. a2a_server/auth_api.py +349 -0
  10. a2a_server/billing_api.py +638 -0
  11. a2a_server/billing_service.py +712 -0
  12. a2a_server/billing_webhooks.py +501 -0
  13. a2a_server/config.py +96 -0
  14. a2a_server/database.py +2165 -0
  15. a2a_server/email_inbound.py +398 -0
  16. a2a_server/email_notifications.py +486 -0
  17. a2a_server/enhanced_agents.py +919 -0
  18. a2a_server/enhanced_server.py +160 -0
  19. a2a_server/hosted_worker.py +1049 -0
  20. a2a_server/integrated_agents_server.py +347 -0
  21. a2a_server/keycloak_auth.py +750 -0
  22. a2a_server/livekit_bridge.py +439 -0
  23. a2a_server/marketing_tools.py +1364 -0
  24. a2a_server/mcp_client.py +196 -0
  25. a2a_server/mcp_http_server.py +2256 -0
  26. a2a_server/mcp_server.py +191 -0
  27. a2a_server/message_broker.py +725 -0
  28. a2a_server/mock_mcp.py +273 -0
  29. a2a_server/models.py +494 -0
  30. a2a_server/monitor_api.py +5904 -0
  31. a2a_server/opencode_bridge.py +1594 -0
  32. a2a_server/redis_task_manager.py +518 -0
  33. a2a_server/server.py +726 -0
  34. a2a_server/task_manager.py +668 -0
  35. a2a_server/task_queue.py +742 -0
  36. a2a_server/tenant_api.py +333 -0
  37. a2a_server/tenant_middleware.py +219 -0
  38. a2a_server/tenant_service.py +760 -0
  39. a2a_server/user_auth.py +721 -0
  40. a2a_server/vault_client.py +576 -0
  41. a2a_server/worker_sse.py +873 -0
  42. agent_worker/__init__.py +8 -0
  43. agent_worker/worker.py +4877 -0
  44. codetether/__init__.py +10 -0
  45. codetether/__main__.py +4 -0
  46. codetether/cli.py +112 -0
  47. codetether/worker_cli.py +57 -0
  48. codetether-1.2.2.dist-info/METADATA +570 -0
  49. codetether-1.2.2.dist-info/RECORD +66 -0
  50. codetether-1.2.2.dist-info/WHEEL +5 -0
  51. codetether-1.2.2.dist-info/entry_points.txt +4 -0
  52. codetether-1.2.2.dist-info/licenses/LICENSE +202 -0
  53. codetether-1.2.2.dist-info/top_level.txt +5 -0
  54. codetether_voice_agent/__init__.py +6 -0
  55. codetether_voice_agent/agent.py +445 -0
  56. codetether_voice_agent/codetether_mcp.py +345 -0
  57. codetether_voice_agent/config.py +16 -0
  58. codetether_voice_agent/functiongemma_caller.py +380 -0
  59. codetether_voice_agent/session_playback.py +247 -0
  60. codetether_voice_agent/tools/__init__.py +21 -0
  61. codetether_voice_agent/tools/definitions.py +135 -0
  62. codetether_voice_agent/tools/handlers.py +380 -0
  63. run_server.py +314 -0
  64. ui/monitor-tailwind.html +1790 -0
  65. ui/monitor.html +1775 -0
  66. ui/monitor.js +2662 -0
@@ -0,0 +1,2256 @@
1
+ """
2
+ HTTP/SSE-based MCP Server for external agent connections.
3
+
4
+ This allows external agents to connect to the MCP server over HTTP
5
+ instead of stdio, enabling distributed agent synchronization.
6
+
7
+ Integrates with the A2A server to expose actual agent capabilities as MCP tools.
8
+ """
9
+
10
+ import asyncio
11
+ import json
12
+ import logging
13
+ import uuid
14
from typing import Any, Dict, List, Optional, Union
15
+ from datetime import datetime
16
+
17
+ from fastapi import FastAPI, HTTPException, Request
18
+ from fastapi.responses import StreamingResponse, JSONResponse
19
+ from pydantic import BaseModel
20
+ import uvicorn
21
+
22
+ from .models import Message, Part
23
+ from .monitor_api import (
24
+ monitor_router,
25
+ nextauth_router,
26
+ opencode_router,
27
+ voice_router,
28
+ log_agent_message,
29
+ get_opencode_bridge,
30
+ )
31
+ from .worker_sse import (
32
+ worker_sse_router,
33
+ get_worker_registry,
34
+ notify_workers_of_new_task,
35
+ setup_task_creation_hook,
36
+ )
37
+
38
+ # Import marketing tools
39
+ try:
40
+ from .marketing_tools import (
41
+ get_marketing_tools,
42
+ MARKETING_TOOL_HANDLERS,
43
+ )
44
+
45
+ MARKETING_TOOLS_AVAILABLE = True
46
+ except ImportError:
47
+ MARKETING_TOOLS_AVAILABLE = False
48
+ get_marketing_tools = lambda: []
49
+ MARKETING_TOOL_HANDLERS = {}
50
+
51
+ # Import user authentication router for self-service signups
52
+ try:
53
+ from .user_auth import router as user_auth_router
54
+
55
+ USER_AUTH_AVAILABLE = True
56
+ except ImportError:
57
+ USER_AUTH_AVAILABLE = False
58
+ user_auth_router = None
59
+
60
+ # Import queue status API router
61
+ try:
62
+ from .queue_api import router as queue_api_router
63
+
64
+ QUEUE_API_AVAILABLE = True
65
+ except ImportError:
66
+ QUEUE_API_AVAILABLE = False
67
+ queue_api_router = None
68
+
69
+ # Import task queue for hosted workers
70
+ try:
71
+ from .task_queue import enqueue_task, get_task_queue
72
+
73
+ TASK_QUEUE_AVAILABLE = True
74
+ except ImportError:
75
+ TASK_QUEUE_AVAILABLE = False
76
+ enqueue_task = None
77
+ get_task_queue = None
78
+
79
+ # Import billing webhook router for Stripe
80
+ try:
81
+ from .billing_webhooks import billing_webhook_router
82
+
83
+ BILLING_WEBHOOKS_AVAILABLE = True
84
+ except ImportError:
85
+ BILLING_WEBHOOKS_AVAILABLE = False
86
+ billing_webhook_router = None
87
+
88
+ logger = logging.getLogger(__name__)
89
+
90
+
91
class MCPRequest(BaseModel):
    """MCP JSON-RPC request envelope.

    Fields follow the JSON-RPC 2.0 wire format:

    - ``jsonrpc``: protocol version marker, always ``'2.0'``.
    - ``id``: request correlation id. Per JSON-RPC 2.0 this may be a
      String, a Number, or Null — the previous ``Optional[int]`` type
      rejected the string ids that many compliant clients send.
      ``None`` marks the message as a notification (no response owed).
    - ``method``: name of the method being invoked (required).
    - ``params``: optional structured parameters for the method.
    """

    jsonrpc: str = '2.0'
    # int kept first so numeric ids keep their original coercion path.
    id: Optional[Union[int, str]] = None
    method: str
    params: Optional[Dict[str, Any]] = None
98
+
99
+
100
class MCPResponse(BaseModel):
    """MCP JSON-RPC response envelope.

    Fields follow the JSON-RPC 2.0 wire format:

    - ``jsonrpc``: protocol version marker, always ``'2.0'``.
    - ``id``: echoes the request id. Per JSON-RPC 2.0 an id may be a
      String, a Number, or Null, so the type is widened from the
      original ``Optional[int]`` to accept string ids as well.
    - ``result``: payload on success (mutually exclusive with ``error``
      per the spec; not enforced here).
    - ``error``: JSON-RPC error object (``code``/``message``) on failure.
    """

    jsonrpc: str = '2.0'
    id: Optional[Union[int, str]] = None
    result: Optional[Any] = None
    error: Optional[Dict[str, Any]] = None
107
+
108
+
109
+ class MCPHTTPServer:
110
+ """HTTP-based MCP server that exposes A2A agent capabilities as MCP tools."""
111
+
112
+ def __init__(
113
+ self, host: str = '0.0.0.0', port: int = 9000, a2a_server=None
114
+ ):
115
+ self.host = host
116
+ self.port = port
117
+ self.a2a_server = a2a_server # Reference to A2A server
118
+ self.app = FastAPI(title='MCP HTTP Server', version='1.0.0')
119
+
120
+ # Include the monitor router for UI and monitoring endpoints
121
+ self.app.include_router(monitor_router)
122
+
123
+ # Include OpenCode router for worker task management API
124
+ self.app.include_router(opencode_router)
125
+
126
+ # Include voice router for voice session management
127
+ self.app.include_router(voice_router)
128
+
129
+ # Include NextAuth compatibility routes for Cypress
130
+ self.app.include_router(nextauth_router)
131
+
132
+ # Include Worker SSE router for push-based task distribution
133
+ self.app.include_router(worker_sse_router)
134
+
135
+ # Include User Auth router for self-service registration (mid-market)
136
+ if USER_AUTH_AVAILABLE and user_auth_router:
137
+ self.app.include_router(user_auth_router)
138
+
139
+ # Include Queue API router for operational visibility
140
+ if QUEUE_API_AVAILABLE and queue_api_router:
141
+ self.app.include_router(queue_api_router)
142
+
143
+ # Include Billing Webhooks router for Stripe
144
+ if BILLING_WEBHOOKS_AVAILABLE and billing_webhook_router:
145
+ self.app.include_router(billing_webhook_router)
146
+
147
+ self._setup_routes()
148
+
149
+ def _get_tools_from_a2a_server(self) -> List[Dict[str, Any]]:
150
+ """Extract MCP tools from A2A server capabilities."""
151
+ if not self.a2a_server:
152
+ return self._get_fallback_tools()
153
+
154
+ tools = [
155
+ # Core A2A operations exposed as MCP tools
156
+ {
157
+ 'name': 'send_message',
158
+ 'description': 'Send a message to the A2A agent for processing and receive a response. Returns: success, response text, conversation_id (for threading follow-up messages), and timestamp. Use conversation_id in subsequent calls to maintain context.',
159
+ 'inputSchema': {
160
+ 'type': 'object',
161
+ 'properties': {
162
+ 'message': {
163
+ 'type': 'string',
164
+ 'description': 'The message to send to the agent',
165
+ },
166
+ 'conversation_id': {
167
+ 'type': 'string',
168
+ 'description': 'Optional conversation ID for message threading',
169
+ },
170
+ },
171
+ 'required': ['message'],
172
+ },
173
+ },
174
+ {
175
+ 'name': 'send_message_async',
176
+ 'description': 'Send a message asynchronously by creating a task that workers will pick up. Unlike send_message (synchronous), this immediately returns a task_id and run_id, allowing you to poll for results later. Use for long-running operations or when you want fire-and-forget semantics. Returns: task_id, run_id, status, conversation_id.',
177
+ 'inputSchema': {
178
+ 'type': 'object',
179
+ 'properties': {
180
+ 'message': {
181
+ 'type': 'string',
182
+ 'description': 'The message/prompt for the agent to process',
183
+ },
184
+ 'conversation_id': {
185
+ 'type': 'string',
186
+ 'description': 'Optional conversation ID for message threading',
187
+ },
188
+ 'codebase_id': {
189
+ 'type': 'string',
190
+ 'description': 'Target codebase ID (default: global)',
191
+ },
192
+ 'priority': {
193
+ 'type': 'integer',
194
+ 'description': 'Priority level (higher = more urgent, default: 0)',
195
+ },
196
+ 'notify_email': {
197
+ 'type': 'string',
198
+ 'description': 'Email to notify when task completes',
199
+ },
200
+ },
201
+ 'required': ['message'],
202
+ },
203
+ },
204
+ {
205
+ 'name': 'send_to_agent',
206
+ 'description': 'Send a message to a specific named agent. The task will be queued until that agent is available to claim it. If the agent is offline, the task queues indefinitely (unless deadline_seconds is set). Use discover_agents to find available agent names. Returns: task_id, run_id, target_agent_name, status.',
207
+ 'inputSchema': {
208
+ 'type': 'object',
209
+ 'properties': {
210
+ 'agent_name': {
211
+ 'type': 'string',
212
+ 'description': 'Name of the target agent (must match agent_name used during worker registration)',
213
+ },
214
+ 'message': {
215
+ 'type': 'string',
216
+ 'description': 'The message/prompt for the agent to process',
217
+ },
218
+ 'conversation_id': {
219
+ 'type': 'string',
220
+ 'description': 'Optional conversation ID for message threading',
221
+ },
222
+ 'codebase_id': {
223
+ 'type': 'string',
224
+ 'description': 'Target codebase ID (default: global)',
225
+ },
226
+ 'priority': {
227
+ 'type': 'integer',
228
+ 'description': 'Priority level (higher = more urgent, default: 0)',
229
+ },
230
+ 'deadline_seconds': {
231
+ 'type': 'integer',
232
+ 'description': 'Optional: fail if not claimed within this many seconds. If not set, task queues indefinitely.',
233
+ },
234
+ 'notify_email': {
235
+ 'type': 'string',
236
+ 'description': 'Email to notify when task completes',
237
+ },
238
+ },
239
+ 'required': ['agent_name', 'message'],
240
+ },
241
+ },
242
+ {
243
+ 'name': 'create_task',
244
+ 'description': "Create a new task in the task queue. Tasks start with 'pending' status and can be picked up by worker agents. Returns: task_id (use with get_task/cancel_task), title, description, status, and created_at timestamp. Task lifecycle: pending → working → completed/failed/cancelled.",
245
+ 'inputSchema': {
246
+ 'type': 'object',
247
+ 'properties': {
248
+ 'title': {
249
+ 'type': 'string',
250
+ 'description': 'Task title',
251
+ },
252
+ 'description': {
253
+ 'type': 'string',
254
+ 'description': 'Detailed task description',
255
+ },
256
+ 'codebase_id': {
257
+ 'type': 'string',
258
+ 'description': 'Target codebase ID (default: global)',
259
+ },
260
+ 'agent_type': {
261
+ 'type': 'string',
262
+ 'enum': ['build', 'plan', 'general', 'explore'],
263
+ 'description': 'Agent type (default: build)',
264
+ },
265
+ 'model': {
266
+ 'type': 'string',
267
+ 'enum': [
268
+ 'default',
269
+ 'claude-sonnet',
270
+ 'claude-sonnet-4',
271
+ 'sonnet',
272
+ 'claude-opus',
273
+ 'opus',
274
+ 'claude-haiku',
275
+ 'haiku',
276
+ 'minimax',
277
+ 'minimax-m2',
278
+ 'minimax-m2.1',
279
+ 'm2.1',
280
+ 'gpt-4',
281
+ 'gpt-4o',
282
+ 'gpt-4-turbo',
283
+ 'gpt-4.1',
284
+ 'o1',
285
+ 'o1-mini',
286
+ 'o3',
287
+ 'o3-mini',
288
+ 'gemini',
289
+ 'gemini-pro',
290
+ 'gemini-2.5-pro',
291
+ 'gemini-flash',
292
+ 'gemini-2.5-flash',
293
+ 'grok',
294
+ 'grok-3',
295
+ ],
296
+ 'description': 'Model to use for this task. Use friendly names like "minimax", "claude-sonnet", "gemini" - they are automatically mapped to the correct provider/model-id format.',
297
+ },
298
+ 'priority': {
299
+ 'type': 'integer',
300
+ 'description': 'Priority level (higher = more urgent, default: 0)',
301
+ },
302
+ },
303
+ 'required': ['title'],
304
+ },
305
+ },
306
+ {
307
+ 'name': 'get_task',
308
+ 'description': 'Get the current status and details of a specific task by its ID. Returns: task_id, title, description, status (pending/working/completed/failed/cancelled), created_at, and updated_at timestamps.',
309
+ 'inputSchema': {
310
+ 'type': 'object',
311
+ 'properties': {
312
+ 'task_id': {
313
+ 'type': 'string',
314
+ 'description': 'The ID of the task to retrieve',
315
+ }
316
+ },
317
+ 'required': ['task_id'],
318
+ },
319
+ },
320
+ {
321
+ 'name': 'list_tasks',
322
+ 'description': 'List all tasks in the queue, optionally filtered by status. Returns an array of tasks with their IDs, titles, statuses, and timestamps. Use to monitor queue state or find pending tasks to work on.',
323
+ 'inputSchema': {
324
+ 'type': 'object',
325
+ 'properties': {
326
+ 'status': {
327
+ 'type': 'string',
328
+ 'enum': [
329
+ 'pending',
330
+ 'working',
331
+ 'completed',
332
+ 'failed',
333
+ 'cancelled',
334
+ ],
335
+ 'description': 'Filter tasks by status',
336
+ },
337
+ 'codebase_id': {
338
+ 'type': 'string',
339
+ 'description': 'Filter by codebase ID',
340
+ },
341
+ },
342
+ },
343
+ },
344
+ {
345
+ 'name': 'cancel_task',
346
+ 'description': "Cancel a task by its ID. Only pending or working tasks can be cancelled. Returns the updated task with status set to 'cancelled'.",
347
+ 'inputSchema': {
348
+ 'type': 'object',
349
+ 'properties': {
350
+ 'task_id': {
351
+ 'type': 'string',
352
+ 'description': 'The ID of the task to cancel',
353
+ }
354
+ },
355
+ 'required': ['task_id'],
356
+ },
357
+ },
358
+ {
359
+ 'name': 'discover_agents',
360
+ 'description': 'List all registered worker agents in the network. Agents must call register_agent to appear here. Returns an array of agents with their name, description, and URL. Use to find available agents before delegating work.',
361
+ 'inputSchema': {'type': 'object', 'properties': {}},
362
+ },
363
+ {
364
+ 'name': 'get_agent',
365
+ 'description': 'Get detailed information about a specific registered agent by name. Returns: name, description, URL, and capabilities (streaming, push_notifications). Use after discover_agents to get full details.',
366
+ 'inputSchema': {
367
+ 'type': 'object',
368
+ 'properties': {
369
+ 'agent_name': {
370
+ 'type': 'string',
371
+ 'description': 'Name of the agent to retrieve',
372
+ }
373
+ },
374
+ 'required': ['agent_name'],
375
+ },
376
+ },
377
+ {
378
+ 'name': 'register_agent',
379
+ 'description': "Register this agent as a worker in the network so it can be discovered by other agents and receive tasks. Call once on startup. Requires: name (unique identifier), description, url (agent's endpoint). Optional: capabilities object. After registering, the agent will appear in discover_agents results.",
380
+ 'inputSchema': {
381
+ 'type': 'object',
382
+ 'properties': {
383
+ 'name': {
384
+ 'type': 'string',
385
+ 'description': 'Unique name/identifier for this agent',
386
+ },
387
+ 'description': {
388
+ 'type': 'string',
389
+ 'description': 'Human-readable description of what this agent does',
390
+ },
391
+ 'url': {
392
+ 'type': 'string',
393
+ 'description': 'Base URL where this agent can be reached',
394
+ },
395
+ 'capabilities': {
396
+ 'type': 'object',
397
+ 'description': 'Optional capabilities: {streaming: boolean, push_notifications: boolean}',
398
+ 'properties': {
399
+ 'streaming': {'type': 'boolean'},
400
+ 'push_notifications': {'type': 'boolean'},
401
+ },
402
+ },
403
+ },
404
+ 'required': ['name', 'description', 'url'],
405
+ },
406
+ },
407
+ {
408
+ 'name': 'get_agent_card',
409
+ 'description': "Get this server's agent card containing its identity, capabilities, and skills. Returns: name, description, URL, provider info, capabilities, and list of skills. Useful for understanding what this agent can do.",
410
+ 'inputSchema': {'type': 'object', 'properties': {}},
411
+ },
412
+ {
413
+ 'name': 'refresh_agent_heartbeat',
414
+ 'description': 'Refresh the last_seen timestamp for a registered agent. Call periodically (every 30-60s) to keep the agent visible in discovery. Agents not seen within 120s are filtered from discover_agents results.',
415
+ 'inputSchema': {
416
+ 'type': 'object',
417
+ 'properties': {
418
+ 'agent_name': {
419
+ 'type': 'string',
420
+ 'description': 'Name of the agent to refresh (must match name used in register_agent)',
421
+ },
422
+ },
423
+ 'required': ['agent_name'],
424
+ },
425
+ },
426
+ {
427
+ 'name': 'get_messages',
428
+ 'description': 'Retrieve conversation history from the monitoring system. Filter by conversation_id to get a specific thread. Returns messages with: id, timestamp, type (human/agent), agent_name, content, and metadata. Use to review past interactions.',
429
+ 'inputSchema': {
430
+ 'type': 'object',
431
+ 'properties': {
432
+ 'conversation_id': {
433
+ 'type': 'string',
434
+ 'description': 'Filter messages by conversation ID',
435
+ },
436
+ 'limit': {
437
+ 'type': 'number',
438
+ 'description': 'Maximum number of messages to retrieve (default: 50)',
439
+ },
440
+ },
441
+ },
442
+ },
443
+ {
444
+ 'name': 'get_task_updates',
445
+ 'description': 'Poll for recent task status changes. Filter by since_timestamp (ISO format) to get only new updates, or by specific task_ids. Returns tasks sorted by updated_at descending. Use for monitoring task progress without repeatedly calling get_task.',
446
+ 'inputSchema': {
447
+ 'type': 'object',
448
+ 'properties': {
449
+ 'since_timestamp': {
450
+ 'type': 'string',
451
+ 'description': 'ISO timestamp to get updates since (optional)',
452
+ },
453
+ 'task_ids': {
454
+ 'type': 'array',
455
+ 'items': {'type': 'string'},
456
+ 'description': 'Specific task IDs to check (optional)',
457
+ },
458
+ },
459
+ },
460
+ },
461
+ {
462
+ 'name': 'search_tools',
463
+ 'description': "Search for available tools by keyword or category. Use this FIRST to discover what tools are available without loading all definitions. Returns tool names and brief descriptions matching your query. Categories: 'messaging' (send_message, get_messages), 'tasks' (create_task, get_task, list_tasks, cancel_task, get_task_updates), 'agents' (discover_agents, get_agent, register_agent, get_agent_card). Example: search_tools({query: 'task'}) returns all task-related tools.",
464
+ 'inputSchema': {
465
+ 'type': 'object',
466
+ 'properties': {
467
+ 'query': {
468
+ 'type': 'string',
469
+ 'description': "Search keyword (e.g., 'task', 'agent', 'message') or category name",
470
+ },
471
+ 'detail_level': {
472
+ 'type': 'string',
473
+ 'enum': ['name_only', 'summary', 'full'],
474
+ 'description': "Level of detail: 'name_only' (just names), 'summary' (name + description), 'full' (complete schema). Default: summary",
475
+ },
476
+ },
477
+ 'required': ['query'],
478
+ },
479
+ },
480
+ {
481
+ 'name': 'get_tool_schema',
482
+ 'description': "Get the complete schema for a specific tool by name. Use after search_tools to get full parameter details for a tool you want to use. Returns the tool's inputSchema with all properties, types, and requirements.",
483
+ 'inputSchema': {
484
+ 'type': 'object',
485
+ 'properties': {
486
+ 'tool_name': {
487
+ 'type': 'string',
488
+ 'description': "Exact name of the tool (e.g., 'create_task', 'send_message')",
489
+ }
490
+ },
491
+ 'required': ['tool_name'],
492
+ },
493
+ },
494
+ ]
495
+
496
+ # Add marketing tools if available
497
+ if MARKETING_TOOLS_AVAILABLE:
498
+ tools.extend(get_marketing_tools())
499
+ logger.info(f'Added {len(get_marketing_tools())} marketing tools')
500
+
501
+ return tools
502
+
503
+ def _get_fallback_tools(self) -> List[Dict[str, Any]]:
504
+ """Fallback tools when no A2A server is available."""
505
+ fallback = [
506
+ {
507
+ 'name': 'echo',
508
+ 'description': 'Echo back a message',
509
+ 'inputSchema': {
510
+ 'type': 'object',
511
+ 'properties': {
512
+ 'message': {
513
+ 'type': 'string',
514
+ 'description': 'Message to echo',
515
+ }
516
+ },
517
+ 'required': ['message'],
518
+ },
519
+ }
520
+ ]
521
+
522
+ # Still include marketing tools even without A2A server
523
+ if MARKETING_TOOLS_AVAILABLE:
524
+ fallback.extend(get_marketing_tools())
525
+
526
+ return fallback
527
+
528
    def _setup_routes(self):
        """Set up HTTP routes for MCP.

        Registers: a health check at ``/``, the SSE transport endpoints
        (``/mcp/v1/sse``, ``/mcp``), the JSON-RPC endpoints
        (``/mcp/v1/rpc``, ``/mcp/v1/message``, ``/mcp/v1/tools``), and
        REST task-queue endpoints under ``/mcp/v1/tasks`` used by the
        monitor UI.
        """

        @self.app.get('/')
        async def root():
            """Health check endpoint."""
            # Advertises the other endpoints so clients can self-configure.
            return {
                'status': 'ok',
                'server': 'MCP HTTP Server',
                'version': '1.0.0',
                'endpoints': {
                    'rpc': '/mcp/v1/rpc',
                    'sse': '/mcp/v1/sse',
                    'message': '/mcp/v1/message',
                    'tools': '/mcp/v1/tools',
                    'health': '/',
                },
            }

        @self.app.get('/mcp/v1/sse')
        async def handle_sse(request: Request):
            """Handle SSE connections for MCP."""

            async def event_generator():
                """Generate SSE events."""
                try:
                    # Send initial connection event with the message endpoint URL
                    # The MCP SDK SSEClientTransport expects the endpoint event data
                    # to be a URL path that it will POST messages to
                    yield f'event: endpoint\ndata: /mcp/v1/message\n\n'

                    # Keep connection alive
                    while True:
                        # Send periodic ping to keep connection alive
                        await asyncio.sleep(30)
                        yield f'event: ping\ndata: {json.dumps({"timestamp": datetime.now().isoformat()})}\n\n'

                except asyncio.CancelledError:
                    # Client disconnected (or server shutting down); re-raise
                    # so the stream terminates cleanly.
                    logger.info('SSE connection closed')
                    raise
                except Exception as e:
                    logger.error(f'Error in SSE stream: {e}')
                    raise

            return StreamingResponse(
                event_generator(),
                media_type='text/event-stream',
                headers={
                    'Cache-Control': 'no-cache',
                    'Connection': 'keep-alive',
                    # Disable nginx response buffering so events flush
                    # immediately.
                    'X-Accel-Buffering': 'no',
                },
            )

        @self.app.get('/mcp')
        async def mcp_root(request: Request):
            """MCP SSE endpoint root - redirects to SSE endpoint."""
            # Check if client accepts SSE
            accept = request.headers.get('accept', '')
            if 'text/event-stream' in accept:
                # Forward to SSE handler
                return await handle_sse(request)
            else:
                # Return info about available endpoints
                return {
                    'jsonrpc': '2.0',
                    'protocol': 'mcp',
                    'version': '1.0.0',
                    'capabilities': {'tools': True, 'sse': True},
                    'endpoints': {
                        'sse': '/mcp/v1/sse',
                        'message': '/mcp/v1/message',
                        'rpc': '/mcp/v1/rpc',
                        'tools': '/mcp/v1/tools',
                    },
                }

        @self.app.post('/mcp')
        async def mcp_post(request: Request):
            """Handle POST messages to /mcp for SSE transport."""
            # Forward to the RPC handler
            return await handle_rpc(request)

        @self.app.get('/mcp/v1/tools')
        async def list_tools():
            """List available MCP tools - exposes A2A agents as tools."""
            # Get tools dynamically from connected A2A server
            tools = self._get_tools_from_a2a_server()
            return {'tools': tools}

        @self.app.post('/mcp/v1/message')
        async def handle_message(request: Request):
            """Handle POST messages from SSE clients."""
            # This handles the same requests as RPC but for SSE clients
            return await handle_rpc(request)

        @self.app.post('/mcp/v1/rpc')
        async def handle_rpc(request: Request):
            """Handle MCP JSON-RPC requests with Streamable HTTP support.

            - Accepts JSON-RPC notifications (no id) -> returns 202 Accepted
            - Accepts JSON-RPC requests -> returns JSON or SSE stream depending on Accept header
            - Basic protocol version header and Origin validation
            """
            try:
                # Validate MCP Protocol header if present
                protocol_version = request.headers.get('mcp-protocol-version')
                if protocol_version and protocol_version not in (
                    '2025-06-18',
                    '2024-11-05',
                    '2025-03-26',
                ):
                    return JSONResponse(
                        {'error': 'Unsupported MCP-Protocol-Version'},
                        status_code=400,
                    )

                # Basic Origin check - allow localhost by default.
                # NOTE(review): non-localhost origins are only logged, not
                # rejected — confirm this is the intended policy.
                origin = request.headers.get('origin')
                if origin and origin not in (
                    'http://localhost',
                    'http://127.0.0.1',
                ):
                    logger.warning(
                        f'Received request from unusual Origin: {origin}'
                    )

                # GET requests: return SSE stream (legacy behavior / convenience)
                # NOTE(review): this route is registered POST-only, so this
                # branch appears unreachable unless the handler is invoked
                # directly — confirm.
                if request.method == 'GET':
                    return await handle_sse(request)

                # Read request body and parse JSON-RPC payload
                body_bytes = await request.body()
                if not body_bytes:
                    raise HTTPException(
                        status_code=400,
                        detail='Empty request body - expected JSON-RPC payload',
                    )

                try:
                    payload = json.loads(body_bytes.decode('utf-8'))
                except Exception:
                    raise HTTPException(
                        status_code=400, detail='Invalid JSON in request body'
                    )

                # Detect if client mistakenly sent JSON in URL path.
                # NOTE(review): this check runs after body parsing, so it
                # only triggers when the body was also valid JSON.
                if (
                    '%7B' in request.url.path
                    or '{"jsonrpc"' in request.url.path
                ):
                    return JSONResponse(
                        {
                            'error': 'Invalid request: JSON appears to be URL-encoded in the path. Use POST with JSON body.'
                        },
                        status_code=400,
                    )

                method = payload.get('method')
                request_id = payload.get('id')
                params = payload.get('params', {}) or {}

                # Notification (no id): process in background and return 202 Accepted
                # NOTE(review): _call_tool_from_payload is not visible in this
                # chunk — presumably defined elsewhere in the class; verify.
                if request_id is None:
                    asyncio.create_task(
                        self._call_tool_from_payload(method, params)
                    )
                    return JSONResponse(status_code=202, content=None)

                # Handle some predefined MCP methods
                if method == 'initialize':
                    # Return initialization result and prefer new protocol version
                    init_result = {
                        'protocolVersion': '2025-06-18',
                        'capabilities': {'tools': {}},
                        'serverInfo': {
                            'name': 'a2a-server',
                            'version': '0.1.0',
                        },
                    }
                    return JSONResponse(
                        content={
                            'jsonrpc': '2.0',
                            'id': request_id,
                            'result': init_result,
                        }
                    )

                if method == 'tools/list':
                    tools_response = await list_tools()
                    return JSONResponse(
                        content={
                            'jsonrpc': '2.0',
                            'id': request_id,
                            'result': tools_response,
                        }
                    )

                # Build JSON-RPC response helper
                # Per JSON-RPC 2.0 spec: response MUST have either result OR error, not both
                def make_response(id_val, result=None, error=None):
                    response = {
                        'jsonrpc': '2.0',
                        'id': id_val,
                    }
                    if error is not None:
                        response['error'] = error
                    else:
                        response['result'] = result
                    return response

                # Per MCP spec: server can choose to respond with JSON or SSE
                # We prefer JSON for simple request/response (more compatible)
                # SSE is only used when client explicitly requests it via query param
                # or when streaming long-running operations
                use_sse = (
                    request.query_params.get('stream', '').lower() == 'true'
                )

                if use_sse:

                    async def event_generator():
                        # Open stream for this request
                        yield f'data: {json.dumps({"type": "connected", "id": request_id})}\n\n'
                        try:
                            if method == 'tools/call':
                                result = await self._call_tool(
                                    params.get('name'),
                                    params.get('arguments', {}),
                                )
                            else:
                                # Unknown method: emit the JSON-RPC error
                                # event and close the stream.
                                response = make_response(
                                    request_id,
                                    error={
                                        'code': -32601,
                                        'message': f'Method not found: {method}',
                                    },
                                )
                                yield f'data: {json.dumps(response)}\n\n'
                                return
                            response = make_response(request_id, result=result)
                            yield f'data: {json.dumps(response)}\n\n'
                        except Exception as e:
                            # Tool errors are reported in-band as JSON-RPC
                            # internal errors (-32603).
                            response = make_response(
                                request_id,
                                error={'code': -32603, 'message': str(e)},
                            )
                            yield f'data: {json.dumps(response)}\n\n'

                    return StreamingResponse(
                        event_generator(),
                        media_type='text/event-stream',
                        headers={
                            'Cache-Control': 'no-cache',
                            'Connection': 'keep-alive',
                            'X-Accel-Buffering': 'no',
                        },
                    )

                # Default: return single JSON response
                try:
                    if method == 'tools/call':
                        result = await self._call_tool(
                            params.get('name'), params.get('arguments', {})
                        )
                    else:
                        response = make_response(
                            request_id,
                            error={
                                'code': -32601,
                                'message': f'Method not found: {method}',
                            },
                        )
                        return JSONResponse(content=response, status_code=400)
                    response = make_response(request_id, result=result)
                    return JSONResponse(content=response)
                except Exception as e:
                    response = make_response(
                        request_id, error={'code': -32603, 'message': str(e)}
                    )
                    return JSONResponse(content=response, status_code=500)

            except HTTPException:
                # Let FastAPI render HTTP errors raised above unchanged.
                raise
            except Exception as e:
                logger.error(f'Error handling RPC: {e}')
                return JSONResponse(
                    {
                        'jsonrpc': '2.0',
                        'error': {'code': -32603, 'message': str(e)},
                    },
                    status_code=500,
                )

        # REST API endpoints for task queue (used by monitor UI)
        @self.app.get('/mcp/v1/tasks')
        async def list_tasks_rest(status: Optional[str] = None):
            """REST endpoint to list tasks."""
            try:
                result = await self._list_tasks(
                    {'status': status} if status else {}
                )
                return result
            except Exception as e:
                logger.error(f'Error listing tasks: {e}')
                raise HTTPException(status_code=500, detail=str(e))

        @self.app.post('/mcp/v1/tasks')
        async def create_task_rest(request: Request):
            """REST endpoint to create a task."""
            try:
                body = await request.json()
                result = await self._create_task(body)
                return result
            except Exception as e:
                logger.error(f'Error creating task: {e}')
                raise HTTPException(status_code=500, detail=str(e))

        @self.app.get('/mcp/v1/tasks/{task_id}')
        async def get_task_rest(task_id: str):
            """REST endpoint to get a specific task."""
            try:
                result = await self._get_task({'task_id': task_id})
                # _get_task signals a missing task via an 'error' key rather
                # than raising; map that to HTTP 404.
                if 'error' in result:
                    raise HTTPException(status_code=404, detail=result['error'])
                return result
            except HTTPException:
                raise
            except Exception as e:
                logger.error(f'Error getting task: {e}')
                raise HTTPException(status_code=500, detail=str(e))

        @self.app.put('/mcp/v1/tasks/{task_id}')
        async def update_task_rest(task_id: str, request: Request):
            """REST endpoint to update task status."""
            try:
                body = await request.json()
                new_status = body.get('status')

                if not hasattr(self.a2a_server, 'task_manager'):
                    raise HTTPException(
                        status_code=503, detail='Task manager not available'
                    )

                # Local import avoids any import-time cycle with .models.
                from .models import TaskStatus

                task = await self.a2a_server.task_manager.update_task_status(
                    task_id, TaskStatus(new_status)
                )

                if not task:
                    raise HTTPException(
                        status_code=404, detail=f'Task {task_id} not found'
                    )

                return {
                    'success': True,
                    'task_id': task.id,
                    'status': task.status.value,
                    'updated_at': task.updated_at.isoformat(),
                }
            except HTTPException:
                raise
            except Exception as e:
                logger.error(f'Error updating task: {e}')
                raise HTTPException(status_code=500, detail=str(e))
894
+
895
+ async def _call_tool(
896
+ self, tool_name: str, arguments: Dict[str, Any]
897
+ ) -> Dict[str, Any]:
898
+ """Execute A2A operations based on tool name."""
899
+ if not self.a2a_server:
900
+ return {'error': 'No A2A server connected'}
901
+
902
+ try:
903
+ if tool_name == 'send_message':
904
+ return await self._send_message(arguments)
905
+
906
+ elif tool_name == 'send_message_async':
907
+ return await self._send_message_async(arguments)
908
+
909
+ elif tool_name == 'send_to_agent':
910
+ return await self._send_to_agent(arguments)
911
+
912
+ elif tool_name == 'create_task':
913
+ return await self._create_task(arguments)
914
+
915
+ elif tool_name == 'get_task':
916
+ return await self._get_task(arguments)
917
+
918
+ elif tool_name == 'list_tasks':
919
+ return await self._list_tasks(arguments)
920
+
921
+ elif tool_name == 'cancel_task':
922
+ return await self._cancel_task(arguments)
923
+
924
+ elif tool_name == 'discover_agents':
925
+ return await self._discover_agents()
926
+
927
+ elif tool_name == 'get_agent':
928
+ return await self._get_agent(arguments)
929
+
930
+ elif tool_name == 'register_agent':
931
+ return await self._register_agent(arguments)
932
+
933
+ elif tool_name == 'get_agent_card':
934
+ return await self._get_agent_card()
935
+
936
+ elif tool_name == 'refresh_agent_heartbeat':
937
+ return await self._refresh_agent_heartbeat(arguments)
938
+
939
+ elif tool_name == 'get_messages':
940
+ return await self._get_messages(arguments)
941
+
942
+ elif tool_name == 'get_task_updates':
943
+ return await self._get_task_updates(arguments)
944
+
945
+ elif tool_name == 'search_tools':
946
+ return await self._search_tools(arguments)
947
+
948
+ elif tool_name == 'get_tool_schema':
949
+ return await self._get_tool_schema(arguments)
950
+
951
+ elif tool_name == 'get_queue_stats':
952
+ return await self._get_queue_stats()
953
+
954
+ elif tool_name == 'list_task_runs':
955
+ return await self._list_task_runs(arguments)
956
+
957
+ # Check if it's a marketing tool
958
+ elif (
959
+ MARKETING_TOOLS_AVAILABLE
960
+ and tool_name in MARKETING_TOOL_HANDLERS
961
+ ):
962
+ handler = MARKETING_TOOL_HANDLERS[tool_name]
963
+ return await handler(arguments)
964
+
965
+ else:
966
+ return {'error': f'Unknown tool: {tool_name}'}
967
+
968
+ except Exception as e:
969
+ logger.error(f'Error calling tool {tool_name}: {e}', exc_info=True)
970
+ return {'error': f'Tool execution error: {str(e)}'}
971
+
972
+ async def _call_tool_from_payload(
973
+ self, method: str, params: Dict[str, Any]
974
+ ) -> None:
975
+ """Execute a tool call-style request from a JSON-RPC payload (used for notifications)."""
976
+ try:
977
+ if not method:
978
+ return
979
+ if method == 'tools/call':
980
+ tool_name = params.get('name')
981
+ arguments = params.get('arguments', {})
982
+ await self._call_tool(tool_name, arguments)
983
+ else:
984
+ # No-op for other MCP methods at this time
985
+ return
986
+ except Exception as exc:
987
+ logger.error(f'Error processing notification {method}: {exc}')
988
+
989
    async def _send_message(self, args: Dict[str, Any]) -> Dict[str, Any]:
        """Send a message to the A2A agent and wait for its reply.

        Logs the exchange to the monitoring UI and publishes preview events
        to the message broker on both legs. If the text mentions tasks, a
        queue summary is appended to the prompt before delivery.

        Args:
            args: Tool arguments. Uses 'message' (text to send) and an
                optional 'conversation_id' (a new UUID is generated when
                absent).

        Returns:
            Dict with 'success', the agent's text 'response', the
            'conversation_id', and a 'timestamp'.
        """
        message_text = args.get('message', '')
        conversation_id = args.get('conversation_id') or str(uuid.uuid4())

        # Log to monitoring UI
        await log_agent_message(
            agent_name='MCP Client',
            content=message_text,
            message_type='human',
            metadata={'conversation_id': conversation_id, 'source': 'mcp'},
        )

        # Publish to message broker for UI monitoring
        if hasattr(self.a2a_server, 'message_broker'):
            await self.a2a_server.message_broker.publish_event(
                'mcp.message.received',
                {
                    'source': 'MCP Client',
                    'message': message_text[:100],  # truncated preview only
                    'timestamp': datetime.now().isoformat(),
                    'conversation_id': conversation_id,
                },
            )

        # Check if message is asking about tasks; if so, augment the prompt
        # with a live queue summary before handing it to the agent.
        if any(
            keyword in message_text.lower()
            for keyword in ['task', 'queue', 'status', 'update']
        ):
            # Include task queue information in the response
            if hasattr(self.a2a_server, 'task_manager'):
                tasks = await self.a2a_server.task_manager.list_tasks()
                pending_tasks = [
                    t for t in tasks if t.status.value == 'pending'
                ]
                working_tasks = [
                    t for t in tasks if t.status.value == 'working'
                ]

                task_summary = f'\n\n📋 Task Queue Status:\n'
                task_summary += f'• Pending: {len(pending_tasks)} tasks\n'
                task_summary += f'• Working: {len(working_tasks)} tasks\n'

                if pending_tasks:
                    task_summary += f'\nPending tasks:\n'
                    for task in pending_tasks[:3]:  # Show first 3
                        task_summary += f' - {task.title} (ID: {task.id})\n'

                if working_tasks:
                    task_summary += f'\nActive tasks:\n'
                    for task in working_tasks[:3]:  # Show first 3
                        task_summary += f' - {task.title} (ID: {task.id})\n'

                message_text += task_summary

        # Deliver the (possibly augmented) text to the A2A server.
        message = Message(parts=[Part(type='text', content=message_text)])
        response = await self.a2a_server._process_message(message)

        # Collapse the reply's text parts into a single string.
        response_text = ' '.join(
            [part.content for part in response.parts if part.type == 'text']
        )

        # Log response to monitoring UI
        await log_agent_message(
            agent_name=self.a2a_server.agent_card.card.name
            if hasattr(self.a2a_server, 'agent_card')
            else 'A2A Agent',
            content=response_text,
            message_type='agent',
            metadata={'conversation_id': conversation_id, 'source': 'a2a'},
        )

        # Publish response to message broker
        if hasattr(self.a2a_server, 'message_broker'):
            await self.a2a_server.message_broker.publish_event(
                'mcp.message.sent',
                {
                    'source': 'A2A Agent',
                    'response': response_text[:100],  # truncated preview only
                    'timestamp': datetime.now().isoformat(),
                    'conversation_id': conversation_id,
                },
            )

        return {
            'success': True,
            'response': response_text,
            'conversation_id': conversation_id,
            'timestamp': datetime.now().isoformat(),
        }
1080
+
1081
    async def _send_message_async(self, args: Dict[str, Any]) -> Dict[str, Any]:
        """
        Send a message asynchronously by creating a task and enqueuing it.

        Unlike _send_message (synchronous), this immediately returns a task_id
        and run_id, allowing callers to poll for results later.

        This is the primary entry point for the "fire and forget" flow.

        Args:
            args: Tool arguments. Uses 'message', optional 'conversation_id'
                (new UUID when absent), 'codebase_id' (default 'global'),
                'priority' (default 0), 'notify_email', and '_user_id'
                (injected by auth middleware, if any).

        Returns:
            Dict with 'success', 'task_id', 'run_id' (None when the hosted
            queue is unavailable), 'status' ('queued'), 'conversation_id'
            and 'timestamp'; or a dict with 'error' on failure.
        """
        from .monitor_api import get_opencode_bridge

        message = args.get('message', '')
        conversation_id = args.get('conversation_id') or str(uuid.uuid4())
        codebase_id = args.get('codebase_id', 'global')
        priority = args.get('priority', 0)
        notify_email = args.get('notify_email')

        bridge = get_opencode_bridge()
        if bridge is None:
            return {'error': 'OpenCode bridge not available'}

        # Create a task with the message as the prompt
        task = await bridge.create_task(
            codebase_id=codebase_id,
            title=f'Async message: {message[:50]}...'
            if len(message) > 50
            else f'Async message: {message}',
            prompt=message,
            agent_type='general',
            priority=priority,
            metadata={
                'conversation_id': conversation_id,
                'source': 'send_message_async',
            },
        )

        if task is None:
            return {'error': 'Failed to create task'}

        # Build task data for SSE notification
        task_data = {
            'id': task.id,
            'title': task.title,
            'description': task.prompt,
            'codebase_id': task.codebase_id,
            'agent_type': task.agent_type,
            'priority': task.priority,
            'status': task.status.value,
            'created_at': task.created_at.isoformat(),
        }

        # Notify SSE-connected workers (best-effort: a failure here must
        # not prevent the enqueue below).
        try:
            notified = await notify_workers_of_new_task(task_data)
            logger.info(
                f'Async message task {task.id} created, notified {len(notified)} SSE workers'
            )
        except Exception as e:
            logger.warning(
                f'Failed to notify SSE workers of task {task.id}: {e}'
            )

        # Enqueue for hosted workers
        run_id = None
        if TASK_QUEUE_AVAILABLE and enqueue_task:
            try:
                user_id = args.get('_user_id')
                task_run = await enqueue_task(
                    task_id=task.id,
                    user_id=user_id,
                    priority=priority,
                    notify_email=notify_email,
                )
                if task_run:
                    run_id = task_run.id
                    logger.info(
                        f'Async message task {task.id} enqueued as run {run_id}'
                    )
            except Exception as e:
                logger.warning(f'Failed to enqueue task {task.id}: {e}')

        return {
            'success': True,
            'task_id': task.id,
            'run_id': run_id,
            'status': 'queued',
            'conversation_id': conversation_id,
            'timestamp': datetime.now().isoformat(),
        }
1170
+
1171
    async def _send_to_agent(self, args: Dict[str, Any]) -> Dict[str, Any]:
        """
        Send a message to a specific named agent.

        The task will be queued until that agent is available to claim it.
        If the agent is offline, the task queues indefinitely unless
        deadline_seconds is set.

        This enables explicit agent-to-agent communication where the caller
        needs work done by a specific agent (not just any available worker).

        Args:
            args: Tool arguments. Requires 'agent_name'; also uses 'message',
                optional 'conversation_id' (new UUID when absent),
                'codebase_id' (default 'global'), 'priority' (default 0),
                'deadline_seconds', 'notify_email', and '_user_id'
                (injected by auth middleware, if any).

        Returns:
            Dict with 'success', 'task_id', 'run_id', 'status' ('queued'),
            'conversation_id', 'timestamp', and a 'routing' info dict; or a
            dict with 'error' on failure.
        """
        from .monitor_api import get_opencode_bridge
        from datetime import timedelta

        agent_name = args.get('agent_name')
        if not agent_name:
            return {'error': 'agent_name is required'}

        message = args.get('message', '')
        conversation_id = args.get('conversation_id') or str(uuid.uuid4())
        codebase_id = args.get('codebase_id', 'global')
        priority = args.get('priority', 0)
        deadline_seconds = args.get('deadline_seconds')
        notify_email = args.get('notify_email')

        bridge = get_opencode_bridge()
        if bridge is None:
            return {'error': 'OpenCode bridge not available'}

        # Create a task with the message as the prompt
        task = await bridge.create_task(
            codebase_id=codebase_id,
            title=f'To {agent_name}: {message[:40]}...'
            if len(message) > 40
            else f'To {agent_name}: {message}',
            prompt=message,
            agent_type='general',
            priority=priority,
            metadata={
                'conversation_id': conversation_id,
                'source': 'send_to_agent',
                'target_agent_name': agent_name,
            },
        )

        if task is None:
            return {'error': 'Failed to create task'}

        # Calculate deadline if specified
        deadline_at = None
        if deadline_seconds:
            from datetime import timezone

            deadline_at = datetime.now(timezone.utc) + timedelta(
                seconds=deadline_seconds
            )

        # Build task data for SSE notification (include routing fields)
        task_data = {
            'id': task.id,
            'title': task.title,
            'description': task.prompt,
            'codebase_id': task.codebase_id,
            'agent_type': task.agent_type,
            'priority': task.priority,
            'status': task.status.value,
            'created_at': task.created_at.isoformat(),
            # Routing fields for notify-time filtering
            'target_agent_name': agent_name,
        }

        # Notify SSE-connected workers (only the targeted agent will be notified)
        try:
            notified = await notify_workers_of_new_task(task_data)
            if notified:
                logger.info(
                    f'Targeted task {task.id} for agent {agent_name}, '
                    f'notified {len(notified)} workers'
                )
            else:
                logger.info(
                    f'Targeted task {task.id} for agent {agent_name}, '
                    f'no matching workers online (will queue)'
                )
        except Exception as e:
            logger.warning(f'Failed to notify workers of task {task.id}: {e}')

        # Enqueue for hosted workers with routing fields
        run_id = None
        if TASK_QUEUE_AVAILABLE and enqueue_task:
            try:
                user_id = args.get('_user_id')
                task_run = await enqueue_task(
                    task_id=task.id,
                    user_id=user_id,
                    priority=priority,
                    notify_email=notify_email,
                    # Agent routing fields
                    target_agent_name=agent_name,
                    deadline_at=deadline_at,
                )
                if task_run:
                    run_id = task_run.id
                    logger.info(
                        f'Targeted task {task.id} enqueued as run {run_id} '
                        f'(target={agent_name}, deadline={deadline_at})'
                    )
            except Exception as e:
                logger.warning(f'Failed to enqueue task {task.id}: {e}')

        # Build routing info for debugging/UX
        routing_info = {
            'target_agent_name': agent_name,
            'required_capabilities': None,  # Not used in send_to_agent currently
            'deadline_at': deadline_at.isoformat() if deadline_at else None,
        }

        result = {
            'success': True,
            'task_id': task.id,
            'run_id': run_id,
            'status': 'queued',
            'conversation_id': conversation_id,
            'timestamp': datetime.now().isoformat(),
            # Routing info for debugging "why is my job stuck?"
            'routing': routing_info,
        }

        return result
1300
+
1301
+ async def _create_task(self, args: Dict[str, Any]) -> Dict[str, Any]:
1302
+ """Create a new task."""
1303
+ from .monitor_api import get_opencode_bridge
1304
+ from .opencode_bridge import AgentTaskStatus, resolve_model
1305
+
1306
+ title = args.get('title')
1307
+ description = args.get('description', '')
1308
+ codebase_id = args.get('codebase_id', 'global')
1309
+ agent_type = args.get('agent_type', 'build')
1310
+ priority = args.get('priority', 0)
1311
+ # Resolve user-friendly model name to full provider/model-id
1312
+ model_input = args.get('model')
1313
+ model = resolve_model(model_input) if model_input else None
1314
+
1315
+ bridge = get_opencode_bridge()
1316
+ if bridge is None:
1317
+ return {'error': 'OpenCode bridge not available'}
1318
+
1319
+ task = await bridge.create_task(
1320
+ codebase_id=codebase_id,
1321
+ title=title,
1322
+ prompt=description,
1323
+ agent_type=agent_type,
1324
+ priority=priority,
1325
+ model=model,
1326
+ )
1327
+
1328
+ if task is None:
1329
+ return {'error': 'Failed to create task'}
1330
+
1331
+ # Notify SSE-connected workers of the new task
1332
+ task_data = {
1333
+ 'id': task.id,
1334
+ 'title': task.title,
1335
+ 'description': task.prompt,
1336
+ 'codebase_id': task.codebase_id,
1337
+ 'agent_type': task.agent_type,
1338
+ 'model': task.model,
1339
+ 'priority': task.priority,
1340
+ 'status': task.status.value,
1341
+ 'created_at': task.created_at.isoformat(),
1342
+ }
1343
+ try:
1344
+ notified = await notify_workers_of_new_task(task_data)
1345
+ logger.info(
1346
+ f'Task {task.id} created, notified {len(notified)} SSE workers'
1347
+ )
1348
+ except Exception as e:
1349
+ logger.warning(
1350
+ f'Failed to notify SSE workers of task {task.id}: {e}'
1351
+ )
1352
+
1353
+ # Enqueue for hosted workers (if task queue is available)
1354
+ # This enables the mid-market "submit and get email" flow
1355
+ run_id = None
1356
+ if TASK_QUEUE_AVAILABLE and enqueue_task:
1357
+ try:
1358
+ # Extract user_id from args if available (set by auth middleware)
1359
+ user_id = args.get('_user_id')
1360
+ notify_email = args.get('notify_email')
1361
+ template_id = args.get('template_id')
1362
+ automation_id = args.get('automation_id')
1363
+
1364
+ task_run = await enqueue_task(
1365
+ task_id=task.id,
1366
+ user_id=user_id,
1367
+ template_id=template_id,
1368
+ automation_id=automation_id,
1369
+ priority=priority,
1370
+ notify_email=notify_email,
1371
+ )
1372
+ if task_run:
1373
+ run_id = task_run.id
1374
+ logger.info(f'Task {task.id} enqueued as run {run_id}')
1375
+ except Exception as e:
1376
+ logger.warning(f'Failed to enqueue task {task.id}: {e}')
1377
+
1378
+ return {
1379
+ 'success': True,
1380
+ 'task_id': task.id,
1381
+ 'run_id': run_id, # Include run_id if enqueued
1382
+ 'title': task.title,
1383
+ 'description': task.prompt,
1384
+ 'codebase_id': task.codebase_id,
1385
+ 'model': task.model,
1386
+ 'status': task.status.value,
1387
+ 'created_at': task.created_at.isoformat(),
1388
+ }
1389
+
1390
+ async def _get_task(self, args: Dict[str, Any]) -> Dict[str, Any]:
1391
+ """Get task details."""
1392
+ from .monitor_api import get_opencode_bridge
1393
+ from .opencode_bridge import AgentTaskStatus
1394
+
1395
+ task_id = args.get('task_id')
1396
+
1397
+ bridge = get_opencode_bridge()
1398
+ if bridge is None:
1399
+ return {'error': 'OpenCode bridge not available'}
1400
+
1401
+ task = await bridge.get_task(task_id)
1402
+
1403
+ if not task:
1404
+ return {'error': f'Task {task_id} not found'}
1405
+
1406
+ status_value = (
1407
+ task.status.value
1408
+ if hasattr(task.status, 'value')
1409
+ else str(task.status)
1410
+ )
1411
+ if status_value == 'running':
1412
+ status_value = 'working'
1413
+
1414
+ return {
1415
+ 'task_id': task.id,
1416
+ 'title': task.title,
1417
+ 'description': task.prompt or '',
1418
+ 'codebase_id': task.codebase_id,
1419
+ 'agent_type': task.agent_type,
1420
+ 'priority': task.priority,
1421
+ 'status': status_value,
1422
+ 'created_at': task.created_at.isoformat()
1423
+ if task.created_at
1424
+ else None,
1425
+ 'updated_at': task.created_at.isoformat()
1426
+ if task.created_at
1427
+ else None,
1428
+ 'started_at': task.started_at.isoformat()
1429
+ if task.started_at
1430
+ else None,
1431
+ 'completed_at': task.completed_at.isoformat()
1432
+ if task.completed_at
1433
+ else None,
1434
+ 'result': task.result,
1435
+ 'error': task.error,
1436
+ }
1437
+
1438
+ async def _list_tasks(self, args: Dict[str, Any]) -> Dict[str, Any]:
1439
+ """List all tasks."""
1440
+ from .monitor_api import get_opencode_bridge
1441
+ from .opencode_bridge import AgentTaskStatus
1442
+
1443
+ bridge = get_opencode_bridge()
1444
+ if bridge is None:
1445
+ return {'error': 'OpenCode bridge not available'}
1446
+
1447
+ codebase_id = args.get('codebase_id')
1448
+ status_filter = args.get('status')
1449
+
1450
+ status_map = {
1451
+ 'pending': AgentTaskStatus.PENDING,
1452
+ 'working': AgentTaskStatus.RUNNING,
1453
+ 'completed': AgentTaskStatus.COMPLETED,
1454
+ 'failed': AgentTaskStatus.FAILED,
1455
+ 'cancelled': AgentTaskStatus.CANCELLED,
1456
+ }
1457
+ status_enum = status_map.get(status_filter) if status_filter else None
1458
+
1459
+ tasks = await bridge.list_tasks(
1460
+ codebase_id=codebase_id, status=status_enum
1461
+ )
1462
+
1463
+ return {
1464
+ 'tasks': [
1465
+ {
1466
+ 'task_id': task.id,
1467
+ 'title': task.title,
1468
+ 'description': task.prompt or '',
1469
+ 'status': task.status.value
1470
+ if hasattr(task.status, 'value')
1471
+ else str(task.status),
1472
+ 'created_at': task.created_at.isoformat()
1473
+ if task.created_at
1474
+ else None,
1475
+ 'updated_at': task.created_at.isoformat()
1476
+ if task.created_at
1477
+ else None,
1478
+ }
1479
+ for task in tasks
1480
+ ],
1481
+ 'count': len(tasks),
1482
+ }
1483
+
1484
+ async def _cancel_task(self, args: Dict[str, Any]) -> Dict[str, Any]:
1485
+ """Cancel a task."""
1486
+ task_id = args.get('task_id')
1487
+
1488
+ bridge = get_opencode_bridge()
1489
+ if bridge is None:
1490
+ return {'error': 'OpenCode bridge not available'}
1491
+
1492
+ success = await bridge.cancel_task(task_id)
1493
+ if not success:
1494
+ return {'error': f'Task {task_id} not found or cannot be cancelled'}
1495
+
1496
+ task = await bridge.get_task(task_id)
1497
+ return {
1498
+ 'success': True,
1499
+ 'task_id': task_id,
1500
+ 'status': 'cancelled',
1501
+ }
1502
+
1503
+ async def _get_queue_stats(self) -> Dict[str, Any]:
1504
+ """Get task queue statistics for hosted workers."""
1505
+ if not TASK_QUEUE_AVAILABLE or not get_task_queue:
1506
+ return {'error': 'Task queue not available'}
1507
+
1508
+ queue = get_task_queue()
1509
+ if not queue:
1510
+ return {'error': 'Task queue not initialized'}
1511
+
1512
+ try:
1513
+ stats = await queue.get_queue_stats()
1514
+ return {
1515
+ 'success': True,
1516
+ 'stats': stats,
1517
+ }
1518
+ except Exception as e:
1519
+ return {'error': f'Failed to get queue stats: {str(e)}'}
1520
+
1521
+ async def _list_task_runs(self, args: Dict[str, Any]) -> Dict[str, Any]:
1522
+ """List task runs from the queue."""
1523
+ if not TASK_QUEUE_AVAILABLE or not get_task_queue:
1524
+ return {'error': 'Task queue not available'}
1525
+
1526
+ queue = get_task_queue()
1527
+ if not queue:
1528
+ return {'error': 'Task queue not initialized'}
1529
+
1530
+ try:
1531
+ user_id = args.get('user_id') or args.get('_user_id')
1532
+ status = args.get('status')
1533
+ limit = args.get('limit', 100)
1534
+
1535
+ # Convert status string to enum if provided
1536
+ status_enum = None
1537
+ if status:
1538
+ from .task_queue import TaskRunStatus
1539
+
1540
+ try:
1541
+ status_enum = TaskRunStatus(status)
1542
+ except ValueError:
1543
+ return {'error': f'Invalid status: {status}'}
1544
+
1545
+ runs = await queue.list_runs(
1546
+ user_id=user_id,
1547
+ status=status_enum,
1548
+ limit=limit,
1549
+ )
1550
+
1551
+ return {
1552
+ 'success': True,
1553
+ 'runs': [
1554
+ {
1555
+ 'id': run.id,
1556
+ 'task_id': run.task_id,
1557
+ 'user_id': run.user_id,
1558
+ 'status': run.status.value,
1559
+ 'priority': run.priority,
1560
+ 'attempts': run.attempts,
1561
+ 'started_at': run.started_at.isoformat()
1562
+ if run.started_at
1563
+ else None,
1564
+ 'completed_at': run.completed_at.isoformat()
1565
+ if run.completed_at
1566
+ else None,
1567
+ 'runtime_seconds': run.runtime_seconds,
1568
+ 'result_summary': run.result_summary,
1569
+ 'created_at': run.created_at.isoformat(),
1570
+ }
1571
+ for run in runs
1572
+ ],
1573
+ 'count': len(runs),
1574
+ }
1575
+ except Exception as e:
1576
+ return {'error': f'Failed to list task runs: {str(e)}'}
1577
+
1578
    async def _discover_agents(self) -> Dict[str, Any]:
        """
        Discover available agents.

        Returns agents with:
        - name: Unique discovery identity (e.g., "code-reviewer:dev-vm:abc123")
        - role: Routing identity for send_to_agent (e.g., "code-reviewer")
        - instance_id: Unique instance identifier
        - description, url, capabilities, last_seen

        Note: Use 'role' with send_to_agent for routing, not 'name'.
        """
        import os

        if not self.a2a_server or not hasattr(
            self.a2a_server, 'message_broker'
        ):
            return {'error': 'Message broker not available'}

        broker = self.a2a_server.message_broker
        if broker is None:
            return {'error': 'Message broker not configured'}

        # Check if broker is started (Redis broker requires this)
        if hasattr(broker, '_running') and not broker._running:
            return {
                'error': 'Message broker not started. Ensure the server is fully initialized.'
            }

        # Get max_age from environment (default 120s): agents whose last
        # heartbeat is older than this are excluded from discovery.
        max_age_seconds = int(
            os.environ.get('A2A_AGENT_DISCOVERY_MAX_AGE', '120')
        )

        try:
            agents = await broker.discover_agents(
                max_age_seconds=max_age_seconds
            )
        except RuntimeError as e:
            return {'error': f'Message broker error: {str(e)}'}

        # Handle both old (AgentCard) and new (dict) return formats
        agent_list = []
        for agent in agents:
            if isinstance(agent, dict):
                # New enriched format with role/instance_id
                agent_list.append(
                    {
                        'name': agent.get('name'),
                        'role': agent.get(
                            'role'
                        ),  # Use this for send_to_agent routing
                        'instance_id': agent.get('instance_id'),
                        'description': agent.get('description'),
                        'url': agent.get('url'),
                        'capabilities': agent.get('capabilities'),
                        'last_seen': agent.get('last_seen'),
                    }
                )
            else:
                # Legacy AgentCard format (backward compat): derive the role
                # from the "role:host:instance" naming convention.
                agent_list.append(
                    {
                        'name': agent.name,
                        'role': agent.name.split(':')[0]
                        if ':' in agent.name
                        else agent.name,
                        'description': agent.description,
                        'url': agent.url,
                    }
                )

        return {
            'agents': agent_list,
            'count': len(agent_list),
            'routing_note': (
                "IMPORTANT: Use 'role' with send_to_agent for routing. "
                "'name' is a unique instance identity and will NOT route tasks. "
                "Example: send_to_agent(agent_name='code-reviewer') routes by role."
            ),
        }
1659
+
1660
+ async def _get_agent(self, args: Dict[str, Any]) -> Dict[str, Any]:
1661
+ """Get specific agent information."""
1662
+ agent_name = args.get('agent_name')
1663
+
1664
+ if not self.a2a_server or not hasattr(
1665
+ self.a2a_server, 'message_broker'
1666
+ ):
1667
+ return {'error': 'Message broker not available'}
1668
+
1669
+ broker = self.a2a_server.message_broker
1670
+ if broker is None:
1671
+ return {'error': 'Message broker not configured'}
1672
+
1673
+ # Check if broker is started (Redis broker requires this)
1674
+ if hasattr(broker, '_running') and not broker._running:
1675
+ return {
1676
+ 'error': 'Message broker not started. Ensure the server is fully initialized.'
1677
+ }
1678
+
1679
+ try:
1680
+ agent = await broker.get_agent(agent_name)
1681
+ except RuntimeError as e:
1682
+ return {'error': f'Message broker error: {str(e)}'}
1683
+
1684
+ if not agent:
1685
+ return {'error': f"Agent '{agent_name}' not found"}
1686
+
1687
+ return {
1688
+ 'name': agent.name,
1689
+ 'description': agent.description,
1690
+ 'url': agent.url,
1691
+ 'capabilities': {
1692
+ 'streaming': agent.capabilities.streaming
1693
+ if hasattr(agent, 'capabilities')
1694
+ else False,
1695
+ 'push_notifications': agent.capabilities.push_notifications
1696
+ if hasattr(agent, 'capabilities')
1697
+ else False,
1698
+ },
1699
+ }
1700
+
1701
+ async def _get_agent_card(self) -> Dict[str, Any]:
1702
+ """Get the agent card for this server."""
1703
+ if not hasattr(self.a2a_server, 'agent_card'):
1704
+ return {'error': 'Agent card not available'}
1705
+
1706
+ card = self.a2a_server.agent_card.card
1707
+
1708
+ return {
1709
+ 'name': card.name,
1710
+ 'description': card.description,
1711
+ 'url': card.url,
1712
+ 'provider': {
1713
+ 'organization': card.provider.organization,
1714
+ 'url': card.provider.url,
1715
+ },
1716
+ 'capabilities': {
1717
+ 'streaming': card.capabilities.streaming
1718
+ if hasattr(card, 'capabilities')
1719
+ else False,
1720
+ 'push_notifications': card.capabilities.push_notifications
1721
+ if hasattr(card, 'capabilities')
1722
+ else False,
1723
+ },
1724
+ 'skills': [
1725
+ {
1726
+ 'id': skill.id,
1727
+ 'name': skill.name,
1728
+ 'description': skill.description,
1729
+ }
1730
+ for skill in (card.skills if hasattr(card, 'skills') else [])
1731
+ ],
1732
+ }
1733
+
1734
    async def _register_agent(self, args: Dict[str, Any]) -> Dict[str, Any]:
        """Register a new agent in the network.

        Args:
            args: Tool arguments. Requires 'name', 'description', and 'url';
                optional 'capabilities' dict with boolean 'streaming' and
                'push_notifications' flags.

        Returns:
            Dict with 'success' and the registered fields, or a dict with
            'error' describing the validation or broker failure.
        """
        name = args.get('name')
        description = args.get('description')
        url = args.get('url')
        capabilities = args.get('capabilities', {})

        if not name:
            return {'error': 'Agent name is required'}
        if not description:
            return {'error': 'Agent description is required'}
        if not url:
            return {'error': 'Agent URL is required'}

        if not self.a2a_server or not hasattr(
            self.a2a_server, 'message_broker'
        ):
            return {'error': 'Message broker not available'}

        broker = self.a2a_server.message_broker
        if broker is None:
            return {'error': 'Message broker not configured'}

        # Check if broker is started (Redis broker requires this)
        if hasattr(broker, '_running') and not broker._running:
            return {
                'error': 'Message broker not started. Ensure the server is fully initialized.'
            }

        from .models import AgentCard, AgentProvider, AgentCapabilities

        # Create agent card
        agent_card = AgentCard(
            name=name,
            description=description,
            url=url,
            provider=AgentProvider(organization='External Agent', url=url),
            capabilities=AgentCapabilities(
                streaming=capabilities.get('streaming', False),
                push_notifications=capabilities.get(
                    'push_notifications', False
                ),
            ),
        )

        try:
            await broker.register_agent(agent_card)
        except RuntimeError as e:
            return {'error': f'Message broker error: {str(e)}'}

        # Register with monitoring service for UI tracking
        from .monitor_api import monitoring_service

        agent_id = f'external_{name}'
        await monitoring_service.register_agent(agent_id, name)

        # Log to monitoring
        await log_agent_message(
            agent_name=name,
            content=f"Agent '{name}' registered at {url}",
            message_type='system',
            metadata={'event': 'agent_registered', 'url': url},
        )

        # Publish event (broker is already validated above)
        try:
            await broker.publish_event(
                'mcp.agent.registered',
                {
                    'name': name,
                    'url': url,
                    'timestamp': datetime.now().isoformat(),
                },
            )
        except RuntimeError:
            pass  # Non-critical, continue with registration success

        return {
            'success': True,
            'name': name,
            'description': description,
            'url': url,
            'message': f"Agent '{name}' successfully registered and is now discoverable",
        }
1818
+
1819
+ async def _refresh_agent_heartbeat(
1820
+ self, args: Dict[str, Any]
1821
+ ) -> Dict[str, Any]:
1822
+ """Refresh the last_seen timestamp for an agent to keep it visible in discovery."""
1823
+ agent_name = args.get('agent_name')
1824
+
1825
+ if not agent_name:
1826
+ return {'error': 'agent_name is required'}
1827
+
1828
+ if not self.a2a_server or not hasattr(
1829
+ self.a2a_server, 'message_broker'
1830
+ ):
1831
+ return {'error': 'Message broker not available'}
1832
+
1833
+ broker = self.a2a_server.message_broker
1834
+ if broker is None:
1835
+ return {'error': 'Message broker not configured'}
1836
+
1837
+ # Check if broker supports heartbeat refresh
1838
+ if not hasattr(broker, 'refresh_agent_heartbeat'):
1839
+ return {
1840
+ 'error': 'Message broker does not support heartbeat refresh'
1841
+ }
1842
+
1843
+ try:
1844
+ success = await broker.refresh_agent_heartbeat(agent_name)
1845
+ if success:
1846
+ return {
1847
+ 'success': True,
1848
+ 'agent_name': agent_name,
1849
+ 'message': f"Heartbeat refreshed for agent '{agent_name}'",
1850
+ }
1851
+ else:
1852
+ return {
1853
+ 'success': False,
1854
+ 'agent_name': agent_name,
1855
+ 'message': f"Agent '{agent_name}' not found in registry (register first)",
1856
+ }
1857
+ except Exception as e:
1858
+ return {'error': f'Heartbeat refresh failed: {str(e)}'}
1859
+
1860
+ async def _get_messages(self, args: Dict[str, Any]) -> Dict[str, Any]:
1861
+ """Get messages from the monitoring system."""
1862
+ from .monitor_api import monitoring_service
1863
+
1864
+ conversation_id = args.get('conversation_id')
1865
+ limit_param = args.get('limit', 50)
1866
+
1867
+ # Ensure limit is an integer with robust conversion
1868
+ logger.info(
1869
+ f'get_messages called with limit_param: {limit_param} (type: {type(limit_param)})'
1870
+ )
1871
+ try:
1872
+ limit = int(limit_param)
1873
+ logger.info(f'Converted limit to: {limit} (type: {type(limit)})')
1874
+ except (TypeError, ValueError) as e:
1875
+ logger.warning(
1876
+ f'Invalid limit parameter: {limit_param}, error: {e}, using default 50'
1877
+ )
1878
+ limit = 50
1879
+
1880
+ # Get messages from monitoring service
1881
+ all_messages = monitoring_service.messages
1882
+ logger.info(
1883
+ f'Total messages in monitoring service: {len(all_messages)} (type: {type(all_messages)})'
1884
+ )
1885
+
1886
+ # Filter by conversation_id if provided
1887
+ if conversation_id:
1888
+ filtered_messages = [
1889
+ msg
1890
+ for msg in all_messages
1891
+ if msg.metadata.get('conversation_id') == conversation_id
1892
+ ]
1893
+ else:
1894
+ filtered_messages = list(all_messages) # Convert to list explicitly
1895
+
1896
+ logger.info(
1897
+ f'Filtered messages: {len(filtered_messages)} (type: {type(filtered_messages)})'
1898
+ )
1899
+
1900
+ # Limit results (ensure limit is positive)
1901
+ if limit > 0:
1902
+ recent_messages = filtered_messages[-limit:]
1903
+ logger.info(f'Recent messages sliced: {len(recent_messages)}')
1904
+ else:
1905
+ recent_messages = filtered_messages
1906
+
1907
+ return {
1908
+ 'success': True,
1909
+ 'messages': [
1910
+ {
1911
+ 'id': msg.id,
1912
+ 'timestamp': msg.timestamp.isoformat(),
1913
+ 'type': msg.type,
1914
+ 'agent_name': msg.agent_name,
1915
+ 'content': msg.content,
1916
+ 'metadata': msg.metadata,
1917
+ }
1918
+ for msg in recent_messages
1919
+ ],
1920
+ 'total': len(recent_messages),
1921
+ }
1922
+
1923
+ async def _get_task_updates(self, args: Dict[str, Any]) -> Dict[str, Any]:
1924
+ """Get recent task updates."""
1925
+ bridge = get_opencode_bridge()
1926
+ if bridge is None:
1927
+ return {'error': 'OpenCode bridge not available'}
1928
+
1929
+ since_timestamp = args.get('since_timestamp')
1930
+ task_ids = args.get('task_ids', [])
1931
+
1932
+ all_tasks = await bridge.list_tasks()
1933
+
1934
+ if task_ids:
1935
+ tasks = [t for t in all_tasks if t.id in task_ids]
1936
+ else:
1937
+ tasks = all_tasks
1938
+
1939
+ if since_timestamp:
1940
+ from datetime import datetime
1941
+
1942
+ cutoff = datetime.fromisoformat(
1943
+ since_timestamp.replace('Z', '+00:00')
1944
+ )
1945
+ tasks = [
1946
+ t
1947
+ for t in tasks
1948
+ if t.created_at > cutoff
1949
+ or (t.started_at and t.started_at > cutoff)
1950
+ or (t.completed_at and t.completed_at > cutoff)
1951
+ ]
1952
+
1953
+ tasks.sort(key=lambda t: t.created_at, reverse=True)
1954
+
1955
+ return {
1956
+ 'success': True,
1957
+ 'updates': [
1958
+ {
1959
+ 'task_id': task.id,
1960
+ 'title': task.title,
1961
+ 'description': task.prompt,
1962
+ 'status': 'working'
1963
+ if task.status.value == 'running'
1964
+ else task.status.value,
1965
+ 'created_at': task.created_at.isoformat(),
1966
+ 'updated_at': task.created_at.isoformat(),
1967
+ 'codebase_id': task.codebase_id,
1968
+ 'agent_type': task.agent_type,
1969
+ 'priority': task.priority,
1970
+ }
1971
+ for task in tasks
1972
+ ],
1973
+ 'total': len(tasks),
1974
+ }
1975
+
1976
+ async def _search_tools(self, args: Dict[str, Any]) -> Dict[str, Any]:
1977
+ """Search for tools by keyword or category for progressive disclosure.
1978
+
1979
+ This enables LLMs to discover tools on-demand instead of loading all
1980
+ definitions upfront, following Anthropic's MCP efficiency recommendations.
1981
+ """
1982
+ query = args.get('query', '').lower()
1983
+ detail_level = args.get('detail_level', 'summary')
1984
+
1985
+ # Define tool categories for efficient discovery
1986
+ tool_categories = {
1987
+ 'messaging': ['send_message', 'get_messages'],
1988
+ 'tasks': [
1989
+ 'create_task',
1990
+ 'get_task',
1991
+ 'list_tasks',
1992
+ 'cancel_task',
1993
+ 'get_task_updates',
1994
+ ],
1995
+ 'agents': [
1996
+ 'discover_agents',
1997
+ 'get_agent',
1998
+ 'register_agent',
1999
+ 'get_agent_card',
2000
+ ],
2001
+ 'discovery': ['search_tools', 'get_tool_schema'],
2002
+ # Spotlessbinco marketing tool categories
2003
+ 'creative': [
2004
+ 'spotless_generate_creative',
2005
+ 'spotless_batch_generate_creatives',
2006
+ 'spotless_get_top_creatives',
2007
+ 'spotless_analyze_creative_performance',
2008
+ ],
2009
+ 'campaigns': [
2010
+ 'spotless_create_campaign',
2011
+ 'spotless_update_campaign_status',
2012
+ 'spotless_update_campaign_budget',
2013
+ 'spotless_get_campaign_metrics',
2014
+ 'spotless_list_campaigns',
2015
+ ],
2016
+ 'automations': [
2017
+ 'spotless_create_automation',
2018
+ 'spotless_trigger_automation',
2019
+ 'spotless_list_automations',
2020
+ 'spotless_update_automation_status',
2021
+ ],
2022
+ 'audiences': [
2023
+ 'spotless_create_geo_audience',
2024
+ 'spotless_create_lookalike_audience',
2025
+ 'spotless_create_custom_audience',
2026
+ 'spotless_get_trash_zone_zips',
2027
+ ],
2028
+ 'analytics': [
2029
+ 'spotless_get_unified_metrics',
2030
+ 'spotless_get_roi_metrics',
2031
+ 'spotless_get_channel_performance',
2032
+ 'spotless_thompson_sample_budget',
2033
+ 'spotless_get_conversion_attribution',
2034
+ ],
2035
+ 'platform_sync': [
2036
+ 'spotless_sync_facebook_metrics',
2037
+ 'spotless_sync_tiktok_metrics',
2038
+ 'spotless_sync_google_metrics',
2039
+ 'spotless_send_facebook_conversion',
2040
+ 'spotless_send_tiktok_event',
2041
+ ],
2042
+ # Convenience aliases
2043
+ 'marketing': [
2044
+ 'spotless_create_campaign',
2045
+ 'spotless_generate_creative',
2046
+ 'spotless_create_automation',
2047
+ 'spotless_get_unified_metrics',
2048
+ 'spotless_thompson_sample_budget',
2049
+ ],
2050
+ 'spotless': [
2051
+ 'spotless_generate_creative',
2052
+ 'spotless_create_campaign',
2053
+ 'spotless_create_automation',
2054
+ 'spotless_create_geo_audience',
2055
+ 'spotless_get_unified_metrics',
2056
+ ],
2057
+ }
2058
+
2059
+ # Get all tools
2060
+ all_tools = self._get_tools_from_a2a_server()
2061
+ tools_by_name = {t['name']: t for t in all_tools}
2062
+
2063
+ matching_tools = []
2064
+
2065
+ # Check if query matches a category
2066
+ if query in tool_categories:
2067
+ tool_names = tool_categories[query]
2068
+ matching_tools = [
2069
+ tools_by_name[name]
2070
+ for name in tool_names
2071
+ if name in tools_by_name
2072
+ ]
2073
+ else:
2074
+ # Search by keyword in name or description
2075
+ for tool in all_tools:
2076
+ if (
2077
+ query in tool['name'].lower()
2078
+ or query in tool['description'].lower()
2079
+ ):
2080
+ matching_tools.append(tool)
2081
+
2082
+ # Format results based on detail level
2083
+ if detail_level == 'name_only':
2084
+ results = [{'name': t['name']} for t in matching_tools]
2085
+ elif detail_level == 'full':
2086
+ results = matching_tools
2087
+ else: # summary (default)
2088
+ results = [
2089
+ {
2090
+ 'name': t['name'],
2091
+ 'description': t['description'][:200] + '...'
2092
+ if len(t['description']) > 200
2093
+ else t['description'],
2094
+ }
2095
+ for t in matching_tools
2096
+ ]
2097
+
2098
+ return {
2099
+ 'success': True,
2100
+ 'tools': results,
2101
+ 'count': len(results),
2102
+ 'categories': list(tool_categories.keys()),
2103
+ 'hint': 'Use get_tool_schema(tool_name) to get full parameter details for a specific tool',
2104
+ }
2105
+
2106
+ async def _get_tool_schema(self, args: Dict[str, Any]) -> Dict[str, Any]:
2107
+ """Get the complete schema for a specific tool.
2108
+
2109
+ Enables LLMs to load tool definitions on-demand rather than all upfront,
2110
+ reducing context window usage per Anthropic's MCP efficiency recommendations.
2111
+ """
2112
+ tool_name = args.get('tool_name')
2113
+
2114
+ if not tool_name:
2115
+ return {'error': 'tool_name is required'}
2116
+
2117
+ # Get all tools and find the requested one
2118
+ all_tools = self._get_tools_from_a2a_server()
2119
+
2120
+ for tool in all_tools:
2121
+ if tool['name'] == tool_name:
2122
+ # Add usage examples for common workflows
2123
+ examples = self._get_tool_examples(tool_name)
2124
+
2125
+ return {
2126
+ 'success': True,
2127
+ 'tool': tool,
2128
+ 'examples': examples,
2129
+ 'hint': f'Call this tool with: {tool_name}({{...params}})',
2130
+ }
2131
+
2132
+ return {'error': f"Tool '{tool_name}' not found"}
2133
+
2134
    def _get_tool_examples(self, tool_name: str) -> List[Dict[str, Any]]:
        """Get code examples for efficient tool chaining.

        Provides examples showing how to chain tools efficiently,
        keeping intermediate results in code rather than context.

        Args:
            tool_name: Name of the tool to fetch examples for.

        Returns:
            A list of {'description', 'code'} dicts; empty when no curated
            examples exist for the tool.
        """
        # Static example table keyed by tool name.  The 'code' strings are
        # returned as data for the caller to display — nothing here is
        # executed by this server.
        examples = {
            'create_task': [
                {
                    'description': 'Create a task and monitor until complete',
                    'code': """
# Create task and poll for completion
result = await create_task(title="Process data", description="Analyze sales data")
task_id = result["task_id"]

# Poll for updates (efficient - only checks changed tasks)
while True:
    updates = await get_task_updates(task_ids=[task_id])
    if updates["updates"][0]["status"] in ["completed", "failed"]:
        break
    await asyncio.sleep(5)
""",
                }
            ],
            'send_message': [
                {
                    'description': 'Send message and continue conversation',
                    'code': """
# Start conversation
resp = await send_message(message="Hello, analyze this data")
conv_id = resp["conversation_id"]

# Continue same conversation thread
resp2 = await send_message(message="Now summarize the results", conversation_id=conv_id)
""",
                }
            ],
            'discover_agents': [
                {
                    'description': 'Find and delegate to a specialized agent',
                    'code': """
# Discover available agents
agents = await discover_agents()

# Find agent with specific capability
for agent in agents["agents"]:
    if "analysis" in agent["description"].lower():
        details = await get_agent(agent_name=agent["name"])
        # Delegate work to this agent...
        break
""",
                }
            ],
            'register_agent': [
                {
                    'description': 'Register as a worker agent on startup',
                    'code': """
# Register this agent to receive tasks
await register_agent(
    name="data-processor",
    description="Processes and analyzes data files",
    url="http://localhost:8001"
)

# Now poll for pending tasks
while True:
    tasks = await list_tasks(status="pending")
    if tasks["count"] > 0:
        # Claim and process first task...
        break
    await asyncio.sleep(5)
""",
                }
            ],
            'list_tasks': [
                {
                    'description': 'Process all pending tasks efficiently',
                    'code': """
# Get pending tasks in one call
pending = await list_tasks(status="pending")

# Process in code without returning to model each iteration
for task in pending["tasks"]:
    task_id = task["task_id"]
    # Process task...
    # Update status when done
""",
                }
            ],
        }

        # Tools without curated examples yield an empty list.
        return examples.get(tool_name, [])
2226
+
2227
+ async def start(self):
2228
+ """Start the HTTP MCP server."""
2229
+ logger.info(f'Starting MCP HTTP server on {self.host}:{self.port}')
2230
+ config = uvicorn.Config(
2231
+ self.app, host=self.host, port=self.port, log_level='info'
2232
+ )
2233
+ server = uvicorn.Server(config)
2234
+ await server.serve()
2235
+
2236
+
2237
async def run_mcp_http_server(
    host: str = '0.0.0.0', port: int = 9000, a2a_server=None
):
    """Run the MCP HTTP server connected to an A2A server.

    Args:
        host: Interface to bind to.
        port: TCP port to listen on.
        a2a_server: Optional A2A server instance to expose tools from.
    """
    await MCPHTTPServer(host=host, port=port, a2a_server=a2a_server).start()
2243
+
2244
+
2245
if __name__ == '__main__':
    import argparse

    # Minimal CLI: pick the bind address and port, then serve forever.
    cli = argparse.ArgumentParser(description='MCP HTTP Server')
    cli.add_argument(
        '--port', '-p', type=int, default=9000, help='Port to run on'
    )
    cli.add_argument(
        '--host', '-H', type=str, default='0.0.0.0', help='Host to bind to'
    )
    opts = cli.parse_args()
    asyncio.run(run_mcp_http_server(host=opts.host, port=opts.port))