langgraph-api 0.4.1__py3-none-any.whl → 0.4.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langgraph-api might be problematic. Click here for more details.

@@ -0,0 +1,1128 @@
1
+ """Implement A2A (Agent2Agent) endpoint for JSON-RPC 2.0 protocol.
2
+
3
+ The Agent2Agent (A2A) Protocol is an open standard designed to facilitate
4
+ communication and interoperability between independent AI agent systems.
5
+
6
+ A2A Protocol specification:
7
+ https://a2a-protocol.org/dev/specification/
8
+
9
+ The implementation currently supports JSON-RPC 2.0 transport only.
10
+ Streaming (SSE) and push notifications are not implemented.
11
+ """
12
+
13
+ import functools
14
+ import uuid
15
+ from datetime import UTC, datetime
16
+ from typing import Any, Literal, NotRequired, cast
17
+
18
+ import orjson
19
+ from langgraph_sdk.client import LangGraphClient, get_client
20
+ from starlette.datastructures import Headers
21
+ from starlette.responses import JSONResponse, Response
22
+ from structlog import getLogger
23
+ from typing_extensions import TypedDict
24
+
25
+ from langgraph_api.metadata import USER_API_URL
26
+ from langgraph_api.route import ApiRequest, ApiRoute
27
+ from langgraph_api.utils.cache import LRUCache
28
+
29
+ logger = getLogger(__name__)
30
+
31
+ # Cache for assistant schemas (assistant_id -> schemas dict)
32
+ _assistant_schemas_cache = LRUCache[dict[str, Any]](max_size=1000, ttl=60)
33
+
34
+
35
+ # ============================================================================
36
+ # JSON-RPC 2.0 Base Types (shared with MCP)
37
+ # ============================================================================
38
+
39
+
40
class JsonRpcErrorObject(TypedDict):
    """JSON-RPC 2.0 error object: numeric code, human-readable message, optional data."""

    code: int
    message: str
    data: NotRequired[Any]


class JsonRpcRequest(TypedDict):
    """JSON-RPC 2.0 request: carries both an ``id`` and a ``method`` (a reply is expected)."""

    jsonrpc: Literal["2.0"]
    id: str | int
    method: str
    params: NotRequired[dict[str, Any]]


class JsonRpcResponse(TypedDict):
    """JSON-RPC 2.0 response: exactly one of ``result`` or ``error`` is present."""

    jsonrpc: Literal["2.0"]
    id: str | int
    result: NotRequired[dict[str, Any]]
    error: NotRequired[JsonRpcErrorObject]


class JsonRpcNotification(TypedDict):
    """JSON-RPC 2.0 notification: has a ``method`` but no ``id`` (no reply expected)."""

    jsonrpc: Literal["2.0"]
    method: str
    params: NotRequired[dict[str, Any]]
64
+
65
+
66
+ # ============================================================================
67
+ # A2A Specific Error Codes
68
+ # ============================================================================
69
+
70
+ # Standard JSON-RPC error codes
71
+ ERROR_CODE_PARSE_ERROR = -32700
72
+ ERROR_CODE_INVALID_REQUEST = -32600
73
+ ERROR_CODE_METHOD_NOT_FOUND = -32601
74
+ ERROR_CODE_INVALID_PARAMS = -32602
75
+ ERROR_CODE_INTERNAL_ERROR = -32603
76
+
77
+ # A2A-specific error codes (in server error range -32000 to -32099)
78
+ ERROR_CODE_TASK_NOT_FOUND = -32001
79
+ ERROR_CODE_TASK_NOT_CANCELABLE = -32002
80
+ ERROR_CODE_PUSH_NOTIFICATION_NOT_SUPPORTED = -32003
81
+ ERROR_CODE_UNSUPPORTED_OPERATION = -32004
82
+ ERROR_CODE_CONTENT_TYPE_NOT_SUPPORTED = -32005
83
+ ERROR_CODE_INVALID_AGENT_RESPONSE = -32006
84
+
85
+
86
+ # ============================================================================
87
+ # Constants and Configuration
88
+ # ============================================================================
89
+
90
+ A2A_PROTOCOL_VERSION = "0.3.0"
91
+
92
+
93
@functools.lru_cache(maxsize=1)
def _client() -> LangGraphClient:
    """Get a client for local operations.

    ``lru_cache(maxsize=1)`` makes this a lazily-created process-wide
    singleton: the client is built on first use and reused thereafter.
    """
    return get_client(url=None)
97
+
98
+
99
def _get_version() -> str:
    """Return the installed ``langgraph_api`` package version string."""
    # Imported lazily to avoid a circular import at module load time.
    from langgraph_api import __version__ as package_version

    return package_version
104
+
105
+
106
+ def _generate_task_id() -> str:
107
+ """Generate a unique task ID."""
108
+ return str(uuid.uuid4())
109
+
110
+
111
+ def _generate_context_id() -> str:
112
+ """Generate a unique context ID."""
113
+ return str(uuid.uuid4())
114
+
115
+
116
+ def _generate_timestamp() -> str:
117
+ """Generate ISO 8601 timestamp."""
118
+ return datetime.now(UTC).isoformat()
119
+
120
+
121
async def _get_assistant(
    client: LangGraphClient, assistant_id: str, headers: Headers | dict[str, Any] | None
) -> dict[str, Any]:
    """Fetch an assistant by ID, translating transport failures into ValueError.

    Args:
        client: LangGraph client used for the lookup
        assistant_id: The assistant ID to get
        headers: Request headers (forwarded for auth)

    Returns:
        The assistant dictionary

    Raises:
        ValueError: "not found" wording on an HTTP 404, a generic failure
            message for any other error
    """
    try:
        return await client.assistants.get(assistant_id, headers=headers)
    except Exception as e:
        # Walk exception -> response -> status_code defensively; any missing
        # attribute falls through to the generic error below.
        response = getattr(e, "response", None)
        if response is not None and getattr(response, "status_code", None) == 404:
            raise ValueError(f"Assistant '{assistant_id}' not found") from e
        raise ValueError(f"Failed to get assistant '{assistant_id}': {e}") from e
147
+
148
+
149
async def _validate_supports_messages(
    client: LangGraphClient,
    assistant: dict[str, Any],
    headers: Headers | dict[str, Any] | None,
    parts: list[dict[str, Any]],
) -> dict[str, Any]:
    """Validate that assistant supports messages if text parts are present.

    If the parts contain text parts, the agent must support the 'messages' field.
    If the parts only contain data parts, no validation is performed.

    Args:
        client: LangGraph client
        assistant: The assistant dictionary
        headers: Request headers
        parts: The original A2A message parts

    Returns:
        The schemas dictionary from the assistant

    Raises:
        ValueError: If assistant doesn't support messages when text parts are present
    """
    assistant_id = assistant["assistant_id"]

    # Schemas are cached per assistant (module-level LRU, 60s TTL) so repeated
    # message/send calls don't re-fetch them on every request.
    cached_schemas = _assistant_schemas_cache.get(assistant_id)
    if cached_schemas is not None:
        schemas = cached_schemas
    else:
        try:
            schemas = await client.assistants.get_schemas(assistant_id, headers=headers)
            _assistant_schemas_cache.set(assistant_id, schemas)
        except Exception as e:
            raise ValueError(
                f"Failed to get schemas for assistant '{assistant_id}': {e}"
            ) from e

    # Validate messages field only if there are text parts
    has_text_parts = any(part.get("kind") == "text" for part in parts)
    if has_text_parts:
        input_schema = schemas.get("input_schema")
        if not input_schema:
            raise ValueError(
                f"Assistant '{assistant_id}' has no input schema defined. "
                f"A2A conversational agents using text parts must have an input schema with a 'messages' field."
            )

        properties = input_schema.get("properties", {})
        if "messages" not in properties:
            graph_id = assistant["graph_id"]
            raise ValueError(
                f"Assistant '{assistant_id}' (graph '{graph_id}') does not support A2A conversational messages. "
                f"Graph input schema must include a 'messages' field to accept text parts. "
                f"Available input fields: {list(properties.keys())}"
            )

    return schemas
206
+
207
+
208
+ def _process_a2a_message_parts(
209
+ parts: list[dict[str, Any]], message_role: str
210
+ ) -> dict[str, Any]:
211
+ """Convert A2A message parts to LangChain messages format.
212
+
213
+ Args:
214
+ parts: List of A2A message parts
215
+ message_role: A2A message role ("user" or "agent")
216
+
217
+ Returns:
218
+ Input content with messages in LangChain format
219
+
220
+ Raises:
221
+ ValueError: If message parts are invalid
222
+ """
223
+ messages = []
224
+ additional_data = {}
225
+
226
+ for part in parts:
227
+ part_kind = part.get("kind")
228
+
229
+ if part_kind == "text":
230
+ # Text parts become messages with role based on A2A message role
231
+ if "text" not in part:
232
+ raise ValueError("TextPart must contain a 'text' field")
233
+
234
+ # Map A2A role to LangGraph role
235
+ langgraph_role = "human" if message_role == "user" else "assistant"
236
+ messages.append({"role": langgraph_role, "content": part["text"]})
237
+
238
+ elif part_kind == "data":
239
+ # Data parts become structured input parameters
240
+ part_data = part.get("data", {})
241
+ if not isinstance(part_data, dict):
242
+ raise ValueError(
243
+ "DataPart must contain a JSON object in the 'data' field"
244
+ )
245
+ additional_data.update(part_data)
246
+
247
+ else:
248
+ raise ValueError(
249
+ f"Unsupported part kind '{part_kind}'. "
250
+ f"A2A agents support 'text' and 'data' parts only."
251
+ )
252
+
253
+ if not messages and not additional_data:
254
+ raise ValueError("Message must contain at least one valid text or data part")
255
+
256
+ # Create input with messages in LangChain format
257
+ input_content = {}
258
+ if messages:
259
+ input_content["messages"] = messages
260
+ if additional_data:
261
+ input_content.update(additional_data)
262
+
263
+ return input_content
264
+
265
+
266
+ def _extract_a2a_response(result: dict[str, Any]) -> str:
267
+ """Extract the last assistant message from graph execution result.
268
+
269
+ Args:
270
+ result: Graph execution result
271
+
272
+ Returns:
273
+ Content of the last assistant message
274
+
275
+ Raises:
276
+ ValueError: If result doesn't contain messages or is invalid
277
+ """
278
+ if "__error__" in result:
279
+ # Let the caller handle errors
280
+ return str(result)
281
+
282
+ if "messages" not in result:
283
+ # Fallback to the full result if no messages schema. It is not optimal to do A2A on assistants without
284
+ # a messages key, but it is not a hard requirement.
285
+ return str(result)
286
+
287
+ messages = result["messages"]
288
+ if not isinstance(messages, list) or not messages:
289
+ return str(result)
290
+
291
+ # Find the last assistant message
292
+ for message in reversed(messages):
293
+ if (
294
+ isinstance(message, dict)
295
+ and message.get("role") == "assistant"
296
+ and "content" in message
297
+ or message.get("type") == "ai"
298
+ and "content" in message
299
+ ):
300
+ return message["content"]
301
+
302
+ # If no assistant message found, return the last message content
303
+ last_message = messages[-1]
304
+ if isinstance(last_message, dict):
305
+ return last_message.get("content", str(last_message))
306
+
307
+ return str(last_message)
308
+
309
+
310
def _map_runs_create_error_to_rpc(
    exception: Exception, assistant_id: str, thread_id: str | None = None
) -> dict[str, Any]:
    """Map runs.create() exceptions to A2A JSON-RPC error responses.

    Args:
        exception: Exception from runs.create()
        assistant_id: The assistant ID that was used
        thread_id: The thread ID that was used (if any)

    Returns:
        A2A error response dictionary of the form ``{"error": {...}}``
    """

    def _invalid_params(
        msg: str, data: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        # All mapped HTTP failures surface as INVALID_PARAMS errors.
        error: dict[str, Any] = {"code": ERROR_CODE_INVALID_PARAMS, "message": msg}
        if data is not None:
            error["data"] = data
        return {"error": error}

    # Sentinel-based lookup: equivalent to the hasattr chain on
    # exception.response.status_code, without attribute errors.
    _missing = object()
    status = getattr(getattr(exception, "response", _missing), "status_code", _missing)

    if status is not _missing:
        detail = str(exception)

        if status == 404:
            # Distinguish a missing thread from a missing assistant.
            if "thread" in detail.lower() or "Thread" in detail:
                return _invalid_params(
                    f"Thread '{thread_id}' not found. Please create the thread first before sending messages to it.",
                    {"thread_id": thread_id, "error_type": "thread_not_found"},
                )
            return _invalid_params(f"Assistant '{assistant_id}' not found")
        if status == 400:
            return _invalid_params(f"Invalid request: {detail}")
        if status == 403:
            return _invalid_params("Access denied to assistant or thread")
        return _invalid_params(f"Failed to create run: {detail}")

    return {
        "error": {
            "code": ERROR_CODE_INTERNAL_ERROR,
            "message": f"Internal server error: {str(exception)}",
        }
    }
375
+
376
+
377
def _map_runs_get_error_to_rpc(
    exception: Exception, task_id: str, thread_id: str
) -> dict[str, Any]:
    """Map runs.get() exceptions to A2A JSON-RPC error responses.

    Args:
        exception: Exception from runs.get()
        task_id: The task/run ID that was requested
        thread_id: The thread ID that was requested

    Returns:
        A2A error response dictionary of the form ``{"error": {...}}``
    """
    response = getattr(exception, "response", None)
    if response is not None and hasattr(response, "status_code"):
        status = response.status_code
        detail = str(exception)

        # 404 is the only case with a dedicated A2A error code (TaskNotFound);
        # everything else maps to the generic INVALID_PARAMS code.
        if status == 404:
            code = ERROR_CODE_TASK_NOT_FOUND
            msg = f"Task '{task_id}' not found in thread '{thread_id}'"
        elif status == 400:
            code = ERROR_CODE_INVALID_PARAMS
            msg = f"Invalid request: {detail}"
        elif status == 403:
            code = ERROR_CODE_INVALID_PARAMS
            msg = "Access denied to task"
        else:
            code = ERROR_CODE_INVALID_PARAMS
            msg = f"Failed to get task: {detail}"
        return {"error": {"code": code, "message": msg}}

    return {
        "error": {
            "code": ERROR_CODE_INTERNAL_ERROR,
            "message": f"Internal server error: {str(exception)}",
        }
    }
431
+
432
+
433
def _create_task_response(
    task_id: str,
    context_id: str,
    message: dict[str, Any],
    result: dict[str, Any],
    assistant_id: str,
) -> dict[str, Any]:
    """Create A2A Task response structure for both success and failure cases.

    Args:
        task_id: The task/run ID
        context_id: The context/thread ID
        message: Original A2A message from request (echoed into history)
        result: LangGraph execution result; an "__error__" key marks failure
        assistant_id: The assistant ID used

    Returns:
        ``{"result": Task}`` with a terminal "completed" or "failed" status
    """
    base_task: dict[str, Any] = {
        "id": task_id,
        "contextId": context_id,
        # Echo the request message into history, normalized as an A2A message.
        "history": [
            {**message, "taskId": task_id, "contextId": context_id, "kind": "message"}
        ],
        "kind": "task",
    }

    if "__error__" in result:
        base_task["status"] = {
            "state": "failed",
            # FIX: timestamp was only set on the "completed" branch; include it
            # here too so both terminal statuses have consistent shape.
            "timestamp": _generate_timestamp(),
            "message": {
                "role": "agent",
                "parts": [
                    {
                        "kind": "text",
                        "text": f"Error executing assistant: {result['__error__']['error']}",
                    }
                ],
                "messageId": _generate_task_id(),
                "taskId": task_id,
                "contextId": context_id,
                "kind": "message",
            },
        }
    else:
        # Successful runs surface the assistant's reply as a single artifact.
        artifact_id = _generate_task_id()
        artifacts = [
            {
                "artifactId": artifact_id,
                "name": "Assistant Response",
                "description": f"Response from assistant {assistant_id}",
                "parts": [
                    {
                        "kind": "text",
                        "text": _extract_a2a_response(result),
                    }
                ],
            }
        ]

        base_task["status"] = {
            "state": "completed",
            "timestamp": _generate_timestamp(),
        }
        base_task["artifacts"] = artifacts

    return {"result": base_task}
501
+
502
+
503
+ # ============================================================================
504
+ # Main A2A Endpoint Handler
505
+ # ============================================================================
506
+
507
+
508
def handle_get_request() -> Response:
    """Handle HTTP GET requests (streaming not currently supported).

    A future SSE streaming implementation would be served here.

    Returns:
        405 Method Not Allowed
    """
    return Response(status_code=405)
515
+
516
+
517
def handle_delete_request() -> Response:
    """Handle HTTP DELETE requests (session termination not currently supported).

    Returns:
        405 Method Not Allowed
    """
    return Response(status_code=405)
524
+
525
+
526
async def handle_post_request(request: ApiRequest, assistant_id: str) -> Response:
    """Handle HTTP POST requests containing JSON-RPC messages.

    Args:
        request: The incoming HTTP request
        assistant_id: The assistant ID from the URL path

    Returns:
        JSON-RPC response
    """
    body = await request.body()

    try:
        message = orjson.loads(body)
    except orjson.JSONDecodeError:
        return create_error_response("Invalid JSON payload", 400)

    # NOTE: the Accept header is checked after JSON parsing, so a payload that
    # is both malformed and mis-negotiated reports the JSON error first.
    if not is_valid_accept_header(request):
        return create_error_response("Accept header must include application/json", 400)

    if not isinstance(message, dict):
        return create_error_response("Invalid message format", 400)

    if message.get("jsonrpc") != "2.0":
        return create_error_response(
            "Invalid JSON-RPC message. Missing or invalid jsonrpc version", 400
        )

    # Route based on message type
    # Per JSON-RPC 2.0: id + method => request; id alone => response;
    # method alone => notification.
    id_ = message.get("id")
    method = message.get("method")

    if id_ is not None and method:
        # JSON-RPC request
        return await handle_jsonrpc_request(
            request, cast(JsonRpcRequest, message), assistant_id
        )
    elif id_ is not None:
        # JSON-RPC response (not expected in A2A server context)
        return handle_jsonrpc_response()
    elif method:
        # JSON-RPC notification
        return handle_jsonrpc_notification(cast(JsonRpcNotification, message))
    else:
        return create_error_response(
            "Invalid message format. Message must be a JSON-RPC request, "
            "response, or notification",
            400,
        )
575
+
576
+
577
def is_valid_accept_header(request: ApiRequest) -> bool:
    """Report whether the request's Accept header permits JSON responses.

    Args:
        request: The incoming request

    Returns:
        True if the Accept header contains ``application/json``
    """
    return "application/json" in request.headers.get("Accept", "")
588
+
589
+
590
def create_error_response(message: str, status_code: int) -> Response:
    """Build a plain JSON error response with body ``{"error": message}``.

    Args:
        message: Error message
        status_code: HTTP status code

    Returns:
        JSON error response
    """
    payload = orjson.dumps({"error": message})
    return Response(
        content=payload,
        status_code=status_code,
        media_type="application/json",
    )
605
+
606
+
607
+ # ============================================================================
608
+ # JSON-RPC Message Handlers
609
+ # ============================================================================
610
+
611
+
612
async def handle_jsonrpc_request(
    request: ApiRequest, message: JsonRpcRequest, assistant_id: str
) -> Response:
    """Handle JSON-RPC requests with A2A methods.

    Args:
        request: The HTTP request
        message: Parsed JSON-RPC request
        assistant_id: The assistant ID from the URL path

    Returns:
        JSON-RPC response
    """
    method = message["method"]
    params = message.get("params", {})

    # Route to appropriate A2A method handler
    if method == "message/send":
        result_or_error = await handle_message_send(request, params, assistant_id)
    elif method == "tasks/get":
        result_or_error = await handle_tasks_get(request, params)
    elif method == "tasks/cancel":
        result_or_error = await handle_tasks_cancel(request, params)
    else:
        result_or_error = {
            "error": {
                "code": ERROR_CODE_METHOD_NOT_FOUND,
                "message": f"Method not found: {method}",
            }
        }

    # Invariant: every handler returns exactly one of "result" or "error"
    # (JSON-RPC 2.0 forbids responses carrying both, or neither).
    response_keys = set(result_or_error.keys())
    if not (response_keys == {"result"} or response_keys == {"error"}):
        raise AssertionError(
            "Internal server error. Invalid response format in A2A implementation"
        )

    return JSONResponse(
        {
            "jsonrpc": "2.0",
            "id": message["id"],
            **result_or_error,
        }
    )
656
+
657
+
658
def handle_jsonrpc_response() -> Response:
    """Handle JSON-RPC responses (not expected in server context).

    The A2A server never issues JSON-RPC requests of its own, so an incoming
    response is simply acknowledged and dropped.

    Returns:
        202 Accepted acknowledgement
    """
    return Response(status_code=202)
668
+
669
+
670
def handle_jsonrpc_notification(message: JsonRpcNotification) -> Response:
    """Handle JSON-RPC notifications.

    Notifications carry no ``id`` and expect no reply, so the payload is
    accepted and intentionally ignored.

    Args:
        message: Parsed JSON-RPC notification (currently unused)

    Returns:
        202 Accepted acknowledgement
    """
    return Response(status_code=202)
680
+
681
+
682
+ # ============================================================================
683
+ # A2A Method Implementations
684
+ # ============================================================================
685
+
686
+
687
async def handle_message_send(
    request: ApiRequest, params: dict[str, Any], assistant_id: str
) -> dict[str, Any]:
    """Handle message/send requests to create or continue tasks.

    This method:
    1. Accepts A2A Messages containing text/file/data parts
    2. Maps to LangGraph assistant execution
    3. Returns Task objects with status and results

    Args:
        request: HTTP request for auth/headers
        params: A2A MessageSendParams
        assistant_id: The target assistant ID from the URL

    Returns:
        {"result": Task} or {"error": JsonRpcErrorObject}
    """
    client = _client()

    try:
        message = params.get("message")
        if not message:
            return {
                "error": {
                    "code": ERROR_CODE_INVALID_PARAMS,
                    "message": "Missing 'message' in params",
                }
            }

        parts = message.get("parts", [])
        if not parts:
            return {
                "error": {
                    "code": ERROR_CODE_INVALID_PARAMS,
                    "message": "Message must contain at least one part",
                }
            }

        # Resolve the assistant and check its schema supports the given parts;
        # both helpers raise ValueError with user-facing messages.
        try:
            assistant = await _get_assistant(client, assistant_id, request.headers)
            await _validate_supports_messages(client, assistant, request.headers, parts)
        except ValueError as e:
            return {
                "error": {
                    "code": ERROR_CODE_INVALID_PARAMS,
                    "message": str(e),
                }
            }

        # Process A2A message parts into LangChain messages format
        try:
            message_role = message.get(
                "role", "user"
            )  # Default to "user" if role not specified
            input_content = _process_a2a_message_parts(parts, message_role)
        except ValueError as e:
            return {
                "error": {
                    "code": ERROR_CODE_CONTENT_TYPE_NOT_SUPPORTED,
                    "message": str(e),
                }
            }

        # A2A contextId doubles as the LangGraph thread_id; absent means a
        # new thread is created by runs.create().
        context_id = message.get("contextId")
        thread_id = context_id if context_id else None

        try:
            # Creating + joining separately so we can get the run id
            run = await client.runs.create(
                thread_id=thread_id,
                assistant_id=assistant_id,
                input=input_content,
                headers=request.headers,
            )
        except Exception as e:
            error_response = _map_runs_create_error_to_rpc(e, assistant_id, thread_id)
            # Unmapped failures re-raise to be caught by the outer handler.
            if error_response.get("error", {}).get("code") == ERROR_CODE_INTERNAL_ERROR:
                raise
            return error_response

        # Block until the run finishes and collect its final state.
        result = await client.runs.join(
            thread_id=run["thread_id"],
            run_id=run["run_id"],
            headers=request.headers,
        )

        task_id = run["run_id"]
        # NOTE(review): uses the *requested* thread_id, or a fresh UUID when
        # none was supplied — not run["thread_id"]; confirm intended.
        context_id = thread_id or _generate_context_id()

        return _create_task_response(
            task_id=task_id,
            context_id=context_id,
            message=message,
            result=result,
            assistant_id=assistant_id,
        )

    except Exception as e:
        logger.exception(f"Error in message/send for assistant {assistant_id}")
        return {
            "error": {
                "code": ERROR_CODE_INTERNAL_ERROR,
                "message": f"Internal server error: {str(e)}",
            }
        }
793
+
794
+
795
async def handle_tasks_get(
    request: ApiRequest, params: dict[str, Any]
) -> dict[str, Any]:
    """Handle tasks/get requests to retrieve task status.

    This method:
    1. Accepts task ID from params
    2. Maps to LangGraph run/thread status
    3. Returns current Task state and results

    Args:
        request: HTTP request for auth/headers
        params: A2A TaskQueryParams containing task ID

    Returns:
        {"result": Task} or {"error": JsonRpcErrorObject}
    """
    client = _client()

    try:
        # A2A task id maps to a LangGraph run_id; contextId to a thread_id.
        task_id = params.get("id")
        context_id = params.get("contextId")

        if not task_id:
            return {
                "error": {
                    "code": ERROR_CODE_INVALID_PARAMS,
                    "message": "Missing required parameter: id (task_id)",
                }
            }

        if not context_id:
            return {
                "error": {
                    "code": ERROR_CODE_INVALID_PARAMS,
                    "message": "Missing required parameter: contextId (thread_id)",
                }
            }

        try:
            run_info = await client.runs.get(
                thread_id=context_id,
                run_id=task_id,
                headers=request.headers,
            )
        except Exception as e:
            error_response = _map_runs_get_error_to_rpc(e, task_id, context_id)
            if error_response.get("error", {}).get("code") == ERROR_CODE_INTERNAL_ERROR:
                # For unmapped errors, re-raise to be caught by outer exception handler
                raise
            return error_response

        assistant_id = run_info.get("assistant_id")
        if assistant_id:
            try:
                # Verify that the assistant exists
                await _get_assistant(client, assistant_id, request.headers)
            except ValueError as e:
                return {
                    "error": {
                        "code": ERROR_CODE_INVALID_PARAMS,
                        "message": str(e),
                    }
                }

        lg_status = run_info.get("status", "unknown")

        # Map LangGraph run status -> A2A TaskState:
        #   pending -> submitted, running -> working, success -> completed,
        #   error/timeout/interrupted -> failed, anything else -> submitted.
        if lg_status == "pending":
            a2a_state = "submitted"
        elif lg_status == "running":
            a2a_state = "working"
        elif lg_status == "success":
            a2a_state = "completed"
        elif lg_status in ["error", "timeout", "interrupted"]:
            a2a_state = "failed"
        else:
            a2a_state = "submitted"

        # Build the A2A Task response
        task_response = {
            "id": task_id,
            "contextId": context_id,
            "status": {
                "state": a2a_state,
            },
        }

        # Add result message if completed
        # NOTE(review): unlike _create_task_response, these status messages
        # carry no contextId/kind fields — confirm whether that is intended.
        if a2a_state == "completed":
            task_response["status"]["message"] = {
                "role": "agent",
                "parts": [{"kind": "text", "text": "Task completed successfully"}],
                "messageId": _generate_task_id(),
                "taskId": task_id,
            }
        elif a2a_state == "failed":
            task_response["status"]["message"] = {
                "role": "agent",
                "parts": [
                    {"kind": "text", "text": f"Task failed with status: {lg_status}"}
                ],
                "messageId": _generate_task_id(),
                "taskId": task_id,
            }

        return {"result": task_response}

    except Exception as e:
        await logger.aerror(
            f"Error in tasks/get for task {params.get('id')}: {str(e)}", exc_info=True
        )
        return {
            "error": {
                "code": ERROR_CODE_INTERNAL_ERROR,
                "message": f"Internal server error: {str(e)}",
            }
        }
912
+
913
+
914
async def handle_tasks_cancel(
    request: ApiRequest, params: dict[str, Any]
) -> dict[str, Any]:
    """Handle tasks/cancel requests (currently unimplemented).

    A full implementation would map the A2A task ID to a LangGraph run,
    cancel the run via the client, and return the Task in a canceled state.
    Until then, every call reports an unsupported-operation error.

    Args:
        request: HTTP request for auth/headers
        params: A2A TaskIdParams containing task ID

    Returns:
        {"error": JsonRpcErrorObject} — always, for now
    """
    # TODO: Implement tasks/cancel
    # - Extract task_id from params
    # - Map task_id to run_id
    # - Cancel run via client if possible
    # - Return updated Task with canceled status
    error = {
        "code": ERROR_CODE_UNSUPPORTED_OPERATION,
        "message": "Task cancellation is not currently supported",
    }
    return {"error": error}
943
+
944
+
945
+ # ============================================================================
946
+ # Agent Card Generation
947
+ # ============================================================================
948
+
949
+
950
async def generate_agent_card(request: ApiRequest, assistant_id: str) -> dict[str, Any]:
    """Generate A2A Agent Card for a specific assistant.

    Each LangGraph assistant becomes its own A2A agent with a dedicated
    agent card describing its individual capabilities and skills.

    Args:
        request: HTTP request for auth/headers
        assistant_id: The specific assistant ID to generate card for

    Returns:
        A2A AgentCard dictionary for the specific assistant
    """
    client = _client()

    assistant = await _get_assistant(client, assistant_id, request.headers)
    # NOTE(review): schemas are fetched directly here; the module-level
    # _assistant_schemas_cache used during validation is not consulted.
    schemas = await client.assistants.get_schemas(assistant_id, headers=request.headers)

    # Extract schema information for metadata
    input_schema = schemas.get("input_schema", {})
    properties = input_schema.get("properties", {})
    required = input_schema.get("required", [])

    assistant_name = assistant["name"]
    assistant_description = assistant.get("description", f"{assistant_name} assistant")

    # For now, each assistant has one main skill - itself
    skills = [
        {
            "id": f"{assistant_id}-main",
            "name": f"{assistant_name} Capabilities",
            "description": assistant_description,
            "tags": ["assistant", "langgraph"],
            "examples": [],
            "inputModes": ["application/json", "text/plain"],
            "outputModes": ["application/json", "text/plain"],
            "metadata": {
                "inputSchema": {
                    "required": required,
                    "properties": sorted(properties.keys()),
                    # Text parts need a 'messages' input field (see validation).
                    "supportsA2A": "messages" in properties,
                }
            },
        }
    ]

    if USER_API_URL:
        base_url = USER_API_URL.rstrip("/")
    else:
        # Fallback to constructing from request
        scheme = request.url.scheme
        host = request.url.hostname or "localhost"
        port = request.url.port
        # Omit the port only when it is the scheme's default (80/443).
        if port and (
            (scheme == "http" and port != 80) or (scheme == "https" and port != 443)
        ):
            base_url = f"{scheme}://{host}:{port}"
        else:
            base_url = f"{scheme}://{host}"

    return {
        "protocolVersion": A2A_PROTOCOL_VERSION,
        "name": assistant_name,
        "description": assistant_description,
        "url": f"{base_url}/a2a/{assistant_id}",
        "preferredTransport": "JSONRPC",
        "capabilities": {
            "streaming": False,  # Not implemented yet
            "pushNotifications": False,  # Not implemented yet
            "stateTransitionHistory": False,
        },
        "defaultInputModes": ["application/json", "text/plain"],
        "defaultOutputModes": ["application/json", "text/plain"],
        "skills": skills,
        "version": _get_version(),
    }
1026
+
1027
+
1028
async def handle_agent_card_endpoint(request: ApiRequest) -> Response:
    """Serve Agent Card for a specific assistant.

    Expected URL: /.well-known/agent-card.json?assistant_id=uuid

    Args:
        request: HTTP request

    Returns:
        JSON response with Agent Card for the specific assistant
    """

    def _error(code: int, message: str, status: int) -> Response:
        # Error bodies mirror the JSON-RPC error-object shape used elsewhere.
        payload = {"error": {"code": code, "message": message}}
        return Response(
            content=orjson.dumps(payload),
            status_code=status,
            media_type="application/json",
        )

    try:
        # Get assistant_id from query parameters
        assistant_id = request.query_params.get("assistant_id")
        if not assistant_id:
            return _error(
                ERROR_CODE_INVALID_PARAMS,
                "Missing required query parameter: assistant_id",
                400,
            )

        agent_card = await generate_agent_card(request, assistant_id)
        return JSONResponse(agent_card)

    except ValueError as e:
        # A2A validation error or assistant not found
        return _error(ERROR_CODE_INVALID_PARAMS, str(e), 400)
    except Exception as e:
        logger.exception("Failed to generate agent card")
        return _error(
            ERROR_CODE_INTERNAL_ERROR, f"Internal server error: {str(e)}", 500
        )
1085
+
1086
+
1087
+ # ============================================================================
1088
+ # Route Definitions
1089
+ # ============================================================================
1090
+
1091
+
1092
+ async def handle_a2a_assistant_endpoint(request: ApiRequest) -> Response:
1093
+ """A2A endpoint handler for specific assistant.
1094
+
1095
+ Expected URL: /a2a/{assistant_id}
1096
+
1097
+ Args:
1098
+ request: The incoming HTTP request
1099
+
1100
+ Returns:
1101
+ JSON-RPC response or appropriate HTTP error response
1102
+ """
1103
+ # Extract assistant_id from URL path params
1104
+ assistant_id = request.path_params.get("assistant_id")
1105
+ if not assistant_id:
1106
+ return create_error_response("Missing assistant ID in URL", 400)
1107
+
1108
+ if request.method == "POST":
1109
+ return await handle_post_request(request, assistant_id)
1110
+ elif request.method == "GET":
1111
+ return handle_get_request()
1112
+ elif request.method == "DELETE":
1113
+ return handle_delete_request()
1114
+ else:
1115
+ return Response(status_code=405) # Method Not Allowed
1116
+
1117
+
1118
a2a_routes = [
    # Per-assistant A2A endpoints: /a2a/{assistant_id}
    ApiRoute(
        "/a2a/{assistant_id}",
        handle_a2a_assistant_endpoint,
        methods=["GET", "POST", "DELETE"],
    ),
    # A2A discovery document (Agent Card), selected via ?assistant_id=...
    ApiRoute(
        "/.well-known/agent-card.json", handle_agent_card_endpoint, methods=["GET"]
    ),
]