langgraph-api 0.4.23__tar.gz → 0.4.25__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of langgraph-api might be problematic.

Files changed (123)
  1. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/PKG-INFO +1 -1
  2. langgraph_api-0.4.25/healthcheck.py +48 -0
  3. langgraph_api-0.4.25/langgraph_api/__init__.py +1 -0
  4. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/a2a.py +130 -21
  5. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/queue_entrypoint.py +1 -1
  6. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/stream.py +11 -6
  7. langgraph_api-0.4.23/healthcheck.py +0 -19
  8. langgraph_api-0.4.23/langgraph_api/__init__.py +0 -1
  9. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/.gitignore +0 -0
  10. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/LICENSE +0 -0
  11. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/Makefile +0 -0
  12. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/README.md +0 -0
  13. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/.gitignore +0 -0
  14. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/Makefile +0 -0
  15. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/README.md +0 -0
  16. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/burst.js +0 -0
  17. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/clean.js +0 -0
  18. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/graphs.js +0 -0
  19. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/package.json +0 -0
  20. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/ramp.js +0 -0
  21. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/update-revision.js +0 -0
  22. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/benchmark/weather.js +0 -0
  23. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/constraints.txt +0 -0
  24. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/forbidden.txt +0 -0
  25. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/__init__.py +0 -0
  26. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/assistants.py +0 -0
  27. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/mcp.py +0 -0
  28. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/meta.py +0 -0
  29. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/openapi.py +0 -0
  30. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/runs.py +0 -0
  31. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/store.py +0 -0
  32. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/threads.py +0 -0
  33. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/ui.py +0 -0
  34. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/asgi_transport.py +0 -0
  35. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/asyncio.py +0 -0
  36. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/__init__.py +0 -0
  37. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/custom.py +0 -0
  38. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/langsmith/__init__.py +0 -0
  39. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/langsmith/backend.py +0 -0
  40. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/langsmith/client.py +0 -0
  41. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/middleware.py +0 -0
  42. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/noop.py +0 -0
  43. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/auth/studio_user.py +0 -0
  44. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/cli.py +0 -0
  45. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/command.py +0 -0
  46. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/config.py +0 -0
  47. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/cron_scheduler.py +0 -0
  48. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/errors.py +0 -0
  49. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/executor_entrypoint.py +0 -0
  50. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/feature_flags.py +0 -0
  51. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/graph.py +0 -0
  52. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/http.py +0 -0
  53. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/http_metrics.py +0 -0
  54. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/.gitignore +0 -0
  55. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/.prettierrc +0 -0
  56. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/__init__.py +0 -0
  57. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/base.py +0 -0
  58. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/build.mts +0 -0
  59. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/client.http.mts +0 -0
  60. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/client.mts +0 -0
  61. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/errors.py +0 -0
  62. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/global.d.ts +0 -0
  63. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/package.json +0 -0
  64. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/remote.py +0 -0
  65. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/schema.py +0 -0
  66. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/graph.mts +0 -0
  67. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/load.hooks.mjs +0 -0
  68. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/preload.mjs +0 -0
  69. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/utils/files.mts +0 -0
  70. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/utils/importMap.mts +0 -0
  71. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/utils/pythonSchemas.mts +0 -0
  72. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/src/utils/serde.mts +0 -0
  73. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/sse.py +0 -0
  74. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/traceblock.mts +0 -0
  75. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/tsconfig.json +0 -0
  76. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/ui.py +0 -0
  77. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/js/yarn.lock +0 -0
  78. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/logging.py +0 -0
  79. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/metadata.py +0 -0
  80. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/middleware/__init__.py +0 -0
  81. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/middleware/http_logger.py +0 -0
  82. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/middleware/private_network.py +0 -0
  83. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/middleware/request_id.py +0 -0
  84. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/models/__init__.py +0 -0
  85. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/models/run.py +0 -0
  86. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/patch.py +0 -0
  87. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/route.py +0 -0
  88. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/schema.py +0 -0
  89. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/serde.py +0 -0
  90. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/server.py +0 -0
  91. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/sse.py +0 -0
  92. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/state.py +0 -0
  93. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/store.py +0 -0
  94. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/thread_ttl.py +0 -0
  95. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/traceblock.py +0 -0
  96. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/tunneling/cloudflare.py +0 -0
  97. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/__init__.py +0 -0
  98. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/cache.py +0 -0
  99. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/config.py +0 -0
  100. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/future.py +0 -0
  101. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/headers.py +0 -0
  102. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/retriable_client.py +0 -0
  103. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/stream_codec.py +0 -0
  104. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/utils/uuids.py +0 -0
  105. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/validation.py +0 -0
  106. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/webhook.py +0 -0
  107. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/worker.py +0 -0
  108. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_license/__init__.py +0 -0
  109. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_license/validation.py +0 -0
  110. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/__init__.py +0 -0
  111. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/checkpoint.py +0 -0
  112. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/database.py +0 -0
  113. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/lifespan.py +0 -0
  114. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/metrics.py +0 -0
  115. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/ops.py +0 -0
  116. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/queue.py +0 -0
  117. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/retry.py +0 -0
  118. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_runtime/store.py +0 -0
  119. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/logging.json +0 -0
  120. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/openapi.json +0 -0
  121. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/pyproject.toml +0 -0
  122. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/scripts/create_license.py +0 -0
  123. {langgraph_api-0.4.23 → langgraph_api-0.4.25}/uv.lock +0 -0
{langgraph_api-0.4.23 → langgraph_api-0.4.25}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-api
-Version: 0.4.23
+Version: 0.4.25
 Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
 License: Elastic-2.0
 License-File: LICENSE
langgraph_api-0.4.25/healthcheck.py (new file)

@@ -0,0 +1,48 @@
+import json
+import os
+import urllib.request
+from ipaddress import IPv6Address, ip_address
+
+
+def get_healthcheck_host() -> str:
+    server_host = os.environ.get("LANGGRAPH_SERVER_HOST", "0.0.0.0")
+    if server_host in (
+        "0.0.0.0",  # IPv4 wildcard
+        "",  # IPv4/IPv6 dual-stack
+    ):
+        return "localhost"
+
+    try:
+        server_host_ip = ip_address(server_host)
+    except ValueError:
+        return server_host
+
+    return (
+        f"[{server_host_ip.compressed}]"
+        if isinstance(server_host_ip, IPv6Address)
+        else server_host_ip.compressed
+    )
+
+
+def healthcheck():
+    host = get_healthcheck_host()
+
+    prefix = ""
+    mount_prefix = None
+    # Override prefix if it's set in the http config
+    if (http := os.environ.get("LANGGRAPH_HTTP")) and (
+        mount_prefix := json.loads(http).get("mount_prefix")
+    ):
+        prefix = mount_prefix
+    # Override that
+    if os.environ.get("MOUNT_PREFIX"):
+        prefix = os.environ["MOUNT_PREFIX"]
+
+    with urllib.request.urlopen(
+        f"http://{host}:{os.environ['PORT']}{prefix}/ok"
+    ) as response:
+        assert response.status == 200
+
+
+if __name__ == "__main__":
+    healthcheck()
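
A quick way to sanity-check the new host-resolution rules is to exercise get_healthcheck_host directly; a minimal sketch (assumes the new healthcheck.py is importable from the working directory, and the hostname in the last case is a made-up example):

import os

from healthcheck import get_healthcheck_host

# Wildcard and dual-stack binds are probed via localhost
os.environ["LANGGRAPH_SERVER_HOST"] = "0.0.0.0"
assert get_healthcheck_host() == "localhost"

# Concrete IPv4 addresses pass through in compressed form
os.environ["LANGGRAPH_SERVER_HOST"] = "127.0.0.1"
assert get_healthcheck_host() == "127.0.0.1"

# IPv6 addresses are bracketed so they can sit inside the probe URL
os.environ["LANGGRAPH_SERVER_HOST"] = "::1"
assert get_healthcheck_host() == "[::1]"

# Hostnames that are not IP literals are returned unchanged
os.environ["LANGGRAPH_SERVER_HOST"] = "my-host.internal"  # hypothetical name
assert get_healthcheck_host() == "my-host.internal"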
langgraph_api-0.4.25/langgraph_api/__init__.py (new file)

@@ -0,0 +1 @@
+__version__ = "0.4.25"
{langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/api/a2a.py

@@ -33,6 +33,9 @@ logger = structlog.stdlib.get_logger(__name__)
 # Cache for assistant schemas (assistant_id -> schemas dict)
 _assistant_schemas_cache = LRUCache[dict[str, Any]](max_size=1000, ttl=60)
 
+MAX_HISTORY_LENGTH_REQUESTED = 10
+LANGGRAPH_HISTORY_QUERY_LIMIT = 500
+
 
 # ============================================================================
 # JSON-RPC 2.0 Base Types (shared with MCP)
@@ -99,12 +102,11 @@ def _client() -> LangGraphClient:
 
 
 async def _get_assistant(
-    client: LangGraphClient, assistant_id: str, headers: Headers | dict[str, Any] | None
+    assistant_id: str, headers: Headers | dict[str, Any] | None
 ) -> dict[str, Any]:
     """Get assistant with proper 404 error handling.
 
     Args:
-        client: LangGraph client
         assistant_id: The assistant ID to get
         headers: Request headers
 
@@ -115,7 +117,7 @@ async def _get_assistant(
         ValueError: If assistant not found or other errors
     """
     try:
-        return await client.assistants.get(assistant_id, headers=headers)
+        return await get_client().assistants.get(assistant_id, headers=headers)
     except Exception as e:
         if (
            hasattr(e, "response")
@@ -127,7 +129,6 @@ async def _get_assistant(
 
 
 async def _validate_supports_messages(
-    client: LangGraphClient,
     assistant: dict[str, Any],
     headers: Headers | dict[str, Any] | None,
     parts: list[dict[str, Any]],
@@ -138,7 +139,6 @@ async def _validate_supports_messages(
     If the parts only contain data parts, no validation is performed.
 
     Args:
-        client: LangGraph client
         assistant: The assistant dictionary
         headers: Request headers
         parts: The original A2A message parts
@@ -156,7 +156,9 @@ async def _validate_supports_messages(
         schemas = cached_schemas
     else:
         try:
-            schemas = await client.assistants.get_schemas(assistant_id, headers=headers)
+            schemas = await get_client().assistants.get_schemas(
+                assistant_id, headers=headers
+            )
             _assistant_schemas_cache.set(assistant_id, schemas)
         except Exception as e:
             raise ValueError(
@@ -505,10 +507,50 @@ def _map_runs_get_error_to_rpc(
     }
 
 
-def _create_task_response(
+def _convert_messages_to_a2a_format(
+    messages: list[dict[str, Any]],
+    task_id: str,
+    context_id: str,
+) -> list[dict[str, Any]]:
+    """Convert LangChain messages to A2A message format.
+
+    Args:
+        messages: List of LangChain messages
+        task_id: The task ID to assign to all messages
+        context_id: The context ID to assign to all messages
+
+    Returns:
+        List of A2A messages
+    """
+
+    # Convert each LangChain message to A2A format
+    a2a_messages = []
+    for msg in messages:
+        if isinstance(msg, dict):
+            msg_type = msg.get("type", "ai")
+            msg_role = msg.get("role", "")
+            content = msg.get("content", "")
+
+            # Support both LangChain style (type: "human"/"ai") and OpenAI style (role: "user"/"assistant")
+            # Map to A2A roles: "human"/"user" -> "user", everything else -> "agent"
+            a2a_role = "user" if msg_type == "human" or msg_role == "user" else "agent"
+
+            a2a_message = {
+                "role": a2a_role,
+                "parts": [{"kind": "text", "text": str(content)}],
+                "messageId": str(uuid.uuid4()),
+                "taskId": task_id,
+                "contextId": context_id,
+                "kind": "message",
+            }
+            a2a_messages.append(a2a_message)
+
+    return a2a_messages
+
+
+async def _create_task_response(
     task_id: str,
     context_id: str,
-    message: dict[str, Any],
     result: dict[str, Any],
     assistant_id: str,
 ) -> dict[str, Any]:
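
To make the new history shape concrete, a hypothetical before/after for _convert_messages_to_a2a_format (message contents and IDs are invented; each messageId is a fresh uuid4 at runtime):

messages = [
    {"type": "human", "content": "What is the weather?"},  # LangChain style -> "user"
    {"role": "assistant", "content": "Sunny."},            # OpenAI style -> "agent"
]
history = _convert_messages_to_a2a_format(messages, task_id="run-1", context_id="thread-1")
# history[0]["role"] == "user"; history[1]["role"] == "agent"
# Every entry carries parts=[{"kind": "text", "text": ...}], taskId, contextId, kind="message"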
@@ -520,16 +562,19 @@ def _create_task_response(
         message: Original A2A message from request
         result: LangGraph execution result
         assistant_id: The assistant ID used
+        headers: Request headers
 
     Returns:
         A2A Task response dictionary
     """
+    # Convert result messages to A2A message format
+    messages = result.get("messages", []) or []
+    thread_history = _convert_messages_to_a2a_format(messages, task_id, context_id)
+
     base_task = {
         "id": task_id,
         "contextId": context_id,
-        "history": [
-            {**message, "taskId": task_id, "contextId": context_id, "kind": "message"}
-        ],
+        "history": thread_history,
         "kind": "task",
     }
 
@@ -796,8 +841,8 @@ async def handle_message_send(
         }
 
     try:
-        assistant = await _get_assistant(client, assistant_id, request.headers)
-        await _validate_supports_messages(client, assistant, request.headers, parts)
+        assistant = await _get_assistant(assistant_id, request.headers)
+        await _validate_supports_messages(assistant, request.headers, parts)
     except ValueError as e:
         return {
             "error": {
@@ -822,6 +867,10 @@ async def handle_message_send(
 
     context_id = message.get("contextId")
 
+    # If no contextId provided, generate a UUID so we don't pass None to runs.create
+    if context_id is None:
+        context_id = str(uuid.uuid4())
+
     try:
         run = await client.runs.create(
             thread_id=context_id,
@@ -845,10 +894,9 @@ async def handle_message_send(
     task_id = run["run_id"]
     context_id = run["thread_id"]
 
-    return _create_task_response(
+    return await _create_task_response(
         task_id=task_id,
         context_id=context_id,
-        message=message,
         result=result,
         assistant_id=assistant_id,
     )
@@ -863,6 +911,37 @@ async def handle_message_send(
     }
 
 
+async def _get_historical_messages_for_task(
+    context_id: str,
+    task_run_id: str,
+    request_headers: Headers,
+    history_length: int | None = None,
+) -> list[Any]:
+    """Get historical messages for a specific task by matching run_id."""
+    history = await get_client().threads.get_history(
+        context_id,
+        limit=LANGGRAPH_HISTORY_QUERY_LIMIT,
+        metadata={"run_id": task_run_id},
+        headers=request_headers,
+    )
+
+    if history:
+        # Find the checkpoint with the highest step number (final state for this task)
+        target_checkpoint = max(
+            history, key=lambda c: c.get("metadata", {}).get("step", 0)
+        )
+        values = target_checkpoint["values"]
+        messages = values.get("messages", [])
+
+        # Apply client-requested history length limit per A2A spec
+        if history_length is not None and len(messages) > history_length:
+            # Return the most recent messages up to the limit
+            messages = messages[-history_length:]
+        return messages
+    else:
+        return []
+
+
 async def handle_tasks_get(
     request: ApiRequest, params: dict[str, Any]
 ) -> dict[str, Any]:
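
The checkpoint selection in _get_historical_messages_for_task reduces to a max over step metadata; a toy illustration with simplified, invented checkpoint dicts:

history = [
    {"metadata": {"step": 0}, "values": {"messages": ["m0"]}},
    {"metadata": {"step": 2}, "values": {"messages": ["m0", "m1", "m2"]}},
    {"metadata": {"step": 1}, "values": {"messages": ["m0", "m1"]}},
]
final = max(history, key=lambda c: c.get("metadata", {}).get("step", 0))
assert final["values"]["messages"] == ["m0", "m1", "m2"]
# With history_length=2, only the most recent messages survive:
assert final["values"]["messages"][-2:] == ["m1", "m2"]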
@@ -885,6 +964,7 @@ async def handle_tasks_get(
     try:
         task_id = params.get("id")
         context_id = params.get("contextId")
+        history_length = params.get("historyLength")
 
         if not task_id:
             return {
@@ -902,6 +982,23 @@ async def handle_tasks_get(
                 }
             }
 
+        # Validate history_length parameter per A2A spec
+        if history_length is not None:
+            if not isinstance(history_length, int) or history_length < 0:
+                return {
+                    "error": {
+                        "code": ERROR_CODE_INVALID_PARAMS,
+                        "message": "historyLength must be a non-negative integer",
+                    }
+                }
+            if history_length > MAX_HISTORY_LENGTH_REQUESTED:
+                return {
+                    "error": {
+                        "code": ERROR_CODE_INVALID_PARAMS,
+                        "message": f"historyLength cannot exceed {MAX_HISTORY_LENGTH_REQUESTED}",
+                    }
+                }
+
         try:
             run_info = await client.runs.get(
                 thread_id=context_id,
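
Per the validation above, a tasks/get params payload that stays within bounds might look like this (IDs are placeholders):

params = {
    "id": "run-123",            # required task id
    "contextId": "thread-456",  # thread the run belongs to
    "historyLength": 5,         # optional; values < 0 or > MAX_HISTORY_LENGTH_REQUESTED (10) are rejected
}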
@@ -919,7 +1016,7 @@ async def handle_tasks_get(
         if assistant_id:
             try:
                 # Verify that the assistant exists
-                await _get_assistant(client, assistant_id, request.headers)
+                await _get_assistant(assistant_id, request.headers)
             except ValueError as e:
                 return {
                     "error": {
@@ -941,10 +1038,24 @@ async def handle_tasks_get(
         else:
             a2a_state = "submitted"
 
+        try:
+            task_run_id = run_info.get("run_id")
+            messages = await _get_historical_messages_for_task(
+                context_id, task_run_id, request.headers, history_length
+            )
+            thread_history = _convert_messages_to_a2a_format(
+                messages, task_id, context_id
+            )
+        except Exception as e:
+            await logger.aexception(f"Failed to get thread state for tasks/get: {e}")
+            thread_history = []
+
         # Build the A2A Task response
         task_response = {
             "id": task_id,
             "contextId": context_id,
+            "history": thread_history,
+            "kind": "task",
             "status": {
                 "state": a2a_state,
             },
@@ -1033,7 +1144,7 @@ async def generate_agent_card(request: ApiRequest, assistant_id: str) -> dict[str, Any]:
     """
     client = _client()
 
-    assistant = await _get_assistant(client, assistant_id, request.headers)
+    assistant = await _get_assistant(assistant_id, request.headers)
     schemas = await client.assistants.get_schemas(assistant_id, headers=request.headers)
 
     # Extract schema information for metadata
@@ -1212,10 +1323,8 @@ async def handle_message_stream(
             return
 
         try:
-            assistant = await _get_assistant(client, assistant_id, request.headers)
-            await _validate_supports_messages(
-                client, assistant, request.headers, parts
-            )
+            assistant = await _get_assistant(assistant_id, request.headers)
+            await _validate_supports_messages(assistant, request.headers, parts)
         except ValueError as e:
             yield (
                 b"message",
{langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/queue_entrypoint.py

@@ -81,7 +81,7 @@ async def health_and_metrics_server():
 
     config = uvicorn.Config(
         app,
-        host="0.0.0.0",
+        host=os.getenv("LANGGRAPH_SERVER_HOST", "0.0.0.0"),
         port=port,
         log_level="error",
         access_log=False,
{langgraph_api-0.4.23 → langgraph_api-0.4.25}/langgraph_api/stream.py

@@ -317,9 +317,11 @@ async def astream_state(
                             else "messages/complete"
                         ),
                         [
-                            message_chunk_to_message(messages[msg.id])
-                            if not is_chunk
-                            else messages[msg.id]
+                            (
+                                message_chunk_to_message(messages[msg.id])
+                                if not is_chunk
+                                else messages[msg.id]
+                            )
                         ],
                     )
                 elif mode in stream_mode:
@@ -379,6 +381,7 @@ async def astream_state(
                 elif chunk["type"] == "task_result":
                     on_task_result(chunk["payload"])
             if mode == "messages":
+                logger.warning("EVENT BRO", stream_event=event)
                 if "messages-tuple" in stream_mode:
                     if subgraphs and ns:
                         yield f"messages|{'|'.join(ns)}", chunk
@@ -417,9 +420,11 @@ async def astream_state(
                             else "messages/complete"
                         ),
                         [
-                            message_chunk_to_message(messages[msg.id])
-                            if not is_chunk
-                            else messages[msg.id]
+                            (
+                                message_chunk_to_message(messages[msg.id])
+                                if not is_chunk
+                                else messages[msg.id]
+                            )
                         ],
                     )
                 elif mode in stream_mode:
langgraph_api-0.4.23/healthcheck.py (deleted)

@@ -1,19 +0,0 @@
-import json
-import os
-import urllib.request
-
-prefix = ""
-mount_prefix = None
-# Override prefix if it's set in the http config
-if (http := os.environ.get("LANGGRAPH_HTTP")) and (
-    mount_prefix := json.loads(http).get("mount_prefix")
-):
-    prefix = mount_prefix
-# Override that
-if os.environ.get("MOUNT_PREFIX"):
-    prefix = os.environ["MOUNT_PREFIX"]
-
-with urllib.request.urlopen(
-    f"http://localhost:{os.environ['PORT']}{prefix}/ok"
-) as response:
-    assert response.status == 200
langgraph_api-0.4.23/langgraph_api/__init__.py (deleted)

@@ -1 +0,0 @@
-__version__ = "0.4.23"