langgraph-api 0.4.20__py3-none-any.whl → 0.4.21__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of langgraph-api might be problematic.

langgraph_api/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.4.20"
+ __version__ = "0.4.21"
langgraph_api/api/a2a.py CHANGED
@@ -7,7 +7,7 @@ A2A Protocol specification:
  https://a2a-protocol.org/dev/specification/

  The implementation currently supports JSON-RPC 2.0 transport only.
- Streaming (SSE) and push notifications are not implemented.
+ Push notifications are not implemented.
  """

  import functools
@@ -16,18 +16,19 @@ from datetime import UTC, datetime
  from typing import Any, Literal, NotRequired, cast

  import orjson
+ import structlog
  from langgraph_sdk.client import LangGraphClient, get_client
  from starlette.datastructures import Headers
  from starlette.responses import JSONResponse, Response
- from structlog import getLogger
  from typing_extensions import TypedDict

  from langgraph_api import __version__
  from langgraph_api.metadata import USER_API_URL
  from langgraph_api.route import ApiRequest, ApiRoute
+ from langgraph_api.sse import EventSourceResponse
  from langgraph_api.utils.cache import LRUCache

- logger = getLogger(__name__)
+ logger = structlog.stdlib.get_logger(__name__)

  # Cache for assistant schemas (assistant_id -> schemas dict)
  _assistant_schemas_cache = LRUCache[dict[str, Any]](max_size=1000, ttl=60)
@@ -286,6 +287,101 @@ def _extract_a2a_response(result: dict[str, Any]) -> str:
      return str(last_message)


+ def _lc_stream_items_to_a2a_message(
+     items: list[dict[str, Any]],
+     *,
+     task_id: str,
+     context_id: str,
+     role: Literal["agent", "user"] = "agent",
+ ) -> dict[str, Any]:
+     """Convert LangChain stream "messages/*" items into a valid A2A Message.
+
+     This takes the list found in a messages/* StreamPart's data field and
+     constructs a single A2A Message object, concatenating textual content and
+     preserving select structured metadata into a DataPart.
+
+     Args:
+         items: List of LangChain message dicts from stream (e.g., with keys like
+             "content", "type", "response_metadata", "tool_calls", etc.)
+         task_id: The A2A task ID this message belongs to
+         context_id: The A2A context ID (thread) for grouping
+         role: A2A role; defaults to "agent" for streamed assistant output
+
+     Returns:
+         A2A Message dict with required fields and minimally valid parts.
+     """
+     # Aggregate any text content across items
+     text_parts: list[str] = []
+     # Collect a small amount of structured data for debugging/traceability
+     extra_data: dict[str, Any] = {}
+
+     def _sse_safe_text(s: str) -> str:
+         return s.replace("\u2028", "\\u2028").replace("\u2029", "\\u2029")
+
+     for it in items:
+         if not isinstance(it, dict):
+             continue
+         content = it.get("content")
+         if isinstance(content, str) and content:
+             text_parts.append(_sse_safe_text(content))
+
+         # Preserve a couple of useful fields if present
+         # Keep this small to avoid bloating the message payload
+         rm = it.get("response_metadata")
+         if isinstance(rm, dict) and rm:
+             extra_data.setdefault("response_metadata", rm)
+         tc = it.get("tool_calls")
+         if isinstance(tc, list) and tc:
+             extra_data.setdefault("tool_calls", tc)
+
+     parts: list[dict[str, Any]] = []
+     if text_parts:
+         parts.append({"kind": "text", "text": "".join(text_parts)})
+     if extra_data:
+         parts.append({"kind": "data", "data": extra_data})
+
+     # Ensure we always produce a minimally valid A2A Message
+     if not parts:
+         parts = [{"kind": "text", "text": ""}]
+
+     return {
+         "role": role,
+         "parts": parts,
+         "messageId": str(uuid.uuid4()),
+         "taskId": task_id,
+         "contextId": context_id,
+         "kind": "message",
+     }
+
+
+ def _lc_items_to_status_update_event(
+     items: list[dict[str, Any]],
+     *,
+     task_id: str,
+     context_id: str,
+     state: str = "working",
+ ) -> dict[str, Any]:
+     """Build a TaskStatusUpdateEvent embedding a converted A2A Message.
+
+     This avoids emitting standalone Message results (which some clients reject)
+     and keeps message content within the status update per spec.
+     """
+     message = _lc_stream_items_to_a2a_message(
+         items, task_id=task_id, context_id=context_id, role="agent"
+     )
+     return {
+         "taskId": task_id,
+         "contextId": context_id,
+         "kind": "status-update",
+         "status": {
+             "state": state,
+             "message": message,
+             "timestamp": datetime.now(UTC).isoformat(),
+         },
+         "final": False,
+     }
+
+
  def _map_runs_create_error_to_rpc(
      exception: Exception, assistant_id: str, thread_id: str | None = None
  ) -> dict[str, Any]:
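For orientation, here is roughly what the first helper above produces. A minimal sketch; the input items are hypothetical stand-ins for the LangChain stream payloads the handler receives, and the output shape follows the function body shown in this hunk:

# Hypothetical "messages/*" stream items (keys mirror those the helper reads).
items = [
    {"type": "AIMessageChunk", "content": "Hello, "},
    {"type": "AIMessageChunk", "content": "world."},
]

# _lc_stream_items_to_a2a_message(items, task_id="run-1", context_id="thread-1")
# would return a single A2A Message along these lines:
{
    "role": "agent",
    "parts": [{"kind": "text", "text": "Hello, world."}],
    "messageId": "<fresh uuid4>",
    "taskId": "run-1",
    "contextId": "thread-1",
    "kind": "message",
}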
@@ -519,9 +615,6 @@ async def handle_post_request(request: ApiRequest, assistant_id: str) -> Respons
      except orjson.JSONDecodeError:
          return create_error_response("Invalid JSON payload", 400)

-     if not is_valid_accept_header(request):
-         return create_error_response("Accept header must include application/json", 400)
-
      if not isinstance(message, dict):
          return create_error_response("Invalid message format", 400)

@@ -534,6 +627,18 @@ async def handle_post_request(request: ApiRequest, assistant_id: str) -> Respons
      id_ = message.get("id")
      method = message.get("method")

+     accept_header = request.headers.get("Accept") or ""
+     if method == "message/stream":
+         if "text/event-stream" not in accept_header:
+             return create_error_response(
+                 "Accept header must include text/event-stream for streaming", 400
+             )
+     else:
+         if "application/json" not in accept_header:
+             return create_error_response(
+                 "Accept header must include application/json", 400
+             )
+
      if id_ is not None and method:
          # JSON-RPC request
          return await handle_jsonrpc_request(
@@ -553,19 +658,6 @@ async def handle_post_request(request: ApiRequest, assistant_id: str) -> Respons
      )


- def is_valid_accept_header(request: ApiRequest) -> bool:
-     """Check if Accept header contains supported content types.
-
-     Args:
-         request: The incoming request
-
-     Returns:
-         True if header contains application/json
-     """
-     accept_header = request.headers.get("Accept", "")
-     return "application/json" in accept_header
-
-
  def create_error_response(message: str, status_code: int) -> Response:
      """Create a JSON error response.

@@ -603,9 +695,10 @@ async def handle_jsonrpc_request(
      """
      method = message["method"]
      params = message.get("params", {})
-
      # Route to appropriate A2A method handler
-     if method == "message/send":
+     if method == "message/stream":
+         return await handle_message_stream(request, params, assistant_id, message["id"])
+     elif method == "message/send":
          result_or_error = await handle_message_send(request, params, assistant_id)
      elif method == "tasks/get":
          result_or_error = await handle_tasks_get(request, params)
@@ -949,7 +1042,9 @@ async def generate_agent_card(request: ApiRequest, assistant_id: str) -> dict[st
      required = input_schema.get("required", [])

      assistant_name = assistant["name"]
-     assistant_description = assistant.get("description", f"{assistant_name} assistant")
+     assistant_description = (
+         assistant.get("description") or f"{assistant_name} assistant"
+     )

      # For now, each assistant has one main skill - itself
      skills = [
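The description fallback change is subtle: dict.get(key, default) only falls back when the key is missing, while dict.get(key) or default also falls back when the stored value is falsy (None or an empty string). A quick illustration:

assistant = {"name": "agent", "description": None}

assistant.get("description", "agent assistant")    # -> None (key exists)
assistant.get("description") or "agent assistant"  # -> "agent assistant"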
@@ -978,10 +1073,11 @@ async def generate_agent_card(request: ApiRequest, assistant_id: str) -> dict[st
      scheme = request.url.scheme
      host = request.url.hostname or "localhost"
      port = request.url.port
+     path = request.url.path.removesuffix("/.well-known/agent-card.json")
      if port and (
          (scheme == "http" and port != 80) or (scheme == "https" and port != 443)
      ):
-         base_url = f"{scheme}://{host}:{port}"
+         base_url = f"{scheme}://{host}:{port}{path}"
      else:
          base_url = f"{scheme}://{host}"

@@ -992,7 +1088,7 @@ async def generate_agent_card(request: ApiRequest, assistant_id: str) -> dict[st
          "url": f"{base_url}/a2a/{assistant_id}",
          "preferredTransport": "JSONRPC",
          "capabilities": {
-             "streaming": False,  # Not implemented yet
+             "streaming": True,
              "pushNotifications": False,  # Not implemented yet
              "stateTransitionHistory": False,
          },
@@ -1062,6 +1158,281 @@ async def handle_agent_card_endpoint(request: ApiRequest) -> Response:
      )


+ # ============================================================================
+ # Message Streaming
+ # ============================================================================
+
+
+ async def handle_message_stream(
+     request: ApiRequest,
+     params: dict[str, Any],
+     assistant_id: str,
+     rpc_id: str | int,
+ ) -> Response:
+     """Handle message/stream requests and stream JSON-RPC responses via SSE.
+
+     Each SSE "data" is a JSON-RPC 2.0 response object. We emit:
+     - An initial TaskStatusUpdateEvent with state "submitted".
+     - Optionally a TaskStatusUpdateEvent with state "working" on first update.
+     - A final Task result when the run completes.
+     - A JSON-RPC error if anything fails.
+     """
+     client = _client()
+
+     async def stream_body():
+         try:
+             message = params.get("message")
+             if not message:
+                 yield (
+                     b"message",
+                     {
+                         "jsonrpc": "2.0",
+                         "id": rpc_id,
+                         "error": {
+                             "code": ERROR_CODE_INVALID_PARAMS,
+                             "message": "Missing 'message' in params",
+                         },
+                     },
+                 )
+                 return
+
+             parts = message.get("parts", [])
+             if not parts:
+                 yield (
+                     b"message",
+                     {
+                         "jsonrpc": "2.0",
+                         "id": rpc_id,
+                         "error": {
+                             "code": ERROR_CODE_INVALID_PARAMS,
+                             "message": "Message must contain at least one part",
+                         },
+                     },
+                 )
+                 return
+
+             try:
+                 assistant = await _get_assistant(client, assistant_id, request.headers)
+                 await _validate_supports_messages(
+                     client, assistant, request.headers, parts
+                 )
+             except ValueError as e:
+                 yield (
+                     b"message",
+                     {
+                         "jsonrpc": "2.0",
+                         "id": rpc_id,
+                         "error": {
+                             "code": ERROR_CODE_INVALID_PARAMS,
+                             "message": str(e),
+                         },
+                     },
+                 )
+                 return
+
+             # Process A2A message parts into LangChain messages format
+             try:
+                 message_role = message.get("role", "user")
+                 input_content = _process_a2a_message_parts(parts, message_role)
+             except ValueError as e:
+                 yield (
+                     b"message",
+                     {
+                         "jsonrpc": "2.0",
+                         "id": rpc_id,
+                         "error": {
+                             "code": ERROR_CODE_CONTENT_TYPE_NOT_SUPPORTED,
+                             "message": str(e),
+                         },
+                     },
+                 )
+                 return
+
+             run = await client.runs.create(
+                 thread_id=message.get("contextId"),
+                 assistant_id=assistant_id,
+                 stream_mode=["messages", "values"],
+                 if_not_exists="create",
+                 input=input_content,
+                 headers=request.headers,
+             )
+             context_id = run["thread_id"]
+             # Emit initial Task object to establish task context
+             initial_task = {
+                 "id": run["run_id"],
+                 "contextId": context_id,
+                 "history": [
+                     {
+                         **message,
+                         "taskId": run["run_id"],
+                         "contextId": context_id,
+                         "kind": "message",
+                     }
+                 ],
+                 "kind": "task",
+                 "status": {
+                     "state": "submitted",
+                     "timestamp": datetime.now(UTC).isoformat(),
+                 },
+             }
+             yield (b"message", {"jsonrpc": "2.0", "id": rpc_id, "result": initial_task})
+             task_id = run["run_id"]
+             stream = client.runs.join_stream(
+                 run_id=task_id,
+                 thread_id=context_id,
+                 headers=request.headers,
+             )
+             result = None
+             err = None
+             notified_is_working = False
+             async for chunk in stream:
+                 try:
+                     if chunk.event == "metadata":
+                         data = chunk.data or {}
+                         if data.get("status") == "run_done":
+                             final_message = None
+                             if isinstance(result, dict):
+                                 try:
+                                     final_text = _extract_a2a_response(result)
+                                     final_message = {
+                                         "role": "agent",
+                                         "parts": [{"kind": "text", "text": final_text}],
+                                         "messageId": str(uuid.uuid4()),
+                                         "taskId": task_id,
+                                         "contextId": context_id,
+                                         "kind": "message",
+                                     }
+                                 except Exception:
+                                     await logger.aexception(
+                                         "Failed to extract final message from result",
+                                         result=result,
+                                     )
+                             if final_message is None:
+                                 final_message = {
+                                     "role": "agent",
+                                     "parts": [{"kind": "text", "text": str(result)}],
+                                     "messageId": str(uuid.uuid4()),
+                                     "taskId": task_id,
+                                     "contextId": context_id,
+                                     "kind": "message",
+                                 }
+                             completed = {
+                                 "taskId": task_id,
+                                 "contextId": context_id,
+                                 "kind": "status-update",
+                                 "status": {
+                                     "state": "completed",
+                                     "message": final_message,
+                                     "timestamp": datetime.now(UTC).isoformat(),
+                                 },
+                                 "final": True,
+                             }
+                             yield (
+                                 b"message",
+                                 {"jsonrpc": "2.0", "id": rpc_id, "result": completed},
+                             )
+                             return
+                         if data.get("run_id") and not notified_is_working:
+                             notified_is_working = True
+                             yield (
+                                 b"message",
+                                 {
+                                     "jsonrpc": "2.0",
+                                     "id": rpc_id,
+                                     "result": {
+                                         "taskId": task_id,
+                                         "contextId": context_id,
+                                         "kind": "status-update",
+                                         "status": {"state": "working"},
+                                         "final": False,
+                                     },
+                                 },
+                             )
+                     elif chunk.event == "error":
+                         err = chunk.data
+                     elif chunk.event == "values":
+                         err = None  # Error was retriable
+                         result = chunk.data
+                     elif chunk.event.startswith("messages"):
+                         err = None  # Error was retriable
+                         items = chunk.data or []
+                         if isinstance(items, list) and items:
+                             update = _lc_items_to_status_update_event(
+                                 items,
+                                 task_id=task_id,
+                                 context_id=context_id,
+                                 state="working",
+                             )
+                             yield (
+                                 b"message",
+                                 {"jsonrpc": "2.0", "id": rpc_id, "result": update},
+                             )
+                     else:
+                         await logger.awarning(
+                             "Ignoring unknown event type: " + chunk.event
+                         )
+
+                 except Exception as e:
+                     await logger.aexception("Failed to process message stream")
+                     err = {"error": type(e).__name__, "message": str(e)}
+                     continue
+
+             # If we exit unexpectedly, send a final status based on error presence
+             final_message = None
+             if isinstance(err, dict) and ("__error__" in err or "error" in err):
+                 msg = (
+                     err.get("__error__", {}).get("error")
+                     if isinstance(err.get("__error__"), dict)
+                     else err.get("message")
+                 )
+                 await logger.aerror("Failed to process message stream", err=err)
+                 final_message = {
+                     "role": "agent",
+                     "parts": [{"kind": "text", "text": str(msg or "")}],
+                     "messageId": str(uuid.uuid4()),
+                     "taskId": task_id,
+                     "contextId": context_id,
+                     "kind": "message",
+                 }
+             fallback = {
+                 "taskId": task_id,
+                 "contextId": context_id,
+                 "kind": "status-update",
+                 "status": {
+                     "state": "failed" if err else "completed",
+                     **({"message": final_message} if final_message else {}),
+                     "timestamp": datetime.now(UTC).isoformat(),
+                 },
+                 "final": True,
+             }
+             yield (b"message", {"jsonrpc": "2.0", "id": rpc_id, "result": fallback})
+         except Exception as e:
+             await logger.aerror(
+                 f"Error in message/stream for assistant {assistant_id}: {str(e)}",
+                 exc_info=True,
+             )
+             yield (
+                 b"message",
+                 {
+                     "jsonrpc": "2.0",
+                     "id": rpc_id,
+                     "error": {
+                         "code": ERROR_CODE_INTERNAL_ERROR,
+                         "message": f"Internal server error: {str(e)}",
+                     },
+                 },
+             )
+
+     async def consume_():
+         async for chunk in stream_body():
+             await logger.adebug("A2A.stream_body: Yielding chunk", chunk=chunk)
+             yield chunk
+
+     return EventSourceResponse(
+         consume_(), headers={"Content-Type": "text/event-stream"}
+     )
+
+
  # ============================================================================
  # Route Definitions
  # ============================================================================
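Taken together with the Accept-header routing and the agent card now advertising "streaming": True, a client can exercise the new method end to end. A minimal sketch, assuming a local server at http://localhost:2024 and an assistant registered as "agent" (both assumptions; the /a2a/{assistant_id} path and the Accept requirement come from the code above):

import json

import httpx

rpc_request = {
    "jsonrpc": "2.0",
    "id": "1",
    "method": "message/stream",
    "params": {
        "message": {
            "role": "user",
            "parts": [{"kind": "text", "text": "Hi there"}],
        }
    },
}

with httpx.stream(
    "POST",
    "http://localhost:2024/a2a/agent",  # hypothetical deployment URL
    json=rpc_request,
    # message/stream now requires text/event-stream in the Accept header.
    headers={"Accept": "text/event-stream"},
    timeout=None,
) as response:
    for line in response.iter_lines():
        if line.startswith("data: "):
            rpc = json.loads(line[len("data: "):])
            result = rpc.get("result", {})
            # First a Task ("submitted"), then status-updates ("working"),
            # then a final status-update ("completed") per the handler above.
            print(result.get("kind"), result.get("status", {}).get("state"))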
langgraph_api/api/assistants.py CHANGED
@@ -17,7 +17,7 @@ from langgraph_api.graph import get_assistant_id, get_graph
  from langgraph_api.js.base import BaseRemotePregel
  from langgraph_api.route import ApiRequest, ApiResponse, ApiRoute
  from langgraph_api.schema import ASSISTANT_FIELDS
- from langgraph_api.serde import ajson_loads
+ from langgraph_api.serde import json_loads
  from langgraph_api.utils import (
      fetchone,
      get_pagination_headers,
@@ -240,7 +240,7 @@ async def get_assistant_graph(
      async with connect() as conn:
          assistant_ = await Assistants.get(conn, assistant_id)
          assistant = await fetchone(assistant_)
-         config = await ajson_loads(assistant["config"])
+         config = json_loads(assistant["config"])
          configurable = config.setdefault("configurable", {})
          configurable.update(get_configurable_headers(request.headers))

@@ -297,7 +297,7 @@ async def get_assistant_subgraphs(
      async with connect() as conn:
          assistant_ = await Assistants.get(conn, assistant_id)
          assistant = await fetchone(assistant_)
-         config = await ajson_loads(assistant["config"])
+         config = json_loads(assistant["config"])
          configurable = config.setdefault("configurable", {})
          configurable.update(get_configurable_headers(request.headers))
      async with get_graph(
@@ -345,7 +345,7 @@ async def get_assistant_schemas(
      assistant_ = await Assistants.get(conn, assistant_id)
      # TODO: Implement a cache so we can de-dent and release this connection.
      assistant = await fetchone(assistant_)
-     config = await ajson_loads(assistant["config"])
+     config = json_loads(assistant["config"])
      configurable = config.setdefault("configurable", {})
      configurable.update(get_configurable_headers(request.headers))
      async with get_graph(
langgraph_api/cli.py CHANGED
@@ -11,12 +11,17 @@ from typing import Literal
  from typing_extensions import TypedDict

  if typing.TYPE_CHECKING:
+     from packaging.version import Version
+
      from langgraph_api.config import HttpConfig, StoreConfig

  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)


+ SUPPORT_STATUS = Literal["active", "critical", "eol"]
+
+
  def _get_ls_origin() -> str | None:
      from langsmith.client import Client
      from langsmith.utils import tracing_is_enabled
@@ -121,46 +126,6 @@ class AuthConfig(TypedDict, total=False):
      cache: CacheConfig | None


- def _check_newer_version(pkg: str, timeout: float = 0.2) -> None:
-     """Log a notice if PyPI reports a newer version."""
-     import importlib.metadata as md
-     import json
-     import urllib.request
-
-     from packaging.version import Version
-
-     thread_logger = logging.getLogger("check_version")
-     if not thread_logger.handlers:
-         handler = logging.StreamHandler()
-         handler.setFormatter(logging.Formatter("%(message)s"))
-         thread_logger.addHandler(handler)
-
-     try:
-         current = Version(md.version(pkg))
-         with urllib.request.urlopen(
-             f"https://pypi.org/pypi/{pkg}/json", timeout=timeout
-         ) as resp:
-             latest_str = json.load(resp)["info"]["version"]
-         latest = Version(latest_str)
-         if latest > current:
-             thread_logger.info(
-                 "🔔 A newer version of %s is available: %s → %s (pip install -U %s)",
-                 pkg,
-                 current,
-                 latest,
-                 pkg,
-             )
-
-     except Exception:
-         pass
-
-     except RuntimeError:
-         thread_logger.info(
-             f"Failed to check for newer version of {pkg}."
-             " To disable version checks, set LANGGRAPH_NO_VERSION_CHECK=true"
-         )
-
-
  def run_server(
      host: str = "127.0.0.1",
      port: int = 2024,
@@ -362,8 +327,12 @@ For production use, please use LangGraph Platform.
      threading.Thread(target=_open_browser, daemon=True).start()
      nvc = os.getenv("LANGGRAPH_NO_VERSION_CHECK")
      if nvc is None or nvc.lower() not in ("true", "1"):
+         from langgraph_api import __version__
+
          threading.Thread(
-             target=_check_newer_version, args=("langgraph-api",), daemon=True
+             target=_check_newer_version,
+             args=("langgraph-api", __version__),
+             daemon=True,
          ).start()
      supported_kwargs = {
          k: v
@@ -471,5 +440,133 @@ def main():
      )


+ def _check_newer_version(pkg: str, current_version: str, timeout: float = 0.5) -> None:
+     """Check PyPI for newer versions and log support status.
+
+     Critical = one minor behind on same major, OR latest minor of previous major while latest is X.0.*
+     EOL = two+ minors behind on same major, OR any previous major after X.1.*
+     """
+     import json
+     import urllib.request
+
+     from packaging.version import InvalidVersion, Version
+
+     log = logging.getLogger("version_check")
+     if not log.handlers:
+         h = logging.StreamHandler()
+         h.setFormatter(logging.Formatter("%(message)s"))
+         log.addHandler(h)
+
+     if os.getenv("LANGGRAPH_NO_VERSION_CHECK", "").lower() in ("true", "1"):
+         return
+
+     def _parse(v: str) -> Version | None:
+         try:
+             return Version(v)
+         except InvalidVersion:
+             return None
+
+     try:
+         current = Version(current_version)
+     except InvalidVersion:
+         log.info(
+             f"[version] Could not parse installed version {current_version!r}. Skipping support check."
+         )
+         return
+
+     try:
+         with urllib.request.urlopen(
+             f"https://pypi.org/pypi/{pkg}/json", timeout=timeout
+         ) as resp:
+             payload = json.load(resp)
+         latest_str = payload["info"]["version"]
+         latest = Version(latest_str)
+         releases: dict[str, list[dict]] = payload.get("releases", {})
+     except Exception:
+         log.debug("Failed to retrieve latest version info for %s", pkg)
+         return
+     prev_major_latest_minor: Version | None = None
+     if latest.major > 0:
+         pm = latest.major - 1
+         prev_major_versions = [
+             v
+             for s in releases
+             if (v := _parse(s)) is not None and not v.is_prerelease and v.major == pm
+         ]
+         if prev_major_versions:
+             prev_major_latest_minor = max(
+                 prev_major_versions, key=lambda v: (v.major, v.minor, v.micro)
+             )
+
+     if latest > current and not current.is_prerelease:
+         log.info(
+             "[version] A newer version of %s is available: %s → %s (pip install -U %s)",
+             pkg,
+             current,
+             latest,
+             pkg,
+         )
+
+     level = _support_level(current, latest, prev_major_latest_minor)
+     changelog = (
+         "https://docs.langchain.com/langgraph-platform/langgraph-server-changelog"
+     )
+
+     if level == "critical":
+         # Distinguish same-major vs cross-major grace in the wording
+         if current.major == latest.major and current.minor == latest.minor - 1:
+             tail = "You are one minor version behind the latest (%d.%d.x).\n"
+         else:
+             tail = "You are on the latest minor of the previous major while a new major (%d.%d.x) just released.\n"
+         log.info(
+             "⚠️ [support] %s %s is in Critical support.\n"
+             "Only critical security and installation fixes are provided.\n"
+             + tail
+             + "Please plan an upgrade soon. See changelog: %s",
+             pkg,
+             current,
+             latest.major,
+             latest.minor,
+             changelog,
+         )
+     elif level == "eol":
+         log.info(
+             "⚠️ [support] %s %s is End of Life.\n"
+             "No bug fixes or security updates will be provided.\n"
+             "You are two or more minor versions behind the latest (%d.%d.x).\n"
+             "You should upgrade immediately. See changelog: %s",
+             pkg,
+             current,
+             latest.major,
+             latest.minor,
+             changelog,
+         )
+
+
+ def _support_level(
+     cur: "Version", lat: "Version", prev_major_latest_minor: "Version | None"
+ ) -> SUPPORT_STATUS:
+     if cur.major > lat.major:
+         return "active"
+     if cur.major == lat.major:
+         if cur.minor == lat.minor:
+             return "active"
+         if cur.minor == lat.minor - 1:
+             return "critical"
+         if cur.minor <= lat.minor - 2:
+             return "eol"
+         return "active"
+
+     if cur.major == lat.major - 1 and lat.minor == 0:
+         if (
+             prev_major_latest_minor is not None
+             and cur.minor == prev_major_latest_minor.minor
+         ):
+             return "critical"
+         return "eol"
+
+     return "eol"
+
+
  if __name__ == "__main__":
      main()
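To make the support policy concrete, here is how _support_level classifies a few hypothetical version pairs (illustrative calls that follow the rules in the docstring above):

from packaging.version import Version

# Same major: one minor behind is "critical", two or more behind is "eol".
_support_level(Version("1.2.0"), Version("1.3.0"), None)              # "critical"
_support_level(Version("1.1.0"), Version("1.3.0"), None)              # "eol"

# Latest is a fresh X.0.*: the last minor of the previous major gets a
# "critical" grace period; older minors of that major are already "eol".
_support_level(Version("1.3.2"), Version("2.0.0"), Version("1.3.5"))  # "critical"
_support_level(Version("1.2.0"), Version("2.0.0"), Version("1.3.5"))  # "eol"

# Once the new major reaches X.1.*, every previous-major version is "eol".
_support_level(Version("1.3.2"), Version("2.1.0"), Version("1.3.5"))  # "eol"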
langgraph_api/js/package.json CHANGED
@@ -18,7 +18,7 @@
      "@typescript/vfs": "^1.6.0",
      "dedent": "^1.5.3",
      "exit-hook": "^4.0.0",
-     "hono": "^4.5.4",
+     "hono": "^4.9.7",
      "p-queue": "^8.0.1",
      "p-retry": "^6.2.0",
      "tsx": "^4.19.3",
langgraph_api/js/yarn.lock CHANGED
@@ -974,10 +974,10 @@ has-flag@^4.0.0:
    resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
    integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==

- hono@^4.5.4:
-   version "4.6.14"
-   resolved "https://registry.yarnpkg.com/hono/-/hono-4.6.14.tgz#f83f51e81b8ae5611dab459570990bf4c977d20c"
-   integrity sha512-j4VkyUp2xazGJ8eCCLN1Vm/bxdvm/j5ZuU9AIjLu9vapn2M44p9L3Ktr9Vnb2RN2QtcR/wVjZVMlT5k7GJQgPw==
+ hono@^4.5.4, hono@^4.9.7:
+   version "4.9.7"
+   resolved "https://registry.yarnpkg.com/hono/-/hono-4.9.7.tgz#8ac244477397de71d7d3d393fad129209b5b631e"
+   integrity sha512-t4Te6ERzIaC48W3x4hJmBwgNlLhmiEdEE5ViYb02ffw4ignHNHa5IBtPjmbKstmtKa8X6C35iWwK4HaqvrzG9w==

  icss-utils@^5.0.0, icss-utils@^5.1.0:
    version "5.1.0"
langgraph_api/metadata.py CHANGED
@@ -7,6 +7,8 @@ import langgraph.version
  import orjson
  import structlog

+ import langgraph_api.config as config
+ from langgraph_api.auth.custom import get_auth_instance
  from langgraph_api.config import (
      LANGGRAPH_CLOUD_LICENSE_KEY,
      LANGSMITH_API_KEY,
@@ -17,7 +19,9 @@ from langgraph_api.config import (
      USES_STORE_TTL,
      USES_THREAD_TTL,
  )
+ from langgraph_api.graph import GRAPHS, is_js_graph
  from langgraph_api.http import http_request
+ from langgraph_api.js.base import is_js_path
  from langgraph_license.validation import plus_features_enabled

  logger = structlog.stdlib.get_logger(__name__)
@@ -72,6 +76,36 @@ if LANGSMITH_AUTH_ENDPOINT:
      )


+ def _lang_usage_metadata() -> tuple[dict[str, str], dict[str, int]]:
+     js_graph_count = sum(1 for graph_id in GRAPHS if is_js_graph(graph_id))
+     py_graph_count = len(GRAPHS) - js_graph_count
+
+     auth_instance = get_auth_instance()
+     custom_auth_enabled = auth_instance is not None
+     custom_js_auth_enabled = auth_instance == "js"
+
+     js_proxy_middleware_enabled = False
+     if (
+         config.HTTP_CONFIG
+         and (app := config.HTTP_CONFIG.get("app"))
+         and isinstance(app, str)
+     ):
+         app_path = app.split(":", 1)[0]  # type: ignore[possibly-unresolved-reference]
+         js_proxy_middleware_enabled = is_js_path(app_path)
+
+     tags = {
+         "langgraph.platform.uses_custom_auth": str(custom_auth_enabled),
+         "langgraph.platform.uses_js_custom_auth": str(custom_js_auth_enabled),
+         "langgraph.platform.uses_js_proxy_middleware": str(js_proxy_middleware_enabled),
+     }
+     measures = {
+         "langgraph.platform.py_graphs": py_graph_count,
+         "langgraph.platform.js_graphs": js_graph_count,
+     }
+
+     return tags, measures
+
+
  def incr_runs(*, incr: int = 1) -> None:
      global RUN_COUNTER
      RUN_COUNTER += incr
@@ -111,6 +145,7 @@ async def metadata_loop() -> None:
      RUN_COUNTER = 0
      NODE_COUNTER = 0
      FROM_TIMESTAMP = to_timestamp
+     usage_tags, usage_measures = _lang_usage_metadata()

      base_payload = {
          "from_timestamp": from_timestamp,
@@ -131,10 +166,12 @@
              "user_app.uses_custom_auth": str(USES_CUSTOM_AUTH),
              "user_app.uses_thread_ttl": str(USES_THREAD_TTL),
              "user_app.uses_store_ttl": str(USES_STORE_TTL),
+             **usage_tags,
          },
          "measures": {
              "langgraph.platform.runs": runs,
              "langgraph.platform.nodes": nodes,
+             **usage_measures,
          },
          "logs": [],
      }
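For illustration, assuming a deployment with two Python graphs, one JS graph, and custom Python auth enabled (all hypothetical values), the new helper would fold entries like these into the periodic metadata payload:

tags, measures = _lang_usage_metadata()
# tags == {
#     "langgraph.platform.uses_custom_auth": "True",
#     "langgraph.platform.uses_js_custom_auth": "False",
#     "langgraph.platform.uses_js_proxy_middleware": "False",
# }
# measures == {
#     "langgraph.platform.py_graphs": 2,
#     "langgraph.platform.js_graphs": 1,
# }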
langgraph_api/serde.py CHANGED
@@ -152,6 +152,7 @@ def json_loads(content: bytes | Fragment | dict) -> Any:
      return orjson.loads(cast(bytes, content))


+ # Do not use. orjson holds the GIL the entire time it's running anyway.
  async def ajson_loads(content: bytes | Fragment) -> Any:
      return await asyncio.to_thread(json_loads, content)

langgraph_api/server.py CHANGED
@@ -53,11 +53,13 @@ middleware = []
  if config.ALLOW_PRIVATE_NETWORK:
      middleware.append(Middleware(PrivateNetworkMiddleware))

- if (
+ JS_PROXY_MIDDLEWARE_ENABLED = (
      config.HTTP_CONFIG
      and (app := config.HTTP_CONFIG.get("app"))
      and is_js_path(app.split(":")[0])
- ):
+ )
+
+ if JS_PROXY_MIDDLEWARE_ENABLED:
      from langgraph_api.js.remote import JSCustomHTTPProxyMiddleware

      middleware.append(Middleware(JSCustomHTTPProxyMiddleware))
langgraph_api/stream.py CHANGED
@@ -8,9 +8,11 @@ import langgraph.version
  import langsmith
  import structlog
  from langchain_core.messages import (
+     AIMessageChunk,
      # TODO: Remove explicit dependency
      BaseMessage,
      BaseMessageChunk,
+     ToolMessageChunk,
      convert_to_messages,
      message_chunk_to_message,
  )
@@ -286,11 +288,23 @@ async def astream_state(
              msg_, meta = cast(
                  tuple[BaseMessage | dict, dict[str, Any]], chunk
              )
-             msg = (
-                 convert_to_messages([msg_])[0]
-                 if isinstance(msg_, dict)
-                 else cast(BaseMessage, msg_)
-             )
+             is_chunk = False
+             if isinstance(msg_, dict):
+                 if (
+                     "chunk" in msg_.get("type", "").lower()
+                     or "chunk" in msg_.get("role", "").lower()
+                 ):
+                     if "ai" in msg_.get("role", "").lower():
+                         msg = AIMessageChunk(**msg_)  # type: ignore[arg-type]
+                     elif "tool" in msg_.get("role", "").lower():
+                         msg = ToolMessageChunk(**msg_)  # type: ignore[arg-type]
+                     else:
+                         msg = BaseMessageChunk(**msg_)  # type: ignore[arg-type]
+                     is_chunk = True
+                 else:
+                     msg = convert_to_messages([msg_])[0]
+             else:
+                 msg = cast(BaseMessage, msg_)
              if msg.id in messages:
                  messages[msg.id] += msg
              else:
@@ -302,7 +316,11 @@
                      if isinstance(msg, BaseMessageChunk)
                      else "messages/complete"
                  ),
-                 [message_chunk_to_message(messages[msg.id])],
+                 [
+                     message_chunk_to_message(messages[msg.id])
+                     if not is_chunk
+                     else messages[msg.id]
+                 ],
              )
          elif mode in stream_mode:
              if subgraphs and ns:
@@ -370,12 +388,23 @@
              msg_, meta = cast(
                  tuple[BaseMessage | dict, dict[str, Any]], chunk
              )
-             msg = (
-                 convert_to_messages([msg_])[0]
-                 if isinstance(msg_, dict)
-                 else cast(BaseMessage, msg_)
-             )
-
+             is_chunk = False
+             if isinstance(msg_, dict):
+                 if (
+                     "chunk" in msg_.get("type", "").lower()
+                     or "chunk" in msg_.get("role", "").lower()
+                 ):
+                     if "ai" in msg_.get("role", "").lower():
+                         msg = AIMessageChunk(**msg_)  # type: ignore[arg-type]
+                     elif "tool" in msg_.get("role", "").lower():
+                         msg = ToolMessageChunk(**msg_)  # type: ignore[arg-type]
+                     else:
+                         msg = BaseMessageChunk(**msg_)  # type: ignore[arg-type]
+                     is_chunk = True
+                 else:
+                     msg = convert_to_messages([msg_])[0]
+             else:
+                 msg = cast(BaseMessage, msg_)
              if msg.id in messages:
                  messages[msg.id] += msg
              else:
@@ -387,7 +416,11 @@
                      if isinstance(msg, BaseMessageChunk)
                      else "messages/complete"
                  ),
-                 [message_chunk_to_message(messages[msg.id])],
+                 [
+                     message_chunk_to_message(messages[msg.id])
+                     if not is_chunk
+                     else messages[msg.id]
+                 ],
              )
          elif mode in stream_mode:
              if subgraphs and ns:
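The point of constructing chunk classes from dict payloads (instead of routing everything through convert_to_messages) is that message chunks merge incrementally with +, which is exactly what the messages[msg.id] += msg accumulator relies on. A standalone sketch of that behavior:

from langchain_core.messages import AIMessageChunk

# Two partial chunks for the same message id, as a remote (e.g. JS) graph
# might emit them as plain dicts before this conversion.
first = AIMessageChunk(content="Hel", id="msg-1")
second = AIMessageChunk(content="lo", id="msg-1")

merged = first + second
print(merged.content)  # "Hello"
# Since merged is still a chunk, the new is_chunk branch emits it on
# "messages/partial" without forcing it through message_chunk_to_message.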
langgraph_api-0.4.21.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langgraph-api
- Version: 0.4.20
+ Version: 0.4.21
  Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
  License: Elastic-2.0
  License-File: LICENSE
@@ -11,7 +11,7 @@ Requires-Dist: httpx>=0.25.0
  Requires-Dist: jsonschema-rs<0.30,>=0.20.0
  Requires-Dist: langchain-core>=0.3.64
  Requires-Dist: langgraph-checkpoint>=2.0.23
- Requires-Dist: langgraph-runtime-inmem<0.13.0,>=0.12.0
+ Requires-Dist: langgraph-runtime-inmem<0.13.0,>=0.12.1
  Requires-Dist: langgraph-sdk>=0.2.0
  Requires-Dist: langgraph>=0.4.0
  Requires-Dist: langsmith>=0.3.45
langgraph_api-0.4.21.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
- langgraph_api/__init__.py,sha256=WCSj_sB9bdEmI7lwg4E_Vhy7TRLJmGYIYjp5bfMMYXE,23
+ langgraph_api/__init__.py,sha256=HRnyuWvhHjVmMtjn-Iir0_ZbxVK3sjpTYkAqLIRVMIE,23
  langgraph_api/asgi_transport.py,sha256=XtiLOu4WWsd-xizagBLzT5xUkxc9ZG9YqwvETBPjBFE,5161
  langgraph_api/asyncio.py,sha256=FEEkLm_N-15cbElo4vQ309MkDKBZuRqAYV8VJ1DocNw,9860
- langgraph_api/cli.py,sha256=-ruIeKi1imvS6GriOfRDZY-waV4SbWiJ0BEFAciPVYI,16330
+ langgraph_api/cli.py,sha256=mUSY41UBsPbJ3mGB01VH8UCAba1YCpA7h4P9tvhKAEw,19540
  langgraph_api/command.py,sha256=Q9XDRhnkCX7jyqW52_Rf2PPYKxjr-Z9BUHazI1HcmB8,817
  langgraph_api/config.py,sha256=r9mmbyZlhBuJLpnTkaOLcNH6ufFNqm_2eCiuOmhqRl0,12241
  langgraph_api/cron_scheduler.py,sha256=25wYzEQrhPEivZrAPYOmzLPDOQa-aFogU37mTXc9TJk,2566
@@ -12,25 +12,25 @@ langgraph_api/graph.py,sha256=h1m6rsLiCocvMO283LLU03A5cBycxAIxixXu9mwzqsQ,25056
  langgraph_api/http.py,sha256=fyK-H-0UfNy_BzuVW3aWWGvhRavmGAVMkDwDArryJ_4,5659
  langgraph_api/http_metrics.py,sha256=MU9ccXt7aBb0AJ2SWEjwtbtbJEWmeqSdx7-CI51e32o,5594
  langgraph_api/logging.py,sha256=qB6q_cUba31edE4_D6dBGhdiUTpW7sXAOepUjYb_R50,5216
- langgraph_api/metadata.py,sha256=fVsbwxVitAj4LGVYpCcadYeIFANEaNtcx6LBxQLcTqg,6949
+ langgraph_api/metadata.py,sha256=0eGYhXOW6UIVDj2Y5mOdSJz_RadgJG8xmUsC9WqwsiE,8342
  langgraph_api/patch.py,sha256=iLwSd9ZWoVj6MxozMyGyMvWWbE9RIP5eZX1dpCBSlSU,1480
  langgraph_api/queue_entrypoint.py,sha256=Y0Hu4QXNV7HPZWlBwuNCm8ehqD_n79AMk7ZWDZfBc4U,5631
  langgraph_api/route.py,sha256=EBhELuJ1He-ZYcAnR5YTImcIeDtWthDae5CHELBxPkM,5056
  langgraph_api/schema.py,sha256=AsgF0dIjBvDd_PDy20mGqB_IkBLgVvSj8qRKG_lPlec,8440
- langgraph_api/serde.py,sha256=Ovs37vfnXP9FHbQqIlA8J3ZdFMuFu-3vnS6EqEuaQ54,5440
- langgraph_api/server.py,sha256=uCAqPgCLJ6ckslLs0i_dacSR8mzuR0Y6PkkJYk0O3bE,7196
+ langgraph_api/serde.py,sha256=CLAwuiCRJY2-8GCd7gc3IVzJ8GiyBPmrrlLJjh1B0bw,5512
+ langgraph_api/server.py,sha256=C9TO7N0mzyrLulT_2FtaJfgfFbm2B4yyYTdAGPxgIeE,7255
  langgraph_api/sse.py,sha256=SLdtZmTdh5D8fbWrQjuY9HYLd2dg8Rmi6ZMmFMVc2iE,4204
  langgraph_api/state.py,sha256=AjkLbUQakIwK7oGzJ8oqubazRsXxG3vDMnRa0s0mzDM,4716
  langgraph_api/store.py,sha256=NIoNZojs6NbtG3VLBPQEFNttvp7XPkHAfjbQ3gY7aLY,4701
- langgraph_api/stream.py,sha256=V8jWwA3wBRenMk3WIFkt0OLXm_LhPwg_Yj_tP4Dc6iI,18970
+ langgraph_api/stream.py,sha256=TQbrMc1CeT5cMjS0NB-AUEyL7x6TgOfYdNp7c0-X-mY,20961
  langgraph_api/thread_ttl.py,sha256=KyHnvD0e1p1cV4Z_ZvKNVzDztuI2RBCUsUO2V7GlOSw,1951
  langgraph_api/traceblock.py,sha256=Qq5CUdefnMDaRDnyvBSWGBClEj-f3oO7NbH6fedxOSE,630
  langgraph_api/validation.py,sha256=86jftgOsMa7tkeshBw6imYe7zyUXPoVuf5Voh6dFiR8,5285
  langgraph_api/webhook.py,sha256=SvSM1rdnNtiH4q3JQYmAqJUk2Sable5xAcwOLuRhtlo,1723
  langgraph_api/worker.py,sha256=FQRw3kL9ynDv_LNgY_OjjPZQBuAvSQpsW6nECnABvDg,15354
  langgraph_api/api/__init__.py,sha256=raFkYH50tsO-KjRmDbGVoHCuxuH58u1lrZbr-MlITIY,6262
- langgraph_api/api/a2a.py,sha256=ChqlhgTq5fzWt0jbbEl8ec9rf-cvAP19Yge7OFv8-6E,34629
- langgraph_api/api/assistants.py,sha256=JFaBYp9BAXGaJ0yfy1SG_Mr-3xjeWSkdCHtmXpiAqP4,17290
+ langgraph_api/api/a2a.py,sha256=QEx-g0bTOREL-YaFsrcwA7oaix4VsaOr8hoQ7PF4Zwk,49382
+ langgraph_api/api/assistants.py,sha256=OX83GCWwGR8MuEJKIzAPEC4LC3Aghs5vD3NGLNnijaU,17268
  langgraph_api/api/mcp.py,sha256=qe10ZRMN3f-Hli-9TI8nbQyWvMeBb72YB1PZVbyqBQw,14418
  langgraph_api/api/meta.py,sha256=Qyj6r5czkVJ81tpD6liFY7tlrmFDsiSfBr-4X8HJpRc,4834
  langgraph_api/api/openapi.py,sha256=If-z1ckXt-Yu5bwQytK1LWyX_T7G46UtLfixgEP8hwc,11959
@@ -55,14 +55,14 @@ langgraph_api/js/client.http.mts,sha256=cvn8JV9go4pUMWkcug8FfSYWsp1wTaT8SgJaskqE
  langgraph_api/js/client.mts,sha256=gDvYiW7Qfl4re2YhZ5oNqtuvffnW_Sf7DK5aUbKB3vw,32330
  langgraph_api/js/errors.py,sha256=Cm1TKWlUCwZReDC5AQ6SgNIVGD27Qov2xcgHyf8-GXo,361
  langgraph_api/js/global.d.ts,sha256=j4GhgtQSZ5_cHzjSPcHgMJ8tfBThxrH-pUOrrJGteOU,196
- langgraph_api/js/package.json,sha256=9NJqixH8l_Vqvr2OeyhtXyuURFO1xcYabD8JOf315dQ,1335
+ langgraph_api/js/package.json,sha256=TLyPB9pZyZ1KQXC08NvbBbf8X5dKAF8WXc5cyjFtWZE,1335
  langgraph_api/js/remote.py,sha256=VmQ4Ie1V5z5gWEChXdY1m1kxzL3HE6AwKzfyIEfdE2k,38650
  langgraph_api/js/schema.py,sha256=M4fLtr50O1jck8H1hm_0W4cZOGYGdkrB7riLyCes4oY,438
  langgraph_api/js/sse.py,sha256=hHkbncnYnXNIbHhAWneGWYkHp4UhhhGB7-MYtDrY264,4141
  langgraph_api/js/traceblock.mts,sha256=QtGSN5VpzmGqDfbArrGXkMiONY94pMQ5CgzetT_bKYg,761
  langgraph_api/js/tsconfig.json,sha256=imCYqVnqFpaBoZPx8k1nO4slHIWBFsSlmCYhO73cpBs,341
  langgraph_api/js/ui.py,sha256=l9regrvKIxLOjH5SIYE2nhr8QCKLK1Q_1pZgxdL71X4,2488
- langgraph_api/js/yarn.lock,sha256=_aWrwb3UoBi5YXB0spkJjkQbGQfsofMk8ngxemN8ajw,84341
+ langgraph_api/js/yarn.lock,sha256=FCizZGxfI4SVoeAZWbezvonYBXsuvlWMUHX-1yACFz8,84352
  langgraph_api/js/src/graph.mts,sha256=9zTQNdtanI_CFnOwNRoamoCVHHQHGbNlbm91aRxDeOc,2675
  langgraph_api/js/src/load.hooks.mjs,sha256=xNVHq75W0Lk6MUKl1pQYrx-wtQ8_neiUyI6SO-k0ecM,2235
  langgraph_api/js/src/preload.mjs,sha256=8m3bYkf9iZLCQzKAYAdU8snxUwAG3dVLwGvAjfGfgIc,959
@@ -98,8 +98,8 @@ langgraph_runtime/store.py,sha256=7mowndlsIroGHv3NpTSOZDJR0lCuaYMBoTnTrewjslw,11
  LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
  logging.json,sha256=3RNjSADZmDq38eHePMm1CbP6qZ71AmpBtLwCmKU9Zgo,379
  openapi.json,sha256=21wu-NxdxyTQwZctNcEfRkLMnSBi0QhGAfwq5kg8XNU,172618
- langgraph_api-0.4.20.dist-info/METADATA,sha256=8sa6a-gRKNjbsccT6f5NteILecTGP_CuXoKbNCROXXE,3893
- langgraph_api-0.4.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- langgraph_api-0.4.20.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
- langgraph_api-0.4.20.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
- langgraph_api-0.4.20.dist-info/RECORD,,
+ langgraph_api-0.4.21.dist-info/METADATA,sha256=u0MQ-7iSelB0Sytf4gzdD7n_V94Sefh_NVRzz1znYCg,3893
+ langgraph_api-0.4.21.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langgraph_api-0.4.21.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
+ langgraph_api-0.4.21.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
+ langgraph_api-0.4.21.dist-info/RECORD,,