langgraph-api 0.5.0__tar.gz → 0.5.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langgraph-api might be problematic.

Files changed (147)
  1. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/PKG-INFO +2 -2
  2. langgraph_api-0.5.3/langgraph_api/__init__.py +1 -0
  3. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/assistants.py +64 -60
  4. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/config.py +7 -1
  5. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/client.py +91 -0
  6. langgraph_api-0.5.3/langgraph_api/grpc_ops/config_conversion.py +225 -0
  7. langgraph_api-0.5.3/langgraph_api/grpc_ops/generated/core_api_pb2.py +275 -0
  8. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/generated/core_api_pb2.pyi +14 -25
  9. langgraph_api-0.5.3/langgraph_api/grpc_ops/generated/engine_common_pb2.py +191 -0
  10. langgraph_api-0.5.3/langgraph_api/grpc_ops/generated/engine_common_pb2.pyi +637 -0
  11. langgraph_api-0.5.3/langgraph_api/grpc_ops/generated/engine_common_pb2_grpc.py +24 -0
  12. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/ops.py +75 -217
  13. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/package.json +5 -5
  14. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/graph.mts +20 -0
  15. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/yarn.lock +137 -187
  16. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/route.py +14 -4
  17. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/pyproject.toml +2 -2
  18. langgraph_api-0.5.0/langgraph_api/__init__.py +0 -1
  19. langgraph_api-0.5.0/langgraph_api/grpc_ops/generated/core_api_pb2.py +0 -276
  20. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/.gitignore +0 -0
  21. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/LICENSE +0 -0
  22. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/Makefile +0 -0
  23. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/README.md +0 -0
  24. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/.gitignore +0 -0
  25. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/Makefile +0 -0
  26. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/README.md +0 -0
  27. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/benchmark-runners/assistant.js +0 -0
  28. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/benchmark-runners/benchmark-runner.js +0 -0
  29. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/benchmark-runners/benchmarks.js +0 -0
  30. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/benchmark-runners/stream_write.js +0 -0
  31. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/benchmark-runners/wait_write.js +0 -0
  32. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/burst.js +0 -0
  33. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/capacity_k6.js +0 -0
  34. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/capacity_runner.mjs +0 -0
  35. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/capacity_urls.mjs +0 -0
  36. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/clean.js +0 -0
  37. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/graphs.js +0 -0
  38. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/package.json +0 -0
  39. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/ramp.js +0 -0
  40. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/reporting/dd_reporting.py +0 -0
  41. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/update-revision.js +0 -0
  42. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/benchmark/weather.js +0 -0
  43. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/constraints.txt +0 -0
  44. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/forbidden.txt +0 -0
  45. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/healthcheck.py +0 -0
  46. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/__init__.py +0 -0
  47. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/a2a.py +0 -0
  48. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/mcp.py +0 -0
  49. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/meta.py +0 -0
  50. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/openapi.py +0 -0
  51. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/runs.py +0 -0
  52. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/store.py +0 -0
  53. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/threads.py +0 -0
  54. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/ui.py +0 -0
  55. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/asgi_transport.py +0 -0
  56. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/asyncio.py +0 -0
  57. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/__init__.py +0 -0
  58. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/custom.py +0 -0
  59. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/langsmith/__init__.py +0 -0
  60. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/langsmith/backend.py +0 -0
  61. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/langsmith/client.py +0 -0
  62. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/middleware.py +0 -0
  63. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/noop.py +0 -0
  64. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/auth/studio_user.py +0 -0
  65. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/cli.py +0 -0
  66. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/command.py +0 -0
  67. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/cron_scheduler.py +0 -0
  68. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/errors.py +0 -0
  69. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/executor_entrypoint.py +0 -0
  70. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/feature_flags.py +0 -0
  71. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/graph.py +0 -0
  72. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/__init__.py +0 -0
  73. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/generated/__init__.py +0 -0
  74. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/generated/core_api_pb2_grpc.py +0 -0
  75. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/http.py +0 -0
  76. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/http_metrics.py +0 -0
  77. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/http_metrics_utils.py +0 -0
  78. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/.gitignore +0 -0
  79. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/.prettierrc +0 -0
  80. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/__init__.py +0 -0
  81. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/base.py +0 -0
  82. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/build.mts +0 -0
  83. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/client.http.mts +0 -0
  84. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/client.mts +0 -0
  85. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/errors.py +0 -0
  86. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/global.d.ts +0 -0
  87. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/remote.py +0 -0
  88. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/schema.py +0 -0
  89. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/load.hooks.mjs +0 -0
  90. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/preload.mjs +0 -0
  91. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/utils/files.mts +0 -0
  92. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/utils/importMap.mts +0 -0
  93. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/utils/pythonSchemas.mts +0 -0
  94. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/src/utils/serde.mts +0 -0
  95. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/sse.py +0 -0
  96. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/traceblock.mts +0 -0
  97. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/tsconfig.json +0 -0
  98. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/js/ui.py +0 -0
  99. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/logging.py +0 -0
  100. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/metadata.py +0 -0
  101. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/middleware/__init__.py +0 -0
  102. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/middleware/http_logger.py +0 -0
  103. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/middleware/private_network.py +0 -0
  104. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/middleware/request_id.py +0 -0
  105. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/models/__init__.py +0 -0
  106. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/models/run.py +0 -0
  107. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/patch.py +0 -0
  108. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/queue_entrypoint.py +0 -0
  109. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/schema.py +0 -0
  110. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/self_hosted_logs.py +0 -0
  111. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/self_hosted_metrics.py +0 -0
  112. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/serde.py +0 -0
  113. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/server.py +0 -0
  114. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/sse.py +0 -0
  115. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/state.py +0 -0
  116. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/store.py +0 -0
  117. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/stream.py +0 -0
  118. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/thread_ttl.py +0 -0
  119. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/traceblock.py +0 -0
  120. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/tunneling/cloudflare.py +0 -0
  121. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/__init__.py +0 -0
  122. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/cache.py +0 -0
  123. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/config.py +0 -0
  124. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/errors.py +0 -0
  125. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/future.py +0 -0
  126. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/headers.py +0 -0
  127. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/retriable_client.py +0 -0
  128. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/stream_codec.py +0 -0
  129. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/utils/uuids.py +0 -0
  130. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/validation.py +0 -0
  131. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/webhook.py +0 -0
  132. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/worker.py +0 -0
  133. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_license/__init__.py +0 -0
  134. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_license/validation.py +0 -0
  135. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/__init__.py +0 -0
  136. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/checkpoint.py +0 -0
  137. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/database.py +0 -0
  138. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/lifespan.py +0 -0
  139. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/metrics.py +0 -0
  140. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/ops.py +0 -0
  141. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/queue.py +0 -0
  142. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/retry.py +0 -0
  143. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_runtime/store.py +0 -0
  144. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/logging.json +0 -0
  145. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/openapi.json +0 -0
  146. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/scripts/create_license.py +0 -0
  147. {langgraph_api-0.5.0 → langgraph_api-0.5.3}/uv.lock +0 -0

{langgraph_api-0.5.0 → langgraph_api-0.5.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-api
-Version: 0.5.0
+Version: 0.5.3
 Author-email: Will Fu-Hinthorn <will@langchain.dev>, Josh Rogers <josh@langchain.dev>, Parker Rule <parker@langchain.dev>
 License: Elastic-2.0
 License-File: LICENSE
@@ -13,7 +13,7 @@ Requires-Dist: httpx>=0.25.0
 Requires-Dist: jsonschema-rs<0.30,>=0.20.0
 Requires-Dist: langchain-core>=0.3.64
 Requires-Dist: langgraph-checkpoint<4,>=3
-Requires-Dist: langgraph-runtime-inmem<0.16.0,>=0.15.0
+Requires-Dist: langgraph-runtime-inmem<0.17.0,>=0.16.0
 Requires-Dist: langgraph-sdk>=0.2.0
 Requires-Dist: langgraph<2,>=0.4.10
 Requires-Dist: langsmith>=0.3.45

langgraph_api-0.5.3/langgraph_api/__init__.py (new file)
@@ -0,0 +1 @@
+__version__ = "0.5.3"

{langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/api/assistants.py
@@ -1,3 +1,4 @@
+from functools import partial
 from typing import Any
 from uuid import uuid4
 
@@ -37,7 +38,7 @@ from langgraph_api.validation import (
     ConfigValidator,
 )
 from langgraph_runtime.checkpoint import Checkpointer
-from langgraph_runtime.database import connect
+from langgraph_runtime.database import connect as base_connect
 from langgraph_runtime.ops import Assistants
 from langgraph_runtime.retry import retry_db
 
@@ -45,6 +46,8 @@ logger = structlog.stdlib.get_logger(__name__)
 
 CrudAssistants = GrpcAssistants if FF_USE_CORE_API else Assistants
 
+connect = partial(base_connect, supports_core_api=FF_USE_CORE_API)
+
 EXCLUDED_CONFIG_SCHEMA = (
     "__pregel_checkpointer",
     "__pregel_store",
@@ -255,7 +258,7 @@ async def get_assistant_graph(
     assistant_id = get_assistant_id(str(request.path_params["assistant_id"]))
     validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
     async with connect() as conn:
-        assistant_ = await Assistants.get(conn, assistant_id)
+        assistant_ = await CrudAssistants.get(conn, assistant_id)
         assistant = await fetchone(assistant_)
         config = json_loads(assistant["config"])
         configurable = config.setdefault("configurable", {})
@@ -312,43 +315,44 @@ async def get_assistant_subgraphs(
     assistant_id = request.path_params["assistant_id"]
     validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
     async with connect() as conn:
-        assistant_ = await Assistants.get(conn, assistant_id)
+        assistant_ = await CrudAssistants.get(conn, assistant_id)
         assistant = await fetchone(assistant_)
-        config = json_loads(assistant["config"])
-        configurable = config.setdefault("configurable", {})
-        configurable.update(get_configurable_headers(request.headers))
-        async with get_graph(
-            assistant["graph_id"],
-            config,
-            checkpointer=Checkpointer(),
-            store=(await api_store.get_store()),
-        ) as graph:
-            namespace = request.path_params.get("namespace")
-
-            if isinstance(graph, BaseRemotePregel):
-                return ApiResponse(
-                    await graph.fetch_subgraphs(
+
+    config = json_loads(assistant["config"])
+    configurable = config.setdefault("configurable", {})
+    configurable.update(get_configurable_headers(request.headers))
+    async with get_graph(
+        assistant["graph_id"],
+        config,
+        checkpointer=Checkpointer(),
+        store=(await api_store.get_store()),
+    ) as graph:
+        namespace = request.path_params.get("namespace")
+
+        if isinstance(graph, BaseRemotePregel):
+            return ApiResponse(
+                await graph.fetch_subgraphs(
+                    namespace=namespace,
+                    recurse=request.query_params.get("recurse", "False")
+                    in ("true", "True"),
+                )
+            )
+
+        try:
+            return ApiResponse(
+                {
+                    ns: _graph_schemas(subgraph)
+                    async for ns, subgraph in graph.aget_subgraphs(
                         namespace=namespace,
                         recurse=request.query_params.get("recurse", "False")
                         in ("true", "True"),
                     )
-                )
-
-            try:
-                return ApiResponse(
-                    {
-                        ns: _graph_schemas(subgraph)
-                        async for ns, subgraph in graph.aget_subgraphs(
-                            namespace=namespace,
-                            recurse=request.query_params.get("recurse", "False")
-                            in ("true", "True"),
-                        )
-                    }
-                )
-            except NotImplementedError:
-                raise HTTPException(
-                    422, detail="The graph does not support visualization"
-                ) from None
+                }
+            )
+        except NotImplementedError:
+            raise HTTPException(
+                422, detail="The graph does not support visualization"
+            ) from None
 
 
 @retry_db
@@ -359,40 +363,40 @@ async def get_assistant_schemas(
     assistant_id = request.path_params["assistant_id"]
     validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
     async with connect() as conn:
-        assistant_ = await Assistants.get(conn, assistant_id)
-        # TODO Implementa cache so we can de-dent and release this connection.
+        assistant_ = await CrudAssistants.get(conn, assistant_id)
         assistant = await fetchone(assistant_)
-        config = json_loads(assistant["config"])
-        configurable = config.setdefault("configurable", {})
-        configurable.update(get_configurable_headers(request.headers))
-        async with get_graph(
-            assistant["graph_id"],
-            config,
-            checkpointer=Checkpointer(),
-            store=(await api_store.get_store()),
-        ) as graph:
-            if isinstance(graph, BaseRemotePregel):
-                schemas = await graph.fetch_state_schema()
-                return ApiResponse(
-                    {
-                        "graph_id": assistant["graph_id"],
-                        "input_schema": schemas.get("input"),
-                        "output_schema": schemas.get("output"),
-                        "state_schema": schemas.get("state"),
-                        "config_schema": schemas.get("config"),
-                        "context_schema": schemas.get("context"),
-                    }
-                )
-
-            schemas = _graph_schemas(graph)
 
+    config = json_loads(assistant["config"])
+    configurable = config.setdefault("configurable", {})
+    configurable.update(get_configurable_headers(request.headers))
+    async with get_graph(
+        assistant["graph_id"],
+        config,
+        checkpointer=Checkpointer(),
+        store=(await api_store.get_store()),
+    ) as graph:
+        if isinstance(graph, BaseRemotePregel):
+            schemas = await graph.fetch_state_schema()
             return ApiResponse(
                 {
                     "graph_id": assistant["graph_id"],
-                    **schemas,
+                    "input_schema": schemas.get("input"),
+                    "output_schema": schemas.get("output"),
+                    "state_schema": schemas.get("state"),
+                    "config_schema": schemas.get("config"),
+                    "context_schema": schemas.get("context"),
                 }
             )
 
+        schemas = _graph_schemas(graph)
+
+        return ApiResponse(
+            {
+                "graph_id": assistant["graph_id"],
+                **schemas,
+            }
+        )
+
 
 @retry_db
 async def patch_assistant(
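
Note on the assistants.py changes above: 0.5.3 selects between the gRPC-backed assistant ops and the existing in-process ops once at import time via FF_USE_CORE_API, and pre-binds that choice into the connection factory with functools.partial. Below is a minimal stand-alone sketch of the same dispatch pattern; the names CrudAssistants, connect, base_connect, GrpcAssistants, Assistants, and FF_USE_CORE_API come from the diff, but the bodies here are stand-ins rather than the package's real implementations.

from functools import partial

FF_USE_CORE_API = True  # stand-in; the package reads this flag from its own config/feature flags


class Assistants:        # stand-in for langgraph_runtime.ops.Assistants
    @staticmethod
    async def get(conn, assistant_id): ...


class GrpcAssistants:    # stand-in for the gRPC-backed implementation
    @staticmethod
    async def get(conn, assistant_id): ...


def base_connect(*, supports_core_api: bool):
    """Stand-in for langgraph_runtime.database.connect (an async context manager in the package)."""


# The flag is evaluated once at import time; every call site keeps using CrudAssistants / connect().
CrudAssistants = GrpcAssistants if FF_USE_CORE_API else Assistants
connect = partial(base_connect, supports_core_api=FF_USE_CORE_API)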

{langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/config.py
@@ -291,6 +291,13 @@ ALLOW_PRIVATE_NETWORK = env("ALLOW_PRIVATE_NETWORK", cast=bool, default=False)
 See https://developer.chrome.com/blog/private-network-access-update-2024-03
 """
 
+# gRPC client pool size for persistence server.
+GRPC_CLIENT_POOL_SIZE = env("GRPC_CLIENT_POOL_SIZE", cast=int, default=5)
+
+# Minimum payload size to use the dedicated thread pool for JSON parsing.
+# (Otherwise, the payload is parsed directly in the event loop.)
+JSON_THREAD_POOL_MINIMUM_SIZE_BYTES = 100 * 1024  # 100 KB
+
 HTTP_CONFIG = env("LANGGRAPH_HTTP", cast=_parse_schema(HttpConfig), default=None)
 STORE_CONFIG = env("LANGGRAPH_STORE", cast=_parse_schema(StoreConfig), default=None)
 
@@ -395,7 +402,6 @@ N_JOBS_PER_WORKER = env("N_JOBS_PER_WORKER", cast=int, default=10)
 BG_JOB_TIMEOUT_SECS = env("BG_JOB_TIMEOUT_SECS", cast=float, default=3600)
 
 FF_CRONS_ENABLED = env("FF_CRONS_ENABLED", cast=bool, default=True)
-FF_RICH_THREADS = env("FF_RICH_THREADS", cast=bool, default=True)
 FF_LOG_DROPPED_EVENTS = env("FF_LOG_DROPPED_EVENTS", cast=bool, default=False)
 FF_LOG_QUERY_AND_PARAMS = env("FF_LOG_QUERY_AND_PARAMS", cast=bool, default=False)
 
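
The two settings added to config.py are consumed elsewhere in this release: the pool size by the new gRPC client pool below, and the byte threshold presumably by the request-parsing path (langgraph_api/route.py is also touched in this release). Below is a minimal sketch of the size-gated parsing pattern the second comment describes; it is illustrative only, not the actual route.py code, which may use a dedicated executor rather than asyncio.to_thread.

import asyncio

import orjson

JSON_THREAD_POOL_MINIMUM_SIZE_BYTES = 100 * 1024  # mirrors the new config value


async def parse_body(payload: bytes):
    # Small payloads are cheap enough to decode inline on the event loop;
    # large ones are offloaded to a thread so they do not block other requests.
    if len(payload) < JSON_THREAD_POOL_MINIMUM_SIZE_BYTES:
        return orjson.loads(payload)
    return await asyncio.to_thread(orjson.loads, payload)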

{langgraph_api-0.5.0 → langgraph_api-0.5.3}/langgraph_api/grpc_ops/client.py
@@ -1,5 +1,6 @@
 """gRPC client wrapper for LangGraph persistence services."""
 
+import asyncio
 import os
 
 import structlog
@@ -10,6 +11,10 @@ from .generated.core_api_pb2_grpc import AdminStub, AssistantsStub, ThreadsStub
 logger = structlog.stdlib.get_logger(__name__)
 
 
+# Shared global client pool
+_client_pool: "GrpcClientPool | None" = None
+
+
 class GrpcClient:
     """gRPC client for LangGraph persistence services."""
 
@@ -90,3 +95,89 @@ class GrpcClient:
                 "Client not connected. Use async context manager or call connect() first."
             )
         return self._admin_stub
+
+
+class GrpcClientPool:
+    """Pool of gRPC clients for load distribution."""
+
+    def __init__(self, pool_size: int = 5, server_address: str | None = None):
+        self.pool_size = pool_size
+        self.server_address = server_address
+        self.clients: list[GrpcClient] = []
+        self._current_index = 0
+        self._init_lock = asyncio.Lock()
+        self._initialized = False
+
+    async def _initialize(self):
+        """Initialize the pool of clients."""
+        async with self._init_lock:
+            if self._initialized:
+                return
+
+            await logger.ainfo(
+                "Initializing gRPC client pool",
+                pool_size=self.pool_size,
+                server_address=self.server_address,
+            )
+
+            for _ in range(self.pool_size):
+                client = GrpcClient(server_address=self.server_address)
+                await client.connect()
+                self.clients.append(client)
+
+            self._initialized = True
+            await logger.ainfo(
+                f"gRPC client pool initialized with {self.pool_size} clients"
+            )
+
+    async def get_client(self) -> GrpcClient:
+        """Get next client using round-robin selection.
+
+        Round-robin without strict locking - slight races are acceptable
+        and result in good enough distribution under high load.
+        """
+        if not self._initialized:
+            await self._initialize()
+
+        idx = self._current_index % self.pool_size
+        self._current_index = idx + 1
+        return self.clients[idx]
+
+    async def close(self):
+        """Close all clients in the pool."""
+        if self._initialized:
+            await logger.ainfo(f"Closing gRPC client pool ({self.pool_size} clients)")
+            for client in self.clients:
+                await client.close()
+            self.clients.clear()
+            self._initialized = False
+
+
+async def get_shared_client() -> GrpcClient:
+    """Get a gRPC client from the shared pool.
+
+    Uses a pool of channels for better performance under high concurrency.
+    Each channel is a separate TCP connection that can handle ~100-200
+    concurrent streams effectively.
+
+    Returns:
+        A GrpcClient instance from the pool
+    """
+    global _client_pool
+    if _client_pool is None:
+        from langgraph_api import config
+
+        _client_pool = GrpcClientPool(
+            pool_size=config.GRPC_CLIENT_POOL_SIZE,
+            server_address=os.getenv("GRPC_SERVER_ADDRESS"),
+        )
+
+    return await _client_pool.get_client()
+
+
+async def close_shared_client():
+    """Close the shared gRPC client pool."""
+    global _client_pool
+    if _client_pool is not None:
+        await _client_pool.close()
+        _client_pool = None
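
As far as the diff shows, callers are meant to request a pooled client instead of constructing GrpcClient directly, and the pool is torn down once at shutdown. A hypothetical usage sketch follows: only get_shared_client and close_shared_client come from the diff; the handler and shutdown hook are illustrative, and the stub attribute is assumed from the admin_stub property visible on GrpcClient above.

from langgraph_api.grpc_ops.client import close_shared_client, get_shared_client


async def lookup_assistant(assistant_id: str) -> None:
    # The first call lazily builds GRPC_CLIENT_POOL_SIZE connected clients;
    # subsequent calls round-robin across them.
    client = await get_shared_client()
    # ... issue RPCs through the client's stubs (e.g. client.admin_stub)


async def on_shutdown() -> None:
    # Close every pooled channel and reset the module-level pool.
    await close_shared_client()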

langgraph_api-0.5.3/langgraph_api/grpc_ops/config_conversion.py (new file)
@@ -0,0 +1,225 @@
+"""Conversion utils for the RunnableConfig."""
+
+# THIS IS DUPLICATED
+# TODO: WFH - Deduplicate with the executor logic by moving into a separate package
+# Sequencing in the next PR.
+from typing import Any, cast
+
+import orjson
+from langchain_core.runnables.config import RunnableConfig
+
+from langgraph_api.grpc_ops.generated import engine_common_pb2
+
+CONFIG_KEY_SEND = "__pregel_send"
+CONFIG_KEY_READ = "__pregel_read"
+CONFIG_KEY_RESUMING = "__pregel_resuming"
+CONFIG_KEY_TASK_ID = "__pregel_task_id"
+CONFIG_KEY_THREAD_ID = "thread_id"
+CONFIG_KEY_CHECKPOINT_MAP = "checkpoint_map"
+CONFIG_KEY_CHECKPOINT_ID = "checkpoint_id"
+CONFIG_KEY_CHECKPOINT_NS = "checkpoint_ns"
+CONFIG_KEY_SCRATCHPAD = "__pregel_scratchpad"
+CONFIG_KEY_DURABILITY = "__pregel_durability"
+CONFIG_KEY_GRAPH_ID = "graph_id"
+
+
+def _durability_to_proto(
+    durability: str,
+) -> engine_common_pb2.Durability:
+    match durability:
+        case "async":
+            return engine_common_pb2.Durability.ASYNC
+        case "sync":
+            return engine_common_pb2.Durability.SYNC
+        case "exit":
+            return engine_common_pb2.Durability.EXIT
+        case _:
+            raise ValueError(f"invalid durability: {durability}")
+
+
+def _durability_from_proto(
+    durability: engine_common_pb2.Durability,
+) -> str:
+    match durability:
+        case engine_common_pb2.Durability.ASYNC:
+            return "async"
+        case engine_common_pb2.Durability.SYNC:
+            return "sync"
+        case engine_common_pb2.Durability.EXIT:
+            return "exit"
+        case _:
+            raise ValueError(f"invalid durability: {durability}")
+
+
+def config_to_proto(
+    config: RunnableConfig,
+) -> engine_common_pb2.EngineRunnableConfig | None:
+    # Prepare kwargs for construction
+    if not config:
+        return None
+    cp = {**config}
+    pb_config = engine_common_pb2.EngineRunnableConfig()
+    for k, v in (cp.pop("metadata", None) or {}).items():
+        if k == "run_attempt":
+            pb_config.run_attempt = v
+        elif k == "run_id":
+            pb_config.server_run_id = str(v)
+        else:
+            pb_config.metadata_json[k] = orjson.dumps(v)
+    if run_name := cp.pop("run_name", None):
+        pb_config.run_name = run_name
+
+    if run_id := cp.pop("run_id", None):
+        pb_config.run_id = str(run_id)
+
+    if (max_concurrency := cp.pop("max_concurrency", None)) and isinstance(
+        max_concurrency, int
+    ):
+        pb_config.max_concurrency = max_concurrency
+
+    if (recursion_limit := cp.pop("recursion_limit", None)) and isinstance(
+        recursion_limit, int
+    ):
+        pb_config.recursion_limit = recursion_limit
+
+    # Handle collections after construction
+    if (tags := cp.pop("tags", None)) and isinstance(tags, list):
+        pb_config.tags.extend(tags)
+
+    if (configurable := cp.pop("configurable", None)) and isinstance(
+        configurable, dict
+    ):
+        _inject_configurable_into_proto(configurable, pb_config)
+    if cp:
+        pb_config.extra_json.update({k: orjson.dumps(v) for k, v in cp.items()})
+
+    return pb_config
+
+
+RESTRICTED_RESERVED_CONFIGURABLE_KEYS = {
+    CONFIG_KEY_SEND,
+    CONFIG_KEY_READ,
+    CONFIG_KEY_SCRATCHPAD,
+}
+
+
+def _inject_configurable_into_proto(
+    configurable: dict[str, Any], proto: engine_common_pb2.EngineRunnableConfig
+) -> None:
+    extra = {}
+    for key, value in configurable.items():
+        if key == CONFIG_KEY_RESUMING:
+            proto.resuming = bool(value)
+        elif key == CONFIG_KEY_TASK_ID:
+            proto.task_id = str(value)
+        elif key == CONFIG_KEY_THREAD_ID:
+            proto.thread_id = str(value)
+        elif key == CONFIG_KEY_CHECKPOINT_MAP:
+            proto.checkpoint_map.update(cast(dict[str, str], value))
+        elif key == CONFIG_KEY_CHECKPOINT_ID:
+            proto.checkpoint_id = str(value)
+        elif key == CONFIG_KEY_CHECKPOINT_NS:
+            proto.checkpoint_ns = str(value)
+        elif key == CONFIG_KEY_DURABILITY and value:
+            proto.durability = _durability_to_proto(value)
+        elif key not in RESTRICTED_RESERVED_CONFIGURABLE_KEYS:
+            extra[key] = value
+    if extra:
+        proto.extra_configurable_json.update(
+            {k: orjson.dumps(v) for k, v in extra.items()}
+        )
+
+
+def context_to_json_bytes(context: dict[str, Any] | Any) -> bytes | None:
+    """Convert context to JSON bytes for proto serialization."""
+    if context is None:
+        return None
+
+    # Convert dataclass or other objects to dict if needed
+    if hasattr(context, "__dict__") and not hasattr(context, "items"):
+        # Convert dataclass to dict
+        context_dict = context.__dict__
+    elif hasattr(context, "items"):
+        # Already a dict-like object
+        context_dict = dict(context)
+    else:
+        # Try to convert to dict using vars()
+        context_dict = vars(context) if hasattr(context, "__dict__") else {}
+
+    return orjson.dumps(context_dict)
+
+
+def config_from_proto(
+    config_proto: engine_common_pb2.EngineRunnableConfig | None,
+) -> RunnableConfig:
+    if not config_proto:
+        return RunnableConfig(tags=[], metadata={}, configurable={})
+
+    configurable = _configurable_from_proto(config_proto)
+
+    metadata = {}
+    for k, v in config_proto.metadata_json.items():
+        metadata[k] = orjson.loads(v)
+    if config_proto.HasField("run_attempt"):
+        metadata["run_attempt"] = config_proto.run_attempt
+    if config_proto.HasField("server_run_id"):
+        metadata["run_id"] = config_proto.server_run_id
+
+    config = RunnableConfig()
+    if config_proto.extra_json:
+        for k, v in config_proto.extra_json.items():
+            config[k] = orjson.loads(v)  # type: ignore[invalid-key]
+    if config_proto.tags:
+        config["tags"] = list(config_proto.tags)
+    if metadata:
+        config["metadata"] = metadata
+    if configurable:
+        config["configurable"] = configurable
+    if config_proto.HasField("run_name"):
+        config["run_name"] = config_proto.run_name
+
+    if config_proto.HasField("max_concurrency"):
+        config["max_concurrency"] = config_proto.max_concurrency
+
+    if config_proto.HasField("recursion_limit"):
+        config["recursion_limit"] = config_proto.recursion_limit
+
+    return config
+
+
+def _configurable_from_proto(
+    config_proto: engine_common_pb2.EngineRunnableConfig,
+) -> dict[str, Any]:
+    configurable = {}
+
+    if config_proto.HasField("resuming"):
+        configurable[CONFIG_KEY_RESUMING] = config_proto.resuming
+
+    if config_proto.HasField("task_id"):
+        configurable[CONFIG_KEY_TASK_ID] = config_proto.task_id
+
+    if config_proto.HasField("thread_id"):
+        configurable[CONFIG_KEY_THREAD_ID] = config_proto.thread_id
+
+    if config_proto.HasField("checkpoint_id"):
+        configurable[CONFIG_KEY_CHECKPOINT_ID] = config_proto.checkpoint_id
+
+    if config_proto.HasField("checkpoint_ns"):
+        configurable[CONFIG_KEY_CHECKPOINT_NS] = config_proto.checkpoint_ns
+
+    if config_proto.HasField("durability"):
+        durability = _durability_from_proto(config_proto.durability)
+        if durability:
+            configurable[CONFIG_KEY_DURABILITY] = durability
+
+    if config_proto.HasField("graph_id"):
+        configurable[CONFIG_KEY_GRAPH_ID] = config_proto.graph_id
+
+    if len(config_proto.checkpoint_map) > 0:
+        configurable[CONFIG_KEY_CHECKPOINT_MAP] = dict(config_proto.checkpoint_map)
+
+    if len(config_proto.extra_configurable_json) > 0:
+        for k, v in config_proto.extra_configurable_json.items():
+            configurable[k] = orjson.loads(v)
+
+    return configurable
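
A minimal round-trip sketch of the two public helpers in this new module, assuming the generated engine_common_pb2 bindings are importable; the field values below are illustrative.

from langgraph_api.grpc_ops.config_conversion import config_from_proto, config_to_proto

config = {
    "run_name": "demo-run",
    "tags": ["demo"],
    "recursion_limit": 25,
    "configurable": {"thread_id": "t-1", "user_tier": "pro"},
}

# Well-known keys become typed proto fields; anything else is orjson-encoded
# into the metadata_json / extra_json / extra_configurable_json maps.
proto = config_to_proto(config)

# Typed fields and the JSON maps are folded back into a RunnableConfig dict.
restored = config_from_proto(proto)
assert restored["configurable"]["thread_id"] == "t-1"
assert restored["recursion_limit"] == 25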