langgraph-api 0.4.1__py3-none-any.whl → 0.7.3__py3-none-any.whl

This diff compares the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
Files changed (135)
  1. langgraph_api/__init__.py +1 -1
  2. langgraph_api/api/__init__.py +111 -51
  3. langgraph_api/api/a2a.py +1610 -0
  4. langgraph_api/api/assistants.py +212 -89
  5. langgraph_api/api/mcp.py +3 -3
  6. langgraph_api/api/meta.py +52 -28
  7. langgraph_api/api/openapi.py +27 -17
  8. langgraph_api/api/profile.py +108 -0
  9. langgraph_api/api/runs.py +342 -195
  10. langgraph_api/api/store.py +19 -2
  11. langgraph_api/api/threads.py +209 -27
  12. langgraph_api/asgi_transport.py +14 -9
  13. langgraph_api/asyncio.py +14 -4
  14. langgraph_api/auth/custom.py +52 -37
  15. langgraph_api/auth/langsmith/backend.py +4 -3
  16. langgraph_api/auth/langsmith/client.py +13 -8
  17. langgraph_api/cli.py +230 -133
  18. langgraph_api/command.py +5 -3
  19. langgraph_api/config/__init__.py +532 -0
  20. langgraph_api/config/_parse.py +58 -0
  21. langgraph_api/config/schemas.py +431 -0
  22. langgraph_api/cron_scheduler.py +17 -1
  23. langgraph_api/encryption/__init__.py +15 -0
  24. langgraph_api/encryption/aes_json.py +158 -0
  25. langgraph_api/encryption/context.py +35 -0
  26. langgraph_api/encryption/custom.py +280 -0
  27. langgraph_api/encryption/middleware.py +632 -0
  28. langgraph_api/encryption/shared.py +63 -0
  29. langgraph_api/errors.py +12 -1
  30. langgraph_api/executor_entrypoint.py +11 -6
  31. langgraph_api/feature_flags.py +29 -0
  32. langgraph_api/graph.py +176 -76
  33. langgraph_api/grpc/client.py +313 -0
  34. langgraph_api/grpc/config_conversion.py +231 -0
  35. langgraph_api/grpc/generated/__init__.py +29 -0
  36. langgraph_api/grpc/generated/checkpointer_pb2.py +63 -0
  37. langgraph_api/grpc/generated/checkpointer_pb2.pyi +99 -0
  38. langgraph_api/grpc/generated/checkpointer_pb2_grpc.py +329 -0
  39. langgraph_api/grpc/generated/core_api_pb2.py +216 -0
  40. langgraph_api/grpc/generated/core_api_pb2.pyi +905 -0
  41. langgraph_api/grpc/generated/core_api_pb2_grpc.py +1621 -0
  42. langgraph_api/grpc/generated/engine_common_pb2.py +219 -0
  43. langgraph_api/grpc/generated/engine_common_pb2.pyi +722 -0
  44. langgraph_api/grpc/generated/engine_common_pb2_grpc.py +24 -0
  45. langgraph_api/grpc/generated/enum_cancel_run_action_pb2.py +37 -0
  46. langgraph_api/grpc/generated/enum_cancel_run_action_pb2.pyi +12 -0
  47. langgraph_api/grpc/generated/enum_cancel_run_action_pb2_grpc.py +24 -0
  48. langgraph_api/grpc/generated/enum_control_signal_pb2.py +37 -0
  49. langgraph_api/grpc/generated/enum_control_signal_pb2.pyi +16 -0
  50. langgraph_api/grpc/generated/enum_control_signal_pb2_grpc.py +24 -0
  51. langgraph_api/grpc/generated/enum_durability_pb2.py +37 -0
  52. langgraph_api/grpc/generated/enum_durability_pb2.pyi +16 -0
  53. langgraph_api/grpc/generated/enum_durability_pb2_grpc.py +24 -0
  54. langgraph_api/grpc/generated/enum_multitask_strategy_pb2.py +37 -0
  55. langgraph_api/grpc/generated/enum_multitask_strategy_pb2.pyi +16 -0
  56. langgraph_api/grpc/generated/enum_multitask_strategy_pb2_grpc.py +24 -0
  57. langgraph_api/grpc/generated/enum_run_status_pb2.py +37 -0
  58. langgraph_api/grpc/generated/enum_run_status_pb2.pyi +22 -0
  59. langgraph_api/grpc/generated/enum_run_status_pb2_grpc.py +24 -0
  60. langgraph_api/grpc/generated/enum_stream_mode_pb2.py +37 -0
  61. langgraph_api/grpc/generated/enum_stream_mode_pb2.pyi +28 -0
  62. langgraph_api/grpc/generated/enum_stream_mode_pb2_grpc.py +24 -0
  63. langgraph_api/grpc/generated/enum_thread_status_pb2.py +37 -0
  64. langgraph_api/grpc/generated/enum_thread_status_pb2.pyi +16 -0
  65. langgraph_api/grpc/generated/enum_thread_status_pb2_grpc.py +24 -0
  66. langgraph_api/grpc/generated/enum_thread_stream_mode_pb2.py +37 -0
  67. langgraph_api/grpc/generated/enum_thread_stream_mode_pb2.pyi +16 -0
  68. langgraph_api/grpc/generated/enum_thread_stream_mode_pb2_grpc.py +24 -0
  69. langgraph_api/grpc/generated/errors_pb2.py +39 -0
  70. langgraph_api/grpc/generated/errors_pb2.pyi +21 -0
  71. langgraph_api/grpc/generated/errors_pb2_grpc.py +24 -0
  72. langgraph_api/grpc/ops/__init__.py +370 -0
  73. langgraph_api/grpc/ops/assistants.py +424 -0
  74. langgraph_api/grpc/ops/runs.py +792 -0
  75. langgraph_api/grpc/ops/threads.py +1013 -0
  76. langgraph_api/http.py +16 -5
  77. langgraph_api/http_metrics.py +15 -35
  78. langgraph_api/http_metrics_utils.py +38 -0
  79. langgraph_api/js/build.mts +1 -1
  80. langgraph_api/js/client.http.mts +13 -7
  81. langgraph_api/js/client.mts +2 -5
  82. langgraph_api/js/package.json +29 -28
  83. langgraph_api/js/remote.py +56 -30
  84. langgraph_api/js/src/graph.mts +20 -0
  85. langgraph_api/js/sse.py +2 -2
  86. langgraph_api/js/ui.py +1 -1
  87. langgraph_api/js/yarn.lock +1204 -1006
  88. langgraph_api/logging.py +29 -2
  89. langgraph_api/metadata.py +99 -28
  90. langgraph_api/middleware/http_logger.py +7 -2
  91. langgraph_api/middleware/private_network.py +7 -7
  92. langgraph_api/models/run.py +54 -93
  93. langgraph_api/otel_context.py +205 -0
  94. langgraph_api/patch.py +5 -3
  95. langgraph_api/queue_entrypoint.py +154 -65
  96. langgraph_api/route.py +47 -5
  97. langgraph_api/schema.py +88 -10
  98. langgraph_api/self_hosted_logs.py +124 -0
  99. langgraph_api/self_hosted_metrics.py +450 -0
  100. langgraph_api/serde.py +79 -37
  101. langgraph_api/server.py +138 -60
  102. langgraph_api/state.py +4 -3
  103. langgraph_api/store.py +25 -16
  104. langgraph_api/stream.py +80 -29
  105. langgraph_api/thread_ttl.py +31 -13
  106. langgraph_api/timing/__init__.py +25 -0
  107. langgraph_api/timing/profiler.py +200 -0
  108. langgraph_api/timing/timer.py +318 -0
  109. langgraph_api/utils/__init__.py +53 -8
  110. langgraph_api/utils/cache.py +47 -10
  111. langgraph_api/utils/config.py +2 -1
  112. langgraph_api/utils/errors.py +77 -0
  113. langgraph_api/utils/future.py +10 -6
  114. langgraph_api/utils/headers.py +76 -2
  115. langgraph_api/utils/retriable_client.py +74 -0
  116. langgraph_api/utils/stream_codec.py +315 -0
  117. langgraph_api/utils/uuids.py +29 -62
  118. langgraph_api/validation.py +9 -0
  119. langgraph_api/webhook.py +120 -6
  120. langgraph_api/worker.py +55 -24
  121. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/METADATA +16 -8
  122. langgraph_api-0.7.3.dist-info/RECORD +168 -0
  123. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/WHEEL +1 -1
  124. langgraph_runtime/__init__.py +1 -0
  125. langgraph_runtime/routes.py +11 -0
  126. logging.json +1 -3
  127. openapi.json +839 -478
  128. langgraph_api/config.py +0 -387
  129. langgraph_api/js/isolate-0x130008000-46649-46649-v8.log +0 -4430
  130. langgraph_api/js/isolate-0x138008000-44681-44681-v8.log +0 -4430
  131. langgraph_api/js/package-lock.json +0 -3308
  132. langgraph_api-0.4.1.dist-info/RECORD +0 -107
  133. /langgraph_api/{utils.py → grpc/__init__.py} +0 -0
  134. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/entry_points.txt +0 -0
  135. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/licenses/LICENSE +0 -0
langgraph_api/server.py CHANGED
@@ -1,38 +1,44 @@
 # MONKEY PATCH: Patch Starlette to fix an error in the library
-# ruff: noqa: E402
-import langgraph_api.patch  # noqa: F401,I001
-import sys
-import os
-
 # WARNING: Keep the import above before other code runs as it
 # patches an error in the Starlette library.
+import langgraph_api.patch  # noqa: F401,I001
+import langgraph_api.timing as timing
 import logging
+import os
+import sys
 import typing
 
 if not (
     (disable_truststore := os.getenv("DISABLE_TRUSTSTORE"))
     and disable_truststore.lower() == "true"
 ):
-    import truststore  # noqa: F401
+    import truststore
 
-    truststore.inject_into_ssl()  # noqa: F401
+    truststore.inject_into_ssl()
 
-from contextlib import asynccontextmanager
 
 import jsonschema_rs
 import structlog
 from langgraph.errors import EmptyInputError, InvalidUpdateError
 from langgraph_sdk.client import configure_loopback_transports
 from starlette.applications import Starlette
+from starlette.exceptions import HTTPException
 from starlette.middleware import Middleware
 from starlette.middleware.cors import CORSMiddleware
-from starlette.routing import Mount
+from starlette.routing import BaseRoute, Mount
 from starlette.types import Receive, Scope, Send
 
 import langgraph_api.config as config
-from langgraph_api.api import meta_routes, routes, user_router
+from langgraph_api.api import (
+    middleware_for_protected_routes,
+    protected_routes,
+    shadowable_meta_routes,
+    unshadowable_meta_routes,
+    user_router,
+)
 from langgraph_api.api.openapi import set_custom_spec
 from langgraph_api.errors import (
+    http_exception_handler,
     overloaded_error_handler,
     validation_error_handler,
     value_error_handler,
@@ -48,21 +54,23 @@ from langgraph_runtime.retry import OVERLOADED_EXCEPTIONS
 logging.captureWarnings(True)
 logger = structlog.stdlib.get_logger(__name__)
 
-middleware = []
+global_middleware = []
 
 if config.ALLOW_PRIVATE_NETWORK:
-    middleware.append(Middleware(PrivateNetworkMiddleware))
+    global_middleware.append(Middleware(PrivateNetworkMiddleware))
 
-if (
+JS_PROXY_MIDDLEWARE_ENABLED = (
     config.HTTP_CONFIG
     and (app := config.HTTP_CONFIG.get("app"))
     and is_js_path(app.split(":")[0])
-):
+)
+
+if JS_PROXY_MIDDLEWARE_ENABLED:
     from langgraph_api.js.remote import JSCustomHTTPProxyMiddleware
 
-    middleware.append(Middleware(JSCustomHTTPProxyMiddleware))
+    global_middleware.append(Middleware(JSCustomHTTPProxyMiddleware))
 
-middleware.extend(
+global_middleware.extend(
     [
         (
             Middleware(
@@ -88,6 +96,7 @@ middleware.extend(
     ]
 )
 exception_handlers = {
+    HTTPException: http_exception_handler,
     ValueError: value_error_handler,
     InvalidUpdateError: value_error_handler,
     EmptyInputError: value_error_handler,
@@ -109,7 +118,7 @@ def update_openapi_spec(app):
     schemas = SchemaGenerator(
         {
             "openapi": "3.1.0",
-            "info": {"title": "LangGraph Platform", "version": "0.1.0"},
+            "info": {"title": "LangSmith Deployment", "version": "0.1.0"},
         }
     )
     spec = schemas.get_schema(routes=app.routes)
@@ -118,70 +127,135 @@
     set_custom_spec(spec)
 
 
+def apply_middleware(
+    routes: list[BaseRoute], middleware: list[Middleware]
+) -> list[BaseRoute]:
+    """Applies middleware to a list of routes.
+
+    Routes are modified in place (only the `app` attribute is modified);
+    the modified routes are returned for convenience.
+    """
+    middleware_routes = []
+    for route in routes:
+        for cls, args, kwargs in reversed(middleware):
+            if hasattr(route, "app"):
+                route.app = cls(route.app, *args, **kwargs)  # type: ignore
+            else:
+                raise ValueError(f"Cannot apply middleware: route {route} has no app")
+        middleware_routes.append(route)
+    return middleware_routes
+
+
+custom_middleware = (
+    user_router.user_middleware if user_router and user_router.user_middleware else []
+)
+auth_before_custom_middleware = (
+    config.HTTP_CONFIG and config.HTTP_CONFIG.get("middleware_order") == "auth_first"
+)
+enable_auth_on_custom_routes = config.HTTP_CONFIG and config.HTTP_CONFIG.get(
+    "enable_custom_route_auth"
+)
+# Custom middleware to be applied at the route/mount level, not globally (app level).
+route_level_custom_middleware = (
+    custom_middleware if auth_before_custom_middleware else []
+)
+
+protected_mount = Mount(
+    "",
+    routes=protected_routes,
+    middleware=(
+        middleware_for_protected_routes + route_level_custom_middleware
+        if auth_before_custom_middleware
+        else route_level_custom_middleware + middleware_for_protected_routes
+    ),
+)
+
+
 if user_router:
     # Merge routes
     app = user_router
+    if auth_before_custom_middleware:
+        # Authentication middleware is only applied to protected routes--
+        # it is *not* global middleware. This means that by default,
+        # authentication middleware is necessarily applied *after* any global middleware,
+        # including custom middleware that the user might have supplied.
+        #
+        # To apply authentication middleware before custom middleware,
+        # we must rearrange things a bit:
+        # 1. Extract user-supplied routes and bundle them into a `Mount`
+        #    so that we can easily apply custom middleware to all of them at once.
+        # 2. Extract custom middleware from the user-supplied app.
+        #    Remove it globally, but apply it to each bundle of routes at the mount level.
+        #    This gives us more flexibility in ordering: we can now apply this
+        #    custom middleware before *or* after authentication middleware,
+        #    depending on the `middleware_order` config.
+        user_app = apply_middleware(
+            routes=app.routes,
+            middleware=(
+                middleware_for_protected_routes if enable_auth_on_custom_routes else []
+            )
+            + route_level_custom_middleware,
+        )
+        app.user_middleware = global_middleware
+    else:
+        user_app = (
+            apply_middleware(
+                routes=app.routes,
+                middleware=middleware_for_protected_routes,
+            )
+            if enable_auth_on_custom_routes
+            else app.routes
+        )
+        app.user_middleware = custom_middleware + global_middleware
 
-    meta_route_paths = [
-        getattr(route, "path", None) for route in meta_routes if hasattr(route, "path")
-    ]
-    custom_route_paths = [
-        route.path
-        for route in user_router.router.routes
-        if hasattr(route, "path") and route.path not in meta_route_paths
-    ]
-    logger.info(f"Custom route paths: {custom_route_paths}")
+    app.router.routes = (
+        apply_middleware(unshadowable_meta_routes, route_level_custom_middleware)
+        + user_app
+        + apply_middleware(shadowable_meta_routes, route_level_custom_middleware)
+        + [protected_mount]
+    )
 
     update_openapi_spec(app)
-    for route in routes:
-        if getattr(route, "path", None) in ("/docs", "/openapi.json"):
-            # Our handlers for these are inclusive of the custom routes and default API ones
-            # Don't let these be shadowed
-            app.router.routes.insert(0, route)
-        else:
-            # Everything else could be shadowed.
-            app.router.routes.append(route)
 
-    # Merge lifespans
-    original_lifespan = app.router.lifespan_context
+    # Merge lifespans (base + user)
+    user_lifespan = app.router.lifespan_context
     if app.router.on_startup or app.router.on_shutdown:
         raise ValueError(
             f"Cannot merge lifespans with on_startup or on_shutdown: {app.router.on_startup} {app.router.on_shutdown}"
         )
 
-    @asynccontextmanager
-    async def combined_lifespan(app):
-        async with lifespan(app):
-            if original_lifespan:
-                async with original_lifespan(app):
-                    yield
-            else:
-                yield
-
-    app.router.lifespan_context = combined_lifespan
+    app.router.lifespan_context = timing.combine_lifespans(lifespan, user_lifespan)
 
-    # Merge middleware
-    app.user_middleware = (app.user_middleware or []) + middleware
-    # Merge exception handlers
+    # Merge exception handlers (base + user)
     for k, v in exception_handlers.items():
         if k not in app.exception_handlers:
             app.exception_handlers[k] = v
         else:
             logger.debug(f"Overriding exception handler for {k}")
-    # If the user creates a loopback client with `get_client()` (no url)
-    # this will update the http transport to connect to the right app
-    configure_loopback_transports(app)
-
 else:
     # It's a regular starlette app
     app = Starlette(
-        routes=routes,
-        lifespan=lifespan,
-        middleware=middleware,
+        routes=[
+            *apply_middleware(
+                unshadowable_meta_routes + shadowable_meta_routes,
+                route_level_custom_middleware,
+            ),
+            protected_mount,
+        ],
+        lifespan=timing.combine_lifespans(lifespan),
+        middleware=global_middleware,
         exception_handlers=exception_handlers,
     )
 
+# If the user creates a loopback client with `get_client()` (no url)
+# this will update the http transport to connect to the right app
+configure_loopback_transports(app)
+
 if config.MOUNT_PREFIX:
+    from starlette.routing import Route
+
+    from langgraph_api.api import meta_metrics, ok
+
     prefix = config.MOUNT_PREFIX
     if not prefix.startswith("/") or prefix.endswith("/"):
         raise ValueError(
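A note on the hunk above: the new apply_middleware helper wraps each route's ASGI `app` attribute directly, relying on Starlette's `Middleware` objects unpacking into `(cls, args, kwargs)` and on reverse-order wrapping so the first-listed middleware ends up outermost, mirroring Starlette's app-level ordering. A minimal, self-contained sketch of the same wrapping idea (the `TagHeaderMiddleware` class and `x-tag` header are illustrative, not part of langgraph-api):

    from starlette.applications import Starlette
    from starlette.middleware import Middleware
    from starlette.responses import PlainTextResponse
    from starlette.routing import Route

    class TagHeaderMiddleware:
        """Pure-ASGI middleware that stamps a header on every response."""

        def __init__(self, app, tag: str):
            self.app = app
            self.tag = tag

        async def __call__(self, scope, receive, send):
            async def send_wrapper(message):
                if message["type"] == "http.response.start":
                    # Append to the response headers before they go out.
                    message.setdefault("headers", []).append((b"x-tag", self.tag.encode()))
                await send(message)

            await self.app(scope, receive, send_wrapper)

    async def ok(request):
        return PlainTextResponse("ok")

    route = Route("/ok", ok)
    middleware = [Middleware(TagHeaderMiddleware, tag="protected")]
    # reversed() keeps the first-listed middleware outermost, matching
    # how Starlette orders app-level middleware.
    for cls, args, kwargs in reversed(middleware):
        route.app = cls(route.app, *args, **kwargs)

    app = Starlette(routes=[route])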
@@ -189,8 +263,6 @@ if config.MOUNT_PREFIX:
             f"Valid examples: '/my-api', '/v1', '/api/v1'.\nInvalid examples: 'api/', '/api/'"
         )
     logger.info(f"Mounting routes at prefix: {prefix}")
-    plen = len(prefix)
-    rplen = len(prefix.encode("utf-8"))
 
     class ASGIBypassMiddleware:
         def __init__(self, app: typing.Any, **kwargs):
@@ -208,9 +280,15 @@ if config.MOUNT_PREFIX:
 
             return await self.app(scope, receive, send)
 
+    # Add health checks at root still to avoid having to override health checks.
     app = Starlette(
-        routes=[Mount(prefix, app=app)],
+        routes=[
+            Route("/", ok, methods=["GET"]),
+            Route("/ok", ok, methods=["GET"]),
+            Route("/metrics", meta_metrics, methods=["GET"]),
+            Mount(prefix, app=app),
+        ],
         lifespan=app.router.lifespan_context,
-        middleware=[Middleware(ASGIBypassMiddleware)] + app.user_middleware,
+        middleware=[Middleware(ASGIBypassMiddleware)],
         exception_handlers=app.exception_handlers,
     )
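The inline combined_lifespan helper removed earlier in this file is replaced by timing.combine_lifespans, whose exact signature is internal to langgraph-api. Judging from the removed code, the merge is a straightforward nesting of async context managers; a sketch of the equivalent behavior (an assumption based on the deleted lines, not the package's actual implementation):

    from contextlib import asynccontextmanager

    def combine_lifespans(base_lifespan, user_lifespan=None):
        # Enter the base lifespan first, then the user's, so user startup
        # code can rely on the base services already being up.
        @asynccontextmanager
        async def combined(app):
            async with base_lifespan(app):
                if user_lifespan is not None:
                    async with user_lifespan(app):
                        yield
                else:
                    yield

        return combined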
langgraph_api/state.py CHANGED
@@ -6,12 +6,13 @@ from langgraph.types import Interrupt, StateSnapshot
 
 from langgraph_api.feature_flags import USE_NEW_INTERRUPTS
 from langgraph_api.js.base import RemoteInterrupt
-from langgraph_api.schema import Checkpoint, DeprecatedInterrupt, ThreadState
-from langgraph_api.schema import Interrupt as InterruptSchema
 
 if typing.TYPE_CHECKING:
     from langchain_core.runnables.config import RunnableConfig
 
+    from langgraph_api.schema import Checkpoint, DeprecatedInterrupt, ThreadState
+    from langgraph_api.schema import Interrupt as InterruptSchema
+
 
 def runnable_config_to_checkpoint(
     config: RunnableConfig | None,
@@ -27,7 +28,7 @@ def runnable_config_to_checkpoint(
         return None
 
     configurable = config["configurable"]
-    checkpoint: Checkpoint = {
+    checkpoint: Checkpoint = {  # type: ignore[typed-dict-item]
         "checkpoint_id": configurable["checkpoint_id"],
         "thread_id": configurable["thread_id"],
     }
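Moving the schema imports under typing.TYPE_CHECKING defers them to static analysis only, trimming runtime import cost at startup; the trade-off is that names like Checkpoint no longer exist at runtime, which is why the annotated dict literal now carries a # type: ignore[typed-dict-item]. A small sketch of the pattern (module and function names here are illustrative):

    from __future__ import annotations  # annotations stay strings at runtime

    import typing

    if typing.TYPE_CHECKING:
        # Only type checkers evaluate this import; it costs nothing at runtime.
        from langgraph_api.schema import Checkpoint

    def make_checkpoint(thread_id: str, checkpoint_id: str) -> Checkpoint:
        checkpoint: Checkpoint = {  # type: ignore[typed-dict-item]
            "thread_id": thread_id,
            "checkpoint_id": checkpoint_id,
        }
        return checkpoint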
langgraph_api/store.py CHANGED
@@ -12,7 +12,8 @@ from langgraph.graph import StateGraph
 from langgraph.pregel import Pregel
 from langgraph.store.base import BaseStore
 
-from langgraph_api import config
+from langgraph_api import config, timing
+from langgraph_api.timing import profiled_import
 from langgraph_api.utils.config import run_in_executor
 
 logger = structlog.stdlib.get_logger(__name__)
@@ -83,22 +84,30 @@ async def collect_store_from_env() -> None:
         CUSTOM_STORE = value
 
 
+@timing.timer(
+    message="Loading store {store_path}",
+    metadata_fn=lambda store_path: {"store_path": store_path},
+    warn_threshold_secs=5,
+    warn_message="Loading store '{store_path}' took longer than expected",
+    error_threshold_secs=10,
+)
 def _load_store(store_path: str) -> Any:
-    if "/" in store_path or ".py:" in store_path:
-        modname = "".join(choice("abcdefghijklmnopqrstuvwxyz") for _ in range(24))
-        path_name, function = store_path.rsplit(":", 1)
-        module_name = path_name.rstrip(":")
-        # Load from file path
-        modspec = importlib.util.spec_from_file_location(modname, module_name)
-        if modspec is None:
-            raise ValueError(f"Could not find store file: {path_name}")
-        module = importlib.util.module_from_spec(modspec)
-        sys.modules[module_name] = module
-        modspec.loader.exec_module(module)  # type: ignore[possibly-unbound-attribute]
-
-    else:
-        path_name, function = store_path.rsplit(".", 1)
-        module = importlib.import_module(path_name)
+    with profiled_import(store_path):
+        if "/" in store_path or ".py:" in store_path:
+            modname = "".join(choice("abcdefghijklmnopqrstuvwxyz") for _ in range(24))
+            path_name, function = store_path.rsplit(":", 1)
+            module_name = path_name.rstrip(":")
+            # Load from file path
+            modspec = importlib.util.spec_from_file_location(modname, module_name)
+            if modspec is None:
+                raise ValueError(f"Could not find store file: {path_name}")
+            module = importlib.util.module_from_spec(modspec)
+            sys.modules[module_name] = module
+            modspec.loader.exec_module(module)  # type: ignore[possibly-unbound-attribute]
+
+        else:
+            path_name, function = store_path.rsplit(".", 1)
+            module = importlib.import_module(path_name)
 
     try:
         store: BaseStore | Callable[[config.StoreConfig], BaseStore] = module.__dict__[
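_load_store accepts either a file-path spec such as ./my_store.py:store or a dotted module path such as my_pkg.store_module.store. A condensed, self-contained sketch of the two import paths it distinguishes (the load_attr name and "user_module" key are illustrative):

    import importlib
    import importlib.util
    import sys

    def load_attr(spec: str):
        if "/" in spec or ".py:" in spec:
            # File-path form: import the module from its location on disk.
            path, attr = spec.rsplit(":", 1)
            modspec = importlib.util.spec_from_file_location("user_module", path)
            if modspec is None or modspec.loader is None:
                raise ValueError(f"Could not find file: {path}")
            module = importlib.util.module_from_spec(modspec)
            sys.modules["user_module"] = module
            modspec.loader.exec_module(module)
        else:
            # Dotted form: a regular import, with the attribute name last.
            path, attr = spec.rsplit(".", 1)
            module = importlib.import_module(path)
        return module.__dict__[attr]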
langgraph_api/stream.py CHANGED
@@ -2,19 +2,20 @@ import uuid
 from collections.abc import AsyncIterator, Callable
 from contextlib import AsyncExitStack, aclosing, asynccontextmanager
 from functools import lru_cache
-from typing import Any, cast
+from typing import TYPE_CHECKING, Any, cast
 
 import langgraph.version
 import langsmith
 import structlog
 from langchain_core.messages import (
+    AIMessageChunk,
     # TODO: Remove explicit dependency
     BaseMessage,
     BaseMessageChunk,
+    ToolMessageChunk,
     convert_to_messages,
     message_chunk_to_message,
 )
-from langchain_core.runnables import RunnableConfig
 from langgraph.errors import (
     EmptyChannelError,
     EmptyInputError,
@@ -30,7 +31,11 @@ from langgraph_api import __version__
 from langgraph_api import store as api_store
 from langgraph_api.asyncio import ValueEvent, wait_if_not_done
 from langgraph_api.command import map_cmd
-from langgraph_api.feature_flags import USE_RUNTIME_CONTEXT_API
+from langgraph_api.feature_flags import (
+    UPDATES_NEEDED_FOR_INTERRUPTS,
+    USE_DURABILITY,
+    USE_RUNTIME_CONTEXT_API,
+)
 from langgraph_api.graph import get_graph
 from langgraph_api.js.base import BaseRemotePregel
 from langgraph_api.metadata import HOST, PLAN, USER_API_URL, incr_nodes
@@ -40,6 +45,10 @@ from langgraph_api.utils.config import run_in_executor
 from langgraph_runtime.checkpoint import Checkpointer
 from langgraph_runtime.ops import Runs
 
+if TYPE_CHECKING:
+    from langchain_core.runnables import RunnableConfig
+
+
 logger = structlog.stdlib.get_logger(__name__)
 
 
@@ -134,10 +143,18 @@ async def astream_state(
     kwargs = run["kwargs"].copy()
     kwargs.pop("webhook", None)
     kwargs.pop("resumable", False)
+    if USE_DURABILITY:
+        checkpoint_during = kwargs.pop("checkpoint_during")
+        if not kwargs.get("durability") and checkpoint_during:
+            kwargs["durability"] = "async" if checkpoint_during else "exit"
+    else:
+        durability = kwargs.pop("durability")
+        if not kwargs.get("checkpoint_during") and durability in ("async", "exit"):
+            kwargs["checkpoint_during"] = durability == "async"
     subgraphs = kwargs.get("subgraphs", False)
     temporary = kwargs.pop("temporary", False)
     context = kwargs.pop("context", None)
-    config = cast(RunnableConfig, kwargs.pop("config"))
+    config = cast("RunnableConfig", kwargs.pop("config"))
     configurable = config["configurable"]
     stack = AsyncExitStack()
     graph = await stack.enter_async_context(
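The hunk above keeps the legacy checkpoint_during flag and the newer durability mode mutually intelligible: behind the USE_DURABILITY feature flag a boolean is promoted to a mode, and on the fallback path a mode is demoted back to a boolean. The mapping itself, pulled out as a sketch (the helper names are illustrative):

    def durability_from_checkpoint_during(checkpoint_during: bool) -> str:
        # "async" writes checkpoints while the run progresses;
        # "exit" defers them until the run finishes.
        return "async" if checkpoint_during else "exit"

    def checkpoint_during_from_durability(durability: str) -> bool:
        return durability == "async"

    assert durability_from_checkpoint_during(True) == "async"
    assert checkpoint_during_from_durability("exit") is False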
@@ -146,6 +163,7 @@ async def astream_state(
             config,
             store=(await api_store.get_store()),
             checkpointer=None if temporary else Checkpointer(),
+            is_for_execution=True,
         )
     )
 
@@ -170,7 +188,7 @@ async def astream_state(
     if "messages-tuple" in stream_modes_set and not isinstance(graph, BaseRemotePregel):
        stream_modes_set.remove("messages-tuple")
        stream_modes_set.add("messages")
-    if "updates" not in stream_modes_set:
+    if "updates" not in stream_modes_set and UPDATES_NEEDED_FOR_INTERRUPTS:
        stream_modes_set.add("updates")
        only_interrupt_updates = True
    else:
@@ -221,6 +239,8 @@ async def astream_state(
 
     # stream run
     if use_astream_events:
+        if USE_RUNTIME_CONTEXT_API:
+            kwargs["context"] = context
         async with (
             stack,
             aclosing(  # type: ignore[invalid-argument-type]
@@ -238,7 +258,7 @@ async def astream_state(
                 event = await wait_if_not_done(anext(stream, sentinel), done)
                 if event is sentinel:
                     break
-                event = cast(dict, event)
+                event = cast("dict", event)
                 if event.get("tags") and "langsmith:hidden" in event["tags"]:
                     continue
                 if (
@@ -276,13 +296,25 @@ async def astream_state(
                     yield "messages", chunk
                 else:
                     msg_, meta = cast(
-                        tuple[BaseMessage | dict, dict[str, Any]], chunk
-                    )
-                    msg = (
-                        convert_to_messages([msg_])[0]
-                        if isinstance(msg_, dict)
-                        else cast(BaseMessage, msg_)
+                        "tuple[BaseMessage | dict, dict[str, Any]]", chunk
                     )
+                    is_chunk = False
+                    if isinstance(msg_, dict):
+                        if (
+                            "chunk" in msg_.get("type", "").lower()
+                            or "chunk" in msg_.get("role", "").lower()
+                        ):
+                            if "ai" in msg_.get("role", "").lower():
+                                msg = AIMessageChunk(**msg_)  # type: ignore[arg-type]
+                            elif "tool" in msg_.get("role", "").lower():
+                                msg = ToolMessageChunk(**msg_)  # type: ignore[arg-type]
+                            else:
+                                msg = BaseMessageChunk(**msg_)  # type: ignore[arg-type]
+                            is_chunk = True
+                        else:
+                            msg = convert_to_messages([msg_])[0]
+                    else:
+                        msg = msg_
                    if msg.id in messages:
                        messages[msg.id] += msg
                    else:
@@ -294,7 +326,13 @@ async def astream_state(
                             if isinstance(msg, BaseMessageChunk)
                             else "messages/complete"
                         ),
-                        [message_chunk_to_message(messages[msg.id])],
+                        [
+                            (
+                                message_chunk_to_message(messages[msg.id])
+                                if not is_chunk
+                                else messages[msg.id]
+                            )
+                        ],
                     )
                elif mode in stream_mode:
                    if subgraphs and ns:
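The reworked branch above rebuilds dict payloads whose type/role mentions "chunk" as real chunk classes before accumulating them, because only the chunk classes define addition for incremental merging; a reconstructed chunk is then emitted as-is rather than run through message_chunk_to_message. The merge step in isolation (values here are made up for the example):

    from langchain_core.messages import AIMessageChunk

    first = AIMessageChunk(content="Hel", id="m1")
    second = AIMessageChunk(content="lo", id="m1")
    merged = first + second  # chunk classes support `+` for merging
    assert merged.content == "Hello"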
@@ -308,11 +346,9 @@ async def astream_state(
                    and len(chunk["__interrupt__"]) > 0
                    and only_interrupt_updates
                ):
-                    # We always want to return interrupt events by default.
-                    # If updates aren't specified as a stream mode, we return these as values events.
                    # If the interrupt doesn't have any actions (e.g. interrupt before or after a node is specified), we don't return the interrupt at all today.
                    if subgraphs and ns:
-                        yield f"values|{'|'.join(ns)}", chunk
+                        yield "values|{'|'.join(ns)}", chunk
                    else:
                        yield "values", chunk
                # --- end shared logic with astream ---
@@ -340,9 +376,9 @@ async def astream_state(
                if event is sentinel:
                    break
                if subgraphs:
-                    ns, mode, chunk = cast(tuple[str, str, dict[str, Any]], event)
+                    ns, mode, chunk = cast("tuple[str, str, dict[str, Any]]", event)
                else:
-                    mode, chunk = cast(tuple[str, dict[str, Any]], event)
+                    mode, chunk = cast("tuple[str, dict[str, Any]]", event)
                    ns = None
                # --- begin shared logic with astream_events ---
                if mode == "debug":
@@ -360,14 +396,25 @@ async def astream_state(
                    yield "messages", chunk
                else:
                    msg_, meta = cast(
-                        tuple[BaseMessage | dict, dict[str, Any]], chunk
-                    )
-                    msg = (
-                        convert_to_messages([msg_])[0]
-                        if isinstance(msg_, dict)
-                        else cast(BaseMessage, msg_)
+                        "tuple[BaseMessage | dict, dict[str, Any]]", chunk
                    )
-
+                    is_chunk = False
+                    if isinstance(msg_, dict):
+                        if (
+                            "chunk" in msg_.get("type", "").lower()
+                            or "chunk" in msg_.get("role", "").lower()
+                        ):
+                            if "ai" in msg_.get("role", "").lower():
+                                msg = AIMessageChunk(**msg_)  # type: ignore[arg-type]
+                            elif "tool" in msg_.get("role", "").lower():
+                                msg = ToolMessageChunk(**msg_)  # type: ignore[arg-type]
+                            else:
+                                msg = BaseMessageChunk(**msg_)  # type: ignore[arg-type]
+                            is_chunk = True
+                        else:
+                            msg = convert_to_messages([msg_])[0]
+                    else:
+                        msg = msg_
                    if msg.id in messages:
                        messages[msg.id] += msg
                    else:
@@ -379,7 +426,13 @@ async def astream_state(
                            if isinstance(msg, BaseMessageChunk)
                            else "messages/complete"
                        ),
-                        [message_chunk_to_message(messages[msg.id])],
+                        [
+                            (
+                                message_chunk_to_message(messages[msg.id])
+                                if not is_chunk
+                                else messages[msg.id]
+                            )
+                        ],
                    )
                elif mode in stream_mode:
                    if subgraphs and ns:
@@ -393,11 +446,9 @@ async def astream_state(
                    and len(chunk["__interrupt__"]) > 0
                    and only_interrupt_updates
                ):
-                    # We always want to return interrupt events by default.
-                    # If updates aren't specified as a stream mode, we return these as values events.
                    # If the interrupt doesn't have any actions (e.g. interrupt before or after a node is specified), we don't return the interrupt at all today.
                    if subgraphs and ns:
-                        yield "values|{'|'.join(ns)}", chunk
+                        yield f"values|{'|'.join(ns)}", chunk
                    else:
                        yield "values", chunk
                # --- end shared logic with astream_events ---