langgraph-api 0.4.48__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langgraph-api might be problematic.

langgraph_api/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.4.48"
+ __version__ = "0.5.1"
langgraph_api/api/assistants.py CHANGED
@@ -1,3 +1,4 @@
+ from functools import partial
  from typing import Any
  from uuid import uuid4

@@ -37,7 +38,7 @@ from langgraph_api.validation import (
  ConfigValidator,
  )
  from langgraph_runtime.checkpoint import Checkpointer
- from langgraph_runtime.database import connect
+ from langgraph_runtime.database import connect as base_connect
  from langgraph_runtime.ops import Assistants
  from langgraph_runtime.retry import retry_db

@@ -45,6 +46,8 @@ logger = structlog.stdlib.get_logger(__name__)

  CrudAssistants = GrpcAssistants if FF_USE_CORE_API else Assistants

+ connect = partial(base_connect, supports_core_api=FF_USE_CORE_API)
+
  EXCLUDED_CONFIG_SCHEMA = (
  "__pregel_checkpointer",
  "__pregel_store",
@@ -255,7 +258,7 @@ async def get_assistant_graph(
  assistant_id = get_assistant_id(str(request.path_params["assistant_id"]))
  validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
  async with connect() as conn:
- assistant_ = await Assistants.get(conn, assistant_id)
+ assistant_ = await CrudAssistants.get(conn, assistant_id)
  assistant = await fetchone(assistant_)
  config = json_loads(assistant["config"])
  configurable = config.setdefault("configurable", {})
@@ -312,43 +315,44 @@ async def get_assistant_subgraphs(
  assistant_id = request.path_params["assistant_id"]
  validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
  async with connect() as conn:
- assistant_ = await Assistants.get(conn, assistant_id)
+ assistant_ = await CrudAssistants.get(conn, assistant_id)
  assistant = await fetchone(assistant_)
- config = json_loads(assistant["config"])
- configurable = config.setdefault("configurable", {})
- configurable.update(get_configurable_headers(request.headers))
- async with get_graph(
- assistant["graph_id"],
- config,
- checkpointer=Checkpointer(),
- store=(await api_store.get_store()),
- ) as graph:
- namespace = request.path_params.get("namespace")
-
- if isinstance(graph, BaseRemotePregel):
- return ApiResponse(
- await graph.fetch_subgraphs(
+
+ config = json_loads(assistant["config"])
+ configurable = config.setdefault("configurable", {})
+ configurable.update(get_configurable_headers(request.headers))
+ async with get_graph(
+ assistant["graph_id"],
+ config,
+ checkpointer=Checkpointer(),
+ store=(await api_store.get_store()),
+ ) as graph:
+ namespace = request.path_params.get("namespace")
+
+ if isinstance(graph, BaseRemotePregel):
+ return ApiResponse(
+ await graph.fetch_subgraphs(
+ namespace=namespace,
+ recurse=request.query_params.get("recurse", "False")
+ in ("true", "True"),
+ )
+ )
+
+ try:
+ return ApiResponse(
+ {
+ ns: _graph_schemas(subgraph)
+ async for ns, subgraph in graph.aget_subgraphs(
  namespace=namespace,
  recurse=request.query_params.get("recurse", "False")
  in ("true", "True"),
  )
- )
-
- try:
- return ApiResponse(
- {
- ns: _graph_schemas(subgraph)
- async for ns, subgraph in graph.aget_subgraphs(
- namespace=namespace,
- recurse=request.query_params.get("recurse", "False")
- in ("true", "True"),
- )
- }
- )
- except NotImplementedError:
- raise HTTPException(
- 422, detail="The graph does not support visualization"
- ) from None
+ }
+ )
+ except NotImplementedError:
+ raise HTTPException(
+ 422, detail="The graph does not support visualization"
+ ) from None


  @retry_db
@@ -359,40 +363,40 @@ async def get_assistant_schemas(
  assistant_id = request.path_params["assistant_id"]
  validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
  async with connect() as conn:
- assistant_ = await Assistants.get(conn, assistant_id)
- # TODO Implementa cache so we can de-dent and release this connection.
+ assistant_ = await CrudAssistants.get(conn, assistant_id)
  assistant = await fetchone(assistant_)
- config = json_loads(assistant["config"])
- configurable = config.setdefault("configurable", {})
- configurable.update(get_configurable_headers(request.headers))
- async with get_graph(
- assistant["graph_id"],
- config,
- checkpointer=Checkpointer(),
- store=(await api_store.get_store()),
- ) as graph:
- if isinstance(graph, BaseRemotePregel):
- schemas = await graph.fetch_state_schema()
- return ApiResponse(
- {
- "graph_id": assistant["graph_id"],
- "input_schema": schemas.get("input"),
- "output_schema": schemas.get("output"),
- "state_schema": schemas.get("state"),
- "config_schema": schemas.get("config"),
- "context_schema": schemas.get("context"),
- }
- )
-
- schemas = _graph_schemas(graph)

+ config = json_loads(assistant["config"])
+ configurable = config.setdefault("configurable", {})
+ configurable.update(get_configurable_headers(request.headers))
+ async with get_graph(
+ assistant["graph_id"],
+ config,
+ checkpointer=Checkpointer(),
+ store=(await api_store.get_store()),
+ ) as graph:
+ if isinstance(graph, BaseRemotePregel):
+ schemas = await graph.fetch_state_schema()
  return ApiResponse(
  {
  "graph_id": assistant["graph_id"],
- **schemas,
+ "input_schema": schemas.get("input"),
+ "output_schema": schemas.get("output"),
+ "state_schema": schemas.get("state"),
+ "config_schema": schemas.get("config"),
+ "context_schema": schemas.get("context"),
  }
  )

+ schemas = _graph_schemas(graph)
+
+ return ApiResponse(
+ {
+ "graph_id": assistant["graph_id"],
+ **schemas,
+ }
+ )
+

  @retry_db
  async def patch_assistant(
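The import hunks above also rebind connect through functools.partial, so every existing connect() call site in this module transparently picks up the supports_core_api flag. A minimal standalone sketch of that pattern (the names below are illustrative stand-ins, not the real langgraph_runtime API):

    from functools import partial

    def base_connect(*, supports_core_api: bool = False):
        # stand-in for langgraph_runtime.database.connect
        return f"conn(core_api={supports_core_api})"

    FF_USE_CORE_API = True
    connect = partial(base_connect, supports_core_api=FF_USE_CORE_API)

    # call sites keep calling connect() with no arguments
    assert connect() == "conn(core_api=True)"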
langgraph_api/config.py CHANGED
@@ -128,6 +128,45 @@ class StoreConfig(TypedDict, total=False):
  ttl: TTLConfig


+ class SerdeConfig(TypedDict, total=False):
+ """Configuration for the built-in serde, which handles checkpointing of state.
+
+ If omitted, no serde is set up (the object store will still be present, however)."""
+
+ allowed_json_modules: list[list[str]] | Literal[True] | None
+ """Optional. List of allowed python modules to de-serialize custom objects from.
+
+ If provided, only the specified modules will be allowed to be deserialized.
+ If omitted, no modules are allowed, and the object returned will simply be a json object OR
+ a deserialized langchain object.
+
+ Example:
+ {...
+ "serde": {
+ "allowed_json_modules": [
+ ["my_agent", "my_file", "SomeType"],
+ ]
+ }
+ }
+
+ If you set this to True, any module will be allowed to be deserialized.
+
+ Example:
+ {...
+ "serde": {
+ "allowed_json_modules": true
+ }
+ }
+
+ """
+ pickle_fallback: bool
+ """Optional. Whether to allow pickling as a fallback for deserialization.
+
+ If True, pickling will be allowed as a fallback for deserialization.
+ If False, pickling will not be allowed as a fallback for deserialization.
+ Defaults to True if not configured."""
+
+
  class CheckpointerConfig(TypedDict, total=False):
  """Configuration for the built-in checkpointer, which handles checkpointing of state.

@@ -140,6 +179,8 @@ class CheckpointerConfig(TypedDict, total=False):
  If provided, the checkpointer will apply TTL settings according to the configuration.
  If omitted, no TTL behavior is configured.
  """
+ serde: SerdeConfig | None
+ """Optional. Defines the configuration for how checkpoints are serialized."""


  class SecurityConfig(TypedDict, total=False):
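Taken together, the SerdeConfig and CheckpointerConfig hunks above mean a checkpointer block can now allow-list specific types for JSON deserialization and turn off the pickle fallback. A sketch of the parsed configuration (my_agent.my_file.SomeType is a hypothetical type; the dict is what the LANGGRAPH_CHECKPOINTER value is parsed into):

    # Parsed form of the CheckpointerConfig TypedDict defined above (a sketch).
    checkpointer_config = {
        "serde": {
            # ["my_agent", "my_file", "SomeType"] allow-lists my_agent.my_file.SomeType
            "allowed_json_modules": [["my_agent", "my_file", "SomeType"]],
            # refuse to unpickle anything stored with the "pickle" type
            "pickle_fallback": False,
        },
    }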
@@ -250,6 +291,13 @@ ALLOW_PRIVATE_NETWORK = env("ALLOW_PRIVATE_NETWORK", cast=bool, default=False)
  See https://developer.chrome.com/blog/private-network-access-update-2024-03
  """

+ # gRPC client pool size for persistence server.
+ GRPC_CLIENT_POOL_SIZE = env("GRPC_CLIENT_POOL_SIZE", cast=int, default=5)
+
+ # Minimum payload size to use the dedicated thread pool for JSON parsing.
+ # (Otherwise, the payload is parsed directly in the event loop.)
+ JSON_THREAD_POOL_MINIMUM_SIZE_BYTES = 100 * 1024 # 100 KB
+
  HTTP_CONFIG = env("LANGGRAPH_HTTP", cast=_parse_schema(HttpConfig), default=None)
  STORE_CONFIG = env("LANGGRAPH_STORE", cast=_parse_schema(StoreConfig), default=None)

@@ -339,6 +387,11 @@ def _parse_thread_ttl(value: str | None) -> ThreadTTLConfig | None:
  CHECKPOINTER_CONFIG = env(
  "LANGGRAPH_CHECKPOINTER", cast=_parse_schema(CheckpointerConfig), default=None
  )
+ SERDE: SerdeConfig | None = (
+ CHECKPOINTER_CONFIG["serde"]
+ if CHECKPOINTER_CONFIG and "serde" in CHECKPOINTER_CONFIG
+ else None
+ )
  THREAD_TTL: ThreadTTLConfig | None = env(
  "LANGGRAPH_THREAD_TTL", cast=_parse_thread_ttl, default=None
  )
@@ -349,7 +402,6 @@ N_JOBS_PER_WORKER = env("N_JOBS_PER_WORKER", cast=int, default=10)
  BG_JOB_TIMEOUT_SECS = env("BG_JOB_TIMEOUT_SECS", cast=float, default=3600)

  FF_CRONS_ENABLED = env("FF_CRONS_ENABLED", cast=bool, default=True)
- FF_RICH_THREADS = env("FF_RICH_THREADS", cast=bool, default=True)
  FF_LOG_DROPPED_EVENTS = env("FF_LOG_DROPPED_EVENTS", cast=bool, default=False)
  FF_LOG_QUERY_AND_PARAMS = env("FF_LOG_QUERY_AND_PARAMS", cast=bool, default=False)

langgraph_api/grpc_ops/client.py CHANGED
@@ -1,5 +1,6 @@
  """gRPC client wrapper for LangGraph persistence services."""

+ import asyncio
  import os

  import structlog
@@ -10,6 +11,10 @@ from .generated.core_api_pb2_grpc import AdminStub, AssistantsStub, ThreadsStub
  logger = structlog.stdlib.get_logger(__name__)


+ # Shared global client pool
+ _client_pool: "GrpcClientPool | None" = None
+
+
  class GrpcClient:
  """gRPC client for LangGraph persistence services."""

@@ -90,3 +95,89 @@ class GrpcClient:
  "Client not connected. Use async context manager or call connect() first."
  )
  return self._admin_stub
+
+
+ class GrpcClientPool:
+ """Pool of gRPC clients for load distribution."""
+
+ def __init__(self, pool_size: int = 5, server_address: str | None = None):
+ self.pool_size = pool_size
+ self.server_address = server_address
+ self.clients: list[GrpcClient] = []
+ self._current_index = 0
+ self._init_lock = asyncio.Lock()
+ self._initialized = False
+
+ async def _initialize(self):
+ """Initialize the pool of clients."""
+ async with self._init_lock:
+ if self._initialized:
+ return
+
+ await logger.ainfo(
+ "Initializing gRPC client pool",
+ pool_size=self.pool_size,
+ server_address=self.server_address,
+ )
+
+ for _ in range(self.pool_size):
+ client = GrpcClient(server_address=self.server_address)
+ await client.connect()
+ self.clients.append(client)
+
+ self._initialized = True
+ await logger.ainfo(
+ f"gRPC client pool initialized with {self.pool_size} clients"
+ )
+
+ async def get_client(self) -> GrpcClient:
+ """Get next client using round-robin selection.
+
+ Round-robin without strict locking - slight races are acceptable
+ and result in good enough distribution under high load.
+ """
+ if not self._initialized:
+ await self._initialize()
+
+ idx = self._current_index % self.pool_size
+ self._current_index = idx + 1
+ return self.clients[idx]
+
+ async def close(self):
+ """Close all clients in the pool."""
+ if self._initialized:
+ await logger.ainfo(f"Closing gRPC client pool ({self.pool_size} clients)")
+ for client in self.clients:
+ await client.close()
+ self.clients.clear()
+ self._initialized = False
+
+
+ async def get_shared_client() -> GrpcClient:
+ """Get a gRPC client from the shared pool.
+
+ Uses a pool of channels for better performance under high concurrency.
+ Each channel is a separate TCP connection that can handle ~100-200
+ concurrent streams effectively.
+
+ Returns:
+ A GrpcClient instance from the pool
+ """
+ global _client_pool
+ if _client_pool is None:
+ from langgraph_api import config
+
+ _client_pool = GrpcClientPool(
+ pool_size=config.GRPC_CLIENT_POOL_SIZE,
+ server_address=os.getenv("GRPC_SERVER_ADDRESS"),
+ )
+
+ return await _client_pool.get_client()
+
+
+ async def close_shared_client():
+ """Close the shared gRPC client pool."""
+ global _client_pool
+ if _client_pool is not None:
+ await _client_pool.close()
+ _client_pool = None
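The pool is initialized lazily under an asyncio.Lock and then hands out clients round-robin; as the get_client docstring notes, the index update is deliberately lock-free. A self-contained sketch of just that selection logic, with a trivial stand-in for GrpcClient:

    class FakeClient:
        def __init__(self, n: int) -> None:
            self.n = n

    class RoundRobinPool:
        # same selection logic as GrpcClientPool.get_client above
        def __init__(self, size: int) -> None:
            self.clients = [FakeClient(i) for i in range(size)]
            self._current_index = 0

        def get_client(self) -> FakeClient:
            idx = self._current_index % len(self.clients)
            self._current_index = idx + 1
            return self.clients[idx]

    pool = RoundRobinPool(3)
    assert [pool.get_client().n for _ in range(5)] == [0, 1, 2, 0, 1]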
langgraph_api/grpc_ops/generated/core_api_pb2_grpc.py CHANGED
@@ -6,7 +6,7 @@
  from . import core_api_pb2 as core__api__pb2
  from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2

- GRPC_GENERATED_VERSION = '1.76.0'
+ GRPC_GENERATED_VERSION = '1.75.1'
  GRPC_VERSION = grpc.__version__
  _version_not_supported = False

@@ -19,7 +19,7 @@ except ImportError:
  if _version_not_supported:
  raise RuntimeError(
  f'The grpc package installed is at version {GRPC_VERSION},'
- + ' but the generated code in core_api_pb2_grpc.py depends on'
+ + f' but the generated code in core_api_pb2_grpc.py depends on'
  + f' grpcio>={GRPC_GENERATED_VERSION}.'
  + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
  + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
langgraph_api/grpc_ops/ops.py CHANGED
@@ -32,7 +32,7 @@ from langgraph_api.schema import (
  )
  from langgraph_api.serde import json_dumpb, json_loads

- from .client import GrpcClient
+ from .client import get_shared_client
  from .generated import core_api_pb2 as pb

  GRPC_STATUS_TO_HTTP_STATUS = {
@@ -539,9 +539,8 @@ class Assistants(Authenticated):
  select=select,
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.Search(request)
+ client = await get_shared_client()
+ response = await client.assistants.Search(request)

  # Convert response to expected format
  assistants = [
@@ -578,9 +577,8 @@
  filters=auth_filters or {},
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.Get(request)
+ client = await get_shared_client()
+ response = await client.assistants.Get(request)

  # Convert and yield the result
  assistant = proto_to_assistant(response)
@@ -637,9 +635,8 @@
  metadata=dict_to_struct(metadata or {}),
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.Create(request)
+ client = await get_shared_client()
+ response = await client.assistants.Create(request)

  # Convert and yield the result
  assistant = proto_to_assistant(response)
@@ -700,9 +697,8 @@
  if context:
  request.context.CopyFrom(dict_to_struct(context))

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.Patch(request)
+ client = await get_shared_client()
+ response = await client.assistants.Patch(request)

  # Convert and yield the result
  assistant = proto_to_assistant(response)
@@ -730,9 +726,8 @@
  filters=auth_filters or {},
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- await client.assistants.Delete(request)
+ client = await get_shared_client()
+ await client.assistants.Delete(request)

  # Return the deleted ID
  async def generate_result():
@@ -765,9 +760,8 @@
  filters=auth_filters or {},
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.SetLatest(request)
+ client = await get_shared_client()
+ response = await client.assistants.SetLatest(request)

  # Convert and yield the result
  assistant = proto_to_assistant(response)
@@ -803,9 +797,8 @@
  offset=offset,
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.GetVersions(request)
+ client = await get_shared_client()
+ response = await client.assistants.GetVersions(request)

  # Convert and yield the results
  async def generate_results():
@@ -849,9 +842,8 @@
  metadata=dict_to_struct(metadata or {}),
  )

- # Make the gRPC call
- async with GrpcClient() as client:
- response = await client.assistants.Count(request)
+ client = await get_shared_client()
+ response = await client.assistants.Count(request)

  return int(response.count)

@@ -913,24 +905,22 @@ class Threads(Authenticated):
  if ids:
  normalized_ids = [_normalize_uuid(thread_id) for thread_id in ids]
  threads: list[Thread] = []
- async with GrpcClient() as client:
- for thread_id in normalized_ids:
- request = pb.GetThreadRequest(
- thread_id=pb.UUID(value=_normalize_uuid(thread_id)),
- filters=auth_filters or {},
- )
- response = await client.threads.Get(request)
- thread = proto_to_thread(response)
-
- if status and thread["status"] != status:
- continue
- if metadata and not _json_contains(thread["metadata"], metadata):
- continue
- if values and not _json_contains(
- thread.get("values") or {}, values
- ):
- continue
- threads.append(thread)
+ client = await get_shared_client()
+ for thread_id in normalized_ids:
+ request = pb.GetThreadRequest(
+ thread_id=pb.UUID(value=_normalize_uuid(thread_id)),
+ filters=auth_filters or {},
+ )
+ response = await client.threads.Get(request)
+ thread = proto_to_thread(response)
+
+ if status and thread["status"] != status:
+ continue
+ if metadata and not _json_contains(thread["metadata"], metadata):
+ continue
+ if values and not _json_contains(thread.get("values") or {}, values):
+ continue
+ threads.append(thread)

  total = len(threads)
  paginated = threads[offset : offset + limit]
@@ -964,10 +954,10 @@
  if select:
  request_kwargs["select"] = select

- async with GrpcClient() as client:
- response = await client.threads.Search(
- pb.SearchThreadsRequest(**request_kwargs)
- )
+ client = await get_shared_client()
+ response = await client.threads.Search(
+ pb.SearchThreadsRequest(**request_kwargs)
+ )

  threads = [proto_to_thread(thread) for thread in response.threads]
  cursor = offset + limit if len(threads) == limit else None
@@ -1014,10 +1004,8 @@
  )
  request_kwargs["status"] = mapped_status

- async with GrpcClient() as client:
- response = await client.threads.Count(
- pb.CountThreadsRequest(**request_kwargs)
- )
+ client = await get_shared_client()
+ response = await client.threads.Count(pb.CountThreadsRequest(**request_kwargs))

  return int(response.count)

@@ -1035,8 +1023,8 @@
  thread_id=pb.UUID(value=_normalize_uuid(thread_id)),
  filters=auth_filters or {},
  )
- async with GrpcClient() as client:
- response = await client.threads.Get(request)
+ client = await get_shared_client()
+ response = await client.threads.Get(request)

  thread = proto_to_thread(response)

@@ -1077,8 +1065,8 @@
  if ttl_config is not None:
  request.ttl.CopyFrom(ttl_config)

- async with GrpcClient() as client:
- response = await client.threads.Create(request)
+ client = await get_shared_client()
+ response = await client.threads.Create(request)
  thread = proto_to_thread(response)

  async def generate_result():
@@ -1118,8 +1106,8 @@
  if ttl_config is not None:
  request.ttl.CopyFrom(ttl_config)

- async with GrpcClient() as client:
- response = await client.threads.Patch(request)
+ client = await get_shared_client()
+ response = await client.threads.Patch(request)

  thread = proto_to_thread(response)

@@ -1147,8 +1135,8 @@
  filters=auth_filters or {},
  )

- async with GrpcClient() as client:
- response = await client.threads.Delete(request)
+ client = await get_shared_client()
+ response = await client.threads.Delete(request)

  deleted_id = UUID(response.value)

@@ -1176,8 +1164,8 @@
  filters=auth_filters or {},
  )

- async with GrpcClient() as client:
- response = await client.threads.Copy(request)
+ client = await get_shared_client()
+ response = await client.threads.Copy(request)

  thread = proto_to_thread(response)

langgraph_api/js/package.json CHANGED
@@ -18,7 +18,7 @@
  "@typescript/vfs": "^1.6.0",
  "dedent": "^1.5.3",
  "exit-hook": "^4.0.0",
- "hono": "^4.10.2",
+ "hono": "^4.10.3",
  "p-queue": "^8.0.1",
  "p-retry": "^6.2.0",
  "tsx": "^4.19.3",
langgraph_api/js/src/graph.mts CHANGED
@@ -62,6 +62,17 @@ export async function resolveGraph(
  return "compile" in graph && typeof graph.compile === "function";
  };

+ const isCompiledGraph = (
+ graph: GraphLike,
+ ): graph is CompiledGraph<string> => {
+ if (typeof graph !== "object" || graph == null) return false;
+ return (
+ "builder" in graph &&
+ typeof graph.builder === "object" &&
+ graph.builder != null
+ );
+ };
+
  const graph: GraphUnknown = await import(sourceFile).then(
  (module) => module[exportSymbol || "default"],
  );
@@ -73,6 +84,15 @@

  const afterResolve = (graphLike: GraphLike): CompiledGraph<string> => {
  const graph = isGraph(graphLike) ? graphLike.compile() : graphLike;
+
+ // TODO: hack, remove once LangChain 1.x createAgent is fixed
+ // LangGraph API will assign it's checkpointer by setting it
+ // via `graph.checkpointer = ...` and `graph.store = ...`, and the 1.x `createAgent`
+ // hides the underlying `StateGraph` instance, so we need to access it directly.
+ if (!isCompiledGraph(graph) && "graph" in graph) {
+ return (graph as { graph: CompiledGraph<string> }).graph;
+ }
+
  return graph;
  };

langgraph_api/js/yarn.lock CHANGED
@@ -971,10 +971,10 @@ has-flag@^4.0.0:
  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
  integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==

- hono@^4.10.2, hono@^4.5.4:
- version "4.10.2"
- resolved "https://registry.yarnpkg.com/hono/-/hono-4.10.2.tgz#a78d322f2c5fabebb1887f7ae1632a6524513e74"
- integrity sha512-p6fyzl+mQo6uhESLxbF5WlBOAJMDh36PljwlKtP5V1v09NxlqGru3ShK+4wKhSuhuYf8qxMmrivHOa/M7q0sMg==
+ hono@^4.10.3, hono@^4.5.4:
+ version "4.10.3"
+ resolved "https://registry.yarnpkg.com/hono/-/hono-4.10.3.tgz#4e4063eebaac2b735ca4c7455b4d7a6339afc251"
+ integrity sha512-2LOYWUbnhdxdL8MNbNg9XZig6k+cZXm5IjHn2Aviv7honhBMOHb+jxrKIeJRZJRmn+htUCKhaicxwXuUDlchRA==

  icss-utils@^5.0.0, icss-utils@^5.1.0:
  version "5.1.0"
langgraph_api/route.py CHANGED
@@ -14,6 +14,7 @@ from starlette.responses import JSONResponse
  from starlette.routing import Route, compile_path, get_name
  from starlette.types import ASGIApp, Receive, Scope, Send

+ from langgraph_api import config
  from langgraph_api.serde import json_dumpb
  from langgraph_api.utils import get_auth_ctx, with_user

@@ -58,10 +59,11 @@ class ApiResponse(JSONResponse):


  def _json_loads(content: bytearray, schema: SchemaType) -> typing.Any:
- json = orjson.loads(content)
+ """Parse JSON and validate schema. Used by threadpool for large payloads."""
+ json_data = orjson.loads(content)
  if schema is not None:
- schema.validate(json)
- return json
+ schema.validate(json_data)
+ return json_data


  class ApiRequest(Request):
@@ -76,8 +78,16 @@
  async def json(self, schema: SchemaType = None) -> typing.Any:
  if not hasattr(self, "_json"):
  body = await self.body()
+
+ # Hybrid approach for optimal performance:
+ # - Small payloads: parse directly (fast, no queueing/thread pool limitations)
+ # - Large payloads: use dedicated thread pool (safer, doesn't block event loop)
  try:
- self._json = await run_in_threadpool(_json_loads, body, schema)
+ self._json = (
+ await run_in_threadpool(_json_loads, body, schema)
+ if len(body) > config.JSON_THREAD_POOL_MINIMUM_SIZE_BYTES
+ else _json_loads(body, schema)
+ )
  except orjson.JSONDecodeError as e:
  raise HTTPException(
  status_code=422, detail="Invalid JSON in request body"
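The json() override above now dispatches on body size: payloads at or below JSON_THREAD_POOL_MINIMUM_SIZE_BYTES are parsed inline on the event loop, while larger ones are offloaded to a thread pool. A standalone sketch of the same threshold pattern using only the stdlib (json stands in for orjson, and asyncio.to_thread for Starlette's run_in_threadpool):

    import asyncio
    import json

    THRESHOLD_BYTES = 100 * 1024  # mirrors JSON_THREAD_POOL_MINIMUM_SIZE_BYTES

    async def parse_body(body: bytes):
        if len(body) <= THRESHOLD_BYTES:
            # small: inline parsing is cheaper than a thread-pool round trip
            return json.loads(body)
        # large: parse off the event loop so other requests keep flowing
        return await asyncio.to_thread(json.loads, body)

    print(asyncio.run(parse_body(b'{"ok": true}')))  # {'ok': True}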
langgraph_api/serde.py CHANGED
@@ -3,7 +3,7 @@ import re
  import uuid
  from base64 import b64encode
  from collections import deque
- from collections.abc import Mapping
+ from collections.abc import Callable, Mapping
  from datetime import timedelta, timezone
  from decimal import Decimal
  from ipaddress import (
@@ -16,7 +16,7 @@ from ipaddress import (
  )
  from pathlib import Path
  from re import Pattern
- from typing import Any, NamedTuple, cast
+ from typing import Any, Literal, NamedTuple, cast
  from zoneinfo import ZoneInfo

  import cloudpickle
@@ -123,6 +123,14 @@ def _sanitise(o: Any) -> Any:
  if isinstance(o, Mapping):
  return {_sanitise(k): _sanitise(v) for k, v in o.items()}
  if isinstance(o, list | tuple | set):
+ if (
+ isinstance(o, tuple)
+ and hasattr(o, "_asdict")
+ and callable(o._asdict)
+ and hasattr(o, "_fields")
+ and isinstance(o._fields, tuple)
+ ): # named tuple
+ return {f: _sanitise(ov) for f, ov in zip(o._fields, o, strict=True)}
  ctor = list if isinstance(o, list) else type(o)
  return ctor(_sanitise(x) for x in o)
  return o
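The new branch duck-types namedtuples (a tuple carrying a callable _asdict and a tuple _fields) and sanitises them into dicts keyed by field name instead of plain sequences. A quick illustration of what the check matches and what it now produces:

    from collections import namedtuple

    Point = namedtuple("Point", ["x", "y"])
    p = Point(1, 2)

    # the same attributes the _sanitise branch above keys on
    assert isinstance(p, tuple) and callable(p._asdict) and isinstance(p._fields, tuple)
    # the dict form it now yields, rather than the old sequence form [1, 2]
    assert dict(zip(p._fields, p, strict=True)) == {"x": 1, "y": 2}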
@@ -158,18 +166,46 @@ async def ajson_loads(content: bytes | Fragment) -> Any:


  class Serializer(JsonPlusSerializer):
+ def __init__(
+ self,
+ __unpack_ext_hook__: Callable[[int, bytes], Any] | None = None,
+ pickle_fallback: bool | None = None,
+ ):
+ from langgraph_api.config import SERDE
+
+ allowed_json_modules: list[tuple[str, ...]] | Literal[True] | None = None
+ if SERDE and "allowed_json_modules" in SERDE:
+ allowed_ = SERDE["allowed_json_modules"]
+ if allowed_ is True:
+ allowed_json_modules = True
+ elif allowed_ is None:
+ allowed_json_modules = None
+ else:
+ allowed_json_modules = [tuple(x) for x in allowed_]
+ if pickle_fallback is None:
+ if SERDE and "pickle_fallback" in SERDE:
+ pickle_fallback = SERDE["pickle_fallback"]
+ else:
+ pickle_fallback = True
+
+ super().__init__(
+ allowed_json_modules=allowed_json_modules,
+ __unpack_ext_hook__=__unpack_ext_hook__,
+ )
+ self.pickle_fallback = pickle_fallback
+
  def dumps_typed(self, obj: Any) -> tuple[str, bytes]:
  try:
  return super().dumps_typed(obj)
  except TypeError:
  return "pickle", cloudpickle.dumps(obj)

- def dumps(self, obj: Any) -> bytes:
- # See comment above (in json_dumpb)
- return super().dumps(obj).replace(rb"\\u0000", b"").replace(rb"\u0000", b"")
-
  def loads_typed(self, data: tuple[str, bytes]) -> Any:
  if data[0] == "pickle":
+ if not self.pickle_fallback:
+ raise ValueError(
+ "Pickle fallback is disabled. Cannot deserialize pickled object."
+ )
  try:
  return cloudpickle.loads(data[1])
  except Exception as e:
@@ -177,8 +213,16 @@ class Serializer(JsonPlusSerializer):
  "Failed to unpickle object, replacing w None", exc_info=e
  )
  return None
- return super().loads_typed(data)
-
-
- mpack_keys = {"method", "value"}
- SERIALIZER = Serializer()
+ try:
+ return super().loads_typed(data)
+ except Exception:
+ if data[0] == "json":
+ logger.exception(
+ "Heads up! There was a deserialization error of an item stored using 'json'-type serialization."
+ ' For security reasons, starting in langgraph-api version 0.5.0, we no longer serialize objects using the "json" type.'
+ " If you would like to retain the ability to deserialize old checkpoints saved in this format, "
+ 'please set the "allowed_json_modules" option in your langgraph.json configuration to add the'
+ " necessary module and type paths to an allow-list to be deserialized. You can alkso retain the"
+ ' ability to insecurely deserialize custom types by setting it to "true".'
+ )
+ raise
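With the pickle fallback disabled, loads_typed now refuses "pickle"-typed payloads before they reach cloudpickle. A hedged sketch of the observable behavior, assuming the Serializer class shown above:

    serializer = Serializer(pickle_fallback=False)
    try:
        serializer.loads_typed(("pickle", b"\x80\x04N."))  # a pickled None
    except ValueError as e:
        print(e)  # Pickle fallback is disabled. Cannot deserialize pickled object.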
langgraph_api-0.5.1.dist-info/METADATA CHANGED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: langgraph-api
- Version: 0.4.48
- Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
+ Version: 0.5.1
+ Author-email: Will Fu-Hinthorn <will@langchain.dev>, Josh Rogers <josh@langchain.dev>, Parker Rule <parker@langchain.dev>
  License: Elastic-2.0
  License-File: LICENSE
  Requires-Python: >=3.11
@@ -12,10 +12,10 @@ Requires-Dist: grpcio<2.0.0,>=1.75.0
  Requires-Dist: httpx>=0.25.0
  Requires-Dist: jsonschema-rs<0.30,>=0.20.0
  Requires-Dist: langchain-core>=0.3.64
- Requires-Dist: langgraph-checkpoint>=2.0.23
- Requires-Dist: langgraph-runtime-inmem<0.15.0,>=0.14.0
+ Requires-Dist: langgraph-checkpoint<4,>=3
+ Requires-Dist: langgraph-runtime-inmem<0.16.0,>=0.15.0
  Requires-Dist: langgraph-sdk>=0.2.0
- Requires-Dist: langgraph>=0.4.0
+ Requires-Dist: langgraph<2,>=0.4.10
  Requires-Dist: langsmith>=0.3.45
  Requires-Dist: opentelemetry-api>=1.37.0
  Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.37.0
langgraph_api-0.5.1.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
- langgraph_api/__init__.py,sha256=w0y38C_CURQVpRIfkitkwViZZChCbN89DFUhKKh3PA8,23
+ langgraph_api/__init__.py,sha256=eZ1bOun1DDVV0YLOBW4wj2FP1ajReLjbIrGmzN7ASBw,22
  langgraph_api/asgi_transport.py,sha256=XtiLOu4WWsd-xizagBLzT5xUkxc9ZG9YqwvETBPjBFE,5161
  langgraph_api/asyncio.py,sha256=FEEkLm_N-15cbElo4vQ309MkDKBZuRqAYV8VJ1DocNw,9860
  langgraph_api/cli.py,sha256=aEI2pfztEEziIwUk2imiLkNVK1LapMp_3dxvcar1org,18341
  langgraph_api/command.py,sha256=Bh-rvuTLwdHCqFWryCjB1M8oWxPBwRBUjMNj_04KPxM,852
- langgraph_api/config.py,sha256=79efOb8cBNNT1geUfVsUPJFycjUBXnIxWwMkMsbl36A,15292
+ langgraph_api/config.py,sha256=YK0tQ3ZbbI3GgqLcX4-Jayzt4iJX3gsFaa8IeTWEGXM,17047
  langgraph_api/cron_scheduler.py,sha256=25wYzEQrhPEivZrAPYOmzLPDOQa-aFogU37mTXc9TJk,2566
  langgraph_api/errors.py,sha256=zlnl3xXIwVG0oGNKKpXf1an9Rn_SBDHSyhe53hU6aLw,1858
  langgraph_api/executor_entrypoint.py,sha256=CaX813ygtf9CpOaBkfkQXJAHjFtmlScCkrOvTDmu4Aw,750
@@ -16,11 +16,11 @@ langgraph_api/logging.py,sha256=o5iVARqtFYKIcRrK2nk1ymcKEiVYKd_dHmhXLF2khFI,6090
  langgraph_api/metadata.py,sha256=Ah5x5TB8O1VAypzDa1UTrsptS1hjy9z-PuNF8WYl3VM,8597
  langgraph_api/patch.py,sha256=J0MmcfpZG15SUVaVcI0Z4x_c0-0rbbT7Pwh9fDAQOpA,1566
  langgraph_api/queue_entrypoint.py,sha256=z3ZUBl3CpnMm0KFPqCuGvSohPAmYQbhAdyRizSJSClM,8481
- langgraph_api/route.py,sha256=EBhELuJ1He-ZYcAnR5YTImcIeDtWthDae5CHELBxPkM,5056
+ langgraph_api/route.py,sha256=wh2vMKksTpXJRQ_rLLrFXBSlG608fSMJguZATSWu0Y8,5593
  langgraph_api/schema.py,sha256=spZ_XPT4AMJfw2YatsdnMZZLzgB9Sm3YR8n0SlgGdJ8,8480
  langgraph_api/self_hosted_logs.py,sha256=9ljOz3KH3O1SwsD7eTKnreyJ80NbeR7nj7SuxBlrmCc,4422
  langgraph_api/self_hosted_metrics.py,sha256=3FFezxjU0Vs-bsH39f4Dcwn7fporTLHV9REQ3UQ315A,14004
- langgraph_api/serde.py,sha256=Jkww6ixP5o2YZmnXtM7ihuAYC6YSuNDNPvE-8ILoqVo,5499
+ langgraph_api/serde.py,sha256=gNnTO98OVcIsfXJr60bG7GPioKUhTDZYqIWHc97Uyz4,7658
  langgraph_api/server.py,sha256=PExNHgem0tY_KkRFiFzj8m8Np6TrP4M0XJsEw6O2SAU,10112
  langgraph_api/sse.py,sha256=SLdtZmTdh5D8fbWrQjuY9HYLd2dg8Rmi6ZMmFMVc2iE,4204
  langgraph_api/state.py,sha256=AjkLbUQakIwK7oGzJ8oqubazRsXxG3vDMnRa0s0mzDM,4716
@@ -33,7 +33,7 @@ langgraph_api/webhook.py,sha256=SvSM1rdnNtiH4q3JQYmAqJUk2Sable5xAcwOLuRhtlo,1723
  langgraph_api/worker.py,sha256=HHgf590xElF7v02lgn0lG0iK2v2sENMjdx7TVFCvYXY,15399
  langgraph_api/api/__init__.py,sha256=wrnxz_204b2Vhv4-N0WpiPf-ZpDDlmIQkbh-TiXPnOo,5997
  langgraph_api/api/a2a.py,sha256=HIHZkLnIcM1u1FJti-L2NH-h1I9BZ_d-QW9z3gFonn8,53995
- langgraph_api/api/assistants.py,sha256=tRJse7Gr2BTeTZPljL05UvGkFiULpA-6hy03nBx9PF4,18177
+ langgraph_api/api/assistants.py,sha256=OCup8rXk0HaqWqDhOLz59f1KSTS5fwxXrCaDgyopges,17981
  langgraph_api/api/mcp.py,sha256=qe10ZRMN3f-Hli-9TI8nbQyWvMeBb72YB1PZVbyqBQw,14418
  langgraph_api/api/meta.py,sha256=_jG61UKs0J_alsCDgIwCAx1rX5pYuUwKrmOEpWnzR1I,4817
  langgraph_api/api/openapi.py,sha256=If-z1ckXt-Yu5bwQytK1LWyX_T7G46UtLfixgEP8hwc,11959
@@ -50,12 +50,12 @@ langgraph_api/auth/langsmith/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
  langgraph_api/auth/langsmith/backend.py,sha256=JVf8-q1IvB5EeiLJge3cOtPvDg6qHzK_4cR-R8hPXXQ,3753
  langgraph_api/auth/langsmith/client.py,sha256=79kwCVeHU64nsHsxWipfZhf44lM6vfs2nlfTxlJF6LU,4142
  langgraph_api/grpc_ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langgraph_api/grpc_ops/client.py,sha256=usSA1ZObwFzspgh3VYvhwRv9ZEOErcOzVc25JrsvYQU,2920
- langgraph_api/grpc_ops/ops.py,sha256=OHU1ikDMKJXRSTU7zWDXorEuB6o9czLErijh3mhDYh0,37867
+ langgraph_api/grpc_ops/client.py,sha256=Qr07JVaJrMr3jWQKFHngyC3gqsj-VNLzahbnpj1vDO8,5753
+ langgraph_api/grpc_ops/ops.py,sha256=6L7OiKcVQgAVjkrWiGO5o1pnkZ1Fcas1H3ULtyp0vbo,37442
  langgraph_api/grpc_ops/generated/__init__.py,sha256=dRiB_iGscPKdMpuLp9ueLwAmIfRaNjNXC64ABtb4cg8,135
  langgraph_api/grpc_ops/generated/core_api_pb2.py,sha256=l209i8cIazfs-zPTlt2jUg_og82oiDT4QMQCYAhU0P4,42262
  langgraph_api/grpc_ops/generated/core_api_pb2.pyi,sha256=6fnrjKRdN1-jJfHagLHhdlVog1cLkLoAc9fvTBzeFdM,49429
- langgraph_api/grpc_ops/generated/core_api_pb2_grpc.py,sha256=K_bHjM6BDzqIc1N8lc1SaxRLGFp1GUTpSiQEr5-70Oo,52466
+ langgraph_api/grpc_ops/generated/core_api_pb2_grpc.py,sha256=Qav2DuCMUSmR8nP4-fVtUBbY0Vc42jqjCs3L4LdIl-0,52467
  langgraph_api/js/.gitignore,sha256=l5yI6G_V6F1600I1IjiUKn87f4uYIrBAYU1MOyBBhg4,59
  langgraph_api/js/.prettierrc,sha256=0es3ovvyNIqIw81rPQsdt1zCQcOdBqyR_DMbFE4Ifms,19
  langgraph_api/js/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -65,15 +65,15 @@ langgraph_api/js/client.http.mts,sha256=FeVM53vduTPCyMPaYs__kmB3iWcz0k0om811DG0J
  langgraph_api/js/client.mts,sha256=8T5wp_114c2wGPfktY77StTnejhYL3ZWBmLwaUvp5XU,32333
  langgraph_api/js/errors.py,sha256=Cm1TKWlUCwZReDC5AQ6SgNIVGD27Qov2xcgHyf8-GXo,361
  langgraph_api/js/global.d.ts,sha256=j4GhgtQSZ5_cHzjSPcHgMJ8tfBThxrH-pUOrrJGteOU,196
- langgraph_api/js/package.json,sha256=besBq5s3c370nNWhHXy8ZxD0X350Xbzx4FNUDxhG-Pk,1330
+ langgraph_api/js/package.json,sha256=i6LBt4R-bi70FuMC3lIkZAuWZVJnlDE_CpI0mnArrOA,1330
  langgraph_api/js/remote.py,sha256=gBk273R7esmXg8aR6InxasNFc5E6Qju2bv2DhmmGJyU,38676
  langgraph_api/js/schema.py,sha256=M4fLtr50O1jck8H1hm_0W4cZOGYGdkrB7riLyCes4oY,438
  langgraph_api/js/sse.py,sha256=hHkbncnYnXNIbHhAWneGWYkHp4UhhhGB7-MYtDrY264,4141
  langgraph_api/js/traceblock.mts,sha256=QtGSN5VpzmGqDfbArrGXkMiONY94pMQ5CgzetT_bKYg,761
  langgraph_api/js/tsconfig.json,sha256=imCYqVnqFpaBoZPx8k1nO4slHIWBFsSlmCYhO73cpBs,341
  langgraph_api/js/ui.py,sha256=l9regrvKIxLOjH5SIYE2nhr8QCKLK1Q_1pZgxdL71X4,2488
- langgraph_api/js/yarn.lock,sha256=eWe1iuI634accFo7tumAcG8I7gLrcYKe7OyX0TMfK_s,83943
- langgraph_api/js/src/graph.mts,sha256=9zTQNdtanI_CFnOwNRoamoCVHHQHGbNlbm91aRxDeOc,2675
+ langgraph_api/js/yarn.lock,sha256=CV3hl-TyqWBKPLYvnWcn42-95nxVlDJRvUuPcC-CQuo,83943
+ langgraph_api/js/src/graph.mts,sha256=etZd27NaoVevyitJ-LAUue0HeR7V3F2YNeSGwWHm13s,3417
  langgraph_api/js/src/load.hooks.mjs,sha256=xNVHq75W0Lk6MUKl1pQYrx-wtQ8_neiUyI6SO-k0ecM,2235
  langgraph_api/js/src/preload.mjs,sha256=8m3bYkf9iZLCQzKAYAdU8snxUwAG3dVLwGvAjfGfgIc,959
  langgraph_api/js/src/utils/files.mts,sha256=nU09Y8lN8SYsg0x2ffmbIW8LEDBl-SWkmxsoXunFU0M,219
@@ -110,8 +110,8 @@ langgraph_runtime/store.py,sha256=7mowndlsIroGHv3NpTSOZDJR0lCuaYMBoTnTrewjslw,11
  LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
  logging.json,sha256=3RNjSADZmDq38eHePMm1CbP6qZ71AmpBtLwCmKU9Zgo,379
  openapi.json,sha256=Oi2tU1b8PsXb-6XNHafQvcZv934vLNQhBNPYXr9e2nU,172620
- langgraph_api-0.4.48.dist-info/METADATA,sha256=UDqSGmsHJIXF0DrV_u8dFni__0-xszEswI232NY8l6M,4149
- langgraph_api-0.4.48.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- langgraph_api-0.4.48.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
- langgraph_api-0.4.48.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
- langgraph_api-0.4.48.dist-info/RECORD,,
+ langgraph_api-0.5.1.dist-info/METADATA,sha256=8VfO9GgGcyYdDTov627-7xBHu-GD23j2SMVAORR4zoM,4186
+ langgraph_api-0.5.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langgraph_api-0.5.1.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
+ langgraph_api-0.5.1.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
+ langgraph_api-0.5.1.dist-info/RECORD,,