langgraph-api 0.4.11__py3-none-any.whl → 0.4.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langgraph-api might be problematic.

langgraph_api/__init__.py CHANGED
@@ -1 +1 @@
-__version__ = "0.4.11"
+__version__ = "0.4.14"
langgraph_api/api/a2a.py CHANGED
@@ -171,7 +171,7 @@ async def _validate_supports_messages(
     """
     assistant_id = assistant["assistant_id"]
 
-    cached_schemas = _assistant_schemas_cache.get(assistant_id)
+    cached_schemas = await _assistant_schemas_cache.get(assistant_id)
     if cached_schemas is not None:
         schemas = cached_schemas
     else:
langgraph_api/api/meta.py CHANGED
@@ -1,6 +1,7 @@
 from typing import cast
 
 import langgraph.version
+import structlog
 from starlette.responses import JSONResponse, PlainTextResponse
 
 from langgraph_api import __version__, config, metadata
@@ -13,6 +14,8 @@ from langgraph_runtime.ops import Runs
 
 METRICS_FORMATS = {"prometheus", "json"}
 
+logger = structlog.stdlib.get_logger(__name__)
+
 
 async def meta_info(request: ApiRequest):
     plus = plus_features_enabled()
@@ -71,35 +74,44 @@ async def meta_metrics(request: ApiRequest):
         resp["workers"] = worker_metrics
         return JSONResponse(resp)
     elif metrics_format == "prometheus":
-        async with connect() as conn:
-            queue_stats = await Runs.stats(conn)
-
-        metrics = [
-            "# HELP lg_api_num_pending_runs The number of runs currently pending.",
-            "# TYPE lg_api_num_pending_runs gauge",
-            f'lg_api_num_pending_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_pending"]}',
-            "# HELP lg_api_num_running_runs The number of runs currently running.",
-            "# TYPE lg_api_num_running_runs gauge",
-            f'lg_api_num_running_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_running"]}',
-        ]
+        metrics = []
+        try:
+            async with connect() as conn:
+                queue_stats = await Runs.stats(conn)
 
-        if config.N_JOBS_PER_WORKER > 0:
                 metrics.extend(
                     [
-                        "# HELP lg_api_workers_max The maximum number of workers available.",
-                        "# TYPE lg_api_workers_max gauge",
-                        f'lg_api_workers_max{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_max}',
-                        "# HELP lg_api_workers_active The number of currently active workers.",
-                        "# TYPE lg_api_workers_active gauge",
-                        f'lg_api_workers_active{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_active}',
-                        "# HELP lg_api_workers_available The number of available (idle) workers.",
-                        "# TYPE lg_api_workers_available gauge",
-                        f'lg_api_workers_available{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_available}',
+                        "# HELP lg_api_num_pending_runs The number of runs currently pending.",
+                        "# TYPE lg_api_num_pending_runs gauge",
+                        f'lg_api_num_pending_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_pending"]}',
+                        "# HELP lg_api_num_running_runs The number of runs currently running.",
+                        "# TYPE lg_api_num_running_runs gauge",
+                        f'lg_api_num_running_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_running"]}',
                     ]
                 )
+        except Exception as e:
+            # if we get a db connection error/timeout, just skip queue stats
+            await logger.awarning(
+                "Ignoring error while getting run stats for /metrics", exc_info=e
+            )
+
+        if config.N_JOBS_PER_WORKER > 0:
+            metrics.extend(
+                [
+                    "# HELP lg_api_workers_max The maximum number of workers available.",
+                    "# TYPE lg_api_workers_max gauge",
+                    f'lg_api_workers_max{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_max}',
+                    "# HELP lg_api_workers_active The number of currently active workers.",
+                    "# TYPE lg_api_workers_active gauge",
+                    f'lg_api_workers_active{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_active}',
+                    "# HELP lg_api_workers_available The number of available (idle) workers.",
+                    "# TYPE lg_api_workers_available gauge",
+                    f'lg_api_workers_available{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_available}',
+                ]
+            )
 
-        metrics.extend(http_metrics)
-        metrics.extend(pg_redis_stats)
+        metrics.extend(http_metrics)
+        metrics.extend(pg_redis_stats)
 
         metrics_response = "\n".join(metrics)
         return PlainTextResponse(metrics_response)
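
Note on this hunk: queue statistics are now collected best-effort, so a database connection error or timeout no longer fails the whole /metrics response; the worker, HTTP, and pg/redis metrics are still served. A minimal, runnable sketch of the same degrade-gracefully pattern, with a hypothetical fetch_queue_stats() standing in for Runs.stats(conn):

import asyncio

import structlog

logger = structlog.stdlib.get_logger(__name__)


def gauge(name: str, help_text: str, value: float) -> list[str]:
    # Prometheus exposition format: HELP and TYPE comments, then the sample line.
    return [f"# HELP {name} {help_text}", f"# TYPE {name} gauge", f"{name} {value}"]


async def fetch_queue_stats() -> dict:
    # Hypothetical stand-in for Runs.stats(conn); simulate a db outage here.
    raise TimeoutError("database unreachable")


async def collect_metrics() -> str:
    metrics: list[str] = []
    try:
        stats = await fetch_queue_stats()
        metrics += gauge("lg_api_num_pending_runs", "Runs currently pending.", stats["n_pending"])
    except Exception as e:
        # Degrade gracefully: skip queue stats instead of failing the endpoint.
        await logger.awarning("Ignoring error while getting run stats", exc_info=e)
    metrics += gauge("lg_api_workers_active", "Active workers.", 0)  # still served
    return "\n".join(metrics)


print(asyncio.run(collect_metrics()))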
langgraph_api/asyncio.py CHANGED
@@ -158,6 +158,7 @@ class SimpleTaskGroup(AbstractAsyncContextManager["SimpleTaskGroup"]):
         self,
         *coros: Coroutine[Any, Any, T],
         cancel: bool = False,
+        cancel_event: asyncio.Event | None = None,
         wait: bool = True,
         taskset: set[asyncio.Task] | None = None,
         taskgroup_name: str | None = None,
@@ -165,6 +166,7 @@ class SimpleTaskGroup(AbstractAsyncContextManager["SimpleTaskGroup"]):
         # Copy the taskset to avoid modifying the original set unintentionally (like in lifespan)
         self.tasks = taskset.copy() if taskset is not None else set()
         self.cancel = cancel
+        self.cancel_event = cancel_event
         self.wait = wait
         if taskset:
             for task in tuple(taskset):
@@ -181,6 +183,8 @@ class SimpleTaskGroup(AbstractAsyncContextManager["SimpleTaskGroup"]):
         try:
             if (exc := task.exception()) and not isinstance(exc, ignore_exceptions):
                 logger.exception("asyncio.task failed in task group", exc_info=exc)
+                if self.cancel_event:
+                    self.cancel_event.set()
         except asyncio.CancelledError:
             pass
 
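The new cancel_event plumbing lets a failing task inside the group signal shutdown to whoever owns the event. A self-contained sketch of the same done-callback pattern in plain asyncio (names here are illustrative, not SimpleTaskGroup's API):

import asyncio


async def flaky_worker() -> None:
    await asyncio.sleep(0.1)
    raise RuntimeError("worker crashed")


async def main() -> None:
    cancel_event = asyncio.Event()
    task = asyncio.create_task(flaky_worker())

    def on_done(t: asyncio.Task) -> None:
        # Mirror of SimpleTaskGroup's callback: set the event on unhandled failure.
        if not t.cancelled() and t.exception() is not None:
            cancel_event.set()

    task.add_done_callback(on_done)
    await cancel_event.wait()
    print("task failure observed; begin shutdown")


asyncio.run(main())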
langgraph_api/auth/langsmith/backend.py CHANGED
@@ -58,7 +58,7 @@ class LangsmithAuthBackend(AuthenticationBackend):
 
         # Check cache first
         cache_key = self._get_cache_key(headers)
-        if cached_entry := self._cache.get(cache_key):
+        if cached_entry := await self._cache.get(cache_key):
             return cached_entry["credentials"], cached_entry["user"]
 
         async with auth_client() as auth:
langgraph_api/js/remote.py CHANGED
@@ -895,7 +895,7 @@ class CustomJsAuthBackend(AuthenticationBackend):
         if self.cache_keys:
             cache_key = tuple((k, headers[k]) for k in self.cache_keys if k in headers)
             if cache_key and self.ttl_cache is not None:
-                cached = self.ttl_cache.get(cache_key)
+                cached = await self.ttl_cache.get(cache_key)
                 if cached:
                     return cached
 
langgraph_api/queue_entrypoint.py CHANGED
@@ -86,6 +86,7 @@ async def health_and_metrics_server():
         log_level="error",
         access_log=False,
     )
+    # Server will run indefinitely until the process is terminated
     server = uvicorn.Server(config)
 
     logger.info(f"Health and metrics server started at http://0.0.0.0:{port}")
@@ -93,14 +94,15 @@ async def health_and_metrics_server():
 
 
 async def entrypoint(
-    grpc_port: int | None = None, entrypoint_name: str = "python-queue"
+    grpc_port: int | None = None,
+    entrypoint_name: str = "python-queue",
+    cancel_event: asyncio.Event | None = None,
 ):
     from langgraph_api import logging as lg_logging
     from langgraph_api.api import user_router
 
     lg_logging.set_logging_context({"entrypoint": entrypoint_name})
     tasks: set[asyncio.Task] = set()
-    tasks.add(asyncio.create_task(health_and_metrics_server()))
 
     original_lifespan = user_router.router.lifespan_context if user_router else None
 
@@ -113,6 +115,7 @@ async def entrypoint(
             with_cron_scheduler=with_cron_scheduler,
             grpc_port=grpc_port,
             taskset=taskset,
+            cancel_event=cancel_event,
         ):
             if original_lifespan:
                 async with original_lifespan(app):
@@ -123,6 +126,7 @@ async def entrypoint(
         async with combined_lifespan(
             None, with_cron_scheduler=False, grpc_port=grpc_port, taskset=tasks
         ):
+            tasks.add(asyncio.create_task(health_and_metrics_server()))
             await asyncio.gather(*tasks)
 
 
@@ -141,8 +145,14 @@ async def main(grpc_port: int | None = None, entrypoint_name: str = "python-queu
     signal.signal(signal.SIGTERM, lambda *_: _handle_signal())
 
     entry_task = asyncio.create_task(
-        entrypoint(grpc_port=grpc_port, entrypoint_name=entrypoint_name)
+        entrypoint(
+            grpc_port=grpc_port,
+            entrypoint_name=entrypoint_name,
+            cancel_event=stop_event,
+        )
     )
+    # Handle the case where the entrypoint errors out
+    entry_task.add_done_callback(lambda _: stop_event.set())
     await stop_event.wait()
 
     logger.warning("Cancelling queue entrypoint task")
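
Net effect of the queue_entrypoint changes: the health/metrics server is now started inside the lifespan (so it is tracked and torn down with everything else), and shutdown is event-driven in both directions; SIGTERM sets stop_event, and an entrypoint that exits or crashes also sets it via add_done_callback, so main() can never hang on stop_event.wait(). A condensed, runnable sketch of that pattern (the entrypoint body is a stand-in):

import asyncio
import signal


async def entrypoint(cancel_event: asyncio.Event) -> None:
    await asyncio.sleep(1.0)  # stand-in for the real queue entrypoint
    raise RuntimeError("entrypoint errored out")


async def main() -> None:
    stop_event = asyncio.Event()
    signal.signal(signal.SIGTERM, lambda *_: stop_event.set())

    entry_task = asyncio.create_task(entrypoint(cancel_event=stop_event))
    # Handle the case where the entrypoint errors out (or returns normally)
    entry_task.add_done_callback(lambda _: stop_event.set())

    await stop_event.wait()
    entry_task.cancel()
    try:
        await entry_task
    except (asyncio.CancelledError, RuntimeError):
        pass


asyncio.run(main())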
langgraph_api/serde.py CHANGED
@@ -1,5 +1,4 @@
 import asyncio
-import base64
 import re
 import uuid
 from base64 import b64encode
@@ -178,21 +177,3 @@ class Serializer(JsonPlusSerializer):
 
 mpack_keys = {"method", "value"}
 SERIALIZER = Serializer()
-
-
-# TODO: Make more performant (by removing)
-async def reserialize_message(message: bytes) -> bytes:
-    # Stream messages from golang runtime are a byte dict of StreamChunks.
-    loaded = await ajson_loads(message)
-    converted = {}
-    for k, v in loaded.items():
-        if isinstance(v, dict) and v.keys() == mpack_keys:
-            if v["method"] == "missing":
-                converted[k] = v["value"]  # oops
-            else:
-                converted[k] = SERIALIZER.loads_typed(
-                    (v["method"], base64.b64decode(v["value"]))
-                )
-        else:
-            converted[k] = v
-    return json_dumpb(converted)
langgraph_api/utils/cache.py CHANGED
@@ -1,18 +1,27 @@
 import asyncio
 import time
 from collections import OrderedDict
+from collections.abc import Awaitable, Callable
 from typing import Generic, TypeVar
 
 T = TypeVar("T")
 
 
 class LRUCache(Generic[T]):
-    """LRU cache with TTL support."""
+    """LRU cache with TTL and proactive refresh support."""
 
-    def __init__(self, max_size: int = 1000, ttl: float = 60):
-        self._cache: OrderedDict[str, tuple[T, float]] = OrderedDict()
+    def __init__(
+        self,
+        max_size: int = 1000,
+        ttl: float = 60,
+        refresh_window: float = 30,
+        refresh_callback: Callable[[str], Awaitable[T | None]] | None = None,
+    ):
+        self._cache: OrderedDict[str, tuple[T, float, bool]] = OrderedDict()
         self._max_size = max_size if max_size > 0 else 1000
         self._ttl = ttl
+        self._refresh_window = refresh_window if refresh_window > 0 else 30
+        self._refresh_callback = refresh_callback
 
     def _get_time(self) -> float:
         """Get current time, using loop.time() if available for better performance."""
@@ -21,17 +30,45 @@ class LRUCache(Generic[T]):
         except RuntimeError:
             return time.monotonic()
 
-    def get(self, key: str) -> T | None:
-        """Get item from cache, returning None if expired or not found."""
+    async def get(self, key: str) -> T | None:
+        """Get item from cache, attempting refresh if within refresh window."""
         if key not in self._cache:
             return None
 
-        value, timestamp = self._cache[key]
-        if self._get_time() - timestamp >= self._ttl:
-            # Expired, remove and return None
+        value, timestamp, is_refreshing = self._cache[key]
+        current_time = self._get_time()
+        time_until_expiry = self._ttl - (current_time - timestamp)
+
+        # Check if expired
+        if time_until_expiry <= 0:
             del self._cache[key]
             return None
 
+        # Check if we should attempt refresh (within refresh window and not already refreshing)
+        if (
+            time_until_expiry <= self._refresh_window
+            and not is_refreshing
+            and self._refresh_callback
+        ):
+            # Mark as refreshing to prevent multiple simultaneous refresh attempts
+            self._cache[key] = (value, timestamp, True)
+
+            try:
+                # Attempt refresh
+                refreshed_value = await self._refresh_callback(key)
+                if refreshed_value is not None:
+                    # Refresh successful, update cache with new value
+                    self._cache[key] = (refreshed_value, current_time, False)
+                    # Move to end (most recently used)
+                    self._cache.move_to_end(key)
+                    return refreshed_value
+                else:
+                    # Refresh failed, fallback to cached value
+                    self._cache[key] = (value, timestamp, False)
+            except Exception:
+                # Refresh failed with exception, fallback to cached value
+                self._cache[key] = (value, timestamp, False)
+
         # Move to end (most recently used)
         self._cache.move_to_end(key)
         return value
@@ -46,8 +83,8 @@ class LRUCache(Generic[T]):
         while len(self._cache) >= self._max_size:
             self._cache.popitem(last=False)  # Remove oldest (FIFO)
 
-        # Add new entry
-        self._cache[key] = (value, self._get_time())
+        # Add new entry (not refreshing initially)
+        self._cache[key] = (value, self._get_time(), False)
 
     def size(self) -> int:
         """Return current cache size."""
langgraph_api/utils/retriable_client.py ADDED
@@ -0,0 +1,74 @@
+import asyncio
+
+import httpx
+import structlog
+
+logger = structlog.stdlib.get_logger(__name__)
+
+
+async def _make_http_request_with_retries(
+    url: str,
+    headers: dict,
+    method: str = "GET",
+    json_data: dict | None = None,
+    max_retries: int = 3,
+    base_delay: float = 1.0,
+) -> httpx.Response | None:
+    """
+    Make an HTTP request with exponential backoff retries.
+
+    Args:
+        url: The URL to request
+        headers: Headers to include in the request
+        method: HTTP method ("GET" or "POST")
+        json_data: JSON data for POST requests
+        max_retries: Maximum number of retry attempts
+        base_delay: Base delay in seconds for exponential backoff
+
+    Returns:
+        httpx.Response: The successful response
+
+    Raises:
+        httpx.HTTPStatusError: If the request fails after all retries
+        httpx.RequestError: If the request fails after all retries
+    """
+    for attempt in range(max_retries + 1):
+        try:
+            async with httpx.AsyncClient(timeout=10.0) as client:
+                response = await client.request(
+                    method, url, headers=headers, json=json_data
+                )
+                response.raise_for_status()
+                return response
+
+        except (
+            httpx.TimeoutException,
+            httpx.NetworkError,
+            httpx.RequestError,
+            httpx.HTTPStatusError,
+        ) as e:
+            if isinstance(e, httpx.HTTPStatusError) and e.response.status_code < 500:
+                # Don't retry on 4xx errors, but do on 5xxs
+                raise e
+
+            # Back off and retry if we haven't reached the max retries
+            if attempt < max_retries:
+                delay = base_delay * (2**attempt)  # Exponential backoff
+                logger.warning(
+                    "HTTP %s request attempt %d to %s failed: %s. Retrying in %.1f seconds...",
+                    method,
+                    attempt + 1,
+                    url,
+                    e,
+                    delay,
+                )
+                await asyncio.sleep(delay)
+            else:
+                logger.exception(
+                    "HTTP %s request to %s failed after %d attempts. Last error: %s",
+                    method,
+                    url,
+                    max_retries + 1,
+                    e,
+                )
+                raise e
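
A usage sketch for the new helper. The leading underscore marks it module-internal, so treat this as illustrative only; the URL and token are placeholders. By default it retries timeouts, network errors, and 5xx responses with 1s/2s/4s backoff, and raises immediately on 4xx:

import asyncio

import httpx

from langgraph_api.utils.retriable_client import _make_http_request_with_retries


async def main() -> None:
    try:
        response = await _make_http_request_with_retries(
            "https://example.com/health",  # placeholder URL
            headers={"Authorization": "Bearer <token>"},  # placeholder token
        )
        if response is not None:
            print(response.status_code)
    except httpx.HTTPError as exc:
        # Raised once all retries are exhausted (or right away on a 4xx)
        print(f"request failed: {exc}")


asyncio.run(main())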
langgraph_api/worker.py CHANGED
@@ -153,7 +153,7 @@ async def worker(
             raise UserTimeout(e) from e
         raise
 
-    async with Runs.enter(run_id, run["thread_id"], main_loop) as done:
+    async with Runs.enter(run_id, run["thread_id"], main_loop, resumable) as done:
         # attempt the run
         try:
             if attempt > BG_JOB_MAX_RETRIES:
{langgraph_api-0.4.11.dist-info → langgraph_api-0.4.14.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-api
-Version: 0.4.11
+Version: 0.4.14
 Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
 License: Elastic-2.0
 License-File: LICENSE
@@ -11,7 +11,7 @@ Requires-Dist: httpx>=0.25.0
 Requires-Dist: jsonschema-rs<0.30,>=0.20.0
 Requires-Dist: langchain-core>=0.3.64
 Requires-Dist: langgraph-checkpoint>=2.0.23
-Requires-Dist: langgraph-runtime-inmem<0.12.0,>=0.11.0
+Requires-Dist: langgraph-runtime-inmem<0.13.0,>=0.12.0
 Requires-Dist: langgraph-sdk>=0.2.0
 Requires-Dist: langgraph>=0.4.0
 Requires-Dist: langsmith>=0.3.45
{langgraph_api-0.4.11.dist-info → langgraph_api-0.4.14.dist-info}/RECORD RENAMED
@@ -1,6 +1,6 @@
-langgraph_api/__init__.py,sha256=xIphSmmFF5C8ZjsK5bpruTtbjrTL9bI6TjdjgsELGCw,23
+langgraph_api/__init__.py,sha256=kBEbn8dkCFa3vKochkZqeCl78cbsUbutSFlOYZrn__w,23
 langgraph_api/asgi_transport.py,sha256=XtiLOu4WWsd-xizagBLzT5xUkxc9ZG9YqwvETBPjBFE,5161
-langgraph_api/asyncio.py,sha256=NjHFvZStKryAAfGOrl3-efHtCzibvpDx-dl8PnrE1Tk,9588
+langgraph_api/asyncio.py,sha256=DN2kyEYFBePzBW1Sa6hDUuQuUEp7M5LAMsH3_6r1Mjc,9762
 langgraph_api/cli.py,sha256=-ruIeKi1imvS6GriOfRDZY-waV4SbWiJ0BEFAciPVYI,16330
 langgraph_api/command.py,sha256=Q9XDRhnkCX7jyqW52_Rf2PPYKxjr-Z9BUHazI1HcmB8,817
 langgraph_api/config.py,sha256=r9mmbyZlhBuJLpnTkaOLcNH6ufFNqm_2eCiuOmhqRl0,12241
@@ -14,10 +14,10 @@ langgraph_api/http_metrics.py,sha256=MU9ccXt7aBb0AJ2SWEjwtbtbJEWmeqSdx7-CI51e32o
 langgraph_api/logging.py,sha256=qB6q_cUba31edE4_D6dBGhdiUTpW7sXAOepUjYb_R50,5216
 langgraph_api/metadata.py,sha256=fVsbwxVitAj4LGVYpCcadYeIFANEaNtcx6LBxQLcTqg,6949
 langgraph_api/patch.py,sha256=iLwSd9ZWoVj6MxozMyGyMvWWbE9RIP5eZX1dpCBSlSU,1480
-langgraph_api/queue_entrypoint.py,sha256=yFzVX3_YKTq4w1A5h5nRpVfiWuSOeJ9acHMPAcTIrKY,5282
+langgraph_api/queue_entrypoint.py,sha256=Y0Hu4QXNV7HPZWlBwuNCm8ehqD_n79AMk7ZWDZfBc4U,5631
 langgraph_api/route.py,sha256=EBhELuJ1He-ZYcAnR5YTImcIeDtWthDae5CHELBxPkM,5056
 langgraph_api/schema.py,sha256=AsgF0dIjBvDd_PDy20mGqB_IkBLgVvSj8qRKG_lPlec,8440
-langgraph_api/serde.py,sha256=3GvelKhySjlXaNqpg2GyUxU6-NEkvif7WlMF9if_EgU,6029
+langgraph_api/serde.py,sha256=CBS3ctOLpmUWUJqT784DvvgOU1SUY2EPKkyRLIZwYn0,5367
 langgraph_api/server.py,sha256=uCAqPgCLJ6ckslLs0i_dacSR8mzuR0Y6PkkJYk0O3bE,7196
 langgraph_api/sse.py,sha256=SLdtZmTdh5D8fbWrQjuY9HYLd2dg8Rmi6ZMmFMVc2iE,4204
 langgraph_api/state.py,sha256=AjkLbUQakIwK7oGzJ8oqubazRsXxG3vDMnRa0s0mzDM,4716
@@ -27,12 +27,12 @@ langgraph_api/thread_ttl.py,sha256=KyHnvD0e1p1cV4Z_ZvKNVzDztuI2RBCUsUO2V7GlOSw,1
 langgraph_api/traceblock.py,sha256=Qq5CUdefnMDaRDnyvBSWGBClEj-f3oO7NbH6fedxOSE,630
 langgraph_api/validation.py,sha256=86jftgOsMa7tkeshBw6imYe7zyUXPoVuf5Voh6dFiR8,5285
 langgraph_api/webhook.py,sha256=SvSM1rdnNtiH4q3JQYmAqJUk2Sable5xAcwOLuRhtlo,1723
-langgraph_api/worker.py,sha256=M9WQdxEzVGDZqdjz3LHEHhM1g6isPcf3k1V4PEkcSY8,15343
+langgraph_api/worker.py,sha256=FQRw3kL9ynDv_LNgY_OjjPZQBuAvSQpsW6nECnABvDg,15354
 langgraph_api/api/__init__.py,sha256=raFkYH50tsO-KjRmDbGVoHCuxuH58u1lrZbr-MlITIY,6262
-langgraph_api/api/a2a.py,sha256=HralDXn_sbTnZIKfRfC-1Gl2SHX3Z74vYkTkBA-mlyw,35143
+langgraph_api/api/a2a.py,sha256=4J2rEYDz_ZkBrrggfkyqnkytCs3lAfpPD3fMkRdLt9A,35149
 langgraph_api/api/assistants.py,sha256=JFaBYp9BAXGaJ0yfy1SG_Mr-3xjeWSkdCHtmXpiAqP4,17290
 langgraph_api/api/mcp.py,sha256=qe10ZRMN3f-Hli-9TI8nbQyWvMeBb72YB1PZVbyqBQw,14418
-langgraph_api/api/meta.py,sha256=dFD9ZgykbKARLdVSaJD9vO3CShvEyBmGpkjE8tqii0c,4448
+langgraph_api/api/meta.py,sha256=Qyj6r5czkVJ81tpD6liFY7tlrmFDsiSfBr-4X8HJpRc,4834
 langgraph_api/api/openapi.py,sha256=If-z1ckXt-Yu5bwQytK1LWyX_T7G46UtLfixgEP8hwc,11959
 langgraph_api/api/runs.py,sha256=Dzqg3Klnp_7QVHl26J51DpSlMvBhgUdwcKeeMQdqa4Y,22127
 langgraph_api/api/store.py,sha256=xGcPFx4v-VxlK6HRU9uCjzCQ0v66cvc3o_PB5_g7n0Q,5550
@@ -44,7 +44,7 @@ langgraph_api/auth/middleware.py,sha256=jDA4t41DUoAArEY_PNoXesIUBJ0nGhh85QzRdn5E
 langgraph_api/auth/noop.py,sha256=Bk6Nf3p8D_iMVy_OyfPlyiJp_aEwzL-sHrbxoXpCbac,586
 langgraph_api/auth/studio_user.py,sha256=fojJpexdIZYI1w3awiqOLSwMUiK_M_3p4mlfQI0o-BE,454
 langgraph_api/auth/langsmith/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langgraph_api/auth/langsmith/backend.py,sha256=060NPoZ82J1Y23hr3XgZnxyzhQ5lZngBWPcUy0QntjY,3658
+langgraph_api/auth/langsmith/backend.py,sha256=rdkz8IXLHusJqcoacvl2XuMZnQVR7PLpE0SHHcKTqv0,3664
 langgraph_api/auth/langsmith/client.py,sha256=Kn9503en1tmlNtkbvqRxYSRCOUrWaVpqvxyLLb1cgzY,3908
 langgraph_api/js/.gitignore,sha256=l5yI6G_V6F1600I1IjiUKn87f4uYIrBAYU1MOyBBhg4,59
 langgraph_api/js/.prettierrc,sha256=0es3ovvyNIqIw81rPQsdt1zCQcOdBqyR_DMbFE4Ifms,19
@@ -56,7 +56,7 @@ langgraph_api/js/client.mts,sha256=gDvYiW7Qfl4re2YhZ5oNqtuvffnW_Sf7DK5aUbKB3vw,3
 langgraph_api/js/errors.py,sha256=Cm1TKWlUCwZReDC5AQ6SgNIVGD27Qov2xcgHyf8-GXo,361
 langgraph_api/js/global.d.ts,sha256=j4GhgtQSZ5_cHzjSPcHgMJ8tfBThxrH-pUOrrJGteOU,196
 langgraph_api/js/package.json,sha256=syy2fEcmTxGQVfz4P9MUTgoTxHr1MUcA1rDXemAig2U,1335
-langgraph_api/js/remote.py,sha256=iWs3SdmV7vFa28p04rYFSdFEGSYSlS3918AzWljKT9Q,38644
+langgraph_api/js/remote.py,sha256=VmQ4Ie1V5z5gWEChXdY1m1kxzL3HE6AwKzfyIEfdE2k,38650
 langgraph_api/js/schema.py,sha256=M4fLtr50O1jck8H1hm_0W4cZOGYGdkrB7riLyCes4oY,438
 langgraph_api/js/sse.py,sha256=hHkbncnYnXNIbHhAWneGWYkHp4UhhhGB7-MYtDrY264,4141
 langgraph_api/js/traceblock.mts,sha256=QtGSN5VpzmGqDfbArrGXkMiONY94pMQ5CgzetT_bKYg,761
@@ -78,10 +78,11 @@ langgraph_api/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 langgraph_api/models/run.py,sha256=HAMvpmIVnGcuOaKoDcpEfeRWo00-bmX_Gvp6lqo7VO0,13223
 langgraph_api/tunneling/cloudflare.py,sha256=iKb6tj-VWPlDchHFjuQyep2Dpb-w2NGfJKt-WJG9LH0,3650
 langgraph_api/utils/__init__.py,sha256=yCMq7pOMlmeNmi2Fh8U7KLiljBdOMcF0L2SfpobnKKE,5703
-langgraph_api/utils/cache.py,sha256=SrtIWYibbrNeZzLXLUGBFhJPkMVNQnVxR5giiYGHEfI,1810
+langgraph_api/utils/cache.py,sha256=F23s-4BPJjuYh_PRL5pmIsSjqYWsY_b3PB7xmRwKwKw,3452
 langgraph_api/utils/config.py,sha256=Tbp4tKDSLKXQJ44EKr885wAQupY-9VWNJ6rgUU2oLOY,4162
 langgraph_api/utils/future.py,sha256=lXsRQPhJwY7JUbFFZrK-94JjgsToLu-EWU896hvbUxE,7289
 langgraph_api/utils/headers.py,sha256=NDBmKSSVOOYeYN0HfK1a3xbYtAg35M_JO1G9yklpZsA,5682
+langgraph_api/utils/retriable_client.py,sha256=a50ZxfXV48C97rOCiVWAEmfOPJELwPnvUyEqo3vEixI,2379
 langgraph_api/utils/uuids.py,sha256=AW_9-1iFqK2K5hljmi-jtaNzUIoBshk5QPt8LbpbD2g,2975
 langgraph_license/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langgraph_license/validation.py,sha256=CU38RUZ5xhP1S8F_y8TNeV6OmtO-tIGjCXbXTwJjJO4,612
@@ -97,8 +98,8 @@ langgraph_runtime/store.py,sha256=7mowndlsIroGHv3NpTSOZDJR0lCuaYMBoTnTrewjslw,11
 LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
 logging.json,sha256=3RNjSADZmDq38eHePMm1CbP6qZ71AmpBtLwCmKU9Zgo,379
 openapi.json,sha256=21wu-NxdxyTQwZctNcEfRkLMnSBi0QhGAfwq5kg8XNU,172618
-langgraph_api-0.4.11.dist-info/METADATA,sha256=hK2zyCzG6xb1reykMm0EeX_kXl_y2he20Bf2xHDWndM,3893
-langgraph_api-0.4.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-langgraph_api-0.4.11.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
-langgraph_api-0.4.11.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
-langgraph_api-0.4.11.dist-info/RECORD,,
+langgraph_api-0.4.14.dist-info/METADATA,sha256=1bPmLia8QAL_OsbEw9PmGJGOUECDnIPoDuULeSbB6GI,3893
+langgraph_api-0.4.14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langgraph_api-0.4.14.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
+langgraph_api-0.4.14.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
+langgraph_api-0.4.14.dist-info/RECORD,,