langgraph-runtime-inmem 0.22.0__py3-none-any.whl → 0.22.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
--- a/langgraph_runtime_inmem/__init__.py
+++ b/langgraph_runtime_inmem/__init__.py
@@ -10,7 +10,7 @@ from langgraph_runtime_inmem import (
     store,
 )
 
-__version__ = "0.22.0"
+__version__ = "0.22.1"
 __all__ = [
     "ops",
     "database",

--- a/langgraph_runtime_inmem/checkpoint.py
+++ b/langgraph_runtime_inmem/checkpoint.py
@@ -1,13 +1,21 @@
 from __future__ import annotations
 
+import functools
 import logging
 import os
+import threading
 import typing
 import uuid
-from collections.abc import AsyncIterator
+import weakref
+from collections.abc import AsyncIterator, Callable
 from typing import Any
 
-from langgraph.checkpoint.memory import MemorySaver, PersistentDict
+from langgraph.checkpoint.memory import (
+    InMemorySaver as InMemorySaverBase,
+)
+from langgraph.checkpoint.memory import (
+    PersistentDict,
+)
 
 if typing.TYPE_CHECKING:
     from langchain_core.runnables import RunnableConfig
@@ -37,11 +45,12 @@ DISABLE_FILE_PERSISTENCE = (
 )
 
 
-class InMemorySaver(MemorySaver):
+class InMemorySaver(InMemorySaverBase):
     def __init__(
         self,
         *,
         serde: SerializerProtocol | None = None,
+        __persistence_hook__: Callable[[PersistentDict], None] | None = None,
     ) -> None:
         self.filename = os.path.join(".langgraph_api", ".langgraph_checkpoint.")
         i = 0
@@ -54,6 +63,8 @@ class InMemorySaver(MemorySaver):
                 os.mkdir(".langgraph_api")
             thisfname = self.filename + str(i) + ".pckl"
             d = PersistentDict(*args, filename=thisfname)
+            if __persistence_hook__:
+                __persistence_hook__(d)
 
             try:
                 d.load()
@@ -199,6 +210,15 @@ class InMemorySaver(MemorySaver):
             pending_writes=tuple_.pending_writes,
         )
 
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        global _ingestion_thread
+        if _ingestion_thread is not None:
+            logger.info("Stopping dev checkpoint ingestion loop")
+            _ingestion_thread[0].set()
+            _ingestion_thread[1].join()
+            _ingestion_thread = None
+        await super().__aexit__(exc_type, exc_val, exc_tb)
+
 
 MEMORY = None
 
@@ -206,12 +226,16 @@ MEMORY = None
 def Checkpointer(*args, unpack_hook=None, **kwargs):
     global MEMORY
     if MEMORY is None:
-        MEMORY = InMemorySaver()
+        MEMORY = InMemorySaver(
+            __persistence_hook__=_hook,
+        )
     if unpack_hook is not None:
         from langgraph_api.serde import Serializer
 
         saver = InMemorySaver(
-            serde=Serializer(__unpack_ext_hook__=unpack_hook), **kwargs
+            serde=Serializer(__unpack_ext_hook__=unpack_hook),
+            __persistence_hook__=_hook,
+            **kwargs,
         )
         saver.writes = MEMORY.writes
         saver.blobs = MEMORY.blobs
@@ -220,4 +244,43 @@ def Checkpointer(*args, unpack_hook=None, **kwargs):
     return MEMORY
 
 
+_stores: dict[str, weakref.ref[PersistentDict]] = {}
+_ingestion_thread: tuple[threading.Event, threading.Thread] | None = None
+_ingestion_delay: int = 10
+
+
+def _hook(d: PersistentDict):
+    global _ingestion_thread
+    _stores[d.filename] = weakref.ref(d)
+    if _ingestion_thread is None:
+        logger.info("Starting dev checkpoint ingestion loop")
+        stop_event = threading.Event()
+        _ingestion_thread = (
+            stop_event,
+            threading.Thread(
+                target=functools.partial(_ingestion_loop, stop_event), daemon=True
+            ),
+        )
+        _ingestion_thread[1].start()
+    pass
+
+
+def _ingestion_loop(stop_event: threading.Event):
+    drop = set()
+    while not stop_event.wait(timeout=_ingestion_delay):
+        keys = list(_stores.keys())
+        for store_key in keys:
+            if store := _stores[store_key]():
+                store.sync()
+                continue
+            else:
+                drop.add(store_key)
+        if drop:
+            for store_key in drop:
+                del _stores[store_key]
+            drop.clear()
+    # Note: the checkpoints are flushed one last time upon exit.
+    logger.info("dev checkpoint ingestion loop exiting")
+
+
 __all__ = ["Checkpointer"]

--- a/langgraph_runtime_inmem/ops.py
+++ b/langgraph_runtime_inmem/ops.py
@@ -10,7 +10,7 @@ import typing
 import uuid
 from collections import defaultdict
 from collections.abc import AsyncIterator, Sequence
-from contextlib import asynccontextmanager
+from contextlib import AsyncExitStack, asynccontextmanager
 from datetime import UTC, datetime, timedelta
 from typing import Any, Literal, cast
 from uuid import UUID, uuid4
@@ -150,6 +150,8 @@ class Assistants(Authenticated):
         select: list[AssistantSelectField] | None = None,
         ctx: Auth.types.BaseAuthContext | None = None,
     ) -> tuple[AsyncIterator[Assistant], int]:
+        from langgraph_api.graph import GRAPHS
+
         metadata = metadata if metadata is not None else {}
         filters = await Assistants.handle_event(
             ctx,
@@ -159,6 +161,9 @@
             ),
         )
 
+        if graph_id is not None and graph_id not in GRAPHS:
+            raise HTTPException(status_code=404, detail=f"Graph {graph_id} not found")
+
         # Get all assistants and filter them
         assistants = conn.store["assistants"]
         filtered_assistants = [
@@ -365,6 +370,8 @@ class Assistants(Authenticated):
         Returns:
             return the updated assistant model.
         """
+        from langgraph_api.graph import GRAPHS
+
         assistant_id = _ensure_uuid(assistant_id)
         metadata = metadata if metadata is not None else {}
         config = config if config is not None else {}
@@ -387,6 +394,9 @@
                 detail="Cannot specify both configurable and context. Prefer setting context alone. Context was introduced in LangGraph 0.6.0 and is the long term planned replacement for configurable.",
             )
 
+        if graph_id is not None and graph_id not in GRAPHS:
+            raise HTTPException(status_code=404, detail=f"Graph {graph_id} not found")
+
         # Keep config and context up to date with one another
         if config.get("configurable"):
             context = config["configurable"]
@@ -462,55 +472,85 @@
 
     @staticmethod
     async def delete(
-        conn: InMemConnectionProto,
+        conn: InMemConnectionProto | None,
         assistant_id: UUID,
         ctx: Auth.types.BaseAuthContext | None = None,
+        *,
+        delete_threads: bool = False,
     ) -> AsyncIterator[UUID]:
         """Delete an assistant by ID."""
-        assistant_id = _ensure_uuid(assistant_id)
-        filters = await Assistants.handle_event(
-            ctx,
-            "delete",
-            Auth.types.AssistantsDelete(
-                assistant_id=assistant_id,
-            ),
-        )
-        assistant = next(
-            (a for a in conn.store["assistants"] if a["assistant_id"] == assistant_id),
-            None,
-        )
+        async with AsyncExitStack() as stack:
+            if conn is None:
+                conn = await stack.enter_async_context(connect())
 
-        if not assistant:
-            raise HTTPException(
-                status_code=404, detail=f"Assistant with ID {assistant_id} not found"
+            assistant_id = _ensure_uuid(assistant_id)
+            filters = await Assistants.handle_event(
+                ctx,
+                "delete",
+                Auth.types.AssistantsDelete(
+                    assistant_id=assistant_id,
+                ),
             )
-        elif filters and not _check_filter_match(assistant["metadata"], filters):
-            raise HTTPException(
-                status_code=404, detail=f"Assistant with ID {assistant_id} not found"
+            assistant = next(
+                (
+                    a
+                    for a in conn.store["assistants"]
+                    if a["assistant_id"] == assistant_id
+                ),
+                None,
             )
 
-        # Cancel all in-flight runs for this assistant before deletion
-        await Runs.cancel(
-            conn,
-            assistant_id=assistant_id,
-            action="interrupt",
-            ctx=ctx,
-        )
+            if not assistant:
+                raise HTTPException(
+                    status_code=404,
+                    detail=f"Assistant with ID {assistant_id} not found",
+                )
+            elif filters and not _check_filter_match(assistant["metadata"], filters):
+                raise HTTPException(
+                    status_code=404,
+                    detail=f"Assistant with ID {assistant_id} not found",
+                )
 
-        conn.store["assistants"] = [
-            a for a in conn.store["assistants"] if a["assistant_id"] != assistant_id
-        ]
-        # Cascade delete assistant versions
-        conn.store["assistant_versions"] = [
-            v
-            for v in conn.store["assistant_versions"]
-            if v["assistant_id"] != assistant_id
-        ]
+            if delete_threads:
+                threads_to_delete = [
+                    t["thread_id"]
+                    for t in conn.store["threads"]
+                    if t.get("metadata", {}).get("assistant_id") == str(assistant_id)
+                ]
+                for thread_id in threads_to_delete:
+                    try:
+                        async for _ in await Threads.delete(conn, thread_id, ctx=ctx):
+                            pass
+                    except HTTPException:
+                        await logger.awarning(
+                            "Skipping thread deletion during cascade delete (user lacks permission)",
+                            thread_id=thread_id,
+                            assistant_id=assistant_id,
+                        )
 
-        async def _yield_deleted():
-            yield assistant_id
+            # 3. Cancel in-flight runs AFTER auth validation
+            await Runs.cancel(
+                conn,
+                assistant_id=assistant_id,
+                action="interrupt",
+                ctx=ctx,
+            )
 
-        return _yield_deleted()
+            # 4. Delete assistant
+            conn.store["assistants"] = [
+                a for a in conn.store["assistants"] if a["assistant_id"] != assistant_id
+            ]
+            # Cascade delete assistant versions
+            conn.store["assistant_versions"] = [
+                v
+                for v in conn.store["assistant_versions"]
+                if v["assistant_id"] != assistant_id
+            ]
+
+            async def _yield_deleted():
+                yield assistant_id
+
+            return _yield_deleted()
 
     @staticmethod
     async def set_latest(
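
Assistants.delete now accepts conn=None and, when no connection is supplied, opens one itself via AsyncExitStack, so the same body works whether or not the caller already holds a connection; the new delete_threads flag additionally cascades deletion to threads whose metadata points at the assistant. Below is a rough sketch of the optional-connection pattern only, with acquire_conn and Conn as hypothetical stand-ins for the runtime's connect() and its connection type.

    from __future__ import annotations

    import asyncio
    from contextlib import AsyncExitStack, asynccontextmanager


    class Conn:
        """Hypothetical stand-in for the runtime's in-memory connection object."""


    @asynccontextmanager
    async def acquire_conn():
        # Hypothetical stand-in for connect(): yields a connection, then releases it.
        conn = Conn()
        try:
            yield conn
        finally:
            pass  # release resources here


    async def delete(conn: Conn | None = None) -> str:
        async with AsyncExitStack() as stack:
            if conn is None:
                # Only manage the connection's lifetime when we created it ourselves;
                # the exit stack closes it when this block ends.
                conn = await stack.enter_async_context(acquire_conn())
            return f"deleted using {conn!r}"


    asyncio.run(delete())        # opens and closes its own connection
    asyncio.run(delete(Conn()))  # reuses a caller-provided connection, leaves it open

Entering the context manager through the stack only when the function created the connection means a caller-provided connection is never closed out from under the caller.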
@@ -632,6 +672,8 @@ class Assistants(Authenticated):
         ctx: Auth.types.BaseAuthContext | None = None,
     ) -> int:
         """Get count of assistants."""
+        from langgraph_api.graph import GRAPHS
+
         metadata = metadata if metadata is not None else {}
         filters = await Assistants.handle_event(
             ctx,
@@ -641,6 +683,9 @@
             ),
         )
 
+        if graph_id is not None and graph_id not in GRAPHS:
+            raise HTTPException(status_code=404, detail=f"Graph {graph_id} not found")
+
         count = 0
         for assistant in conn.store["assistants"]:
             if (
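
The search, update, and count hunks above all add the same guard: a request that filters on a graph_id not present in the registered GRAPHS now fails with 404 before any assistants are scanned, while graph_id=None still means "no filter". A tiny sketch of that guard in isolation; GRAPHS and HTTPException here are local stand-ins (the real ones come from langgraph_api.graph and the server's HTTP layer), used only to show the behavior.

    from __future__ import annotations

    # Local stand-ins: the real GRAPHS registry and HTTPException live in the server;
    # these only mirror the shape used in the diff above.
    GRAPHS: dict[str, object] = {"agent": object()}


    class HTTPException(Exception):
        def __init__(self, status_code: int, detail: str):
            super().__init__(detail)
            self.status_code = status_code
            self.detail = detail


    def check_graph(graph_id: str | None) -> None:
        # graph_id=None means "no graph filter", so only explicit unknown IDs are rejected.
        if graph_id is not None and graph_id not in GRAPHS:
            raise HTTPException(status_code=404, detail=f"Graph {graph_id} not found")


    check_graph(None)     # no filter requested: allowed
    check_graph("agent")  # registered graph: allowed
    try:
        check_graph("missing")
    except HTTPException as exc:
        print(exc.status_code, exc.detail)  # 404 Graph missing not found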

--- langgraph_runtime_inmem-0.22.0.dist-info/METADATA
+++ langgraph_runtime_inmem-0.22.1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-runtime-inmem
-Version: 0.22.0
+Version: 0.22.1
 Summary: Inmem implementation for the LangGraph API server.
 Author-email: Will Fu-Hinthorn <will@langchain.dev>
 License: Elastic-2.0

--- langgraph_runtime_inmem-0.22.0.dist-info/RECORD
+++ langgraph_runtime_inmem-0.22.1.dist-info/RECORD
@@ -1,14 +1,14 @@
-langgraph_runtime_inmem/__init__.py,sha256=MEt3B-cz2OdptjyXn7hgqdFgWBMBLaSY3wsKSEeB8Cg,337
-langgraph_runtime_inmem/checkpoint.py,sha256=lFUjSGra3xqD39FY3U-PgxYMXU6j1L28WPajmXT7-YE,7478
+langgraph_runtime_inmem/__init__.py,sha256=yocpl21Fk4OqK3oi1op2eCBM4EqmVbZFTETc--eQcJk,337
+langgraph_runtime_inmem/checkpoint.py,sha256=k9xHZVwbJhxunpbuM36zaVtcP2c2zdx6ABdDFYE2PrE,9471
 langgraph_runtime_inmem/database.py,sha256=g2XYa5KN-T8MbDeFH9sfUApDG62Wp4BACumVnDtxYhI,6403
 langgraph_runtime_inmem/inmem_stream.py,sha256=PFLWbsxU8RqbT5mYJgNk6v5q6TWJRIY1hkZWhJF8nkI,9094
 langgraph_runtime_inmem/lifespan.py,sha256=fCoYcN_h0cxmj6-muC-f0csPdSpyepZuGRD1yBrq4XM,4755
 langgraph_runtime_inmem/metrics.py,sha256=_YiSkLnhQvHpMktk38SZo0abyL-5GihfVAtBo0-lFIc,403
-langgraph_runtime_inmem/ops.py,sha256=qJaQb138aL9D8ROy4yRaxlcaCuiNfoSDIX7m6lCWndE,119178
+langgraph_runtime_inmem/ops.py,sha256=lsfdJczwmmMClaGbXIuYMBtZ0JED5UP-iwUDtHpShl4,121054
 langgraph_runtime_inmem/queue.py,sha256=WM6ZJu25QPVjFXeJYW06GALLUgRsnRrA4YdypR0oG0U,9584
 langgraph_runtime_inmem/retry.py,sha256=XmldOP4e_H5s264CagJRVnQMDFcEJR_dldVR1Hm5XvM,763
 langgraph_runtime_inmem/routes.py,sha256=VVNxgJ8FWI3kDBoIgQUWN1gY5ivo7L954Agxzv72TAY,1377
 langgraph_runtime_inmem/store.py,sha256=rTfL1JJvd-j4xjTrL8qDcynaWF6gUJ9-GDVwH0NBD_I,3506
-langgraph_runtime_inmem-0.22.0.dist-info/METADATA,sha256=viwco9HtZ2XtqNpn1nnBoLT2r2_8fvT8tqO3s2LwjHo,570
-langgraph_runtime_inmem-0.22.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-langgraph_runtime_inmem-0.22.0.dist-info/RECORD,,
+langgraph_runtime_inmem-0.22.1.dist-info/METADATA,sha256=CVWjgQ-ttGgcpuVhDcdQQO9Ba6iuam76jRVWEFo4_SY,570
+langgraph_runtime_inmem-0.22.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langgraph_runtime_inmem-0.22.1.dist-info/RECORD,,