langgraph-runtime-inmem 0.6.8__py3-none-any.whl → 0.6.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
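
Three source files changed between these versions: `langgraph_runtime_inmem/__init__.py` (version bump), `langgraph_runtime_inmem/inmem_stream.py` (control-message plumbing in `StreamManager`), and `langgraph_runtime_inmem/ops.py` (assistant config/context handling and run streaming), along with the generated wheel metadata. The hunks below are grouped by file.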
langgraph_runtime_inmem/__init__.py

```diff
@@ -9,7 +9,7 @@ from langgraph_runtime_inmem import (
     store,
 )

-__version__ = "0.6.8"
+__version__ = "0.6.11"
 __all__ = [
     "ops",
     "database",
```
langgraph_runtime_inmem/inmem_stream.py

```diff
@@ -42,6 +42,7 @@ class ContextQueue(asyncio.Queue):
 class StreamManager:
     def __init__(self):
         self.queues = defaultdict(list)  # Dict[UUID, List[asyncio.Queue]]
+        self.control_keys = defaultdict()
         self.control_queues = defaultdict(list)

         self.message_stores = defaultdict(list)  # Dict[UUID, List[Message]]
```
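
A note on the new field: `defaultdict()` with no `default_factory` behaves like a plain `dict`, so missing keys raise `KeyError` instead of being auto-created. That is why the accessor added in the next hunk reads it with `.get()`. A minimal illustration:

```python
from collections import defaultdict

control_keys = defaultdict()  # no default_factory, so this acts like a plain dict
control_keys["run-1"] = b"interrupt"

print(control_keys.get("run-2"))  # None; .get() avoids the KeyError
# control_keys["run-2"] would raise KeyError, since there is no factory
```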
```diff
@@ -51,6 +52,14 @@ class StreamManager:
         run_id = _ensure_uuid(run_id)
         return self.queues[run_id]

+    def get_control_queues(self, run_id: UUID | str) -> list[asyncio.Queue]:
+        run_id = _ensure_uuid(run_id)
+        return self.control_queues[run_id]
+
+    def get_control_key(self, run_id: UUID | str) -> Message | None:
+        run_id = _ensure_uuid(run_id)
+        return self.control_keys.get(run_id)
+
     async def put(
         self, run_id: UUID | str, message: Message, resumable: bool = False
     ) -> None:
```
```diff
@@ -61,8 +70,10 @@ class StreamManager:
         self.message_stores[run_id].append(message)
         topic = message.topic.decode()
         if "control" in topic:
-            self.control_queues[run_id].append(message)
-        queues = self.queues.get(run_id, [])
+            self.control_keys[run_id] = message
+            queues = self.control_queues[run_id]
+        else:
+            queues = self.queues[run_id]
         coros = [queue.put(message) for queue in queues]
         results = await asyncio.gather(*coros, return_exceptions=True)
         for result in results:
```
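
The effect of the `put` change is that control and data traffic no longer share subscriber queues: control messages fan out to `control_queues` and are remembered in `control_keys`, while everything else goes to the data queues. A condensed sketch of the new routing, with `Message` stubbed out (the real class lives in the surrounding module):

```python
import asyncio
from collections import defaultdict
from dataclasses import dataclass


@dataclass
class Message:
    topic: bytes
    data: object


queues = defaultdict(list)          # run_id -> data subscribers
control_queues = defaultdict(list)  # run_id -> control subscribers
control_keys = {}                   # run_id -> most recent control message


async def put(run_id: str, message: Message) -> None:
    if "control" in message.topic.decode():
        control_keys[run_id] = message    # remembered for late subscribers
        targets = control_queues[run_id]  # fan out to control listeners only
    else:
        targets = queues[run_id]          # fan out to data listeners only
    # Per-queue exceptions are collected rather than raised, as in the original.
    await asyncio.gather(*(q.put(message) for q in targets), return_exceptions=True)
```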
```diff
@@ -73,14 +84,12 @@ class StreamManager:
         run_id = _ensure_uuid(run_id)
         queue = ContextQueue()
         self.queues[run_id].append(queue)
-        for control_msg in self.control_queues[run_id]:
-            try:
-                await queue.put(control_msg)
-            except Exception:
-                logger.exception(
-                    f"Failed to put control message in queue: {control_msg}"
-                )
+        return queue

+    async def add_control_queue(self, run_id: UUID | str) -> asyncio.Queue:
+        run_id = _ensure_uuid(run_id)
+        queue = ContextQueue()
+        self.control_queues[run_id].append(queue)
         return queue

     async def remove_queue(self, run_id: UUID | str, queue: asyncio.Queue):
```
```diff
@@ -89,8 +98,13 @@
         self.queues[run_id].remove(queue)
         if not self.queues[run_id]:
             del self.queues[run_id]
-        if run_id in self.message_stores:
-            del self.message_stores[run_id]
+
+    async def remove_control_queue(self, run_id: UUID | str, queue: asyncio.Queue):
+        run_id = _ensure_uuid(run_id)
+        if run_id in self.control_queues:
+            self.control_queues[run_id].remove(queue)
+            if not self.control_queues[run_id]:
+                del self.control_queues[run_id]

     def restore_messages(
         self, run_id: UUID | str, message_id: str | None
```
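
Together with `add_control_queue` above, this gives control consumers the same subscribe/unsubscribe lifecycle that data consumers already had. A hedged usage sketch, assuming a `StreamManager` instance (the `print` calls stand in for real handling):

```python
async def watch_control(stream_manager, run_id) -> None:
    queue = await stream_manager.add_control_queue(run_id)
    try:
        # A control message published before we subscribed is still
        # observable through the remembered key.
        if last := stream_manager.get_control_key(run_id):
            print("missed signal:", last.data)
        while True:
            message = await queue.get()
            print("signal:", message.data)
            if message.data == b"done":
                break
    finally:
        await stream_manager.remove_control_queue(run_id, queue)
```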
langgraph_runtime_inmem/ops.py

```diff
@@ -243,6 +243,19 @@ class Assistants(Authenticated):
                 name=name,
             ),
         )
+
+        if config.get("configurable") and context:
+            raise HTTPException(
+                status_code=400,
+                detail="Cannot specify both configurable and context. Prefer setting context alone. Context was introduced in LangGraph 0.6.0 and is the long term planned replacement for configurable.",
+            )
+
+        # Keep config and context up to date with one another
+        if config.get("configurable"):
+            context = config["configurable"]
+        elif context:
+            config["configurable"] = context
+
         existing_assistant = next(
             (a for a in conn.store["assistants"] if a["assistant_id"] == assistant_id),
             None,
```
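
The net effect: `config["configurable"]` and `context` are mutually exclusive on input but mirrored in storage. The same rule, reduced to a standalone sketch (`reconcile` is illustrative, not part of the package):

```python
def reconcile(config: dict, context: dict | None) -> tuple[dict, dict | None]:
    if config.get("configurable") and context:
        raise ValueError("Cannot specify both configurable and context.")
    # Mirror whichever side was provided onto the other.
    if config.get("configurable"):
        context = config["configurable"]
    elif context:
        config["configurable"] = context
    return config, context


reconcile({"configurable": {"model": "x"}}, None)  # context becomes {"model": "x"}
reconcile({}, {"model": "x"})                      # configurable becomes {"model": "x"}
# reconcile({"configurable": {"a": 1}}, {"b": 2})  # raises
```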
```diff
@@ -328,6 +341,7 @@ class Assistants(Authenticated):
         """
         assistant_id = _ensure_uuid(assistant_id)
         metadata = metadata if metadata is not None else {}
+        config = config if config is not None else {}
         filters = await Assistants.handle_event(
             ctx,
             "update",
```
```diff
@@ -340,6 +354,19 @@ class Assistants(Authenticated):
                 name=name,
             ),
         )
+
+        if config.get("configurable") and context:
+            raise HTTPException(
+                status_code=400,
+                detail="Cannot specify both configurable and context. Prefer setting context alone. Context was introduced in LangGraph 0.6.0 and is the long term planned replacement for configurable.",
+            )
+
+        # Keep config and context up to date with one another
+        if config.get("configurable"):
+            context = config["configurable"]
+        elif context:
+            config["configurable"] = context
+
         assistant = next(
             (a for a in conn.store["assistants"] if a["assistant_id"] == assistant_id),
             None,
```
```diff
@@ -375,7 +402,7 @@
             "assistant_id": assistant_id,
             "version": new_version,
             "graph_id": graph_id if graph_id is not None else assistant["graph_id"],
-            "config": config if config is not None else assistant["config"],
+            "config": config if config else assistant["config"],
             "context": context if context is not None else assistant.get("context", {}),
             "metadata": metadata if metadata is not None else assistant["metadata"],
             "created_at": now,
```
```diff
@@ -722,7 +749,7 @@ class Threads(Authenticated):
         else:
             # Default sorting by created_at in descending order
             sorted_threads = sorted(
-                filtered_threads, key=lambda x: x["created_at"], reverse=True
+                filtered_threads, key=lambda x: x["updated_at"], reverse=True
             )

         # Apply limit and offset
```
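
Note that the inline comment above still reads `created_at`; only the sort key changed, so the default thread listing is now ordered by most recent update rather than by creation time.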
```diff
@@ -1451,7 +1478,7 @@
         conn: InMemConnectionProto,
         *,
         config: Config,
-        limit: int = 10,
+        limit: int = 1,
         before: str | Checkpoint | None = None,
         metadata: MetadataInput = None,
         ctx: Auth.types.BaseAuthContext | None = None,
```
```diff
@@ -1626,7 +1653,7 @@ class Runs(Authenticated):

         stream_manager = get_stream_manager()
         # Get queue for this run
-        queue = await Runs.Stream.subscribe(run_id)
+        queue = await stream_manager.add_control_queue(run_id)

         async with SimpleTaskGroup(cancel=True, taskgroup_name="Runs.enter") as tg:
             done = ValueEvent()
```
```diff
@@ -1634,16 +1661,21 @@

             # Give done event to caller
             yield done
-            # Signal done to all subscribers
+            # Store the control message for late subscribers
             control_message = Message(
                 topic=f"run:{run_id}:control".encode(), data=b"done"
             )
-
-            # Store the control message for late subscribers
             await stream_manager.put(run_id, control_message)
-            stream_manager.control_queues[run_id].append(control_message)
-            # Clean up this queue
-            await stream_manager.remove_queue(run_id, queue)
+
+            # Signal done to all subscribers
+            stream_message = Message(
+                topic=f"run:{run_id}:stream".encode(),
+                data={"event": "control", "message": b"done"},
+            )
+            await stream_manager.put(run_id, stream_message)
+
+            # Remove the queue
+            await stream_manager.remove_control_queue(run_id, queue)

     @staticmethod
     async def sweep(conn: InMemConnectionProto) -> list[UUID]:
```
```diff
@@ -1843,9 +1875,7 @@
                     "metadata": merged_metadata,
                 },
             ),
-            "context": Runs._merge_jsonb(
-                assistant.get("context", {}), kwargs.get("context", {})
-            ),
+            "context": configurable,
         },
     ),
     multitask_strategy=multitask_strategy,
```
```diff
@@ -1979,6 +2009,16 @@ class Runs(Authenticated):
             return Fragment(
                 orjson.dumps({"__error__": orjson.Fragment(thread["error"])})
             )
+        if thread["status"] == "interrupted":
+            # Get an interrupt for the thread. There is the case where there are multiple interrupts for the same run and we may not show the same
+            # interrupt, but we'll always show one. Long term we should show all of them.
+            try:
+                interrupt_map = thread["interrupts"]
+                interrupt = [next(iter(interrupt_map.values()))[0]]
+                return Fragment(orjson.dumps({"__interrupt__": interrupt}))
+            except Exception:
+                # No interrupt, but status is interrupted from a before/after block. Default back to values.
+                pass
         return thread["values"]

     @staticmethod
```
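
`thread["interrupts"]` maps a run or task key to a list of interrupts, and the code surfaces just the first interrupt of the first entry. A sketch of the shapes involved (the keys and interrupt fields here are illustrative):

```python
import orjson

thread = {
    "status": "interrupted",
    "interrupts": {
        "task-a": [{"id": "i-1", "value": {"question": "approve?"}}],
        "task-b": [{"id": "i-2", "value": {"question": "retry?"}}],
    },
}

interrupt_map = thread["interrupts"]
interrupt = [next(iter(interrupt_map.values()))[0]]  # first interrupt of first entry
orjson.dumps({"__interrupt__": interrupt})
# b'{"__interrupt__":[{"id":"i-1","value":{"question":"approve?"}}]}'
```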
```diff
@@ -2199,8 +2239,6 @@ class Runs(Authenticated):
     @staticmethod
     async def subscribe(
         run_id: UUID,
-        *,
-        stream_mode: StreamMode | None = None,
     ) -> asyncio.Queue:
         """Subscribe to the run stream, returning a queue."""
         stream_manager = get_stream_manager()
```
```diff
@@ -2220,20 +2258,18 @@
         ignore_404: bool = False,
         cancel_on_disconnect: bool = False,
         stream_channel: asyncio.Queue | None = None,
-        stream_mode: list[StreamMode] | StreamMode,
+        stream_mode: list[StreamMode] | StreamMode | None = None,
         last_event_id: str | None = None,
         ctx: Auth.types.BaseAuthContext | None = None,
     ) -> AsyncIterator[tuple[bytes, bytes, bytes | None]]:
         """Stream the run output."""
         from langgraph_api.asyncio import create_task
-
-        if stream_mode and not isinstance(stream_mode, list):
-            stream_mode = [stream_mode]
+        from langgraph_api.serde import json_loads

         queue = (
             stream_channel
             if stream_channel
-            else await Runs.Stream.subscribe(run_id, stream_mode=stream_mode)
+            else await Runs.Stream.subscribe(run_id)
         )

         try:
```
```diff
@@ -2254,53 +2290,71 @@ class Runs(Authenticated):
                 )
             )
             run = await Runs.get(conn, run_id, thread_id=thread_id, ctx=ctx)
-            channel_prefix = f"run:{run_id}:stream:"
-            len_prefix = len(channel_prefix.encode())

             for message in get_stream_manager().restore_messages(
                 run_id, last_event_id
             ):
-                topic, data, id = message.topic, message.data, message.id
-                if topic.decode() == f"run:{run_id}:control":
-                    if data == b"done":
+                data, id = message.data, message.id
+
+                data = json_loads(data)
+                mode = data["event"]
+                message = data["message"]
+
+                if mode == "control":
+                    if message == b"done":
                         return
-                else:
-                    mode = topic[len_prefix:]
-                    if mode == b"updates" and "updates" not in stream_mode:
-                        continue
-                    else:
-                        yield mode, data, id
-                        logger.debug(
-                            "Replayed run event",
-                            run_id=str(run_id),
-                            message_id=id,
-                            stream_mode=mode,
-                            data=data,
+                elif (
+                    not stream_mode
+                    or mode in stream_mode
+                    or (
+                        (
+                            "messages" in stream_mode
+                            or "messages-tuple" in stream_mode
                         )
+                        and mode.startswith("messages")
+                    )
+                ):
+                    yield mode.encode(), base64.b64decode(message), id
+                    logger.debug(
+                        "Replayed run event",
+                        run_id=str(run_id),
+                        message_id=id,
+                        stream_mode=mode,
+                        data=data,
+                    )

             while True:
                 try:
                     # Wait for messages with a timeout
                     message = await asyncio.wait_for(queue.get(), timeout=0.5)
-                    topic, data, id = message.topic, message.data, message.id
+                    data, id = message.data, message.id
+
+                    data = json_loads(data)
+                    mode = data["event"]
+                    message = data["message"]

-                    if topic.decode() == f"run:{run_id}:control":
-                        if data == b"done":
+                    if mode == "control":
+                        if message == b"done":
                             break
-                    else:
-                        # Extract mode from topic
-                        mode = topic[len_prefix:]
-                        if mode == b"updates" and "updates" not in stream_mode:
-                            continue
-                        else:
-                            yield mode, data, id
-                            logger.debug(
-                                "Streamed run event",
-                                run_id=str(run_id),
-                                stream_mode=mode,
-                                message_id=id,
-                                data=data,
+                    elif (
+                        not stream_mode
+                        or mode in stream_mode
+                        or (
+                            (
+                                "messages" in stream_mode
+                                or "messages-tuple" in stream_mode
                             )
+                            and mode.startswith("messages")
+                        )
+                    ):
+                        yield mode.encode(), base64.b64decode(message), id
+                        logger.debug(
+                            "Streamed run event",
+                            run_id=str(run_id),
+                            stream_mode=mode,
+                            message_id=id,
+                            data=message,
+                        )
                 except TimeoutError:
                     # Check if the run is still pending
                     run_iter = await Runs.get(
```
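
Both the replay loop and the live loop now decode the same JSON envelope and apply the same filter: pass everything when no `stream_mode` was requested, otherwise match the mode exactly, with a carve-out so that `messages`/`messages-tuple` subscriptions also receive submodes such as `messages/partial`. The filter, extracted into a standalone sketch (assuming the envelope decodes to `{"event": ..., "message": ...}` with a base64-encoded payload, as the hunk implies):

```python
import base64
import json


def decode_envelope(raw: bytes) -> tuple[str, bytes]:
    envelope = json.loads(raw)  # {"event": <mode>, "message": <base64 payload>}
    return envelope["event"], base64.b64decode(envelope["message"])


def should_yield(mode: str, stream_mode: list[str] | None) -> bool:
    if not stream_mode:
        return True  # no filter requested
    if mode in stream_mode:
        return True  # exact match
    # "messages" subscriptions also match submodes like "messages/partial"
    wants_messages = "messages" in stream_mode or "messages-tuple" in stream_mode
    return wants_messages and mode.startswith("messages")
```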
```diff
@@ -2340,12 +2394,20 @@
         resumable: bool = False,
     ) -> None:
         """Publish a message to all subscribers of the run stream."""
-        topic = f"run:{run_id}:stream:{event}".encode()
+        from langgraph_api.serde import json_dumpb
+
+        topic = f"run:{run_id}:stream".encode()

         stream_manager = get_stream_manager()
         # Send to all queues subscribed to this run_id
+        payload = json_dumpb(
+            {
+                "event": event,
+                "message": message,
+            }
+        )
         await stream_manager.put(
-            run_id, Message(topic=topic, data=message), resumable
+            run_id, Message(topic=topic, data=payload), resumable
         )

```
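
On the publish side, the mode moves out of the topic (previously `run:{run_id}:stream:{event}`) and into the JSON body on a single per-run topic. Below is a hypothetical stand-in for `json_dumpb` that would be consistent with the `b64decode` on the consuming side; the real implementation lives in `langgraph_api.serde` and may differ in detail:

```python
import base64
import json


def json_dumpb(obj) -> bytes:
    """Hypothetical stand-in: JSON-encode, base64-encoding any bytes values."""
    def default(o):
        if isinstance(o, bytes):
            return base64.b64encode(o).decode()
        raise TypeError(f"unsupported type: {type(o)!r}")
    return json.dumps(obj, default=default).encode()


topic = b"run:123:stream"  # one topic per run; no per-mode suffix
payload = json_dumpb({"event": "values", "message": b'{"count": 1}'})
```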
```diff
@@ -2354,20 +2416,18 @@ async def listen_for_cancellation(queue: asyncio.Queue, run_id: UUID, done: Valu
     from langgraph_api.errors import UserInterrupt, UserRollback

     stream_manager = get_stream_manager()
-    control_key = f"run:{run_id}:control"

-    if existing_queue := stream_manager.control_queues.get(run_id):
-        for message in existing_queue:
-            payload = message.data
-            if payload == b"rollback":
-                done.set(UserRollback())
-            elif payload == b"interrupt":
-                done.set(UserInterrupt())
+    if control_key := stream_manager.get_control_key(run_id):
+        payload = control_key.data
+        if payload == b"rollback":
+            done.set(UserRollback())
+        elif payload == b"interrupt":
+            done.set(UserInterrupt())

     while not done.is_set():
         try:
             # This task gets cancelled when Runs.enter exits anyway,
-            # so we can have a pretty length timeout here
+            # so we can have a pretty lengthy timeout here
             message = await asyncio.wait_for(queue.get(), timeout=240)
             payload = message.data
             if payload == b"rollback":
```
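
Because `control_keys` keeps only the most recent control message per run, a task that starts listening late sees the last signal rather than the full history the old per-run list retained, which is presumably sufficient for the `rollback`/`interrupt`/`done` signals handled here.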
```diff
@@ -2377,10 +2437,6 @@ async def listen_for_cancellation(queue: asyncio.Queue, run_id: UUID, done: Valu
             elif payload == b"done":
                 done.set()
                 break
-
-            # Store control messages for late subscribers
-            if message.topic.decode() == control_key:
-                stream_manager.control_queues[run_id].append(message)
         except TimeoutError:
             break

```
*.dist-info/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-runtime-inmem
-Version: 0.6.8
+Version: 0.6.11
 Summary: Inmem implementation for the LangGraph API server.
 Author-email: Will Fu-Hinthorn <will@langchain.dev>
 License: Elastic-2.0
```
*.dist-info/RECORD

```diff
@@ -1,13 +1,13 @@
-langgraph_runtime_inmem/__init__.py,sha256=Zy5RCTPPvryu_HMij5RWuFUvkeY5gEeOV66pJgigHDM,310
+langgraph_runtime_inmem/__init__.py,sha256=FaiDP9SdI1qgH5N7_RMO7yqFhq5967V9hXeJsDFc9PA,311
 langgraph_runtime_inmem/checkpoint.py,sha256=nc1G8DqVdIu-ibjKTqXfbPfMbAsKjPObKqegrSzo6Po,4432
 langgraph_runtime_inmem/database.py,sha256=G_6L2khpRDSpS2Vs_SujzHayODcwG5V2IhFP7LLBXgw,6349
-langgraph_runtime_inmem/inmem_stream.py,sha256=65z_2mBNJ0-yJsXWnlYwRc71039_y6Sa0MN8fL_U3Ko,4581
+langgraph_runtime_inmem/inmem_stream.py,sha256=UWk1srLF44HZPPbRdArGGhsy0MY0UOJKSIxBSO7Hosc,5138
 langgraph_runtime_inmem/lifespan.py,sha256=t0w2MX2dGxe8yNtSX97Z-d2pFpllSLS4s1rh2GJDw5M,3557
 langgraph_runtime_inmem/metrics.py,sha256=HhO0RC2bMDTDyGBNvnd2ooLebLA8P1u5oq978Kp_nAA,392
-langgraph_runtime_inmem/ops.py,sha256=CpicJwlu55cGm8WMWtfyse1Sy1rj8vLZqbLWVF45mB0,89326
+langgraph_runtime_inmem/ops.py,sha256=pf5wtLPTJWwmjr9zsGXHdN7slXrYR2PQvCx71KujFqE,91593
 langgraph_runtime_inmem/queue.py,sha256=nqfgz7j_Jkh5Ek5-RsHB2Uvwbxguu9IUPkGXIxvFPns,10037
 langgraph_runtime_inmem/retry.py,sha256=XmldOP4e_H5s264CagJRVnQMDFcEJR_dldVR1Hm5XvM,763
 langgraph_runtime_inmem/store.py,sha256=rTfL1JJvd-j4xjTrL8qDcynaWF6gUJ9-GDVwH0NBD_I,3506
-langgraph_runtime_inmem-0.6.8.dist-info/METADATA,sha256=v2xaQ-PTil64hCubv5PMmqlc-k1xQSExh4jisUNbTCk,565
-langgraph_runtime_inmem-0.6.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-langgraph_runtime_inmem-0.6.8.dist-info/RECORD,,
+langgraph_runtime_inmem-0.6.11.dist-info/METADATA,sha256=U4PqBeXV9YamUB2A8etc50Z8mZ2WxYJ0swoo3WSy6nk,566
+langgraph_runtime_inmem-0.6.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langgraph_runtime_inmem-0.6.11.dist-info/RECORD,,
```