langgraph-api 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -686,6 +686,40 @@ describe("runs", () => {
     expect(run.status).toBe("success");
   });
 
+  it.concurrent("stream messages tuple", async () => {
+    const assistant = await client.assistants.create({ graphId: "agent" });
+    const thread = await client.threads.create();
+    const input = {
+      messages: [{ type: "human", content: "foo", id: "initial-message" }],
+    };
+    const stream = await client.runs.stream(
+      thread.thread_id,
+      assistant.assistant_id,
+      { input, streamMode: "messages-tuple", config: globalConfig }
+    );
+
+    const chunks = await gatherIterator(stream);
+    const runId = findLast(chunks, (i) => i.event === "metadata")?.data.run_id;
+    expect(runId).not.toBeNull();
+
+    const messages = chunks
+      .filter((i) => i.event === "messages")
+      .map((i) => i.data[0]);
+
+    expect(messages).toHaveLength("begin".length + "end".length + 1);
+    expect(messages).toMatchObject([
+      ..."begin".split("").map((c) => ({ content: c })),
+      { content: "tool_call__begin" },
+      ..."end".split("").map((c) => ({ content: c })),
+    ]);
+
+    const seenEventTypes = new Set(chunks.map((i) => i.event));
+    expect(seenEventTypes).toEqual(new Set(["metadata", "messages"]));
+
+    const run = await client.runs.get(thread.thread_id, runId as string);
+    expect(run.status).toBe("success");
+  });
+
   it.concurrent("stream mixed modes", async () => {
     const assistant = await client.assistants.create({ graphId: "agent" });
     const thread = await client.threads.create();
@@ -753,13 +787,13 @@ describe("runs", () => {
     messages = findLast(chunks, (i) => i.event === "values")?.data.messages;
 
     const threadAfterInterrupt = await client.threads.get(thread.thread_id);
-    expect(threadAfterInterrupt.status).toBe("idle");
+    expect(threadAfterInterrupt.status).toBe("interrupted");
 
     expect(messages.at(-1)).not.toBeNull();
-    expect(messages.at(-1)?.content).toBe("end");
+    expect(messages.at(-1)?.content).toBe("begin");
 
     const state = await client.threads.getState(thread.thread_id);
-    expect(state.next).toEqual([]);
+    expect(state.next).toEqual(["tool"]);
 
     // continue after interrupt
     chunks = await gatherIterator(
@@ -817,6 +851,7 @@ describe("runs", () => {
     });
 
     const modifiedThread = await client.threads.get(thread.thread_id);
+    expect(modifiedThread.status).toBe("interrupted");
     expect(modifiedThread.metadata?.modified).toBe(true);
 
     const stateAfterModify = await client.threads.getState<AgentState>(
@@ -836,22 +871,42 @@ describe("runs", () => {
       })
     );
 
+    const threadAfterContinue = await client.threads.get(thread.thread_id);
+    expect(threadAfterContinue.status).toBe("idle");
+
     expect(chunks.filter((i) => i.event === "error").length).toBe(0);
     messages = findLast(chunks, (i) => i.event === "values")?.data.messages;
 
-    expect(messages.length).toBe(8);
-    expect(messages[4].content).toBe(`tool_call__modified`);
+    expect(messages.length).toBe(4);
+    expect(messages[2].content).toBe(`tool_call__modified`);
     expect(messages.at(-1)?.content).toBe("end");
 
     // get the history
     const history = await client.threads.getHistory<AgentState>(
       thread.thread_id
     );
-    expect(history.length).toBe(10);
+    expect(history.length).toBe(6);
     expect(history[0].next.length).toBe(0);
-    expect(history[0].values.messages.length).toBe(8);
+    expect(history[0].values.messages.length).toBe(4);
     expect(history.at(-1)?.next).toEqual(["__start__"]);
   });
+
+  it.concurrent("interrupt before", async () => {
+    const assistant = await client.assistants.create({ graphId: "agent" });
+    let thread = await client.threads.create();
+    const input = {
+      messages: [{ type: "human", content: "foo", id: "initial-message" }],
+    };
+
+    await client.runs.wait(thread.thread_id, assistant.assistant_id, {
+      input,
+      interruptBefore: ["agent"],
+      config: globalConfig,
+    });
+
+    thread = await client.threads.get(thread.thread_id);
+    expect(thread.status).toBe("interrupted");
+  });
 });
 
 describe("shared state", () => {
@@ -1684,3 +1739,65 @@ describe("long running tasks", () => {
     }
   );
 });
+
+// Not implemented in JS yet
+describe.skip("command update state", () => {
+  it("updates state via commands", async () => {
+    const assistant = await client.assistants.create({ graphId: "agent" });
+    const thread = await client.threads.create();
+
+    const input = { messages: [{ role: "human", content: "foo" }] };
+
+    // dict-based updates
+    await client.runs.wait(thread.thread_id, assistant.assistant_id, {
+      input,
+      config: globalConfig,
+    });
+    let stream = await gatherIterator(
+      client.runs.stream(thread.thread_id, assistant.assistant_id, {
+        command: { update: { keyOne: "value3", keyTwo: "value4" } },
+        config: globalConfig,
+      })
+    );
+    expect(stream.filter((chunk) => chunk.event === "error")).toEqual([]);
+
+    let state = await client.threads.getState<{
+      keyOne: string;
+      keyTwo: string;
+    }>(thread.thread_id);
+
+    expect(state.values).toMatchObject({
+      keyOne: "value3",
+      keyTwo: "value4",
+    });
+
+    // list-based updates
+    await client.runs.wait(thread.thread_id, assistant.assistant_id, {
+      input,
+      config: globalConfig,
+    });
+    stream = await gatherIterator(
+      client.runs.stream(thread.thread_id, assistant.assistant_id, {
+        command: {
+          update: [
+            ["keyOne", "value1"],
+            ["keyTwo", "value2"],
+          ],
+        },
+        config: globalConfig,
+      })
+    );
+
+    expect(stream.filter((chunk) => chunk.event === "error")).toEqual([]);
+
+    state = await client.threads.getState<{
+      keyOne: string;
+      keyTwo: string;
+    }>(thread.thread_id);
+
+    expect(state.values).toMatchObject({
+      keyOne: "value1",
+      keyTwo: "value2",
+    });
+  });
+});
@@ -6,11 +6,13 @@ import {
   END,
   messagesStateReducer,
   SharedValue,
-  LangGraphRunnableConfig,
   interrupt,
+  type LangGraphRunnableConfig,
 } from "@langchain/langgraph";
 import { FakeListChatModel } from "@langchain/core/utils/testing";
-
+import { ChatGenerationChunk } from "@langchain/core/outputs";
+import { v4 as uuidv4 } from "uuid";
+import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 const GraphAnnotationOutput = Annotation.Root({
   messages: Annotation<BaseMessage[]>({
     reducer: messagesStateReducer,
@@ -18,6 +20,9 @@ const GraphAnnotationOutput = Annotation.Root({
   }),
   sharedStateValue: Annotation<string | null>(),
   interrupt: Annotation<boolean>(),
+  keyOne: Annotation<string | null>(),
+  keyTwo: Annotation<string | null>(),
+  sleep: Annotation<number | null>(),
 });
 
 const GraphAnnotationInput = Annotation.Root({
@@ -26,13 +31,58 @@ const GraphAnnotationInput = Annotation.Root({
   sharedStateFromStoreConfig: Annotation<Record<string, any> | null>,
 });
 
+class StableFakeListChatModel extends FakeListChatModel {
+  streamMessageId: string = uuidv4();
+
+  async *_streamResponseChunks(
+    _messages: BaseMessage[],
+    options: this["ParsedCallOptions"],
+    runManager?: CallbackManagerForLLMRun
+  ): AsyncGenerator<ChatGenerationChunk> {
+    const response = this._currentResponse();
+    this._incrementResponse();
+    this.streamMessageId = uuidv4();
+
+    if (this.emitCustomEvent) {
+      await runManager?.handleCustomEvent("some_test_event", {
+        someval: true,
+      });
+    }
+
+    for await (const text of response) {
+      await this._sleepIfRequested();
+      if (options?.thrownErrorString) {
+        throw new Error(options.thrownErrorString);
+      }
+      const chunk = this._createResponseChunk(text);
+
+      // ensure stable ID
+      chunk.message.id = this.streamMessageId;
+      chunk.message.lc_kwargs.id = this.streamMessageId;
+
+      yield chunk;
+
+      void runManager?.handleLLMNewToken(
+        text,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        { chunk }
+      );
+    }
+  }
+}
+
 // For shared state
 const namespace = ["sharedState", "data"];
 const key = "user_id";
 
 const modelMap: Record<string, FakeListChatModel> = {};
 const getModel = (threadId: string) => {
-  modelMap[threadId] ??= new FakeListChatModel({ responses: ["begin", "end"] });
+  modelMap[threadId] ??= new StableFakeListChatModel({
+    responses: ["begin", "end"],
+  });
   return modelMap[threadId];
 };
 
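The purpose of StableFakeListChatModel above is that every chunk of one streamed response carries the same message ID, so consumers of the messages / messages-tuple streams can merge token chunks back into a single message. A minimal sketch of that merge logic, in Python with a hypothetical Chunk shape standing in for the real chunk classes:

from collections import defaultdict
from dataclasses import dataclass

@dataclass
class Chunk:
    id: str       # stable across every chunk of one streamed response
    content: str  # a single token

def merge(chunks: list[Chunk]) -> dict[str, str]:
    # Concatenate tokens per message ID. If the ID changed per chunk,
    # one response would fragment into many one-token messages.
    merged: dict[str, str] = defaultdict(str)
    for chunk in chunks:
        merged[chunk.id] += chunk.content
    return dict(merged)

assert merge([Chunk("m1", "b"), Chunk("m1", "e"), Chunk("m1", "gin")]) == {"m1": "begin"}
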
@@ -42,6 +92,11 @@ const agentNode = async (
 ) => {
   if (state.interrupt) interrupt("i want to interrupt");
 
+  if (state.sleep != null && state.messages.at(-1)?.getType() === "human") {
+    const sleep = state.sleep;
+    await new Promise((resolve) => setTimeout(resolve, sleep * 1000));
+  }
+
   const model = getModel(config.configurable?.thread_id ?? "$");
   const response = await model.invoke(state.messages);
   const sharedStateValue = state.sharedState?.data?.user_id ?? null;
@@ -14,6 +14,11 @@ const StateSchema = Annotation.Root({
 const longRunning = async (
   state: typeof StateSchema.State
 ): Promise<typeof StateSchema.Update> => {
+  if (state.delay === -1) {
+    while (true) {
+      // hang the event loop
+    }
+  }
   await new Promise((resolve) => setTimeout(resolve, state.delay));
   return { messages: [`finished after ${state.delay}ms`] };
 };
@@ -389,10 +389,10 @@
     p-retry "4"
     uuid "^9.0.0"
 
-"@langchain/langgraph@^0.2.31":
-  version "0.2.31"
-  resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.2.31.tgz#2cb2faac1d02a7ccf47559aec87aae7f44dc7d3f"
-  integrity sha512-/otJC3/P3Pt58eVZz1gxC3sBiC0N0HhOaAbOBKxckskhayBO6OC6ZDHtH9a+rxEIlreBoninR1/At1Gj/3liFA==
+"@langchain/langgraph@^0.2.35":
+  version "0.2.35"
+  resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.2.35.tgz#e2dc4d07b3080570ef63b1a5a98a4e2b1e1cc630"
+  integrity sha512-h209sOZGgbKpdkc+5WgjiBH0Fe8zLmPv+ff/RnXGEr+phrXwUNQnx5iu4HexVd7P6gxM9Ymt1iZBCBXpgRbK8A==
   dependencies:
     "@langchain/langgraph-checkpoint" "~0.0.13"
     "@langchain/langgraph-sdk" "~0.0.21"
@@ -1486,10 +1486,10 @@ nanoid@^3.3.7:
   resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
   integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==
 
-node-addon-api@^8.2.1:
-  version "8.2.1"
-  resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-8.2.1.tgz#43a993f110b88e22ba48bcd65e16b92165a6b002"
-  integrity sha512-vmEOvxwiH8tlOcv4SyE8RH34rI5/nWVaigUeAUPawC6f0+HoDthwI0vkMu4tbtsZrXq6QXFfrkhjofzKEs5tpA==
+node-addon-api@^8.3.0:
+  version "8.3.0"
+  resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-8.3.0.tgz#ec3763f18befc1cdf66d11e157ce44d5eddc0603"
+  integrity sha512-8VOpLHFrOQlAH+qA0ZzuGRlALRA6/LVh8QJldbrC4DY0hXoMP0l4Acq8TzFC018HztWiRqyCEj2aTWY2UvnJUg==
 
 node-int64@^0.4.0:
   version "0.4.0"
@@ -2173,13 +2173,13 @@ yallist@^4.0.0:
   resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
   integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
 
-zeromq@^6.1.1:
-  version "6.1.2"
-  resolved "https://registry.yarnpkg.com/zeromq/-/zeromq-6.1.2.tgz#89f1cae83f1a4615dab02dbc7881b10ea1a33c73"
-  integrity sha512-5lqsW2UXnKhFhBbIm0wzrRnZmWaGI2b2bm4E03rYMK1c1jBMN0wbPnuyqqhGg7QkzBUmpqf+LyjLhg2TRJmvow==
+zeromq@^6.3.0:
+  version "6.3.0"
+  resolved "https://registry.yarnpkg.com/zeromq/-/zeromq-6.3.0.tgz#19ffa286d128cc317847b5d615c079a67e3bae48"
+  integrity sha512-PG61AT4Y37NGJHSrp5SG2m4cKtRirso/5mm4Yf7l+upgNZZAugrGdhENsM5IcLS+WVVzbWVjEsLKpgLj/31SWQ==
   dependencies:
     "@aminya/cmake-ts" "^0.3.0-aminya.7"
-    node-addon-api "^8.2.1"
+    node-addon-api "^8.3.0"
 
 zod-to-json-schema@^3.22.3:
   version "3.23.5"
@@ -8,7 +8,7 @@ from langgraph.checkpoint.base.id import uuid6
 from starlette.authentication import BaseUser
 from starlette.exceptions import HTTPException
 
-from langgraph_api.graph import get_assistant_id
+from langgraph_api.graph import GRAPHS, get_assistant_id
 from langgraph_api.schema import (
     All,
     Config,
@@ -92,7 +92,13 @@ def ensure_ids(
             assistant_id if isinstance(assistant_id, UUID) else UUID(assistant_id)
         ]
     except ValueError:
-        raise HTTPException(status_code=422, detail="Invalid assistant ID") from None
+        keys = ", ".join(GRAPHS.keys())
+        raise HTTPException(
+            status_code=422,
+            detail=f"Invalid assistant: '{assistant_id}'. Must be either:\n"
+            f"- A valid assistant UUID, or\n"
+            f"- One of the registered graphs: {keys}",
+        ) from None
     if thread_id:
         try:
             results.append(
@@ -191,6 +197,7 @@ async def create_valid_run(
         user_id = get_user_id(user)
         config["configurable"]["langgraph_auth_user"] = user
         config["configurable"]["langgraph_auth_user_id"] = user_id
+        config["configurable"]["langgraph_auth_permissions"] = ctx.permissions
     else:
         user_id = None
     run_coro = Runs.put(
langgraph_api/queue.py CHANGED
@@ -1,13 +1,14 @@
 import asyncio
-from contextlib import AsyncExitStack
+from collections.abc import AsyncGenerator
+from contextlib import AsyncExitStack, asynccontextmanager
 from datetime import UTC, datetime
-from random import random
 from typing import TypedDict, cast
 
 import structlog
 from langgraph.pregel.debug import CheckpointPayload, TaskResultPayload
 
-from langgraph_api.config import BG_JOB_NO_DELAY, STATS_INTERVAL_SECS
+from langgraph_api.auth.custom import SimpleUser, normalize_user
+from langgraph_api.config import BG_JOB_DELAY, STATS_INTERVAL_SECS
 from langgraph_api.errors import (
     UserInterrupt,
     UserRollback,
@@ -20,11 +21,16 @@ from langgraph_api.stream import (
     astream_state,
     consume,
 )
-from langgraph_api.utils import AsyncConnectionProto
+from langgraph_api.utils import AsyncConnectionProto, set_auth_ctx, with_user
 from langgraph_storage.database import connect
 from langgraph_storage.ops import Runs, Threads
 from langgraph_storage.retry import RETRIABLE_EXCEPTIONS
 
+try:
+    from psycopg.errors import InFailedSqlTransaction
+except ImportError:
+    InFailedSqlTransaction = ()
+
 logger = structlog.stdlib.get_logger(__name__)
 
 WORKERS: set[asyncio.Task] = set()
@@ -120,13 +126,13 @@ async def queue(concurrency: int, timeout: float):
             else:
                 semaphore.release()
                 await exit.aclose()
-                await asyncio.sleep(0 if BG_JOB_NO_DELAY else random())
+                await asyncio.sleep(BG_JOB_DELAY)
         except Exception as exc:
             # keep trying to run the scheduler indefinitely
             logger.exception("Background worker scheduler failed", exc_info=exc)
             semaphore.release()
             await exit.aclose()
-            await asyncio.sleep(0 if BG_JOB_NO_DELAY else random())
+            await asyncio.sleep(BG_JOB_DELAY)
     finally:
         logger.info("Shutting down background workers")
         for task in WORKERS:
@@ -149,6 +155,35 @@ class WorkerResult(TypedDict):
     run_ended_at: str | None
 
 
+@asynccontextmanager
+async def set_auth_ctx_for_run(
+    run_kwargs: dict, user_id: str | None = None
+) -> AsyncGenerator[None, None]:
+    # user_id is a fallback.
+    try:
+        user = run_kwargs["config"]["configurable"]["langgraph_auth_user"]
+        permissions = run_kwargs["config"]["configurable"]["langgraph_auth_permissions"]
+        if user is not None:
+            user = normalize_user(user)
+            async with with_user(user, permissions):
+                yield None
+        else:
+            yield None
+
+    except KeyError:
+        if user_id is not None:
+            await logger.ainfo(
+                "Setting auth to backup user_id",
+                user_id=user_id,
+            )
+            async with with_user(SimpleUser(user_id)):
+                yield None
+        else:
+            yield None
+    except Exception:
+        pass
+
+
 async def worker(
     timeout: float,
     exit: AsyncExitStack,
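The set_auth_ctx_for_run helper added above is an async context manager that conditionally wraps the run in with_user, preferring the user stored on the run's config and falling back to a bare user ID. A runnable sketch of the same pattern, with simplified stand-ins (a plain string user and a context variable) instead of the package's actual auth types:

import asyncio
import contextvars
from contextlib import asynccontextmanager

CURRENT_USER: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "CURRENT_USER", default=None
)

@asynccontextmanager
async def with_user(user: str | None):
    # Set the context var for the duration of the block, then restore it.
    token = CURRENT_USER.set(user)
    try:
        yield
    finally:
        CURRENT_USER.reset(token)

@asynccontextmanager
async def set_auth_ctx_for_run(run_kwargs: dict, user_id: str | None = None):
    # Prefer the user on the run's config; fall back to user_id; else no auth.
    try:
        user = run_kwargs["config"]["configurable"]["langgraph_auth_user"]
    except KeyError:
        user = user_id
    if user is not None:
        async with with_user(user):
            yield
    else:
        yield

async def main() -> None:
    run = {"config": {"configurable": {"langgraph_auth_user": "alice"}}}
    async with set_auth_ctx_for_run(run):
        print(CURRENT_USER.get())  # alice
    print(CURRENT_USER.get())  # None (restored)

asyncio.run(main())
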
@@ -165,7 +200,8 @@ async def worker(
     webhook = run["kwargs"].pop("webhook", None)
     run_started_at = datetime.now(UTC)
     run_ended_at: str | None = None
-    async with Runs.enter(run_id) as done, exit:
+
+    async with set_auth_ctx_for_run(run["kwargs"]), Runs.enter(run_id) as done, exit:
         temporary = run["kwargs"].get("temporary", False)
         run_created_at = run["created_at"].isoformat()
         await logger.ainfo(
@@ -245,7 +281,20 @@ async def worker(
                 run_ended_at=run_ended_at,
                 run_exec_ms=ms(datetime.now(UTC), run_started_at),
             )
-            await Runs.delete(conn, run_id, thread_id=run["thread_id"])
+            try:
+                await Runs.delete(conn, run_id, thread_id=run["thread_id"])
+            except InFailedSqlTransaction as e:
+                await logger.ainfo(
+                    "Ignoring rollback error",
+                    run_id=str(run_id),
+                    run_attempt=attempt,
+                    run_created_at=run_created_at,
+                    exc=str(e),
+                )
+                # We need to clean up the transaction early if we want to
+                # update the thread status with the same connection
+                await exit.aclose()
+                checkpoint = None  # reset the checkpoint
         except UserInterrupt as e:
             exception = e
             status = "interrupted"
@@ -292,6 +341,7 @@ async def worker(
             run_exec_ms=ms(datetime.now(UTC), run_started_at),
         )
         await Runs.set_status(conn, run_id, "error")
+    set_auth_ctx(None, None)
     # delete or set status of thread
     if temporary:
         await Threads.delete(conn, run["thread_id"])
langgraph_api/route.py CHANGED
@@ -14,7 +14,7 @@ from starlette.routing import Route, compile_path, get_name
 from starlette.types import ASGIApp, Receive, Scope, Send
 
 from langgraph_api.serde import json_dumpb
-from langgraph_api.utils import set_auth_ctx
+from langgraph_api.utils import get_auth_ctx, with_user
 
 
 def api_request_response(
@@ -116,5 +116,10 @@ class ApiRoute(Route):
     async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:
         # https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope
         scope["route"] = self.path
-        set_auth_ctx(scope.get("user"), scope.get("auth"))
-        return await super().handle(scope, receive, send)
+        ctx = get_auth_ctx()
+        if ctx:
+            user, auth = ctx.user, ctx.permissions
+        else:
+            user, auth = scope.get("user"), scope.get("auth")
+        async with with_user(user, auth):
+            return await super().handle(scope, receive, send)
langgraph_api/schema.py CHANGED
@@ -157,7 +157,7 @@ class RunSend(TypedDict):
 
 class RunCommand(TypedDict):
     goto: str | RunSend | Sequence[RunSend | str] | None
-    update: dict[str, Any] | None
+    update: dict[str, Any] | Sequence[tuple[str, Any]] | None
     resume: Any | None
 
 
langgraph_api/stream.py CHANGED
@@ -75,8 +75,15 @@ def _map_cmd(cmd: RunCommand) -> Command:
     if goto is not None and not isinstance(goto, list):
         goto = [cmd.get("goto")]
 
+    update = cmd.get("update")
+    if isinstance(update, tuple | list) and all(
+        isinstance(t, tuple | list) and len(t) == 2 and isinstance(t[0], str)
+        for t in update
+    ):
+        update = [tuple(t) for t in update]
+
     return Command(
-        update=cmd.get("update"),
+        update=update,
         goto=(
             [
                 it if isinstance(it, str) else Send(it["node"], it["input"])
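For context on the new branch above: a JSON request body can only carry the pair form of update as nested two-element arrays, so _map_cmd coerces well-shaped pairs back into tuples before building the Command. A standalone sketch of that normalization (simplified from the hunk above, not the exact function):

from typing import Any

def normalize_update(update: Any) -> Any:
    # JSON decoding turns ("key", value) tuples into 2-element lists;
    # coerce them back so downstream code sees a list of tuples.
    if isinstance(update, (tuple, list)) and all(
        isinstance(t, (tuple, list)) and len(t) == 2 and isinstance(t[0], str)
        for t in update
    ):
        return [tuple(t) for t in update]
    return update

assert normalize_update([["keyOne", "value1"], ["keyTwo", "value2"]]) == [
    ("keyOne", "value1"),
    ("keyTwo", "value2"),
]
assert normalize_update({"keyOne": "value1"}) == {"keyOne": "value1"}
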
@@ -123,7 +130,7 @@ async def astream_state(
     stream_modes_set: set[StreamMode] = set(stream_mode) - {"events"}
     if "debug" not in stream_modes_set:
         stream_modes_set.add("debug")
-    if "messages-tuple" in stream_modes_set:
+    if "messages-tuple" in stream_modes_set and not isinstance(graph, BaseRemotePregel):
         stream_modes_set.remove("messages-tuple")
         stream_modes_set.add("messages")
     # attach attempt metadata
langgraph_api/utils.py CHANGED
@@ -6,9 +6,11 @@ from datetime import datetime
 from typing import Any, Protocol, TypeAlias, TypeVar
 
 from langgraph_sdk import Auth
-from starlette.authentication import AuthCredentials, BaseUser, SimpleUser
+from starlette.authentication import AuthCredentials, BaseUser
 from starlette.exceptions import HTTPException
 
+from langgraph_api.auth.custom import SimpleUser
+
 T = TypeVar("T")
 Row: TypeAlias = dict[str, Any]
 AuthContext = contextvars.ContextVar[Auth.types.BaseAuthContext | None](
@@ -17,22 +19,28 @@ AuthContext = contextvars.ContextVar[Auth.types.BaseAuthContext | None](
 
 
 @asynccontextmanager
-async def with_user(user: BaseUser | None = None, auth: AuthCredentials | None = None):
+async def with_user(
+    user: BaseUser | None = None, auth: AuthCredentials | list[str] | None = None
+):
     current = get_auth_ctx()
     set_auth_ctx(user, auth)
     yield
     if current is None:
         return
-    set_auth_ctx(current.user, AuthCredentials(scopes=current.scopes))
+    set_auth_ctx(current.user, AuthCredentials(scopes=current.permissions))
 
 
-def set_auth_ctx(user: BaseUser | None, auth: AuthCredentials | None) -> None:
-    if not user or not auth:
+def set_auth_ctx(
+    user: BaseUser | None, auth: AuthCredentials | list[str] | None
+) -> None:
+    if user is None and auth is None:
         AuthContext.set(None)
     else:
         AuthContext.set(
             Auth.types.BaseAuthContext(
-                permissions=auth.scopes,
+                permissions=(
+                    auth.scopes if isinstance(auth, AuthCredentials) else (auth or [])
+                ),
                 user=user or SimpleUser(""),
             )
         )
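After this change, set_auth_ctx accepts permissions either as Starlette AuthCredentials or as a bare list of scope strings, and it clears the context only when both arguments are None. A minimal sketch of the permissions dispatch, using a stand-in class rather than the real Starlette type:

class AuthCredentials:
    # Stand-in for starlette.authentication.AuthCredentials.
    def __init__(self, scopes: list[str] | None = None) -> None:
        self.scopes = scopes or []

def resolve_permissions(auth: AuthCredentials | list[str] | None) -> list[str]:
    # Mirrors the branch added to set_auth_ctx: unwrap AuthCredentials,
    # pass plain lists through, default to no permissions.
    return auth.scopes if isinstance(auth, AuthCredentials) else (auth or [])

assert resolve_permissions(AuthCredentials(["me", "threads:read"])) == ["me", "threads:read"]
assert resolve_permissions(["me"]) == ["me"]
assert resolve_permissions(None) == []
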
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langgraph-api
-Version: 0.0.15
+Version: 0.0.17
 Summary:
 License: Elastic-2.0
 Author: Nuno Campos
@@ -16,7 +16,7 @@ Requires-Dist: jsonschema-rs (>=0.25.0,<0.26.0)
 Requires-Dist: langchain-core (>=0.2.38,<0.4.0)
 Requires-Dist: langgraph (>=0.2.56,<0.3.0)
 Requires-Dist: langgraph-checkpoint (>=2.0.7,<3.0)
-Requires-Dist: langgraph-sdk (>=0.1.48,<0.2.0)
+Requires-Dist: langgraph-sdk (>=0.1.51,<0.2.0)
 Requires-Dist: langsmith (>=0.1.63,<0.3.0)
 Requires-Dist: orjson (>=3.10.1)
 Requires-Dist: pyjwt (>=2.9.0,<3.0.0)