@langchain/langgraph 0.2.26 → 0.2.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/errors.cjs CHANGED
@@ -64,6 +64,7 @@ class GraphInterrupt extends GraphBubbleUp {
64
64
  exports.GraphInterrupt = GraphInterrupt;
65
65
  /** Raised by a node to interrupt execution. */
66
66
  class NodeInterrupt extends GraphInterrupt {
67
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
67
68
  constructor(message, fields) {
68
69
  super([
69
70
  {
package/dist/errors.d.ts CHANGED
@@ -24,7 +24,7 @@ export declare class GraphInterrupt extends GraphBubbleUp {
24
24
  }
25
25
  /** Raised by a node to interrupt execution. */
26
26
  export declare class NodeInterrupt extends GraphInterrupt {
27
- constructor(message: string, fields?: BaseLangGraphErrorFields);
27
+ constructor(message: any, fields?: BaseLangGraphErrorFields);
28
28
  static get unminifiable_name(): string;
29
29
  }
30
30
  export declare function isGraphBubbleUp(e?: Error): e is GraphBubbleUp;
package/dist/errors.js CHANGED
@@ -56,6 +56,7 @@ export class GraphInterrupt extends GraphBubbleUp {
56
56
  }
57
57
  /** Raised by a node to interrupt execution. */
58
58
  export class NodeInterrupt extends GraphInterrupt {
59
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
59
60
  constructor(message, fields) {
60
61
  super([
61
62
  {
@@ -1,5 +1,5 @@
1
1
  export { type AgentExecutorState, createAgentExecutor, } from "./agent_executor.js";
2
2
  export { type FunctionCallingExecutorState, createFunctionCallingExecutor, } from "./chat_agent_executor.js";
3
- export { type AgentState, createReactAgent } from "./react_agent_executor.js";
3
+ export { type AgentState, type CreateReactAgentParams, createReactAgent, } from "./react_agent_executor.js";
4
4
  export { type ToolExecutorArgs, type ToolInvocationInterface, ToolExecutor, } from "./tool_executor.js";
5
5
  export { ToolNode, toolsCondition, type ToolNodeOptions } from "./tool_node.js";
@@ -1,5 +1,5 @@
1
1
  export { createAgentExecutor, } from "./agent_executor.js";
2
2
  export { createFunctionCallingExecutor, } from "./chat_agent_executor.js";
3
- export { createReactAgent } from "./react_agent_executor.js";
3
+ export { createReactAgent, } from "./react_agent_executor.js";
4
4
  export { ToolExecutor, } from "./tool_executor.js";
5
5
  export { ToolNode, toolsCondition } from "./tool_node.js";
@@ -3,20 +3,70 @@ Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.createReactAgent = void 0;
4
4
  const messages_1 = require("@langchain/core/messages");
5
5
  const runnables_1 = require("@langchain/core/runnables");
6
- const prompts_1 = require("@langchain/core/prompts");
7
6
  const index_js_1 = require("../graph/index.cjs");
8
7
  const messages_annotation_js_1 = require("../graph/messages_annotation.cjs");
9
8
  const tool_node_js_1 = require("./tool_node.cjs");
9
+ function _convertMessageModifierToStateModifier(messageModifier) {
10
+ // Handle string or SystemMessage
11
+ if (typeof messageModifier === "string" ||
12
+ ((0, messages_1.isBaseMessage)(messageModifier) && messageModifier._getType() === "system")) {
13
+ return messageModifier;
14
+ }
15
+ // Handle callable function
16
+ if (typeof messageModifier === "function") {
17
+ return async (state) => messageModifier(state.messages);
18
+ }
19
+ // Handle Runnable
20
+ if (runnables_1.Runnable.isRunnable(messageModifier)) {
21
+ return runnables_1.RunnableLambda.from((state) => state.messages).pipe(messageModifier);
22
+ }
23
+ throw new Error(`Unexpected type for messageModifier: ${typeof messageModifier}`);
24
+ }
25
+ function _getStateModifierRunnable(stateModifier) {
26
+ let stateModifierRunnable;
27
+ if (stateModifier == null) {
28
+ stateModifierRunnable = runnables_1.RunnableLambda.from((state) => state.messages).withConfig({ runName: "state_modifier" });
29
+ }
30
+ else if (typeof stateModifier === "string") {
31
+ const systemMessage = new messages_1.SystemMessage(stateModifier);
32
+ stateModifierRunnable = runnables_1.RunnableLambda.from((state) => {
33
+ return [systemMessage, ...(state.messages ?? [])];
34
+ }).withConfig({ runName: "state_modifier" });
35
+ }
36
+ else if ((0, messages_1.isBaseMessage)(stateModifier) &&
37
+ stateModifier._getType() === "system") {
38
+ stateModifierRunnable = runnables_1.RunnableLambda.from((state) => [
39
+ stateModifier,
40
+ ...state.messages,
41
+ ]).withConfig({ runName: "state_modifier" });
42
+ }
43
+ else if (typeof stateModifier === "function") {
44
+ stateModifierRunnable = runnables_1.RunnableLambda.from(stateModifier).withConfig({
45
+ runName: "state_modifier",
46
+ });
47
+ }
48
+ else if (runnables_1.Runnable.isRunnable(stateModifier)) {
49
+ stateModifierRunnable = stateModifier;
50
+ }
51
+ else {
52
+ throw new Error(`Got unexpected type for 'stateModifier': ${typeof stateModifier}`);
53
+ }
54
+ return stateModifierRunnable;
55
+ }
56
+ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
57
+ // Check if both modifiers exist
58
+ if (stateModifier != null && messageModifier != null) {
59
+ throw new Error("Expected value for either stateModifier or messageModifier, got values for both");
60
+ }
61
+ // Convert message modifier to state modifier if necessary
62
+ if (stateModifier == null && messageModifier != null) {
63
+ // eslint-disable-next-line no-param-reassign
64
+ stateModifier = _convertMessageModifierToStateModifier(messageModifier);
65
+ }
66
+ return _getStateModifierRunnable(stateModifier);
67
+ }
10
68
  /**
11
69
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
12
- * @param params.llm The chat model that can utilize OpenAI-style tool calling.
13
- * @param params.tools A list of tools or a ToolNode.
14
- * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
15
- * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
16
- * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
17
- * @param params.interruptBefore An optional list of node names to interrupt before running.
18
- * @param params.interruptAfter An optional list of node names to interrupt after running.
19
- * @returns A prebuilt compiled graph.
20
70
  *
21
71
  * @example
22
72
  * ```ts
@@ -58,7 +108,7 @@ const tool_node_js_1 = require("./tool_node.cjs");
58
108
  * ```
59
109
  */
60
110
  function createReactAgent(params) {
61
- const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } = params;
111
+ const { llm, tools, messageModifier, stateModifier, checkpointSaver, interruptBefore, interruptAfter, store, } = params;
62
112
  let toolClasses;
63
113
  if (!Array.isArray(tools)) {
64
114
  toolClasses = tools.tools;
@@ -70,7 +120,9 @@ function createReactAgent(params) {
70
120
  throw new Error(`llm ${llm} must define bindTools method.`);
71
121
  }
72
122
  const modelWithTools = llm.bindTools(toolClasses);
73
- const modelRunnable = _createModelWrapper(modelWithTools, messageModifier);
123
+ // we're passing store here for validation
124
+ const preprocessor = _getModelPreprocessingRunnable(stateModifier, messageModifier);
125
+ const modelRunnable = preprocessor.pipe(modelWithTools);
74
126
  const shouldContinue = (state) => {
75
127
  const { messages } = state;
76
128
  const lastMessage = messages[messages.length - 1];
@@ -83,9 +135,8 @@ function createReactAgent(params) {
83
135
  }
84
136
  };
85
137
  const callModel = async (state, config) => {
86
- const { messages } = state;
87
138
  // TODO: Auto-promote streaming.
88
- return { messages: [await modelRunnable.invoke(messages, config)] };
139
+ return { messages: [await modelRunnable.invoke(state, config)] };
89
140
  };
90
141
  const workflow = new index_js_1.StateGraph(messages_annotation_js_1.MessagesAnnotation)
91
142
  .addNode("agent", callModel)
@@ -100,39 +151,7 @@ function createReactAgent(params) {
100
151
  checkpointer: checkpointSaver,
101
152
  interruptBefore,
102
153
  interruptAfter,
154
+ store,
103
155
  });
104
156
  }
105
157
  exports.createReactAgent = createReactAgent;
106
- function _createModelWrapper(modelWithTools, messageModifier) {
107
- if (!messageModifier) {
108
- return modelWithTools;
109
- }
110
- const endict = runnables_1.RunnableLambda.from((messages) => ({
111
- messages,
112
- }));
113
- if (typeof messageModifier === "string") {
114
- const systemMessage = new messages_1.SystemMessage(messageModifier);
115
- const prompt = prompts_1.ChatPromptTemplate.fromMessages([
116
- systemMessage,
117
- ["placeholder", "{messages}"],
118
- ]);
119
- return endict.pipe(prompt).pipe(modelWithTools);
120
- }
121
- if (typeof messageModifier === "function") {
122
- const lambda = runnables_1.RunnableLambda.from(messageModifier).withConfig({
123
- runName: "message_modifier",
124
- });
125
- return lambda.pipe(modelWithTools);
126
- }
127
- if (runnables_1.Runnable.isRunnable(messageModifier)) {
128
- return messageModifier.pipe(modelWithTools);
129
- }
130
- if (messageModifier._getType() === "system") {
131
- const prompt = prompts_1.ChatPromptTemplate.fromMessages([
132
- messageModifier,
133
- ["placeholder", "{messages}"],
134
- ]);
135
- return endict.pipe(prompt).pipe(modelWithTools);
136
- }
137
- throw new Error(`Unsupported message modifier type: ${typeof messageModifier}`);
138
- }
@@ -1,34 +1,88 @@
1
1
  import { BaseChatModel } from "@langchain/core/language_models/chat_models";
2
- import { BaseMessage, SystemMessage } from "@langchain/core/messages";
2
+ import { BaseMessage, BaseMessageLike, SystemMessage } from "@langchain/core/messages";
3
3
  import { Runnable, RunnableToolLike } from "@langchain/core/runnables";
4
4
  import { StructuredToolInterface } from "@langchain/core/tools";
5
- import { All, BaseCheckpointSaver } from "@langchain/langgraph-checkpoint";
6
- import { START } from "../graph/index.js";
5
+ import { All, BaseCheckpointSaver, BaseStore } from "@langchain/langgraph-checkpoint";
6
+ import { START, CompiledStateGraph } from "../graph/index.js";
7
7
  import { MessagesAnnotation } from "../graph/messages_annotation.js";
8
- import { CompiledStateGraph } from "../graph/state.js";
9
8
  import { ToolNode } from "./tool_node.js";
9
+ import { LangGraphRunnableConfig } from "../pregel/runnable_types.js";
10
10
  export interface AgentState {
11
11
  messages: BaseMessage[];
12
12
  }
13
13
  export type N = typeof START | "agent" | "tools";
14
+ export type StateModifier = SystemMessage | string | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => BaseMessageLike[]) | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => Promise<BaseMessageLike[]>) | Runnable;
15
+ /** @deprecated Use StateModifier instead. */
16
+ export type MessageModifier = SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;
14
17
  export type CreateReactAgentParams = {
18
+ /** The chat model that can utilize OpenAI-style tool calling. */
15
19
  llm: BaseChatModel;
20
+ /** A list of tools or a ToolNode. */
16
21
  tools: ToolNode<typeof MessagesAnnotation.State> | (StructuredToolInterface | RunnableToolLike)[];
17
- messageModifier?: SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;
22
+ /**
23
+ * @deprecated
24
+ * Use stateModifier instead. stateModifier works the same as
25
+ * messageModifier in that it runs right before calling the chat model,
26
+ * but if passed as a function, it takes the full graph state as
27
+ * input whenever a tool is called rather than a list of messages.
28
+ *
29
+ * If a function is passed, it should return a list of messages to
30
+ * pass directly to the chat model.
31
+ *
32
+ * @example
33
+ * ```ts
34
+ * import { ChatOpenAI } from "@langchain/openai";
35
+ * import { MessagesAnnotation } from "@langchain/langgraph";
36
+ * import { createReactAgent } from "@langchain/langgraph/prebuilt";
37
+ * import { type BaseMessage, SystemMessage } from "@langchain/core/messages";
38
+ *
39
+ * const llm = new ChatOpenAI({
40
+ * model: "gpt-4o-mini",
41
+ * });
42
+ *
43
+ * const tools = [...];
44
+ *
45
+ * // Deprecated style with messageModifier
46
+ * const deprecated = createReactAgent({
47
+ * llm,
48
+ * tools,
49
+ * messageModifier: async (messages: BaseMessage[]) => {
50
+ * return [new SystemMessage("You are a pirate")].concat(messages);
51
+ * }
52
+ * });
53
+ *
54
+ * // New style with stateModifier
55
+ * const agent = createReactAgent({
56
+ * llm,
57
+ * tools,
58
+ * stateModifier: async (state: typeof MessagesAnnotation.State) => {
59
+ * return [new SystemMessage("You are a pirate.")].concat(state.messages);
60
+ * }
61
+ * });
62
+ * ```
63
+ */
64
+ messageModifier?: MessageModifier;
65
+ /**
66
+ * An optional state modifier. This takes full graph state BEFORE the LLM is called and prepares the input to LLM.
67
+ *
68
+ * Can take a few different forms:
69
+ *
70
+ * - SystemMessage: this is added to the beginning of the list of messages in state["messages"].
71
+ * - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state["messages"].
72
+ * - Function: This function should take in full graph state and the output is then passed to the language model.
73
+ * - Runnable: This runnable should take in full graph state and the output is then passed to the language model.
74
+ */
75
+ stateModifier?: StateModifier;
76
+ /** An optional checkpoint saver to persist the agent's state. */
18
77
  checkpointSaver?: BaseCheckpointSaver;
78
+ /** An optional list of node names to interrupt before running. */
19
79
  interruptBefore?: N[] | All;
80
+ /** An optional list of node names to interrupt after running. */
20
81
  interruptAfter?: N[] | All;
82
+ store?: BaseStore;
21
83
  };
22
84
  /**
23
85
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
24
- * @param params.llm The chat model that can utilize OpenAI-style tool calling.
25
- * @param params.tools A list of tools or a ToolNode.
26
- * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
27
- * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
28
- * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
29
- * @param params.interruptBefore An optional list of node names to interrupt before running.
30
- * @param params.interruptAfter An optional list of node names to interrupt after running.
31
- * @returns A prebuilt compiled graph.
32
86
  *
33
87
  * @example
34
88
  * ```ts
@@ -1,19 +1,69 @@
1
- import { isAIMessage, SystemMessage, } from "@langchain/core/messages";
1
+ import { isAIMessage, isBaseMessage, SystemMessage, } from "@langchain/core/messages";
2
2
  import { Runnable, RunnableLambda, } from "@langchain/core/runnables";
3
- import { ChatPromptTemplate } from "@langchain/core/prompts";
4
3
  import { END, START, StateGraph } from "../graph/index.js";
5
4
  import { MessagesAnnotation } from "../graph/messages_annotation.js";
6
5
  import { ToolNode } from "./tool_node.js";
6
+ function _convertMessageModifierToStateModifier(messageModifier) {
7
+ // Handle string or SystemMessage
8
+ if (typeof messageModifier === "string" ||
9
+ (isBaseMessage(messageModifier) && messageModifier._getType() === "system")) {
10
+ return messageModifier;
11
+ }
12
+ // Handle callable function
13
+ if (typeof messageModifier === "function") {
14
+ return async (state) => messageModifier(state.messages);
15
+ }
16
+ // Handle Runnable
17
+ if (Runnable.isRunnable(messageModifier)) {
18
+ return RunnableLambda.from((state) => state.messages).pipe(messageModifier);
19
+ }
20
+ throw new Error(`Unexpected type for messageModifier: ${typeof messageModifier}`);
21
+ }
22
+ function _getStateModifierRunnable(stateModifier) {
23
+ let stateModifierRunnable;
24
+ if (stateModifier == null) {
25
+ stateModifierRunnable = RunnableLambda.from((state) => state.messages).withConfig({ runName: "state_modifier" });
26
+ }
27
+ else if (typeof stateModifier === "string") {
28
+ const systemMessage = new SystemMessage(stateModifier);
29
+ stateModifierRunnable = RunnableLambda.from((state) => {
30
+ return [systemMessage, ...(state.messages ?? [])];
31
+ }).withConfig({ runName: "state_modifier" });
32
+ }
33
+ else if (isBaseMessage(stateModifier) &&
34
+ stateModifier._getType() === "system") {
35
+ stateModifierRunnable = RunnableLambda.from((state) => [
36
+ stateModifier,
37
+ ...state.messages,
38
+ ]).withConfig({ runName: "state_modifier" });
39
+ }
40
+ else if (typeof stateModifier === "function") {
41
+ stateModifierRunnable = RunnableLambda.from(stateModifier).withConfig({
42
+ runName: "state_modifier",
43
+ });
44
+ }
45
+ else if (Runnable.isRunnable(stateModifier)) {
46
+ stateModifierRunnable = stateModifier;
47
+ }
48
+ else {
49
+ throw new Error(`Got unexpected type for 'stateModifier': ${typeof stateModifier}`);
50
+ }
51
+ return stateModifierRunnable;
52
+ }
53
+ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
54
+ // Check if both modifiers exist
55
+ if (stateModifier != null && messageModifier != null) {
56
+ throw new Error("Expected value for either stateModifier or messageModifier, got values for both");
57
+ }
58
+ // Convert message modifier to state modifier if necessary
59
+ if (stateModifier == null && messageModifier != null) {
60
+ // eslint-disable-next-line no-param-reassign
61
+ stateModifier = _convertMessageModifierToStateModifier(messageModifier);
62
+ }
63
+ return _getStateModifierRunnable(stateModifier);
64
+ }
7
65
  /**
8
66
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
9
- * @param params.llm The chat model that can utilize OpenAI-style tool calling.
10
- * @param params.tools A list of tools or a ToolNode.
11
- * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
12
- * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
13
- * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
14
- * @param params.interruptBefore An optional list of node names to interrupt before running.
15
- * @param params.interruptAfter An optional list of node names to interrupt after running.
16
- * @returns A prebuilt compiled graph.
17
67
  *
18
68
  * @example
19
69
  * ```ts
@@ -55,7 +105,7 @@ import { ToolNode } from "./tool_node.js";
55
105
  * ```
56
106
  */
57
107
  export function createReactAgent(params) {
58
- const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } = params;
108
+ const { llm, tools, messageModifier, stateModifier, checkpointSaver, interruptBefore, interruptAfter, store, } = params;
59
109
  let toolClasses;
60
110
  if (!Array.isArray(tools)) {
61
111
  toolClasses = tools.tools;
@@ -67,7 +117,9 @@ export function createReactAgent(params) {
67
117
  throw new Error(`llm ${llm} must define bindTools method.`);
68
118
  }
69
119
  const modelWithTools = llm.bindTools(toolClasses);
70
- const modelRunnable = _createModelWrapper(modelWithTools, messageModifier);
120
+ // we're passing store here for validation
121
+ const preprocessor = _getModelPreprocessingRunnable(stateModifier, messageModifier);
122
+ const modelRunnable = preprocessor.pipe(modelWithTools);
71
123
  const shouldContinue = (state) => {
72
124
  const { messages } = state;
73
125
  const lastMessage = messages[messages.length - 1];
@@ -80,9 +132,8 @@ export function createReactAgent(params) {
80
132
  }
81
133
  };
82
134
  const callModel = async (state, config) => {
83
- const { messages } = state;
84
135
  // TODO: Auto-promote streaming.
85
- return { messages: [await modelRunnable.invoke(messages, config)] };
136
+ return { messages: [await modelRunnable.invoke(state, config)] };
86
137
  };
87
138
  const workflow = new StateGraph(MessagesAnnotation)
88
139
  .addNode("agent", callModel)
@@ -97,38 +148,6 @@ export function createReactAgent(params) {
97
148
  checkpointer: checkpointSaver,
98
149
  interruptBefore,
99
150
  interruptAfter,
151
+ store,
100
152
  });
101
153
  }
102
- function _createModelWrapper(modelWithTools, messageModifier) {
103
- if (!messageModifier) {
104
- return modelWithTools;
105
- }
106
- const endict = RunnableLambda.from((messages) => ({
107
- messages,
108
- }));
109
- if (typeof messageModifier === "string") {
110
- const systemMessage = new SystemMessage(messageModifier);
111
- const prompt = ChatPromptTemplate.fromMessages([
112
- systemMessage,
113
- ["placeholder", "{messages}"],
114
- ]);
115
- return endict.pipe(prompt).pipe(modelWithTools);
116
- }
117
- if (typeof messageModifier === "function") {
118
- const lambda = RunnableLambda.from(messageModifier).withConfig({
119
- runName: "message_modifier",
120
- });
121
- return lambda.pipe(modelWithTools);
122
- }
123
- if (Runnable.isRunnable(messageModifier)) {
124
- return messageModifier.pipe(modelWithTools);
125
- }
126
- if (messageModifier._getType() === "system") {
127
- const prompt = ChatPromptTemplate.fromMessages([
128
- messageModifier,
129
- ["placeholder", "{messages}"],
130
- ]);
131
- return endict.pipe(prompt).pipe(modelWithTools);
132
- }
133
- throw new Error(`Unsupported message modifier type: ${typeof messageModifier}`);
134
- }
@@ -472,8 +472,13 @@ class Pregel extends runnables_1.Runnable {
472
472
  let checkpointConfig = (0, utils_js_1.patchConfigurable)(config, {
473
473
  checkpoint_ns: config.configurable?.checkpoint_ns ?? "",
474
474
  });
475
+ let checkpointMetadata = config.metadata ?? {};
475
476
  if (saved?.config.configurable) {
476
477
  checkpointConfig = (0, utils_js_1.patchConfigurable)(config, saved.config.configurable);
478
+ checkpointMetadata = {
479
+ ...saved.metadata,
480
+ ...checkpointMetadata,
481
+ };
477
482
  }
478
483
  // Find last node that updated the state, if not provided
479
484
  if (values == null && asNode === undefined) {
@@ -485,6 +490,54 @@ class Pregel extends runnables_1.Runnable {
485
490
  }, {});
486
491
  return (0, index_js_1.patchCheckpointMap)(nextConfig, saved ? saved.metadata : undefined);
487
492
  }
493
+ // update channels
494
+ const channels = (0, base_js_1.emptyChannels)(this.channels, checkpoint);
495
+ // Pass `skipManaged: true` as managed values are not used/relevant in update state calls.
496
+ const { managed } = await this.prepareSpecs(config, { skipManaged: true });
497
+ if (values === null && asNode === "__end__") {
498
+ if (saved) {
499
+ // tasks for this checkpoint
500
+ const nextTasks = (0, algo_js_1._prepareNextTasks)(checkpoint, saved.pendingWrites || [], this.nodes, channels, managed, saved.config, true, {
501
+ step: (saved.metadata?.step ?? -1) + 1,
502
+ checkpointer: this.checkpointer || undefined,
503
+ store: this.store,
504
+ });
505
+ // apply null writes
506
+ const nullWrites = (saved.pendingWrites || [])
507
+ .filter((w) => w[0] === constants_js_1.NULL_TASK_ID)
508
+ .flatMap((w) => w.slice(1));
509
+ if (nullWrites.length > 0) {
510
+ (0, algo_js_1._applyWrites)(saved.checkpoint, channels, [
511
+ {
512
+ name: constants_js_1.INPUT,
513
+ writes: nullWrites,
514
+ triggers: [],
515
+ },
516
+ ]);
517
+ }
518
+ // apply writes from tasks that already ran
519
+ for (const [taskId, k, v] of saved.pendingWrites || []) {
520
+ if ([constants_js_1.ERROR, constants_js_1.INTERRUPT, langgraph_checkpoint_1.SCHEDULED].includes(k)) {
521
+ continue;
522
+ }
523
+ if (!(taskId in nextTasks)) {
524
+ continue;
525
+ }
526
+ nextTasks[taskId].writes.push([k, v]);
527
+ }
528
+ // clear all current tasks
529
+ (0, algo_js_1._applyWrites)(checkpoint, channels, Object.values(nextTasks));
530
+ }
531
+ // save checkpoint
532
+ const nextConfig = await checkpointer.put(checkpointConfig, (0, base_js_1.createCheckpoint)(checkpoint, undefined, step), {
533
+ ...checkpointMetadata,
534
+ source: "update",
535
+ step: step + 1,
536
+ writes: {},
537
+ parents: saved?.metadata?.parents ?? {},
538
+ }, {});
539
+ return (0, index_js_1.patchCheckpointMap)(nextConfig, saved ? saved.metadata : undefined);
540
+ }
488
541
  if (values == null && asNode === "__copy__") {
489
542
  const nextConfig = await checkpointer.put(saved?.parentConfig ?? checkpointConfig, (0, base_js_1.createCheckpoint)(checkpoint, undefined, step), {
490
543
  source: "fork",
@@ -534,10 +587,6 @@ class Pregel extends runnables_1.Runnable {
534
587
  if (this.nodes[asNode] === undefined) {
535
588
  throw new errors_js_1.InvalidUpdateError(`Node "${asNode.toString()}" does not exist`);
536
589
  }
537
- // update channels
538
- const channels = (0, base_js_1.emptyChannels)(this.channels, checkpoint);
539
- // Pass `skipManaged: true` as managed values are not used/relevant in update state calls.
540
- const { managed } = await this.prepareSpecs(config, { skipManaged: true });
541
590
  // run all writers of the chosen node
542
591
  const writers = this.nodes[asNode].getWriters();
543
592
  if (!writers.length) {
@@ -1,13 +1,13 @@
1
1
  /* eslint-disable no-param-reassign */
2
2
  import { Runnable, RunnableSequence, getCallbackManagerForConfig, mergeConfigs, patchConfig, _coerceToRunnable, } from "@langchain/core/runnables";
3
- import { compareChannelVersions, copyCheckpoint, emptyCheckpoint, uuid5, } from "@langchain/langgraph-checkpoint";
3
+ import { compareChannelVersions, copyCheckpoint, emptyCheckpoint, SCHEDULED, uuid5, } from "@langchain/langgraph-checkpoint";
4
4
  import { createCheckpoint, emptyChannels, isBaseChannel, } from "../channels/base.js";
5
5
  import { PregelNode } from "./read.js";
6
6
  import { validateGraph, validateKeys } from "./validate.js";
7
7
  import { readChannels } from "./io.js";
8
8
  import { printStepCheckpoint, printStepTasks, printStepWrites, tasksWithWrites, } from "./debug.js";
9
9
  import { ChannelWrite, PASSTHROUGH } from "./write.js";
10
- import { CONFIG_KEY_CHECKPOINTER, CONFIG_KEY_READ, CONFIG_KEY_SEND, ERROR, INTERRUPT, CHECKPOINT_NAMESPACE_SEPARATOR, CHECKPOINT_NAMESPACE_END, CONFIG_KEY_STREAM, CONFIG_KEY_TASK_ID, } from "../constants.js";
10
+ import { CONFIG_KEY_CHECKPOINTER, CONFIG_KEY_READ, CONFIG_KEY_SEND, ERROR, INTERRUPT, CHECKPOINT_NAMESPACE_SEPARATOR, CHECKPOINT_NAMESPACE_END, CONFIG_KEY_STREAM, CONFIG_KEY_TASK_ID, NULL_TASK_ID, INPUT, } from "../constants.js";
11
11
  import { GraphRecursionError, GraphValueError, InvalidUpdateError, isGraphBubbleUp, isGraphInterrupt, } from "../errors.js";
12
12
  import { _prepareNextTasks, _localRead, _applyWrites, } from "./algo.js";
13
13
  import { _coerceToDict, getNewChannelVersions, patchCheckpointMap, } from "./utils/index.js";
@@ -468,8 +468,13 @@ export class Pregel extends Runnable {
468
468
  let checkpointConfig = patchConfigurable(config, {
469
469
  checkpoint_ns: config.configurable?.checkpoint_ns ?? "",
470
470
  });
471
+ let checkpointMetadata = config.metadata ?? {};
471
472
  if (saved?.config.configurable) {
472
473
  checkpointConfig = patchConfigurable(config, saved.config.configurable);
474
+ checkpointMetadata = {
475
+ ...saved.metadata,
476
+ ...checkpointMetadata,
477
+ };
473
478
  }
474
479
  // Find last node that updated the state, if not provided
475
480
  if (values == null && asNode === undefined) {
@@ -481,6 +486,54 @@ export class Pregel extends Runnable {
481
486
  }, {});
482
487
  return patchCheckpointMap(nextConfig, saved ? saved.metadata : undefined);
483
488
  }
489
+ // update channels
490
+ const channels = emptyChannels(this.channels, checkpoint);
491
+ // Pass `skipManaged: true` as managed values are not used/relevant in update state calls.
492
+ const { managed } = await this.prepareSpecs(config, { skipManaged: true });
493
+ if (values === null && asNode === "__end__") {
494
+ if (saved) {
495
+ // tasks for this checkpoint
496
+ const nextTasks = _prepareNextTasks(checkpoint, saved.pendingWrites || [], this.nodes, channels, managed, saved.config, true, {
497
+ step: (saved.metadata?.step ?? -1) + 1,
498
+ checkpointer: this.checkpointer || undefined,
499
+ store: this.store,
500
+ });
501
+ // apply null writes
502
+ const nullWrites = (saved.pendingWrites || [])
503
+ .filter((w) => w[0] === NULL_TASK_ID)
504
+ .flatMap((w) => w.slice(1));
505
+ if (nullWrites.length > 0) {
506
+ _applyWrites(saved.checkpoint, channels, [
507
+ {
508
+ name: INPUT,
509
+ writes: nullWrites,
510
+ triggers: [],
511
+ },
512
+ ]);
513
+ }
514
+ // apply writes from tasks that already ran
515
+ for (const [taskId, k, v] of saved.pendingWrites || []) {
516
+ if ([ERROR, INTERRUPT, SCHEDULED].includes(k)) {
517
+ continue;
518
+ }
519
+ if (!(taskId in nextTasks)) {
520
+ continue;
521
+ }
522
+ nextTasks[taskId].writes.push([k, v]);
523
+ }
524
+ // clear all current tasks
525
+ _applyWrites(checkpoint, channels, Object.values(nextTasks));
526
+ }
527
+ // save checkpoint
528
+ const nextConfig = await checkpointer.put(checkpointConfig, createCheckpoint(checkpoint, undefined, step), {
529
+ ...checkpointMetadata,
530
+ source: "update",
531
+ step: step + 1,
532
+ writes: {},
533
+ parents: saved?.metadata?.parents ?? {},
534
+ }, {});
535
+ return patchCheckpointMap(nextConfig, saved ? saved.metadata : undefined);
536
+ }
484
537
  if (values == null && asNode === "__copy__") {
485
538
  const nextConfig = await checkpointer.put(saved?.parentConfig ?? checkpointConfig, createCheckpoint(checkpoint, undefined, step), {
486
539
  source: "fork",
@@ -530,10 +583,6 @@ export class Pregel extends Runnable {
530
583
  if (this.nodes[asNode] === undefined) {
531
584
  throw new InvalidUpdateError(`Node "${asNode.toString()}" does not exist`);
532
585
  }
533
- // update channels
534
- const channels = emptyChannels(this.channels, checkpoint);
535
- // Pass `skipManaged: true` as managed values are not used/relevant in update state calls.
536
- const { managed } = await this.prepareSpecs(config, { skipManaged: true });
537
586
  // run all writers of the chosen node
538
587
  const writers = this.nodes[asNode].getWriters();
539
588
  if (!writers.length) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@langchain/langgraph",
3
- "version": "0.2.26",
3
+ "version": "0.2.27",
4
4
  "description": "LangGraph",
5
5
  "type": "module",
6
6
  "engines": {