@langchain/langgraph 0.2.26 → 0.2.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,14 @@
  /* eslint-disable @typescript-eslint/no-use-before-define */
  import { _coerceToRunnable, Runnable, } from "@langchain/core/runnables";
  import { isBaseChannel } from "../channels/base.js";
- import { END, CompiledGraph, Graph, START, } from "./graph.js";
+ import { END, CompiledGraph, Graph, START, Branch, } from "./graph.js";
  import { ChannelWrite, PASSTHROUGH, SKIP_WRITE, } from "../pregel/write.js";
  import { ChannelRead, PregelNode } from "../pregel/read.js";
  import { NamedBarrierValue } from "../channels/named_barrier_value.js";
  import { EphemeralValue } from "../channels/ephemeral_value.js";
  import { RunnableCallable } from "../utils.js";
- import { _isSend, CHECKPOINT_NAMESPACE_END, CHECKPOINT_NAMESPACE_SEPARATOR, TAG_HIDDEN, } from "../constants.js";
- import { InvalidUpdateError } from "../errors.js";
+ import { _isCommand, _isSend, CHECKPOINT_NAMESPACE_END, CHECKPOINT_NAMESPACE_SEPARATOR, Command, SELF, TAG_HIDDEN, } from "../constants.js";
+ import { InvalidUpdateError, ParentCommand } from "../errors.js";
  import { getChannel, } from "./annotation.js";
  import { isConfiguredManagedValue } from "../managed/base.js";
  import { isPregelLike } from "../pregel/utils/subgraph.js";
@@ -300,6 +300,14 @@ export class StateGraph extends Graph {
  for (const [key, node] of Object.entries(this.nodes)) {
  compiled.attachNode(key, node);
  }
+ compiled.attachBranch(START, SELF, _getControlBranch(), {
+ withReader: false,
+ });
+ for (const [key] of Object.entries(this.nodes)) {
+ compiled.attachBranch(key, SELF, _getControlBranch(), {
+ withReader: false,
+ });
+ }
  for (const [start, end] of this.edges) {
  compiled.attachEdge(start, end);
  }
@@ -335,13 +343,30 @@ function _getChannels(schema) {
  export class CompiledStateGraph extends CompiledGraph {
  attachNode(key, node) {
  const stateKeys = Object.keys(this.builder.channels);
+ function _getRoot(input) {
+ if (_isCommand(input)) {
+ if (input.graph === Command.PARENT) {
+ return SKIP_WRITE;
+ }
+ return input.update;
+ }
+ return input;
+ }
+ // to avoid name collision below
+ const nodeKey = key;
  function getStateKey(key, input) {
  if (!input) {
  return SKIP_WRITE;
  }
+ else if (_isCommand(input)) {
+ if (input.graph === Command.PARENT) {
+ return SKIP_WRITE;
+ }
+ return getStateKey(key, input.update);
+ }
  else if (typeof input !== "object" || Array.isArray(input)) {
  const typeofInput = Array.isArray(input) ? "array" : typeof input;
- throw new InvalidUpdateError(`Expected node "${key.toString()}" to return an object, received ${typeofInput}`, {
+ throw new InvalidUpdateError(`Expected node "${nodeKey.toString()}" to return an object, received ${typeofInput}`, {
  lc_error_code: "INVALID_GRAPH_NODE_RETURN_VALUE",
  });
  }
@@ -351,7 +376,16 @@ export class CompiledStateGraph extends CompiledGraph {
  }
  // state updaters
  const stateWriteEntries = stateKeys.map((key) => key === ROOT
- ? { channel: key, value: PASSTHROUGH, skipNone: true }
+ ? {
+ channel: key,
+ value: PASSTHROUGH,
+ skipNone: true,
+ mapper: new RunnableCallable({
+ func: _getRoot,
+ trace: false,
+ recurse: false,
+ }),
+ }
  : {
  channel: key,
  value: PASSTHROUGH,
@@ -426,28 +460,29 @@ export class CompiledStateGraph extends CompiledGraph {
  this.nodes[end].triggers.push(start);
  }
  }
- attachBranch(start, name, branch) {
- // attach branch publisher
- this.nodes[start].writers.push(branch.compile(
- // writer
- (dests) => {
- const filteredDests = dests.filter((dest) => dest !== END);
- if (!filteredDests.length) {
+ attachBranch(start, name, branch, options = { withReader: true }) {
+ const branchWriter = async (packets, config) => {
+ const filteredPackets = packets.filter((p) => p !== END);
+ if (!filteredPackets.length) {
  return;
  }
- const writes = filteredDests.map((dest) => {
- if (_isSend(dest)) {
- return dest;
+ const writes = filteredPackets.map((p) => {
+ if (_isSend(p)) {
+ return p;
  }
  return {
- channel: `branch:${start}:${name}:${dest}`,
+ channel: `branch:${start}:${name}:${p}`,
  value: start,
  };
  });
- return new ChannelWrite(writes, [TAG_HIDDEN]);
- },
+ await ChannelWrite.doWrite({ ...config, tags: (config.tags ?? []).concat([TAG_HIDDEN]) }, writes);
+ };
+ // attach branch publisher
+ this.nodes[start].writers.push(branch.run(branchWriter,
  // reader
- (config) => ChannelRead.doRead(config, this.streamChannels ?? this.outputChannels, true)));
+ options.withReader
+ ? (config) => ChannelRead.doRead(config, this.streamChannels ?? this.outputChannels, true)
+ : undefined));
  // attach branch subscribers
  const ends = branch.ends
  ? Object.values(branch.ends)
@@ -494,3 +529,28 @@ function isStateGraphArgsWithInputOutputSchemas(obj) {
  obj.input !== undefined &&
  obj.output !== undefined);
  }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ function _controlBranch(value) {
+ if (_isSend(value)) {
+ return [value];
+ }
+ if (!_isCommand(value)) {
+ return [];
+ }
+ if (value.graph === Command.PARENT) {
+ throw new ParentCommand(value);
+ }
+ return Array.isArray(value.goto) ? value.goto : [value.goto];
+ }
+ function _getControlBranch() {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const CONTROL_BRANCH_PATH = new RunnableCallable({
+ func: _controlBranch,
+ tags: [TAG_HIDDEN],
+ trace: false,
+ recurse: false,
+ });
+ return new Branch({
+ path: CONTROL_BRANCH_PATH,
+ });
+ }
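
For orientation, the hidden branch attached above to `START` and to every node routes on the node's return value itself: a `Send` is forwarded as-is, a `Command` contributes its `goto` targets, and `Command.PARENT` escalates to the parent graph via `ParentCommand`. Below is a minimal standalone sketch of that routing rule; the `Send`/`Command` shapes and the `PARENT` sentinel are simplified stand-ins for illustration, not the library's exported types.

```ts
// Simplified stand-ins for the internal Send/Command shapes used by _controlBranch.
type Send = { kind: "send"; node: string; args: unknown };
type Command = { kind: "command"; graph?: string; goto?: string | string[]; update?: unknown };

const PARENT = "__parent__"; // stand-in for Command.PARENT

const isSend = (x: unknown): x is Send =>
  typeof x === "object" && x !== null && (x as Send).kind === "send";
const isCommand = (x: unknown): x is Command =>
  typeof x === "object" && x !== null && (x as Command).kind === "command";

function controlBranch(value: unknown): (string | Send)[] {
  if (isSend(value)) return [value];   // Send packets pass through untouched
  if (!isCommand(value)) return [];    // plain state updates add no routing
  if (value.graph === PARENT) {
    // mirrors `throw new ParentCommand(value)`: routing is escalated to the parent graph
    throw new Error("ParentCommand: routing escalated to the parent graph");
  }
  return Array.isArray(value.goto)
    ? value.goto
    : value.goto !== undefined
      ? [value.goto]
      : [];
}

console.log(controlBranch({ kind: "command", goto: "tools" }));    // ["tools"]
console.log(controlBranch({ kind: "command", goto: ["a", "b"] })); // ["a", "b"]
console.log(controlBranch({ messages: [] }));                      // []
```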
@@ -1,5 +1,5 @@
  export { type AgentExecutorState, createAgentExecutor, } from "./agent_executor.js";
  export { type FunctionCallingExecutorState, createFunctionCallingExecutor, } from "./chat_agent_executor.js";
- export { type AgentState, createReactAgent } from "./react_agent_executor.js";
+ export { type AgentState, type CreateReactAgentParams, createReactAgent, } from "./react_agent_executor.js";
  export { type ToolExecutorArgs, type ToolInvocationInterface, ToolExecutor, } from "./tool_executor.js";
  export { ToolNode, toolsCondition, type ToolNodeOptions } from "./tool_node.js";
@@ -1,5 +1,5 @@
  export { createAgentExecutor, } from "./agent_executor.js";
  export { createFunctionCallingExecutor, } from "./chat_agent_executor.js";
- export { createReactAgent } from "./react_agent_executor.js";
+ export { createReactAgent, } from "./react_agent_executor.js";
  export { ToolExecutor, } from "./tool_executor.js";
  export { ToolNode, toolsCondition } from "./tool_node.js";
@@ -3,20 +3,70 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.createReactAgent = void 0;
  const messages_1 = require("@langchain/core/messages");
  const runnables_1 = require("@langchain/core/runnables");
- const prompts_1 = require("@langchain/core/prompts");
  const index_js_1 = require("../graph/index.cjs");
  const messages_annotation_js_1 = require("../graph/messages_annotation.cjs");
  const tool_node_js_1 = require("./tool_node.cjs");
+ function _convertMessageModifierToStateModifier(messageModifier) {
+ // Handle string or SystemMessage
+ if (typeof messageModifier === "string" ||
+ ((0, messages_1.isBaseMessage)(messageModifier) && messageModifier._getType() === "system")) {
+ return messageModifier;
+ }
+ // Handle callable function
+ if (typeof messageModifier === "function") {
+ return async (state) => messageModifier(state.messages);
+ }
+ // Handle Runnable
+ if (runnables_1.Runnable.isRunnable(messageModifier)) {
+ return runnables_1.RunnableLambda.from((state) => state.messages).pipe(messageModifier);
+ }
+ throw new Error(`Unexpected type for messageModifier: ${typeof messageModifier}`);
+ }
+ function _getStateModifierRunnable(stateModifier) {
+ let stateModifierRunnable;
+ if (stateModifier == null) {
+ stateModifierRunnable = runnables_1.RunnableLambda.from((state) => state.messages).withConfig({ runName: "state_modifier" });
+ }
+ else if (typeof stateModifier === "string") {
+ const systemMessage = new messages_1.SystemMessage(stateModifier);
+ stateModifierRunnable = runnables_1.RunnableLambda.from((state) => {
+ return [systemMessage, ...(state.messages ?? [])];
+ }).withConfig({ runName: "state_modifier" });
+ }
+ else if ((0, messages_1.isBaseMessage)(stateModifier) &&
+ stateModifier._getType() === "system") {
+ stateModifierRunnable = runnables_1.RunnableLambda.from((state) => [
+ stateModifier,
+ ...state.messages,
+ ]).withConfig({ runName: "state_modifier" });
+ }
+ else if (typeof stateModifier === "function") {
+ stateModifierRunnable = runnables_1.RunnableLambda.from(stateModifier).withConfig({
+ runName: "state_modifier",
+ });
+ }
+ else if (runnables_1.Runnable.isRunnable(stateModifier)) {
+ stateModifierRunnable = stateModifier;
+ }
+ else {
+ throw new Error(`Got unexpected type for 'stateModifier': ${typeof stateModifier}`);
+ }
+ return stateModifierRunnable;
+ }
+ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
+ // Check if both modifiers exist
+ if (stateModifier != null && messageModifier != null) {
+ throw new Error("Expected value for either stateModifier or messageModifier, got values for both");
+ }
+ // Convert message modifier to state modifier if necessary
+ if (stateModifier == null && messageModifier != null) {
+ // eslint-disable-next-line no-param-reassign
+ stateModifier = _convertMessageModifierToStateModifier(messageModifier);
+ }
+ return _getStateModifierRunnable(stateModifier);
+ }
  /**
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
- * @param params.llm The chat model that can utilize OpenAI-style tool calling.
- * @param params.tools A list of tools or a ToolNode.
- * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
- * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
- * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
- * @param params.interruptBefore An optional list of node names to interrupt before running.
- * @param params.interruptAfter An optional list of node names to interrupt after running.
- * @returns A prebuilt compiled graph.
  *
  * @example
  * ```ts
@@ -58,7 +108,7 @@ const tool_node_js_1 = require("./tool_node.cjs");
  * ```
  */
  function createReactAgent(params) {
- const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } = params;
+ const { llm, tools, messageModifier, stateModifier, checkpointSaver, interruptBefore, interruptAfter, store, } = params;
  let toolClasses;
  if (!Array.isArray(tools)) {
  toolClasses = tools.tools;
@@ -70,7 +120,9 @@ function createReactAgent(params) {
  throw new Error(`llm ${llm} must define bindTools method.`);
  }
  const modelWithTools = llm.bindTools(toolClasses);
- const modelRunnable = _createModelWrapper(modelWithTools, messageModifier);
+ // we're passing store here for validation
+ const preprocessor = _getModelPreprocessingRunnable(stateModifier, messageModifier);
+ const modelRunnable = preprocessor.pipe(modelWithTools);
  const shouldContinue = (state) => {
  const { messages } = state;
  const lastMessage = messages[messages.length - 1];
@@ -83,9 +135,8 @@ function createReactAgent(params) {
  }
  };
  const callModel = async (state, config) => {
- const { messages } = state;
  // TODO: Auto-promote streaming.
- return { messages: [await modelRunnable.invoke(messages, config)] };
+ return { messages: [await modelRunnable.invoke(state, config)] };
  };
  const workflow = new index_js_1.StateGraph(messages_annotation_js_1.MessagesAnnotation)
  .addNode("agent", callModel)
@@ -100,39 +151,7 @@ function createReactAgent(params) {
  checkpointer: checkpointSaver,
  interruptBefore,
  interruptAfter,
+ store,
  });
  }
  exports.createReactAgent = createReactAgent;
- function _createModelWrapper(modelWithTools, messageModifier) {
- if (!messageModifier) {
- return modelWithTools;
- }
- const endict = runnables_1.RunnableLambda.from((messages) => ({
- messages,
- }));
- if (typeof messageModifier === "string") {
- const systemMessage = new messages_1.SystemMessage(messageModifier);
- const prompt = prompts_1.ChatPromptTemplate.fromMessages([
- systemMessage,
- ["placeholder", "{messages}"],
- ]);
- return endict.pipe(prompt).pipe(modelWithTools);
- }
- if (typeof messageModifier === "function") {
- const lambda = runnables_1.RunnableLambda.from(messageModifier).withConfig({
- runName: "message_modifier",
- });
- return lambda.pipe(modelWithTools);
- }
- if (runnables_1.Runnable.isRunnable(messageModifier)) {
- return messageModifier.pipe(modelWithTools);
- }
- if (messageModifier._getType() === "system") {
- const prompt = prompts_1.ChatPromptTemplate.fromMessages([
- messageModifier,
- ["placeholder", "{messages}"],
- ]);
- return endict.pipe(prompt).pipe(modelWithTools);
- }
- throw new Error(`Unsupported message modifier type: ${typeof messageModifier}`);
- }
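
The helpers added above replace `_createModelWrapper`: the state modifier is normalized into a Runnable over the full graph state and then piped into the tool-bound model. Below is a rough sketch of what that pipeline amounts to for the string/SystemMessage case, built only from `@langchain/core` primitives; the state shape and system prompt are illustrative, not taken from the package.

```ts
import { RunnableLambda } from "@langchain/core/runnables";
import { SystemMessage, type BaseMessage } from "@langchain/core/messages";

type AgentState = { messages: BaseMessage[] };

// Equivalent of the string/SystemMessage branch of _getStateModifierRunnable:
// prepend a system message to state.messages right before each model call.
const systemMessage = new SystemMessage("You are a helpful assistant.");
const preprocessor = RunnableLambda.from(
  (state: AgentState) => [systemMessage, ...(state.messages ?? [])]
).withConfig({ runName: "state_modifier" });

// Inside createReactAgent this preprocessor is piped into the tool-bound model
// and invoked with the full state, e.g.:
//   const modelRunnable = preprocessor.pipe(modelWithTools);
//   return { messages: [await modelRunnable.invoke(state, config)] };
```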
@@ -1,34 +1,88 @@
  import { BaseChatModel } from "@langchain/core/language_models/chat_models";
- import { BaseMessage, SystemMessage } from "@langchain/core/messages";
+ import { BaseMessage, BaseMessageLike, SystemMessage } from "@langchain/core/messages";
  import { Runnable, RunnableToolLike } from "@langchain/core/runnables";
  import { StructuredToolInterface } from "@langchain/core/tools";
- import { All, BaseCheckpointSaver } from "@langchain/langgraph-checkpoint";
- import { START } from "../graph/index.js";
+ import { All, BaseCheckpointSaver, BaseStore } from "@langchain/langgraph-checkpoint";
+ import { START, CompiledStateGraph } from "../graph/index.js";
  import { MessagesAnnotation } from "../graph/messages_annotation.js";
- import { CompiledStateGraph } from "../graph/state.js";
  import { ToolNode } from "./tool_node.js";
+ import { LangGraphRunnableConfig } from "../pregel/runnable_types.js";
  export interface AgentState {
  messages: BaseMessage[];
  }
  export type N = typeof START | "agent" | "tools";
+ export type StateModifier = SystemMessage | string | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => BaseMessageLike[]) | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => Promise<BaseMessageLike[]>) | Runnable;
+ /** @deprecated Use StateModifier instead. */
+ export type MessageModifier = SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;
  export type CreateReactAgentParams = {
+ /** The chat model that can utilize OpenAI-style tool calling. */
  llm: BaseChatModel;
+ /** A list of tools or a ToolNode. */
  tools: ToolNode<typeof MessagesAnnotation.State> | (StructuredToolInterface | RunnableToolLike)[];
- messageModifier?: SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;
+ /**
+ * @deprecated
+ * Use stateModifier instead. stateModifier works the same as
+ * messageModifier in that it runs right before calling the chat model,
+ * but if passed as a function, it takes the full graph state as
+ * input whenever a tool is called rather than a list of messages.
+ *
+ * If a function is passed, it should return a list of messages to
+ * pass directly to the chat model.
+ *
+ * @example
+ * ```ts
+ * import { ChatOpenAI } from "@langchain/openai";
+ * import { MessagesAnnotation } from "@langchain/langgraph";
+ * import { createReactAgent } from "@langchain/langgraph/prebuilt";
+ * import { type BaseMessage, SystemMessage } from "@langchain/core/messages";
+ *
+ * const model = new ChatOpenAI({
+ * model: "gpt-4o-mini",
+ * });
+ *
+ * const tools = [...];
+ *
+ * // Deprecated style with messageModifier
+ * const deprecated = createReactAgent({
+ * llm: model,
+ * tools,
+ * messageModifier: async (messages: BaseMessage[]) => {
+ * return [new SystemMessage("You are a pirate")].concat(messages);
+ * }
+ * });
+ *
+ * // New style with stateModifier
+ * const agent = createReactAgent({
+ * llm: model,
+ * tools,
+ * stateModifier: async (state: typeof MessagesAnnotation.State) => {
+ * return [new SystemMessage("You are a pirate.")].concat(state.messages);
+ * }
+ * });
+ * ```
+ */
+ messageModifier?: MessageModifier;
+ /**
+ * An optional state modifier. This takes full graph state BEFORE the LLM is called and prepares the input to LLM.
+ *
+ * Can take a few different forms:
+ *
+ * - SystemMessage: this is added to the beginning of the list of messages in state["messages"].
+ * - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state["messages"].
+ * - Function: This function should take in full graph state and the output is then passed to the language model.
+ * - Runnable: This runnable should take in full graph state and the output is then passed to the language model.
+ */
+ stateModifier?: StateModifier;
+ /** An optional checkpoint saver to persist the agent's state. */
  checkpointSaver?: BaseCheckpointSaver;
+ /** An optional list of node names to interrupt before running. */
  interruptBefore?: N[] | All;
+ /** An optional list of node names to interrupt after running. */
  interruptAfter?: N[] | All;
+ store?: BaseStore;
  };
  /**
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
- * @param params.llm The chat model that can utilize OpenAI-style tool calling.
- * @param params.tools A list of tools or a ToolNode.
- * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
- * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
- * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
- * @param params.interruptBefore An optional list of node names to interrupt before running.
- * @param params.interruptAfter An optional list of node names to interrupt after running.
- * @returns A prebuilt compiled graph.
  *
  * @example
  * ```ts
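
Putting the new typings together, here is a hedged usage sketch of `createReactAgent` with a `stateModifier`; the model name, system prompt, and thread id are placeholders, and `store` is shown commented out since any `BaseStore` implementation can be passed.

```ts
import { ChatOpenAI } from "@langchain/openai";
import { SystemMessage } from "@langchain/core/messages";
import { MemorySaver, MessagesAnnotation } from "@langchain/langgraph";
import { createReactAgent } from "@langchain/langgraph/prebuilt";

const model = new ChatOpenAI({ model: "gpt-4o-mini" });
const tools = [/* ... your StructuredTool instances ... */];

const agent = createReactAgent({
  llm: model,
  tools,
  // Runs on the FULL graph state right before each model call and returns the
  // message list to send to the model (string / SystemMessage forms also work).
  stateModifier: async (state: typeof MessagesAnnotation.State) => {
    return [new SystemMessage("You are a pirate."), ...state.messages];
  },
  checkpointSaver: new MemorySaver(),
  // store: someBaseStoreInstance, // new in this release: forwarded to compile()
});

// await agent.invoke(
//   { messages: [{ role: "user", content: "ahoy" }] },
//   { configurable: { thread_id: "1" } }
// );
```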
@@ -1,19 +1,69 @@
- import { isAIMessage, SystemMessage, } from "@langchain/core/messages";
+ import { isAIMessage, isBaseMessage, SystemMessage, } from "@langchain/core/messages";
  import { Runnable, RunnableLambda, } from "@langchain/core/runnables";
- import { ChatPromptTemplate } from "@langchain/core/prompts";
  import { END, START, StateGraph } from "../graph/index.js";
  import { MessagesAnnotation } from "../graph/messages_annotation.js";
  import { ToolNode } from "./tool_node.js";
+ function _convertMessageModifierToStateModifier(messageModifier) {
+ // Handle string or SystemMessage
+ if (typeof messageModifier === "string" ||
+ (isBaseMessage(messageModifier) && messageModifier._getType() === "system")) {
+ return messageModifier;
+ }
+ // Handle callable function
+ if (typeof messageModifier === "function") {
+ return async (state) => messageModifier(state.messages);
+ }
+ // Handle Runnable
+ if (Runnable.isRunnable(messageModifier)) {
+ return RunnableLambda.from((state) => state.messages).pipe(messageModifier);
+ }
+ throw new Error(`Unexpected type for messageModifier: ${typeof messageModifier}`);
+ }
+ function _getStateModifierRunnable(stateModifier) {
+ let stateModifierRunnable;
+ if (stateModifier == null) {
+ stateModifierRunnable = RunnableLambda.from((state) => state.messages).withConfig({ runName: "state_modifier" });
+ }
+ else if (typeof stateModifier === "string") {
+ const systemMessage = new SystemMessage(stateModifier);
+ stateModifierRunnable = RunnableLambda.from((state) => {
+ return [systemMessage, ...(state.messages ?? [])];
+ }).withConfig({ runName: "state_modifier" });
+ }
+ else if (isBaseMessage(stateModifier) &&
+ stateModifier._getType() === "system") {
+ stateModifierRunnable = RunnableLambda.from((state) => [
+ stateModifier,
+ ...state.messages,
+ ]).withConfig({ runName: "state_modifier" });
+ }
+ else if (typeof stateModifier === "function") {
+ stateModifierRunnable = RunnableLambda.from(stateModifier).withConfig({
+ runName: "state_modifier",
+ });
+ }
+ else if (Runnable.isRunnable(stateModifier)) {
+ stateModifierRunnable = stateModifier;
+ }
+ else {
+ throw new Error(`Got unexpected type for 'stateModifier': ${typeof stateModifier}`);
+ }
+ return stateModifierRunnable;
+ }
+ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
+ // Check if both modifiers exist
+ if (stateModifier != null && messageModifier != null) {
+ throw new Error("Expected value for either stateModifier or messageModifier, got values for both");
+ }
+ // Convert message modifier to state modifier if necessary
+ if (stateModifier == null && messageModifier != null) {
+ // eslint-disable-next-line no-param-reassign
+ stateModifier = _convertMessageModifierToStateModifier(messageModifier);
+ }
+ return _getStateModifierRunnable(stateModifier);
+ }
  /**
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
- * @param params.llm The chat model that can utilize OpenAI-style tool calling.
- * @param params.tools A list of tools or a ToolNode.
- * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
- * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
- * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
- * @param params.interruptBefore An optional list of node names to interrupt before running.
- * @param params.interruptAfter An optional list of node names to interrupt after running.
- * @returns A prebuilt compiled graph.
  *
  * @example
  * ```ts
@@ -55,7 +105,7 @@ import { ToolNode } from "./tool_node.js";
  * ```
  */
  export function createReactAgent(params) {
- const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } = params;
+ const { llm, tools, messageModifier, stateModifier, checkpointSaver, interruptBefore, interruptAfter, store, } = params;
  let toolClasses;
  if (!Array.isArray(tools)) {
  toolClasses = tools.tools;
@@ -67,7 +117,9 @@ export function createReactAgent(params) {
  throw new Error(`llm ${llm} must define bindTools method.`);
  }
  const modelWithTools = llm.bindTools(toolClasses);
- const modelRunnable = _createModelWrapper(modelWithTools, messageModifier);
+ // we're passing store here for validation
+ const preprocessor = _getModelPreprocessingRunnable(stateModifier, messageModifier);
+ const modelRunnable = preprocessor.pipe(modelWithTools);
  const shouldContinue = (state) => {
  const { messages } = state;
  const lastMessage = messages[messages.length - 1];
@@ -80,9 +132,8 @@ export function createReactAgent(params) {
  }
  };
  const callModel = async (state, config) => {
- const { messages } = state;
  // TODO: Auto-promote streaming.
- return { messages: [await modelRunnable.invoke(messages, config)] };
+ return { messages: [await modelRunnable.invoke(state, config)] };
  };
  const workflow = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
@@ -97,38 +148,6 @@ export function createReactAgent(params) {
  checkpointer: checkpointSaver,
  interruptBefore,
  interruptAfter,
+ store,
  });
  }
- function _createModelWrapper(modelWithTools, messageModifier) {
- if (!messageModifier) {
- return modelWithTools;
- }
- const endict = RunnableLambda.from((messages) => ({
- messages,
- }));
- if (typeof messageModifier === "string") {
- const systemMessage = new SystemMessage(messageModifier);
- const prompt = ChatPromptTemplate.fromMessages([
- systemMessage,
- ["placeholder", "{messages}"],
- ]);
- return endict.pipe(prompt).pipe(modelWithTools);
- }
- if (typeof messageModifier === "function") {
- const lambda = RunnableLambda.from(messageModifier).withConfig({
- runName: "message_modifier",
- });
- return lambda.pipe(modelWithTools);
- }
- if (Runnable.isRunnable(messageModifier)) {
- return messageModifier.pipe(modelWithTools);
- }
- if (messageModifier._getType() === "system") {
- const prompt = ChatPromptTemplate.fromMessages([
- messageModifier,
- ["placeholder", "{messages}"],
- ]);
- return endict.pipe(prompt).pipe(modelWithTools);
- }
- throw new Error(`Unsupported message modifier type: ${typeof messageModifier}`);
- }
@@ -4,6 +4,7 @@ exports.toolsCondition = exports.ToolNode = void 0;
  const messages_1 = require("@langchain/core/messages");
  const utils_js_1 = require("../utils.cjs");
  const graph_js_1 = require("../graph/graph.cjs");
+ const errors_js_1 = require("../errors.cjs");
  /**
  * A node that runs the tools requested in the last AIMessage. It can be used
  * either in StateGraph with a "messages" key or in MessageGraph. If multiple
@@ -178,6 +179,12 @@ class ToolNode extends utils_js_1.RunnableCallable {
  if (!this.handleToolErrors) {
  throw e;
  }
+ if ((0, errors_js_1.isGraphInterrupt)(e.name)) {
+ // `NodeInterrupt` errors are a breakpoint to bring a human into the loop.
+ // As such, they are not recoverable by the agent and shouldn't be fed
+ // back. Instead, re-throw these errors even when `handleToolErrors = true`.
+ throw e;
+ }
  return new messages_1.ToolMessage({
  content: `Error: ${e.message}\n Please fix your mistakes.`,
  name: call.name,
@@ -1,6 +1,7 @@
  import { ToolMessage, isBaseMessage, } from "@langchain/core/messages";
  import { RunnableCallable } from "../utils.js";
  import { END } from "../graph/graph.js";
+ import { isGraphInterrupt } from "../errors.js";
  /**
  * A node that runs the tools requested in the last AIMessage. It can be used
  * either in StateGraph with a "messages" key or in MessageGraph. If multiple
@@ -175,6 +176,12 @@ export class ToolNode extends RunnableCallable {
  if (!this.handleToolErrors) {
  throw e;
  }
+ if (isGraphInterrupt(e.name)) {
+ // `NodeInterrupt` errors are a breakpoint to bring a human into the loop.
+ // As such, they are not recoverable by the agent and shouldn't be fed
+ // back. Instead, re-throw these errors even when `handleToolErrors = true`.
+ throw e;
+ }
  return new ToolMessage({
  content: `Error: ${e.message}\n Please fix your mistakes.`,
  name: call.name,
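
The `isGraphInterrupt` guard added to `ToolNode` means graph interrupts raised inside a tool are re-thrown instead of being converted into an error `ToolMessage`. A hedged sketch of what that looks like from user code; the tool name, schema, and threshold are invented for illustration.

```ts
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { NodeInterrupt } from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";

// A tool that pauses the graph for human review instead of failing outright.
const sensitiveTool = tool(
  async ({ amount }) => {
    if (amount > 1000) {
      // With this release, ToolNode re-throws graph interrupts even though
      // handleToolErrors defaults to true, so the run pauses at a breakpoint
      // rather than feeding an error ToolMessage back to the model.
      throw new NodeInterrupt(`Approval required for amount ${amount}`);
    }
    return `Transferred ${amount}`;
  },
  {
    name: "transfer_funds",
    description: "Transfer an amount of money.",
    schema: z.object({ amount: z.number() }),
  }
);

const toolNode = new ToolNode([sensitiveTool]);
// Ordinary tool errors are still converted into ToolMessages unless you opt out:
// const strictToolNode = new ToolNode([sensitiveTool], { handleToolErrors: false });
```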