@langchain/langgraph 0.2.39 → 0.2.41

@@ -3,3 +3,4 @@ export { type FunctionCallingExecutorState, createFunctionCallingExecutor, } fro
  export { type AgentState, type CreateReactAgentParams, createReactAgent, } from "./react_agent_executor.js";
  export { type ToolExecutorArgs, type ToolInvocationInterface, ToolExecutor, } from "./tool_executor.js";
  export { ToolNode, toolsCondition, type ToolNodeOptions } from "./tool_node.js";
+ export type { HumanInterruptConfig, ActionRequest, HumanInterrupt, HumanResponse, } from "./interrupt.js";
@@ -0,0 +1,2 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,56 @@
+ /**
+ * Configuration interface that defines what actions are allowed for a human interrupt.
+ * This controls the available interaction options when the graph is paused for human input.
+ *
+ * @property {boolean} allow_ignore - Whether the human can choose to ignore/skip the current step
+ * @property {boolean} allow_respond - Whether the human can provide a text response/feedback
+ * @property {boolean} allow_edit - Whether the human can edit the provided content/state
+ * @property {boolean} allow_accept - Whether the human can accept/approve the current state
+ */
+ export interface HumanInterruptConfig {
+ allow_ignore: boolean;
+ allow_respond: boolean;
+ allow_edit: boolean;
+ allow_accept: boolean;
+ }
+ /**
+ * Represents a request for human action within the graph execution.
+ * Contains the action type and any associated arguments needed for the action.
+ *
+ * @property {string} action - The type or name of action being requested (e.g., "Approve XYZ action")
+ * @property {Record<string, any>} args - Key-value pairs of arguments needed for the action
+ */
+ export interface ActionRequest {
+ action: string;
+ args: Record<string, any>;
+ }
+ /**
+ * Represents an interrupt triggered by the graph that requires human intervention.
+ * This is passed to the `interrupt` function when execution is paused for human input.
+ *
+ * @property {ActionRequest} action_request - The specific action being requested from the human
+ * @property {HumanInterruptConfig} config - Configuration defining what actions are allowed
+ * @property {string} [description] - Optional detailed description of what input is needed
+ */
+ export interface HumanInterrupt {
+ action_request: ActionRequest;
+ config: HumanInterruptConfig;
+ description?: string;
+ }
+ /**
+ * The response provided by a human to an interrupt, which is returned when graph execution resumes.
+ *
+ * @property {("accept"|"ignore"|"response"|"edit")} type - The type of response:
+ * - "accept": Approves the current state without changes
+ * - "ignore": Skips/ignores the current step
+ * - "response": Provides text feedback or instructions
+ * - "edit": Modifies the current state/content
+ * @property {null|string|ActionRequest} args - The response payload:
+ * - null: For ignore/accept actions
+ * - string: For text responses
+ * - ActionRequest: For edit actions with updated content
+ */
+ export type HumanResponse = {
+ type: "accept" | "ignore" | "response" | "edit";
+ args: null | string | ActionRequest;
+ };
@@ -0,0 +1 @@
+ export {};
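
These new prebuilt types describe the payload handed to LangGraph's `interrupt` function when a graph pauses for human review. A minimal usage sketch follows; the action name, arguments, and resume handling are illustrative assumptions, not part of this diff:

import { interrupt } from "@langchain/langgraph";
import type { HumanInterrupt, HumanResponse } from "@langchain/langgraph/prebuilt";

// Inside a graph node: pause the run and wait for a human decision.
const reviewNode = async () => {
  const request: HumanInterrupt = {
    action_request: { action: "Approve tool call", args: { tool: "send_email" } },
    config: { allow_ignore: true, allow_respond: true, allow_edit: false, allow_accept: true },
    description: "Please review the pending email before it is sent.",
  };
  // interrupt() suspends execution; the value supplied on resume comes back here.
  const response = interrupt(request) as HumanResponse;
  if (response.type === "accept") {
    return {};
  }
  // ... handle "ignore" / "response" / "edit" as needed
  return {};
};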
@@ -1,11 +1,12 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.createReactAgent = void 0;
+ exports.createReactAgent = exports.createReactAgentAnnotation = void 0;
  const messages_1 = require("@langchain/core/messages");
  const runnables_1 = require("@langchain/core/runnables");
  const index_js_1 = require("../graph/index.cjs");
- const messages_annotation_js_1 = require("../graph/messages_annotation.cjs");
  const tool_node_js_1 = require("./tool_node.cjs");
+ const annotation_js_1 = require("../graph/annotation.cjs");
+ const message_js_1 = require("../graph/message.cjs");
  function _convertMessageModifierToStateModifier(messageModifier) {
  // Handle string or SystemMessage
  if (typeof messageModifier === "string" ||
@@ -65,6 +66,14 @@ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
  }
  return _getStateModifierRunnable(stateModifier);
  }
+ const createReactAgentAnnotation = () => annotation_js_1.Annotation.Root({
+ messages: (0, annotation_js_1.Annotation)({
+ reducer: message_js_1.messagesStateReducer,
+ default: () => [],
+ }),
+ structuredResponse: (annotation_js_1.Annotation),
+ });
+ exports.createReactAgentAnnotation = createReactAgentAnnotation;
  /**
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
  *
@@ -108,7 +117,7 @@ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
  * ```
  */
  function createReactAgent(params) {
- const { llm, tools, messageModifier, stateModifier, stateSchema, checkpointSaver, interruptBefore, interruptAfter, store, } = params;
+ const { llm, tools, messageModifier, stateModifier, stateSchema, checkpointSaver, interruptBefore, interruptAfter, store, responseFormat, } = params;
  let toolClasses;
  if (!Array.isArray(tools)) {
  toolClasses = tools.tools;
@@ -128,25 +137,58 @@ function createReactAgent(params) {
  const lastMessage = messages[messages.length - 1];
  if ((0, messages_1.isAIMessage)(lastMessage) &&
  (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0)) {
- return index_js_1.END;
+ return responseFormat != null ? "generate_structured_response" : index_js_1.END;
  }
  else {
  return "continue";
  }
  };
+ const generateStructuredResponse = async (state, config) => {
+ if (responseFormat == null) {
+ throw new Error("Attempted to generate structured output with no passed response schema. Please contact us for help.");
+ }
+ // Exclude the last message as there's enough information
+ // for the LLM to generate the structured response
+ const messages = state.messages.slice(0, -1);
+ let modelWithStructuredOutput;
+ if (typeof responseFormat === "object" &&
+ "prompt" in responseFormat &&
+ "schema" in responseFormat) {
+ const { prompt, schema } = responseFormat;
+ modelWithStructuredOutput = llm.withStructuredOutput(schema);
+ messages.unshift(new messages_1.SystemMessage({ content: prompt }));
+ }
+ else {
+ modelWithStructuredOutput = llm.withStructuredOutput(responseFormat);
+ }
+ const response = await modelWithStructuredOutput.invoke(messages, config);
+ return { structuredResponse: response };
+ };
  const callModel = async (state, config) => {
  // TODO: Auto-promote streaming.
  return { messages: [await modelRunnable.invoke(state, config)] };
  };
- const workflow = new index_js_1.StateGraph(stateSchema ?? messages_annotation_js_1.MessagesAnnotation)
+ const workflow = new index_js_1.StateGraph(stateSchema ?? (0, exports.createReactAgentAnnotation)())
  .addNode("agent", callModel)
  .addNode("tools", new tool_node_js_1.ToolNode(toolClasses))
  .addEdge(index_js_1.START, "agent")
- .addConditionalEdges("agent", shouldContinue, {
- continue: "tools",
- [index_js_1.END]: index_js_1.END,
- })
  .addEdge("tools", "agent");
+ if (responseFormat !== undefined) {
+ workflow
+ .addNode("generate_structured_response", generateStructuredResponse)
+ .addEdge("generate_structured_response", index_js_1.END)
+ .addConditionalEdges("agent", shouldContinue, {
+ continue: "tools",
+ [index_js_1.END]: index_js_1.END,
+ generate_structured_response: "generate_structured_response",
+ });
+ }
+ else {
+ workflow.addConditionalEdges("agent", shouldContinue, {
+ continue: "tools",
+ [index_js_1.END]: index_js_1.END,
+ });
+ }
  return workflow.compile({
  checkpointer: checkpointSaver,
  interruptBefore,
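
Read through the CommonJS interop helpers such as `(0, annotation_js_1.Annotation)`, the new default annotation amounts to roughly the following source-level sketch, assuming the publicly exported `Annotation` and `messagesStateReducer` helpers (type parameters simplified):

import { Annotation, messagesStateReducer } from "@langchain/langgraph";
import { BaseMessage } from "@langchain/core/messages";

// Default agent state: a reducer-backed "messages" channel plus a last-value
// "structuredResponse" channel, written only when responseFormat is supplied.
const createReactAgentAnnotation = () =>
  Annotation.Root({
    messages: Annotation<BaseMessage[]>({
      reducer: messagesStateReducer,
      default: () => [],
    }),
    structuredResponse: Annotation<Record<string, any>>,
  });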
@@ -3,18 +3,33 @@ import { BaseMessage, BaseMessageLike, SystemMessage } from "@langchain/core/mes
  import { Runnable, RunnableToolLike } from "@langchain/core/runnables";
  import { StructuredToolInterface } from "@langchain/core/tools";
  import { All, BaseCheckpointSaver, BaseStore } from "@langchain/langgraph-checkpoint";
+ import { z } from "zod";
  import { START, CompiledStateGraph, AnnotationRoot } from "../graph/index.js";
  import { MessagesAnnotation } from "../graph/messages_annotation.js";
  import { ToolNode } from "./tool_node.js";
  import { LangGraphRunnableConfig } from "../pregel/runnable_types.js";
- export interface AgentState {
+ import { Messages } from "../graph/message.js";
+ export interface AgentState<StructuredResponseType extends Record<string, any> = Record<string, any>> {
  messages: BaseMessage[];
+ structuredResponse: StructuredResponseType;
  }
  export type N = typeof START | "agent" | "tools";
+ export type StructuredResponseSchemaAndPrompt<StructuredResponseType> = {
+ prompt: string;
+ schema: z.ZodType<StructuredResponseType> | Record<string, any>;
+ };
  export type StateModifier = SystemMessage | string | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => BaseMessageLike[]) | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => Promise<BaseMessageLike[]>) | Runnable;
  /** @deprecated Use StateModifier instead. */
  export type MessageModifier = SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;
- export type CreateReactAgentParams<A extends AnnotationRoot<any> = AnnotationRoot<any>> = {
+ export declare const createReactAgentAnnotation: <T extends Record<string, any> = Record<string, any>>() => AnnotationRoot<{
+ messages: import("../web.js").BinaryOperatorAggregate<BaseMessage[], Messages>;
+ structuredResponse: {
+ (): import("../web.js").LastValue<T>;
+ (annotation: import("../graph/annotation.js").SingleReducer<T, T>): import("../web.js").BinaryOperatorAggregate<T, T>;
+ Root: <S extends import("../graph/annotation.js").StateDefinition>(sd: S) => AnnotationRoot<S>;
+ };
+ }>;
+ export type CreateReactAgentParams<A extends AnnotationRoot<any> = AnnotationRoot<any>, StructuredResponseType = Record<string, any>> = {
  /** The chat model that can utilize OpenAI-style tool calling. */
  llm: BaseChatModel;
  /** A list of tools or a ToolNode. */
@@ -81,6 +96,19 @@ export type CreateReactAgentParams<A extends AnnotationRoot<any> = AnnotationRoo
  /** An optional list of node names to interrupt after running. */
  interruptAfter?: N[] | All;
  store?: BaseStore;
+ /**
+ * An optional schema for the final agent output.
+ *
+ * If provided, output will be formatted to match the given schema and returned in the 'structured_response' state key.
+ * If not provided, `structured_response` will not be present in the output state.
+ *
+ * Can be passed in as:
+ * - Zod schema
+ * - Dictionary object
+ * - [prompt, schema], where schema is one of the above.
+ * The prompt will be used together with the model that is being used to generate the structured response.
+ */
+ responseFormat?: z.ZodType<StructuredResponseType> | StructuredResponseSchemaAndPrompt<StructuredResponseType> | Record<string, any>;
  };
  /**
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
@@ -124,4 +152,4 @@ export type CreateReactAgentParams<A extends AnnotationRoot<any> = AnnotationRoo
  * // Returns the messages in the state at each step of execution
  * ```
  */
- export declare function createReactAgent<A extends AnnotationRoot<any> = AnnotationRoot<any>>(params: CreateReactAgentParams<A>): CompiledStateGraph<(typeof MessagesAnnotation)["State"], (typeof MessagesAnnotation)["Update"], typeof START | "agent" | "tools", typeof MessagesAnnotation.spec & A["spec"], typeof MessagesAnnotation.spec & A["spec"]>;
+ export declare function createReactAgent<A extends AnnotationRoot<any> = AnnotationRoot<{}>, StructuredResponseFormat extends Record<string, any> = Record<string, any>>(params: CreateReactAgentParams<A, StructuredResponseFormat>): CompiledStateGraph<(typeof MessagesAnnotation)["State"], (typeof MessagesAnnotation)["Update"], any, typeof MessagesAnnotation.spec & A["spec"], ReturnType<typeof createReactAgentAnnotation<StructuredResponseFormat>>["spec"] & A["spec"]>;
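
A brief usage sketch of the new `responseFormat` option with a Zod schema; the model, tool, and prompt text below are placeholder assumptions rather than anything specified by this diff:

import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { createReactAgent } from "@langchain/langgraph/prebuilt";

// Hypothetical tool and output schema, purely for illustration.
const getWeather = tool(async ({ city }) => `It is sunny in ${city}.`, {
  name: "get_weather",
  description: "Look up the weather for a city.",
  schema: z.object({ city: z.string() }),
});

const WeatherReport = z.object({
  city: z.string(),
  conditions: z.string(),
});

const agent = createReactAgent({
  llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
  tools: [getWeather],
  // After the ReAct loop ends, one extra structured-output call fills this in.
  responseFormat: WeatherReport,
});

const result = await agent.invoke({
  messages: [new HumanMessage("What's the weather in Paris?")],
});
// The parsed object is returned on the structuredResponse key of the final state.
console.log(result.structuredResponse);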
@@ -1,8 +1,9 @@
  import { isAIMessage, isBaseMessage, SystemMessage, } from "@langchain/core/messages";
  import { Runnable, RunnableLambda, } from "@langchain/core/runnables";
  import { END, START, StateGraph, } from "../graph/index.js";
- import { MessagesAnnotation } from "../graph/messages_annotation.js";
  import { ToolNode } from "./tool_node.js";
+ import { Annotation } from "../graph/annotation.js";
+ import { messagesStateReducer } from "../graph/message.js";
  function _convertMessageModifierToStateModifier(messageModifier) {
  // Handle string or SystemMessage
  if (typeof messageModifier === "string" ||
@@ -62,6 +63,13 @@ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
  }
  return _getStateModifierRunnable(stateModifier);
  }
+ export const createReactAgentAnnotation = () => Annotation.Root({
+ messages: Annotation({
+ reducer: messagesStateReducer,
+ default: () => [],
+ }),
+ structuredResponse: (Annotation),
+ });
  /**
  * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
  *
@@ -105,7 +113,7 @@ function _getModelPreprocessingRunnable(stateModifier, messageModifier) {
  * ```
  */
  export function createReactAgent(params) {
- const { llm, tools, messageModifier, stateModifier, stateSchema, checkpointSaver, interruptBefore, interruptAfter, store, } = params;
+ const { llm, tools, messageModifier, stateModifier, stateSchema, checkpointSaver, interruptBefore, interruptAfter, store, responseFormat, } = params;
  let toolClasses;
  if (!Array.isArray(tools)) {
  toolClasses = tools.tools;
@@ -125,25 +133,58 @@ export function createReactAgent(params) {
  const lastMessage = messages[messages.length - 1];
  if (isAIMessage(lastMessage) &&
  (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0)) {
- return END;
+ return responseFormat != null ? "generate_structured_response" : END;
  }
  else {
  return "continue";
  }
  };
+ const generateStructuredResponse = async (state, config) => {
+ if (responseFormat == null) {
+ throw new Error("Attempted to generate structured output with no passed response schema. Please contact us for help.");
+ }
+ // Exclude the last message as there's enough information
+ // for the LLM to generate the structured response
+ const messages = state.messages.slice(0, -1);
+ let modelWithStructuredOutput;
+ if (typeof responseFormat === "object" &&
+ "prompt" in responseFormat &&
+ "schema" in responseFormat) {
+ const { prompt, schema } = responseFormat;
+ modelWithStructuredOutput = llm.withStructuredOutput(schema);
+ messages.unshift(new SystemMessage({ content: prompt }));
+ }
+ else {
+ modelWithStructuredOutput = llm.withStructuredOutput(responseFormat);
+ }
+ const response = await modelWithStructuredOutput.invoke(messages, config);
+ return { structuredResponse: response };
+ };
  const callModel = async (state, config) => {
  // TODO: Auto-promote streaming.
  return { messages: [await modelRunnable.invoke(state, config)] };
  };
- const workflow = new StateGraph(stateSchema ?? MessagesAnnotation)
+ const workflow = new StateGraph(stateSchema ?? createReactAgentAnnotation())
  .addNode("agent", callModel)
  .addNode("tools", new ToolNode(toolClasses))
  .addEdge(START, "agent")
- .addConditionalEdges("agent", shouldContinue, {
- continue: "tools",
- [END]: END,
- })
  .addEdge("tools", "agent");
+ if (responseFormat !== undefined) {
+ workflow
+ .addNode("generate_structured_response", generateStructuredResponse)
+ .addEdge("generate_structured_response", END)
+ .addConditionalEdges("agent", shouldContinue, {
+ continue: "tools",
+ [END]: END,
+ generate_structured_response: "generate_structured_response",
+ });
+ }
+ else {
+ workflow.addConditionalEdges("agent", shouldContinue, {
+ continue: "tools",
+ [END]: END,
+ });
+ }
  return workflow.compile({
  checkpointer: checkpointSaver,
  interruptBefore,
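
The branch above that checks for `prompt` and `schema` handles the object form of `responseFormat`, where the prompt is prepended as a SystemMessage before the structured-output call. A sketch reusing the placeholder llm, tool, and schema from the earlier example:

const agentWithPrompt = createReactAgent({
  llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
  tools: [getWeather],
  responseFormat: {
    // Prepended as a SystemMessage for the final structured-output call.
    prompt: "Summarize the conversation as a weather report.",
    schema: WeatherReport,
  },
});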
@@ -94,8 +94,15 @@ function* mapCommand(cmd, pendingWrites) {
  if (typeof cmd.update !== "object" || !cmd.update) {
  throw new Error("Expected cmd.update to be a dict mapping channel names to update values");
  }
- for (const [k, v] of Object.entries(cmd.update)) {
- yield [constants_js_1.NULL_TASK_ID, k, v];
+ if (Array.isArray(cmd.update)) {
+ for (const [k, v] of cmd.update) {
+ yield [constants_js_1.NULL_TASK_ID, k, v];
+ }
+ }
+ else {
+ for (const [k, v] of Object.entries(cmd.update)) {
+ yield [constants_js_1.NULL_TASK_ID, k, v];
+ }
  }
  }
  }
package/dist/pregel/io.js CHANGED
@@ -89,8 +89,15 @@ export function* mapCommand(cmd, pendingWrites) {
  if (typeof cmd.update !== "object" || !cmd.update) {
  throw new Error("Expected cmd.update to be a dict mapping channel names to update values");
  }
- for (const [k, v] of Object.entries(cmd.update)) {
- yield [NULL_TASK_ID, k, v];
+ if (Array.isArray(cmd.update)) {
+ for (const [k, v] of cmd.update) {
+ yield [NULL_TASK_ID, k, v];
+ }
+ }
+ else {
+ for (const [k, v] of Object.entries(cmd.update)) {
+ yield [NULL_TASK_ID, k, v];
+ }
  }
  }
  }
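
With this change, `mapCommand` accepts `Command.update` either as a plain object or as a list of [channel, value] tuples, so a single command can write the same channel more than once. A hedged sketch (message contents are illustrative; the tuple form is what the new `Array.isArray` branch handles, and published type declarations may lag the runtime support):

import { Command } from "@langchain/langgraph";
import { AIMessage, HumanMessage } from "@langchain/core/messages";

// Object form: one update value per channel.
const objectUpdate = new Command({
  update: { messages: [new HumanMessage("hi")] },
});

// Tuple form: the "messages" channel receives two separate writes.
const tupleUpdate = new Command({
  update: [
    ["messages", new HumanMessage("hi")],
    ["messages", new AIMessage("Hello! How can I help?")],
  ],
});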
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@langchain/langgraph",
- "version": "0.2.39",
+ "version": "0.2.41",
  "description": "LangGraph",
  "type": "module",
  "engines": {
@@ -72,7 +72,7 @@
  "pg": "^8.13.0",
  "prettier": "^2.8.3",
  "release-it": "^17.6.0",
- "rollup": "^4.24.1",
+ "rollup": "^4.29.0",
  "ts-jest": "^29.1.0",
  "tsx": "^4.7.0",
  "typescript": "^4.9.5 || ^5.4.5",