@langchain/langgraph 0.2.45 → 0.2.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,7 +5,7 @@ import { ToolNode } from "./tool_node.js";
5
5
  import { Annotation } from "../graph/annotation.js";
6
6
  import { messagesStateReducer } from "../graph/message.js";
7
7
  import { END, START } from "../constants.js";
8
- function _convertMessageModifierToStateModifier(messageModifier) {
8
+ function _convertMessageModifierToPrompt(messageModifier) {
9
9
  // Handle string or SystemMessage
10
10
  if (typeof messageModifier === "string" ||
11
11
  (isBaseMessage(messageModifier) && messageModifier._getType() === "system")) {
@@ -21,48 +21,99 @@ function _convertMessageModifierToStateModifier(messageModifier) {
21
21
  }
22
22
  throw new Error(`Unexpected type for messageModifier: ${typeof messageModifier}`);
23
23
  }
24
const PROMPT_RUNNABLE_NAME = "prompt";
/**
 * Normalize any supported `prompt` form into a Runnable that maps the graph
 * state to the message list handed to the model.
 *
 * Accepted forms:
 *   - null/undefined: pass `state.messages` through unchanged
 *   - string: prepend it as a SystemMessage
 *   - SystemMessage instance: prepend it
 *   - function: wrapped as a RunnableLambda
 *   - Runnable: used as-is
 *
 * @param prompt - The prompt in any of the supported forms.
 * @returns A Runnable named "prompt" producing the model input messages.
 * @throws {Error} If `prompt` is of an unsupported type.
 */
function _getPromptRunnable(prompt) {
    // Guard-clause style: each supported form returns immediately.
    if (prompt == null) {
        return RunnableLambda.from((state) => state.messages).withConfig({
            runName: PROMPT_RUNNABLE_NAME,
        });
    }
    if (typeof prompt === "string") {
        const systemMessage = new SystemMessage(prompt);
        return RunnableLambda.from((state) => {
            return [systemMessage, ...(state.messages ?? [])];
        }).withConfig({ runName: PROMPT_RUNNABLE_NAME });
    }
    if (isBaseMessage(prompt) && prompt._getType() === "system") {
        return RunnableLambda.from((state) => [prompt, ...state.messages]).withConfig({
            runName: PROMPT_RUNNABLE_NAME,
        });
    }
    if (typeof prompt === "function") {
        return RunnableLambda.from(prompt).withConfig({
            runName: PROMPT_RUNNABLE_NAME,
        });
    }
    if (Runnable.isRunnable(prompt)) {
        return prompt;
    }
    throw new Error(`Got unexpected type for 'prompt': ${typeof prompt}`);
}
52
/**
 * Resolve the single effective prompt from the three mutually-exclusive
 * legacy/new parameters and convert it into a prompt Runnable.
 *
 * Precedence when exactly one is given: `stateModifier` and `messageModifier`
 * are treated as aliases for `prompt` (`messageModifier` is first converted).
 *
 * @param prompt - New-style prompt (string, SystemMessage, function, Runnable, or null).
 * @param stateModifier - Deprecated alias for `prompt`.
 * @param messageModifier - Deprecated message-only modifier.
 * @returns A Runnable producing the model input messages.
 * @throws {Error} If more than one of the three parameters is non-null.
 */
function _getPrompt(prompt, stateModifier, messageModifier) {
    // Reject ambiguous input: at most one of the three may be provided.
    const provided = [prompt, stateModifier, messageModifier].filter((value) => value != null);
    if (provided.length > 1) {
        throw new Error("Expected only one of prompt, stateModifier, or messageModifier, got multiple values");
    }
    if (stateModifier != null) {
        return _getPromptRunnable(stateModifier);
    }
    if (messageModifier != null) {
        return _getPromptRunnable(_convertMessageModifierToPrompt(messageModifier));
    }
    return _getPromptRunnable(prompt);
}
67
/**
 * Decide whether `createReactAgent` must call `llm.bindTools(tools)` itself,
 * or whether the caller already bound tools (e.g. via `llm.bindTools(...)`).
 *
 * Returns true when `llm` is not a RunnableBinding-like object with bound
 * tools in its kwargs. When tools ARE already bound, validates that they
 * match the `tools` argument exactly and returns false.
 *
 * @param llm - The chat model or RunnableBinding passed to createReactAgent.
 * @param tools - Tool instances passed to createReactAgent.
 * @returns true if tools still need to be bound, false if already bound.
 * @throws {Error} If the pre-bound tools differ in count or names from `tools`.
 */
function _shouldBindTools(llm, tools) {
    // Not a binding with kwargs → nothing pre-bound; we must bind.
    if (!Runnable.isRunnable(llm) || !("kwargs" in llm)) {
        return true;
    }
    if (!llm.kwargs ||
        typeof llm.kwargs !== "object" ||
        !("tools" in llm.kwargs)) {
        return true;
    }
    const boundTools = llm.kwargs.tools;
    if (tools.length !== boundTools.length) {
        throw new Error("Number of tools in the model.bindTools() and tools passed to createReactAgent must match");
    }
    const toolNames = new Set(tools.map((tool) => tool.name));
    const boundToolNames = new Set();
    for (const boundTool of boundTools) {
        let boundToolName;
        // OpenAI-style tool
        if ("type" in boundTool && boundTool.type === "function") {
            boundToolName = boundTool.function.name;
        }
        // Anthropic-style tool
        else if ("name" in boundTool) {
            boundToolName = boundTool.name;
        }
        // unknown tool type so we'll ignore it
        else {
            continue;
        }
        boundToolNames.add(boundToolName);
    }
    const missingTools = [...toolNames].filter((x) => !boundToolNames.has(x));
    if (missingTools.length > 0) {
        // Fix: join names readably and add the missing space between the two
        // sentences (previously rendered as "...bindTools().Tools in...").
        throw new Error(`Missing tools '${missingTools.join(", ")}' in the model.bindTools(). ` +
            `Tools in the model.bindTools() must match the tools passed to createReactAgent.`);
    }
    return false;
}
55
/**
 * Unwrap a RunnableBinding to its underlying chat model, validating that the
 * result looks like a chat model (has `invoke` and `_modelType`).
 *
 * @param llm - A chat model, or a RunnableBinding wrapping one.
 * @returns The underlying chat model.
 * @throws {Error} If the unwrapped value does not look like a chat model.
 */
function _getModel(llm) {
    // A RunnableBinding keeps the wrapped model on `.bound`; otherwise use llm directly.
    const model = Runnable.isRunnable(llm) && "bound" in llm ? llm.bound : llm;
    const looksLikeChatModel = "invoke" in model &&
        typeof model.invoke === "function" &&
        "_modelType" in model;
    if (!looksLikeChatModel) {
        throw new Error(`Expected \`llm\` to be a ChatModel or RunnableBinding (e.g. llm.bind_tools(...)) with invoke() and generate() methods, got ${model.constructor.name}`);
    }
    return model;
}
67
118
  export const createReactAgentAnnotation = () => Annotation.Root({
68
119
  messages: Annotation({
@@ -114,7 +165,7 @@ export const createReactAgentAnnotation = () => Annotation.Root({
114
165
  * ```
115
166
  */
116
167
  export function createReactAgent(params) {
117
- const { llm, tools, messageModifier, stateModifier, stateSchema, checkpointSaver, checkpointer, interruptBefore, interruptAfter, store, responseFormat, } = params;
168
+ const { llm, tools, messageModifier, stateModifier, prompt, stateSchema, checkpointSaver, checkpointer, interruptBefore, interruptAfter, store, responseFormat, name, } = params;
118
169
  let toolClasses;
119
170
  if (!Array.isArray(tools)) {
120
171
  toolClasses = tools.tools;
@@ -122,13 +173,17 @@ export function createReactAgent(params) {
122
173
  else {
123
174
  toolClasses = tools;
124
175
  }
125
- if (!("bindTools" in llm) || typeof llm.bindTools !== "function") {
126
- throw new Error(`llm ${llm} must define bindTools method.`);
176
+ let modelWithTools;
177
+ if (_shouldBindTools(llm, toolClasses)) {
178
+ if (!("bindTools" in llm) || typeof llm.bindTools !== "function") {
179
+ throw new Error(`llm ${llm} must define bindTools method.`);
180
+ }
181
+ modelWithTools = llm.bindTools(toolClasses);
182
+ }
183
+ else {
184
+ modelWithTools = llm;
127
185
  }
128
- const modelWithTools = llm.bindTools(toolClasses);
129
- // we're passing store here for validation
130
- const preprocessor = _getModelPreprocessingRunnable(stateModifier, messageModifier);
131
- const modelRunnable = preprocessor.pipe(modelWithTools);
186
+ const modelRunnable = _getPrompt(prompt, stateModifier, messageModifier).pipe(modelWithTools);
132
187
  // If any of the tools are configured to return_directly after running,
133
188
  // our graph needs to check if these were called
134
189
  const shouldReturnDirect = new Set(toolClasses
@@ -157,18 +212,22 @@ export function createReactAgent(params) {
157
212
  "prompt" in responseFormat &&
158
213
  "schema" in responseFormat) {
159
214
  const { prompt, schema } = responseFormat;
160
- modelWithStructuredOutput = llm.withStructuredOutput(schema);
215
+ modelWithStructuredOutput = _getModel(llm).withStructuredOutput(schema);
161
216
  messages.unshift(new SystemMessage({ content: prompt }));
162
217
  }
163
218
  else {
164
- modelWithStructuredOutput = llm.withStructuredOutput(responseFormat);
219
+ modelWithStructuredOutput =
220
+ _getModel(llm).withStructuredOutput(responseFormat);
165
221
  }
166
222
  const response = await modelWithStructuredOutput.invoke(messages, config);
167
223
  return { structuredResponse: response };
168
224
  };
169
225
  const callModel = async (state, config) => {
170
226
  // TODO: Auto-promote streaming.
171
- return { messages: [await modelRunnable.invoke(state, config)] };
227
+ const response = (await modelRunnable.invoke(state, config));
228
+ // add agent name to the AIMessage
229
+ response.name = name;
230
+ return { messages: [response] };
172
231
  };
173
232
  const workflow = new StateGraph(stateSchema ?? createReactAgentAnnotation())
174
233
  .addNode("agent", callModel)
@@ -215,5 +274,6 @@ export function createReactAgent(params) {
215
274
  interruptBefore,
216
275
  interruptAfter,
217
276
  store,
277
+ name,
218
278
  });
219
279
  }