@langchain/langgraph 0.0.17 → 0.0.18
This diff shows the published contents of these two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/dist/prebuilt/index.cjs +3 -1
- package/dist/prebuilt/index.d.ts +1 -0
- package/dist/prebuilt/index.js +1 -0
- package/dist/prebuilt/react_agent_executor.cjs +105 -0
- package/dist/prebuilt/react_agent_executor.d.ts +26 -0
- package/dist/prebuilt/react_agent_executor.js +101 -0
- package/dist/prebuilt/tool_node.d.ts +4 -4
- package/dist/tests/prebuilt.int.test.js +87 -12
- package/dist/tests/prebuilt.test.d.ts +20 -1
- package/dist/tests/prebuilt.test.js +228 -3
- package/package.json +2 -2
package/dist/prebuilt/index.cjs
CHANGED
@@ -1,10 +1,12 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.toolsCondition = exports.ToolNode = exports.ToolExecutor = exports.createFunctionCallingExecutor = exports.createAgentExecutor = void 0;
+exports.toolsCondition = exports.ToolNode = exports.ToolExecutor = exports.createReactAgent = exports.createFunctionCallingExecutor = exports.createAgentExecutor = void 0;
 var agent_executor_js_1 = require("./agent_executor.cjs");
 Object.defineProperty(exports, "createAgentExecutor", { enumerable: true, get: function () { return agent_executor_js_1.createAgentExecutor; } });
 var chat_agent_executor_js_1 = require("./chat_agent_executor.cjs");
 Object.defineProperty(exports, "createFunctionCallingExecutor", { enumerable: true, get: function () { return chat_agent_executor_js_1.createFunctionCallingExecutor; } });
+var react_agent_executor_js_1 = require("./react_agent_executor.cjs");
+Object.defineProperty(exports, "createReactAgent", { enumerable: true, get: function () { return react_agent_executor_js_1.createReactAgent; } });
 var tool_executor_js_1 = require("./tool_executor.cjs");
 Object.defineProperty(exports, "ToolExecutor", { enumerable: true, get: function () { return tool_executor_js_1.ToolExecutor; } });
 var tool_node_js_1 = require("./tool_node.cjs");
package/dist/prebuilt/index.d.ts
CHANGED
@@ -1,4 +1,5 @@
 export { type AgentExecutorState, createAgentExecutor, } from "./agent_executor.js";
 export { type FunctionCallingExecutorState, createFunctionCallingExecutor, } from "./chat_agent_executor.js";
+export { type AgentState, createReactAgent } from "./react_agent_executor.js";
 export { type ToolExecutorArgs, type ToolInvocationInterface, ToolExecutor, } from "./tool_executor.js";
 export { ToolNode, toolsCondition } from "./tool_node.js";
package/dist/prebuilt/index.js
CHANGED
@@ -1,4 +1,5 @@
 export { createAgentExecutor, } from "./agent_executor.js";
 export { createFunctionCallingExecutor, } from "./chat_agent_executor.js";
+export { createReactAgent } from "./react_agent_executor.js";
 export { ToolExecutor, } from "./tool_executor.js";
 export { ToolNode, toolsCondition } from "./tool_node.js";
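Taken together, these index changes surface the new agent alongside the existing prebuilt exports. A minimal import sketch (assuming the @langchain/langgraph/prebuilt entry point that this dist layout is published under):

// Sketch: the new export surface after this release (import path assumed).
import { createReactAgent, type AgentState } from "@langchain/langgraph/prebuilt";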
package/dist/prebuilt/react_agent_executor.cjs
ADDED
@@ -0,0 +1,105 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createReactAgent = void 0;
+const messages_1 = require("@langchain/core/messages");
+const runnables_1 = require("@langchain/core/runnables");
+const prompts_1 = require("@langchain/core/prompts");
+const index_js_1 = require("../graph/index.cjs");
+const tool_node_js_1 = require("./tool_node.cjs");
+/**
+ * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
+ * @param model The chat model that can utilize OpenAI-style function calling.
+ * @param tools A list of tools or a ToolNode.
+ * @param messageModifier An optional message modifier to apply to messages before being passed to the LLM.
+ * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
+ * @param checkpointSaver An optional checkpoint saver to persist the agent's state.
+ * @param interruptBefore An optional list of node names to interrupt before running.
+ * @param interruptAfter An optional list of node names to interrupt after running.
+ * @returns A compiled agent as a LangChain Runnable.
+ */
+function createReactAgent(model, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter) {
+    const schema = {
+        messages: {
+            value: (left, right) => left.concat(right),
+            default: () => [],
+        },
+    };
+    let toolClasses;
+    if (!Array.isArray(tools)) {
+        toolClasses = tools.tools;
+    }
+    else {
+        toolClasses = tools;
+    }
+    if (!("bindTools" in model) || typeof model.bindTools !== "function") {
+        throw new Error(`Model ${model} must define bindTools method.`);
+    }
+    const modelWithTools = model.bindTools(toolClasses);
+    const modelRunnable = _createModelWrapper(modelWithTools, messageModifier);
+    const shouldContinue = (state) => {
+        const { messages } = state;
+        const lastMessage = messages[messages.length - 1];
+        if ((0, messages_1.isAIMessage)(lastMessage) &&
+            (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0)) {
+            return index_js_1.END;
+        }
+        else {
+            return "continue";
+        }
+    };
+    const callModel = async (state) => {
+        const { messages } = state;
+        // TODO: Auto-promote streaming.
+        return { messages: [await modelRunnable.invoke(messages)] };
+    };
+    const workflow = new index_js_1.StateGraph({
+        channels: schema,
+    })
+        .addNode("agent", new runnables_1.RunnableLambda({ func: callModel }).withConfig({ runName: "agent" }))
+        .addNode("tools", new tool_node_js_1.ToolNode(toolClasses))
+        .addEdge(index_js_1.START, "agent")
+        .addConditionalEdges("agent", shouldContinue, {
+        continue: "tools",
+        end: index_js_1.END,
+    })
+        .addEdge("tools", "agent");
+    return workflow.compile({
+        checkpointer: checkpointSaver,
+        interruptBefore,
+        interruptAfter,
+    });
+}
+exports.createReactAgent = createReactAgent;
+function _createModelWrapper(modelWithTools, messageModifier) {
+    if (!messageModifier) {
+        return modelWithTools;
+    }
+    const endict = new runnables_1.RunnableLambda({
+        func: (messages) => ({ messages }),
+    });
+    if (typeof messageModifier === "string") {
+        const systemMessage = new messages_1.SystemMessage(messageModifier);
+        const prompt = prompts_1.ChatPromptTemplate.fromMessages([
+            systemMessage,
+            ["placeholder", "{messages}"],
+        ]);
+        return endict.pipe(prompt).pipe(modelWithTools);
+    }
+    if (typeof messageModifier === "function") {
+        const lambda = new runnables_1.RunnableLambda({ func: messageModifier }).withConfig({
+            runName: "message_modifier",
+        });
+        return lambda.pipe(modelWithTools);
+    }
+    if (runnables_1.Runnable.isRunnable(messageModifier)) {
+        return messageModifier.pipe(modelWithTools);
+    }
+    if (messageModifier._getType() === "system") {
+        const prompt = prompts_1.ChatPromptTemplate.fromMessages([
+            messageModifier,
+            ["placeholder", "{messages}"],
+        ]);
+        return endict.pipe(prompt).pipe(modelWithTools);
+    }
+    throw new Error(`Unsupported message modifier type: ${typeof messageModifier}`);
+}
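For orientation, here is a sketch of how the new function is meant to be called. This is not code from the package: the tool, model settings, and prompt string are invented for illustration, and the import path is assumed from the dist layout above.

import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
import { DynamicStructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import { createReactAgent } from "@langchain/langgraph/prebuilt";

// Hypothetical tool for the sketch.
const getWeather = new DynamicStructuredTool({
    name: "get_weather",
    description: "Get the current weather for a city.",
    schema: z.object({ city: z.string() }),
    func: async ({ city }) => `It is sunny in ${city}.`,
});

// Arguments are positional in this version: (model, tools, messageModifier, ...).
const agent = createReactAgent(
    new ChatOpenAI({ temperature: 0 }),
    [getWeather],
    "You are a helpful assistant." // a string modifier becomes a system prompt
);

// The graph loops agent -> tools -> agent until the model answers without tool calls.
const result = await agent.invoke({
    messages: [new HumanMessage("What's the weather like in SF?")],
});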
package/dist/prebuilt/react_agent_executor.d.ts
ADDED
@@ -0,0 +1,26 @@
+import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+import { BaseMessage, SystemMessage } from "@langchain/core/messages";
+import { Runnable } from "@langchain/core/runnables";
+import { StructuredTool } from "@langchain/core/tools";
+import { BaseCheckpointSaver } from "../checkpoint/base.js";
+import { START } from "../graph/index.js";
+import { MessagesState } from "../graph/message.js";
+import { CompiledStateGraph } from "../graph/state.js";
+import { All } from "../pregel/types.js";
+import { ToolNode } from "./tool_node.js";
+export interface AgentState {
+    messages: BaseMessage[];
+}
+export type N = typeof START | "agent" | "tools";
+/**
+ * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
+ * @param model The chat model that can utilize OpenAI-style function calling.
+ * @param tools A list of tools or a ToolNode.
+ * @param messageModifier An optional message modifier to apply to messages before being passed to the LLM.
+ * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
+ * @param checkpointSaver An optional checkpoint saver to persist the agent's state.
+ * @param interruptBefore An optional list of node names to interrupt before running.
+ * @param interruptAfter An optional list of node names to interrupt after running.
+ * @returns A compiled agent as a LangChain Runnable.
+ */
+export declare function createReactAgent(model: BaseChatModel, tools: ToolNode<MessagesState> | StructuredTool[], messageModifier?: SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | Runnable, checkpointSaver?: BaseCheckpointSaver, interruptBefore?: N[] | All, interruptAfter?: N[] | All): CompiledStateGraph<AgentState, Partial<AgentState>, typeof START | "agent" | "tools">;
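The messageModifier union in this signature accepts four shapes; per the implementation above, each is normalized into a Runnable piped in front of the tool-bound model. A sketch of the four forms, with invented values:

import { SystemMessage, type BaseMessage } from "@langchain/core/messages";
import { RunnableLambda } from "@langchain/core/runnables";

// 1. A string: wrapped in a SystemMessage and prepended via a ChatPromptTemplate.
const asString = "You are a helpful assistant";
// 2. A SystemMessage: handled the same way as a string.
const asSystemMessage = new SystemMessage(asString);
// 3. A function: receives the running message list, returns the list to send.
const asFunction = (messages: BaseMessage[]) => [asSystemMessage, ...messages];
// 4. Any Runnable over the message list: piped directly into the model.
const asRunnable = new RunnableLambda({ func: asFunction });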
package/dist/prebuilt/react_agent_executor.js
ADDED
@@ -0,0 +1,101 @@
+import { isAIMessage, SystemMessage, } from "@langchain/core/messages";
+import { Runnable, RunnableLambda, } from "@langchain/core/runnables";
+import { ChatPromptTemplate } from "@langchain/core/prompts";
+import { END, START, StateGraph } from "../graph/index.js";
+import { ToolNode } from "./tool_node.js";
+/**
+ * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
+ * @param model The chat model that can utilize OpenAI-style function calling.
+ * @param tools A list of tools or a ToolNode.
+ * @param messageModifier An optional message modifier to apply to messages before being passed to the LLM.
+ * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
+ * @param checkpointSaver An optional checkpoint saver to persist the agent's state.
+ * @param interruptBefore An optional list of node names to interrupt before running.
+ * @param interruptAfter An optional list of node names to interrupt after running.
+ * @returns A compiled agent as a LangChain Runnable.
+ */
+export function createReactAgent(model, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter) {
+    const schema = {
+        messages: {
+            value: (left, right) => left.concat(right),
+            default: () => [],
+        },
+    };
+    let toolClasses;
+    if (!Array.isArray(tools)) {
+        toolClasses = tools.tools;
+    }
+    else {
+        toolClasses = tools;
+    }
+    if (!("bindTools" in model) || typeof model.bindTools !== "function") {
+        throw new Error(`Model ${model} must define bindTools method.`);
+    }
+    const modelWithTools = model.bindTools(toolClasses);
+    const modelRunnable = _createModelWrapper(modelWithTools, messageModifier);
+    const shouldContinue = (state) => {
+        const { messages } = state;
+        const lastMessage = messages[messages.length - 1];
+        if (isAIMessage(lastMessage) &&
+            (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0)) {
+            return END;
+        }
+        else {
+            return "continue";
+        }
+    };
+    const callModel = async (state) => {
+        const { messages } = state;
+        // TODO: Auto-promote streaming.
+        return { messages: [await modelRunnable.invoke(messages)] };
+    };
+    const workflow = new StateGraph({
+        channels: schema,
+    })
+        .addNode("agent", new RunnableLambda({ func: callModel }).withConfig({ runName: "agent" }))
+        .addNode("tools", new ToolNode(toolClasses))
+        .addEdge(START, "agent")
+        .addConditionalEdges("agent", shouldContinue, {
+        continue: "tools",
+        end: END,
+    })
+        .addEdge("tools", "agent");
+    return workflow.compile({
+        checkpointer: checkpointSaver,
+        interruptBefore,
+        interruptAfter,
+    });
+}
+function _createModelWrapper(modelWithTools, messageModifier) {
+    if (!messageModifier) {
+        return modelWithTools;
+    }
+    const endict = new RunnableLambda({
+        func: (messages) => ({ messages }),
+    });
+    if (typeof messageModifier === "string") {
+        const systemMessage = new SystemMessage(messageModifier);
+        const prompt = ChatPromptTemplate.fromMessages([
+            systemMessage,
+            ["placeholder", "{messages}"],
+        ]);
+        return endict.pipe(prompt).pipe(modelWithTools);
+    }
+    if (typeof messageModifier === "function") {
+        const lambda = new RunnableLambda({ func: messageModifier }).withConfig({
+            runName: "message_modifier",
+        });
+        return lambda.pipe(modelWithTools);
+    }
+    if (Runnable.isRunnable(messageModifier)) {
+        return messageModifier.pipe(modelWithTools);
+    }
+    if (messageModifier._getType() === "system") {
+        const prompt = ChatPromptTemplate.fromMessages([
+            messageModifier,
+            ["placeholder", "{messages}"],
+        ]);
+        return endict.pipe(prompt).pipe(modelWithTools);
+    }
+    throw new Error(`Unsupported message modifier type: ${typeof messageModifier}`);
+}
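The ESM build above mirrors the CJS build line for line. As an illustration of the checkpointSaver and interruptBefore parameters it compiles in, here is a sketch only, continuing the earlier weather example; it assumes the package's MemorySaver checkpointer and thread_id-keyed configs, which may differ in this version:

import { MemorySaver } from "@langchain/langgraph";
import { HumanMessage } from "@langchain/core/messages";
import { createReactAgent } from "@langchain/langgraph/prebuilt";

// `model` and `getWeather` as in the earlier sketch.
const agent = createReactAgent(
    model,
    [getWeather],
    undefined,         // no messageModifier
    new MemorySaver(), // checkpointSaver: persists state between invocations
    ["tools"]          // interruptBefore: pause before the "tools" node runs
);

// With a checkpointer attached, each conversation is keyed by a thread id.
const config = { configurable: { thread_id: "example-thread" } };
await agent.invoke({ messages: [new HumanMessage("Weather in SF?")] }, config);
// Execution halts before "tools"; the checkpoint holds the pending state.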
package/dist/prebuilt/tool_node.d.ts
CHANGED
@@ -1,17 +1,17 @@
 import { BaseMessage } from "@langchain/core/messages";
-import {
+import { StructuredTool } from "@langchain/core/tools";
 import { RunnableCallable } from "../utils.js";
 import { END } from "../graph/graph.js";
 import { MessagesState } from "../graph/message.js";
-export declare class ToolNode extends
+export declare class ToolNode<T extends BaseMessage[] | MessagesState> extends RunnableCallable<T, T> {
     /**
       A node that runs the tools requested in the last AIMessage. It can be used
       either in StateGraph with a "messages" key or in MessageGraph. If multiple
       tool calls are requested, they will be run in parallel. The output will be
       a list of ToolMessages, one for each tool call.
     */
-    tools:
-    constructor(tools:
+    tools: StructuredTool[];
+    constructor(tools: StructuredTool[], name?: string, tags?: string[]);
     private run;
 }
 export declare function toolsCondition(state: BaseMessage[] | MessagesState): "tools" | typeof END;
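The widened declaration means ToolNode now types cleanly over either a bare BaseMessage[] (MessageGraph) or a { messages } state (StateGraph). A standalone sketch, reusing the invented get_weather tool from the earlier example:

import { AIMessage } from "@langchain/core/messages";
import { ToolNode } from "@langchain/langgraph/prebuilt";

// Runs every tool_call on the last AIMessage; returns one ToolMessage per call.
const toolNode = new ToolNode([getWeather]);
const toolMessages = await toolNode.invoke([
    new AIMessage({
        content: "",
        tool_calls: [{ name: "get_weather", id: "call_1", args: { city: "SF" } }],
    }),
]);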
package/dist/tests/prebuilt.int.test.js
CHANGED
@@ -3,8 +3,7 @@ import { it, beforeAll, describe, expect } from "@jest/globals";
 import { Tool } from "@langchain/core/tools";
 import { ChatOpenAI } from "@langchain/openai";
 import { HumanMessage } from "@langchain/core/messages";
-import {
-import { createFunctionCallingExecutor } from "../prebuilt/index.js";
+import { createReactAgent, createFunctionCallingExecutor, } from "../prebuilt/index.js";
 // Tracing slows down the tests
 beforeAll(() => {
     process.env.LANGCHAIN_TRACING_V2 = "false";
@@ -44,7 +43,6 @@ describe("createFunctionCallingExecutor", () => {
         const response = await functionsAgentExecutor.invoke({
             messages: [new HumanMessage("What's the weather like in SF?")],
         });
-        console.log(response);
         // It needs at least one human message, one AI and one function message.
         expect(response.messages.length > 3).toBe(true);
         const firstFunctionMessage = response.messages.find((message) => message._getType() === "function");
@@ -83,19 +81,96 @@ describe("createFunctionCallingExecutor", () => {
         });
         const stream = await functionsAgentExecutor.stream({
             messages: [new HumanMessage("What's the weather like in SF?")],
-        });
+        }, { streamMode: "values" });
         const fullResponse = [];
         for await (const item of stream) {
-            console.log(item);
-            console.log("-----\n");
             fullResponse.push(item);
         }
-        //
-        expect(fullResponse.length
-        const
-
-        expect(
-        const functionCall =
+        // human -> agent -> action -> agent
+        expect(fullResponse.length).toEqual(4);
+        const endState = fullResponse[fullResponse.length - 1];
+        // 1 human, 2 llm calls, 1 function call.
+        expect(endState.messages.length).toEqual(4);
+        const functionCall = endState.messages.find((message) => message._getType() === "function");
         expect(functionCall.content).toBe(weatherResponse);
     });
 });
+describe("createReactAgent", () => {
+    it("can call a tool", async () => {
+        const weatherResponse = `Not too cold, not too hot 😎`;
+        const model = new ChatOpenAI();
+        class SanFranciscoWeatherTool extends Tool {
+            constructor() {
+                super();
+                Object.defineProperty(this, "name", {
+                    enumerable: true,
+                    configurable: true,
+                    writable: true,
+                    value: "current_weather"
+                });
+                Object.defineProperty(this, "description", {
+                    enumerable: true,
+                    configurable: true,
+                    writable: true,
+                    value: "Get the current weather report for San Francisco, CA"
+                });
+            }
+            async _call(_) {
+                return weatherResponse;
+            }
+        }
+        const tools = [new SanFranciscoWeatherTool()];
+        const reactAgent = createReactAgent(model, tools);
+        const response = await reactAgent.invoke({
+            messages: [new HumanMessage("What's the weather like in SF?")],
+        });
+        // It needs at least one human message and one AI message.
+        expect(response.messages.length > 1).toBe(true);
+        const lastMessage = response.messages[response.messages.length - 1];
+        expect(lastMessage._getType()).toBe("ai");
+        expect(lastMessage.content.toLowerCase()).toContain("not too cold");
+    });
+    it("can stream a tool call", async () => {
+        const weatherResponse = `Not too cold, not too hot 😎`;
+        const model = new ChatOpenAI({
+            streaming: true,
+        });
+        class SanFranciscoWeatherTool extends Tool {
+            constructor() {
+                super();
+                Object.defineProperty(this, "name", {
+                    enumerable: true,
+                    configurable: true,
+                    writable: true,
+                    value: "current_weather"
+                });
+                Object.defineProperty(this, "description", {
+                    enumerable: true,
+                    configurable: true,
+                    writable: true,
+                    value: "Get the current weather report for San Francisco, CA"
+                });
+            }
+            async _call(_) {
+                return weatherResponse;
+            }
+        }
+        const tools = [new SanFranciscoWeatherTool()];
+        const reactAgent = createReactAgent(model, tools);
+        const stream = await reactAgent.stream({
+            messages: [new HumanMessage("What's the weather like in SF?")],
+        }, { streamMode: "values" });
+        const fullResponse = [];
+        for await (const item of stream) {
+            fullResponse.push(item);
+        }
+        // human -> agent -> action -> agent
+        expect(fullResponse.length).toEqual(4);
+        const endState = fullResponse[fullResponse.length - 1];
+        // 1 human, 2 ai, 1 tool.
+        expect(endState.messages.length).toEqual(4);
+        const lastMessage = endState.messages[endState.messages.length - 1];
+        expect(lastMessage._getType()).toBe("ai");
+        expect(lastMessage.content.toLowerCase()).toContain("not too cold");
+    });
+});
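Note the switch to { streamMode: "values" } in both streaming tests: each yielded chunk is then the full accumulated state rather than a per-node update, which is why a single tool-calling run produces exactly four chunks (input, agent, tools, agent). A sketch, assuming an agent built as in the earlier examples:

const stream = await agent.stream(
    { messages: [new HumanMessage("What's the weather like in SF?")] },
    { streamMode: "values" } // each chunk is the whole { messages } state so far
);
for await (const chunk of stream) {
    console.log(chunk.messages.length); // grows 1 -> 2 -> 3 -> 4
}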
package/dist/tests/prebuilt.test.d.ts
CHANGED
@@ -1 +1,20 @@
-
+import { Tool } from "@langchain/core/tools";
+import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
+import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+import { BaseLLMParams } from "@langchain/core/language_models/llms";
+import { BaseMessage } from "@langchain/core/messages";
+import { ChatResult } from "@langchain/core/outputs";
+export declare class FakeToolCallingChatModel extends BaseChatModel {
+    sleep?: number;
+    responses?: BaseMessage[];
+    thrownErrorString?: string;
+    idx: number;
+    constructor(fields: {
+        sleep?: number;
+        responses?: BaseMessage[];
+        thrownErrorString?: string;
+    } & BaseLLMParams);
+    _llmType(): string;
+    _generate(messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+    bindTools(_: Tool[]): FakeToolCallingChatModel;
+}
package/dist/tests/prebuilt.test.js
CHANGED
@@ -1,9 +1,13 @@
 /* eslint-disable no-process-env */
-import {
+import { beforeAll, describe, expect, it } from "@jest/globals";
 import { PromptTemplate } from "@langchain/core/prompts";
+import { StructuredTool, Tool } from "@langchain/core/tools";
 import { FakeStreamingLLM } from "@langchain/core/utils/testing";
-import {
-import {
+import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+import { AIMessage, HumanMessage, SystemMessage, ToolMessage, } from "@langchain/core/messages";
+import { RunnableLambda } from "@langchain/core/runnables";
+import { z } from "zod";
+import { createAgentExecutor, createReactAgent } from "../prebuilt/index.js";
 // Tracing slows down the tests
 beforeAll(() => {
     process.env.LANGCHAIN_TRACING_V2 = "false";
@@ -193,3 +197,224 @@ describe("PreBuilt", () => {
         ]);
     });
 });
+export class FakeToolCallingChatModel extends BaseChatModel {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "sleep", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 50
+        });
+        Object.defineProperty(this, "responses", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "thrownErrorString", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "idx", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.sleep = fields.sleep ?? this.sleep;
+        this.responses = fields.responses;
+        this.thrownErrorString = fields.thrownErrorString;
+        this.idx = 0;
+    }
+    _llmType() {
+        return "fake";
+    }
+    async _generate(messages, _options, _runManager) {
+        if (this.thrownErrorString) {
+            throw new Error(this.thrownErrorString);
+        }
+        const msg = this.responses?.[this.idx] ?? messages[this.idx];
+        const generation = {
+            generations: [
+                {
+                    text: "",
+                    message: msg,
+                },
+            ],
+        };
+        this.idx += 1;
+        return generation;
+    }
+    bindTools(_) {
+        return new FakeToolCallingChatModel({
+            sleep: this.sleep,
+            responses: this.responses,
+            thrownErrorString: this.thrownErrorString,
+        });
+    }
+}
+describe("createReactAgent", () => {
+    const searchSchema = z.object({
+        query: z.string().describe("The query to search for."),
+    });
+    class SearchAPI extends StructuredTool {
+        constructor() {
+            super(...arguments);
+            Object.defineProperty(this, "name", {
+                enumerable: true,
+                configurable: true,
+                writable: true,
+                value: "search_api"
+            });
+            Object.defineProperty(this, "description", {
+                enumerable: true,
+                configurable: true,
+                writable: true,
+                value: "A simple API that returns the input string."
+            });
+            Object.defineProperty(this, "schema", {
+                enumerable: true,
+                configurable: true,
+                writable: true,
+                value: searchSchema
+            });
+        }
+        async _call(input) {
+            return `result for ${input?.query}`;
+        }
+    }
+    const tools = [new SearchAPI()];
+    it("Can use string message modifier", async () => {
+        const llm = new FakeToolCallingChatModel({
+            responses: [
+                new AIMessage({
+                    content: "result1",
+                    tool_calls: [
+                        { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+                    ],
+                }),
+                new AIMessage("result2"),
+            ],
+        });
+        const agent = createReactAgent(llm, tools, "You are a helpful assistant");
+        const result = await agent.invoke({
+            messages: [new HumanMessage("Hello Input!")],
+        });
+        expect(result.messages).toEqual([
+            new HumanMessage("Hello Input!"),
+            new AIMessage({
+                content: "result1",
+                tool_calls: [
+                    { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+                ],
+            }),
+            new ToolMessage({
+                name: "search_api",
+                content: "result for foo",
+                tool_call_id: "tool_abcd123",
+            }),
+            new AIMessage("result2"),
+        ]);
+    });
+    it("Can use SystemMessage message modifier", async () => {
+        const llm = new FakeToolCallingChatModel({
+            responses: [
+                new AIMessage({
+                    content: "result1",
+                    tool_calls: [
+                        { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+                    ],
+                }),
+                new AIMessage("result2"),
+            ],
+        });
+        const agent = createReactAgent(llm, tools, new SystemMessage("You are a helpful assistant"));
+        const result = await agent.invoke({
+            messages: [],
+        });
+        console.log("RESULT THING", result);
+        expect(result.messages).toEqual([
+            new AIMessage({
+                content: "result1",
+                tool_calls: [
+                    { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+                ],
+            }),
+            new ToolMessage({
+                name: "search_api",
+                content: "result for foo",
+                tool_call_id: "tool_abcd123",
+            }),
+            new AIMessage("result2"),
+        ]);
+    });
+    it("Can use custom function message modifier", async () => {
+        const aiM1 = new AIMessage({
+            content: "result1",
+            tool_calls: [
+                { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+            ],
+        });
+        const aiM2 = new AIMessage("result2");
+        const llm = new FakeToolCallingChatModel({
+            responses: [aiM1, aiM2],
+        });
+        const messageModifier = (messages) => [
+            new SystemMessage("You are a helpful assistant"),
+            ...messages,
+        ];
+        const agent = createReactAgent(llm, tools, messageModifier);
+        const result = await agent.invoke({
+            messages: [new HumanMessage("Hello Input!")],
+        });
+        expect(result.messages).toEqual([
+            new HumanMessage("Hello Input!"),
+            aiM1,
+            new ToolMessage({
+                name: "search_api",
+                content: "result for foo",
+                tool_call_id: "tool_abcd123",
+            }),
+            aiM2,
+        ]);
+    });
+    it("Can use RunnableLambda message modifier", async () => {
+        const aiM1 = new AIMessage({
+            content: "result1",
+            tool_calls: [
+                { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+            ],
+        });
+        const aiM2 = new AIMessage("result2");
+        const llm = new FakeToolCallingChatModel({
+            responses: [aiM1, aiM2],
+        });
+        const messageModifier = new RunnableLambda({
+            func: (messages) => [
+                new SystemMessage("You are a helpful assistant"),
+                ...messages,
+            ],
+        });
+        const agent = createReactAgent(llm, tools, messageModifier);
+        const result = await agent.invoke({
+            messages: [
+                new HumanMessage("Hello Input!"),
+                new HumanMessage("Another Input!"),
+            ],
+        });
+        expect(result.messages).toEqual([
+            new HumanMessage("Hello Input!"),
+            new HumanMessage("Another Input!"),
+            aiM1,
+            new ToolMessage({
+                name: "search_api",
+                content: "result for foo",
+                tool_call_id: "tool_abcd123",
+            }),
+            aiM2,
+        ]);
+    });
+});
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/langgraph",
-  "version": "0.0.17",
+  "version": "0.0.18",
   "description": "LangGraph",
   "type": "module",
   "engines": {
@@ -43,7 +43,7 @@
   "devDependencies": {
     "@jest/globals": "^29.5.0",
     "@langchain/community": "^0.0.43",
-    "@langchain/openai": "
+    "@langchain/openai": "latest",
     "@langchain/scripts": "^0.0.13",
     "@swc/core": "^1.3.90",
     "@swc/jest": "^0.2.29",