@langchain/langgraph 0.2.0 → 0.2.1-rc.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -8
- package/dist/graph/messages_annotation.cjs +30 -0
- package/dist/graph/messages_annotation.d.ts +30 -0
- package/dist/graph/messages_annotation.js +30 -0
- package/dist/prebuilt/react_agent_executor.cjs +49 -10
- package/dist/prebuilt/react_agent_executor.d.ts +48 -9
- package/dist/prebuilt/react_agent_executor.js +49 -10
- package/dist/prebuilt/tool_executor.cjs +1 -0
- package/dist/prebuilt/tool_executor.d.ts +2 -0
- package/dist/prebuilt/tool_executor.js +1 -0
- package/dist/prebuilt/tool_node.cjs +111 -0
- package/dist/prebuilt/tool_node.d.ts +111 -0
- package/dist/prebuilt/tool_node.js +111 -0
- package/dist/web.d.ts +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -31,7 +31,7 @@ npm install @langchain/langgraph @langchain/core
 
 One of the central concepts of LangGraph is state. Each graph execution creates a state that is passed between nodes in the graph as they execute, and each node updates this internal state with its return value after it executes. The way that the graph updates its internal state is defined by either the type of graph chosen or a custom function.
 
-Let's take a look at
+Let's take a look at an example of an agent that can use a search tool.
 
 First install the required dependencies:
 
@@ -64,7 +64,8 @@ import { MemorySaver, Annotation } from "@langchain/langgraph";
 import { ToolNode } from "@langchain/langgraph/prebuilt";
 
 // Define the graph state
-
+// See here for more info: https://langchain-ai.github.io/langgraphjs/how-tos/define-state/
+const StateAnnotation = Annotation.Root({
   messages: Annotation<BaseMessage[]>({
     reducer: (x, y) => x.concat(y),
   })
@@ -87,8 +88,7 @@ const weatherTool = tool(async ({ query }) => {
 });
 
 const tools = [weatherTool];
-
-const toolNode = new ToolNode<typeof GraphState.State>(tools);
+const toolNode = new ToolNode(tools);
 
 const model = new ChatAnthropic({
   model: "claude-3-5-sonnet-20240620",
@@ -96,7 +96,8 @@ const model = new ChatAnthropic({
 }).bindTools(tools);
 
 // Define the function that determines whether to continue or not
-
+// We can extract the state typing via `StateAnnotation.State`
+function shouldContinue(state: typeof StateAnnotation.State) {
   const messages = state.messages;
   const lastMessage = messages[messages.length - 1] as AIMessage;
 
@@ -109,7 +110,7 @@ function shouldContinue(state: typeof GraphState.State) {
 }
 
 // Define the function that calls the model
-async function callModel(state: typeof
+async function callModel(state: typeof StateAnnotation.State) {
   const messages = state.messages;
   const response = await model.invoke(messages);
 
@@ -118,7 +119,7 @@ async function callModel(state: typeof GraphState.State) {
 }
 
 // Define a new graph
-const workflow = new StateGraph(
+const workflow = new StateGraph(StateAnnotation)
   .addNode("agent", callModel)
   .addNode("tools", toolNode)
   .addEdge("__start__", "agent")
@@ -191,7 +192,7 @@ Is there anything else you'd like to know about the weather in New York or any o
 <summary>Initialize graph with state.</summary>
 
 - We initialize the graph (`StateGraph`) by passing the state interface (`AgentState`).
-- The `
+- The `StateAnnotation` object defines how updates from each node should be merged into the graph's state.
 </details>
 
 3. <details>
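The README hunks above only show the lines that changed. As a reading aid, here is a minimal end-to-end sketch assembled from those hunks; the weather tool body, the `HumanMessage` input, the condensed routing logic, and the thread id are illustrative assumptions rather than text taken from the README.

```ts
import { AIMessage, BaseMessage, HumanMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { ChatAnthropic } from "@langchain/anthropic";
import { MemorySaver, Annotation, StateGraph } from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";

// Graph state: a single `messages` channel whose updates are concatenated.
const StateAnnotation = Annotation.Root({
  messages: Annotation<BaseMessage[]>({
    reducer: (x, y) => x.concat(y),
  }),
});

// Illustrative stand-in for the README's weather/search tool.
const weatherTool = tool(
  async ({ query }) => `Results for "${query}": it's 90 degrees and sunny.`,
  {
    name: "weather",
    description: "Look up the current weather.",
    schema: z.object({ query: z.string().describe("The query to use.") }),
  }
);

const tools = [weatherTool];
const toolNode = new ToolNode(tools); // no explicit state generic needed anymore

const model = new ChatAnthropic({
  model: "claude-3-5-sonnet-20240620",
}).bindTools(tools);

// Route back to the tools node while the model keeps requesting tool calls.
function shouldContinue(state: typeof StateAnnotation.State) {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  return lastMessage.tool_calls?.length ? "tools" : "__end__";
}

async function callModel(state: typeof StateAnnotation.State) {
  const response = await model.invoke(state.messages);
  return { messages: [response] };
}

const workflow = new StateGraph(StateAnnotation)
  .addNode("agent", callModel)
  .addNode("tools", toolNode)
  .addEdge("__start__", "agent")
  .addConditionalEdges("agent", shouldContinue)
  .addEdge("tools", "agent");

const app = workflow.compile({ checkpointer: new MemorySaver() });

const finalState = await app.invoke(
  { messages: [new HumanMessage("what is the weather in sf")] },
  { configurable: { thread_id: "42" } }
);
```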
package/dist/graph/messages_annotation.cjs
CHANGED

@@ -8,6 +8,36 @@ const message_js_1 = require("./message.cjs");
  * Prebuilt state annotation that combines returned messages.
  * Can handle standard messages and special modifiers like {@link RemoveMessage}
  * instances.
+ *
+ * Specifically, importing and using the prebuilt MessagesAnnotation like this:
+ *
+ * @example
+ * ```ts
+ * import { MessagesAnnotation, StateGraph } from "@langchain/langgraph";
+ *
+ * const graph = new StateGraph(MessagesAnnotation)
+ *   .addNode(...)
+ *   ...
+ * ```
+ *
+ * Is equivalent to initializing your state manually like this:
+ *
+ * @example
+ * ```ts
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { Annotation, StateGraph, messagesStateReducer } from "@langchain/langgraph";
+ *
+ * export const StateAnnotation = Annotation.Root({
+ *   messages: Annotation<BaseMessage[]>({
+ *     reducer: messagesStateReducer,
+ *     default: () => [],
+ *   }),
+ * });
+ *
+ * const graph = new StateGraph(StateAnnotation)
+ *   .addNode(...)
+ *   ...
+ * ```
  */
 exports.MessagesAnnotation = annotation_js_1.Annotation.Root({
     messages: (0, annotation_js_1.Annotation)({
package/dist/graph/messages_annotation.d.ts
CHANGED

@@ -4,6 +4,36 @@ import { Messages } from "./message.js";
  * Prebuilt state annotation that combines returned messages.
  * Can handle standard messages and special modifiers like {@link RemoveMessage}
  * instances.
+ *
+ * Specifically, importing and using the prebuilt MessagesAnnotation like this:
+ *
+ * @example
+ * ```ts
+ * import { MessagesAnnotation, StateGraph } from "@langchain/langgraph";
+ *
+ * const graph = new StateGraph(MessagesAnnotation)
+ *   .addNode(...)
+ *   ...
+ * ```
+ *
+ * Is equivalent to initializing your state manually like this:
+ *
+ * @example
+ * ```ts
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { Annotation, StateGraph, messagesStateReducer } from "@langchain/langgraph";
+ *
+ * export const StateAnnotation = Annotation.Root({
+ *   messages: Annotation<BaseMessage[]>({
+ *     reducer: messagesStateReducer,
+ *     default: () => [],
+ *   }),
+ * });
+ *
+ * const graph = new StateGraph(StateAnnotation)
+ *   .addNode(...)
+ *   ...
+ * ```
  */
 export declare const MessagesAnnotation: import("./annotation.js").AnnotationRoot<{
     messages: import("../web.js").BinaryOperatorAggregate<BaseMessage[], Messages>;
package/dist/graph/messages_annotation.js
CHANGED

@@ -5,6 +5,36 @@ import { messagesStateReducer } from "./message.js";
  * Prebuilt state annotation that combines returned messages.
  * Can handle standard messages and special modifiers like {@link RemoveMessage}
  * instances.
+ *
+ * Specifically, importing and using the prebuilt MessagesAnnotation like this:
+ *
+ * @example
+ * ```ts
+ * import { MessagesAnnotation, StateGraph } from "@langchain/langgraph";
+ *
+ * const graph = new StateGraph(MessagesAnnotation)
+ *   .addNode(...)
+ *   ...
+ * ```
+ *
+ * Is equivalent to initializing your state manually like this:
+ *
+ * @example
+ * ```ts
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { Annotation, StateGraph, messagesStateReducer } from "@langchain/langgraph";
+ *
+ * export const StateAnnotation = Annotation.Root({
+ *   messages: Annotation<BaseMessage[]>({
+ *     reducer: messagesStateReducer,
+ *     default: () => [],
+ *   }),
+ * });
+ *
+ * const graph = new StateGraph(StateAnnotation)
+ *   .addNode(...)
+ *   ...
+ * ```
  */
 export const MessagesAnnotation = Annotation.Root({
     messages: Annotation({
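The JSDoc above says MessagesAnnotation (via `messagesStateReducer`) can handle special modifiers like `RemoveMessage`, but does not show one. Here is a small sketch of that behavior, assuming the reducer matches messages by `id` as the docs describe; the ids and message contents are illustrative.

```ts
import { AIMessage, HumanMessage, RemoveMessage } from "@langchain/core/messages";
import { messagesStateReducer } from "@langchain/langgraph";

// Start with two messages that already have stable ids.
const existing = [
  new HumanMessage({ id: "1", content: "hi" }),
  new AIMessage({ id: "2", content: "hello!" }),
];

// Appending a normal message adds it to the list...
const appended = messagesStateReducer(existing, [
  new HumanMessage({ id: "3", content: "what's the weather?" }),
]);

// ...while a RemoveMessage modifier deletes the message with a matching id.
const pruned = messagesStateReducer(appended, [new RemoveMessage({ id: "1" })]);
console.log(pruned.map((m) => m.id)); // expected: ["2", "3"]
```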
package/dist/prebuilt/react_agent_executor.cjs
CHANGED

@@ -7,18 +7,57 @@ const prompts_1 = require("@langchain/core/prompts");
 const index_js_1 = require("../graph/index.cjs");
 const tool_node_js_1 = require("./tool_node.cjs");
 /**
- * Creates a StateGraph agent that relies on a chat
- * @param llm The chat
- * @param tools A list of tools or a ToolNode.
- * @param messageModifier An optional message modifier to apply to messages before being passed to the LLM.
+ * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
+ * @param params.llm The chat model that can utilize OpenAI-style tool calling.
+ * @param params.tools A list of tools or a ToolNode.
+ * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
  * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
- * @param
- * @param interruptBefore An optional list of node names to interrupt before running.
- * @param interruptAfter An optional list of node names to interrupt after running.
- * @returns A compiled
+ * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
+ * @param params.interruptBefore An optional list of node names to interrupt before running.
+ * @param params.interruptAfter An optional list of node names to interrupt after running.
+ * @returns A prebuilt compiled graph.
+ *
+ * @example
+ * ```ts
+ * import { ChatOpenAI } from "@langchain/openai";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { createReactAgent } from "@langchain/langgraph/prebuilt";
+ *
+ * const model = new ChatOpenAI({
+ *   model: "gpt-4o",
+ * });
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   })
+ * })
+ *
+ * const agent = createReactAgent({ llm: model, tools: [getWeather] });
+ *
+ * const inputs = {
+ *   messages: [{ role: "user", content: "what is the weather in SF?" }],
+ * };
+ *
+ * const stream = await agent.stream(inputs, { streamMode: "values" });
+ *
+ * for await (const { messages } of stream) {
+ *   console.log(messages);
+ * }
+ * // Returns the messages in the state at each step of execution
+ * ```
  */
-function createReactAgent(
-const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } =
+function createReactAgent(params) {
+    const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } = params;
     const schema = {
         messages: {
             value: index_js_1.messagesStateReducer,
package/dist/prebuilt/react_agent_executor.d.ts
CHANGED

@@ -20,14 +20,53 @@ export type CreateReactAgentParams = {
     interruptAfter?: N[] | All;
 };
 /**
- * Creates a StateGraph agent that relies on a chat
- * @param llm The chat
- * @param tools A list of tools or a ToolNode.
- * @param messageModifier An optional message modifier to apply to messages before being passed to the LLM.
+ * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
+ * @param params.llm The chat model that can utilize OpenAI-style tool calling.
+ * @param params.tools A list of tools or a ToolNode.
+ * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
  * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
- * @param
- * @param interruptBefore An optional list of node names to interrupt before running.
- * @param interruptAfter An optional list of node names to interrupt after running.
- * @returns A compiled
+ * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
+ * @param params.interruptBefore An optional list of node names to interrupt before running.
+ * @param params.interruptAfter An optional list of node names to interrupt after running.
+ * @returns A prebuilt compiled graph.
+ *
+ * @example
+ * ```ts
+ * import { ChatOpenAI } from "@langchain/openai";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { createReactAgent } from "@langchain/langgraph/prebuilt";
+ *
+ * const model = new ChatOpenAI({
+ *   model: "gpt-4o",
+ * });
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   })
+ * })
+ *
+ * const agent = createReactAgent({ llm: model, tools: [getWeather] });
+ *
+ * const inputs = {
+ *   messages: [{ role: "user", content: "what is the weather in SF?" }],
+ * };
+ *
+ * const stream = await agent.stream(inputs, { streamMode: "values" });
+ *
+ * for await (const { messages } of stream) {
+ *   console.log(messages);
+ * }
+ * // Returns the messages in the state at each step of execution
+ * ```
  */
-export declare function createReactAgent(
+export declare function createReactAgent(params: CreateReactAgentParams): CompiledStateGraph<AgentState, Partial<AgentState>, typeof START | "agent" | "tools">;
package/dist/prebuilt/react_agent_executor.js
CHANGED

@@ -4,18 +4,57 @@ import { ChatPromptTemplate } from "@langchain/core/prompts";
 import { END, messagesStateReducer, START, StateGraph, } from "../graph/index.js";
 import { ToolNode } from "./tool_node.js";
 /**
- * Creates a StateGraph agent that relies on a chat
- * @param llm The chat
- * @param tools A list of tools or a ToolNode.
- * @param messageModifier An optional message modifier to apply to messages before being passed to the LLM.
+ * Creates a StateGraph agent that relies on a chat model utilizing tool calling.
+ * @param params.llm The chat model that can utilize OpenAI-style tool calling.
+ * @param params.tools A list of tools or a ToolNode.
+ * @param params.messageModifier An optional message modifier to apply to messages before being passed to the LLM.
  * Can be a SystemMessage, string, function that takes and returns a list of messages, or a Runnable.
- * @param
- * @param interruptBefore An optional list of node names to interrupt before running.
- * @param interruptAfter An optional list of node names to interrupt after running.
- * @returns A compiled
+ * @param params.checkpointer An optional checkpoint saver to persist the agent's state.
+ * @param params.interruptBefore An optional list of node names to interrupt before running.
+ * @param params.interruptAfter An optional list of node names to interrupt after running.
+ * @returns A prebuilt compiled graph.
+ *
+ * @example
+ * ```ts
+ * import { ChatOpenAI } from "@langchain/openai";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { createReactAgent } from "@langchain/langgraph/prebuilt";
+ *
+ * const model = new ChatOpenAI({
+ *   model: "gpt-4o",
+ * });
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   })
+ * })
+ *
+ * const agent = createReactAgent({ llm: model, tools: [getWeather] });
+ *
+ * const inputs = {
+ *   messages: [{ role: "user", content: "what is the weather in SF?" }],
+ * };
+ *
+ * const stream = await agent.stream(inputs, { streamMode: "values" });
+ *
+ * for await (const { messages } of stream) {
+ *   console.log(messages);
+ * }
+ * // Returns the messages in the state at each step of execution
+ * ```
  */
-export function createReactAgent(
-const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } =
+export function createReactAgent(params) {
+    const { llm, tools, messageModifier, checkpointSaver, interruptBefore, interruptAfter, } = params;
     const schema = {
         messages: {
             value: messagesStateReducer,
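The new `params` object destructured above names `llm`, `tools`, `messageModifier`, `checkpointSaver`, `interruptBefore`, and `interruptAfter`, but the JSDoc example only passes `llm` and `tools`. A hedged sketch of the extra options in use; the tool body, system-prompt text, and thread id are illustrative assumptions.

```ts
import { ChatOpenAI } from "@langchain/openai";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { MemorySaver } from "@langchain/langgraph";
import { createReactAgent } from "@langchain/langgraph/prebuilt";

const getWeather = tool(async ({ location }) => `It's sunny in ${location}.`, {
  name: "get_weather",
  description: "Call to get the current weather.",
  schema: z.object({ location: z.string() }),
});

const agent = createReactAgent({
  llm: new ChatOpenAI({ model: "gpt-4o" }),
  tools: [getWeather],
  messageModifier: "You are a concise weather assistant.", // string form of the modifier
  checkpointSaver: new MemorySaver(),                      // persist agent state between runs
  interruptBefore: ["tools"],                              // pause before the "tools" node executes
});

// The same thread_id resumes from the checkpointed state on later invocations.
const config = { configurable: { thread_id: "weather-thread-1" } };
await agent.invoke(
  { messages: [{ role: "user", content: "what is the weather in SF?" }] },
  config
);
```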
package/dist/prebuilt/tool_executor.cjs
CHANGED

@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.ToolExecutor = void 0;
 const runnables_1 = require("@langchain/core/runnables");
 const INVALID_TOOL_MSG_TEMPLATE = `{requestedToolName} is not a valid tool, try one of {availableToolNamesString}.`;
+/** @deprecated Use {@link ToolNode} instead. */
 class ToolExecutor extends runnables_1.RunnableBinding {
     constructor(fields) {
         const fieldsWithDefaults = {
package/dist/prebuilt/tool_executor.d.ts
CHANGED

@@ -1,5 +1,6 @@
 import { RunnableBinding, RunnableConfig, RunnableToolLike } from "@langchain/core/runnables";
 import { StructuredToolInterface } from "@langchain/core/tools";
+/** @deprecated Use {@link ToolNode} instead. */
 export interface ToolExecutorArgs {
     tools: Array<StructuredToolInterface | RunnableToolLike>;
     /**
@@ -16,6 +17,7 @@ export interface ToolInvocationInterface {
 }
 type ToolExecutorInputType = any;
 type ToolExecutorOutputType = any;
+/** @deprecated Use {@link ToolNode} instead. */
 export declare class ToolExecutor extends RunnableBinding<ToolExecutorInputType, ToolExecutorOutputType> {
     lc_graph_name: string;
     tools: Array<StructuredToolInterface | RunnableToolLike>;
package/dist/prebuilt/tool_executor.js
CHANGED

@@ -1,5 +1,6 @@
 import { RunnableBinding, RunnableLambda, } from "@langchain/core/runnables";
 const INVALID_TOOL_MSG_TEMPLATE = `{requestedToolName} is not a valid tool, try one of {availableToolNamesString}.`;
+/** @deprecated Use {@link ToolNode} instead. */
 export class ToolExecutor extends RunnableBinding {
     constructor(fields) {
         const fieldsWithDefaults = {
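These hunks only add `@deprecated` tags pointing at `ToolNode`. A rough migration sketch follows, assuming `ToolExecutor` is still exported from `@langchain/langgraph/prebuilt` and is constructed with `{ tools }` as its `ToolExecutorArgs` typing suggests; the `echo` tool and the tool-call id are illustrative.

```ts
import { AIMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
// ToolExecutor still works but is now marked @deprecated in favor of ToolNode.
import { ToolExecutor, ToolNode } from "@langchain/langgraph/prebuilt";

const echo = tool(async ({ text }) => text, {
  name: "echo",
  description: "Echo the input back.",
  schema: z.object({ text: z.string() }),
});

// Deprecated path: wrap the tools in a ToolExecutor.
const executor = new ToolExecutor({ tools: [echo] });

// Replacement path: ToolNode plugs into a StateGraph "messages" state and
// turns the tool calls on the last AIMessage into ToolMessages.
const toolNode = new ToolNode([echo]);
const result = await toolNode.invoke({
  messages: [
    new AIMessage({
      content: "",
      tool_calls: [{ name: "echo", args: { text: "hi" }, id: "call_1", type: "tool_call" }],
    }),
  ],
});
// result is expected to be { messages: ToolMessage[] } per the ToolNode docs below.
```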
package/dist/prebuilt/tool_node.cjs
CHANGED

@@ -9,6 +9,117 @@ const graph_js_1 = require("../graph/graph.cjs");
  * either in StateGraph with a "messages" key or in MessageGraph. If multiple
  * tool calls are requested, they will be run in parallel. The output will be
  * a list of ToolMessages, one for each tool call.
+ *
+ * @example
+ * ```ts
+ * import { ToolNode } from "@langchain/langgraph/prebuilt";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { AIMessage } from "@langchain/core/messages";
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   }),
+ * });
+ *
+ * const tools = [getWeather];
+ * const toolNode = new ToolNode(tools);
+ *
+ * const messageWithSingleToolCall = new AIMessage({
+ *   content: "",
+ *   tool_calls: [
+ *     {
+ *       name: "get_weather",
+ *       args: { location: "sf" },
+ *       id: "tool_call_id",
+ *       type: "tool_call",
+ *     }
+ *   ]
+ * })
+ *
+ * await toolNode.invoke({ messages: [messageWithSingleToolCall] });
+ * // Returns tool invocation responses as:
+ * // { messages: ToolMessage[] }
+ * ```
+ *
+ * @example
+ * ```ts
+ * import {
+ *   StateGraph,
+ *   MessagesAnnotation,
+ * } from "@langchain/langgraph";
+ * import { ToolNode } from "@langchain/langgraph/prebuilt";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   }),
+ * });
+ *
+ * const tools = [getWeather];
+ * const modelWithTools = new ChatAnthropic({
+ *   model: "claude-3-haiku-20240307",
+ *   temperature: 0
+ * }).bindTools(tools);
+ *
+ * const toolNodeForGraph = new ToolNode(tools)
+ *
+ * const shouldContinue = (state: typeof MessagesAnnotation.State) => {
+ *   const { messages } = state;
+ *   const lastMessage = messages[messages.length - 1];
+ *   if ("tool_calls" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls?.length) {
+ *     return "tools";
+ *   }
+ *   return "__end__";
+ * }
+ *
+ * const callModel = async (state: typeof MessagesAnnotation.State) => {
+ *   const { messages } = state;
+ *   const response = await modelWithTools.invoke(messages);
+ *   return { messages: response };
+ * }
+ *
+ * const graph = new StateGraph(MessagesAnnotation)
+ *   .addNode("agent", callModel)
+ *   .addNode("tools", toolNodeForGraph)
+ *   .addEdge("__start__", "agent")
+ *   .addConditionalEdges("agent", shouldContinue)
+ *   .addEdge("tools", "agent")
+ *   .compile();
+ *
+ * const inputs = {
+ *   messages: [{ role: "user", content: "what is the weather in SF?" }],
+ * };
+ *
+ * const stream = await graph.stream(inputs, {
+ *   streamMode: "values",
+ * });
+ *
+ * for await (const { messages } of stream) {
+ *   console.log(messages);
+ * }
+ * // Returns the messages in the state at each step of execution
+ * ```
  */
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 class ToolNode extends utils_js_1.RunnableCallable {
package/dist/prebuilt/tool_node.d.ts
CHANGED

@@ -14,6 +14,117 @@ export type ToolNodeOptions = {
  * either in StateGraph with a "messages" key or in MessageGraph. If multiple
  * tool calls are requested, they will be run in parallel. The output will be
  * a list of ToolMessages, one for each tool call.
+ *
+ * @example
+ * ```ts
+ * import { ToolNode } from "@langchain/langgraph/prebuilt";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { AIMessage } from "@langchain/core/messages";
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   }),
+ * });
+ *
+ * const tools = [getWeather];
+ * const toolNode = new ToolNode(tools);
+ *
+ * const messageWithSingleToolCall = new AIMessage({
+ *   content: "",
+ *   tool_calls: [
+ *     {
+ *       name: "get_weather",
+ *       args: { location: "sf" },
+ *       id: "tool_call_id",
+ *       type: "tool_call",
+ *     }
+ *   ]
+ * })
+ *
+ * await toolNode.invoke({ messages: [messageWithSingleToolCall] });
+ * // Returns tool invocation responses as:
+ * // { messages: ToolMessage[] }
+ * ```
+ *
+ * @example
+ * ```ts
+ * import {
+ *   StateGraph,
+ *   MessagesAnnotation,
+ * } from "@langchain/langgraph";
+ * import { ToolNode } from "@langchain/langgraph/prebuilt";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   }),
+ * });
+ *
+ * const tools = [getWeather];
+ * const modelWithTools = new ChatAnthropic({
+ *   model: "claude-3-haiku-20240307",
+ *   temperature: 0
+ * }).bindTools(tools);
+ *
+ * const toolNodeForGraph = new ToolNode(tools)
+ *
+ * const shouldContinue = (state: typeof MessagesAnnotation.State) => {
+ *   const { messages } = state;
+ *   const lastMessage = messages[messages.length - 1];
+ *   if ("tool_calls" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls?.length) {
+ *     return "tools";
+ *   }
+ *   return "__end__";
+ * }
+ *
+ * const callModel = async (state: typeof MessagesAnnotation.State) => {
+ *   const { messages } = state;
+ *   const response = await modelWithTools.invoke(messages);
+ *   return { messages: response };
+ * }
+ *
+ * const graph = new StateGraph(MessagesAnnotation)
+ *   .addNode("agent", callModel)
+ *   .addNode("tools", toolNodeForGraph)
+ *   .addEdge("__start__", "agent")
+ *   .addConditionalEdges("agent", shouldContinue)
+ *   .addEdge("tools", "agent")
+ *   .compile();
+ *
+ * const inputs = {
+ *   messages: [{ role: "user", content: "what is the weather in SF?" }],
+ * };
+ *
+ * const stream = await graph.stream(inputs, {
+ *   streamMode: "values",
+ * });
+ *
+ * for await (const { messages } of stream) {
+ *   console.log(messages);
+ * }
+ * // Returns the messages in the state at each step of execution
+ * ```
  */
 export declare class ToolNode<T = any> extends RunnableCallable<T, T> {
     tools: (StructuredToolInterface | RunnableToolLike)[];
package/dist/prebuilt/tool_node.js
CHANGED

@@ -6,6 +6,117 @@ import { END } from "../graph/graph.js";
  * either in StateGraph with a "messages" key or in MessageGraph. If multiple
  * tool calls are requested, they will be run in parallel. The output will be
  * a list of ToolMessages, one for each tool call.
+ *
+ * @example
+ * ```ts
+ * import { ToolNode } from "@langchain/langgraph/prebuilt";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { AIMessage } from "@langchain/core/messages";
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   }),
+ * });
+ *
+ * const tools = [getWeather];
+ * const toolNode = new ToolNode(tools);
+ *
+ * const messageWithSingleToolCall = new AIMessage({
+ *   content: "",
+ *   tool_calls: [
+ *     {
+ *       name: "get_weather",
+ *       args: { location: "sf" },
+ *       id: "tool_call_id",
+ *       type: "tool_call",
+ *     }
+ *   ]
+ * })
+ *
+ * await toolNode.invoke({ messages: [messageWithSingleToolCall] });
+ * // Returns tool invocation responses as:
+ * // { messages: ToolMessage[] }
+ * ```
+ *
+ * @example
+ * ```ts
+ * import {
+ *   StateGraph,
+ *   MessagesAnnotation,
+ * } from "@langchain/langgraph";
+ * import { ToolNode } from "@langchain/langgraph/prebuilt";
+ * import { tool } from "@langchain/core/tools";
+ * import { z } from "zod";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ *
+ * const getWeather = tool((input) => {
+ *   if (["sf", "san francisco"].includes(input.location.toLowerCase())) {
+ *     return "It's 60 degrees and foggy.";
+ *   } else {
+ *     return "It's 90 degrees and sunny.";
+ *   }
+ * }, {
+ *   name: "get_weather",
+ *   description: "Call to get the current weather.",
+ *   schema: z.object({
+ *     location: z.string().describe("Location to get the weather for."),
+ *   }),
+ * });
+ *
+ * const tools = [getWeather];
+ * const modelWithTools = new ChatAnthropic({
+ *   model: "claude-3-haiku-20240307",
+ *   temperature: 0
+ * }).bindTools(tools);
+ *
+ * const toolNodeForGraph = new ToolNode(tools)
+ *
+ * const shouldContinue = (state: typeof MessagesAnnotation.State) => {
+ *   const { messages } = state;
+ *   const lastMessage = messages[messages.length - 1];
+ *   if ("tool_calls" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls?.length) {
+ *     return "tools";
+ *   }
+ *   return "__end__";
+ * }
+ *
+ * const callModel = async (state: typeof MessagesAnnotation.State) => {
+ *   const { messages } = state;
+ *   const response = await modelWithTools.invoke(messages);
+ *   return { messages: response };
+ * }
+ *
+ * const graph = new StateGraph(MessagesAnnotation)
+ *   .addNode("agent", callModel)
+ *   .addNode("tools", toolNodeForGraph)
+ *   .addEdge("__start__", "agent")
+ *   .addConditionalEdges("agent", shouldContinue)
+ *   .addEdge("tools", "agent")
+ *   .compile();
+ *
+ * const inputs = {
+ *   messages: [{ role: "user", content: "what is the weather in SF?" }],
+ * };
+ *
+ * const stream = await graph.stream(inputs, {
+ *   streamMode: "values",
+ * });
+ *
+ * for await (const { messages } of stream) {
+ *   console.log(messages);
+ * }
+ * // Returns the messages in the state at each step of execution
+ * ```
  */
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 export class ToolNode extends RunnableCallable {
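The ToolNode JSDoc added above states that multiple tool calls on one AIMessage are run in parallel, producing one ToolMessage per call, but its examples only show a single call. A sketch of the multi-call case under that assumption; the tool body and call ids are illustrative.

```ts
import { AIMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { ToolNode } from "@langchain/langgraph/prebuilt";

const getWeather = tool(async ({ location }) => `It's sunny in ${location}.`, {
  name: "get_weather",
  description: "Call to get the current weather.",
  schema: z.object({ location: z.string() }),
});

const toolNode = new ToolNode([getWeather]);

// One AIMessage can request several tool calls at once; ToolNode runs them
// in parallel and returns one ToolMessage per call.
const message = new AIMessage({
  content: "",
  tool_calls: [
    { name: "get_weather", args: { location: "sf" }, id: "call_sf", type: "tool_call" },
    { name: "get_weather", args: { location: "nyc" }, id: "call_nyc", type: "tool_call" },
  ],
});

const { messages } = await toolNode.invoke({ messages: [message] });
console.log(messages.length); // expected: 2, one ToolMessage per tool call
```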
package/dist/web.d.ts
CHANGED
@@ -1,6 +1,6 @@
 export { END, Graph, type StateGraphArgs, START, StateGraph, type CompiledStateGraph, MessageGraph, messagesStateReducer, type Messages, Annotation, type StateType, type UpdateType, type NodeType, type StateDefinition, type SingleReducer, type CompiledGraph, } from "./graph/index.js";
 export * from "./errors.js";
-export { BaseChannel, type BinaryOperator, BinaryOperatorAggregate, } from "./channels/index.js";
+export { BaseChannel, type BinaryOperator, BinaryOperatorAggregate, type AnyValue, type WaitForNames, type DynamicBarrierValue, type LastValue, type NamedBarrierValue, type Topic, } from "./channels/index.js";
 export { type RetryPolicy } from "./pregel/utils.js";
 export { Send } from "./constants.js";
 export { MemorySaver, type Checkpoint, type CheckpointMetadata, type CheckpointTuple, copyCheckpoint, emptyCheckpoint, BaseCheckpointSaver, } from "@langchain/langgraph-checkpoint";
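This hunk re-exports several channel types from the web build. A type-only usage sketch, assuming the `@langchain/langgraph/web` entrypoint path and that `LastValue` and `Topic` keep a single value type parameter:

```ts
// Type-only usage of the channel types newly re-exported from the web entrypoint.
import type {
  AnyValue,
  DynamicBarrierValue,
  LastValue,
  NamedBarrierValue,
  Topic,
  WaitForNames,
} from "@langchain/langgraph/web";
import type { BaseMessage } from "@langchain/core/messages";

// For example, a channel that keeps only the most recent value vs. one that
// accumulates values published during a step:
type LatestMessage = LastValue<BaseMessage>;
type MessageInbox = Topic<BaseMessage>;
```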