@langchain/langgraph 0.2.41 → 0.2.43-rc.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +237 -154
- package/dist/channels/any_value.cjs +10 -10
- package/dist/channels/any_value.d.ts +1 -1
- package/dist/channels/any_value.js +10 -10
- package/dist/channels/ephemeral_value.cjs +10 -9
- package/dist/channels/ephemeral_value.d.ts +1 -1
- package/dist/channels/ephemeral_value.js +10 -9
- package/dist/channels/last_value.cjs +8 -7
- package/dist/channels/last_value.d.ts +1 -1
- package/dist/channels/last_value.js +8 -7
- package/dist/constants.cjs +33 -6
- package/dist/constants.d.ts +17 -2
- package/dist/constants.js +32 -5
- package/dist/errors.d.ts +3 -3
- package/dist/func/index.cjs +272 -0
- package/dist/func/index.d.ts +310 -0
- package/dist/func/index.js +267 -0
- package/dist/func/types.cjs +15 -0
- package/dist/func/types.d.ts +59 -0
- package/dist/func/types.js +11 -0
- package/dist/graph/graph.cjs +31 -35
- package/dist/graph/graph.d.ts +1 -5
- package/dist/graph/graph.js +1 -5
- package/dist/graph/index.cjs +1 -3
- package/dist/graph/index.d.ts +1 -1
- package/dist/graph/index.js +1 -1
- package/dist/graph/message.d.ts +1 -1
- package/dist/graph/state.cjs +17 -17
- package/dist/graph/state.d.ts +2 -1
- package/dist/graph/state.js +2 -2
- package/dist/index.cjs +8 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +3 -0
- package/dist/interrupt.cjs +21 -34
- package/dist/interrupt.d.ts +1 -1
- package/dist/interrupt.js +22 -35
- package/dist/prebuilt/agent_executor.cjs +3 -3
- package/dist/prebuilt/agent_executor.d.ts +1 -1
- package/dist/prebuilt/agent_executor.js +1 -1
- package/dist/prebuilt/chat_agent_executor.cjs +3 -3
- package/dist/prebuilt/chat_agent_executor.d.ts +1 -1
- package/dist/prebuilt/chat_agent_executor.js +1 -1
- package/dist/prebuilt/react_agent_executor.cjs +33 -8
- package/dist/prebuilt/react_agent_executor.d.ts +4 -1
- package/dist/prebuilt/react_agent_executor.js +31 -6
- package/dist/prebuilt/tool_node.cjs +1 -2
- package/dist/prebuilt/tool_node.d.ts +1 -1
- package/dist/prebuilt/tool_node.js +1 -2
- package/dist/pregel/algo.cjs +121 -12
- package/dist/pregel/algo.d.ts +8 -6
- package/dist/pregel/algo.js +122 -13
- package/dist/pregel/call.cjs +77 -0
- package/dist/pregel/call.d.ts +15 -0
- package/dist/pregel/call.js +71 -0
- package/dist/pregel/index.cjs +59 -96
- package/dist/pregel/index.d.ts +1 -10
- package/dist/pregel/index.js +61 -98
- package/dist/pregel/io.cjs +6 -1
- package/dist/pregel/io.js +7 -2
- package/dist/pregel/loop.cjs +109 -75
- package/dist/pregel/loop.d.ts +17 -23
- package/dist/pregel/loop.js +110 -75
- package/dist/pregel/messages.d.ts +1 -1
- package/dist/pregel/retry.cjs +22 -50
- package/dist/pregel/retry.d.ts +6 -6
- package/dist/pregel/retry.js +22 -50
- package/dist/pregel/runner.cjs +275 -0
- package/dist/pregel/runner.d.ts +64 -0
- package/dist/pregel/runner.js +271 -0
- package/dist/pregel/stream.cjs +71 -0
- package/dist/pregel/stream.d.ts +17 -0
- package/dist/pregel/stream.js +67 -0
- package/dist/pregel/types.cjs +54 -0
- package/dist/pregel/types.d.ts +78 -6
- package/dist/pregel/types.js +51 -1
- package/dist/pregel/utils/config.cjs +26 -1
- package/dist/pregel/utils/config.d.ts +14 -0
- package/dist/pregel/utils/config.js +22 -0
- package/dist/pregel/write.d.ts +1 -1
- package/dist/utils.cjs +15 -1
- package/dist/utils.d.ts +3 -1
- package/dist/utils.js +12 -0
- package/dist/web.cjs +7 -5
- package/dist/web.d.ts +4 -4
- package/dist/web.js +3 -3
- package/package.json +8 -8
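The new dist/func/* and dist/pregel/call.* entries in the list above suggest this release candidate introduces LangGraph's functional API. As a rough, hedged sketch only: the snippet below uses the `entrypoint` and `task` exports as documented for current @langchain/langgraph releases; whether 0.2.43-rc.0 already exposed exactly this surface from the package root is an assumption.

```ts
import { entrypoint, task, MemorySaver } from "@langchain/langgraph";

// A task is a unit of work whose result is saved to the checkpoint,
// so a resumed run can skip work that already completed.
const shout = task("shout", async (text: string) => `${text.toUpperCase()}!`);

// An entrypoint wraps plain async code as a persistent workflow,
// without declaring nodes and edges explicitly.
const workflow = entrypoint(
  { name: "workflow", checkpointer: new MemorySaver() },
  async (texts: string[]) => Promise.all(texts.map((t) => shout(t)))
);

// Invoked like any other LangGraph runnable, with a thread_id for persistence.
const result = await workflow.invoke(["hello", "world"], {
  configurable: { thread_id: "functional-api-demo" },
});
console.log(result); // ["HELLO!", "WORLD!"]
```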
package/README.md
CHANGED
@@ -7,54 +7,174 @@

⚡ Building language agents as graphs ⚡

+> [!NOTE]
+> Looking for the Python version? See the [Python repo](https://github.com/langchain-ai/langgraph) and the [Python docs](https://langchain-ai.github.io/langgraph/).
+
## Overview

-[LangGraph
+[LangGraph](https://langchain-ai.github.io/langgraphjs/) is a library for building
+stateful, multi-actor applications with LLMs, used to create agent and multi-agent
+workflows. Check out an introductory tutorial [here](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/).
+
+
+LangGraph is inspired by [Pregel](https://research.google/pubs/pub37252/) and [Apache Beam](https://beam.apache.org/). The public interface draws inspiration from [NetworkX](https://networkx.org/documentation/latest/). LangGraph is built by LangChain Inc, the creators of LangChain, but can be used without LangChain.
+
+### Why use LangGraph?
+
+LangGraph provides fine-grained control over both the flow and state of your
+agent applications. It implements a central
+[persistence layer](https://langchain-ai.github.io/langgraphjs/concepts/persistence/),
+enabling features that are common to most agent architectures:
+
+- **Memory**: LangGraph persists arbitrary aspects of your application's state,
+supporting memory of conversations and other updates within and across user
+interactions;
+- **Human-in-the-loop**: Because state is checkpointed, execution can be interrupted
+and resumed, allowing for decisions, validation, and corrections at key stages via
+human input.
+
+Standardizing these components allows individuals and teams to focus on the behavior
+of their agent, instead of its supporting infrastructure.
+
+Through [LangGraph Platform](#langgraph-platform), LangGraph also provides tooling for
+the development, deployment, debugging, and monitoring of your applications.
+
+LangGraph integrates seamlessly with
+[LangChain](https://js.langchain.com/docs/introduction/) and
+[LangSmith](https://docs.smith.langchain.com/) (but does not require them).
+
+To learn more about LangGraph, check out our first LangChain Academy
+course, *Introduction to LangGraph*, available for free
+[here](https://academy.langchain.com/courses/intro-to-langgraph).
+
+### LangGraph Platform
+
+[LangGraph Platform](https://langchain-ai.github.io/langgraphjs/concepts/langgraph_platform) is infrastructure for deploying LangGraph agents. It is a commercial solution for deploying agentic applications to production, built on the open-source LangGraph framework. The LangGraph Platform consists of several components that work together to support the development, deployment, debugging, and monitoring of LangGraph applications: [LangGraph Server](https://langchain-ai.github.io/langgraphjs/concepts/langgraph_server) (APIs), [LangGraph SDKs](https://langchain-ai.github.io/langgraphjs/concepts/sdk) (clients for the APIs), [LangGraph CLI](https://langchain-ai.github.io/langgraphjs/concepts/langgraph_cli) (command line tool for building the server), and [LangGraph Studio](https://langchain-ai.github.io/langgraphjs/concepts/langgraph_studio) (UI/debugger).

-
+See deployment options [here](https://langchain-ai.github.io/langgraphjs/concepts/deployment_options/)
+(includes a free tier).

-
+Here are some common issues that arise in complex deployments, which LangGraph Platform addresses:

-- **
-- **
-- **
-- **
-- **
+- **Streaming support**: LangGraph Server provides [multiple streaming modes](https://langchain-ai.github.io/langgraphjs/concepts/streaming) optimized for various application needs
+- **Background runs**: Runs agents asynchronously in the background
+- **Support for long running agents**: Infrastructure that can handle long running processes
+- **[Double texting](https://langchain-ai.github.io/langgraphjs/concepts/double_texting)**: Handle the case where you get two messages from the user before the agent can respond
+- **Handle burstiness**: Task queue for ensuring requests are handled consistently without loss, even under heavy loads

## Installation

-```
+```shell
npm install @langchain/langgraph @langchain/core
```

## Example

-
+Let's build a tool-calling [ReAct-style](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#react-implementation) agent that uses a search tool!

-
+```shell
+npm install @langchain/anthropic zod
+```
+
+```shell
+export ANTHROPIC_API_KEY=sk-...
+```

-
+Optionally, we can set up [LangSmith](https://docs.smith.langchain.com/) for best-in-class observability.

-```
-
+```shell
+export LANGSMITH_TRACING=true
+export LANGSMITH_API_KEY=lsv2_sk_...
```

-
+The simplest way to create a tool-calling agent in LangGraph is to use [`createReactAgent`](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html):

-
-
+<details open>
+<summary>High-level implementation</summary>
+
+```ts
+import { createReactAgent } from "@langchain/langgraph/prebuilt";
+import { MemorySaver } from "@langchain/langgraph";
+import { ChatAnthropic } from "@langchain/anthropic";
+import { tool } from "@langchain/core/tools";
+
+import { z } from "zod";
+
+// Define the tools for the agent to use
+const search = tool(async ({ query }) => {
+  // This is a placeholder, but don't tell the LLM that...
+  if (query.toLowerCase().includes("sf") || query.toLowerCase().includes("san francisco")) {
+    return "It's 60 degrees and foggy."
+  }
+  return "It's 90 degrees and sunny."
+}, {
+  name: "search",
+  description: "Call to surf the web.",
+  schema: z.object({
+    query: z.string().describe("The query to use in your search."),
+  }),
+});
+
+const tools = [search];
+const model = new ChatAnthropic({
+  model: "claude-3-5-sonnet-latest"
+});
+
+// Initialize memory to persist state between graph runs
+const checkpointer = new MemorySaver();
+
+const app = createReactAgent({
+  llm: model,
+  tools,
+  checkpointSaver: checkpointer,
+});
+
+// Use the agent
+const result = await app.invoke(
+  {
+    messages: [{
+      role: "user",
+      content: "what is the weather in sf"
+    }]
+  },
+  { configurable: { thread_id: 42 } }
+);
+console.log(result.messages.at(-1)?.content);
+```
+```
+"Based on the search results, it's currently 60 degrees Fahrenheit and foggy in San Francisco, which is quite typical weather for the city."
```

-
+Now when we pass the same <code>"thread_id"</code>, the conversation context is retained via the saved state (i.e. stored list of messages)
+
+```ts
+const followup = await app.invoke(
+  {
+    messages: [{
+      role: "user",
+      content: "what about ny"
+    }]
+  },
+  { configurable: { thread_id: 42 } }
+);
+
+console.log(followup.messages.at(-1)?.content);
+```

-```bash
-export LANGCHAIN_TRACING_V2=true
-export LANGCHAIN_API_KEY=ls__...
```
+"According to the search results, it's currently 90 degrees Fahrenheit and sunny in New York City. That's quite a warm day for New York!"
+```
+</details>
+
+> [!TIP]
+> LangGraph is a **low-level** framework that allows you to implement any custom agent
+architectures. Click on the low-level implementation below to see how to implement a
+tool-calling agent from scratch.

-
+<details>
+<summary>Low-level implementation</summary>

-```
+```ts
import { AIMessage, BaseMessage, HumanMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
@@ -145,139 +265,102 @@ const finalState = await app.invoke(
console.log(finalState.messages[finalState.messages.length - 1].content);
```

-(old README lines 148-234 removed here; their content is not captured in this diff view)
-4. The graph cycles through the following steps until there are no more `tool_calls` on the `AIMessage`:
-
-   - If `AIMessage` has `tool_calls`, the `"tools"` node executes.
-   - The `"agent"` node executes again and returns an `AIMessage`.
-
-5. Execution progresses to the special `__end__` value and outputs the final state.
-As a result, we get a list of all our chat messages as output.
-</details>
+<b>Step-by-step Breakdown</b>:
+
+<details>
+<summary>Initialize the model and tools.</summary>
+<ul>
+<li>
+We use <code>ChatAnthropic</code> as our LLM. <strong>NOTE:</strong> we need to make sure the model knows that it has these tools available to call. We can do this by converting the LangChain tools into the format for OpenAI tool calling using the <code>.bindTools()</code> method.
+</li>
+<li>
+We define the tools we want to use - a search tool in our case. It is really easy to create your own tools - see documentation here on how to do that <a href="https://js.langchain.com/docs/how_to/custom_tools">here</a>.
+</li>
+</ul>
+</details>
+
+<details>
+<summary>Initialize graph with state.</summary>
+
+<ul>
+<li>We initialize the graph (<code>StateGraph</code>) by passing state schema with a reducer that defines how the state should be updated. In our case, we want to append new messages to the list and overwrite messages with the same ID, so we use the prebuilt <code>messagesStateReducer</code>.</li>
+</ul>
+</details>
+
+<details>
+<summary>Define graph nodes.</summary>
+
+There are two main nodes we need:
+
+<ul>
+<li>The <code>agent</code> node: responsible for deciding what (if any) actions to take.</li>
+<li>The <code>tools</code> node that invokes tools: if the agent decides to take an action, this node will then execute that action.</li>
+</ul>
+</details>
+
+<details>
+<summary>Define entry point and graph edges.</summary>
+
+First, we need to set the entry point for graph execution - <code>agent</code> node.
+
+Then we define one normal and one conditional edge. Conditional edge means that the destination depends on the contents of the graph's state. In our case, the destination is not known until the agent (LLM) decides.
+
+<ul>
+<li>Conditional edge: after the agent is called, we should either:
+<ul>
+<li>a. Run tools if the agent said to take an action, OR</li>
+<li>b. Finish (respond to the user) if the agent did not ask to run tools</li>
+</ul>
+</li>
+<li>Normal edge: after the tools are invoked, the graph should always return to the agent to decide what to do next</li>
+</ul>
+</details>
+
+<details>
+<summary>Compile the graph.</summary>
+
+<ul>
+<li>
+When we compile the graph, we turn it into a LangChain
+<a href="https://js.langchain.com/docs/concepts/runnables">Runnable</a>,
+which automatically enables calling <code>.invoke()</code>, <code>.stream()</code> and <code>.batch()</code>
+with your inputs
+</li>
+<li>
+We can also optionally pass checkpointer object for persisting state between graph runs, and enabling memory,
+human-in-the-loop workflows, time travel and more. In our case we use <code>MemorySaver</code> -
+a simple in-memory checkpointer
+</li>
+</ul>
+</details>
+
+<details>
+<summary>Execute the graph.</summary>
+
+<ol>
+<li>LangGraph adds the input message to the internal state, then passes the state to the entrypoint node, <code>"agent"</code>.</li>
+<li>The <code>"agent"</code> node executes, invoking the chat model.</li>
+<li>The chat model returns an <code>AIMessage</code>. LangGraph adds this to the state.</li>
+<li>Graph cycles the following steps until there are no more <code>tool_calls</code> on <code>AIMessage</code>:
+<ul>
+<li>If <code>AIMessage</code> has <code>tool_calls</code>, <code>"tools"</code> node executes</li>
+<li>The <code>"agent"</code> node executes again and returns <code>AIMessage</code></li>
+</ul>
+</li>
+<li>Execution progresses to the special <code>END</code> value and outputs the final state. And as a result, we get a list of all our chat messages as output.</li>
+</ol>
+</details>
+
+</details>

## Documentation

-(old README lines 246-250 removed here; their content is not captured in this diff view)
-## Running Example Jupyter Notebooks
+* [Tutorials](https://langchain-ai.github.io/langgraphjs/tutorials/): Learn to build with LangGraph through guided examples.
+* [How-to Guides](https://langchain-ai.github.io/langgraphjs/how-tos/): Accomplish specific things within LangGraph, from streaming, to adding memory & persistence, to common design patterns (branching, subgraphs, etc.), these are the place to go if you want to copy and run a specific code snippet.
+* [Conceptual Guides](https://langchain-ai.github.io/langgraphjs/concepts/high_level/): In-depth explanations of the key concepts and principles behind LangGraph, such as nodes, edges, state and more.
+* [API Reference](https://langchain-ai.github.io/langgraphjs/reference/): Review important classes and methods, simple examples of how to use the graph and checkpointing APIs, higher-level prebuilt components and more.
+* [LangGraph Platform](https://langchain-ai.github.io/langgraphjs/concepts/#langgraph-platform): LangGraph Platform is a commercial solution for deploying agentic applications in production, built on the open-source LangGraph framework.

-
+## Contributing

-
-
-```
-{
-  "compilerOptions": {
-    "esModuleInterop": true,
-    "moduleResolution": "node",
-    "target": "ES2020",
-    "module": "ES2020",
-    "lib": [
-      "ES2020"
-    ],
-    "strict": true,
-    "baseUrl": ".",
-    "paths": {
-      "@langchain/langgraph": [
-        "../langgraph/src"
-      ]
-    }
-  },
-  "include": [
-    "./**/*.ts",
-    "./**/*.tsx"
-  ],
-  "exclude": [
-    "node_modules"
-  ]
-}
-```
+For more information on how to contribute, see [here](https://github.com/langchain-ai/langgraphjs/blob/main/CONTRIBUTING.md).
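The step-by-step breakdown added above refers to low-level graph code that this diff view elides (old README lines 148-234). For orientation only, here is a minimal sketch of the kind of tool-calling graph it describes, written against the public StateGraph API. It uses the prebuilt MessagesAnnotation where the README defines its own annotation with messagesStateReducer, and it is not the exact README code.

```ts
import { AIMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { ChatAnthropic } from "@langchain/anthropic";
import { StateGraph, MessagesAnnotation, MemorySaver, START, END } from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";

// A single placeholder search tool, mirroring the high-level example above.
const search = tool(async ({ query }) => `Results for: ${query}`, {
  name: "search",
  description: "Call to surf the web.",
  schema: z.object({ query: z.string() }),
});
const tools = [search];

// The model must be told which tools it can call, hence .bindTools().
const model = new ChatAnthropic({ model: "claude-3-5-sonnet-latest" }).bindTools(tools);

// "agent" node: invoke the chat model on the accumulated messages.
const callModel = async (state: typeof MessagesAnnotation.State) => {
  const response = await model.invoke(state.messages);
  return { messages: [response] };
};

// Conditional edge: run tools if the last AIMessage requested them, otherwise finish.
const shouldContinue = (state: typeof MessagesAnnotation.State) => {
  const last = state.messages[state.messages.length - 1] as AIMessage;
  return last.tool_calls?.length ? "tools" : END;
};

const app = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
  .addNode("tools", new ToolNode(tools))
  .addEdge(START, "agent") // entry point
  .addConditionalEdges("agent", shouldContinue)
  .addEdge("tools", "agent") // after tools run, return to the agent
  .compile({ checkpointer: new MemorySaver() });

// Same invocation pattern as the high-level example.
const finalState = await app.invoke(
  { messages: [{ role: "user", content: "what is the weather in sf" }] },
  { configurable: { thread_id: "low-level-demo" } }
);
console.log(finalState.messages[finalState.messages.length - 1].content);
```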
package/dist/channels/any_value.cjs
CHANGED
@@ -20,42 +20,42 @@ class AnyValue extends base_js_1.BaseChannel {
            writable: true,
            value: "AnyValue"
        });
+        // value is an array so we don't misinterpret an update to undefined as no write
        Object.defineProperty(this, "value", {
            enumerable: true,
            configurable: true,
            writable: true,
-            value:
+            value: []
        });
-        this.value = undefined;
    }
    fromCheckpoint(checkpoint) {
        const empty = new AnyValue();
        if (checkpoint) {
-            empty.value = checkpoint;
+            empty.value = [checkpoint];
        }
        return empty;
    }
    update(values) {
        if (values.length === 0) {
-            const updated = this.value
-            this.value =
+            const updated = this.value.length > 0;
+            this.value = [];
            return updated;
        }
        // eslint-disable-next-line prefer-destructuring
-        this.value = values[values.length - 1];
+        this.value = [values[values.length - 1]];
        return false;
    }
    get() {
-        if (this.value ===
+        if (this.value.length === 0) {
            throw new errors_js_1.EmptyChannelError();
        }
-        return this.value;
+        return this.value[0];
    }
    checkpoint() {
-        if (this.value ===
+        if (this.value.length === 0) {
            throw new errors_js_1.EmptyChannelError();
        }
-        return this.value;
+        return this.value[0];
    }
}
exports.AnyValue = AnyValue;
package/dist/channels/any_value.d.ts
CHANGED
@@ -9,7 +9,7 @@ import { BaseChannel } from "./base.js";
 */
export declare class AnyValue<Value> extends BaseChannel<Value, Value, Value> {
    lc_graph_name: string;
-    value: Value |
+    value: [Value] | [];
    constructor();
    fromCheckpoint(checkpoint?: Value): this;
    update(values: Value[]): boolean;
package/dist/channels/any_value.js
CHANGED
@@ -17,41 +17,41 @@ export class AnyValue extends BaseChannel {
            writable: true,
            value: "AnyValue"
        });
+        // value is an array so we don't misinterpret an update to undefined as no write
        Object.defineProperty(this, "value", {
            enumerable: true,
            configurable: true,
            writable: true,
-            value:
+            value: []
        });
-        this.value = undefined;
    }
    fromCheckpoint(checkpoint) {
        const empty = new AnyValue();
        if (checkpoint) {
-            empty.value = checkpoint;
+            empty.value = [checkpoint];
        }
        return empty;
    }
    update(values) {
        if (values.length === 0) {
-            const updated = this.value
-            this.value =
+            const updated = this.value.length > 0;
+            this.value = [];
            return updated;
        }
        // eslint-disable-next-line prefer-destructuring
-        this.value = values[values.length - 1];
+        this.value = [values[values.length - 1]];
        return false;
    }
    get() {
-        if (this.value ===
+        if (this.value.length === 0) {
            throw new EmptyChannelError();
        }
-        return this.value;
+        return this.value[0];
    }
    checkpoint() {
-        if (this.value ===
+        if (this.value.length === 0) {
            throw new EmptyChannelError();
        }
-        return this.value;
+        return this.value[0];
    }
}
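The AnyValue diffs above (and the EphemeralValue diffs below) all make the same change: the channel's stored value moves from a bare value-or-undefined slot to a one-element tuple (`[Value] | []`), matching the added comment about not misinterpreting an update to `undefined` as "no write". A minimal standalone sketch of that distinction follows; it is not the package's code, and the class names are made up for illustration.

```ts
// Sentinel approach: a stored `undefined` is indistinguishable from "never written".
class SentinelChannel<Value> {
  value: Value | undefined;
  isEmpty() {
    return this.value === undefined; // wrong if someone legitimately wrote undefined
  }
}

// Tuple approach (what the diff adopts): emptiness is tracked by array length,
// so the stored value itself can be anything, including undefined.
class TupleChannel<Value> {
  value: [Value] | [] = [];
  write(v: Value) {
    this.value = [v];
  }
  isEmpty() {
    return this.value.length === 0;
  }
  get(): Value {
    if (this.value.length === 0) {
      throw new Error("Channel is empty");
    }
    return this.value[0];
  }
}

const s = new SentinelChannel<string | undefined>();
s.value = undefined;       // an explicit write of undefined...
console.log(s.isEmpty());  // ...looks like "never written": true

const t = new TupleChannel<string | undefined>();
t.write(undefined);        // the same write through the tuple representation...
console.log(t.isEmpty());  // ...is correctly seen as a write: false
```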
package/dist/channels/ephemeral_value.cjs
CHANGED
@@ -22,46 +22,47 @@ class EphemeralValue extends index_js_1.BaseChannel {
            writable: true,
            value: void 0
        });
+        // value is an array so we don't misinterpret an update to undefined as no write
        Object.defineProperty(this, "value", {
            enumerable: true,
            configurable: true,
            writable: true,
-            value:
+            value: []
        });
        this.guard = guard;
    }
    fromCheckpoint(checkpoint) {
        const empty = new EphemeralValue(this.guard);
        if (checkpoint) {
-            empty.value = checkpoint;
+            empty.value = [checkpoint];
        }
        return empty;
    }
    update(values) {
        if (values.length === 0) {
-            const updated = this.value
+            const updated = this.value.length > 0;
            // If there are no updates for this specific channel at the end of the step, wipe it.
-            this.value =
+            this.value = [];
            return updated;
        }
        if (values.length !== 1 && this.guard) {
            throw new errors_js_1.InvalidUpdateError("EphemeralValue can only receive one value per step.");
        }
        // eslint-disable-next-line prefer-destructuring
-        this.value = values[values.length - 1];
+        this.value = [values[values.length - 1]];
        return true;
    }
    get() {
-        if (this.value ===
+        if (this.value.length === 0) {
            throw new errors_js_1.EmptyChannelError();
        }
-        return this.value;
+        return this.value[0];
    }
    checkpoint() {
-        if (this.value ===
+        if (this.value.length === 0) {
            throw new errors_js_1.EmptyChannelError();
        }
-        return this.value;
+        return this.value[0];
    }
}
exports.EphemeralValue = EphemeralValue;
package/dist/channels/ephemeral_value.d.ts
CHANGED
@@ -6,7 +6,7 @@ import { BaseChannel } from "./index.js";
export declare class EphemeralValue<Value> extends BaseChannel<Value, Value, Value> {
    lc_graph_name: string;
    guard: boolean;
-    value
+    value: [Value] | [];
    constructor(guard?: boolean);
    fromCheckpoint(checkpoint?: Value): this;
    update(values: Value[]): boolean;