@langchain/langgraph 0.0.30 → 0.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +75 -28
  2. package/dist/channels/base.cjs +14 -0
  3. package/dist/channels/base.d.ts +2 -0
  4. package/dist/channels/base.js +14 -0
  5. package/dist/graph/message.d.ts +1 -1
  6. package/dist/graph/state.cjs +36 -2
  7. package/dist/graph/state.d.ts +23 -9
  8. package/dist/graph/state.js +34 -1
  9. package/dist/index.cjs +2 -1
  10. package/dist/index.js +2 -1
  11. package/dist/prebuilt/agent_executor.d.ts +1 -1
  12. package/dist/pregel/index.cjs +26 -21
  13. package/dist/pregel/index.js +26 -21
  14. package/package.json +9 -16
  15. package/dist/tests/channels.test.d.ts +0 -1
  16. package/dist/tests/channels.test.js +0 -151
  17. package/dist/tests/chatbot.int.test.d.ts +0 -1
  18. package/dist/tests/chatbot.int.test.js +0 -66
  19. package/dist/tests/checkpoints.test.d.ts +0 -1
  20. package/dist/tests/checkpoints.test.js +0 -178
  21. package/dist/tests/diagrams.test.d.ts +0 -1
  22. package/dist/tests/diagrams.test.js +0 -25
  23. package/dist/tests/graph.test.d.ts +0 -1
  24. package/dist/tests/graph.test.js +0 -33
  25. package/dist/tests/prebuilt.int.test.d.ts +0 -1
  26. package/dist/tests/prebuilt.int.test.js +0 -207
  27. package/dist/tests/prebuilt.test.d.ts +0 -1
  28. package/dist/tests/prebuilt.test.js +0 -427
  29. package/dist/tests/pregel.io.test.d.ts +0 -1
  30. package/dist/tests/pregel.io.test.js +0 -332
  31. package/dist/tests/pregel.read.test.d.ts +0 -1
  32. package/dist/tests/pregel.read.test.js +0 -109
  33. package/dist/tests/pregel.test.d.ts +0 -1
  34. package/dist/tests/pregel.test.js +0 -1882
  35. package/dist/tests/pregel.validate.test.d.ts +0 -1
  36. package/dist/tests/pregel.validate.test.js +0 -198
  37. package/dist/tests/pregel.write.test.d.ts +0 -1
  38. package/dist/tests/pregel.write.test.js +0 -44
  39. package/dist/tests/tracing.int.test.d.ts +0 -1
  40. package/dist/tests/tracing.int.test.js +0 -450
  41. package/dist/tests/tracing.test.d.ts +0 -1
  42. package/dist/tests/tracing.test.js +0 -332
  43. package/dist/tests/utils.d.ts +0 -53
  44. package/dist/tests/utils.js +0 -167
@@ -1,207 +0,0 @@
1
- /* eslint-disable no-process-env */
2
- import { it, beforeAll, describe, expect } from "@jest/globals";
3
- import { Tool } from "@langchain/core/tools";
4
- import { ChatOpenAI } from "@langchain/openai";
5
- import { HumanMessage } from "@langchain/core/messages";
6
- import { RunnableLambda } from "@langchain/core/runnables";
7
- import { z } from "zod";
8
- import { createReactAgent, createFunctionCallingExecutor, } from "../prebuilt/index.js";
9
- import { initializeAsyncLocalStorageSingleton } from "../setup/async_local_storage.js";
10
- // Tracing slows down the tests
11
- beforeAll(() => {
12
- // process.env.LANGCHAIN_TRACING_V2 = "false";
13
- // process.env.LANGCHAIN_ENDPOINT = "";
14
- // process.env.LANGCHAIN_API_KEY = "";
15
- // process.env.LANGCHAIN_PROJECT = "";
16
- // Will occur naturally if user imports from main `@langchain/langgraph` endpoint.
17
- initializeAsyncLocalStorageSingleton();
18
- });
19
- describe("createFunctionCallingExecutor", () => {
20
- it("can call a function", async () => {
21
- const weatherResponse = `Not too cold, not too hot 😎`;
22
- const model = new ChatOpenAI();
23
- class SanFranciscoWeatherTool extends Tool {
24
- constructor() {
25
- super();
26
- Object.defineProperty(this, "name", {
27
- enumerable: true,
28
- configurable: true,
29
- writable: true,
30
- value: "current_weather"
31
- });
32
- Object.defineProperty(this, "description", {
33
- enumerable: true,
34
- configurable: true,
35
- writable: true,
36
- value: "Get the current weather report for San Francisco, CA"
37
- });
38
- }
39
- async _call(_) {
40
- return weatherResponse;
41
- }
42
- }
43
- const tools = [new SanFranciscoWeatherTool()];
44
- const functionsAgentExecutor = createFunctionCallingExecutor({
45
- model,
46
- tools,
47
- });
48
- const response = await functionsAgentExecutor.invoke({
49
- messages: [new HumanMessage("What's the weather like in SF?")],
50
- });
51
- // It needs at least one human message, one AI and one function message.
52
- expect(response.messages.length > 3).toBe(true);
53
- const firstFunctionMessage = response.messages.find((message) => message._getType() === "function");
54
- expect(firstFunctionMessage).toBeDefined();
55
- expect(firstFunctionMessage?.content).toBe(weatherResponse);
56
- });
57
- it("can stream a function", async () => {
58
- const weatherResponse = `Not too cold, not too hot 😎`;
59
- const model = new ChatOpenAI({
60
- streaming: true,
61
- });
62
- class SanFranciscoWeatherTool extends Tool {
63
- constructor() {
64
- super();
65
- Object.defineProperty(this, "name", {
66
- enumerable: true,
67
- configurable: true,
68
- writable: true,
69
- value: "current_weather"
70
- });
71
- Object.defineProperty(this, "description", {
72
- enumerable: true,
73
- configurable: true,
74
- writable: true,
75
- value: "Get the current weather report for San Francisco, CA"
76
- });
77
- }
78
- async _call(_) {
79
- return weatherResponse;
80
- }
81
- }
82
- const tools = [new SanFranciscoWeatherTool()];
83
- const functionsAgentExecutor = createFunctionCallingExecutor({
84
- model,
85
- tools,
86
- });
87
- const stream = await functionsAgentExecutor.stream({
88
- messages: [new HumanMessage("What's the weather like in SF?")],
89
- }, { streamMode: "values" });
90
- const fullResponse = [];
91
- for await (const item of stream) {
92
- fullResponse.push(item);
93
- }
94
- // human -> agent -> action -> agent
95
- expect(fullResponse.length).toEqual(4);
96
- const endState = fullResponse[fullResponse.length - 1];
97
- // 1 human, 2 llm calls, 1 function call.
98
- expect(endState.messages.length).toEqual(4);
99
- const functionCall = endState.messages.find((message) => message._getType() === "function");
100
- expect(functionCall.content).toBe(weatherResponse);
101
- });
102
- it("can accept RunnableToolLike tools", async () => {
103
- const weatherResponse = `Not too cold, not too hot 😎`;
104
- const model = new ChatOpenAI();
105
- const sfWeatherTool = RunnableLambda.from(async (_) => weatherResponse);
106
- const tools = [
107
- sfWeatherTool.asTool({
108
- name: "current_weather",
109
- description: "Get the current weather report for San Francisco, CA",
110
- schema: z.object({
111
- location: z.string(),
112
- }),
113
- }),
114
- ];
115
- const functionsAgentExecutor = createFunctionCallingExecutor({
116
- model,
117
- tools,
118
- });
119
- const response = await functionsAgentExecutor.invoke({
120
- messages: [new HumanMessage("What's the weather like in SF?")],
121
- });
122
- // It needs at least one human message, one AI and one function message.
123
- expect(response.messages.length > 3).toBe(true);
124
- const firstFunctionMessage = response.messages.find((message) => message._getType() === "function");
125
- expect(firstFunctionMessage).toBeDefined();
126
- expect(firstFunctionMessage?.content).toBe(weatherResponse);
127
- });
128
- });
129
- describe("createReactAgent", () => {
130
- it("can call a tool", async () => {
131
- const weatherResponse = `Not too cold, not too hot 😎`;
132
- const model = new ChatOpenAI();
133
- class SanFranciscoWeatherTool extends Tool {
134
- constructor() {
135
- super();
136
- Object.defineProperty(this, "name", {
137
- enumerable: true,
138
- configurable: true,
139
- writable: true,
140
- value: "current_weather"
141
- });
142
- Object.defineProperty(this, "description", {
143
- enumerable: true,
144
- configurable: true,
145
- writable: true,
146
- value: "Get the current weather report for San Francisco, CA"
147
- });
148
- }
149
- async _call(_) {
150
- return weatherResponse;
151
- }
152
- }
153
- const tools = [new SanFranciscoWeatherTool()];
154
- const reactAgent = createReactAgent({ llm: model, tools });
155
- const response = await reactAgent.invoke({
156
- messages: [new HumanMessage("What's the weather like in SF?")],
157
- });
158
- // It needs at least one human message and one AI message.
159
- expect(response.messages.length > 1).toBe(true);
160
- const lastMessage = response.messages[response.messages.length - 1];
161
- expect(lastMessage._getType()).toBe("ai");
162
- expect(lastMessage.content.toLowerCase()).toContain("not too cold");
163
- });
164
- it("can stream a tool call", async () => {
165
- const weatherResponse = `Not too cold, not too hot 😎`;
166
- const model = new ChatOpenAI({
167
- streaming: true,
168
- });
169
- class SanFranciscoWeatherTool extends Tool {
170
- constructor() {
171
- super();
172
- Object.defineProperty(this, "name", {
173
- enumerable: true,
174
- configurable: true,
175
- writable: true,
176
- value: "current_weather"
177
- });
178
- Object.defineProperty(this, "description", {
179
- enumerable: true,
180
- configurable: true,
181
- writable: true,
182
- value: "Get the current weather report for San Francisco, CA"
183
- });
184
- }
185
- async _call(_) {
186
- return weatherResponse;
187
- }
188
- }
189
- const tools = [new SanFranciscoWeatherTool()];
190
- const reactAgent = createReactAgent({ llm: model, tools });
191
- const stream = await reactAgent.stream({
192
- messages: [new HumanMessage("What's the weather like in SF?")],
193
- }, { streamMode: "values" });
194
- const fullResponse = [];
195
- for await (const item of stream) {
196
- fullResponse.push(item);
197
- }
198
- // human -> agent -> action -> agent
199
- expect(fullResponse.length).toEqual(4);
200
- const endState = fullResponse[fullResponse.length - 1];
201
- // 1 human, 2 ai, 1 tool.
202
- expect(endState.messages.length).toEqual(4);
203
- const lastMessage = endState.messages[endState.messages.length - 1];
204
- expect(lastMessage._getType()).toBe("ai");
205
- expect(lastMessage.content.toLowerCase()).toContain("not too cold");
206
- });
207
- });
@@ -1 +0,0 @@
1
- export {};
@@ -1,427 +0,0 @@
1
- /* eslint-disable no-process-env */
2
- import { beforeAll, describe, expect, it } from "@jest/globals";
3
- import { PromptTemplate } from "@langchain/core/prompts";
4
- import { StructuredTool, Tool } from "@langchain/core/tools";
5
- import { FakeStreamingLLM } from "@langchain/core/utils/testing";
6
- import { AIMessage, HumanMessage, SystemMessage, ToolMessage, } from "@langchain/core/messages";
7
- import { z } from "zod";
8
- import { RunnableLambda } from "@langchain/core/runnables";
9
- import { FakeToolCallingChatModel } from "./utils.js";
10
- import { ToolNode, createAgentExecutor, createReactAgent, } from "../prebuilt/index.js";
11
- // Tracing slows down the tests
12
- beforeAll(() => {
13
- process.env.LANGCHAIN_TRACING_V2 = "false";
14
- process.env.LANGCHAIN_ENDPOINT = "";
15
- process.env.LANGCHAIN_ENDPOINT = "";
16
- process.env.LANGCHAIN_API_KEY = "";
17
- process.env.LANGCHAIN_PROJECT = "";
18
- });
19
- const searchSchema = z.object({
20
- query: z.string().describe("The query to search for."),
21
- });
22
- class SearchAPI extends StructuredTool {
23
- constructor() {
24
- super(...arguments);
25
- Object.defineProperty(this, "name", {
26
- enumerable: true,
27
- configurable: true,
28
- writable: true,
29
- value: "search_api"
30
- });
31
- Object.defineProperty(this, "description", {
32
- enumerable: true,
33
- configurable: true,
34
- writable: true,
35
- value: "A simple API that returns the input string."
36
- });
37
- Object.defineProperty(this, "schema", {
38
- enumerable: true,
39
- configurable: true,
40
- writable: true,
41
- value: searchSchema
42
- });
43
- }
44
- async _call(input) {
45
- return `result for ${input?.query}`;
46
- }
47
- }
48
- class SearchAPIWithArtifact extends StructuredTool {
49
- constructor() {
50
- super(...arguments);
51
- Object.defineProperty(this, "name", {
52
- enumerable: true,
53
- configurable: true,
54
- writable: true,
55
- value: "search_api"
56
- });
57
- Object.defineProperty(this, "description", {
58
- enumerable: true,
59
- configurable: true,
60
- writable: true,
61
- value: "A simple API that returns the input string."
62
- });
63
- Object.defineProperty(this, "schema", {
64
- enumerable: true,
65
- configurable: true,
66
- writable: true,
67
- value: searchSchema
68
- });
69
- Object.defineProperty(this, "responseFormat", {
70
- enumerable: true,
71
- configurable: true,
72
- writable: true,
73
- value: "content_and_artifact"
74
- });
75
- }
76
- async _call(_) {
77
- return ["some response format", Buffer.from("123")];
78
- }
79
- }
80
- describe("PreBuilt", () => {
81
- class SearchAPI extends Tool {
82
- constructor() {
83
- super();
84
- Object.defineProperty(this, "name", {
85
- enumerable: true,
86
- configurable: true,
87
- writable: true,
88
- value: "search_api"
89
- });
90
- Object.defineProperty(this, "description", {
91
- enumerable: true,
92
- configurable: true,
93
- writable: true,
94
- value: "A simple API that returns the input string."
95
- });
96
- }
97
- async _call(query) {
98
- return `result for ${query}`;
99
- }
100
- }
101
- const tools = [new SearchAPI()];
102
- it("Can invoke createAgentExecutor", async () => {
103
- const prompt = PromptTemplate.fromTemplate("Hello!");
104
- const llm = new FakeStreamingLLM({
105
- responses: [
106
- "tool:search_api:query",
107
- "tool:search_api:another",
108
- "finish:answer",
109
- ],
110
- });
111
- const agentParser = (input) => {
112
- if (input.startsWith("finish")) {
113
- const answer = input.split(":")[1];
114
- return {
115
- returnValues: { answer },
116
- log: input,
117
- };
118
- }
119
- const [, toolName, toolInput] = input.split(":");
120
- return {
121
- tool: toolName,
122
- toolInput,
123
- log: input,
124
- };
125
- };
126
- const agent = prompt.pipe(llm).pipe(agentParser);
127
- const agentExecutor = createAgentExecutor({
128
- agentRunnable: agent,
129
- tools,
130
- });
131
- const result = await agentExecutor.invoke({
132
- input: "what is the weather in sf?",
133
- });
134
- expect(result).toEqual({
135
- input: "what is the weather in sf?",
136
- agentOutcome: {
137
- returnValues: {
138
- answer: "answer",
139
- },
140
- log: "finish:answer",
141
- },
142
- steps: [
143
- {
144
- action: {
145
- tool: "search_api",
146
- toolInput: "query",
147
- log: "tool:search_api:query",
148
- },
149
- observation: "result for query",
150
- },
151
- {
152
- action: {
153
- tool: "search_api",
154
- toolInput: "another",
155
- log: "tool:search_api:another",
156
- },
157
- observation: "result for another",
158
- },
159
- ],
160
- });
161
- });
162
- it("Can stream createAgentExecutor", async () => {
163
- const prompt = PromptTemplate.fromTemplate("Hello!");
164
- const llm = new FakeStreamingLLM({
165
- responses: [
166
- "tool:search_api:query",
167
- "tool:search_api:another",
168
- "finish:answer",
169
- ],
170
- });
171
- const agentParser = (input) => {
172
- if (input.startsWith("finish")) {
173
- const answer = input.split(":")[1];
174
- return {
175
- returnValues: { answer },
176
- log: input,
177
- };
178
- }
179
- const [, toolName, toolInput] = input.split(":");
180
- return {
181
- tool: toolName,
182
- toolInput,
183
- log: input,
184
- };
185
- };
186
- const agent = prompt.pipe(llm).pipe(agentParser);
187
- const agentExecutor = createAgentExecutor({
188
- agentRunnable: agent,
189
- tools,
190
- });
191
- const stream = agentExecutor.stream({
192
- input: "what is the weather in sf?",
193
- });
194
- const fullResponse = [];
195
- for await (const item of await stream) {
196
- fullResponse.push(item);
197
- }
198
- expect(fullResponse.length > 3).toBe(true);
199
- const allAgentMessages = fullResponse.filter((res) => "agent" in res);
200
- expect(allAgentMessages.length >= 3).toBe(true);
201
- expect(fullResponse).toEqual([
202
- {
203
- agent: {
204
- agentOutcome: {
205
- log: "tool:search_api:query",
206
- tool: "search_api",
207
- toolInput: "query",
208
- },
209
- },
210
- },
211
- {
212
- action: {
213
- steps: [
214
- {
215
- action: {
216
- log: "tool:search_api:query",
217
- tool: "search_api",
218
- toolInput: "query",
219
- },
220
- observation: "result for query",
221
- },
222
- ],
223
- },
224
- },
225
- {
226
- agent: {
227
- agentOutcome: {
228
- log: "tool:search_api:another",
229
- tool: "search_api",
230
- toolInput: "another",
231
- },
232
- },
233
- },
234
- {
235
- action: {
236
- steps: [
237
- {
238
- action: {
239
- log: "tool:search_api:another",
240
- tool: "search_api",
241
- toolInput: "another",
242
- },
243
- observation: "result for another",
244
- },
245
- ],
246
- },
247
- },
248
- {
249
- agent: {
250
- agentOutcome: {
251
- log: "finish:answer",
252
- returnValues: {
253
- answer: "answer",
254
- },
255
- },
256
- },
257
- },
258
- ]);
259
- });
260
- });
261
- describe("createReactAgent", () => {
262
- const tools = [new SearchAPI()];
263
- it("Can use string message modifier", async () => {
264
- const llm = new FakeToolCallingChatModel({
265
- responses: [
266
- new AIMessage({
267
- content: "result1",
268
- tool_calls: [
269
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
270
- ],
271
- }),
272
- new AIMessage("result2"),
273
- ],
274
- });
275
- const agent = createReactAgent({
276
- llm,
277
- tools,
278
- messageModifier: "You are a helpful assistant",
279
- });
280
- const result = await agent.invoke({
281
- messages: [new HumanMessage("Hello Input!")],
282
- });
283
- expect(result.messages).toEqual([
284
- new HumanMessage("Hello Input!"),
285
- new AIMessage({
286
- content: "result1",
287
- tool_calls: [
288
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
289
- ],
290
- }),
291
- new ToolMessage({
292
- name: "search_api",
293
- content: "result for foo",
294
- tool_call_id: "tool_abcd123",
295
- }),
296
- new AIMessage("result2"),
297
- ]);
298
- });
299
- it("Can use SystemMessage message modifier", async () => {
300
- const llm = new FakeToolCallingChatModel({
301
- responses: [
302
- new AIMessage({
303
- content: "result1",
304
- tool_calls: [
305
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
306
- ],
307
- }),
308
- new AIMessage("result2"),
309
- ],
310
- });
311
- const agent = createReactAgent({
312
- llm,
313
- tools,
314
- messageModifier: new SystemMessage("You are a helpful assistant"),
315
- });
316
- const result = await agent.invoke({
317
- messages: [],
318
- });
319
- expect(result.messages).toEqual([
320
- new AIMessage({
321
- content: "result1",
322
- tool_calls: [
323
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
324
- ],
325
- }),
326
- new ToolMessage({
327
- name: "search_api",
328
- content: "result for foo",
329
- tool_call_id: "tool_abcd123",
330
- }),
331
- new AIMessage("result2"),
332
- ]);
333
- });
334
- it("Works with tools that return content_and_artifact response format", async () => {
335
- const llm = new FakeToolCallingChatModel({
336
- responses: [
337
- new AIMessage({
338
- content: "result1",
339
- tool_calls: [
340
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
341
- ],
342
- }),
343
- new AIMessage("result2"),
344
- ],
345
- });
346
- const agent = createReactAgent({
347
- llm,
348
- tools: [new SearchAPIWithArtifact()],
349
- messageModifier: "You are a helpful assistant",
350
- });
351
- const result = await agent.invoke({
352
- messages: [new HumanMessage("Hello Input!")],
353
- });
354
- expect(result.messages).toEqual([
355
- new HumanMessage("Hello Input!"),
356
- new AIMessage({
357
- content: "result1",
358
- tool_calls: [
359
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
360
- ],
361
- }),
362
- new ToolMessage({
363
- name: "search_api",
364
- content: "some response format",
365
- tool_call_id: "tool_abcd123",
366
- artifact: Buffer.from("123"),
367
- }),
368
- new AIMessage("result2"),
369
- ]);
370
- });
371
- it("Can accept RunnableToolLike", async () => {
372
- const llm = new FakeToolCallingChatModel({
373
- responses: [
374
- new AIMessage({
375
- content: "result1",
376
- tool_calls: [
377
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
378
- ],
379
- }),
380
- new AIMessage("result2"),
381
- ],
382
- });
383
- // Instead of re-implementing the tool, wrap it in a RunnableLambda and
384
- // call `asTool` to create a RunnableToolLike.
385
- const searchApiTool = new SearchAPI();
386
- const runnableToolLikeTool = RunnableLambda.from(async (input, config) => searchApiTool.invoke(input, config)).asTool({
387
- name: searchApiTool.name,
388
- description: searchApiTool.description,
389
- schema: searchApiTool.schema,
390
- });
391
- const agent = createReactAgent({
392
- llm,
393
- tools: [runnableToolLikeTool],
394
- messageModifier: "You are a helpful assistant",
395
- });
396
- const result = await agent.invoke({
397
- messages: [new HumanMessage("Hello Input!")],
398
- });
399
- expect(result.messages).toEqual([
400
- new HumanMessage("Hello Input!"),
401
- new AIMessage({
402
- content: "result1",
403
- tool_calls: [
404
- { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
405
- ],
406
- }),
407
- new ToolMessage({
408
- name: "search_api",
409
- content: "result for foo",
410
- tool_call_id: "tool_abcd123",
411
- }),
412
- new AIMessage("result2"),
413
- ]);
414
- });
415
- });
416
- describe("ToolNode", () => {
417
- it("Should support graceful error handling", async () => {
418
- const toolNode = new ToolNode([new SearchAPI()]);
419
- const res = await toolNode.invoke([
420
- new AIMessage({
421
- content: "",
422
- tool_calls: [{ name: "badtool", args: {}, id: "testid" }],
423
- }),
424
- ]);
425
- expect(res[0].content).toEqual(`Error: Tool "badtool" not found.\n Please fix your mistakes.`);
426
- });
427
- });
@@ -1 +0,0 @@
1
- export {};