@langchain/langgraph 0.0.23 → 0.0.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
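The headline API change in this release: `setEntryPoint` is replaced throughout the README and tests by adding an edge from the newly exported `START` sentinel. A minimal sketch of the new pattern, assembled from the diff below (assumes `@langchain/langgraph@0.0.24`, `@langchain/openai`, and an `OPENAI_API_KEY` in the environment):

```ts
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
import { START, END, MessageGraph } from "@langchain/langgraph";

const model = new ChatOpenAI({ temperature: 0 });

const graph = new MessageGraph()
  .addNode("oracle", async (state) => model.invoke(state))
  // Previously: .setEntryPoint("oracle")
  .addEdge(START, "oracle")
  .addEdge("oracle", END)
  .compile();

const res = await graph.invoke(new HumanMessage("What is 1 + 1?"));
console.log(res);
```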
package/README.md CHANGED
@@ -46,7 +46,7 @@ And now we're ready! The graph below contains a single node called `"oracle"` th
  ```ts
  import { ChatOpenAI } from "@langchain/openai";
  import { HumanMessage, BaseMessage, } from "@langchain/core/messages";
- import { END, MessageGraph } from "@langchain/langgraph";
+ import { START, END, MessageGraph } from "@langchain/langgraph";

  const model = new ChatOpenAI({ temperature: 0 });

@@ -58,7 +58,7 @@ graph.addNode("oracle", async (state: BaseMessage[]) => {

  graph.addEdge("oracle", END);

- graph.setEntryPoint("oracle");
+ graph.addEdge(START, "oracle");

  const runnable = graph.compile();
  ```
@@ -90,7 +90,7 @@ So what did we do here? Let's break it down step by step:
  1. First, we initialize our model and a `MessageGraph`.
  2. Next, we add a single node to the graph, called `"oracle"`, which simply calls the model with the given input.
  3. We add an edge from this `"oracle"` node to the special value `END`. This means that execution will end after the current node.
- 4. We set `"oracle"` as the entrypoint to the graph.
+ 4. We set `"oracle"` as the entrypoint to the graph by adding an edge from the special `START` value to it.
  5. We compile the graph, ensuring that no more modifications to it can be made.

  Then, when we execute the graph:
@@ -185,7 +185,7 @@ graph.addNode("calculator", async (state: BaseMessage[]) => {

  graph.addEdge("calculator", END);

- graph.setEntryPoint("oracle");
+ graph.addEdge(START, "oracle");
  ```

  Now let's think - what do we want to have happen?
@@ -477,7 +477,7 @@ const callTool = async (
  We can now put it all together and define the graph!

  ```typescript
- import { StateGraph, END } from "@langchain/langgraph";
+ import { StateGraph, START, END } from "@langchain/langgraph";
  import { RunnableLambda } from "@langchain/core/runnables";

  // Define a new graph
@@ -491,7 +491,7 @@ workflow.addNode("action", callTool);

  // Set the entrypoint as `agent`
  // This means that this node is the first one called
- workflow.setEntryPoint("agent");
+ workflow.addEdge(START, "agent");

  // We now add a conditional edge
  workflow.addConditionalEdges(
@@ -723,31 +723,14 @@ This takes three arguments:
  - `condition`: A function to call to decide what to do next. The input will be the output of the start node. It should return a string that is present in `conditionalEdgeMapping` and represents the edge to take.
  - `conditionalEdgeMapping`: A mapping of string to string. The keys should be strings that may be returned by `condition`. The values should be the downstream node to call if that condition is returned.

- ### `.setEntryPoint`
+ ### `START`

  ```typescript
- setEntryPoint(key: string): void
+ import { START } from "@langchain/langgraph";
  ```

- The entrypoint to the graph.
- This is the node that is first called.
- It only takes one argument:
-
- - `key`: The name of the node that should be called first.
-
- ### `.setFinishPoint`
-
- ```typescript
- setFinishPoint(key: string): void
- ```
-
- This is the exit point of the graph.
- When this node is called, the results will be the final result from the graph.
- It only has one argument:
-
- - `key`: The name of the node that, when called, will return the results of calling it as the final output
-
- Note: This does not need to be called if at any point you previously created an edge (conditional or normal) to `END`
+ This is a special node representing the start of the graph.
+ This means that anything with an edge from this node will be the entrypoint of the graph.

  ### `END`

@@ -827,7 +810,7 @@ workflow.addNode("agent", agent);
  workflow.addNode("tools", executeTools);

  // We now set the entry point to be this first agent
- workflow.setEntryPoint("firstAgent");
+ workflow.addEdge(START, "firstAgent");

  // We define the same edges as before
  workflow.addConditionalEdges("agent", shouldContinue, {
@@ -15,7 +15,7 @@ describe("Chatbot", () => {
  const graph = new MessageGraph()
    .addNode("oracle", async (state) => model.invoke(state))
    .addEdge("oracle", END)
-   .setEntryPoint("oracle")
+   .addEdge(START, "oracle")
    .compile();
  const res = await graph.invoke(new HumanMessage("What is 1 + 1?"));
  console.log(res);
@@ -15,7 +15,7 @@ import { z } from "zod";
  import { ToolExecutor } from "../prebuilt/tool_executor.js";
  import { createAgentExecutor } from "../prebuilt/agent_executor.js";
  // Import from main `@langchain/langgraph` endpoint to turn on automatic config passing
- import { StateGraph, END } from "../index.js";
+ import { StateGraph, END, START } from "../index.js";
  test.skip("Can invoke with tracing", async () => {
  const tools = [new TavilySearchResults({ maxResults: 1 })];
  // Get the prompt to use - you can modify this!
@@ -73,7 +73,7 @@ test.skip("Can invoke with tracing", async () => {
    .addNode("action", new RunnableLambda({ func: executeTools }))
    // Set the entrypoint as `agent`
    // This means that this node is the first one called
-   .setEntryPoint("agent")
+   .addEdge(START, "agent")
    // We now add a conditional edge
    .addConditionalEdges(
      // First, we define the start node. We use `agent`.
@@ -188,7 +188,7 @@ test.skip("Can nest an agent executor", async () => {
      // Or end work if done
      FINISH: END,
    })
-   .setEntryPoint("supervisor");
+   .addEdge(START, "supervisor");
  const graph = workflow.compile();
  const streamResults = graph.stream({
    messages: [
@@ -287,7 +287,7 @@ test.skip("Can nest a graph within a graph", async () => {
      researcher: "researcher",
      FINISH: END,
    })
-   .setEntryPoint("supervisor");
+   .addEdge(START, "supervisor");
  const graph = workflow.compile();
  const streamResults = graph.stream({
    messages: [
@@ -425,7 +425,7 @@ Only add steps to the plan that still NEED to be done. Do not return previously
    .addNode("agent", executeStep)
    // Add a replan node
    .addNode("replan", replanStep)
-   .setEntryPoint("planner")
+   .addEdge(START, "planner")
    // From plan we go to agent
    .addEdge("planner", "agent")
    // From agent, we replan
@@ -17,7 +17,13 @@ it("should pass config through if importing from the primary entrypoint", async
    return { messages: [res] };
  })
  .addEdge(START, "testnode")
- .addEdge("testnode", END)
+ .addConditionalEdges("testnode", async (_state) => {
+   const model = new FakeToolCallingChatModel({
+     responses: [new AIMessage("hey!")],
+   }).withConfig({ runName: "conditional_edge_call" });
+   await model.invoke("testing but should be traced");
+   return END;
+ })
  .compile();
  const eventStream = graph.streamEvents({ messages: [] }, { version: "v2" });
  const events = [];
@@ -157,6 +163,64 @@ it("should pass config through if importing from the primary entrypoint", async
    tags: ["seq:step:2", "langsmith:hidden"],
    metadata: {},
  },
+ {
+   event: "on_chain_start",
+   data: {
+     input: {
+       input: undefined,
+     },
+   },
+   name: "func",
+   tags: ["seq:step:3"],
+   run_id: expect.any(String),
+   metadata: {},
+ },
+ {
+   event: "on_chat_model_start",
+   data: {
+     input: {
+       messages: [[new HumanMessage("testing but should be traced")]],
+     },
+   },
+   name: "conditional_edge_call",
+   tags: [],
+   run_id: expect.any(String),
+   metadata: {
+     ls_model_type: "chat",
+     ls_stop: undefined,
+   },
+ },
+ {
+   event: "on_chat_model_end",
+   data: {
+     output: new AIMessage("hey!"),
+     input: {
+       messages: [[new HumanMessage("testing but should be traced")]],
+     },
+   },
+   run_id: expect.any(String),
+   name: "conditional_edge_call",
+   tags: [],
+   metadata: {
+     ls_model_type: "chat",
+     ls_stop: undefined,
+   },
+ },
+ {
+   event: "on_chain_end",
+   data: {
+     output: {
+       output: undefined,
+     },
+     input: {
+       input: undefined,
+     },
+   },
+   run_id: expect.any(String),
+   name: "func",
+   tags: ["seq:step:3"],
+   metadata: {},
+ },
  {
    event: "on_chain_end",
    data: {
package/dist/utils.cjs CHANGED
@@ -2,6 +2,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.RunnableCallable = void 0;
  const runnables_1 = require("@langchain/core/runnables");
+ const singletons_1 = require("@langchain/core/singletons");
  class RunnableCallable extends runnables_1.Runnable {
      constructor(fields) {
          super();
@@ -48,24 +49,36 @@ class RunnableCallable extends runnables_1.Runnable {
          this.trace = fields.trace ?? this.trace;
          this.recurse = fields.recurse ?? this.recurse;
      }
+     async _tracedInvoke(input, config, runManager) {
+         return new Promise((resolve, reject) => {
+             const childConfig = (0, runnables_1.patchConfig)(config, {
+                 callbacks: runManager?.getChild(),
+             });
+             void singletons_1.AsyncLocalStorageProviderSingleton.getInstance().run(childConfig, async () => {
+                 try {
+                     const output = await this.func(input, childConfig);
+                     resolve(output);
+                 }
+                 catch (e) {
+                     reject(e);
+                 }
+             });
+         });
+     }
      async invoke(
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      input, options
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      ) {
-         if (this.func === undefined) {
-             return this.invoke(input, options);
-         }
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          let returnValue;
          if (this.trace) {
-             returnValue = await this._callWithConfig(this.func, input, (0, runnables_1.mergeConfigs)(this.config, options));
+             returnValue = await this._callWithConfig(this._tracedInvoke, input, (0, runnables_1.mergeConfigs)(this.config, options));
          }
          else {
              returnValue = await this.func(input, (0, runnables_1.mergeConfigs)(this.config, options));
          }
-         // eslint-disable-next-line no-instanceof/no-instanceof
-         if (returnValue instanceof runnables_1.Runnable && this.recurse) {
+         if (runnables_1.Runnable.isRunnable(returnValue) && this.recurse) {
              return await returnValue.invoke(input, options);
          }
          return returnValue;
package/dist/utils.d.ts CHANGED
@@ -1,3 +1,4 @@
+ import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
  import { Runnable, RunnableConfig } from "@langchain/core/runnables";
  export interface RunnableCallableArgs extends Partial<any> {
      name?: string;
@@ -14,5 +15,6 @@ export declare class RunnableCallable<I = unknown, O = unknown> extends Runnable
      trace: boolean;
      recurse: boolean;
      constructor(fields: RunnableCallableArgs);
+     protected _tracedInvoke(input: I, config?: Partial<RunnableConfig>, runManager?: CallbackManagerForChainRun): Promise<O>;
      invoke(input: any, options?: Partial<RunnableConfig> | undefined): Promise<any>;
  }
package/dist/utils.js CHANGED
@@ -1,4 +1,5 @@
- import { mergeConfigs, Runnable, } from "@langchain/core/runnables";
+ import { mergeConfigs, patchConfig, Runnable, } from "@langchain/core/runnables";
+ import { AsyncLocalStorageProviderSingleton } from "@langchain/core/singletons";
  export class RunnableCallable extends Runnable {
      constructor(fields) {
          super();
@@ -45,24 +46,36 @@ export class RunnableCallable extends Runnable {
          this.trace = fields.trace ?? this.trace;
          this.recurse = fields.recurse ?? this.recurse;
      }
+     async _tracedInvoke(input, config, runManager) {
+         return new Promise((resolve, reject) => {
+             const childConfig = patchConfig(config, {
+                 callbacks: runManager?.getChild(),
+             });
+             void AsyncLocalStorageProviderSingleton.getInstance().run(childConfig, async () => {
+                 try {
+                     const output = await this.func(input, childConfig);
+                     resolve(output);
+                 }
+                 catch (e) {
+                     reject(e);
+                 }
+             });
+         });
+     }
      async invoke(
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      input, options
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      ) {
-         if (this.func === undefined) {
-             return this.invoke(input, options);
-         }
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          let returnValue;
          if (this.trace) {
-             returnValue = await this._callWithConfig(this.func, input, mergeConfigs(this.config, options));
+             returnValue = await this._callWithConfig(this._tracedInvoke, input, mergeConfigs(this.config, options));
          }
          else {
              returnValue = await this.func(input, mergeConfigs(this.config, options));
          }
-         // eslint-disable-next-line no-instanceof/no-instanceof
-         if (returnValue instanceof Runnable && this.recurse) {
+         if (Runnable.isRunnable(returnValue) && this.recurse) {
              return await returnValue.invoke(input, options);
          }
          return returnValue;
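The `utils.cjs`/`utils.js` change above is what makes the new conditional-edge tracing expectations pass: when `trace` is enabled, `RunnableCallable` now runs the wrapped function inside an AsyncLocalStorage context seeded with the child config, so runnables invoked within it (like the model call inside the conditional edge in the test diff) are traced without explicit config plumbing. A standalone sketch of the same pattern; `runWithChildConfig` is a hypothetical helper name, not part of the package:

```ts
import { patchConfig, type RunnableConfig } from "@langchain/core/runnables";
import { AsyncLocalStorageProviderSingleton } from "@langchain/core/singletons";
import type { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";

// Hypothetical standalone version of the `_tracedInvoke` pattern above:
// seed the AsyncLocalStorage singleton with a child config so that any
// runnable invoked inside `func` inherits callbacks automatically.
async function runWithChildConfig<I, O>(
  func: (input: I, config: RunnableConfig) => Promise<O>,
  input: I,
  config?: Partial<RunnableConfig>,
  runManager?: CallbackManagerForChainRun
): Promise<O> {
  // Attach the parent run's callbacks to the config handed to `func`.
  const childConfig = patchConfig(config, {
    callbacks: runManager?.getChild(),
  });
  return new Promise<O>((resolve, reject) => {
    void AsyncLocalStorageProviderSingleton.getInstance().run(
      childConfig,
      async () => {
        try {
          resolve(await func(input, childConfig));
        } catch (e) {
          reject(e);
        }
      }
    );
  });
}
```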
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@langchain/langgraph",
-   "version": "0.0.23",
+   "version": "0.0.24",
    "description": "LangGraph",
    "type": "module",
    "engines": {