@langchain/langgraph 0.0.27 → 0.0.28

This diff compares the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -1,5 +1,6 @@
-import { StructuredTool } from "@langchain/core/tools";
+import { StructuredToolInterface } from "@langchain/core/tools";
 import { BaseMessage } from "@langchain/core/messages";
+import { RunnableToolLike } from "@langchain/core/runnables";
 import { ToolExecutor } from "./tool_executor.js";
 import { CompiledStateGraph } from "../graph/state.js";
 import { START } from "../graph/index.js";
@@ -8,5 +9,5 @@ export type FunctionCallingExecutorState = {
 };
 export declare function createFunctionCallingExecutor<Model extends object>({ model, tools, }: {
     model: Model;
-    tools: Array<StructuredTool> | ToolExecutor;
+    tools: Array<StructuredToolInterface | RunnableToolLike> | ToolExecutor;
 }): CompiledStateGraph<FunctionCallingExecutorState, Partial<FunctionCallingExecutorState>, typeof START | "agent" | "action">;

@@ -1,6 +1,6 @@
 import { convertToOpenAIFunction } from "@langchain/core/utils/function_calling";
 import { FunctionMessage } from "@langchain/core/messages";
-import { RunnableLambda } from "@langchain/core/runnables";
+import { RunnableLambda, } from "@langchain/core/runnables";
 import { ToolExecutor } from "./tool_executor.js";
 import { StateGraph, } from "../graph/state.js";
 import { END, START } from "../graph/index.js";

@@ -1,7 +1,7 @@
 import { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import { BaseMessage, SystemMessage } from "@langchain/core/messages";
-import { Runnable } from "@langchain/core/runnables";
-import { StructuredTool } from "@langchain/core/tools";
+import { Runnable, RunnableToolLike } from "@langchain/core/runnables";
+import { StructuredToolInterface } from "@langchain/core/tools";
 import { BaseCheckpointSaver } from "../checkpoint/base.js";
 import { START } from "../graph/index.js";
 import { MessagesState } from "../graph/message.js";
@@ -14,7 +14,7 @@ export interface AgentState {
 export type N = typeof START | "agent" | "tools";
 export type CreateReactAgentParams = {
     llm: BaseChatModel;
-    tools: ToolNode<MessagesState> | StructuredTool[];
+    tools: ToolNode<MessagesState> | (StructuredToolInterface | RunnableToolLike)[];
     messageModifier?: SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;
     checkpointSaver?: BaseCheckpointSaver;
     interruptBefore?: N[] | All;

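In practical terms, the widened CreateReactAgentParams means a tool produced by wrapping a Runnable with asTool() can now be passed straight to createReactAgent, which the new test further down exercises. A minimal sketch, assuming the usual @langchain/langgraph/prebuilt entry point; the weather tool and the ChatOpenAI model are illustrative, not part of this diff:

import { RunnableLambda } from "@langchain/core/runnables";
import { ChatOpenAI } from "@langchain/openai";
import { z } from "zod";
import { createReactAgent } from "@langchain/langgraph/prebuilt";

// Illustrative RunnableToolLike: wrap a plain async function and describe it via asTool().
const getWeather = RunnableLambda.from(
  async (input: { location: string }) => `It is sunny in ${input.location}.`
).asTool({
  name: "get_weather",
  description: "Get the current weather for a location",
  schema: z.object({ location: z.string() }),
});

const agent = createReactAgent({
  llm: new ChatOpenAI(),
  tools: [getWeather], // now accepted alongside StructuredToolInterface tools
});
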
@@ -1,7 +1,7 @@
-import { RunnableBinding, RunnableConfig } from "@langchain/core/runnables";
-import { StructuredTool } from "@langchain/core/tools";
+import { RunnableBinding, RunnableConfig, RunnableToolLike } from "@langchain/core/runnables";
+import { StructuredToolInterface } from "@langchain/core/tools";
 export interface ToolExecutorArgs {
-    tools: Array<StructuredTool>;
+    tools: Array<StructuredToolInterface | RunnableToolLike>;
     /**
      * @default {INVALID_TOOL_MSG_TEMPLATE}
      */
@@ -18,8 +18,8 @@ type ToolExecutorInputType = any;
 type ToolExecutorOutputType = any;
 export declare class ToolExecutor extends RunnableBinding<ToolExecutorInputType, ToolExecutorOutputType> {
     lc_graph_name: string;
-    tools: Array<StructuredTool>;
-    toolMap: Record<string, StructuredTool>;
+    tools: Array<StructuredToolInterface | RunnableToolLike>;
+    toolMap: Record<string, StructuredToolInterface | RunnableToolLike>;
     invalidToolMsgTemplate: string;
     constructor(fields: ToolExecutorArgs);
     /**

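The same union reaches ToolExecutorArgs, so a ToolExecutor can hold RunnableToolLike tools next to classic structured tools. A rough sketch, assuming ToolExecutor is re-exported from @langchain/langgraph/prebuilt and using a placeholder echo tool:

import { RunnableLambda } from "@langchain/core/runnables";
import { z } from "zod";
import { ToolExecutor } from "@langchain/langgraph/prebuilt";

// Placeholder RunnableToolLike tool built from a Runnable.
const echo = RunnableLambda.from(async (input: { text: string }) => input.text).asTool({
  name: "echo",
  description: "Echo the given text back unchanged",
  schema: z.object({ text: z.string() }),
});

// Satisfies the new Array<StructuredToolInterface | RunnableToolLike> field on ToolExecutorArgs.
const executor = new ToolExecutor({ tools: [echo] });
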
@@ -1,5 +1,6 @@
 import { BaseMessage } from "@langchain/core/messages";
-import { StructuredTool } from "@langchain/core/tools";
+import { RunnableToolLike } from "@langchain/core/runnables";
+import { StructuredToolInterface } from "@langchain/core/tools";
 import { RunnableCallable } from "../utils.js";
 import { END } from "../graph/graph.js";
 import { MessagesState } from "../graph/message.js";
@@ -10,8 +11,8 @@ export declare class ToolNode<T extends BaseMessage[] | MessagesState> extends R
       tool calls are requested, they will be run in parallel. The output will be
       a list of ToolMessages, one for each tool call.
     */
-    tools: StructuredTool[];
-    constructor(tools: StructuredTool[], name?: string, tags?: string[]);
+    tools: (StructuredToolInterface | RunnableToolLike)[];
+    constructor(tools: (StructuredToolInterface | RunnableToolLike)[], name?: string, tags?: string[]);
     private run;
 }
 export declare function toolsCondition(state: BaseMessage[] | MessagesState): "tools" | typeof END;

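ToolNode gets the matching constructor change, so a RunnableToolLike can be dropped directly into a graph's tool-execution node. A minimal sketch against the declaration above, again assuming the @langchain/langgraph/prebuilt entry point; the add tool and the BaseMessage[] state parameter are illustrative:

import { BaseMessage } from "@langchain/core/messages";
import { RunnableLambda } from "@langchain/core/runnables";
import { z } from "zod";
import { ToolNode } from "@langchain/langgraph/prebuilt";

// Illustrative tool built from a Runnable rather than a StructuredTool subclass.
const add = RunnableLambda.from(
  async (input: { a: number; b: number }) => String(input.a + input.b)
).asTool({
  name: "add",
  description: "Add two numbers and return the sum as a string",
  schema: z.object({ a: z.number(), b: z.number() }),
});

// constructor(tools: (StructuredToolInterface | RunnableToolLike)[], name?, tags?)
const toolNode = new ToolNode<BaseMessage[]>([add]);
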
@@ -3,6 +3,8 @@ import { it, beforeAll, describe, expect } from "@jest/globals";
 import { Tool } from "@langchain/core/tools";
 import { ChatOpenAI } from "@langchain/openai";
 import { HumanMessage } from "@langchain/core/messages";
+import { RunnableLambda } from "@langchain/core/runnables";
+import { z } from "zod";
 import { createReactAgent, createFunctionCallingExecutor, } from "../prebuilt/index.js";
 import { initializeAsyncLocalStorageSingleton } from "../setup/async_local_storage.js";
 // Tracing slows down the tests
@@ -97,6 +99,32 @@ describe("createFunctionCallingExecutor", () => {
         const functionCall = endState.messages.find((message) => message._getType() === "function");
         expect(functionCall.content).toBe(weatherResponse);
     });
+    it("can accept RunnableToolLike tools", async () => {
+        const weatherResponse = `Not too cold, not too hot 😎`;
+        const model = new ChatOpenAI();
+        const sfWeatherTool = RunnableLambda.from(async (_) => weatherResponse);
+        const tools = [
+            sfWeatherTool.asTool({
+                name: "current_weather",
+                description: "Get the current weather report for San Francisco, CA",
+                schema: z.object({
+                    location: z.string(),
+                }),
+            }),
+        ];
+        const functionsAgentExecutor = createFunctionCallingExecutor({
+            model,
+            tools,
+        });
+        const response = await functionsAgentExecutor.invoke({
+            messages: [new HumanMessage("What's the weather like in SF?")],
+        });
+        // It needs at least one human message, one AI and one function message.
+        expect(response.messages.length > 3).toBe(true);
+        const firstFunctionMessage = response.messages.find((message) => message._getType() === "function");
+        expect(firstFunctionMessage).toBeDefined();
+        expect(firstFunctionMessage?.content).toBe(weatherResponse);
+    });
 });
 describe("createReactAgent", () => {
     it("can call a tool", async () => {

@@ -5,6 +5,7 @@ import { StructuredTool, Tool } from "@langchain/core/tools";
 import { FakeStreamingLLM } from "@langchain/core/utils/testing";
 import { AIMessage, HumanMessage, SystemMessage, ToolMessage, } from "@langchain/core/messages";
 import { z } from "zod";
+import { RunnableLambda } from "@langchain/core/runnables";
 import { FakeToolCallingChatModel } from "./utils.js";
 import { createAgentExecutor, createReactAgent } from "../prebuilt/index.js";
 // Tracing slows down the tests
@@ -367,4 +368,49 @@ describe("createReactAgent", () => {
             new AIMessage("result2"),
         ]);
     });
+    it("Can accept RunnableToolLike", async () => {
+        const llm = new FakeToolCallingChatModel({
+            responses: [
+                new AIMessage({
+                    content: "result1",
+                    tool_calls: [
+                        { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+                    ],
+                }),
+                new AIMessage("result2"),
+            ],
+        });
+        // Instead of re-implementing the tool, wrap it in a RunnableLambda and
+        // call `asTool` to create a RunnableToolLike.
+        const searchApiWithArtifactsTool = new SearchAPIWithArtifact();
+        const runnableToolLikeTool = RunnableLambda.from(async (input, config) => searchApiWithArtifactsTool.invoke(input, config)).asTool({
+            name: searchApiWithArtifactsTool.name,
+            description: searchApiWithArtifactsTool.description,
+            schema: searchApiWithArtifactsTool.schema,
+        });
+        const agent = createReactAgent({
+            llm,
+            tools: [runnableToolLikeTool],
+            messageModifier: "You are a helpful assistant",
+        });
+        const result = await agent.invoke({
+            messages: [new HumanMessage("Hello Input!")],
+        });
+        expect(result.messages).toEqual([
+            new HumanMessage("Hello Input!"),
+            new AIMessage({
+                content: "result1",
+                tool_calls: [
+                    { name: "search_api", id: "tool_abcd123", args: { query: "foo" } },
+                ],
+            }),
+            new ToolMessage({
+                name: "search_api",
+                content: "some response format",
+                tool_call_id: "tool_abcd123",
+                artifact: Buffer.from("123"),
+            }),
+            new AIMessage("result2"),
+        ]);
+    });
 });

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/langgraph",
-  "version": "0.0.27",
+  "version": "0.0.28",
   "description": "LangGraph",
   "type": "module",
   "engines": {