@langchain/langgraph 1.0.0-alpha.2 → 1.0.0-alpha.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/graph/message.cjs +3 -4
  3. package/dist/graph/message.cjs.map +1 -1
  4. package/dist/graph/message.d.cts +2 -1
  5. package/dist/graph/message.d.cts.map +1 -1
  6. package/dist/graph/message.d.ts +2 -1
  7. package/dist/graph/message.d.ts.map +1 -1
  8. package/dist/graph/message.js +3 -4
  9. package/dist/graph/message.js.map +1 -1
  10. package/dist/graph/messages_annotation.d.cts +5 -4
  11. package/dist/graph/messages_annotation.d.cts.map +1 -1
  12. package/dist/graph/messages_annotation.d.ts +5 -4
  13. package/dist/graph/messages_annotation.d.ts.map +1 -1
  14. package/dist/graph/state.cjs +5 -1
  15. package/dist/graph/state.cjs.map +1 -1
  16. package/dist/graph/state.d.cts +6 -9
  17. package/dist/graph/state.d.cts.map +1 -1
  18. package/dist/graph/state.d.ts +6 -9
  19. package/dist/graph/state.d.ts.map +1 -1
  20. package/dist/graph/state.js +5 -1
  21. package/dist/graph/state.js.map +1 -1
  22. package/dist/graph/zod/zod-registry.cjs.map +1 -1
  23. package/dist/graph/zod/zod-registry.d.cts.map +1 -1
  24. package/dist/graph/zod/zod-registry.js.map +1 -1
  25. package/dist/interrupt.cjs.map +1 -1
  26. package/dist/interrupt.d.cts +10 -1
  27. package/dist/interrupt.d.cts.map +1 -1
  28. package/dist/interrupt.d.ts +10 -1
  29. package/dist/interrupt.d.ts.map +1 -1
  30. package/dist/interrupt.js.map +1 -1
  31. package/dist/prebuilt/agentName.cjs +8 -5
  32. package/dist/prebuilt/agentName.cjs.map +1 -1
  33. package/dist/prebuilt/agentName.js +8 -5
  34. package/dist/prebuilt/agentName.js.map +1 -1
  35. package/dist/prebuilt/agent_executor.d.cts +5 -4
  36. package/dist/prebuilt/agent_executor.d.cts.map +1 -1
  37. package/dist/prebuilt/agent_executor.d.ts +5 -4
  38. package/dist/prebuilt/agent_executor.d.ts.map +1 -1
  39. package/dist/prebuilt/react_agent_executor.d.cts +3 -2
  40. package/dist/prebuilt/react_agent_executor.d.cts.map +1 -1
  41. package/dist/prebuilt/react_agent_executor.d.ts +3 -2
  42. package/dist/prebuilt/react_agent_executor.d.ts.map +1 -1
  43. package/dist/prebuilt/tool_node.cjs +2 -2
  44. package/dist/prebuilt/tool_node.cjs.map +1 -1
  45. package/dist/prebuilt/tool_node.js +3 -3
  46. package/dist/prebuilt/tool_node.js.map +1 -1
  47. package/dist/pregel/index.cjs +40 -12
  48. package/dist/pregel/index.cjs.map +1 -1
  49. package/dist/pregel/index.d.cts +13 -4
  50. package/dist/pregel/index.d.cts.map +1 -1
  51. package/dist/pregel/index.d.ts +13 -4
  52. package/dist/pregel/index.d.ts.map +1 -1
  53. package/dist/pregel/index.js +41 -13
  54. package/dist/pregel/index.js.map +1 -1
  55. package/dist/pregel/stream.cjs +107 -0
  56. package/dist/pregel/stream.cjs.map +1 -1
  57. package/dist/pregel/stream.js +107 -1
  58. package/dist/pregel/stream.js.map +1 -1
  59. package/dist/pregel/types.cjs.map +1 -1
  60. package/dist/pregel/types.d.cts +15 -2
  61. package/dist/pregel/types.d.cts.map +1 -1
  62. package/dist/pregel/types.d.ts +15 -2
  63. package/dist/pregel/types.d.ts.map +1 -1
  64. package/dist/pregel/types.js.map +1 -1
  65. package/dist/writer.cjs.map +1 -1
  66. package/dist/writer.d.cts +3 -1
  67. package/dist/writer.d.cts.map +1 -1
  68. package/dist/writer.d.ts +3 -1
  69. package/dist/writer.d.ts.map +1 -1
  70. package/dist/writer.js.map +1 -1
  71. package/package.json +29 -32
  72. package/dist/ui/index.cjs +0 -4
  73. package/dist/ui/index.d.cts +0 -5
  74. package/dist/ui/index.d.ts +0 -5
  75. package/dist/ui/index.js +0 -3
  76. package/dist/ui/stream.cjs +0 -145
  77. package/dist/ui/stream.cjs.map +0 -1
  78. package/dist/ui/stream.d.cts +0 -25
  79. package/dist/ui/stream.d.cts.map +0 -1
  80. package/dist/ui/stream.d.ts +0 -25
  81. package/dist/ui/stream.d.ts.map +0 -1
  82. package/dist/ui/stream.js +0 -143
  83. package/dist/ui/stream.js.map +0 -1
  84. package/dist/ui/types.infer.d.cts +0 -53
  85. package/dist/ui/types.infer.d.cts.map +0 -1
  86. package/dist/ui/types.infer.d.ts +0 -53
  87. package/dist/ui/types.infer.d.ts.map +0 -1
  88. package/dist/ui/types.message.d.cts +0 -95
  89. package/dist/ui/types.message.d.cts.map +0 -1
  90. package/dist/ui/types.message.d.ts +0 -95
  91. package/dist/ui/types.message.d.ts.map +0 -1
  92. package/dist/ui/types.schema.d.cts +0 -228
  93. package/dist/ui/types.schema.d.cts.map +0 -1
  94. package/dist/ui/types.schema.d.ts +0 -228
  95. package/dist/ui/types.schema.d.ts.map +0 -1
@@ -1 +1 @@
- {"version":3,"file":"react_agent_executor.d.ts","names":["_langchain_core_messages0","_langchain_core_language_models_chat_models0","___web_js0","BaseChatModel","LanguageModelLike","BaseMessage","BaseMessageLike","SystemMessage","Runnable","RunnableToolLike","RunnableSequence","RunnableBinding","RunnableLike","DynamicTool","StructuredToolInterface","InteropZodObject","InteropZodType","All","BaseCheckpointSaver","BaseStore","CompiledStateGraph","AnnotationRoot","MessagesAnnotation","ToolNode","LangGraphRunnableConfig","Runtime","Messages","START","InteropZodToStateDefinition","AgentState","Record","StructuredResponseType","N","StructuredResponseSchemaOptions","ServerTool","ClientTool","ConfigurableModelInterface","Promise","_shouldBindTools","_bindTools","_langchain_core_language_models_base0","BaseLanguageModelInput","AIMessageChunk","BaseChatModelCallOptions","_getModel","Prompt","State","StateModifier","MessageModifier","createReactAgentAnnotation","BinaryOperatorAggregate","T","LastValue","SingleReducer","StateDefinition","S","PreHookAnnotation","AnyAnnotationRoot","ToAnnotationRoot","A","CreateReactAgentParams","C","createReactAgent","StructuredResponseFormat","spec","ReturnType"],"sources":["../../src/prebuilt/react_agent_executor.d.ts"],"sourcesContent":["import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { BaseMessage, BaseMessageLike, SystemMessage } from \"@langchain/core/messages\";\nimport { Runnable, RunnableToolLike, RunnableSequence, RunnableBinding, type RunnableLike } from \"@langchain/core/runnables\";\nimport { DynamicTool, StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { InteropZodObject, InteropZodType } from \"@langchain/core/utils/types\";\nimport { All, BaseCheckpointSaver, BaseStore } from \"@langchain/langgraph-checkpoint\";\nimport { type CompiledStateGraph, AnnotationRoot } from \"../graph/index.js\";\nimport { MessagesAnnotation } from \"../graph/messages_annotation.js\";\nimport { ToolNode } from \"./tool_node.js\";\nimport { LangGraphRunnableConfig, Runtime } from \"../pregel/runnable_types.js\";\nimport { Messages } from \"../graph/message.js\";\nimport { START } from \"../constants.js\";\nimport type { InteropZodToStateDefinition } from \"../graph/zod/meta.js\";\n/**\n * @deprecated `AgentState` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { AgentState } from \"langchain\";`\n */\nexport interface AgentState<\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseType extends Record<string, any> = Record<string, any>> {\n messages: BaseMessage[];\n // TODO: This won't be set until we\n // implement managed values in LangGraphJS\n // Will be useful for inserting a message on\n // graph recursion end\n // is_last_step: boolean;\n structuredResponse: StructuredResponseType;\n}\nexport type N = typeof START | \"agent\" | \"tools\";\ntype StructuredResponseSchemaOptions<StructuredResponseType> = {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n schema: InteropZodType<StructuredResponseType> | Record<string, any>;\n prompt?: string;\n strict?: boolean;\n [key: string]: unknown;\n};\ntype ServerTool = Record<string, unknown>;\ntype ClientTool = StructuredToolInterface | DynamicTool | RunnableToolLike;\ninterface ConfigurableModelInterface {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n 
_queuedMethodOperations: Record<string, any>;\n _model: () => Promise<BaseChatModel>;\n}\nexport declare function _shouldBindTools(llm: LanguageModelLike, tools: (ClientTool | ServerTool)[]): Promise<boolean>;\nexport declare function _bindTools(llm: LanguageModelLike, toolClasses: (ClientTool | ServerTool)[]): Promise<Runnable<import(\"@langchain/core/language_models/base\").BaseLanguageModelInput, import(\"@langchain/core/messages\").AIMessageChunk, import(\"@langchain/core/language_models/chat_models\").BaseChatModelCallOptions> | RunnableBinding<any, any, any> | RunnableSequence<any, any>>;\nexport declare function _getModel(llm: LanguageModelLike | ConfigurableModelInterface): Promise<LanguageModelLike>;\nexport type Prompt = SystemMessage | string | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => BaseMessageLike[]) | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => Promise<BaseMessageLike[]>) | Runnable;\n/** @deprecated Use Prompt instead. */\nexport type StateModifier = Prompt;\n/** @deprecated Use Prompt instead. */\nexport type MessageModifier = SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;\nexport declare const createReactAgentAnnotation: <\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nT extends Record<string, any> = Record<string, any>>() => AnnotationRoot<{\n messages: import(\"../web.js\").BinaryOperatorAggregate<BaseMessage[], Messages>;\n structuredResponse: {\n (): import(\"../web.js\").LastValue<T>;\n (annotation: import(\"../web.js\").SingleReducer<T, T>): import(\"../web.js\").BinaryOperatorAggregate<T, T>;\n Root: <S extends import(\"../web.js\").StateDefinition>(sd: S) => AnnotationRoot<S>;\n };\n}>;\ndeclare const PreHookAnnotation: AnnotationRoot<{\n llmInputMessages: import(\"../web.js\").BinaryOperatorAggregate<BaseMessage[], Messages>;\n}>;\ntype PreHookAnnotation = typeof PreHookAnnotation;\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype AnyAnnotationRoot = AnnotationRoot<any>;\ntype ToAnnotationRoot<A extends AnyAnnotationRoot | InteropZodObject> = A extends AnyAnnotationRoot ? A : A extends InteropZodObject ? AnnotationRoot<InteropZodToStateDefinition<A>> : never;\n/**\n * @deprecated `CreateReactAgentParams` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { CreateAgentParams } from \"langchain\";`\n */\nexport type CreateReactAgentParams<A extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseType extends Record<string, any> = Record<string, any>, C extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = {\n /** The chat model that can utilize OpenAI-style tool calling. */\n llm: LanguageModelLike | ((state: ToAnnotationRoot<A>[\"State\"] & PreHookAnnotation[\"State\"], runtime: Runtime<ToAnnotationRoot<C>[\"State\"]>) => Promise<LanguageModelLike> | LanguageModelLike);\n /** A list of tools or a ToolNode. */\n tools: ToolNode | (ServerTool | ClientTool)[];\n /**\n * @deprecated Use prompt instead.\n */\n messageModifier?: MessageModifier;\n /**\n * @deprecated Use prompt instead.\n */\n stateModifier?: StateModifier;\n /**\n * An optional prompt for the LLM. 
This takes full graph state BEFORE the LLM is called and prepares the input to LLM.\n *\n * Can take a few different forms:\n *\n * - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state[\"messages\"].\n * - SystemMessage: this is added to the beginning of the list of messages in state[\"messages\"].\n * - Function: This function should take in full graph state and the output is then passed to the language model.\n * - Runnable: This runnable should take in full graph state and the output is then passed to the language model.\n *\n * Note:\n * Prior to `v0.2.46`, the prompt was set using `stateModifier` / `messagesModifier` parameters.\n * This is now deprecated and will be removed in a future release.\n */\n prompt?: Prompt;\n /**\n * Additional state schema for the agent.\n */\n stateSchema?: A;\n /**\n * An optional schema for the context.\n */\n contextSchema?: C;\n /** An optional checkpoint saver to persist the agent's state. */\n checkpointSaver?: BaseCheckpointSaver | boolean;\n /** An optional checkpoint saver to persist the agent's state. Alias of \"checkpointSaver\". */\n checkpointer?: BaseCheckpointSaver | boolean;\n /** An optional list of node names to interrupt before running. */\n interruptBefore?: N[] | All;\n /** An optional list of node names to interrupt after running. */\n interruptAfter?: N[] | All;\n store?: BaseStore;\n /**\n * An optional schema for the final agent output.\n *\n * If provided, output will be formatted to match the given schema and returned in the 'structuredResponse' state key.\n * If not provided, `structuredResponse` will not be present in the output state.\n *\n * Can be passed in as:\n * - Zod schema\n * - JSON schema\n * - { prompt, schema }, where schema is one of the above.\n * The prompt will be used together with the model that is being used to generate the structured response.\n *\n * @remarks\n * **Important**: `responseFormat` requires the model to support `.withStructuredOutput()`.\n *\n * **Note**: The graph will make a separate call to the LLM to generate the structured response after the agent loop is finished.\n * This is not the only strategy to get structured responses, see more options in [this guide](https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output/).\n */\n responseFormat?: InteropZodType<StructuredResponseType> | StructuredResponseSchemaOptions<StructuredResponseType>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>;\n /**\n * An optional name for the agent.\n */\n name?: string;\n /**\n * An optional description for the agent.\n * This can be used to describe the agent to the underlying supervisor LLM.\n */\n description?: string | undefined;\n /**\n * Use to specify how to expose the agent name to the underlying supervisor LLM.\n \n - undefined: Relies on the LLM provider {@link AIMessage#name}. 
Currently, only OpenAI supports this.\n - `\"inline\"`: Add the agent name directly into the content field of the {@link AIMessage} using XML-style tags.\n Example: `\"How can I help you\"` -> `\"<name>agent_name</name><content>How can I help you?</content>\"`\n */\n includeAgentName?: \"inline\" | undefined;\n /**\n * An optional node to add before the `agent` node (i.e., the node that calls the LLM).\n * Useful for managing long message histories (e.g., message trimming, summarization, etc.).\n */\n preModelHook?: RunnableLike<ToAnnotationRoot<A>[\"State\"] & PreHookAnnotation[\"State\"], ToAnnotationRoot<A>[\"Update\"] & PreHookAnnotation[\"Update\"], LangGraphRunnableConfig>;\n /**\n * An optional node to add after the `agent` node (i.e., the node that calls the LLM).\n * Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.\n */\n postModelHook?: RunnableLike<ToAnnotationRoot<A>[\"State\"], ToAnnotationRoot<A>[\"Update\"], LangGraphRunnableConfig>;\n /**\n * Determines the version of the graph to create.\n *\n * Can be one of\n * - `\"v1\"`: The tool node processes a single message. All tool calls in the message are\n * executed in parallel within the tool node.\n * - `\"v2\"`: The tool node processes a single tool call. Tool calls are distributed across\n * multiple instances of the tool node using the Send API.\n *\n * @default `\"v1\"`\n */\n version?: \"v1\" | \"v2\";\n};\n/**\n * @deprecated `createReactAgent` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { createAgent } from \"langchain\";`\n *\n * Creates a StateGraph agent that relies on a chat model utilizing tool calling.\n *\n * @example\n * ```ts\n * import { ChatOpenAI } from \"@langchain/openai\";\n * import { tool } from \"@langchain/core/tools\";\n * import { z } from \"zod\";\n * import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n *\n * const model = new ChatOpenAI({\n * model: \"gpt-4o\",\n * });\n *\n * const getWeather = tool((input) => {\n * if ([\"sf\", \"san francisco\"].includes(input.location.toLowerCase())) {\n * return \"It's 60 degrees and foggy.\";\n * } else {\n * return \"It's 90 degrees and sunny.\";\n * }\n * }, {\n * name: \"get_weather\",\n * description: \"Call to get the current weather.\",\n * schema: z.object({\n * location: z.string().describe(\"Location to get the weather for.\"),\n * })\n * })\n *\n * const agent = createReactAgent({ llm: model, tools: [getWeather] });\n *\n * const inputs = {\n * messages: [{ role: \"user\", content: \"what is the weather in SF?\" }],\n * };\n *\n * const stream = await agent.stream(inputs, { streamMode: \"values\" });\n *\n * for await (const { messages } of stream) {\n * console.log(messages);\n * }\n * // Returns the messages in the state at each step of execution\n * ```\n */\nexport declare function createReactAgent<A extends AnyAnnotationRoot | InteropZodObject = typeof MessagesAnnotation, \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseFormat extends Record<string, any> = Record<string, any>, C extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot>(params: CreateReactAgentParams<A, StructuredResponseFormat, C>): CompiledStateGraph<ToAnnotationRoot<A>[\"State\"], ToAnnotationRoot<A>[\"Update\"], \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nany, typeof MessagesAnnotation.spec & ToAnnotationRoot<A>[\"spec\"], ReturnType<typeof 
createReactAgentAnnotation<StructuredResponseFormat>>[\"spec\"] & ToAnnotationRoot<A>[\"spec\"]>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;AAkBiB6B,UAAAA,UAAU;;+BAEIC,MAAAA,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,GAAsBA,MAAtBA,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,CAAAA,CAAAA;UAAsBA,EACvCzB,WADuCyB,EAAAA;;;;EASzCE;EACPC;EAA+B,kBAAA,EAHZF,sBAGY;;AAExBf,KAHAgB,CAAAA,GAGAhB,OAHWW,KAGXX,GAAAA,OAAAA,GAAAA,OAAAA;KAFPiB,+BAEgDH,CAAAA,sBAAAA,CAAAA,GAAAA;;EAKhDI,MAAAA,EALOlB,cAKG,CALYe,sBAKTD,CAAAA,GALmCA,MAKnCA,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EACbK,MAAAA,CAAAA,EAAAA,MAAU;EAAA,MAAA,CAAA,EAAA,OAAA;MAAGrB,EAAAA,MAAAA,CAAAA,EAAAA,OAAAA;;KADboB,UAAAA,GAAaJ,MACwCrB,CAAAA,MAAAA,EAAAA,OAAAA,CAAAA;KAArD0B,UAAAA,GAAarB,0BAA0BD,cAAcJ;AAS6KH,KAA3NuC,MAAAA,GAAStC,aAAkND,GAAAA,MAAAA,GAAAA,CAAAA,CAAAA,KAAAA,EAAAA,OAAzKgB,kBAAAA,CAAmBwB,KAAsJxC,EAAAA,MAAAA,EAAvIkB,uBAAuIlB,EAAAA,GAA3GA,eAA2GA,EAAAA,CAAAA,GAAAA,CAAAA,CAAAA,KAAAA,EAAAA,OAAtEgB,kBAAAA,CAAmBwB,KAAmDxC,EAAAA,MAAAA,EAApCkB,uBAAoClB,EAAAA,GAAR+B,OAAQ/B,CAAAA,eAAAA,EAAAA,CAAAA,CAAAA,GAAsBE,QAAtBF;;AAAsBE,KAEjPuC,aAAAA,GAAgBF,MAFiOrC;;AAEjPuC,KAEAC,eAAAA,GAAkBzC,aAFFsC,GAAAA,MAAAA,GAAAA,CAAAA,CAAAA,QAAAA,EAEuCxC,WAFvCwC,EAAAA,EAAAA,GAEyDxC,WAFzDwC,EAAAA,CAAAA,GAAAA,CAAAA,CAAAA,QAAAA,EAEsFxC,WAFtFwC,EAAAA,EAAAA,GAEwGR,OAFxGQ,CAEgHxC,WAFhHwC,EAAAA,CAAAA,CAAAA,GAEkIrC,QAFlIqC;AAEhBG,cACSC,0BADM,EAAA;;UAGjBnB,MAHoBvB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,GAGEuB,MAHFvB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,CAAAA,GAAAA,GAG4Bc,cAH5Bd,CAAAA;UAAqCF,yBAAAA,CAITA,WAJSA,EAAAA,EAIMqB,QAJNrB,CAAAA;oBAAkBA,EAAAA;IAA6BA,EAAAA,WAAAA,CAMxE8C,CANwE9C,CAAAA;IAA0BA,CAAAA,UAAAA,eAAAA,CAOrF8C,CAPqF9C,EAOlF8C,CAPkF9C,CAAAA,CAAAA,yBAAAA,CAOjC8C,CAPiC9C,EAO9B8C,CAP8B9C,CAAAA;IAARgC,IAAAA,EAAAA,CAAAA,yBAAAA,CAAAA,CAAAA,EAAAA,EAQlEkB,CARkElB,EAAAA,GAQ5DhB,cAR4DgB,CAQ7CkB,CAR6ClB,CAAAA;;;AACpI,cAUcmB,iBAVOP,EAUY5B,cAD/B,CAAA;EAAA,gBAAA,yBAAA,CAEgEhB,WAFhE,EAAA,EAE+EqB,QAF/E,CAAA;;KAIG8B,iBAAAA,GAX2B1B,OAWA0B,iBAXA1B;;KAa3B2B,iBAAAA,GAAoBpC,cAZgDK,CAAAA,GAAAA,CAAAA;KAapEgC,2BAA2BD,oBAAoB1C,oBAAoB4C,UAAUF,oBAAoBE,IAAIA,UAAU5C,mBAAmBM,eAAeO,4BAA4B+B;;;;;KAKtKC,iCAAiCH,oBAAoB1C,mBAAmB0C;;+BAErD3B,MAjB+EqB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,GAiBzDrB,MAjByDqB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,EAAAA,UAiB1BM,iBAjB0BN,GAiBNpC,gBAjBMoC,GAiBaM,iBAjBbN,CAAAA,GAAAA;;OAmBrG/C,6BAA6BsD,iBAAiBC,cAAcH,qCAAqC/B,QAAQiC,iBAAiBG,iBAAiBxB,QAAQjC,qBAAqBA;;OAlB1FmD,EAoB5EhC,QApB4EgC,GAAAA,CAoBhErB,UApBgEqB,GAoBnDpB,UApBmDoB,CAAAA,EAAAA;;;;EAGzEC,eAAAA,CAAAA,EAqBQR,eAnBpB;EAAA;;;kBAuBkBD;;;AAzB2B;AAGE;AAEV;;;;;;;;;;QAC+GnB,CAAAA,EAkCzIiB,MAlCyIjB;;;AAKtJ;EAAkC,WAAA,CAAA,EAiChB+B,CAjCgB;;;;eAEH7B,CAAAA,EAmCX+B,CAnCW/B;;iBAAqD2B,CAAAA,EAqC9DvC,mBArC8DuC,GAAAA,OAAAA;;cAAuCA,CAAAA,EAuCxGvC,mBAvCwGuC,GAAAA,OAAAA;;iBAEpEE,CAAAA,EAuCjC3B,CAvCiC2B,EAAAA,GAuC3B1C,GAvC2B0C;;gBAAcH,CAAAA,EAyChDxB,CAzCgDwB,EAAAA,GAyC1CvC,GAzC0CuC;OAA8DK,CAAAA,EA0CvH1C,SA1CuH0C;;;;;;;;;;;;;;;;;;;gBAyCxG5C,CAAAA,EAoBND,cApBMC,CAoBSc,sBApBTd,CAAAA,GAoBmCgB,+BApBnChB,CAoBmEc,sBApBnEd;;IAsBpBa,MAF6BC,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;;;;MAE7BD,CAAAA,EAAAA,MAAAA;;;;;aAsBoF4B,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;;;;;;;kBAKGlC,CAAAA,EAAAA,QAAAA,GAAAA,SAAAA;;;AA2D9F;;cAAmDiC,CAAAA,EAhEhC7C,YAgEgC6C,CAhEnBC,gBAgEmBD,CAhEFE,CAgEEF,CAAAA,CAAAA,OAAAA,CAAAA,GAhEYD,iBAgEZC,CAAAA,OAAAA,CAAAA,EAhEwCC,gBAgExCD,CAhEyDE,CAgEzDF,CAAAA,CAAAA,QAAAA,CAAAA,GAhEwED,iBAgExEC,CAAAA,QAAAA,CAAAA,EAhEqGjC,uBAgErGiC,CAAAA;;;;;eAEmCA,CAAAA,EA7DlE7C,YA6DkE6C,CA7DrDC,gBA6DqDD,CA7DpCE,CA6DoCF,CAAAA,CAAAA,OAAAA,CAAAA,EA7DvBC,gBA6DuBD,CA7DNE,CA6DMF,CAAAA,CAAAA,QAAAA,CAAAA,EA7DQjC,uBA6DRiC,CAAAA;;;;;;;;;;;;SAE/BE,CAAAA,EA
AAA,IAAAA,GAAAA,IAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAJ/BG,2BAA2BL,oBAAoB1C,0BAA0BO;;iCAEhEQ,sBAAsBA,+BAA+B2B,oBAAoB1C,mBAAmB0C,2BAA2BG,uBAAuBD,GAAGI,0BAA0BF,KAAKzC,mBAAmBsC,iBAAiBC,aAAaD,iBAAiBC;;YAEvQrC,kBAAAA,CAAmB0C,OAAON,iBAAiBC,YAAYM,kBAAkBhB,2BAA2Bc,qCAAqCL,iBAAiBC"}
+ {"version":3,"file":"react_agent_executor.d.ts","names":["_langchain_core_messages23","_langchain_core_language_models_chat_models0","___web_js2","BaseChatModel","LanguageModelLike","BaseMessage","BaseMessageLike","SystemMessage","Runnable","RunnableToolLike","RunnableSequence","RunnableBinding","RunnableLike","DynamicTool","StructuredToolInterface","InteropZodObject","InteropZodType","All","BaseCheckpointSaver","BaseStore","CompiledStateGraph","AnnotationRoot","MessagesAnnotation","ToolNode","LangGraphRunnableConfig","Runtime","Messages","START","InteropZodToStateDefinition","AgentState","Record","StructuredResponseType","N","StructuredResponseSchemaOptions","ServerTool","ClientTool","ConfigurableModelInterface","Promise","_shouldBindTools","_bindTools","_langchain_core_language_models_base0","BaseLanguageModelInput","MessageStructure","AIMessageChunk","BaseChatModelCallOptions","_getModel","Prompt","State","StateModifier","MessageModifier","createReactAgentAnnotation","MessageType","BinaryOperatorAggregate","T","LastValue","SingleReducer","StateDefinition","S","PreHookAnnotation","AnyAnnotationRoot","ToAnnotationRoot","A","CreateReactAgentParams","C","createReactAgent","StructuredResponseFormat","spec","ReturnType"],"sources":["../../src/prebuilt/react_agent_executor.d.ts"],"sourcesContent":["import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { BaseMessage, BaseMessageLike, SystemMessage } from \"@langchain/core/messages\";\nimport { Runnable, RunnableToolLike, RunnableSequence, RunnableBinding, type RunnableLike } from \"@langchain/core/runnables\";\nimport { DynamicTool, StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { InteropZodObject, InteropZodType } from \"@langchain/core/utils/types\";\nimport { All, BaseCheckpointSaver, BaseStore } from \"@langchain/langgraph-checkpoint\";\nimport { type CompiledStateGraph, AnnotationRoot } from \"../graph/index.js\";\nimport { MessagesAnnotation } from \"../graph/messages_annotation.js\";\nimport { ToolNode } from \"./tool_node.js\";\nimport { LangGraphRunnableConfig, Runtime } from \"../pregel/runnable_types.js\";\nimport { Messages } from \"../graph/message.js\";\nimport { START } from \"../constants.js\";\nimport type { InteropZodToStateDefinition } from \"../graph/zod/meta.js\";\n/**\n * @deprecated `AgentState` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { AgentState } from \"langchain\";`\n */\nexport interface AgentState<\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseType extends Record<string, any> = Record<string, any>> {\n messages: BaseMessage[];\n // TODO: This won't be set until we\n // implement managed values in LangGraphJS\n // Will be useful for inserting a message on\n // graph recursion end\n // is_last_step: boolean;\n structuredResponse: StructuredResponseType;\n}\nexport type N = typeof START | \"agent\" | \"tools\";\ntype StructuredResponseSchemaOptions<StructuredResponseType> = {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n schema: InteropZodType<StructuredResponseType> | Record<string, any>;\n prompt?: string;\n strict?: boolean;\n [key: string]: unknown;\n};\ntype ServerTool = Record<string, unknown>;\ntype ClientTool = StructuredToolInterface | DynamicTool | RunnableToolLike;\ninterface ConfigurableModelInterface {\n // eslint-disable-next-line 
@typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any>;\n _model: () => Promise<BaseChatModel>;\n}\nexport declare function _shouldBindTools(llm: LanguageModelLike, tools: (ClientTool | ServerTool)[]): Promise<boolean>;\nexport declare function _bindTools(llm: LanguageModelLike, toolClasses: (ClientTool | ServerTool)[]): Promise<Runnable<import(\"@langchain/core/language_models/base\").BaseLanguageModelInput, import(\"@langchain/core/messages\").AIMessageChunk<import(\"@langchain/core/messages\").MessageStructure>, import(\"@langchain/core/language_models/chat_models\").BaseChatModelCallOptions> | RunnableBinding<any, any, any> | RunnableSequence<any, any>>;\nexport declare function _getModel(llm: LanguageModelLike | ConfigurableModelInterface): Promise<LanguageModelLike>;\nexport type Prompt = SystemMessage | string | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => BaseMessageLike[]) | ((state: typeof MessagesAnnotation.State, config: LangGraphRunnableConfig) => Promise<BaseMessageLike[]>) | Runnable;\n/** @deprecated Use Prompt instead. */\nexport type StateModifier = Prompt;\n/** @deprecated Use Prompt instead. */\nexport type MessageModifier = SystemMessage | string | ((messages: BaseMessage[]) => BaseMessage[]) | ((messages: BaseMessage[]) => Promise<BaseMessage[]>) | Runnable;\nexport declare const createReactAgentAnnotation: <\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nT extends Record<string, any> = Record<string, any>>() => AnnotationRoot<{\n messages: import(\"../web.js\").BinaryOperatorAggregate<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], Messages>;\n structuredResponse: {\n (): import(\"../web.js\").LastValue<T>;\n (annotation: import(\"../web.js\").SingleReducer<T, T>): import(\"../web.js\").BinaryOperatorAggregate<T, T>;\n Root: <S extends import(\"../web.js\").StateDefinition>(sd: S) => AnnotationRoot<S>;\n };\n}>;\ndeclare const PreHookAnnotation: AnnotationRoot<{\n llmInputMessages: import(\"../web.js\").BinaryOperatorAggregate<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], Messages>;\n}>;\ntype PreHookAnnotation = typeof PreHookAnnotation;\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype AnyAnnotationRoot = AnnotationRoot<any>;\ntype ToAnnotationRoot<A extends AnyAnnotationRoot | InteropZodObject> = A extends AnyAnnotationRoot ? A : A extends InteropZodObject ? AnnotationRoot<InteropZodToStateDefinition<A>> : never;\n/**\n * @deprecated `CreateReactAgentParams` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { CreateAgentParams } from \"langchain\";`\n */\nexport type CreateReactAgentParams<A extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseType extends Record<string, any> = Record<string, any>, C extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = {\n /** The chat model that can utilize OpenAI-style tool calling. */\n llm: LanguageModelLike | ((state: ToAnnotationRoot<A>[\"State\"] & PreHookAnnotation[\"State\"], runtime: Runtime<ToAnnotationRoot<C>[\"State\"]>) => Promise<LanguageModelLike> | LanguageModelLike);\n /** A list of tools or a ToolNode. 
*/\n tools: ToolNode | (ServerTool | ClientTool)[];\n /**\n * @deprecated Use prompt instead.\n */\n messageModifier?: MessageModifier;\n /**\n * @deprecated Use prompt instead.\n */\n stateModifier?: StateModifier;\n /**\n * An optional prompt for the LLM. This takes full graph state BEFORE the LLM is called and prepares the input to LLM.\n *\n * Can take a few different forms:\n *\n * - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state[\"messages\"].\n * - SystemMessage: this is added to the beginning of the list of messages in state[\"messages\"].\n * - Function: This function should take in full graph state and the output is then passed to the language model.\n * - Runnable: This runnable should take in full graph state and the output is then passed to the language model.\n *\n * Note:\n * Prior to `v0.2.46`, the prompt was set using `stateModifier` / `messagesModifier` parameters.\n * This is now deprecated and will be removed in a future release.\n */\n prompt?: Prompt;\n /**\n * Additional state schema for the agent.\n */\n stateSchema?: A;\n /**\n * An optional schema for the context.\n */\n contextSchema?: C;\n /** An optional checkpoint saver to persist the agent's state. */\n checkpointSaver?: BaseCheckpointSaver | boolean;\n /** An optional checkpoint saver to persist the agent's state. Alias of \"checkpointSaver\". */\n checkpointer?: BaseCheckpointSaver | boolean;\n /** An optional list of node names to interrupt before running. */\n interruptBefore?: N[] | All;\n /** An optional list of node names to interrupt after running. */\n interruptAfter?: N[] | All;\n store?: BaseStore;\n /**\n * An optional schema for the final agent output.\n *\n * If provided, output will be formatted to match the given schema and returned in the 'structuredResponse' state key.\n * If not provided, `structuredResponse` will not be present in the output state.\n *\n * Can be passed in as:\n * - Zod schema\n * - JSON schema\n * - { prompt, schema }, where schema is one of the above.\n * The prompt will be used together with the model that is being used to generate the structured response.\n *\n * @remarks\n * **Important**: `responseFormat` requires the model to support `.withStructuredOutput()`.\n *\n * **Note**: The graph will make a separate call to the LLM to generate the structured response after the agent loop is finished.\n * This is not the only strategy to get structured responses, see more options in [this guide](https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output/).\n */\n responseFormat?: InteropZodType<StructuredResponseType> | StructuredResponseSchemaOptions<StructuredResponseType>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>;\n /**\n * An optional name for the agent.\n */\n name?: string;\n /**\n * An optional description for the agent.\n * This can be used to describe the agent to the underlying supervisor LLM.\n */\n description?: string | undefined;\n /**\n * Use to specify how to expose the agent name to the underlying supervisor LLM.\n \n - undefined: Relies on the LLM provider {@link AIMessage#name}. 
Currently, only OpenAI supports this.\n - `\"inline\"`: Add the agent name directly into the content field of the {@link AIMessage} using XML-style tags.\n Example: `\"How can I help you\"` -> `\"<name>agent_name</name><content>How can I help you?</content>\"`\n */\n includeAgentName?: \"inline\" | undefined;\n /**\n * An optional node to add before the `agent` node (i.e., the node that calls the LLM).\n * Useful for managing long message histories (e.g., message trimming, summarization, etc.).\n */\n preModelHook?: RunnableLike<ToAnnotationRoot<A>[\"State\"] & PreHookAnnotation[\"State\"], ToAnnotationRoot<A>[\"Update\"] & PreHookAnnotation[\"Update\"], LangGraphRunnableConfig>;\n /**\n * An optional node to add after the `agent` node (i.e., the node that calls the LLM).\n * Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.\n */\n postModelHook?: RunnableLike<ToAnnotationRoot<A>[\"State\"], ToAnnotationRoot<A>[\"Update\"], LangGraphRunnableConfig>;\n /**\n * Determines the version of the graph to create.\n *\n * Can be one of\n * - `\"v1\"`: The tool node processes a single message. All tool calls in the message are\n * executed in parallel within the tool node.\n * - `\"v2\"`: The tool node processes a single tool call. Tool calls are distributed across\n * multiple instances of the tool node using the Send API.\n *\n * @default `\"v1\"`\n */\n version?: \"v1\" | \"v2\";\n};\n/**\n * @deprecated `createReactAgent` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { createAgent } from \"langchain\";`\n *\n * Creates a StateGraph agent that relies on a chat model utilizing tool calling.\n *\n * @example\n * ```ts\n * import { ChatOpenAI } from \"@langchain/openai\";\n * import { tool } from \"@langchain/core/tools\";\n * import { z } from \"zod\";\n * import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n *\n * const model = new ChatOpenAI({\n * model: \"gpt-4o\",\n * });\n *\n * const getWeather = tool((input) => {\n * if ([\"sf\", \"san francisco\"].includes(input.location.toLowerCase())) {\n * return \"It's 60 degrees and foggy.\";\n * } else {\n * return \"It's 90 degrees and sunny.\";\n * }\n * }, {\n * name: \"get_weather\",\n * description: \"Call to get the current weather.\",\n * schema: z.object({\n * location: z.string().describe(\"Location to get the weather for.\"),\n * })\n * })\n *\n * const agent = createReactAgent({ llm: model, tools: [getWeather] });\n *\n * const inputs = {\n * messages: [{ role: \"user\", content: \"what is the weather in SF?\" }],\n * };\n *\n * const stream = await agent.stream(inputs, { streamMode: \"values\" });\n *\n * for await (const { messages } of stream) {\n * console.log(messages);\n * }\n * // Returns the messages in the state at each step of execution\n * ```\n */\nexport declare function createReactAgent<A extends AnyAnnotationRoot | InteropZodObject = typeof MessagesAnnotation, \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseFormat extends Record<string, any> = Record<string, any>, C extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot>(params: CreateReactAgentParams<A, StructuredResponseFormat, C>): CompiledStateGraph<ToAnnotationRoot<A>[\"State\"], ToAnnotationRoot<A>[\"Update\"], \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nany, typeof MessagesAnnotation.spec & ToAnnotationRoot<A>[\"spec\"], ReturnType<typeof 
createReactAgentAnnotation<StructuredResponseFormat>>[\"spec\"] & ToAnnotationRoot<A>[\"spec\"]>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;UAkBiB6B;AAAjB;+BAE+BC,MAFJ,CAAA,MAAA,EAAA,GAAA,CAAA,GAE0BA,MAF1B,CAAA,MAAA,EAAA,GAAA,CAAA,CAAA,CAAA;UAEIA,EACjBzB,WADiByB,EAAAA;;;;;EASnBE;EACPC,kBAAAA,EAHmBF,sBAGY;;AAETA,KAHfC,CAAAA,GAGeD,OAHJJ,KAGII,GAAAA,OAAAA,GAAAA,OAAAA;KAFtBE,+BAEOjB,CAAAA,sBAAAA,CAAAA,GAAAA;;UAAAA,eAAee,0BAA0BD;EAKhDI,MAAAA,CAAAA,EAAAA,MAAU;EACVC,MAAAA,CAAAA,EAAAA,OAAU;EAAA,CAAA,GAAA,EAAA,MAAA,CAAA,EAAA,OAAA;;KADVD,UAAAA,GAAaJ,MAC0BjB,CAAAA,MAAAA,EAAAA,OAAAA,CAAAA;KAAvCsB,UAAAA,GAAarB,uBAAwCL,GAAdI,WAAcJ,GAAAA,gBAAAA;AASyIe,KAAvLsB,MAAAA,GAASvC,aAA8KiB,GAAAA,MAAAA,GAAAA,CAAAA,CAAAA,KAAAA,EAAAA,OAArIF,kBAAAA,CAAmByB,KAAkHvB,EAAAA,MAAAA,EAAnGA,uBAAmGA,EAAAA,GAAvElB,eAAuEkB,EAAAA,CAAAA,GAAAA,CAAAA,CAAAA,KAAAA,EAAAA,OAAlCF,kBAAAA,CAAmByB,KAAevB,EAAAA,MAAAA,EAAAA,uBAAAA,EAAAA,GAA4Ba,OAA5Bb,CAAoClB,eAApCkB,EAAAA,CAAAA,CAAAA,GAA0DhB,QAA1DgB;;AAA4Ba,KAEnNW,aAAAA,GAAgBF,MAFmMT;;KAInNY,eAAAA,GAAkB1C,qCAAqCF,kBAAkBA,6BAA6BA,kBAAkBgC,QAAQhC,kBAAkBG;AAFlJwC,cAGSE,0BAHOJ,EAAAA;AAE5B;UAGUhB,MAHiB,CAAA,MAAA,EAAA,GAAA,CAAA,GAGKA,MAHL,CAAA,MAAA,EAAA,GAAA,CAAA,CAAA,GAAA,GAG+BT,cAH/B,CAAA;UAAGd,yBAAAA,CAI4BF,WAJ5BE,CAGQP,0BAAAA,CACmE0C,gBAAAA,EAAgB1C,0BAAAA,CAAqCmD,WAAAA,CAJhI5C,EAAAA,EAIgJmB,QAJhJnB,CAAAA;oBAAqCF,EAAAA;IAAkBA,EAAAA,WAAAA,CAM3CgD,CAN2ChD,CAAAA;IAA6BA,CAAAA,UAAAA,eAAAA,CAO3DgD,CAP2DhD,EAOxDgD,CAPwDhD,CAAAA,CAAAA,yBAAAA,CAOPgD,CAPOhD,EAOJgD,CAPIhD,CAAAA;IAA0BA,IAAAA,EAAAA,CAAAA,yBAAAA,CAAAA,CAAAA,EAAAA,EAQ1EoD,CAR0EpD,EAAAA,GAQpEgB,cARoEhB,CAQrDoD,CARqDpD,CAAAA;;;cAW9HqD,mBAAmBrC;EAVZ6B,gBAAAA,yBASnB,CAEgE7C,WAFhE,CAGAL,0BAAAA,CAD+G0C,gBAAAA,EAAgB1C,0BAAAA,CAAqCmD,WAAAA,CAFpK,EAAA,EAEoLzB,QAFpL,CAAA;CAAA,CAAA;KAIGgC,iBAAAA,GAXK5B,OAWsB4B,iBAXtB5B;;KAaL6B,iBAAAA,GAAoBtC,cAZgFqB,CAAAA,GAAAA,CAAAA;KAapGkB,gBAboH5D,CAAAA,UAazF2D,iBAb8HR,GAa1GpC,gBAb0GoC,CAAAA,GAatFU,CAbsFV,SAa5EQ,iBAb4ER,GAaxDU,CAbwDV,GAapDU,CAboDV,SAa1CpC,gBAb0CoC,GAavB9B,cAbuB8B,CAaRvB,2BAbQuB,CAaoBU,CAbpBV,CAAAA,CAAAA,GAAAA,KAAAA;;;;;KAkBlJW,iCAAiCH,oBAAoB5C,mBAAmB4C;;+BAErD7B,MAjB2BuB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,GAiBLvB,MAjBKuB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,EAAAA,UAiB0BM,iBAjB1BN,GAiB8CtC,gBAjB9CsC,GAiBiEM,iBAjBjEN,CAAAA,GAAAA;;KAAiDA,EAmBlGjD,iBAnBkGiD,GAAAA,CAAAA,CAAAA,KAAAA,EAmBrEO,gBAnBqEP,CAmBpDQ,CAnBoDR,CAAAA,CAAAA,OAAAA,CAAAA,GAmBtCK,iBAnBsCL,CAAAA,OAAAA,CAAAA,EAAAA,OAAAA,EAmBD5B,OAnBC4B,CAmBOO,gBAnBPP,CAmBwBU,CAnBxBV,CAAAA,CAAAA,OAAAA,CAAAA,CAAAA,EAAAA,GAmByChB,OAnBzCgB,CAmBiDjD,iBAnBjDiD,CAAAA,GAmBsEjD,iBAnBtEiD,CAAAA;;SAqBhG9B,YAAYW,aAAaC;;;;iBApBoCd,CAAAA,EAwBlD4B,eAxBkD5B;;;AAErE;EAGD,aAAA,CAAA,EAuBkB2B,aAvBlB;;;;;;;;AAF6C;AAGE;AAEV;;;;;QAC2CW,CAAAA,EAkCrEb,MAlCqEa;;;;aAAgGE,CAAAA,EAsChKA,CAtCgKA;;;;EAKtKC,aAAAA,CAAAA,EAqCQC,CArCRD;EAAsB;iBAAWH,CAAAA,EAuCvBzC,mBAvCuByC,GAAAA,OAAAA;;cAAuCA,CAAAA,EAyCjEzC,mBAzCiEyC,GAAAA,OAAAA;;iBAE/B7B,CAAAA,EAyC/BE,CAzC+BF,EAAAA,GAyCzBb,GAzCyBa;;gBAAmDf,CAAAA,EA2CnFiB,CA3CmFjB,EAAAA,GA2C7EE,GA3C6EF;OAAmB4C,CAAAA,EA4C/GxC,SA5C+GwC;;;;;;;;;;;;;;;;;;;gBAqCrGzC,CAAAA,EA0BDF,cA1BCE,CA0Bca,sBA1Bdb,CAAAA,GA0BwCe,+BA1BxCf,CA0BwEa,sBA1BxEb;;IA4BfY,MAxBeE,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;;;;MAGVb,CAAAA,EAAAA,MAAAA;;;;;aAqBLW,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;;;;;;;kBAsBYlB,CAAAA,EAAAA,QAAAA,GAAAA,SAAAA;;;;;cAK2EY,CAAAA,EAL3EZ,YAK2EY,CAL9DoC,gBAK8DpC,CAL7CqC,CAK6CrC,CAAAA,CAAAA,OAAAA,CAAAA,GAL/BkC,iBAK+BlC,CAAAA,OAAAA,CAAAA,EALHoC,gBAKGpC,CALcqC,CAKdrC,CAAAA,CAAAA,QAAAA,CAAAA,GAL6BkC,iBAK7BlC,CAAAA,QAAAA,CAAAA,EAL0DA,uBAK1DA,CAAAA;;;AA2D9F;;eAAmDmC,CAAAA,EA3D/B/C,YA2D+B+C,CA3Dl
BC,gBA2DkBD,CA3DDE,CA2DCF,CAAAA,CAAAA,OAAAA,CAAAA,EA3DYC,gBA2DZD,CA3D6BE,CA2D7BF,CAAAA,CAAAA,QAAAA,CAAAA,EA3D2CnC,uBA2D3CmC,CAAAA;;;;;;;;;;;;SAEkME,CAAAA,EAAAA,IAAAA,GAAAA,IAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAF7NG,2BAA2BL,oBAAoB5C,0BAA0BO;;iCAEhEQ,sBAAsBA,+BAA+B6B,oBAAoB5C,mBAAmB4C,2BAA2BG,uBAAuBD,GAAGI,0BAA0BF,KAAK3C,mBAAmBwC,iBAAiBC,aAAaD,iBAAiBC;;YAEvQvC,kBAAAA,CAAmB4C,OAAON,iBAAiBC,YAAYM,kBAAkBjB,2BAA2Be,qCAAqCL,iBAAiBC"}
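The declaration-map hunk above reduces to a type-level change: the embedded `react_agent_executor.d.ts` source now spells the message types with the generics from newer `@langchain/core` builds (`AIMessageChunk<MessageStructure>`, `BaseMessage<MessageStructure, MessageType>[]`) where the previous build used the bare forms. The bare spelling still appears elsewhere in the same declarations (e.g. `messages: BaseMessage[]` in `AgentState`), so downstream typings written against plain `BaseMessage[]` should keep compiling. A minimal sketch under that assumption; `WeatherAgentState` and `describeHistory` are made up for illustration and are not part of the package:

```ts
import type { BaseMessage } from "@langchain/core/messages";

// Hypothetical consumer-side state typing: still uses the non-generic
// `BaseMessage[]` spelling, matching `AgentState` in the declarations above.
interface WeatherAgentState {
  messages: BaseMessage[];
  structuredResponse: Record<string, unknown>;
}

// Trivial helper just to show the shape type-checks as before.
const describeHistory = (state: WeatherAgentState): string =>
  `messages in state: ${state.messages.length}`;

console.log(describeHistory({ messages: [], structuredResponse: {} }));
```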
@@ -180,12 +180,12 @@ var ToolNode = class extends require_utils.RunnableCallable {
  let aiMessage;
  for (let i = messages.length - 1; i >= 0; i -= 1) {
  const message = messages[i];
- if (message.getType() === "ai") {
+ if ((0, __langchain_core_messages.isAIMessage)(message)) {
  aiMessage = message;
  break;
  }
  }
- if (aiMessage?.getType() !== "ai") throw new Error("ToolNode only accepts AIMessages as input.");
+ if (aiMessage == null || !(0, __langchain_core_messages.isAIMessage)(aiMessage)) throw new Error("ToolNode only accepts AIMessages as input.");
  outputs = await Promise.all(aiMessage.tool_calls?.filter((call) => call.id == null || !toolMessageIds.has(call.id)).map((call) => this.runTool(call, config)) ?? []);
  }
  if (!outputs.some(require_constants.isCommand)) return Array.isArray(input) ? outputs : { messages: outputs };
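The runtime change in this hunk is narrow: the compiled `ToolNode.run` now locates the last AI message with the `isAIMessage` helper from `@langchain/core/messages` (the updated source map below shows the new `isAIMessage` import in `tool_node.ts`) instead of comparing `message.getType() === "ai"`. A minimal sketch of the same guard pattern in application code; the `lastAIMessage` helper and the sample history are illustrative, not part of the package:

```ts
import {
  AIMessage,
  HumanMessage,
  isAIMessage,
  type BaseMessage,
} from "@langchain/core/messages";

// Walk the history backwards and return the most recent AI message,
// mirroring the loop in the compiled ToolNode.run above.
const lastAIMessage = (messages: BaseMessage[]): AIMessage | undefined => {
  for (let i = messages.length - 1; i >= 0; i -= 1) {
    const message = messages[i];
    // `isAIMessage` is a type guard, so `message` is narrowed to AIMessage here.
    if (isAIMessage(message)) return message;
  }
  return undefined;
};

const history: BaseMessage[] = [
  new HumanMessage("what is the weather in SF?"),
  new AIMessage({
    content: "",
    tool_calls: [
      { name: "get_weather", args: { location: "sf" }, id: "call_1", type: "tool_call" },
    ],
  }),
];

// The narrowed return type exposes `tool_calls` without a cast,
// which is what the compiled code relies on before filtering the calls.
console.log(lastAIMessage(history)?.tool_calls?.length); // -> 1
```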
@@ -1 +1 @@
- {"version":3,"file":"tool_node.cjs","names":["isBaseMessage","RunnableCallable","tool","isCommand","ToolMessage","e: any","isGraphInterrupt","outputs: (ToolMessage | Command)[]","messages: BaseMessage[]","toolMessageIds: Set<string>","aiMessage: AIMessage | undefined","combinedOutputs: (\n | { messages: BaseMessage[] }\n | BaseMessage[]\n | Command\n )[]","parentCommand: Command | null","Command","_isSend","END"],"sources":["../../src/prebuilt/tool_node.ts"],"sourcesContent":["import {\n BaseMessage,\n ToolMessage,\n AIMessage,\n isBaseMessage,\n} from \"@langchain/core/messages\";\nimport { RunnableConfig, RunnableToolLike } from \"@langchain/core/runnables\";\nimport { DynamicTool, StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { ToolCall } from \"@langchain/core/messages/tool\";\nimport { RunnableCallable } from \"../utils.js\";\nimport { MessagesAnnotation } from \"../graph/messages_annotation.js\";\nimport { isGraphInterrupt } from \"../errors.js\";\nimport { END, isCommand, Command, _isSend, Send } from \"../constants.js\";\n\nexport type ToolNodeOptions = {\n name?: string;\n tags?: string[];\n handleToolErrors?: boolean;\n};\n\nconst isBaseMessageArray = (input: unknown): input is BaseMessage[] =>\n Array.isArray(input) && input.every(isBaseMessage);\n\nconst isMessagesState = (\n input: unknown\n): input is { messages: BaseMessage[] } =>\n typeof input === \"object\" &&\n input != null &&\n \"messages\" in input &&\n isBaseMessageArray(input.messages);\n\nconst isSendInput = (input: unknown): input is { lg_tool_call: ToolCall } =>\n typeof input === \"object\" && input != null && \"lg_tool_call\" in input;\n\n/**\n * @deprecated `ToolNode` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { ToolNode } from \"langchain\";`\n *\n * A node that runs the tools requested in the last AIMessage. It can be used\n * either in StateGraph with a \"messages\" key or in MessageGraph. If multiple\n * tool calls are requested, they will be run in parallel. 
The output will be\n * a list of ToolMessages, one for each tool call.\n *\n * @example\n * ```ts\n * import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n * import { tool } from \"@langchain/core/tools\";\n * import { z } from \"zod\";\n * import { AIMessage } from \"@langchain/core/messages\";\n *\n * const getWeather = tool((input) => {\n * if ([\"sf\", \"san francisco\"].includes(input.location.toLowerCase())) {\n * return \"It's 60 degrees and foggy.\";\n * } else {\n * return \"It's 90 degrees and sunny.\";\n * }\n * }, {\n * name: \"get_weather\",\n * description: \"Call to get the current weather.\",\n * schema: z.object({\n * location: z.string().describe(\"Location to get the weather for.\"),\n * }),\n * });\n *\n * const tools = [getWeather];\n * const toolNode = new ToolNode(tools);\n *\n * const messageWithSingleToolCall = new AIMessage({\n * content: \"\",\n * tool_calls: [\n * {\n * name: \"get_weather\",\n * args: { location: \"sf\" },\n * id: \"tool_call_id\",\n * type: \"tool_call\",\n * }\n * ]\n * })\n *\n * await toolNode.invoke({ messages: [messageWithSingleToolCall] });\n * // Returns tool invocation responses as:\n * // { messages: ToolMessage[] }\n * ```\n *\n * @example\n * ```ts\n * import {\n * StateGraph,\n * MessagesAnnotation,\n * } from \"@langchain/langgraph\";\n * import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n * import { tool } from \"@langchain/core/tools\";\n * import { z } from \"zod\";\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n *\n * const getWeather = tool((input) => {\n * if ([\"sf\", \"san francisco\"].includes(input.location.toLowerCase())) {\n * return \"It's 60 degrees and foggy.\";\n * } else {\n * return \"It's 90 degrees and sunny.\";\n * }\n * }, {\n * name: \"get_weather\",\n * description: \"Call to get the current weather.\",\n * schema: z.object({\n * location: z.string().describe(\"Location to get the weather for.\"),\n * }),\n * });\n *\n * const tools = [getWeather];\n * const modelWithTools = new ChatAnthropic({\n * model: \"claude-3-haiku-20240307\",\n * temperature: 0\n * }).bindTools(tools);\n *\n * const toolNodeForGraph = new ToolNode(tools)\n *\n * const shouldContinue = (state: typeof MessagesAnnotation.State) => {\n * const { messages } = state;\n * const lastMessage = messages[messages.length - 1];\n * if (\"tool_calls\" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls?.length) {\n * return \"tools\";\n * }\n * return \"__end__\";\n * }\n *\n * const callModel = async (state: typeof MessagesAnnotation.State) => {\n * const { messages } = state;\n * const response = await modelWithTools.invoke(messages);\n * return { messages: response };\n * }\n *\n * const graph = new StateGraph(MessagesAnnotation)\n * .addNode(\"agent\", callModel)\n * .addNode(\"tools\", toolNodeForGraph)\n * .addEdge(\"__start__\", \"agent\")\n * .addConditionalEdges(\"agent\", shouldContinue)\n * .addEdge(\"tools\", \"agent\")\n * .compile();\n *\n * const inputs = {\n * messages: [{ role: \"user\", content: \"what is the weather in SF?\" }],\n * };\n *\n * const stream = await graph.stream(inputs, {\n * streamMode: \"values\",\n * });\n *\n * for await (const { messages } of stream) {\n * console.log(messages);\n * }\n * // Returns the messages in the state at each step of execution\n * ```\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport class ToolNode<T = any> extends RunnableCallable<T, T> {\n tools: (StructuredToolInterface | DynamicTool | 
RunnableToolLike)[];\n\n handleToolErrors = true;\n\n trace = false;\n\n constructor(\n tools: (StructuredToolInterface | DynamicTool | RunnableToolLike)[],\n options?: ToolNodeOptions\n ) {\n const { name, tags, handleToolErrors } = options ?? {};\n super({ name, tags, func: (input, config) => this.run(input, config) });\n this.tools = tools;\n this.handleToolErrors = handleToolErrors ?? this.handleToolErrors;\n }\n\n protected async runTool(\n call: ToolCall,\n config: RunnableConfig\n ): Promise<ToolMessage | Command> {\n const tool = this.tools.find((tool) => tool.name === call.name);\n try {\n if (tool === undefined) {\n throw new Error(`Tool \"${call.name}\" not found.`);\n }\n const output = await tool.invoke({ ...call, type: \"tool_call\" }, config);\n\n if (\n (isBaseMessage(output) && output.getType() === \"tool\") ||\n isCommand(output)\n ) {\n return output as ToolMessage | Command;\n }\n\n return new ToolMessage({\n status: \"success\",\n name: tool.name,\n content: typeof output === \"string\" ? output : JSON.stringify(output),\n tool_call_id: call.id!,\n });\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } catch (e: any) {\n if (!this.handleToolErrors) throw e;\n\n if (isGraphInterrupt(e)) {\n // `NodeInterrupt` errors are a breakpoint to bring a human into the loop.\n // As such, they are not recoverable by the agent and shouldn't be fed\n // back. Instead, re-throw these errors even when `handleToolErrors = true`.\n throw e;\n }\n\n return new ToolMessage({\n status: \"error\",\n content: `Error: ${e.message}\\n Please fix your mistakes.`,\n name: call.name,\n tool_call_id: call.id ?? \"\",\n });\n }\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n protected async run(input: unknown, config: RunnableConfig): Promise<T> {\n let outputs: (ToolMessage | Command)[];\n\n if (isSendInput(input)) {\n outputs = [await this.runTool(input.lg_tool_call, config)];\n } else {\n let messages: BaseMessage[];\n if (isBaseMessageArray(input)) {\n messages = input;\n } else if (isMessagesState(input)) {\n messages = input.messages;\n } else {\n throw new Error(\n \"ToolNode only accepts BaseMessage[] or { messages: BaseMessage[] } as input.\"\n );\n }\n\n const toolMessageIds: Set<string> = new Set(\n messages\n .filter((msg) => msg.getType() === \"tool\")\n .map((msg) => (msg as ToolMessage).tool_call_id)\n );\n\n let aiMessage: AIMessage | undefined;\n for (let i = messages.length - 1; i >= 0; i -= 1) {\n const message = messages[i];\n if (message.getType() === \"ai\") {\n aiMessage = message;\n break;\n }\n }\n\n if (aiMessage?.getType() !== \"ai\") {\n throw new Error(\"ToolNode only accepts AIMessages as input.\");\n }\n\n outputs = await Promise.all(\n aiMessage.tool_calls\n ?.filter((call) => call.id == null || !toolMessageIds.has(call.id))\n .map((call) => this.runTool(call, config)) ?? []\n );\n }\n\n // Preserve existing behavior for non-command tool outputs for backwards compatibility\n if (!outputs.some(isCommand)) {\n return (Array.isArray(input) ? 
outputs : { messages: outputs }) as T;\n }\n\n // Handle mixed Command and non-Command outputs\n const combinedOutputs: (\n | { messages: BaseMessage[] }\n | BaseMessage[]\n | Command\n )[] = [];\n let parentCommand: Command | null = null;\n\n for (const output of outputs) {\n if (isCommand(output)) {\n if (\n output.graph === Command.PARENT &&\n Array.isArray(output.goto) &&\n output.goto.every((send) => _isSend(send))\n ) {\n if (parentCommand) {\n (parentCommand.goto as Send[]).push(...(output.goto as Send[]));\n } else {\n parentCommand = new Command({\n graph: Command.PARENT,\n goto: output.goto,\n });\n }\n } else {\n combinedOutputs.push(output);\n }\n } else {\n combinedOutputs.push(\n Array.isArray(input) ? [output] : { messages: [output] }\n );\n }\n }\n\n if (parentCommand) {\n combinedOutputs.push(parentCommand);\n }\n\n return combinedOutputs as T;\n }\n}\n\n/**\n * @deprecated Use new `ToolNode` from {@link https://www.npmjs.com/package/langchain langchain} package instead.\n */\nexport function toolsCondition(\n state: BaseMessage[] | typeof MessagesAnnotation.State\n): \"tools\" | typeof END {\n const message = Array.isArray(state)\n ? state[state.length - 1]\n : state.messages[state.messages.length - 1];\n\n if (\n message !== undefined &&\n \"tool_calls\" in message &&\n ((message as AIMessage).tool_calls?.length ?? 0) > 0\n ) {\n return \"tools\";\n } else {\n return END;\n }\n}\n"],"mappings":";;;;;;;AAoBA,MAAM,sBAAsB,UAC1B,MAAM,QAAQ,UAAU,MAAM,MAAMA;AAEtC,MAAM,mBACJ,UAEA,OAAO,UAAU,YACjB,SAAS,QACT,cAAc,SACd,mBAAmB,MAAM;AAE3B,MAAM,eAAe,UACnB,OAAO,UAAU,YAAY,SAAS,QAAQ,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2HlE,IAAa,WAAb,cAAuCC,+BAAuB;CAC5D;CAEA,mBAAmB;CAEnB,QAAQ;CAER,YACE,OACA,SACA;EACA,MAAM,EAAE,MAAM,MAAM,qBAAqB,WAAW;AACpD,QAAM;GAAE;GAAM;GAAM,OAAO,OAAO,WAAW,KAAK,IAAI,OAAO;;AAC7D,OAAK,QAAQ;AACb,OAAK,mBAAmB,oBAAoB,KAAK;;CAGnD,MAAgB,QACd,MACA,QACgC;EAChC,MAAM,OAAO,KAAK,MAAM,MAAM,WAASC,OAAK,SAAS,KAAK;AAC1D,MAAI;AACF,OAAI,SAAS,OACX,OAAM,IAAI,MAAM,SAAS,KAAK,KAAK;GAErC,MAAM,SAAS,MAAM,KAAK,OAAO;IAAE,GAAG;IAAM,MAAM;MAAe;AAEjE,oDACiB,WAAW,OAAO,cAAc,UAC/CC,4BAAU,QAEV,QAAO;AAGT,UAAO,IAAIC,sCAAY;IACrB,QAAQ;IACR,MAAM,KAAK;IACX,SAAS,OAAO,WAAW,WAAW,SAAS,KAAK,UAAU;IAC9D,cAAc,KAAK;;WAGdC,GAAQ;AACf,OAAI,CAAC,KAAK,iBAAkB,OAAM;AAElC,OAAIC,gCAAiB,GAInB,OAAM;AAGR,UAAO,IAAIF,sCAAY;IACrB,QAAQ;IACR,SAAS,UAAU,EAAE,QAAQ;IAC7B,MAAM,KAAK;IACX,cAAc,KAAK,MAAM;;;;CAM/B,MAAgB,IAAI,OAAgB,QAAoC;EACtE,IAAIG;AAEJ,MAAI,YAAY,OACd,WAAU,CAAC,MAAM,KAAK,QAAQ,MAAM,cAAc;OAC7C;GACL,IAAIC;AACJ,OAAI,mBAAmB,OACrB,YAAW;YACF,gBAAgB,OACzB,YAAW,MAAM;OAEjB,OAAM,IAAI,MACR;GAIJ,MAAMC,iBAA8B,IAAI,IACtC,SACG,QAAQ,QAAQ,IAAI,cAAc,QAClC,KAAK,QAAS,IAAoB;GAGvC,IAAIC;AACJ,QAAK,IAAI,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,KAAK,GAAG;IAChD,MAAM,UAAU,SAAS;AACzB,QAAI,QAAQ,cAAc,MAAM;AAC9B,iBAAY;AACZ;;;AAIJ,OAAI,WAAW,cAAc,KAC3B,OAAM,IAAI,MAAM;AAGlB,aAAU,MAAM,QAAQ,IACtB,UAAU,YACN,QAAQ,SAAS,KAAK,MAAM,QAAQ,CAAC,eAAe,IAAI,KAAK,KAC9D,KAAK,SAAS,KAAK,QAAQ,MAAM,YAAY;;AAKpD,MAAI,CAAC,QAAQ,KAAKP,6BAChB,QAAQ,MAAM,QAAQ,SAAS,UAAU,EAAE,UAAU;EAIvD,MAAMQ,kBAIA;EACN,IAAIC,gBAAgC;AAEpC,OAAK,MAAM,UAAU,QACnB,KAAIT,4BAAU,QACZ,KACE,OAAO,UAAUU,0BAAQ,UACzB,MAAM,QAAQ,OAAO,SACrB,OAAO,KAAK,OAAO,SAASC,0BAAQ,OAEpC,KAAI,cACF,CAAC,cAAc,KAAgB,KAAK,GAAI,OAAO;MAE/C,iBAAgB,IAAID,0BAAQ;GAC1B,OAAOA,0BAAQ;GACf,MAAM,OAAO;;MAIjB,iBAAgB,KAAK;MAGvB,iBAAgB,KACd,MAAM,QAAQ,SAAS,CAAC,UAAU,EAAE,UAAU,CAAC;AAKrD,MAAI,cACF,iBAAgB,KAAK;AAGvB,SAAO;;;;;;AAOX,SAAgB,eACd,OACsB;CACtB,MAAM,UAAU,MAAM,QAAQ,SAC1B,MAAM,MAAM,
SAAS,KACrB,MAAM,SAAS,MAAM,SAAS,SAAS;AAE3C,KACE,YAAY,UACZ,gBAAgB,YACd,QAAsB,YAAY,UAAU,KAAK,EAEnD,QAAO;KAEP,QAAOE"}
+ {"version":3,"file":"tool_node.cjs","names":["isBaseMessage","RunnableCallable","tool","isCommand","ToolMessage","e: any","isGraphInterrupt","outputs: (ToolMessage | Command)[]","messages: BaseMessage[]","toolMessageIds: Set<string>","aiMessage: AIMessage | undefined","combinedOutputs: (\n | { messages: BaseMessage[] }\n | BaseMessage[]\n | Command\n )[]","parentCommand: Command | null","Command","_isSend","END"],"sources":["../../src/prebuilt/tool_node.ts"],"sourcesContent":["import {\n BaseMessage,\n ToolMessage,\n AIMessage,\n isBaseMessage,\n isAIMessage,\n} from \"@langchain/core/messages\";\nimport { RunnableConfig, RunnableToolLike } from \"@langchain/core/runnables\";\nimport { DynamicTool, StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { ToolCall } from \"@langchain/core/messages/tool\";\nimport { RunnableCallable } from \"../utils.js\";\nimport { MessagesAnnotation } from \"../graph/messages_annotation.js\";\nimport { isGraphInterrupt } from \"../errors.js\";\nimport { END, isCommand, Command, _isSend, Send } from \"../constants.js\";\n\nexport type ToolNodeOptions = {\n name?: string;\n tags?: string[];\n handleToolErrors?: boolean;\n};\n\nconst isBaseMessageArray = (input: unknown): input is BaseMessage[] =>\n Array.isArray(input) && input.every(isBaseMessage);\n\nconst isMessagesState = (\n input: unknown\n): input is { messages: BaseMessage[] } =>\n typeof input === \"object\" &&\n input != null &&\n \"messages\" in input &&\n isBaseMessageArray(input.messages);\n\nconst isSendInput = (input: unknown): input is { lg_tool_call: ToolCall } =>\n typeof input === \"object\" && input != null && \"lg_tool_call\" in input;\n\n/**\n * @deprecated `ToolNode` has been moved to {@link https://www.npmjs.com/package/langchain langchain} package.\n * Update your import to `import { ToolNode } from \"langchain\";`\n *\n * A node that runs the tools requested in the last AIMessage. It can be used\n * either in StateGraph with a \"messages\" key or in MessageGraph. If multiple\n * tool calls are requested, they will be run in parallel. 
The output will be\n * a list of ToolMessages, one for each tool call.\n *\n * @example\n * ```ts\n * import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n * import { tool } from \"@langchain/core/tools\";\n * import { z } from \"zod\";\n * import { AIMessage } from \"@langchain/core/messages\";\n *\n * const getWeather = tool((input) => {\n * if ([\"sf\", \"san francisco\"].includes(input.location.toLowerCase())) {\n * return \"It's 60 degrees and foggy.\";\n * } else {\n * return \"It's 90 degrees and sunny.\";\n * }\n * }, {\n * name: \"get_weather\",\n * description: \"Call to get the current weather.\",\n * schema: z.object({\n * location: z.string().describe(\"Location to get the weather for.\"),\n * }),\n * });\n *\n * const tools = [getWeather];\n * const toolNode = new ToolNode(tools);\n *\n * const messageWithSingleToolCall = new AIMessage({\n * content: \"\",\n * tool_calls: [\n * {\n * name: \"get_weather\",\n * args: { location: \"sf\" },\n * id: \"tool_call_id\",\n * type: \"tool_call\",\n * }\n * ]\n * })\n *\n * await toolNode.invoke({ messages: [messageWithSingleToolCall] });\n * // Returns tool invocation responses as:\n * // { messages: ToolMessage[] }\n * ```\n *\n * @example\n * ```ts\n * import {\n * StateGraph,\n * MessagesAnnotation,\n * } from \"@langchain/langgraph\";\n * import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n * import { tool } from \"@langchain/core/tools\";\n * import { z } from \"zod\";\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n *\n * const getWeather = tool((input) => {\n * if ([\"sf\", \"san francisco\"].includes(input.location.toLowerCase())) {\n * return \"It's 60 degrees and foggy.\";\n * } else {\n * return \"It's 90 degrees and sunny.\";\n * }\n * }, {\n * name: \"get_weather\",\n * description: \"Call to get the current weather.\",\n * schema: z.object({\n * location: z.string().describe(\"Location to get the weather for.\"),\n * }),\n * });\n *\n * const tools = [getWeather];\n * const modelWithTools = new ChatAnthropic({\n * model: \"claude-3-haiku-20240307\",\n * temperature: 0\n * }).bindTools(tools);\n *\n * const toolNodeForGraph = new ToolNode(tools)\n *\n * const shouldContinue = (state: typeof MessagesAnnotation.State) => {\n * const { messages } = state;\n * const lastMessage = messages[messages.length - 1];\n * if (\"tool_calls\" in lastMessage && Array.isArray(lastMessage.tool_calls) && lastMessage.tool_calls?.length) {\n * return \"tools\";\n * }\n * return \"__end__\";\n * }\n *\n * const callModel = async (state: typeof MessagesAnnotation.State) => {\n * const { messages } = state;\n * const response = await modelWithTools.invoke(messages);\n * return { messages: response };\n * }\n *\n * const graph = new StateGraph(MessagesAnnotation)\n * .addNode(\"agent\", callModel)\n * .addNode(\"tools\", toolNodeForGraph)\n * .addEdge(\"__start__\", \"agent\")\n * .addConditionalEdges(\"agent\", shouldContinue)\n * .addEdge(\"tools\", \"agent\")\n * .compile();\n *\n * const inputs = {\n * messages: [{ role: \"user\", content: \"what is the weather in SF?\" }],\n * };\n *\n * const stream = await graph.stream(inputs, {\n * streamMode: \"values\",\n * });\n *\n * for await (const { messages } of stream) {\n * console.log(messages);\n * }\n * // Returns the messages in the state at each step of execution\n * ```\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport class ToolNode<T = any> extends RunnableCallable<T, T> {\n tools: (StructuredToolInterface | DynamicTool | 
RunnableToolLike)[];\n\n handleToolErrors = true;\n\n trace = false;\n\n constructor(\n tools: (StructuredToolInterface | DynamicTool | RunnableToolLike)[],\n options?: ToolNodeOptions\n ) {\n const { name, tags, handleToolErrors } = options ?? {};\n super({ name, tags, func: (input, config) => this.run(input, config) });\n this.tools = tools;\n this.handleToolErrors = handleToolErrors ?? this.handleToolErrors;\n }\n\n protected async runTool(\n call: ToolCall,\n config: RunnableConfig\n ): Promise<ToolMessage | Command> {\n const tool = this.tools.find((tool) => tool.name === call.name);\n try {\n if (tool === undefined) {\n throw new Error(`Tool \"${call.name}\" not found.`);\n }\n const output = await tool.invoke({ ...call, type: \"tool_call\" }, config);\n\n if (\n (isBaseMessage(output) && output.getType() === \"tool\") ||\n isCommand(output)\n ) {\n return output as ToolMessage | Command;\n }\n\n return new ToolMessage({\n status: \"success\",\n name: tool.name,\n content: typeof output === \"string\" ? output : JSON.stringify(output),\n tool_call_id: call.id!,\n });\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } catch (e: any) {\n if (!this.handleToolErrors) throw e;\n\n if (isGraphInterrupt(e)) {\n // `NodeInterrupt` errors are a breakpoint to bring a human into the loop.\n // As such, they are not recoverable by the agent and shouldn't be fed\n // back. Instead, re-throw these errors even when `handleToolErrors = true`.\n throw e;\n }\n\n return new ToolMessage({\n status: \"error\",\n content: `Error: ${e.message}\\n Please fix your mistakes.`,\n name: call.name,\n tool_call_id: call.id ?? \"\",\n });\n }\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n protected async run(input: unknown, config: RunnableConfig): Promise<T> {\n let outputs: (ToolMessage | Command)[];\n\n if (isSendInput(input)) {\n outputs = [await this.runTool(input.lg_tool_call, config)];\n } else {\n let messages: BaseMessage[];\n if (isBaseMessageArray(input)) {\n messages = input;\n } else if (isMessagesState(input)) {\n messages = input.messages;\n } else {\n throw new Error(\n \"ToolNode only accepts BaseMessage[] or { messages: BaseMessage[] } as input.\"\n );\n }\n\n const toolMessageIds: Set<string> = new Set(\n messages\n .filter((msg) => msg.getType() === \"tool\")\n .map((msg) => (msg as ToolMessage).tool_call_id)\n );\n\n let aiMessage: AIMessage | undefined;\n for (let i = messages.length - 1; i >= 0; i -= 1) {\n const message = messages[i];\n if (isAIMessage(message)) {\n aiMessage = message;\n break;\n }\n }\n\n if (aiMessage == null || !isAIMessage(aiMessage)) {\n throw new Error(\"ToolNode only accepts AIMessages as input.\");\n }\n\n outputs = await Promise.all(\n aiMessage.tool_calls\n ?.filter((call) => call.id == null || !toolMessageIds.has(call.id))\n .map((call) => this.runTool(call, config)) ?? []\n );\n }\n\n // Preserve existing behavior for non-command tool outputs for backwards compatibility\n if (!outputs.some(isCommand)) {\n return (Array.isArray(input) ? 
outputs : { messages: outputs }) as T;\n }\n\n // Handle mixed Command and non-Command outputs\n const combinedOutputs: (\n | { messages: BaseMessage[] }\n | BaseMessage[]\n | Command\n )[] = [];\n let parentCommand: Command | null = null;\n\n for (const output of outputs) {\n if (isCommand(output)) {\n if (\n output.graph === Command.PARENT &&\n Array.isArray(output.goto) &&\n output.goto.every((send) => _isSend(send))\n ) {\n if (parentCommand) {\n (parentCommand.goto as Send[]).push(...(output.goto as Send[]));\n } else {\n parentCommand = new Command({\n graph: Command.PARENT,\n goto: output.goto,\n });\n }\n } else {\n combinedOutputs.push(output);\n }\n } else {\n combinedOutputs.push(\n Array.isArray(input) ? [output] : { messages: [output] }\n );\n }\n }\n\n if (parentCommand) {\n combinedOutputs.push(parentCommand);\n }\n\n return combinedOutputs as T;\n }\n}\n\n/**\n * @deprecated Use new `ToolNode` from {@link https://www.npmjs.com/package/langchain langchain} package instead.\n */\nexport function toolsCondition(\n state: BaseMessage[] | typeof MessagesAnnotation.State\n): \"tools\" | typeof END {\n const message = Array.isArray(state)\n ? state[state.length - 1]\n : state.messages[state.messages.length - 1];\n\n if (\n message !== undefined &&\n \"tool_calls\" in message &&\n ((message as AIMessage).tool_calls?.length ?? 0) > 0\n ) {\n return \"tools\";\n } else {\n return END;\n }\n}\n"],"mappings":";;;;;;;AAqBA,MAAM,sBAAsB,UAC1B,MAAM,QAAQ,UAAU,MAAM,MAAMA;AAEtC,MAAM,mBACJ,UAEA,OAAO,UAAU,YACjB,SAAS,QACT,cAAc,SACd,mBAAmB,MAAM;AAE3B,MAAM,eAAe,UACnB,OAAO,UAAU,YAAY,SAAS,QAAQ,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2HlE,IAAa,WAAb,cAAuCC,+BAAuB;CAC5D;CAEA,mBAAmB;CAEnB,QAAQ;CAER,YACE,OACA,SACA;EACA,MAAM,EAAE,MAAM,MAAM,qBAAqB,WAAW;AACpD,QAAM;GAAE;GAAM;GAAM,OAAO,OAAO,WAAW,KAAK,IAAI,OAAO;;AAC7D,OAAK,QAAQ;AACb,OAAK,mBAAmB,oBAAoB,KAAK;;CAGnD,MAAgB,QACd,MACA,QACgC;EAChC,MAAM,OAAO,KAAK,MAAM,MAAM,WAASC,OAAK,SAAS,KAAK;AAC1D,MAAI;AACF,OAAI,SAAS,OACX,OAAM,IAAI,MAAM,SAAS,KAAK,KAAK;GAErC,MAAM,SAAS,MAAM,KAAK,OAAO;IAAE,GAAG;IAAM,MAAM;MAAe;AAEjE,oDACiB,WAAW,OAAO,cAAc,UAC/CC,4BAAU,QAEV,QAAO;AAGT,UAAO,IAAIC,sCAAY;IACrB,QAAQ;IACR,MAAM,KAAK;IACX,SAAS,OAAO,WAAW,WAAW,SAAS,KAAK,UAAU;IAC9D,cAAc,KAAK;;WAGdC,GAAQ;AACf,OAAI,CAAC,KAAK,iBAAkB,OAAM;AAElC,OAAIC,gCAAiB,GAInB,OAAM;AAGR,UAAO,IAAIF,sCAAY;IACrB,QAAQ;IACR,SAAS,UAAU,EAAE,QAAQ;IAC7B,MAAM,KAAK;IACX,cAAc,KAAK,MAAM;;;;CAM/B,MAAgB,IAAI,OAAgB,QAAoC;EACtE,IAAIG;AAEJ,MAAI,YAAY,OACd,WAAU,CAAC,MAAM,KAAK,QAAQ,MAAM,cAAc;OAC7C;GACL,IAAIC;AACJ,OAAI,mBAAmB,OACrB,YAAW;YACF,gBAAgB,OACzB,YAAW,MAAM;OAEjB,OAAM,IAAI,MACR;GAIJ,MAAMC,iBAA8B,IAAI,IACtC,SACG,QAAQ,QAAQ,IAAI,cAAc,QAClC,KAAK,QAAS,IAAoB;GAGvC,IAAIC;AACJ,QAAK,IAAI,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,KAAK,GAAG;IAChD,MAAM,UAAU,SAAS;AACzB,mDAAgB,UAAU;AACxB,iBAAY;AACZ;;;AAIJ,OAAI,aAAa,QAAQ,4CAAa,WACpC,OAAM,IAAI,MAAM;AAGlB,aAAU,MAAM,QAAQ,IACtB,UAAU,YACN,QAAQ,SAAS,KAAK,MAAM,QAAQ,CAAC,eAAe,IAAI,KAAK,KAC9D,KAAK,SAAS,KAAK,QAAQ,MAAM,YAAY;;AAKpD,MAAI,CAAC,QAAQ,KAAKP,6BAChB,QAAQ,MAAM,QAAQ,SAAS,UAAU,EAAE,UAAU;EAIvD,MAAMQ,kBAIA;EACN,IAAIC,gBAAgC;AAEpC,OAAK,MAAM,UAAU,QACnB,KAAIT,4BAAU,QACZ,KACE,OAAO,UAAUU,0BAAQ,UACzB,MAAM,QAAQ,OAAO,SACrB,OAAO,KAAK,OAAO,SAASC,0BAAQ,OAEpC,KAAI,cACF,CAAC,cAAc,KAAgB,KAAK,GAAI,OAAO;MAE/C,iBAAgB,IAAID,0BAAQ;GAC1B,OAAOA,0BAAQ;GACf,MAAM,OAAO;;MAIjB,iBAAgB,KAAK;MAGvB,iBAAgB,KACd,MAAM,QAAQ,SAAS,CAAC,UAAU,EAAE,UAAU,CAAC;AAKrD,MAAI,cACF,iBAAgB,KAAK;AAGvB,SAAO;;;;;;AAOX,SAAgB,eACd,OACsB;CACtB,MAAM,UAAU,MAAM,QAAQ,SAC1B,MAAM,MAAM,SA
AS,KACrB,MAAM,SAAS,MAAM,SAAS,SAAS;AAE3C,KACE,YAAY,UACZ,gBAAgB,YACd,QAAsB,YAAY,UAAU,KAAK,EAEnD,QAAO;KAEP,QAAOE"}
@@ -1,7 +1,7 @@
  import { isGraphInterrupt } from "../errors.js";
  import { Command, END, _isSend, isCommand } from "../constants.js";
  import { RunnableCallable } from "../utils.js";
- import { ToolMessage, isBaseMessage } from "@langchain/core/messages";
+ import { ToolMessage, isAIMessage, isBaseMessage } from "@langchain/core/messages";
 
  //#region src/prebuilt/tool_node.ts
  const isBaseMessageArray = (input) => Array.isArray(input) && input.every(isBaseMessage);
@@ -179,12 +179,12 @@ var ToolNode = class extends RunnableCallable {
  let aiMessage;
  for (let i = messages.length - 1; i >= 0; i -= 1) {
  const message = messages[i];
- if (message.getType() === "ai") {
+ if (isAIMessage(message)) {
  aiMessage = message;
  break;
  }
  }
- if (aiMessage?.getType() !== "ai") throw new Error("ToolNode only accepts AIMessages as input.");
+ if (aiMessage == null || !isAIMessage(aiMessage)) throw new Error("ToolNode only accepts AIMessages as input.");
  outputs = await Promise.all(aiMessage.tool_calls?.filter((call) => call.id == null || !toolMessageIds.has(call.id)).map((call) => this.runTool(call, config)) ?? []);
  }
  if (!outputs.some(isCommand)) return Array.isArray(input) ? outputs : { messages: outputs };
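The change above swaps the string comparison `message.getType() === "ai"` for the `isAIMessage` type guard exported by `@langchain/core/messages`. A minimal sketch of why that matters (the guard narrows the TypeScript type, so `tool_calls` is available without a cast); the messages below are illustrative, not taken from the package:

```ts
import { AIMessage, HumanMessage, isAIMessage } from "@langchain/core/messages";
import type { BaseMessage } from "@langchain/core/messages";

const history: BaseMessage[] = [
  new HumanMessage("what is the weather in SF?"),
  new AIMessage({
    content: "",
    tool_calls: [{ name: "get_weather", args: { location: "sf" }, id: "call_1", type: "tool_call" }],
  }),
];

// Walk backwards to the most recent AI message, the way ToolNode.run does.
for (let i = history.length - 1; i >= 0; i -= 1) {
  const message = history[i];
  if (isAIMessage(message)) {
    // `message` is narrowed to AIMessage, so `tool_calls` is typed here;
    // a raw `getType() === "ai"` check would leave it as BaseMessage.
    console.log(message.tool_calls?.map((call) => call.name));
    break;
  }
}
```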
@@ -1 +1 @@
- {"version":3,"file":"tool_node.js", …} [previous source map: the embedded sourcesContent still used the string checks `message.getType() === "ai"` and `aiMessage?.getType() !== "ai"`]
+ {"version":3,"file":"tool_node.js", …} [regenerated source map: the embedded sourcesContent now imports isAIMessage and uses the isAIMessage / null guard, with "mappings" rebuilt accordingly]
@@ -236,12 +236,21 @@ var Pregel = class extends PartialRunnable {
  * Optional long-term memory store for the graph, allows for persistence & retrieval of data across threads
  */
  store;
- triggerToNodes = {};
  /**
  * Optional cache for the graph, useful for caching tasks.
  */
  cache;
  /**
+ * Optional interrupt helper function.
+ * @internal
+ */
+ userInterrupt;
+ /**
+ * The trigger to node mapping for the graph run.
+ * @internal
+ */
+ triggerToNodes = {};
+ /**
  * Constructor for Pregel - meant for internal use only.
  *
  * @internal
@@ -270,6 +279,7 @@ var Pregel = class extends PartialRunnable {
  this.cache = fields.cache;
  this.name = fields.name;
  this.triggerToNodes = fields.triggerToNodes ?? this.triggerToNodes;
+ this.userInterrupt = fields.userInterrupt;
  if (this.autoValidate) this.validate();
  }
  /**
@@ -905,7 +915,8 @@ var Pregel = class extends PartialRunnable {
  ...options,
  signal: require_index.combineAbortSignals(options?.signal, abortController.signal).signal
  };
- return new require_stream.IterableReadableStreamWithAbortSignal(await super.stream(input, config), abortController);
+ const stream = await super.stream(input, config);
+ return new require_stream.IterableReadableStreamWithAbortSignal(options?.encoding === "text/event-stream" ? require_stream.toEventStream(stream) : stream, abortController);
  }
  streamEvents(input, options, streamOptions) {
  const abortController = new AbortController();
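For context on the `toEventStream` branch above: when `options?.encoding === "text/event-stream"`, the stream handed to `IterableReadableStreamWithAbortSignal` is first wrapped by the helper in `dist/pregel/stream`. The sketch below is not that helper; it is a hypothetical illustration of server-sent-event framing for chunks shaped like `[namespace | null, mode, payload]`, which is the tuple shape the iterator yields in this mode (see the `_streamIterator` hunk further down).

```ts
// Hypothetical SSE framing for [namespace | null, mode, payload] tuples.
// This only illustrates what a "text/event-stream" encoding means; the real
// toEventStream implementation may differ in field names and framing.
function sketchToEventStream(
  chunks: AsyncIterable<[string[] | null, string, unknown]>
): ReadableStream<Uint8Array> {
  const encoder = new TextEncoder();
  return new ReadableStream({
    async start(controller) {
      for await (const [namespace, mode, payload] of chunks) {
        const data = JSON.stringify({ namespace, mode, payload });
        // One SSE frame per chunk: "event: <mode>\ndata: <json>\n\n"
        controller.enqueue(encoder.encode(`event: ${mode}\ndata: ${data}\n\n`));
      }
      controller.close();
    },
  });
}
```

A caller could return such a `ReadableStream` directly as an HTTP response body with a `content-type: text/event-stream` header.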
@@ -945,6 +956,7 @@ var Pregel = class extends PartialRunnable {
  * @internal
  */
  async *_streamIterator(input, options) {
+ const streamEncoding = "version" in (options ?? {}) ? void 0 : options?.encoding ?? void 0;
  const streamSubgraphs = options?.subgraphs;
  const inputConfig = require_config.ensureLangGraphConfig(this.config, options);
  if (inputConfig.recursionLimit === void 0 || inputConfig.recursionLimit < 1) throw new Error(`Passed "recursionLimit" must be at least 1.`);
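The added `streamEncoding` line distinguishes `streamEvents()` calls (whose options carry a `version` field) from plain `stream()` calls before honoring `encoding`. A small standalone mirror of that guard, with the option shapes written out as assumptions rather than the package's actual types:

```ts
// Assumed option shapes for illustration only; the real types live in
// dist/pregel/types and carry more fields.
type SketchStreamOptions = { encoding?: "text/event-stream" };
type SketchStreamEventsOptions = SketchStreamOptions & { version: "v1" | "v2" };

function pickStreamEncoding(
  options?: SketchStreamOptions | SketchStreamEventsOptions
): "text/event-stream" | undefined {
  // Mirrors: `"version" in (options ?? {}) ? void 0 : options?.encoding ?? void 0`,
  // i.e. when streamEvents() drives the iterator, SSE encoding is ignored.
  return "version" in (options ?? {}) ? undefined : options?.encoding ?? undefined;
}
```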
@@ -980,7 +992,7 @@ var Pregel = class extends PartialRunnable {
  chunk
  ]);
  };
- config.interrupt ??= require_interrupt.interrupt;
+ config.interrupt ??= this.userInterrupt ?? require_interrupt.interrupt;
  const callbackManager = await (0, __langchain_core_runnables.getCallbackManagerForConfig)(config);
  const runManager = await callbackManager?.handleChainStart(this.toJSON(), require_index._coerceToDict(input, "input"), runId, void 0, void 0, void 0, config?.runName ?? this.getName());
  const channelSpecs = require_base.getOnlyChannels(this.channels);
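The `config.interrupt ??=` change above lets the instance-level `userInterrupt` take precedence over the library default, while an interrupt already present on the run config still wins. A minimal sketch of that precedence chain, with a made-up function type since the real signature isn't shown in this diff:

```ts
// Hypothetical signature; stands in for whatever the real interrupt helper accepts.
type SketchInterrupt = (value: unknown) => unknown;

function resolveInterrupt(
  configInterrupt: SketchInterrupt | undefined,
  userInterrupt: SketchInterrupt | undefined,
  defaultInterrupt: SketchInterrupt
): SketchInterrupt {
  // Same precedence as `config.interrupt ??= this.userInterrupt ?? interrupt`:
  // config-supplied > instance-level userInterrupt > library default.
  return configInterrupt ?? userInterrupt ?? defaultInterrupt;
}
```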
@@ -1050,14 +1062,29 @@ var Pregel = class extends PartialRunnable {
  for await (const chunk of stream) {
  if (chunk === void 0) throw new Error("Data structure error.");
  const [namespace, mode, payload] = chunk;
- if (streamMode.includes(mode)) if (streamSubgraphs && !streamModeSingle) yield [
- namespace,
- mode,
- payload
- ];
- else if (!streamModeSingle) yield [mode, payload];
- else if (streamSubgraphs) yield [namespace, payload];
- else yield payload;
+ if (streamMode.includes(mode)) {
+ if (streamEncoding === "text/event-stream") {
+ if (streamSubgraphs) yield [
+ namespace,
+ mode,
+ payload
+ ];
+ else yield [
+ null,
+ mode,
+ payload
+ ];
+ continue;
+ }
+ if (streamSubgraphs && !streamModeSingle) yield [
+ namespace,
+ mode,
+ payload
+ ];
+ else if (!streamModeSingle) yield [mode, payload];
+ else if (streamSubgraphs) yield [namespace, payload];
+ else yield payload;
+ }
  }
  } catch (e) {
  await runManager?.handleChainError(loopError);
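As this reworked loop shows, the SSE-encoded path always yields a three-element tuple (with `null` standing in for the namespace when subgraph streaming is off), while the legacy path keeps the shape-varying behavior. A type-only sketch summarizing that reading; all names here are illustrative, not the package's exported types:

```ts
// Placeholder for the per-mode payload (state values, updates, messages, ...).
type SketchPayload = Record<string, unknown>;
type SketchNamespace = string[];

// With encoding === "text/event-stream": always [namespace | null, mode, payload].
type SketchEventStreamChunk = [SketchNamespace | null, string, SketchPayload];

// Without encoding, the historical shapes are preserved:
type SketchLegacyChunk =
  | [SketchNamespace, string, SketchPayload] // subgraphs on, several stream modes
  | [string, SketchPayload]                  // subgraphs off, several stream modes
  | [SketchNamespace, SketchPayload]         // subgraphs on, single stream mode
  | SketchPayload;                           // subgraphs off, single stream mode
```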
@@ -1077,7 +1104,8 @@ var Pregel = class extends PartialRunnable {
  const config = {
  ...options,
  outputKeys: options?.outputKeys ?? this.outputChannels,
- streamMode
+ streamMode,
+ encoding: void 0
  };
  const chunks = [];
  const stream = await this.stream(input, config);
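The `encoding: void 0` addition makes `invoke()` strip any SSE encoding before delegating to `stream()`, so the chunks it aggregates stay plain objects rather than encoded frames. A trivial sketch of that collect-the-chunks pattern (the helper name is mine, not the package's):

```ts
// Gather every chunk from an async-iterable stream into an array, the way
// invoke() does after forcing `encoding` off on its internal stream() call.
async function collectChunks<T>(stream: AsyncIterable<T>): Promise<T[]> {
  const chunks: T[] = [];
  for await (const chunk of stream) {
    chunks.push(chunk);
  }
  return chunks;
}
```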