langchain 1.0.6 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +20 -0
- package/README.md +1 -1
- package/chat_models/universal.cjs +1 -0
- package/chat_models/universal.d.cts +1 -0
- package/chat_models/universal.d.ts +1 -0
- package/chat_models/universal.js +1 -0
- package/dist/agents/ReactAgent.cjs +1 -1
- package/dist/agents/ReactAgent.cjs.map +1 -1
- package/dist/agents/ReactAgent.js +2 -2
- package/dist/agents/ReactAgent.js.map +1 -1
- package/dist/agents/index.d.cts +0 -2
- package/dist/agents/index.d.ts +0 -2
- package/dist/agents/middleware/constants.cjs +16 -0
- package/dist/agents/middleware/constants.cjs.map +1 -0
- package/dist/agents/middleware/constants.js +15 -0
- package/dist/agents/middleware/constants.js.map +1 -0
- package/dist/agents/middleware/contextEditing.cjs.map +1 -1
- package/dist/agents/middleware/contextEditing.d.cts +23 -7
- package/dist/agents/middleware/contextEditing.d.ts +23 -7
- package/dist/agents/middleware/contextEditing.js.map +1 -1
- package/dist/agents/middleware/dynamicSystemPrompt.cjs +5 -2
- package/dist/agents/middleware/dynamicSystemPrompt.cjs.map +1 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.cts +2 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.ts +2 -1
- package/dist/agents/middleware/dynamicSystemPrompt.js +4 -2
- package/dist/agents/middleware/dynamicSystemPrompt.js.map +1 -1
- package/dist/agents/middleware/error.cjs +20 -0
- package/dist/agents/middleware/error.cjs.map +1 -0
- package/dist/agents/middleware/error.js +19 -0
- package/dist/agents/middleware/error.js.map +1 -0
- package/dist/agents/middleware/index.cjs +4 -2
- package/dist/agents/middleware/index.d.ts +18 -0
- package/dist/agents/middleware/index.js +4 -2
- package/dist/agents/middleware/modelRetry.cjs +162 -0
- package/dist/agents/middleware/modelRetry.cjs.map +1 -0
- package/dist/agents/middleware/modelRetry.d.cts +134 -0
- package/dist/agents/middleware/modelRetry.d.ts +134 -0
- package/dist/agents/middleware/modelRetry.js +161 -0
- package/dist/agents/middleware/modelRetry.js.map +1 -0
- package/dist/agents/middleware/{promptCaching.cjs → provider/anthropic/promptCaching.cjs} +3 -3
- package/dist/agents/middleware/provider/anthropic/promptCaching.cjs.map +1 -0
- package/dist/agents/middleware/{promptCaching.d.cts → provider/anthropic/promptCaching.d.cts} +2 -2
- package/dist/agents/middleware/{promptCaching.d.ts → provider/anthropic/promptCaching.d.ts} +2 -2
- package/dist/agents/middleware/{promptCaching.js → provider/anthropic/promptCaching.js} +2 -2
- package/dist/agents/middleware/provider/anthropic/promptCaching.js.map +1 -0
- package/dist/agents/middleware/provider/openai/moderation.cjs +299 -0
- package/dist/agents/middleware/provider/openai/moderation.cjs.map +1 -0
- package/dist/agents/middleware/provider/openai/moderation.d.cts +133 -0
- package/dist/agents/middleware/provider/openai/moderation.d.ts +133 -0
- package/dist/agents/middleware/provider/openai/moderation.js +298 -0
- package/dist/agents/middleware/provider/openai/moderation.js.map +1 -0
- package/dist/agents/middleware/summarization.d.cts +0 -4
- package/dist/agents/middleware/summarization.d.ts +0 -4
- package/dist/agents/middleware/todoListMiddleware.cjs +1 -1
- package/dist/agents/middleware/todoListMiddleware.cjs.map +1 -1
- package/dist/agents/middleware/todoListMiddleware.js +1 -1
- package/dist/agents/middleware/todoListMiddleware.js.map +1 -1
- package/dist/agents/middleware/toolRetry.cjs +32 -44
- package/dist/agents/middleware/toolRetry.cjs.map +1 -1
- package/dist/agents/middleware/toolRetry.d.cts +16 -36
- package/dist/agents/middleware/toolRetry.d.ts +16 -36
- package/dist/agents/middleware/toolRetry.js +32 -44
- package/dist/agents/middleware/toolRetry.js.map +1 -1
- package/dist/agents/middleware/types.d.cts +9 -10
- package/dist/agents/middleware/types.d.ts +9 -10
- package/dist/agents/middleware/utils.cjs +23 -0
- package/dist/agents/middleware/utils.cjs.map +1 -1
- package/dist/agents/middleware/utils.d.ts +2 -0
- package/dist/agents/middleware/utils.js +23 -1
- package/dist/agents/middleware/utils.js.map +1 -1
- package/dist/agents/nodes/AgentNode.cjs +50 -22
- package/dist/agents/nodes/AgentNode.cjs.map +1 -1
- package/dist/agents/nodes/AgentNode.js +52 -24
- package/dist/agents/nodes/AgentNode.js.map +1 -1
- package/dist/agents/nodes/types.d.cts +39 -3
- package/dist/agents/nodes/types.d.ts +39 -3
- package/dist/agents/responses.d.cts +0 -19
- package/dist/agents/responses.d.ts +0 -19
- package/dist/agents/runtime.d.ts +1 -0
- package/dist/agents/tests/utils.cjs +10 -1
- package/dist/agents/tests/utils.cjs.map +1 -1
- package/dist/agents/tests/utils.js +10 -1
- package/dist/agents/tests/utils.js.map +1 -1
- package/dist/agents/types.d.cts +68 -2
- package/dist/agents/types.d.ts +68 -2
- package/dist/agents/utils.cjs +15 -12
- package/dist/agents/utils.cjs.map +1 -1
- package/dist/agents/utils.js +16 -13
- package/dist/agents/utils.js.map +1 -1
- package/dist/chat_models/universal.cjs +50 -16
- package/dist/chat_models/universal.cjs.map +1 -1
- package/dist/chat_models/universal.d.cts +19 -1
- package/dist/chat_models/universal.d.ts +19 -1
- package/dist/chat_models/universal.js +50 -16
- package/dist/chat_models/universal.js.map +1 -1
- package/dist/index.cjs +8 -2
- package/dist/index.d.cts +5 -3
- package/dist/index.d.ts +6 -3
- package/dist/index.js +7 -3
- package/dist/load/import_constants.cjs +2 -1
- package/dist/load/import_constants.cjs.map +1 -1
- package/dist/load/import_constants.js +2 -1
- package/dist/load/import_constants.js.map +1 -1
- package/dist/load/import_map.cjs +2 -19
- package/dist/load/import_map.cjs.map +1 -1
- package/dist/load/import_map.js +2 -19
- package/dist/load/import_map.js.map +1 -1
- package/hub/node.cjs +1 -0
- package/hub/node.d.cts +1 -0
- package/hub/node.d.ts +1 -0
- package/hub/node.js +1 -0
- package/hub.cjs +1 -0
- package/hub.d.cts +1 -0
- package/hub.d.ts +1 -0
- package/hub.js +1 -0
- package/load/serializable.cjs +1 -0
- package/load/serializable.d.cts +1 -0
- package/load/serializable.d.ts +1 -0
- package/load/serializable.js +1 -0
- package/load.cjs +1 -0
- package/load.d.cts +1 -0
- package/load.d.ts +1 -0
- package/load.js +1 -0
- package/package.json +65 -52
- package/storage/encoder_backed.cjs +1 -0
- package/storage/encoder_backed.d.cts +1 -0
- package/storage/encoder_backed.d.ts +1 -0
- package/storage/encoder_backed.js +1 -0
- package/storage/file_system.cjs +1 -0
- package/storage/file_system.d.cts +1 -0
- package/storage/file_system.d.ts +1 -0
- package/storage/file_system.js +1 -0
- package/storage/in_memory.cjs +1 -0
- package/storage/in_memory.d.cts +1 -0
- package/storage/in_memory.d.ts +1 -0
- package/storage/in_memory.js +1 -0
- package/dist/agents/ReactAgent.d.cts.map +0 -1
- package/dist/agents/ReactAgent.d.ts.map +0 -1
- package/dist/agents/constants.cjs +0 -7
- package/dist/agents/constants.cjs.map +0 -1
- package/dist/agents/constants.d.cts.map +0 -1
- package/dist/agents/constants.d.ts.map +0 -1
- package/dist/agents/constants.js +0 -6
- package/dist/agents/constants.js.map +0 -1
- package/dist/agents/errors.d.cts.map +0 -1
- package/dist/agents/errors.d.ts.map +0 -1
- package/dist/agents/index.d.cts.map +0 -1
- package/dist/agents/index.d.ts.map +0 -1
- package/dist/agents/middleware/contextEditing.d.cts.map +0 -1
- package/dist/agents/middleware/contextEditing.d.ts.map +0 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.cts.map +0 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.ts.map +0 -1
- package/dist/agents/middleware/hitl.d.cts.map +0 -1
- package/dist/agents/middleware/hitl.d.ts.map +0 -1
- package/dist/agents/middleware/llmToolSelector.d.cts.map +0 -1
- package/dist/agents/middleware/llmToolSelector.d.ts.map +0 -1
- package/dist/agents/middleware/modelCallLimit.d.cts.map +0 -1
- package/dist/agents/middleware/modelCallLimit.d.ts.map +0 -1
- package/dist/agents/middleware/modelFallback.d.cts.map +0 -1
- package/dist/agents/middleware/modelFallback.d.ts.map +0 -1
- package/dist/agents/middleware/pii.d.cts.map +0 -1
- package/dist/agents/middleware/pii.d.ts.map +0 -1
- package/dist/agents/middleware/piiRedaction.d.cts.map +0 -1
- package/dist/agents/middleware/piiRedaction.d.ts.map +0 -1
- package/dist/agents/middleware/promptCaching.cjs.map +0 -1
- package/dist/agents/middleware/promptCaching.d.cts.map +0 -1
- package/dist/agents/middleware/promptCaching.d.ts.map +0 -1
- package/dist/agents/middleware/promptCaching.js.map +0 -1
- package/dist/agents/middleware/summarization.d.cts.map +0 -1
- package/dist/agents/middleware/summarization.d.ts.map +0 -1
- package/dist/agents/middleware/todoListMiddleware.d.cts.map +0 -1
- package/dist/agents/middleware/todoListMiddleware.d.ts.map +0 -1
- package/dist/agents/middleware/toolCallLimit.d.cts.map +0 -1
- package/dist/agents/middleware/toolCallLimit.d.ts.map +0 -1
- package/dist/agents/middleware/toolEmulator.d.cts.map +0 -1
- package/dist/agents/middleware/toolEmulator.d.ts.map +0 -1
- package/dist/agents/middleware/toolRetry.d.cts.map +0 -1
- package/dist/agents/middleware/toolRetry.d.ts.map +0 -1
- package/dist/agents/middleware/types.d.cts.map +0 -1
- package/dist/agents/middleware/types.d.ts.map +0 -1
- package/dist/agents/middleware/utils.d.cts.map +0 -1
- package/dist/agents/middleware/utils.d.ts.map +0 -1
- package/dist/agents/middleware.d.cts.map +0 -1
- package/dist/agents/middleware.d.ts.map +0 -1
- package/dist/agents/nodes/types.d.cts.map +0 -1
- package/dist/agents/nodes/types.d.ts.map +0 -1
- package/dist/agents/responses.d.cts.map +0 -1
- package/dist/agents/responses.d.ts.map +0 -1
- package/dist/agents/runtime.d.cts.map +0 -1
- package/dist/agents/runtime.d.ts.map +0 -1
- package/dist/agents/tests/utils.d.cts.map +0 -1
- package/dist/agents/tests/utils.d.ts.map +0 -1
- package/dist/agents/types.d.cts.map +0 -1
- package/dist/agents/types.d.ts.map +0 -1
- package/dist/chat_models/universal.d.cts.map +0 -1
- package/dist/chat_models/universal.d.ts.map +0 -1
- package/dist/hub/base.d.cts.map +0 -1
- package/dist/hub/base.d.ts.map +0 -1
- package/dist/hub/index.d.cts.map +0 -1
- package/dist/hub/index.d.ts.map +0 -1
- package/dist/hub/node.d.cts.map +0 -1
- package/dist/hub/node.d.ts.map +0 -1
- package/dist/load/import_type.d.cts.map +0 -1
- package/dist/load/import_type.d.ts.map +0 -1
- package/dist/load/index.d.cts.map +0 -1
- package/dist/load/index.d.ts.map +0 -1
- package/dist/storage/encoder_backed.d.cts.map +0 -1
- package/dist/storage/encoder_backed.d.ts.map +0 -1
- package/dist/storage/file_system.d.cts.map +0 -1
- package/dist/storage/file_system.d.ts.map +0 -1
package/dist/agents/nodes/AgentNode.js.map
@@ -1 +1 @@
-
{"version":3,"file":"AgentNode.js","names":["options: AgentNodeOptions<StructuredResponseFormat, ContextSchema>","#run","#options","model: string | LanguageModelLike","state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"]","config: RunnableConfig","#invokeModel","#areMoreStepsNeeded","options: {\n lastMessage?: string;\n }","#deriveModel","request: ModelRequest","#getResponseFormat","#bindTools","#getModelInputState","#handleMultipleStructuredOutputs","#handleSingleStructuredOutput","wrappedHandler: (\n request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ) => Promise<InternalModelResponse<StructuredResponseFormat>>","request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >","runtime: Runtime<unknown>","requestWithStateAndRuntime: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >","req: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >","initialRequest: ModelRequest<\n InternalAgentState<StructuredResponseFormat> & PreHookAnnotation[\"State\"],\n unknown\n >","response: AIMessage","toolCalls: ToolCall[]","responseFormat: ToolResponseFormat","#handleToolStrategyError","toolCall: ToolCall","lastMessage?: string","error: ToolStrategyError","response: BaseMessage","model: LanguageModelLike","preparedOptions: ModelRequest | undefined","structuredResponseFormat: ResponseFormat | undefined","options: Partial<BaseChatModelCallOptions>"],"sources":["../../../src/agents/nodes/AgentNode.ts"],"sourcesContent":["/* eslint-disable no-instanceof/no-instanceof */\nimport { Runnable, RunnableConfig } from \"@langchain/core/runnables\";\nimport { BaseMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { Command, type LangGraphRunnableConfig } from \"@langchain/langgraph\";\nimport { type LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { type BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport {\n InteropZodObject,\n getSchemaDescription,\n interopParse,\n interopZodObjectPartial,\n} from \"@langchain/core/utils/types\";\nimport type { ToolCall } from \"@langchain/core/messages/tool\";\nimport type { ClientTool, ServerTool } from \"@langchain/core/tools\";\n\nimport { initChatModel } from \"../../chat_models/universal.js\";\nimport { MultipleStructuredOutputsError } from \"../errors.js\";\nimport { RunnableCallable } from \"../RunnableCallable.js\";\nimport { PreHookAnnotation } from \"../annotation.js\";\nimport {\n bindTools,\n getPromptRunnable,\n validateLLMHasNoBoundTools,\n hasToolCalls,\n isClientTool,\n} from \"../utils.js\";\nimport { mergeAbortSignals } from \"../nodes/utils.js\";\nimport { CreateAgentParams } from \"../types.js\";\nimport type { InternalAgentState, Runtime } from \"../runtime.js\";\nimport type {\n AgentMiddleware,\n AnyAnnotationRoot,\n WrapModelCallHandler,\n} from \"../middleware/types.js\";\nimport type { ModelRequest } from \"./types.js\";\nimport { withAgentName } from \"../withAgentName.js\";\nimport {\n ToolStrategy,\n ProviderStrategy,\n transformResponseFormat,\n ToolStrategyError,\n} from \"../responses.js\";\n\ntype ResponseHandlerResult<StructuredResponseFormat> =\n | {\n structuredResponse: StructuredResponseFormat;\n messages: BaseMessage[];\n }\n | Promise<Command>;\n\n/**\n * Wrap the base handler with 
middleware wrapModelCall hooks\n * Middleware are composed so the first middleware is the outermost wrapper\n * Example: [auth, retry, cache] means auth wraps retry wraps cache wraps baseHandler\n */\ntype InternalModelResponse<StructuredResponseFormat> =\n | AIMessage\n | ResponseHandlerResult<StructuredResponseFormat>;\n\n/**\n * The name of the agent node in the state graph.\n */\nexport const AGENT_NODE_NAME = \"model_request\";\n\nexport interface AgentNodeOptions<\n StructuredResponseFormat extends Record<string, unknown> = Record<\n string,\n unknown\n >,\n StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot,\n ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot\n> extends Pick<\n CreateAgentParams<StructuredResponseFormat, StateSchema, ContextSchema>,\n | \"model\"\n | \"systemPrompt\"\n | \"includeAgentName\"\n | \"name\"\n | \"responseFormat\"\n | \"middleware\"\n > {\n toolClasses: (ClientTool | ServerTool)[];\n shouldReturnDirect: Set<string>;\n signal?: AbortSignal;\n wrapModelCallHookMiddleware?: [\n AgentMiddleware,\n () => Record<string, unknown>\n ][];\n}\n\ninterface NativeResponseFormat {\n type: \"native\";\n strategy: ProviderStrategy;\n}\n\ninterface ToolResponseFormat {\n type: \"tool\";\n tools: Record<string, ToolStrategy>;\n}\n\ntype ResponseFormat = NativeResponseFormat | ToolResponseFormat;\n\nexport class AgentNode<\n StructuredResponseFormat extends Record<string, unknown> = Record<\n string,\n unknown\n >,\n ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot\n> extends RunnableCallable<\n InternalAgentState<StructuredResponseFormat> & PreHookAnnotation[\"State\"],\n | (\n | { messages: BaseMessage[] }\n | { structuredResponse: StructuredResponseFormat }\n )\n | Command\n> {\n #options: AgentNodeOptions<StructuredResponseFormat, ContextSchema>;\n\n constructor(\n options: AgentNodeOptions<StructuredResponseFormat, ContextSchema>\n ) {\n super({\n name: options.name ?? 
\"model\",\n func: (input, config) => this.#run(input, config as RunnableConfig),\n });\n\n this.#options = options;\n }\n\n /**\n * Returns response format primtivies based on given model and response format provided by the user.\n *\n * If the user selects a tool output:\n * - return a record of tools to extract structured output from the model's response\n *\n * if the the user selects a native schema output or if the model supports JSON schema output:\n * - return a provider strategy to extract structured output from the model's response\n *\n * @param model - The model to get the response format for.\n * @returns The response format.\n */\n #getResponseFormat(\n model: string | LanguageModelLike\n ): ResponseFormat | undefined {\n if (!this.#options.responseFormat) {\n return undefined;\n }\n\n const strategies = transformResponseFormat(\n this.#options.responseFormat,\n undefined,\n model\n );\n\n /**\n * we either define a list of provider strategies or a list of tool strategies\n */\n const isProviderStrategy = strategies.every(\n (format) => format instanceof ProviderStrategy\n );\n\n /**\n * Populate a list of structured tool info.\n */\n if (!isProviderStrategy) {\n return {\n type: \"tool\",\n tools: (\n strategies.filter(\n (format) => format instanceof ToolStrategy\n ) as ToolStrategy[]\n ).reduce((acc, format) => {\n acc[format.name] = format;\n return acc;\n }, {} as Record<string, ToolStrategy>),\n };\n }\n\n return {\n type: \"native\",\n /**\n * there can only be one provider strategy\n */\n strategy: strategies[0] as ProviderStrategy,\n };\n }\n\n async #run(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n config: RunnableConfig\n ) {\n /**\n * Check if we just executed a returnDirect tool\n * If so, we should generate structured response (if needed) and stop\n */\n const lastMessage = state.messages.at(-1);\n if (\n lastMessage &&\n ToolMessage.isInstance(lastMessage) &&\n lastMessage.name &&\n this.#options.shouldReturnDirect.has(lastMessage.name)\n ) {\n /**\n * return directly without invoking the model again\n */\n return { messages: [] };\n }\n\n const response = await this.#invokeModel(state, config);\n\n /**\n * if we were able to generate a structured response, return it\n */\n if (\"structuredResponse\" in response) {\n return {\n messages: [...state.messages, ...(response.messages || [])],\n structuredResponse: response.structuredResponse,\n };\n }\n\n /**\n * if we need to direct the agent to the model, return the update\n */\n if (response instanceof Command) {\n return response;\n }\n\n response.name = this.name;\n response.lc_kwargs.name = this.name;\n\n if (this.#areMoreStepsNeeded(state, response)) {\n return {\n messages: [\n new AIMessage({\n content: \"Sorry, need more steps to process this request.\",\n name: this.name,\n id: response.id,\n }),\n ],\n };\n }\n\n return { messages: [response] };\n }\n\n /**\n * Derive the model from the options.\n * @param state - The state of the agent.\n * @param config - The config of the agent.\n * @returns The model.\n */\n #deriveModel() {\n if (typeof this.#options.model === \"string\") {\n return initChatModel(this.#options.model);\n }\n\n if (this.#options.model) {\n return this.#options.model;\n }\n\n throw new Error(\"No model option was provided, either via `model` option.\");\n }\n\n async #invokeModel(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n config: RunnableConfig,\n options: {\n lastMessage?: string;\n } = {}\n 
): Promise<AIMessage | ResponseHandlerResult<StructuredResponseFormat>> {\n const model = await this.#deriveModel();\n const lgConfig = config as LangGraphRunnableConfig;\n\n /**\n * Create the base handler that performs the actual model invocation\n */\n const baseHandler = async (\n request: ModelRequest\n ): Promise<AIMessage | ResponseHandlerResult<StructuredResponseFormat>> => {\n /**\n * Check if the LLM already has bound tools and throw if it does.\n */\n validateLLMHasNoBoundTools(request.model);\n\n const structuredResponseFormat = this.#getResponseFormat(request.model);\n const modelWithTools = await this.#bindTools(\n request.model,\n request,\n structuredResponseFormat\n );\n\n let modelInput = this.#getModelInputState(state);\n modelInput = { ...modelInput, messages: request.messages };\n\n const signal = mergeAbortSignals(this.#options.signal, config.signal);\n const invokeConfig = { ...config, signal };\n const response = (await modelWithTools.invoke(\n modelInput,\n invokeConfig\n )) as AIMessage;\n\n /**\n * if the user requests a native schema output, try to parse the response\n * and return the structured response if it is valid\n */\n if (structuredResponseFormat?.type === \"native\") {\n const structuredResponse =\n structuredResponseFormat.strategy.parse(response);\n if (structuredResponse) {\n return { structuredResponse, messages: [response] };\n }\n\n return response;\n }\n\n if (!structuredResponseFormat || !response.tool_calls) {\n return response;\n }\n\n const toolCalls = response.tool_calls.filter(\n (call) => call.name in structuredResponseFormat.tools\n );\n\n /**\n * if there were not structured tool calls, we can return the response\n */\n if (toolCalls.length === 0) {\n return response;\n }\n\n /**\n * if there were multiple structured tool calls, we should throw an error as this\n * scenario is not defined/supported.\n */\n if (toolCalls.length > 1) {\n return this.#handleMultipleStructuredOutputs(\n response,\n toolCalls,\n structuredResponseFormat\n );\n }\n\n const toolStrategy = structuredResponseFormat.tools[toolCalls[0].name];\n const toolMessageContent = toolStrategy?.options?.toolMessageContent;\n return this.#handleSingleStructuredOutput(\n response,\n toolCalls[0],\n structuredResponseFormat,\n toolMessageContent ?? options.lastMessage\n );\n };\n\n const wrapperMiddleware = this.#options.wrapModelCallHookMiddleware ?? [];\n let wrappedHandler: (\n request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ) => Promise<InternalModelResponse<StructuredResponseFormat>> = baseHandler;\n\n /**\n * Build composed handler from last to first so first middleware becomes outermost\n */\n for (let i = wrapperMiddleware.length - 1; i >= 0; i--) {\n const [middleware, getMiddlewareState] = wrapperMiddleware[i];\n if (middleware.wrapModelCall) {\n const innerHandler = wrappedHandler;\n const currentMiddleware = middleware;\n const currentGetState = getMiddlewareState;\n\n wrappedHandler = async (\n request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ): Promise<InternalModelResponse<StructuredResponseFormat>> => {\n /**\n * Merge context with default context of middleware\n */\n const context = currentMiddleware.contextSchema\n ? 
interopParse(\n currentMiddleware.contextSchema,\n lgConfig?.context || {}\n )\n : lgConfig?.context;\n\n /**\n * Create runtime\n */\n const runtime: Runtime<unknown> = Object.freeze({\n context,\n writer: lgConfig.writer,\n interrupt: lgConfig.interrupt,\n signal: lgConfig.signal,\n });\n\n /**\n * Create the request with state and runtime\n */\n const requestWithStateAndRuntime: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n > = {\n ...request,\n state: {\n ...(middleware.stateSchema\n ? interopParse(\n interopZodObjectPartial(middleware.stateSchema),\n state\n )\n : {}),\n ...currentGetState(),\n messages: state.messages,\n } as InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n runtime,\n };\n\n /**\n * Create handler that validates tools and calls the inner handler\n */\n const handlerWithValidation = async (\n req: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ): Promise<InternalModelResponse<StructuredResponseFormat>> => {\n /**\n * Verify that the user didn't add any new tools.\n * We can't allow this as the ToolNode is already initiated with given tools.\n */\n const modifiedTools = req.tools ?? [];\n const newTools = modifiedTools.filter(\n (tool) =>\n isClientTool(tool) &&\n !this.#options.toolClasses.some((t) => t.name === tool.name)\n );\n if (newTools.length > 0) {\n throw new Error(\n `You have added a new tool in \"wrapModelCall\" hook of middleware \"${\n currentMiddleware.name\n }\": ${newTools\n .map((tool) => tool.name)\n .join(\", \")}. This is not supported.`\n );\n }\n\n /**\n * Verify that user has not added or modified a tool with the same name.\n * We can't allow this as the ToolNode is already initiated with given tools.\n */\n const invalidTools = modifiedTools.filter(\n (tool) =>\n isClientTool(tool) &&\n this.#options.toolClasses.every((t) => t !== tool)\n );\n if (invalidTools.length > 0) {\n throw new Error(\n `You have modified a tool in \"wrapModelCall\" hook of middleware \"${\n currentMiddleware.name\n }\": ${invalidTools\n .map((tool) => tool.name)\n .join(\", \")}. 
This is not supported.`\n );\n }\n\n return innerHandler(req);\n };\n\n // Call middleware's wrapModelCall with the validation handler\n if (!currentMiddleware.wrapModelCall) {\n return handlerWithValidation(requestWithStateAndRuntime);\n }\n\n try {\n const middlewareResponse = await currentMiddleware.wrapModelCall(\n requestWithStateAndRuntime,\n handlerWithValidation as WrapModelCallHandler\n );\n\n /**\n * Validate that this specific middleware returned a valid AIMessage\n */\n if (!AIMessage.isInstance(middlewareResponse)) {\n throw new Error(\n `Invalid response from \"wrapModelCall\" in middleware \"${\n currentMiddleware.name\n }\": expected AIMessage, got ${typeof middlewareResponse}`\n );\n }\n\n return middlewareResponse;\n } catch (error) {\n // Add middleware context to error if not already added\n if (\n error instanceof Error &&\n !error.message.includes(`middleware \"${currentMiddleware.name}\"`)\n ) {\n error.message = `Error in middleware \"${currentMiddleware.name}\": ${error.message}`;\n }\n throw error;\n }\n };\n }\n }\n\n /**\n * Execute the wrapped handler with the initial request\n */\n const initialRequest: ModelRequest<\n InternalAgentState<StructuredResponseFormat> & PreHookAnnotation[\"State\"],\n unknown\n > = {\n model,\n systemPrompt: this.#options.systemPrompt,\n messages: state.messages,\n tools: this.#options.toolClasses,\n state,\n runtime: Object.freeze({\n context: lgConfig?.context,\n writer: lgConfig.writer,\n interrupt: lgConfig.interrupt,\n signal: lgConfig.signal,\n }) as Runtime<unknown>,\n };\n\n return wrappedHandler(initialRequest);\n }\n\n /**\n * If the model returns multiple structured outputs, we need to handle it.\n * @param response - The response from the model\n * @param toolCalls - The tool calls that were made\n * @returns The response from the model\n */\n #handleMultipleStructuredOutputs(\n response: AIMessage,\n toolCalls: ToolCall[],\n responseFormat: ToolResponseFormat\n ): Promise<Command> {\n const multipleStructuredOutputsError = new MultipleStructuredOutputsError(\n toolCalls.map((call) => call.name)\n );\n\n return this.#handleToolStrategyError(\n multipleStructuredOutputsError,\n response,\n toolCalls[0],\n responseFormat\n );\n }\n\n /**\n * If the model returns a single structured output, we need to handle it.\n * @param toolCall - The tool call that was made\n * @returns The structured response and a message to the LLM if needed\n */\n #handleSingleStructuredOutput(\n response: AIMessage,\n toolCall: ToolCall,\n responseFormat: ToolResponseFormat,\n lastMessage?: string\n ): ResponseHandlerResult<StructuredResponseFormat> {\n const tool = responseFormat.tools[toolCall.name];\n\n try {\n const structuredResponse = tool.parse(\n toolCall.args\n ) as StructuredResponseFormat;\n\n return {\n structuredResponse,\n messages: [\n response,\n new ToolMessage({\n tool_call_id: toolCall.id ?? 
\"\",\n content: JSON.stringify(structuredResponse),\n name: toolCall.name,\n }),\n new AIMessage(\n lastMessage ??\n `Returning structured response: ${JSON.stringify(\n structuredResponse\n )}`\n ),\n ],\n };\n } catch (error) {\n return this.#handleToolStrategyError(\n error as ToolStrategyError,\n response,\n toolCall,\n responseFormat\n );\n }\n }\n\n async #handleToolStrategyError(\n error: ToolStrategyError,\n response: AIMessage,\n toolCall: ToolCall,\n responseFormat: ToolResponseFormat\n ): Promise<Command> {\n /**\n * Using the `errorHandler` option of the first `ToolStrategy` entry is sufficient here.\n * There is technically only one `ToolStrategy` entry in `structuredToolInfo` if the user\n * uses `toolStrategy` to define the response format. If the user applies a list of json\n * schema objects, these will be transformed into multiple `ToolStrategy` entries but all\n * with the same `handleError` option.\n */\n const errorHandler = Object.values(responseFormat.tools).at(0)?.options\n ?.handleError;\n\n const toolCallId = toolCall.id;\n if (!toolCallId) {\n throw new Error(\n \"Tool call ID is required to handle tool output errors. Please provide a tool call ID.\"\n );\n }\n\n /**\n * Default behavior: retry if `errorHandler` is undefined or truthy.\n * Only throw if explicitly set to `false`.\n */\n if (errorHandler === false) {\n throw error;\n }\n\n /**\n * retry if:\n */\n if (\n /**\n * if the user has provided truthy value as the `errorHandler`, return a new AIMessage\n * with the error message and retry the tool call.\n */\n errorHandler === undefined ||\n (typeof errorHandler === \"boolean\" && errorHandler) ||\n /**\n * if `errorHandler` is an array and contains MultipleStructuredOutputsError\n */\n (Array.isArray(errorHandler) &&\n errorHandler.some((h) => h instanceof MultipleStructuredOutputsError))\n ) {\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content: error.message,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n /**\n * if `errorHandler` is a string, retry the tool call with given string\n */\n if (typeof errorHandler === \"string\") {\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content: errorHandler,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n /**\n * if `errorHandler` is a function, retry the tool call with the function\n */\n if (typeof errorHandler === \"function\") {\n const content = await errorHandler(error);\n if (typeof content !== \"string\") {\n throw new Error(\"Error handler must return a string.\");\n }\n\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n /**\n * Default: retry if we reach here\n */\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content: error.message,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n #areMoreStepsNeeded(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n response: BaseMessage\n ): boolean {\n const allToolsReturnDirect =\n AIMessage.isInstance(response) &&\n response.tool_calls?.every((call) =>\n this.#options.shouldReturnDirect.has(call.name)\n );\n const remainingSteps =\n \"remainingSteps\" in state ? 
(state.remainingSteps as number) : undefined;\n return Boolean(\n remainingSteps &&\n ((remainingSteps < 1 && allToolsReturnDirect) ||\n (remainingSteps < 2 && hasToolCalls(state.messages.at(-1))))\n );\n }\n\n #getModelInputState(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"]\n ): Omit<InternalAgentState<StructuredResponseFormat>, \"llmInputMessages\"> {\n const { messages, llmInputMessages, ...rest } = state;\n if (llmInputMessages && llmInputMessages.length > 0) {\n return { messages: llmInputMessages, ...rest } as Omit<\n InternalAgentState<StructuredResponseFormat>,\n \"llmInputMessages\"\n >;\n }\n return { messages, ...rest } as Omit<\n InternalAgentState<StructuredResponseFormat>,\n \"llmInputMessages\"\n >;\n }\n\n async #bindTools(\n model: LanguageModelLike,\n preparedOptions: ModelRequest | undefined,\n structuredResponseFormat: ResponseFormat | undefined\n ): Promise<Runnable> {\n const options: Partial<BaseChatModelCallOptions> = {};\n const structuredTools = Object.values(\n structuredResponseFormat && \"tools\" in structuredResponseFormat\n ? structuredResponseFormat.tools\n : {}\n );\n\n /**\n * Use tools from preparedOptions if provided, otherwise use default tools\n */\n const allTools = [\n ...(preparedOptions?.tools ?? this.#options.toolClasses),\n ...structuredTools.map((toolStrategy) => toolStrategy.tool),\n ];\n\n /**\n * If there are structured tools, we need to set the tool choice to \"any\"\n * so that the model can choose to use a structured tool or not.\n */\n const toolChoice =\n preparedOptions?.toolChoice ||\n (structuredTools.length > 0 ? \"any\" : undefined);\n\n /**\n * check if the user requests a native schema output\n */\n if (structuredResponseFormat?.type === \"native\") {\n const jsonSchemaParams = {\n name: structuredResponseFormat.strategy.schema?.name ?? \"extract\",\n description: getSchemaDescription(\n structuredResponseFormat.strategy.schema\n ),\n schema: structuredResponseFormat.strategy.schema,\n strict: true,\n };\n\n Object.assign(options, {\n response_format: {\n type: \"json_schema\",\n json_schema: jsonSchemaParams,\n },\n ls_structured_output_format: {\n kwargs: { method: \"json_schema\" },\n schema: structuredResponseFormat.strategy.schema,\n },\n strict: true,\n });\n }\n\n /**\n * Bind tools to the model if they are not already bound.\n */\n const modelWithTools = await bindTools(model, allTools, {\n ...options,\n ...(preparedOptions?.modelSettings ?? {}),\n tool_choice: toolChoice,\n });\n\n /**\n * Create a model runnable with the prompt and agent name\n */\n const modelRunnable = getPromptRunnable(\n preparedOptions?.systemPrompt ?? this.#options.systemPrompt\n ).pipe(\n this.#options.includeAgentName === \"inline\"\n ? withAgentName(modelWithTools, this.#options.includeAgentName)\n : modelWithTools\n );\n\n return modelRunnable;\n }\n\n getState(): {\n messages: BaseMessage[];\n } {\n const state = super.getState();\n const origState = state && !(state instanceof Command) ? 
state : {};\n\n return {\n messages: [],\n ...origState,\n };\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;AA8DA,MAAa,kBAAkB;AAuC/B,IAAa,YAAb,cAMU,iBAOR;CACA;CAEA,YACEA,SACA;EACA,MAAM;GACJ,MAAM,QAAQ,QAAQ;GACtB,MAAM,CAAC,OAAO,WAAW,KAAKC,KAAK,OAAO,OAAyB;EACpE,EAAC;EAEF,KAAKC,WAAW;CACjB;;;;;;;;;;;;;CAcD,mBACEC,OAC4B;AAC5B,MAAI,CAAC,KAAKD,SAAS,eACjB,QAAO;EAGT,MAAM,aAAa,wBACjB,KAAKA,SAAS,gBACd,QACA,MACD;;;;EAKD,MAAM,qBAAqB,WAAW,MACpC,CAAC,WAAW,kBAAkB,iBAC/B;;;;AAKD,MAAI,CAAC,mBACH,QAAO;GACL,MAAM;GACN,OACE,WAAW,OACT,CAAC,WAAW,kBAAkB,aAC/B,CACD,OAAO,CAAC,KAAK,WAAW;IACxB,IAAI,OAAO,QAAQ;AACnB,WAAO;GACR,GAAE,CAAE,EAAiC;EACvC;AAGH,SAAO;GACL,MAAM;GAIN,UAAU,WAAW;EACtB;CACF;CAED,MAAMD,KACJG,OAEAC,QACA;;;;;EAKA,MAAM,cAAc,MAAM,SAAS,GAAG,GAAG;AACzC,MACE,eACA,YAAY,WAAW,YAAY,IACnC,YAAY,QACZ,KAAKH,SAAS,mBAAmB,IAAI,YAAY,KAAK;;;;AAKtD,SAAO,EAAE,UAAU,CAAE,EAAE;EAGzB,MAAM,WAAW,MAAM,KAAKI,aAAa,OAAO,OAAO;;;;AAKvD,MAAI,wBAAwB,SAC1B,QAAO;GACL,UAAU,CAAC,GAAG,MAAM,UAAU,GAAI,SAAS,YAAY,CAAE,CAAE;GAC3D,oBAAoB,SAAS;EAC9B;;;;AAMH,MAAI,oBAAoB,QACtB,QAAO;EAGT,SAAS,OAAO,KAAK;EACrB,SAAS,UAAU,OAAO,KAAK;AAE/B,MAAI,KAAKC,oBAAoB,OAAO,SAAS,CAC3C,QAAO,EACL,UAAU,CACR,IAAI,UAAU;GACZ,SAAS;GACT,MAAM,KAAK;GACX,IAAI,SAAS;EACd,EACF,EACF;AAGH,SAAO,EAAE,UAAU,CAAC,QAAS,EAAE;CAChC;;;;;;;CAQD,eAAe;AACb,MAAI,OAAO,KAAKL,SAAS,UAAU,SACjC,QAAO,cAAc,KAAKA,SAAS,MAAM;AAG3C,MAAI,KAAKA,SAAS,MAChB,QAAO,KAAKA,SAAS;AAGvB,QAAM,IAAI,MAAM;CACjB;CAED,MAAMI,aACJF,OAEAC,QACAG,UAEI,CAAE,GACgE;EACtE,MAAM,QAAQ,MAAM,KAAKC,cAAc;EACvC,MAAM,WAAW;;;;EAKjB,MAAM,cAAc,OAClBC,YACyE;;;;GAIzE,2BAA2B,QAAQ,MAAM;GAEzC,MAAM,2BAA2B,KAAKC,mBAAmB,QAAQ,MAAM;GACvE,MAAM,iBAAiB,MAAM,KAAKC,WAChC,QAAQ,OACR,SACA,yBACD;GAED,IAAI,aAAa,KAAKC,oBAAoB,MAAM;GAChD,aAAa;IAAE,GAAG;IAAY,UAAU,QAAQ;GAAU;GAE1D,MAAM,SAAS,kBAAkB,KAAKX,SAAS,QAAQ,OAAO,OAAO;GACrE,MAAM,eAAe;IAAE,GAAG;IAAQ;GAAQ;GAC1C,MAAM,WAAY,MAAM,eAAe,OACrC,YACA,aACD;;;;;AAMD,OAAI,0BAA0B,SAAS,UAAU;IAC/C,MAAM,qBACJ,yBAAyB,SAAS,MAAM,SAAS;AACnD,QAAI,mBACF,QAAO;KAAE;KAAoB,UAAU,CAAC,QAAS;IAAE;AAGrD,WAAO;GACR;AAED,OAAI,CAAC,4BAA4B,CAAC,SAAS,WACzC,QAAO;GAGT,MAAM,YAAY,SAAS,WAAW,OACpC,CAAC,SAAS,KAAK,QAAQ,yBAAyB,MACjD;;;;AAKD,OAAI,UAAU,WAAW,EACvB,QAAO;;;;;AAOT,OAAI,UAAU,SAAS,EACrB,QAAO,KAAKY,iCACV,UACA,WACA,yBACD;GAGH,MAAM,eAAe,yBAAyB,MAAM,UAAU,GAAG;GACjE,MAAM,qBAAqB,cAAc,SAAS;AAClD,UAAO,KAAKC,8BACV,UACA,UAAU,IACV,0BACA,sBAAsB,QAAQ,YAC/B;EACF;EAED,MAAM,oBAAoB,KAAKb,SAAS,+BAA+B,CAAE;EACzE,IAAIc,iBAM4D;;;;AAKhE,OAAK,IAAI,IAAI,kBAAkB,SAAS,GAAG,KAAK,GAAG,KAAK;GACtD,MAAM,CAAC,YAAY,mBAAmB,GAAG,kBAAkB;AAC3D,OAAI,WAAW,eAAe;IAC5B,MAAM,eAAe;IACrB,MAAM,oBAAoB;IAC1B,MAAM,kBAAkB;IAExB,iBAAiB,OACfC,YAK6D;;;;KAI7D,MAAM,UAAU,kBAAkB,gBAC9B,aACE,kBAAkB,eAClB,UAAU,WAAW,CAAE,EACxB,GACD,UAAU;;;;KAKd,MAAMC,UAA4B,OAAO,OAAO;MAC9C;MACA,QAAQ,SAAS;MACjB,WAAW,SAAS;MACpB,QAAQ,SAAS;KAClB,EAAC;;;;KAKF,MAAMC,6BAIF;MACF,GAAG;MACH,OAAO;OACL,GAAI,WAAW,cACX,aACE,wBAAwB,WAAW,YAAY,EAC/C,MACD,GACD,CAAE;OACN,GAAG,iBAAiB;OACpB,UAAU,MAAM;MACjB;MAED;KACD;;;;KAKD,MAAM,wBAAwB,OAC5BC,QAK6D;;;;;MAK7D,MAAM,gBAAgB,IAAI,SAAS,CAAE;MACrC,MAAM,WAAW,cAAc,OAC7B,CAAC,SACC,aAAa,KAAK,IAClB,CAAC,KAAKlB,SAAS,YAAY,KAAK,CAAC,MAAM,EAAE,SAAS,KAAK,KAAK,CAC/D;AACD,UAAI,SAAS,SAAS,EACpB,OAAM,IAAI,MACR,CAAC,iEAAiE,EAChE,kBAAkB,KACnB,GAAG,EAAE,SACH,IAAI,CAAC,SAAS,KAAK,KAAK,CACxB,KAAK,KAAK,CAAC,wBAAwB,CAAC;;;;;MAQ3C,MAAM,eAAe,cAAc,OACjC,CAAC,SACC,aAAa,KAAK,IAClB,KAAKA,SAAS,YAAY,MAAM,CAAC,MAAM,MAAM,KAAK,CACrD;AACD,UAAI,aAAa,SAAS,EACxB,OAAM,IAAI,MACR,CAAC,gEAAgE,EAC/D,kBAAkB,KACnB,GAAG,EAAE,aACH,IAAI,CAAC,SAAS,KAAK,KAAK,CACxB,KAAK,KAAK,CAAC,wBAAwB,CAAC;AAI3C,aAAO,aAAa,IAAI;KACzB;AAGD,SAAI,CAAC,kBAAkB,cACrB,QAAO,sBAAsB,2BAA2B;AAG1D,S
AAI;MACF,MAAM,qBAAqB,MAAM,kBAAkB,cACjD,4BACA,sBACD;;;;AAKD,UAAI,CAAC,UAAU,WAAW,mBAAmB,CAC3C,OAAM,IAAI,MACR,CAAC,qDAAqD,EACpD,kBAAkB,KACnB,2BAA2B,EAAE,OAAO,oBAAoB;AAI7D,aAAO;KACR,SAAQ,OAAO;AAEd,UACE,iBAAiB,SACjB,CAAC,MAAM,QAAQ,SAAS,CAAC,YAAY,EAAE,kBAAkB,KAAK,CAAC,CAAC,CAAC,EAEjE,MAAM,UAAU,CAAC,qBAAqB,EAAE,kBAAkB,KAAK,GAAG,EAAE,MAAM,SAAS;AAErF,YAAM;KACP;IACF;GACF;EACF;;;;EAKD,MAAMmB,iBAGF;GACF;GACA,cAAc,KAAKnB,SAAS;GAC5B,UAAU,MAAM;GAChB,OAAO,KAAKA,SAAS;GACrB;GACA,SAAS,OAAO,OAAO;IACrB,SAAS,UAAU;IACnB,QAAQ,SAAS;IACjB,WAAW,SAAS;IACpB,QAAQ,SAAS;GAClB,EAAC;EACH;AAED,SAAO,eAAe,eAAe;CACtC;;;;;;;CAQD,iCACEoB,UACAC,WACAC,gBACkB;EAClB,MAAM,iCAAiC,IAAI,+BACzC,UAAU,IAAI,CAAC,SAAS,KAAK,KAAK;AAGpC,SAAO,KAAKC,yBACV,gCACA,UACA,UAAU,IACV,eACD;CACF;;;;;;CAOD,8BACEH,UACAI,UACAF,gBACAG,aACiD;EACjD,MAAM,OAAO,eAAe,MAAM,SAAS;AAE3C,MAAI;GACF,MAAM,qBAAqB,KAAK,MAC9B,SAAS,KACV;AAED,UAAO;IACL;IACA,UAAU;KACR;KACA,IAAI,YAAY;MACd,cAAc,SAAS,MAAM;MAC7B,SAAS,KAAK,UAAU,mBAAmB;MAC3C,MAAM,SAAS;KAChB;KACD,IAAI,UACF,eACE,CAAC,+BAA+B,EAAE,KAAK,UACrC,mBACD,EAAE;IAER;GACF;EACF,SAAQ,OAAO;AACd,UAAO,KAAKF,yBACV,OACA,UACA,UACA,eACD;EACF;CACF;CAED,MAAMA,yBACJG,OACAN,UACAI,UACAF,gBACkB;;;;;;;;EAQlB,MAAM,eAAe,OAAO,OAAO,eAAe,MAAM,CAAC,GAAG,EAAE,EAAE,SAC5D;EAEJ,MAAM,aAAa,SAAS;AAC5B,MAAI,CAAC,WACH,OAAM,IAAI,MACR;;;;;AAQJ,MAAI,iBAAiB,MACnB,OAAM;;;;AAMR,MAKE,iBAAiB,UAChB,OAAO,iBAAiB,aAAa,gBAIrC,MAAM,QAAQ,aAAa,IAC1B,aAAa,KAAK,CAAC,MAAM,aAAa,+BAA+B,CAEvE,QAAO,IAAI,QAAQ;GACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;IACd,SAAS,MAAM;IACf,cAAc;GACf,EACF,EACF;GACD,MAAM;EACP;;;;AAMH,MAAI,OAAO,iBAAiB,SAC1B,QAAO,IAAI,QAAQ;GACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;IACd,SAAS;IACT,cAAc;GACf,EACF,EACF;GACD,MAAM;EACP;;;;AAMH,MAAI,OAAO,iBAAiB,YAAY;GACtC,MAAM,UAAU,MAAM,aAAa,MAAM;AACzC,OAAI,OAAO,YAAY,SACrB,OAAM,IAAI,MAAM;AAGlB,UAAO,IAAI,QAAQ;IACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;KACd;KACA,cAAc;IACf,EACF,EACF;IACD,MAAM;GACP;EACF;;;;AAKD,SAAO,IAAI,QAAQ;GACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;IACd,SAAS,MAAM;IACf,cAAc;GACf,EACF,EACF;GACD,MAAM;EACP;CACF;CAED,oBACEpB,OAEAyB,UACS;EACT,MAAM,uBACJ,UAAU,WAAW,SAAS,IAC9B,SAAS,YAAY,MAAM,CAAC,SAC1B,KAAK3B,SAAS,mBAAmB,IAAI,KAAK,KAAK,CAChD;EACH,MAAM,iBACJ,oBAAoB,QAAS,MAAM,iBAA4B;AACjE,SAAO,QACL,mBACI,iBAAiB,KAAK,wBACrB,iBAAiB,KAAK,aAAa,MAAM,SAAS,GAAG,GAAG,CAAC,EAC/D;CACF;CAED,oBACEE,OAEwE;EACxE,MAAM,EAAE,UAAU,iBAAkB,GAAG,MAAM,GAAG;AAChD,MAAI,oBAAoB,iBAAiB,SAAS,EAChD,QAAO;GAAE,UAAU;GAAkB,GAAG;EAAM;AAKhD,SAAO;GAAE;GAAU,GAAG;EAAM;CAI7B;CAED,MAAMQ,WACJkB,OACAC,iBACAC,0BACmB;EACnB,MAAMC,UAA6C,CAAE;EACrD,MAAM,kBAAkB,OAAO,OAC7B,4BAA4B,WAAW,2BACnC,yBAAyB,QACzB,CAAE,EACP;;;;EAKD,MAAM,WAAW,CACf,GAAI,iBAAiB,SAAS,KAAK/B,SAAS,aAC5C,GAAG,gBAAgB,IAAI,CAAC,iBAAiB,aAAa,KAAK,AAC5D;;;;;EAMD,MAAM,aACJ,iBAAiB,eAChB,gBAAgB,SAAS,IAAI,QAAQ;;;;AAKxC,MAAI,0BAA0B,SAAS,UAAU;GAC/C,MAAM,mBAAmB;IACvB,MAAM,yBAAyB,SAAS,QAAQ,QAAQ;IACxD,aAAa,qBACX,yBAAyB,SAAS,OACnC;IACD,QAAQ,yBAAyB,SAAS;IAC1C,QAAQ;GACT;GAED,OAAO,OAAO,SAAS;IACrB,iBAAiB;KACf,MAAM;KACN,aAAa;IACd;IACD,6BAA6B;KAC3B,QAAQ,EAAE,QAAQ,cAAe;KACjC,QAAQ,yBAAyB,SAAS;IAC3C;IACD,QAAQ;GACT,EAAC;EACH;;;;EAKD,MAAM,iBAAiB,MAAM,UAAU,OAAO,UAAU;GACtD,GAAG;GACH,GAAI,iBAAiB,iBAAiB,CAAE;GACxC,aAAa;EACd,EAAC;;;;EAKF,MAAM,gBAAgB,kBACpB,iBAAiB,gBAAgB,KAAKA,SAAS,aAChD,CAAC,KACA,KAAKA,SAAS,qBAAqB,WAC/B,cAAc,gBAAgB,KAAKA,SAAS,iBAAiB,GAC7D,eACL;AAED,SAAO;CACR;CAED,WAEE;EACA,MAAM,QAAQ,MAAM,UAAU;EAC9B,MAAM,YAAY,SAAS,EAAE,iBAAiB,WAAW,QAAQ,CAAE;AAEnE,SAAO;GACL,UAAU,CAAE;GACZ,GAAG;EACJ;CACF;AACF"}
+
{"version":3,"file":"AgentNode.js","names":["options: AgentNodeOptions<StructuredResponseFormat, ContextSchema>","#run","#options","#systemMessage","model: string | LanguageModelLike","state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"]","config: RunnableConfig","#invokeModel","#areMoreStepsNeeded","options: {\n lastMessage?: string;\n }","#deriveModel","request: ModelRequest","#getResponseFormat","#bindTools","#currentSystemMessage","#handleMultipleStructuredOutputs","#handleSingleStructuredOutput","wrappedHandler: (\n request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ) => Promise<InternalModelResponse<StructuredResponseFormat>>","request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >","runtime: Runtime<unknown>","requestWithStateAndRuntime: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >","req: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >","initialRequest: ModelRequest<\n InternalAgentState<StructuredResponseFormat> & PreHookAnnotation[\"State\"],\n unknown\n >","response: AIMessage","toolCalls: ToolCall[]","responseFormat: ToolResponseFormat","#handleToolStrategyError","toolCall: ToolCall","lastMessage?: string","error: ToolStrategyError","response: BaseMessage","model: LanguageModelLike","preparedOptions: ModelRequest | undefined","structuredResponseFormat: ResponseFormat | undefined","options: Partial<BaseChatModelCallOptions>"],"sources":["../../../src/agents/nodes/AgentNode.ts"],"sourcesContent":["/* eslint-disable no-instanceof/no-instanceof */\nimport { Runnable, RunnableConfig } from \"@langchain/core/runnables\";\nimport {\n BaseMessage,\n AIMessage,\n ToolMessage,\n SystemMessage,\n} from \"@langchain/core/messages\";\nimport { Command, type LangGraphRunnableConfig } from \"@langchain/langgraph\";\nimport { type LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { type BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport {\n InteropZodObject,\n getSchemaDescription,\n interopParse,\n interopZodObjectPartial,\n} from \"@langchain/core/utils/types\";\nimport { raceWithSignal } from \"@langchain/core/runnables\";\nimport type { ToolCall } from \"@langchain/core/messages/tool\";\nimport type { ClientTool, ServerTool } from \"@langchain/core/tools\";\n\nimport { initChatModel } from \"../../chat_models/universal.js\";\nimport { MultipleStructuredOutputsError } from \"../errors.js\";\nimport { RunnableCallable } from \"../RunnableCallable.js\";\nimport { PreHookAnnotation } from \"../annotation.js\";\nimport {\n bindTools,\n validateLLMHasNoBoundTools,\n hasToolCalls,\n isClientTool,\n} from \"../utils.js\";\nimport { mergeAbortSignals } from \"../nodes/utils.js\";\nimport { CreateAgentParams } from \"../types.js\";\nimport type { InternalAgentState, Runtime } from \"../runtime.js\";\nimport type {\n AgentMiddleware,\n AnyAnnotationRoot,\n WrapModelCallHandler,\n} from \"../middleware/types.js\";\nimport type { ModelRequest } from \"./types.js\";\nimport { withAgentName } from \"../withAgentName.js\";\nimport {\n ToolStrategy,\n ProviderStrategy,\n transformResponseFormat,\n ToolStrategyError,\n} from \"../responses.js\";\n\ntype ResponseHandlerResult<StructuredResponseFormat> =\n | {\n structuredResponse: StructuredResponseFormat;\n 
messages: BaseMessage[];\n }\n | Promise<Command>;\n\n/**\n * Wrap the base handler with middleware wrapModelCall hooks\n * Middleware are composed so the first middleware is the outermost wrapper\n * Example: [auth, retry, cache] means auth wraps retry wraps cache wraps baseHandler\n */\ntype InternalModelResponse<StructuredResponseFormat> =\n | AIMessage\n | ResponseHandlerResult<StructuredResponseFormat>;\n\n/**\n * The name of the agent node in the state graph.\n */\nexport const AGENT_NODE_NAME = \"model_request\";\n\nexport interface AgentNodeOptions<\n StructuredResponseFormat extends Record<string, unknown> = Record<\n string,\n unknown\n >,\n StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot,\n ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot\n> extends Pick<\n CreateAgentParams<StructuredResponseFormat, StateSchema, ContextSchema>,\n \"model\" | \"includeAgentName\" | \"name\" | \"responseFormat\" | \"middleware\"\n > {\n toolClasses: (ClientTool | ServerTool)[];\n shouldReturnDirect: Set<string>;\n signal?: AbortSignal;\n systemMessage: SystemMessage;\n wrapModelCallHookMiddleware?: [\n AgentMiddleware,\n () => Record<string, unknown>\n ][];\n}\n\ninterface NativeResponseFormat {\n type: \"native\";\n strategy: ProviderStrategy;\n}\n\ninterface ToolResponseFormat {\n type: \"tool\";\n tools: Record<string, ToolStrategy>;\n}\n\ntype ResponseFormat = NativeResponseFormat | ToolResponseFormat;\n\nexport class AgentNode<\n StructuredResponseFormat extends Record<string, unknown> = Record<\n string,\n unknown\n >,\n ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot\n> extends RunnableCallable<\n InternalAgentState<StructuredResponseFormat> & PreHookAnnotation[\"State\"],\n | (\n | { messages: BaseMessage[] }\n | { structuredResponse: StructuredResponseFormat }\n )\n | Command\n> {\n #options: AgentNodeOptions<StructuredResponseFormat, ContextSchema>;\n #systemMessage: SystemMessage;\n #currentSystemMessage: SystemMessage;\n\n constructor(\n options: AgentNodeOptions<StructuredResponseFormat, ContextSchema>\n ) {\n super({\n name: options.name ?? 
\"model\",\n func: (input, config) => this.#run(input, config as RunnableConfig),\n });\n\n this.#options = options;\n this.#systemMessage = options.systemMessage;\n }\n\n /**\n * Returns response format primtivies based on given model and response format provided by the user.\n *\n * If the user selects a tool output:\n * - return a record of tools to extract structured output from the model's response\n *\n * if the the user selects a native schema output or if the model supports JSON schema output:\n * - return a provider strategy to extract structured output from the model's response\n *\n * @param model - The model to get the response format for.\n * @returns The response format.\n */\n #getResponseFormat(\n model: string | LanguageModelLike\n ): ResponseFormat | undefined {\n if (!this.#options.responseFormat) {\n return undefined;\n }\n\n const strategies = transformResponseFormat(\n this.#options.responseFormat,\n undefined,\n model\n );\n\n /**\n * we either define a list of provider strategies or a list of tool strategies\n */\n const isProviderStrategy = strategies.every(\n (format) => format instanceof ProviderStrategy\n );\n\n /**\n * Populate a list of structured tool info.\n */\n if (!isProviderStrategy) {\n return {\n type: \"tool\",\n tools: (\n strategies.filter(\n (format) => format instanceof ToolStrategy\n ) as ToolStrategy[]\n ).reduce((acc, format) => {\n acc[format.name] = format;\n return acc;\n }, {} as Record<string, ToolStrategy>),\n };\n }\n\n return {\n type: \"native\",\n /**\n * there can only be one provider strategy\n */\n strategy: strategies[0] as ProviderStrategy,\n };\n }\n\n async #run(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n config: RunnableConfig\n ) {\n /**\n * Check if we just executed a returnDirect tool\n * If so, we should generate structured response (if needed) and stop\n */\n const lastMessage = state.messages.at(-1);\n if (\n lastMessage &&\n ToolMessage.isInstance(lastMessage) &&\n lastMessage.name &&\n this.#options.shouldReturnDirect.has(lastMessage.name)\n ) {\n /**\n * return directly without invoking the model again\n */\n return { messages: [] };\n }\n\n const response = await this.#invokeModel(state, config);\n\n /**\n * if we were able to generate a structured response, return it\n */\n if (\"structuredResponse\" in response) {\n return {\n messages: [...state.messages, ...(response.messages || [])],\n structuredResponse: response.structuredResponse,\n };\n }\n\n /**\n * if we need to direct the agent to the model, return the update\n */\n if (response instanceof Command) {\n return response;\n }\n\n response.name = this.name;\n response.lc_kwargs.name = this.name;\n\n if (this.#areMoreStepsNeeded(state, response)) {\n return {\n messages: [\n new AIMessage({\n content: \"Sorry, need more steps to process this request.\",\n name: this.name,\n id: response.id,\n }),\n ],\n };\n }\n\n return { messages: [response] };\n }\n\n /**\n * Derive the model from the options.\n * @param state - The state of the agent.\n * @param config - The config of the agent.\n * @returns The model.\n */\n #deriveModel() {\n if (typeof this.#options.model === \"string\") {\n return initChatModel(this.#options.model);\n }\n\n if (this.#options.model) {\n return this.#options.model;\n }\n\n throw new Error(\"No model option was provided, either via `model` option.\");\n }\n\n async #invokeModel(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n config: RunnableConfig,\n 
options: {\n lastMessage?: string;\n } = {}\n ): Promise<AIMessage | ResponseHandlerResult<StructuredResponseFormat>> {\n const model = await this.#deriveModel();\n const lgConfig = config as LangGraphRunnableConfig;\n\n /**\n * Create the base handler that performs the actual model invocation\n */\n const baseHandler = async (\n request: ModelRequest\n ): Promise<AIMessage | ResponseHandlerResult<StructuredResponseFormat>> => {\n /**\n * Check if the LLM already has bound tools and throw if it does.\n */\n validateLLMHasNoBoundTools(request.model);\n\n const structuredResponseFormat = this.#getResponseFormat(request.model);\n const modelWithTools = await this.#bindTools(\n request.model,\n request,\n structuredResponseFormat\n );\n\n /**\n * prepend the system message to the messages if it is not empty\n */\n const messages = [\n ...(this.#currentSystemMessage.text === \"\"\n ? []\n : [this.#currentSystemMessage]),\n ...request.messages,\n ];\n\n const signal = mergeAbortSignals(this.#options.signal, config.signal);\n const response = (await raceWithSignal(\n modelWithTools.invoke(messages, {\n ...config,\n signal,\n }),\n signal\n )) as AIMessage;\n\n /**\n * if the user requests a native schema output, try to parse the response\n * and return the structured response if it is valid\n */\n if (structuredResponseFormat?.type === \"native\") {\n const structuredResponse =\n structuredResponseFormat.strategy.parse(response);\n if (structuredResponse) {\n return { structuredResponse, messages: [response] };\n }\n\n return response;\n }\n\n if (!structuredResponseFormat || !response.tool_calls) {\n return response;\n }\n\n const toolCalls = response.tool_calls.filter(\n (call) => call.name in structuredResponseFormat.tools\n );\n\n /**\n * if there were not structured tool calls, we can return the response\n */\n if (toolCalls.length === 0) {\n return response;\n }\n\n /**\n * if there were multiple structured tool calls, we should throw an error as this\n * scenario is not defined/supported.\n */\n if (toolCalls.length > 1) {\n return this.#handleMultipleStructuredOutputs(\n response,\n toolCalls,\n structuredResponseFormat\n );\n }\n\n const toolStrategy = structuredResponseFormat.tools[toolCalls[0].name];\n const toolMessageContent = toolStrategy?.options?.toolMessageContent;\n return this.#handleSingleStructuredOutput(\n response,\n toolCalls[0],\n structuredResponseFormat,\n toolMessageContent ?? options.lastMessage\n );\n };\n\n const wrapperMiddleware = this.#options.wrapModelCallHookMiddleware ?? [];\n let wrappedHandler: (\n request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ) => Promise<InternalModelResponse<StructuredResponseFormat>> = baseHandler;\n\n /**\n * Build composed handler from last to first so first middleware becomes outermost\n */\n for (let i = wrapperMiddleware.length - 1; i >= 0; i--) {\n const [middleware, getMiddlewareState] = wrapperMiddleware[i];\n if (middleware.wrapModelCall) {\n const innerHandler = wrappedHandler;\n const currentMiddleware = middleware;\n const currentGetState = getMiddlewareState;\n\n wrappedHandler = async (\n request: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ): Promise<InternalModelResponse<StructuredResponseFormat>> => {\n /**\n * Merge context with default context of middleware\n */\n const context = currentMiddleware.contextSchema\n ? 
interopParse(\n currentMiddleware.contextSchema,\n lgConfig?.context || {}\n )\n : lgConfig?.context;\n\n /**\n * Create runtime\n */\n const runtime: Runtime<unknown> = Object.freeze({\n context,\n writer: lgConfig.writer,\n interrupt: lgConfig.interrupt,\n signal: lgConfig.signal,\n });\n\n /**\n * Create the request with state and runtime\n */\n const requestWithStateAndRuntime: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n > = {\n ...request,\n state: {\n ...(middleware.stateSchema\n ? interopParse(\n interopZodObjectPartial(middleware.stateSchema),\n state\n )\n : {}),\n ...currentGetState(),\n messages: state.messages,\n } as InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n runtime,\n };\n\n /**\n * Create handler that validates tools and calls the inner handler\n */\n const handlerWithValidation = async (\n req: ModelRequest<\n InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n unknown\n >\n ): Promise<InternalModelResponse<StructuredResponseFormat>> => {\n /**\n * Verify that the user didn't add any new tools.\n * We can't allow this as the ToolNode is already initiated with given tools.\n */\n const modifiedTools = req.tools ?? [];\n const newTools = modifiedTools.filter(\n (tool) =>\n isClientTool(tool) &&\n !this.#options.toolClasses.some((t) => t.name === tool.name)\n );\n if (newTools.length > 0) {\n throw new Error(\n `You have added a new tool in \"wrapModelCall\" hook of middleware \"${\n currentMiddleware.name\n }\": ${newTools\n .map((tool) => tool.name)\n .join(\", \")}. This is not supported.`\n );\n }\n\n /**\n * Verify that user has not added or modified a tool with the same name.\n * We can't allow this as the ToolNode is already initiated with given tools.\n */\n const invalidTools = modifiedTools.filter(\n (tool) =>\n isClientTool(tool) &&\n this.#options.toolClasses.every((t) => t !== tool)\n );\n if (invalidTools.length > 0) {\n throw new Error(\n `You have modified a tool in \"wrapModelCall\" hook of middleware \"${\n currentMiddleware.name\n }\": ${invalidTools\n .map((tool) => tool.name)\n .join(\", \")}. 
This is not supported.`\n );\n }\n\n let normalizedReq = req;\n const hasSystemPromptChanged =\n req.systemPrompt !== this.#currentSystemMessage.text;\n const hasSystemMessageChanged =\n req.systemMessage !== this.#currentSystemMessage;\n if (hasSystemPromptChanged && hasSystemMessageChanged) {\n throw new Error(\n \"Cannot change both systemPrompt and systemMessage in the same request.\"\n );\n }\n\n /**\n * Check if systemPrompt is a string was changed, if so create a new SystemMessage\n */\n if (hasSystemPromptChanged) {\n this.#currentSystemMessage = new SystemMessage({\n content: [{ type: \"text\", text: req.systemPrompt }],\n });\n normalizedReq = {\n ...req,\n systemPrompt: this.#currentSystemMessage.text,\n systemMessage: this.#currentSystemMessage,\n };\n }\n /**\n * If the systemMessage was changed, update the current system message\n */\n if (hasSystemMessageChanged) {\n this.#currentSystemMessage = new SystemMessage({\n ...req.systemMessage,\n });\n normalizedReq = {\n ...req,\n systemPrompt: this.#currentSystemMessage.text,\n systemMessage: this.#currentSystemMessage,\n };\n }\n\n return innerHandler(normalizedReq);\n };\n\n // Call middleware's wrapModelCall with the validation handler\n if (!currentMiddleware.wrapModelCall) {\n return handlerWithValidation(requestWithStateAndRuntime);\n }\n\n try {\n const middlewareResponse = await currentMiddleware.wrapModelCall(\n requestWithStateAndRuntime,\n handlerWithValidation as WrapModelCallHandler\n );\n\n /**\n * Validate that this specific middleware returned a valid AIMessage\n */\n if (!AIMessage.isInstance(middlewareResponse)) {\n throw new Error(\n `Invalid response from \"wrapModelCall\" in middleware \"${\n currentMiddleware.name\n }\": expected AIMessage, got ${typeof middlewareResponse}`\n );\n }\n\n return middlewareResponse;\n } catch (error) {\n /**\n * Add middleware context to error if not already added\n */\n if (\n error instanceof Error &&\n !error.message.includes(`middleware \"${currentMiddleware.name}\"`)\n ) {\n error.message = `Error in middleware \"${currentMiddleware.name}\": ${error.message}`;\n }\n throw error;\n }\n };\n }\n }\n\n /**\n * Execute the wrapped handler with the initial request\n * Reset current system prompt to initial state and convert to string using .text getter\n * for backwards compatibility with ModelRequest\n */\n this.#currentSystemMessage = this.#systemMessage;\n const initialRequest: ModelRequest<\n InternalAgentState<StructuredResponseFormat> & PreHookAnnotation[\"State\"],\n unknown\n > = {\n model,\n systemPrompt: this.#currentSystemMessage?.text,\n systemMessage: this.#currentSystemMessage,\n messages: state.messages,\n tools: this.#options.toolClasses,\n state,\n runtime: Object.freeze({\n context: lgConfig?.context,\n writer: lgConfig.writer,\n interrupt: lgConfig.interrupt,\n signal: lgConfig.signal,\n }) as Runtime<unknown>,\n };\n\n return wrappedHandler(initialRequest);\n }\n\n /**\n * If the model returns multiple structured outputs, we need to handle it.\n * @param response - The response from the model\n * @param toolCalls - The tool calls that were made\n * @returns The response from the model\n */\n #handleMultipleStructuredOutputs(\n response: AIMessage,\n toolCalls: ToolCall[],\n responseFormat: ToolResponseFormat\n ): Promise<Command> {\n const multipleStructuredOutputsError = new MultipleStructuredOutputsError(\n toolCalls.map((call) => call.name)\n );\n\n return this.#handleToolStrategyError(\n multipleStructuredOutputsError,\n response,\n toolCalls[0],\n 
responseFormat\n );\n }\n\n /**\n * If the model returns a single structured output, we need to handle it.\n * @param toolCall - The tool call that was made\n * @returns The structured response and a message to the LLM if needed\n */\n #handleSingleStructuredOutput(\n response: AIMessage,\n toolCall: ToolCall,\n responseFormat: ToolResponseFormat,\n lastMessage?: string\n ): ResponseHandlerResult<StructuredResponseFormat> {\n const tool = responseFormat.tools[toolCall.name];\n\n try {\n const structuredResponse = tool.parse(\n toolCall.args\n ) as StructuredResponseFormat;\n\n return {\n structuredResponse,\n messages: [\n response,\n new ToolMessage({\n tool_call_id: toolCall.id ?? \"\",\n content: JSON.stringify(structuredResponse),\n name: toolCall.name,\n }),\n new AIMessage(\n lastMessage ??\n `Returning structured response: ${JSON.stringify(\n structuredResponse\n )}`\n ),\n ],\n };\n } catch (error) {\n return this.#handleToolStrategyError(\n error as ToolStrategyError,\n response,\n toolCall,\n responseFormat\n );\n }\n }\n\n async #handleToolStrategyError(\n error: ToolStrategyError,\n response: AIMessage,\n toolCall: ToolCall,\n responseFormat: ToolResponseFormat\n ): Promise<Command> {\n /**\n * Using the `errorHandler` option of the first `ToolStrategy` entry is sufficient here.\n * There is technically only one `ToolStrategy` entry in `structuredToolInfo` if the user\n * uses `toolStrategy` to define the response format. If the user applies a list of json\n * schema objects, these will be transformed into multiple `ToolStrategy` entries but all\n * with the same `handleError` option.\n */\n const errorHandler = Object.values(responseFormat.tools).at(0)?.options\n ?.handleError;\n\n const toolCallId = toolCall.id;\n if (!toolCallId) {\n throw new Error(\n \"Tool call ID is required to handle tool output errors. 
Please provide a tool call ID.\"\n );\n }\n\n /**\n * Default behavior: retry if `errorHandler` is undefined or truthy.\n * Only throw if explicitly set to `false`.\n */\n if (errorHandler === false) {\n throw error;\n }\n\n /**\n * retry if:\n */\n if (\n /**\n * if the user has provided truthy value as the `errorHandler`, return a new AIMessage\n * with the error message and retry the tool call.\n */\n errorHandler === undefined ||\n (typeof errorHandler === \"boolean\" && errorHandler) ||\n /**\n * if `errorHandler` is an array and contains MultipleStructuredOutputsError\n */\n (Array.isArray(errorHandler) &&\n errorHandler.some((h) => h instanceof MultipleStructuredOutputsError))\n ) {\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content: error.message,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n /**\n * if `errorHandler` is a string, retry the tool call with given string\n */\n if (typeof errorHandler === \"string\") {\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content: errorHandler,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n /**\n * if `errorHandler` is a function, retry the tool call with the function\n */\n if (typeof errorHandler === \"function\") {\n const content = await errorHandler(error);\n if (typeof content !== \"string\") {\n throw new Error(\"Error handler must return a string.\");\n }\n\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n /**\n * Default: retry if we reach here\n */\n return new Command({\n update: {\n messages: [\n response,\n new ToolMessage({\n content: error.message,\n tool_call_id: toolCallId,\n }),\n ],\n },\n goto: AGENT_NODE_NAME,\n });\n }\n\n #areMoreStepsNeeded(\n state: InternalAgentState<StructuredResponseFormat> &\n PreHookAnnotation[\"State\"],\n response: BaseMessage\n ): boolean {\n const allToolsReturnDirect =\n AIMessage.isInstance(response) &&\n response.tool_calls?.every((call) =>\n this.#options.shouldReturnDirect.has(call.name)\n );\n const remainingSteps =\n \"remainingSteps\" in state ? (state.remainingSteps as number) : undefined;\n return Boolean(\n remainingSteps &&\n ((remainingSteps < 1 && allToolsReturnDirect) ||\n (remainingSteps < 2 && hasToolCalls(state.messages.at(-1))))\n );\n }\n\n async #bindTools(\n model: LanguageModelLike,\n preparedOptions: ModelRequest | undefined,\n structuredResponseFormat: ResponseFormat | undefined\n ): Promise<Runnable> {\n const options: Partial<BaseChatModelCallOptions> = {};\n const structuredTools = Object.values(\n structuredResponseFormat && \"tools\" in structuredResponseFormat\n ? structuredResponseFormat.tools\n : {}\n );\n\n /**\n * Use tools from preparedOptions if provided, otherwise use default tools\n */\n const allTools = [\n ...(preparedOptions?.tools ?? this.#options.toolClasses),\n ...structuredTools.map((toolStrategy) => toolStrategy.tool),\n ];\n\n /**\n * If there are structured tools, we need to set the tool choice to \"any\"\n * so that the model can choose to use a structured tool or not.\n */\n const toolChoice =\n preparedOptions?.toolChoice ||\n (structuredTools.length > 0 ? 
\"any\" : undefined);\n\n /**\n * check if the user requests a native schema output\n */\n if (structuredResponseFormat?.type === \"native\") {\n const jsonSchemaParams = {\n name: structuredResponseFormat.strategy.schema?.name ?? \"extract\",\n description: getSchemaDescription(\n structuredResponseFormat.strategy.schema\n ),\n schema: structuredResponseFormat.strategy.schema,\n strict: true,\n };\n\n Object.assign(options, {\n response_format: {\n type: \"json_schema\",\n json_schema: jsonSchemaParams,\n },\n ls_structured_output_format: {\n kwargs: { method: \"json_schema\" },\n schema: structuredResponseFormat.strategy.schema,\n },\n strict: true,\n });\n }\n\n /**\n * Bind tools to the model if they are not already bound.\n */\n const modelWithTools = await bindTools(model, allTools, {\n ...options,\n ...(preparedOptions?.modelSettings ?? {}),\n tool_choice: toolChoice,\n });\n\n /**\n * Create a model runnable with the prompt and agent name\n * Use current SystemMessage state (which may have been modified by middleware)\n */\n const modelRunnable =\n this.#options.includeAgentName === \"inline\"\n ? withAgentName(modelWithTools, this.#options.includeAgentName)\n : modelWithTools;\n\n return modelRunnable;\n }\n\n getState(): {\n messages: BaseMessage[];\n } {\n const state = super.getState();\n const origState = state && !(state instanceof Command) ? state : {};\n\n return {\n messages: [],\n ...origState,\n };\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;AAmEA,MAAa,kBAAkB;AAmC/B,IAAa,YAAb,cAMU,iBAOR;CACA;CACA;CACA;CAEA,YACEA,SACA;EACA,MAAM;GACJ,MAAM,QAAQ,QAAQ;GACtB,MAAM,CAAC,OAAO,WAAW,KAAKC,KAAK,OAAO,OAAyB;EACpE,EAAC;EAEF,KAAKC,WAAW;EAChB,KAAKC,iBAAiB,QAAQ;CAC/B;;;;;;;;;;;;;CAcD,mBACEC,OAC4B;AAC5B,MAAI,CAAC,KAAKF,SAAS,eACjB,QAAO;EAGT,MAAM,aAAa,wBACjB,KAAKA,SAAS,gBACd,QACA,MACD;;;;EAKD,MAAM,qBAAqB,WAAW,MACpC,CAAC,WAAW,kBAAkB,iBAC/B;;;;AAKD,MAAI,CAAC,mBACH,QAAO;GACL,MAAM;GACN,OACE,WAAW,OACT,CAAC,WAAW,kBAAkB,aAC/B,CACD,OAAO,CAAC,KAAK,WAAW;IACxB,IAAI,OAAO,QAAQ;AACnB,WAAO;GACR,GAAE,CAAE,EAAiC;EACvC;AAGH,SAAO;GACL,MAAM;GAIN,UAAU,WAAW;EACtB;CACF;CAED,MAAMD,KACJI,OAEAC,QACA;;;;;EAKA,MAAM,cAAc,MAAM,SAAS,GAAG,GAAG;AACzC,MACE,eACA,YAAY,WAAW,YAAY,IACnC,YAAY,QACZ,KAAKJ,SAAS,mBAAmB,IAAI,YAAY,KAAK;;;;AAKtD,SAAO,EAAE,UAAU,CAAE,EAAE;EAGzB,MAAM,WAAW,MAAM,KAAKK,aAAa,OAAO,OAAO;;;;AAKvD,MAAI,wBAAwB,SAC1B,QAAO;GACL,UAAU,CAAC,GAAG,MAAM,UAAU,GAAI,SAAS,YAAY,CAAE,CAAE;GAC3D,oBAAoB,SAAS;EAC9B;;;;AAMH,MAAI,oBAAoB,QACtB,QAAO;EAGT,SAAS,OAAO,KAAK;EACrB,SAAS,UAAU,OAAO,KAAK;AAE/B,MAAI,KAAKC,oBAAoB,OAAO,SAAS,CAC3C,QAAO,EACL,UAAU,CACR,IAAI,UAAU;GACZ,SAAS;GACT,MAAM,KAAK;GACX,IAAI,SAAS;EACd,EACF,EACF;AAGH,SAAO,EAAE,UAAU,CAAC,QAAS,EAAE;CAChC;;;;;;;CAQD,eAAe;AACb,MAAI,OAAO,KAAKN,SAAS,UAAU,SACjC,QAAO,cAAc,KAAKA,SAAS,MAAM;AAG3C,MAAI,KAAKA,SAAS,MAChB,QAAO,KAAKA,SAAS;AAGvB,QAAM,IAAI,MAAM;CACjB;CAED,MAAMK,aACJF,OAEAC,QACAG,UAEI,CAAE,GACgE;EACtE,MAAM,QAAQ,MAAM,KAAKC,cAAc;EACvC,MAAM,WAAW;;;;EAKjB,MAAM,cAAc,OAClBC,YACyE;;;;GAIzE,2BAA2B,QAAQ,MAAM;GAEzC,MAAM,2BAA2B,KAAKC,mBAAmB,QAAQ,MAAM;GACvE,MAAM,iBAAiB,MAAM,KAAKC,WAChC,QAAQ,OACR,SACA,yBACD;;;;GAKD,MAAM,WAAW,CACf,GAAI,KAAKC,sBAAsB,SAAS,KACpC,CAAE,IACF,CAAC,KAAKA,qBAAsB,GAChC,GAAG,QAAQ,QACZ;GAED,MAAM,SAAS,kBAAkB,KAAKZ,SAAS,QAAQ,OAAO,OAAO;GACrE,MAAM,WAAY,MAAM,eACtB,eAAe,OAAO,UAAU;IAC9B,GAAG;IACH;GACD,EAAC,EACF,OACD;;;;;AAMD,OAAI,0BAA0B,SAAS,UAAU;IAC/C,MAAM,qBACJ,yBAAyB,SAAS,MAAM,SAAS;AACnD,QAAI,mBACF,QAAO;KAAE;KAAoB,UAAU,CAAC,QAAS;IAAE;AAGrD,WAAO;GACR;AAED,OAAI,CAAC,4BAA4B,CAAC,SAAS,WACzC,QAAO;GAGT,MAAM,YAAY,SAAS,WAAW,OACpC,CAAC,SAAS,KAAK,QAAQ,yBAAyB,MACjD;;;;AAKD,OAAI,UAAU,WAAW,EACvB,QA
AO;;;;;AAOT,OAAI,UAAU,SAAS,EACrB,QAAO,KAAKa,iCACV,UACA,WACA,yBACD;GAGH,MAAM,eAAe,yBAAyB,MAAM,UAAU,GAAG;GACjE,MAAM,qBAAqB,cAAc,SAAS;AAClD,UAAO,KAAKC,8BACV,UACA,UAAU,IACV,0BACA,sBAAsB,QAAQ,YAC/B;EACF;EAED,MAAM,oBAAoB,KAAKd,SAAS,+BAA+B,CAAE;EACzE,IAAIe,iBAM4D;;;;AAKhE,OAAK,IAAI,IAAI,kBAAkB,SAAS,GAAG,KAAK,GAAG,KAAK;GACtD,MAAM,CAAC,YAAY,mBAAmB,GAAG,kBAAkB;AAC3D,OAAI,WAAW,eAAe;IAC5B,MAAM,eAAe;IACrB,MAAM,oBAAoB;IAC1B,MAAM,kBAAkB;IAExB,iBAAiB,OACfC,YAK6D;;;;KAI7D,MAAM,UAAU,kBAAkB,gBAC9B,aACE,kBAAkB,eAClB,UAAU,WAAW,CAAE,EACxB,GACD,UAAU;;;;KAKd,MAAMC,UAA4B,OAAO,OAAO;MAC9C;MACA,QAAQ,SAAS;MACjB,WAAW,SAAS;MACpB,QAAQ,SAAS;KAClB,EAAC;;;;KAKF,MAAMC,6BAIF;MACF,GAAG;MACH,OAAO;OACL,GAAI,WAAW,cACX,aACE,wBAAwB,WAAW,YAAY,EAC/C,MACD,GACD,CAAE;OACN,GAAG,iBAAiB;OACpB,UAAU,MAAM;MACjB;MAED;KACD;;;;KAKD,MAAM,wBAAwB,OAC5BC,QAK6D;;;;;MAK7D,MAAM,gBAAgB,IAAI,SAAS,CAAE;MACrC,MAAM,WAAW,cAAc,OAC7B,CAAC,SACC,aAAa,KAAK,IAClB,CAAC,KAAKnB,SAAS,YAAY,KAAK,CAAC,MAAM,EAAE,SAAS,KAAK,KAAK,CAC/D;AACD,UAAI,SAAS,SAAS,EACpB,OAAM,IAAI,MACR,CAAC,iEAAiE,EAChE,kBAAkB,KACnB,GAAG,EAAE,SACH,IAAI,CAAC,SAAS,KAAK,KAAK,CACxB,KAAK,KAAK,CAAC,wBAAwB,CAAC;;;;;MAQ3C,MAAM,eAAe,cAAc,OACjC,CAAC,SACC,aAAa,KAAK,IAClB,KAAKA,SAAS,YAAY,MAAM,CAAC,MAAM,MAAM,KAAK,CACrD;AACD,UAAI,aAAa,SAAS,EACxB,OAAM,IAAI,MACR,CAAC,gEAAgE,EAC/D,kBAAkB,KACnB,GAAG,EAAE,aACH,IAAI,CAAC,SAAS,KAAK,KAAK,CACxB,KAAK,KAAK,CAAC,wBAAwB,CAAC;MAI3C,IAAI,gBAAgB;MACpB,MAAM,yBACJ,IAAI,iBAAiB,KAAKY,sBAAsB;MAClD,MAAM,0BACJ,IAAI,kBAAkB,KAAKA;AAC7B,UAAI,0BAA0B,wBAC5B,OAAM,IAAI,MACR;;;;AAOJ,UAAI,wBAAwB;OAC1B,KAAKA,wBAAwB,IAAI,cAAc,EAC7C,SAAS,CAAC;QAAE,MAAM;QAAQ,MAAM,IAAI;OAAc,CAAC,EACpD;OACD,gBAAgB;QACd,GAAG;QACH,cAAc,KAAKA,sBAAsB;QACzC,eAAe,KAAKA;OACrB;MACF;;;;AAID,UAAI,yBAAyB;OAC3B,KAAKA,wBAAwB,IAAI,cAAc,EAC7C,GAAG,IAAI,cACR;OACD,gBAAgB;QACd,GAAG;QACH,cAAc,KAAKA,sBAAsB;QACzC,eAAe,KAAKA;OACrB;MACF;AAED,aAAO,aAAa,cAAc;KACnC;AAGD,SAAI,CAAC,kBAAkB,cACrB,QAAO,sBAAsB,2BAA2B;AAG1D,SAAI;MACF,MAAM,qBAAqB,MAAM,kBAAkB,cACjD,4BACA,sBACD;;;;AAKD,UAAI,CAAC,UAAU,WAAW,mBAAmB,CAC3C,OAAM,IAAI,MACR,CAAC,qDAAqD,EACpD,kBAAkB,KACnB,2BAA2B,EAAE,OAAO,oBAAoB;AAI7D,aAAO;KACR,SAAQ,OAAO;;;;AAId,UACE,iBAAiB,SACjB,CAAC,MAAM,QAAQ,SAAS,CAAC,YAAY,EAAE,kBAAkB,KAAK,CAAC,CAAC,CAAC,EAEjE,MAAM,UAAU,CAAC,qBAAqB,EAAE,kBAAkB,KAAK,GAAG,EAAE,MAAM,SAAS;AAErF,YAAM;KACP;IACF;GACF;EACF;;;;;;EAOD,KAAKA,wBAAwB,KAAKX;EAClC,MAAMmB,iBAGF;GACF;GACA,cAAc,KAAKR,uBAAuB;GAC1C,eAAe,KAAKA;GACpB,UAAU,MAAM;GAChB,OAAO,KAAKZ,SAAS;GACrB;GACA,SAAS,OAAO,OAAO;IACrB,SAAS,UAAU;IACnB,QAAQ,SAAS;IACjB,WAAW,SAAS;IACpB,QAAQ,SAAS;GAClB,EAAC;EACH;AAED,SAAO,eAAe,eAAe;CACtC;;;;;;;CAQD,iCACEqB,UACAC,WACAC,gBACkB;EAClB,MAAM,iCAAiC,IAAI,+BACzC,UAAU,IAAI,CAAC,SAAS,KAAK,KAAK;AAGpC,SAAO,KAAKC,yBACV,gCACA,UACA,UAAU,IACV,eACD;CACF;;;;;;CAOD,8BACEH,UACAI,UACAF,gBACAG,aACiD;EACjD,MAAM,OAAO,eAAe,MAAM,SAAS;AAE3C,MAAI;GACF,MAAM,qBAAqB,KAAK,MAC9B,SAAS,KACV;AAED,UAAO;IACL;IACA,UAAU;KACR;KACA,IAAI,YAAY;MACd,cAAc,SAAS,MAAM;MAC7B,SAAS,KAAK,UAAU,mBAAmB;MAC3C,MAAM,SAAS;KAChB;KACD,IAAI,UACF,eACE,CAAC,+BAA+B,EAAE,KAAK,UACrC,mBACD,EAAE;IAER;GACF;EACF,SAAQ,OAAO;AACd,UAAO,KAAKF,yBACV,OACA,UACA,UACA,eACD;EACF;CACF;CAED,MAAMA,yBACJG,OACAN,UACAI,UACAF,gBACkB;;;;;;;;EAQlB,MAAM,eAAe,OAAO,OAAO,eAAe,MAAM,CAAC,GAAG,EAAE,EAAE,SAC5D;EAEJ,MAAM,aAAa,SAAS;AAC5B,MAAI,CAAC,WACH,OAAM,IAAI,MACR;;;;;AAQJ,MAAI,iBAAiB,MACnB,OAAM;;;;AAMR,MAKE,iBAAiB,UAChB,OAAO,iBAAiB,aAAa,gBAIrC,MAAM,QAAQ,aAAa,IAC1B,aAAa,KAAK,CAAC,MAAM,aAAa,+BAA+B,CAEvE,QAAO,IAAI,QAAQ;GACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;IACd,SAAS,MAAM;IACf,cAAc;GACf,EACF,EACF;GACD,MAAM;EACP;;;;AAMH,MAAI,OAAO,iBAAiB,SAC1B,QAAO,IAAI,QAAQ;GACjB,QAAQ,EACN,UAAU,CACR,UACA,IA
AI,YAAY;IACd,SAAS;IACT,cAAc;GACf,EACF,EACF;GACD,MAAM;EACP;;;;AAMH,MAAI,OAAO,iBAAiB,YAAY;GACtC,MAAM,UAAU,MAAM,aAAa,MAAM;AACzC,OAAI,OAAO,YAAY,SACrB,OAAM,IAAI,MAAM;AAGlB,UAAO,IAAI,QAAQ;IACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;KACd;KACA,cAAc;IACf,EACF,EACF;IACD,MAAM;GACP;EACF;;;;AAKD,SAAO,IAAI,QAAQ;GACjB,QAAQ,EACN,UAAU,CACR,UACA,IAAI,YAAY;IACd,SAAS,MAAM;IACf,cAAc;GACf,EACF,EACF;GACD,MAAM;EACP;CACF;CAED,oBACEpB,OAEAyB,UACS;EACT,MAAM,uBACJ,UAAU,WAAW,SAAS,IAC9B,SAAS,YAAY,MAAM,CAAC,SAC1B,KAAK5B,SAAS,mBAAmB,IAAI,KAAK,KAAK,CAChD;EACH,MAAM,iBACJ,oBAAoB,QAAS,MAAM,iBAA4B;AACjE,SAAO,QACL,mBACI,iBAAiB,KAAK,wBACrB,iBAAiB,KAAK,aAAa,MAAM,SAAS,GAAG,GAAG,CAAC,EAC/D;CACF;CAED,MAAMW,WACJkB,OACAC,iBACAC,0BACmB;EACnB,MAAMC,UAA6C,CAAE;EACrD,MAAM,kBAAkB,OAAO,OAC7B,4BAA4B,WAAW,2BACnC,yBAAyB,QACzB,CAAE,EACP;;;;EAKD,MAAM,WAAW,CACf,GAAI,iBAAiB,SAAS,KAAKhC,SAAS,aAC5C,GAAG,gBAAgB,IAAI,CAAC,iBAAiB,aAAa,KAAK,AAC5D;;;;;EAMD,MAAM,aACJ,iBAAiB,eAChB,gBAAgB,SAAS,IAAI,QAAQ;;;;AAKxC,MAAI,0BAA0B,SAAS,UAAU;GAC/C,MAAM,mBAAmB;IACvB,MAAM,yBAAyB,SAAS,QAAQ,QAAQ;IACxD,aAAa,qBACX,yBAAyB,SAAS,OACnC;IACD,QAAQ,yBAAyB,SAAS;IAC1C,QAAQ;GACT;GAED,OAAO,OAAO,SAAS;IACrB,iBAAiB;KACf,MAAM;KACN,aAAa;IACd;IACD,6BAA6B;KAC3B,QAAQ,EAAE,QAAQ,cAAe;KACjC,QAAQ,yBAAyB,SAAS;IAC3C;IACD,QAAQ;GACT,EAAC;EACH;;;;EAKD,MAAM,iBAAiB,MAAM,UAAU,OAAO,UAAU;GACtD,GAAG;GACH,GAAI,iBAAiB,iBAAiB,CAAE;GACxC,aAAa;EACd,EAAC;;;;;EAMF,MAAM,gBACJ,KAAKA,SAAS,qBAAqB,WAC/B,cAAc,gBAAgB,KAAKA,SAAS,iBAAiB,GAC7D;AAEN,SAAO;CACR;CAED,WAEE;EACA,MAAM,QAAQ,MAAM,UAAU;EAC9B,MAAM,YAAY,SAAS,EAAE,iBAAiB,WAAW,QAAQ,CAAE;AAEnE,SAAO;GACL,UAAU,CAAE;GACZ,GAAG;EACJ;CACF;AACF"}
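The bundled agent code embedded in the source map above composes `wrapModelCall` middleware from last to first, so the first middleware in the array becomes the outermost wrapper around the base model call. A minimal sketch of that composition order, using plain functions rather than the package's real `ModelRequest`/middleware types (the `Handler` and `Middleware` shapes below are illustrative assumptions, not the library API):

```ts
// Right-to-left composition: middleware[0] becomes the outermost wrapper.
type Handler = (request: string) => Promise<string>;
type Middleware = (request: string, next: Handler) => Promise<string>;

function compose(middleware: Middleware[], base: Handler): Handler {
  let wrapped: Handler = base;
  for (let i = middleware.length - 1; i >= 0; i--) {
    const inner = wrapped;
    const current = middleware[i];
    wrapped = (request) => current(request, inner);
  }
  return wrapped;
}

// Usage: prints "A before", "B before", "base", "B after", "A after".
const log =
  (name: string): Middleware =>
  async (req, next) => {
    console.log(`${name} before`);
    const result = await next(req);
    console.log(`${name} after`);
    return result;
  };

const handler = compose([log("A"), log("B")], async (req) => {
  console.log("base");
  return req.toUpperCase();
});
void handler("hello");
```

This mirrors the backwards loop over `wrapModelCallHookMiddleware` in the bundled output, where each new handler closes over the previous one and revalidates that no tools were added or swapped.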
@@ -1,6 +1,6 @@
 import { AgentBuiltInState, Runtime } from "../runtime.cjs";
 import { LanguageModelLike } from "@langchain/core/language_models/base";
-import { BaseMessage } from "@langchain/core/messages";
+import { BaseMessage, SystemMessage } from "@langchain/core/messages";
 import { ClientTool, ServerTool } from "@langchain/core/tools";
 
 //#region src/agents/nodes/types.d.ts
@@ -22,9 +22,45 @@ interface ModelRequest<TState extends Record<string, unknown> = Record<string, u
 */
 messages: BaseMessage[];
 /**
-* The system message for this step.
+* The system message string for this step.
+*
+* @default ""
+* @deprecated Use {@link ModelRequest.systemMessage} instead.
+*/
+systemPrompt: string;
+/**
+* The system message for this step. If no `systemPrompt` was provided, when `createAgent`
+* was initialized, an empty system message will be used.
+*
+* @default new SystemMessage("")
+* @example extend system message with a string
+* ```ts
+* wrapModelCall: async (request, handler) => {
+* return handler({
+* ...request,
+* systemMessage: request.systemMessage.concat("something")
+* });
+* }
+* ```
+* @example extend system message with a SystemMessage and cache control
+* ```ts
+* wrapModelCall: async (request, handler) => {
+* return handler({
+* ...request,
+* systemMessage: request.systemMessage.concat(
+* new SystemMessage({
+* content: [{
+* type: "text",
+* text: "something",
+* cache_control: { type: "ephemeral", ttl: "1m" }
+* }],
+* })
+* ),
+* });
+* }
+* ```
 */
-
+systemMessage: SystemMessage;
 /**
 * Tool choice configuration (model-specific format).
 * Can be one of:

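The net effect of this hunk: `systemPrompt` becomes a deprecated string alias, and middleware is expected to work with the new `systemMessage: SystemMessage` field, which can carry content blocks such as Anthropic cache control. A hedged sketch derived from the TSDoc examples above; the object shape and `any` typings are illustrative wiring, not the package's declared middleware type:

```ts
import { SystemMessage } from "@langchain/core/messages";

// Appends a cacheable instruction block via the new systemMessage field
// instead of mutating the deprecated systemPrompt string.
export const appendCachedInstructions = {
  name: "appendCachedInstructions",
  wrapModelCall: async (
    request: any,
    handler: (req: any) => Promise<any>
  ) => {
    return handler({
      ...request,
      systemMessage: request.systemMessage.concat(
        new SystemMessage({
          content: [
            {
              type: "text",
              text: "Always cite your sources.",
              cache_control: { type: "ephemeral", ttl: "1m" },
            },
          ],
        })
      ),
    });
  },
};
```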
@@ -1,5 +1,5 @@
 import { AgentBuiltInState, Runtime } from "../runtime.js";
-import { BaseMessage } from "@langchain/core/messages";
+import { BaseMessage, SystemMessage } from "@langchain/core/messages";
 import { ClientTool, ServerTool } from "@langchain/core/tools";
 import { LanguageModelLike } from "@langchain/core/language_models/base";
 
@@ -22,9 +22,45 @@ interface ModelRequest<TState extends Record<string, unknown> = Record<string, u
 */
 messages: BaseMessage[];
 /**
-* The system message for this step.
+* The system message string for this step.
+*
+* @default ""
+* @deprecated Use {@link ModelRequest.systemMessage} instead.
+*/
+systemPrompt: string;
+/**
+* The system message for this step. If no `systemPrompt` was provided, when `createAgent`
+* was initialized, an empty system message will be used.
+*
+* @default new SystemMessage("")
+* @example extend system message with a string
+* ```ts
+* wrapModelCall: async (request, handler) => {
+* return handler({
+* ...request,
+* systemMessage: request.systemMessage.concat("something")
+* });
+* }
+* ```
+* @example extend system message with a SystemMessage and cache control
+* ```ts
+* wrapModelCall: async (request, handler) => {
+* return handler({
+* ...request,
+* systemMessage: request.systemMessage.concat(
+* new SystemMessage({
+* content: [{
+* type: "text",
+* text: "something",
+* cache_control: { type: "ephemeral", ttl: "1m" }
+* }],
+* })
+* ),
+* });
+* }
+* ```
 */
-
+systemMessage: SystemMessage;
 /**
 * Tool choice configuration (model-specific format).
 * Can be one of:

@@ -54,20 +54,6 @@ declare class ProviderStrategy<T = unknown> {
 parse(response: AIMessage): any;
 }
 type ResponseFormat = ToolStrategy<any> | ProviderStrategy<any>;
-/**
-* Handle user input for `responseFormat` parameter of `CreateAgentParams`.
-* This function defines the default behavior for the `responseFormat` parameter, which is:
-*
-* - if value is a Zod schema, default to structured output via tool calling
-* - if value is a JSON schema, default to structured output via tool calling
-* - if value is a custom response format, return it as is
-* - if value is an array, ensure all array elements are instance of `ToolStrategy`
-* @param responseFormat - The response format to transform, provided by the user
-* @param options - The response format options for tool strategy
-* @param model - The model to check if it supports JSON schema output
-* @returns
-*/
-
 /**
 * Branded type for ToolStrategy arrays that preserves type information
 */
@@ -117,11 +103,6 @@ type JsonSchemaFormat = {
 // Brand to ensure this is not a ToolStrategy or ProviderStrategy
 __brand?: never;
 };
-/**
-* Identifies the models that support JSON schema output
-* @param model - The model to check
-* @returns True if the model supports JSON schema output, false otherwise
-*/
 //#endregion
 export { JsonSchemaFormat, ProviderStrategy, ResponseFormat, ResponseFormatUndefined, ToolStrategy, TypedToolStrategy, providerStrategy, toolStrategy };
 //# sourceMappingURL=responses.d.cts.map

@@ -54,20 +54,6 @@ declare class ProviderStrategy<T = unknown> {
 parse(response: AIMessage): any;
 }
 type ResponseFormat = ToolStrategy<any> | ProviderStrategy<any>;
-/**
-* Handle user input for `responseFormat` parameter of `CreateAgentParams`.
-* This function defines the default behavior for the `responseFormat` parameter, which is:
-*
-* - if value is a Zod schema, default to structured output via tool calling
-* - if value is a JSON schema, default to structured output via tool calling
-* - if value is a custom response format, return it as is
-* - if value is an array, ensure all array elements are instance of `ToolStrategy`
-* @param responseFormat - The response format to transform, provided by the user
-* @param options - The response format options for tool strategy
-* @param model - The model to check if it supports JSON schema output
-* @returns
-*/
-
 /**
 * Branded type for ToolStrategy arrays that preserves type information
 */
@@ -117,11 +103,6 @@ type JsonSchemaFormat = {
 // Brand to ensure this is not a ToolStrategy or ProviderStrategy
 __brand?: never;
 };
-/**
-* Identifies the models that support JSON schema output
-* @param model - The model to check
-* @returns True if the model supports JSON schema output, false otherwise
-*/
 //#endregion
 export { JsonSchemaFormat, ProviderStrategy, ResponseFormat, ResponseFormatUndefined, ToolStrategy, TypedToolStrategy, providerStrategy, toolStrategy };
 //# sourceMappingURL=responses.d.ts.map

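The doc comments removed from `responses.d.cts`/`responses.d.ts` described behavior rather than public API, and that behavior is unchanged: a Zod or JSON schema passed as `responseFormat` still defaults to structured output via tool calling. A minimal usage sketch under that assumption; the `createAgent` wiring and the model identifier are illustrative assumptions, not taken from this diff:

```ts
import { z } from "zod";
import { createAgent } from "langchain";

// A plain Zod schema as responseFormat defaults to tool-calling structured output.
const agent = createAgent({
  model: "openai:gpt-4o-mini", // assumed model identifier
  tools: [],
  responseFormat: z.object({
    answer: z.string(),
    confidence: z.number().min(0).max(1),
  }),
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "What is 2 + 2?" }],
});
console.log(result.structuredResponse); // parsed object matching the schema
```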
package/dist/agents/runtime.d.ts CHANGED
@@ -55,7 +55,16 @@ var FakeToolCallingModel = class FakeToolCallingModel extends __langchain_core_l
 let content = lastMessage.content;
 if (messages.length > 1) {
 const parts = messages.map((m) => m.content).filter(Boolean);
-content = parts.join("-");
+content = parts.map((part) => {
+if (typeof part === "string") return part;
+else if (typeof part === "object" && "text" in part) return part.text;
+else if (Array.isArray(part)) return part.map((p) => {
+if (typeof p === "string") return p;
+else if (typeof p === "object" && "text" in p) return p.text;
+return "";
+}).join("-");
+else return JSON.stringify(part);
+}).join("-");
 }
 const isStartOfConversation = messages.length === 1 || messages.length === 2 && messages.every(__langchain_core_messages.HumanMessage.isInstance);
 if (isStartOfConversation && this.index !== 0) this.index = 0;

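This test-helper change makes `FakeToolCallingModel` tolerate structured message content: string parts pass through, `{ text }` blocks contribute their text, nested arrays are flattened, and anything else is JSON-stringified before the parts are joined with `-`, presumably to keep test assertions readable when messages carry content blocks. A standalone sketch of the same flattening logic, pulled out of the class for clarity (the `flattenContent` name is mine, not the package's):

```ts
type ContentPart =
  | string
  | { type?: string; text?: string }
  | Array<string | { type?: string; text?: string }>;

// Strings pass through, text blocks contribute their text, arrays are
// flattened one level, and anything else is JSON-stringified; all parts
// are then joined with "-", matching the added helper logic.
function flattenContent(parts: ContentPart[]): string {
  return parts
    .map((part) => {
      if (typeof part === "string") return part;
      if (Array.isArray(part)) {
        return part
          .map((p) => {
            if (typeof p === "string") return p;
            if (typeof p === "object" && p !== null && "text" in p) return p.text ?? "";
            return "";
          })
          .join("-");
      }
      if (typeof part === "object" && part !== null && "text" in part) return part.text ?? "";
      return JSON.stringify(part);
    })
    .join("-");
}

// Example: mixed string and text-block content.
console.log(flattenContent(["hello", [{ type: "text", text: "world" }]])); // "hello-world"
```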
@@ -1 +1 @@
-
{"version":3,"file":"utils.cjs","names":["BaseChatModel","value: number","tools: StructuredTool[]","_schema: any","RunnableLambda","messages: BaseMessage[]","_options?: this[\"ParsedCallOptions\"]","_runManager?: CallbackManagerForLLMRun","HumanMessage","AIMessage"],"sources":["../../../src/agents/tests/utils.ts"],"sourcesContent":["/* eslint-disable @typescript-eslint/no-explicit-any */\n/* eslint-disable import/no-extraneous-dependencies */\nimport { expect } from \"vitest\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BaseChatModelCallOptions,\n BindToolsInput,\n ToolChoice,\n} from \"@langchain/core/language_models/chat_models\";\nimport { StructuredTool } from \"@langchain/core/tools\";\nimport {\n BaseMessage,\n AIMessage,\n HumanMessage,\n BaseMessageFields,\n AIMessageFields,\n ToolMessage,\n ToolMessageFields,\n} from \"@langchain/core/messages\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport {\n Runnable,\n RunnableConfig,\n RunnableLambda,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\nimport {\n MemorySaver,\n Checkpoint,\n CheckpointMetadata,\n type BaseCheckpointSaver,\n} from \"@langchain/langgraph-checkpoint\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { z } from \"zod/v3\";\n\nexport class _AnyIdAIMessage extends AIMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"AIMessage\"];\n }\n\n constructor(fields: AIMessageFields | string) {\n let fieldsWithJestMatcher: Partial<AIMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as AIMessageFields);\n }\n}\n\nexport class _AnyIdHumanMessage extends HumanMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"HumanMessage\"];\n }\n\n constructor(fields: BaseMessageFields | string) {\n let fieldsWithJestMatcher: Partial<BaseMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as BaseMessageFields);\n }\n}\n\nexport class _AnyIdToolMessage extends ToolMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"ToolMessage\"];\n }\n\n constructor(fields: ToolMessageFields) {\n const fieldsWithJestMatcher: Partial<ToolMessageFields> = {\n id: expect.any(String) as unknown as string,\n ...fields,\n };\n super(fieldsWithJestMatcher as ToolMessageFields);\n }\n}\n\nexport class FakeConfigurableModel extends BaseChatModel {\n _queuedMethodOperations: Record<string, any> = {};\n\n _chatModel: LanguageModelLike;\n\n constructor(\n fields: {\n model: LanguageModelLike;\n } & BaseChatModelParams\n ) {\n super(fields);\n this._chatModel = fields.model;\n }\n\n _llmType() {\n return \"fake_configurable\";\n }\n\n async _generate(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n throw new Error(\"Not implemented\");\n }\n\n async _model() {\n return this._chatModel;\n }\n\n bindTools(tools: BindToolsInput[]) {\n const modelWithTools = new 
FakeConfigurableModel({\n model: (this._chatModel as FakeToolCallingChatModel).bindTools(tools),\n });\n modelWithTools._queuedMethodOperations.bindTools = tools;\n return modelWithTools;\n }\n}\n\nexport class FakeToolCallingChatModel extends BaseChatModel {\n sleep?: number = 50;\n\n responses?: BaseMessage[];\n\n thrownErrorString?: string;\n\n idx: number;\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n structuredResponse?: Record<string, unknown>;\n\n // Track messages passed to structured output calls\n structuredOutputMessages: BaseMessage[][] = [];\n\n constructor(\n fields: {\n sleep?: number;\n responses?: BaseMessage[];\n thrownErrorString?: string;\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n structuredResponse?: Record<string, unknown>;\n } & BaseChatModelParams\n ) {\n super(fields);\n this.sleep = fields.sleep ?? this.sleep;\n this.responses = fields.responses;\n this.thrownErrorString = fields.thrownErrorString;\n this.idx = 0;\n this.toolStyle = fields.toolStyle ?? this.toolStyle;\n this.structuredResponse = fields.structuredResponse;\n this.structuredOutputMessages = [];\n }\n\n _llmType() {\n return \"fake\";\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.sleep !== undefined) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n const responses = this.responses?.length ? this.responses : messages;\n const msg = responses[this.idx % responses.length];\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: msg,\n },\n ],\n };\n this.idx += 1;\n\n if (typeof msg.content === \"string\") {\n await runManager?.handleLLMNewToken(msg.content);\n }\n return generation;\n }\n\n bindTools(tools: BindToolsInput[]): Runnable<any> {\n const toolDicts = [];\n const serverTools = [];\n for (const tool of tools) {\n if (!(\"name\" in tool)) {\n serverTools.push(tool);\n continue;\n }\n\n // NOTE: this is a simplified tool spec for testing purposes only\n if (this.toolStyle === \"openai\") {\n toolDicts.push({\n type: \"function\",\n function: {\n name: tool.name,\n },\n });\n } else if ([\"anthropic\", \"google\"].includes(this.toolStyle)) {\n toolDicts.push({\n name: tool.name,\n });\n } else if (this.toolStyle === \"bedrock\") {\n toolDicts.push({\n toolSpec: {\n name: tool.name,\n },\n });\n }\n }\n let toolsToBind: BindToolsInput[] = toolDicts;\n if (this.toolStyle === \"google\") {\n toolsToBind = [{ functionDeclarations: toolDicts }];\n }\n return this.withConfig({\n tools: [...toolsToBind, ...serverTools],\n } as BaseChatModelCallOptions);\n }\n\n withStructuredOutput<\n RunOutput extends Record<string, any> = Record<string, any>\n >(_: unknown): Runnable<any> {\n if (!this.structuredResponse) {\n throw new Error(\"No structured response provided\");\n }\n // Create a runnable that returns the proper structured format\n return RunnableLambda.from(async (messages: BaseMessage[]) => {\n if (this.sleep) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n\n // Store the messages that were sent to generate structured output\n this.structuredOutputMessages.push([...messages]);\n\n // Return in the format expected: { raw: BaseMessage, parsed: RunOutput }\n return this.structuredResponse as RunOutput;\n });\n }\n}\n\nexport class MemorySaverAssertImmutable 
extends MemorySaver {\n storageForCopies: Record<string, Record<string, Uint8Array>> = {};\n\n constructor() {\n super();\n this.storageForCopies = {};\n }\n\n async put(\n config: RunnableConfig,\n checkpoint: Checkpoint,\n metadata: CheckpointMetadata\n ): Promise<RunnableConfig> {\n const thread_id = config.configurable?.thread_id;\n this.storageForCopies[thread_id] ??= {};\n\n // assert checkpoint hasn't been modified since last written\n const saved = await this.get(config);\n if (saved) {\n const savedId = saved.id;\n if (this.storageForCopies[thread_id][savedId]) {\n const loaded = await this.serde.loadsTyped(\n \"json\",\n this.storageForCopies[thread_id][savedId]\n );\n\n expect(\n saved,\n `Checkpoint [${savedId}] has been modified since last written`\n ).toEqual(loaded);\n }\n }\n const [, serializedCheckpoint] = await this.serde.dumpsTyped(checkpoint);\n // save a copy of the checkpoint\n this.storageForCopies[thread_id][checkpoint.id] = serializedCheckpoint;\n\n return super.put(config, checkpoint, metadata);\n }\n}\n\ninterface ToolCall {\n name: string;\n args: Record<string, any>;\n id: string;\n type?: \"tool_call\";\n}\n\ninterface FakeToolCallingModelFields {\n toolCalls?: ToolCall[][];\n toolStyle?: \"openai\" | \"anthropic\";\n index?: number;\n structuredResponse?: any;\n}\n\n// Helper function to create checkpointer\nexport function createCheckpointer(): BaseCheckpointSaver {\n return new MemorySaver();\n}\n\n/**\n * Fake chat model for testing tool calling functionality\n */\nexport class FakeToolCallingModel extends BaseChatModel {\n toolCalls: ToolCall[][];\n\n toolStyle: \"openai\" | \"anthropic\";\n\n // Use a shared reference object so the index persists across bindTools calls\n private indexRef: { current: number };\n\n structuredResponse?: any;\n\n private tools: StructuredTool[] = [];\n\n constructor({\n toolCalls = [],\n toolStyle = \"openai\",\n index = 0,\n structuredResponse,\n indexRef,\n ...rest\n }: FakeToolCallingModelFields & { indexRef?: { current: number } } = {}) {\n super(rest);\n this.toolCalls = toolCalls;\n this.toolStyle = toolStyle;\n // Share the same index reference across instances\n this.indexRef = indexRef ?? 
{ current: index };\n this.structuredResponse = structuredResponse;\n }\n\n // Getter/setter for backwards compatibility\n get index(): number {\n return this.indexRef.current;\n }\n\n set index(value: number) {\n this.indexRef.current = value;\n }\n\n _llmType(): string {\n return \"fake-tool-calling\";\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n bindTools(\n tools: StructuredTool[]\n ):\n | FakeToolCallingModel\n | RunnableBinding<\n any,\n any,\n any & { tool_choice?: ToolChoice | undefined }\n > {\n const newInstance = new FakeToolCallingModel({\n toolCalls: this.toolCalls,\n toolStyle: this.toolStyle,\n structuredResponse: this.structuredResponse,\n // Pass the same indexRef so all instances share the same index state\n indexRef: this.indexRef,\n });\n newInstance.tools = [...this.tools, ...tools];\n return newInstance;\n }\n\n withStructuredOutput(_schema: any) {\n return new RunnableLambda({\n func: async () => {\n return this.structuredResponse;\n },\n });\n }\n\n async _generate(\n messages: BaseMessage[],\n _options?: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const lastMessage = messages[messages.length - 1];\n let content = lastMessage.content as string;\n\n // Handle prompt concatenation\n if (messages.length > 1) {\n const parts = messages.map((m) => m.content).filter(Boolean);\n content = parts.join(\"-\");\n }\n\n // Reset index at the start of a new conversation (only human message)\n // This allows the model to be reused across multiple agent.invoke() calls\n const isStartOfConversation =\n messages.length === 1 ||\n (messages.length === 2 && messages.every(HumanMessage.isInstance));\n if (isStartOfConversation && this.index !== 0) {\n this.index = 0;\n }\n\n const currentToolCalls = this.toolCalls[this.index] || [];\n const messageId = this.index.toString();\n\n // Move to next set of tool calls for subsequent invocations\n this.index = (this.index + 1) % Math.max(1, this.toolCalls.length);\n\n const message = new AIMessage({\n content,\n id: messageId,\n tool_calls:\n currentToolCalls.length > 0\n ? 
currentToolCalls.map((tc) => ({\n ...tc,\n type: \"tool_call\" as const,\n }))\n : undefined,\n });\n\n return {\n generations: [\n {\n text: content,\n message,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class SearchAPI extends StructuredTool {\n name = \"search_api\";\n\n description = \"A simple API that returns the input string.\";\n\n schema = z.object({\n query: z.string().describe(\"The query to search for.\"),\n });\n\n async _call(input: z.infer<typeof this.schema>) {\n if (input?.query === \"error\") {\n throw new Error(\"Error\");\n }\n return `result for ${input?.query}`;\n }\n}\n"],"mappings":";;;;;;;;;;;;AAuUA,IAAa,uBAAb,MAAa,6BAA6BA,2DAAc;CACtD;CAEA;CAGA,AAAQ;CAER;CAEA,AAAQ,QAA0B,CAAE;CAEpC,YAAY,EACV,YAAY,CAAE,GACd,YAAY,UACZ,QAAQ,GACR,oBACA,SACA,GAAG,MAC6D,GAAG,CAAE,GAAE;EACvE,MAAM,KAAK;EACX,KAAK,YAAY;EACjB,KAAK,YAAY;EAEjB,KAAK,WAAW,YAAY,EAAE,SAAS,MAAO;EAC9C,KAAK,qBAAqB;CAC3B;CAGD,IAAI,QAAgB;AAClB,SAAO,KAAK,SAAS;CACtB;CAED,IAAI,MAAMC,OAAe;EACvB,KAAK,SAAS,UAAU;CACzB;CAED,WAAmB;AACjB,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO,CAAE;CACV;CAED,UACEC,OAOI;EACJ,MAAM,cAAc,IAAI,qBAAqB;GAC3C,WAAW,KAAK;GAChB,WAAW,KAAK;GAChB,oBAAoB,KAAK;GAEzB,UAAU,KAAK;EAChB;EACD,YAAY,QAAQ,CAAC,GAAG,KAAK,OAAO,GAAG,KAAM;AAC7C,SAAO;CACR;CAED,qBAAqBC,SAAc;AACjC,SAAO,IAAIC,0CAAe,EACxB,MAAM,YAAY;AAChB,UAAO,KAAK;EACb,EACF;CACF;CAED,MAAM,UACJC,UACAC,UACAC,aACqB;EACrB,MAAM,cAAc,SAAS,SAAS,SAAS;EAC/C,IAAI,UAAU,YAAY;AAG1B,MAAI,SAAS,SAAS,GAAG;GACvB,MAAM,QAAQ,SAAS,IAAI,CAAC,MAAM,EAAE,QAAQ,CAAC,OAAO,QAAQ;GAC5D,UAAU,MAAM,KAAK,IAAI;EAC1B;EAID,MAAM,wBACJ,SAAS,WAAW,KACnB,SAAS,WAAW,KAAK,SAAS,MAAMC,uCAAa,WAAW;AACnE,MAAI,yBAAyB,KAAK,UAAU,GAC1C,KAAK,QAAQ;EAGf,MAAM,mBAAmB,KAAK,UAAU,KAAK,UAAU,CAAE;EACzD,MAAM,YAAY,KAAK,MAAM,UAAU;EAGvC,KAAK,SAAS,KAAK,QAAQ,KAAK,KAAK,IAAI,GAAG,KAAK,UAAU,OAAO;EAElE,MAAM,UAAU,IAAIC,oCAAU;GAC5B;GACA,IAAI;GACJ,YACE,iBAAiB,SAAS,IACtB,iBAAiB,IAAI,CAAC,QAAQ;IAC5B,GAAG;IACH,MAAM;GACP,GAAE,GACH;EACP;AAED,SAAO;GACL,aAAa,CACX;IACE,MAAM;IACN;GACD,CACF;GACD,WAAW,CAAE;EACd;CACF;AACF"}
+
{"version":3,"file":"utils.cjs","names":["BaseChatModel","value: number","tools: StructuredTool[]","_schema: any","RunnableLambda","messages: BaseMessage[]","_options?: this[\"ParsedCallOptions\"]","_runManager?: CallbackManagerForLLMRun","HumanMessage","AIMessage"],"sources":["../../../src/agents/tests/utils.ts"],"sourcesContent":["/* eslint-disable @typescript-eslint/no-explicit-any */\n/* eslint-disable import/no-extraneous-dependencies */\nimport { expect } from \"vitest\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BaseChatModelCallOptions,\n BindToolsInput,\n ToolChoice,\n} from \"@langchain/core/language_models/chat_models\";\nimport { StructuredTool } from \"@langchain/core/tools\";\nimport {\n BaseMessage,\n AIMessage,\n HumanMessage,\n BaseMessageFields,\n AIMessageFields,\n ToolMessage,\n ToolMessageFields,\n} from \"@langchain/core/messages\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport {\n Runnable,\n RunnableConfig,\n RunnableLambda,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\nimport {\n MemorySaver,\n Checkpoint,\n CheckpointMetadata,\n type BaseCheckpointSaver,\n} from \"@langchain/langgraph-checkpoint\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { z } from \"zod/v3\";\n\nexport class _AnyIdAIMessage extends AIMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"AIMessage\"];\n }\n\n constructor(fields: AIMessageFields | string) {\n let fieldsWithJestMatcher: Partial<AIMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as AIMessageFields);\n }\n}\n\nexport class _AnyIdHumanMessage extends HumanMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"HumanMessage\"];\n }\n\n constructor(fields: BaseMessageFields | string) {\n let fieldsWithJestMatcher: Partial<BaseMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as BaseMessageFields);\n }\n}\n\nexport class _AnyIdToolMessage extends ToolMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"ToolMessage\"];\n }\n\n constructor(fields: ToolMessageFields) {\n const fieldsWithJestMatcher: Partial<ToolMessageFields> = {\n id: expect.any(String) as unknown as string,\n ...fields,\n };\n super(fieldsWithJestMatcher as ToolMessageFields);\n }\n}\n\nexport class FakeConfigurableModel extends BaseChatModel {\n _queuedMethodOperations: Record<string, any> = {};\n\n _chatModel: LanguageModelLike;\n\n constructor(\n fields: {\n model: LanguageModelLike;\n } & BaseChatModelParams\n ) {\n super(fields);\n this._chatModel = fields.model;\n }\n\n _llmType() {\n return \"fake_configurable\";\n }\n\n async _generate(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n throw new Error(\"Not implemented\");\n }\n\n async _model() {\n return this._chatModel;\n }\n\n bindTools(tools: BindToolsInput[]) {\n const modelWithTools = new 
FakeConfigurableModel({\n model: (this._chatModel as FakeToolCallingChatModel).bindTools(tools),\n });\n modelWithTools._queuedMethodOperations.bindTools = tools;\n return modelWithTools;\n }\n}\n\nexport class FakeToolCallingChatModel extends BaseChatModel {\n sleep?: number = 50;\n\n responses?: BaseMessage[];\n\n thrownErrorString?: string;\n\n idx: number;\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n structuredResponse?: Record<string, unknown>;\n\n // Track messages passed to structured output calls\n structuredOutputMessages: BaseMessage[][] = [];\n\n constructor(\n fields: {\n sleep?: number;\n responses?: BaseMessage[];\n thrownErrorString?: string;\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n structuredResponse?: Record<string, unknown>;\n } & BaseChatModelParams\n ) {\n super(fields);\n this.sleep = fields.sleep ?? this.sleep;\n this.responses = fields.responses;\n this.thrownErrorString = fields.thrownErrorString;\n this.idx = 0;\n this.toolStyle = fields.toolStyle ?? this.toolStyle;\n this.structuredResponse = fields.structuredResponse;\n this.structuredOutputMessages = [];\n }\n\n _llmType() {\n return \"fake\";\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.sleep !== undefined) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n const responses = this.responses?.length ? this.responses : messages;\n const msg = responses[this.idx % responses.length];\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: msg,\n },\n ],\n };\n this.idx += 1;\n\n if (typeof msg.content === \"string\") {\n await runManager?.handleLLMNewToken(msg.content);\n }\n return generation;\n }\n\n bindTools(tools: BindToolsInput[]): Runnable<any> {\n const toolDicts = [];\n const serverTools = [];\n for (const tool of tools) {\n if (!(\"name\" in tool)) {\n serverTools.push(tool);\n continue;\n }\n\n // NOTE: this is a simplified tool spec for testing purposes only\n if (this.toolStyle === \"openai\") {\n toolDicts.push({\n type: \"function\",\n function: {\n name: tool.name,\n },\n });\n } else if ([\"anthropic\", \"google\"].includes(this.toolStyle)) {\n toolDicts.push({\n name: tool.name,\n });\n } else if (this.toolStyle === \"bedrock\") {\n toolDicts.push({\n toolSpec: {\n name: tool.name,\n },\n });\n }\n }\n let toolsToBind: BindToolsInput[] = toolDicts;\n if (this.toolStyle === \"google\") {\n toolsToBind = [{ functionDeclarations: toolDicts }];\n }\n return this.withConfig({\n tools: [...toolsToBind, ...serverTools],\n } as BaseChatModelCallOptions);\n }\n\n withStructuredOutput<\n RunOutput extends Record<string, any> = Record<string, any>\n >(_: unknown): Runnable<any> {\n if (!this.structuredResponse) {\n throw new Error(\"No structured response provided\");\n }\n // Create a runnable that returns the proper structured format\n return RunnableLambda.from(async (messages: BaseMessage[]) => {\n if (this.sleep) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n\n // Store the messages that were sent to generate structured output\n this.structuredOutputMessages.push([...messages]);\n\n // Return in the format expected: { raw: BaseMessage, parsed: RunOutput }\n return this.structuredResponse as RunOutput;\n });\n }\n}\n\nexport class MemorySaverAssertImmutable 
extends MemorySaver {\n storageForCopies: Record<string, Record<string, Uint8Array>> = {};\n\n constructor() {\n super();\n this.storageForCopies = {};\n }\n\n async put(\n config: RunnableConfig,\n checkpoint: Checkpoint,\n metadata: CheckpointMetadata\n ): Promise<RunnableConfig> {\n const thread_id = config.configurable?.thread_id;\n this.storageForCopies[thread_id] ??= {};\n\n // assert checkpoint hasn't been modified since last written\n const saved = await this.get(config);\n if (saved) {\n const savedId = saved.id;\n if (this.storageForCopies[thread_id][savedId]) {\n const loaded = await this.serde.loadsTyped(\n \"json\",\n this.storageForCopies[thread_id][savedId]\n );\n\n expect(\n saved,\n `Checkpoint [${savedId}] has been modified since last written`\n ).toEqual(loaded);\n }\n }\n const [, serializedCheckpoint] = await this.serde.dumpsTyped(checkpoint);\n // save a copy of the checkpoint\n this.storageForCopies[thread_id][checkpoint.id] = serializedCheckpoint;\n\n return super.put(config, checkpoint, metadata);\n }\n}\n\ninterface ToolCall {\n name: string;\n args: Record<string, any>;\n id: string;\n type?: \"tool_call\";\n}\n\ninterface FakeToolCallingModelFields {\n toolCalls?: ToolCall[][];\n toolStyle?: \"openai\" | \"anthropic\";\n index?: number;\n structuredResponse?: any;\n}\n\n// Helper function to create checkpointer\nexport function createCheckpointer(): BaseCheckpointSaver {\n return new MemorySaver();\n}\n\n/**\n * Fake chat model for testing tool calling functionality\n */\nexport class FakeToolCallingModel extends BaseChatModel {\n toolCalls: ToolCall[][];\n\n toolStyle: \"openai\" | \"anthropic\";\n\n // Use a shared reference object so the index persists across bindTools calls\n private indexRef: { current: number };\n\n structuredResponse?: any;\n\n private tools: StructuredTool[] = [];\n\n constructor({\n toolCalls = [],\n toolStyle = \"openai\",\n index = 0,\n structuredResponse,\n indexRef,\n ...rest\n }: FakeToolCallingModelFields & { indexRef?: { current: number } } = {}) {\n super(rest);\n this.toolCalls = toolCalls;\n this.toolStyle = toolStyle;\n // Share the same index reference across instances\n this.indexRef = indexRef ?? 
{ current: index };\n this.structuredResponse = structuredResponse;\n }\n\n // Getter/setter for backwards compatibility\n get index(): number {\n return this.indexRef.current;\n }\n\n set index(value: number) {\n this.indexRef.current = value;\n }\n\n _llmType(): string {\n return \"fake-tool-calling\";\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n bindTools(\n tools: StructuredTool[]\n ):\n | FakeToolCallingModel\n | RunnableBinding<\n any,\n any,\n any & { tool_choice?: ToolChoice | undefined }\n > {\n const newInstance = new FakeToolCallingModel({\n toolCalls: this.toolCalls,\n toolStyle: this.toolStyle,\n structuredResponse: this.structuredResponse,\n // Pass the same indexRef so all instances share the same index state\n indexRef: this.indexRef,\n });\n newInstance.tools = [...this.tools, ...tools];\n return newInstance;\n }\n\n withStructuredOutput(_schema: any) {\n return new RunnableLambda({\n func: async () => {\n return this.structuredResponse;\n },\n });\n }\n\n async _generate(\n messages: BaseMessage[],\n _options?: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const lastMessage = messages[messages.length - 1];\n let content = lastMessage.content as string;\n\n // Handle prompt concatenation\n if (messages.length > 1) {\n const parts = messages.map((m) => m.content).filter(Boolean);\n content = parts\n .map((part) => {\n if (typeof part === \"string\") {\n return part;\n } else if (typeof part === \"object\" && \"text\" in part) {\n return part.text;\n } else if (Array.isArray(part)) {\n return part\n .map((p) => {\n if (typeof p === \"string\") {\n return p;\n } else if (typeof p === \"object\" && \"text\" in p) {\n return p.text;\n }\n return \"\";\n })\n .join(\"-\");\n } else {\n return JSON.stringify(part);\n }\n })\n .join(\"-\");\n }\n\n // Reset index at the start of a new conversation (only human message)\n // This allows the model to be reused across multiple agent.invoke() calls\n const isStartOfConversation =\n messages.length === 1 ||\n (messages.length === 2 && messages.every(HumanMessage.isInstance));\n if (isStartOfConversation && this.index !== 0) {\n this.index = 0;\n }\n\n const currentToolCalls = this.toolCalls[this.index] || [];\n const messageId = this.index.toString();\n\n // Move to next set of tool calls for subsequent invocations\n this.index = (this.index + 1) % Math.max(1, this.toolCalls.length);\n\n const message = new AIMessage({\n content,\n id: messageId,\n tool_calls:\n currentToolCalls.length > 0\n ? 
currentToolCalls.map((tc) => ({\n ...tc,\n type: \"tool_call\" as const,\n }))\n : undefined,\n });\n\n return {\n generations: [\n {\n text: content,\n message,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class SearchAPI extends StructuredTool {\n name = \"search_api\";\n\n description = \"A simple API that returns the input string.\";\n\n schema = z.object({\n query: z.string().describe(\"The query to search for.\"),\n });\n\n async _call(input: z.infer<typeof this.schema>) {\n if (input?.query === \"error\") {\n throw new Error(\"Error\");\n }\n return `result for ${input?.query}`;\n }\n}\n"],"mappings":";;;;;;;;;;;;AAuUA,IAAa,uBAAb,MAAa,6BAA6BA,2DAAc;CACtD;CAEA;CAGA,AAAQ;CAER;CAEA,AAAQ,QAA0B,CAAE;CAEpC,YAAY,EACV,YAAY,CAAE,GACd,YAAY,UACZ,QAAQ,GACR,oBACA,SACA,GAAG,MAC6D,GAAG,CAAE,GAAE;EACvE,MAAM,KAAK;EACX,KAAK,YAAY;EACjB,KAAK,YAAY;EAEjB,KAAK,WAAW,YAAY,EAAE,SAAS,MAAO;EAC9C,KAAK,qBAAqB;CAC3B;CAGD,IAAI,QAAgB;AAClB,SAAO,KAAK,SAAS;CACtB;CAED,IAAI,MAAMC,OAAe;EACvB,KAAK,SAAS,UAAU;CACzB;CAED,WAAmB;AACjB,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO,CAAE;CACV;CAED,UACEC,OAOI;EACJ,MAAM,cAAc,IAAI,qBAAqB;GAC3C,WAAW,KAAK;GAChB,WAAW,KAAK;GAChB,oBAAoB,KAAK;GAEzB,UAAU,KAAK;EAChB;EACD,YAAY,QAAQ,CAAC,GAAG,KAAK,OAAO,GAAG,KAAM;AAC7C,SAAO;CACR;CAED,qBAAqBC,SAAc;AACjC,SAAO,IAAIC,0CAAe,EACxB,MAAM,YAAY;AAChB,UAAO,KAAK;EACb,EACF;CACF;CAED,MAAM,UACJC,UACAC,UACAC,aACqB;EACrB,MAAM,cAAc,SAAS,SAAS,SAAS;EAC/C,IAAI,UAAU,YAAY;AAG1B,MAAI,SAAS,SAAS,GAAG;GACvB,MAAM,QAAQ,SAAS,IAAI,CAAC,MAAM,EAAE,QAAQ,CAAC,OAAO,QAAQ;GAC5D,UAAU,MACP,IAAI,CAAC,SAAS;AACb,QAAI,OAAO,SAAS,SAClB,QAAO;aACE,OAAO,SAAS,YAAY,UAAU,KAC/C,QAAO,KAAK;aACH,MAAM,QAAQ,KAAK,CAC5B,QAAO,KACJ,IAAI,CAAC,MAAM;AACV,SAAI,OAAO,MAAM,SACf,QAAO;cACE,OAAO,MAAM,YAAY,UAAU,EAC5C,QAAO,EAAE;AAEX,YAAO;IACR,EAAC,CACD,KAAK,IAAI;QAEZ,QAAO,KAAK,UAAU,KAAK;GAE9B,EAAC,CACD,KAAK,IAAI;EACb;EAID,MAAM,wBACJ,SAAS,WAAW,KACnB,SAAS,WAAW,KAAK,SAAS,MAAMC,uCAAa,WAAW;AACnE,MAAI,yBAAyB,KAAK,UAAU,GAC1C,KAAK,QAAQ;EAGf,MAAM,mBAAmB,KAAK,UAAU,KAAK,UAAU,CAAE;EACzD,MAAM,YAAY,KAAK,MAAM,UAAU;EAGvC,KAAK,SAAS,KAAK,QAAQ,KAAK,KAAK,IAAI,GAAG,KAAK,UAAU,OAAO;EAElE,MAAM,UAAU,IAAIC,oCAAU;GAC5B;GACA,IAAI;GACJ,YACE,iBAAiB,SAAS,IACtB,iBAAiB,IAAI,CAAC,QAAQ;IAC5B,GAAG;IACH,MAAM;GACP,GAAE,GACH;EACP;AAED,SAAO;GACL,aAAa,CACX;IACE,MAAM;IACN;GACD,CACF;GACD,WAAW,CAAE;EACd;CACF;AACF"}
@@ -54,7 +54,16 @@ var FakeToolCallingModel = class FakeToolCallingModel extends BaseChatModel {
 let content = lastMessage.content;
 if (messages.length > 1) {
 const parts = messages.map((m) => m.content).filter(Boolean);
-content = parts.join("-");
+content = parts.map((part) => {
+if (typeof part === "string") return part;
+else if (typeof part === "object" && "text" in part) return part.text;
+else if (Array.isArray(part)) return part.map((p) => {
+if (typeof p === "string") return p;
+else if (typeof p === "object" && "text" in p) return p.text;
+return "";
+}).join("-");
+else return JSON.stringify(part);
+}).join("-");
 }
 const isStartOfConversation = messages.length === 1 || messages.length === 2 && messages.every(HumanMessage.isInstance);
 if (isStartOfConversation && this.index !== 0) this.index = 0;

@@ -1 +1 @@
-
{"version":3,"file":"utils.js","names":["value: number","tools: StructuredTool[]","_schema: any","messages: BaseMessage[]","_options?: this[\"ParsedCallOptions\"]","_runManager?: CallbackManagerForLLMRun"],"sources":["../../../src/agents/tests/utils.ts"],"sourcesContent":["/* eslint-disable @typescript-eslint/no-explicit-any */\n/* eslint-disable import/no-extraneous-dependencies */\nimport { expect } from \"vitest\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BaseChatModelCallOptions,\n BindToolsInput,\n ToolChoice,\n} from \"@langchain/core/language_models/chat_models\";\nimport { StructuredTool } from \"@langchain/core/tools\";\nimport {\n BaseMessage,\n AIMessage,\n HumanMessage,\n BaseMessageFields,\n AIMessageFields,\n ToolMessage,\n ToolMessageFields,\n} from \"@langchain/core/messages\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport {\n Runnable,\n RunnableConfig,\n RunnableLambda,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\nimport {\n MemorySaver,\n Checkpoint,\n CheckpointMetadata,\n type BaseCheckpointSaver,\n} from \"@langchain/langgraph-checkpoint\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { z } from \"zod/v3\";\n\nexport class _AnyIdAIMessage extends AIMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"AIMessage\"];\n }\n\n constructor(fields: AIMessageFields | string) {\n let fieldsWithJestMatcher: Partial<AIMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as AIMessageFields);\n }\n}\n\nexport class _AnyIdHumanMessage extends HumanMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"HumanMessage\"];\n }\n\n constructor(fields: BaseMessageFields | string) {\n let fieldsWithJestMatcher: Partial<BaseMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as BaseMessageFields);\n }\n}\n\nexport class _AnyIdToolMessage extends ToolMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"ToolMessage\"];\n }\n\n constructor(fields: ToolMessageFields) {\n const fieldsWithJestMatcher: Partial<ToolMessageFields> = {\n id: expect.any(String) as unknown as string,\n ...fields,\n };\n super(fieldsWithJestMatcher as ToolMessageFields);\n }\n}\n\nexport class FakeConfigurableModel extends BaseChatModel {\n _queuedMethodOperations: Record<string, any> = {};\n\n _chatModel: LanguageModelLike;\n\n constructor(\n fields: {\n model: LanguageModelLike;\n } & BaseChatModelParams\n ) {\n super(fields);\n this._chatModel = fields.model;\n }\n\n _llmType() {\n return \"fake_configurable\";\n }\n\n async _generate(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n throw new Error(\"Not implemented\");\n }\n\n async _model() {\n return this._chatModel;\n }\n\n bindTools(tools: BindToolsInput[]) {\n const modelWithTools = new FakeConfigurableModel({\n model: (this._chatModel as 
FakeToolCallingChatModel).bindTools(tools),\n });\n modelWithTools._queuedMethodOperations.bindTools = tools;\n return modelWithTools;\n }\n}\n\nexport class FakeToolCallingChatModel extends BaseChatModel {\n sleep?: number = 50;\n\n responses?: BaseMessage[];\n\n thrownErrorString?: string;\n\n idx: number;\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n structuredResponse?: Record<string, unknown>;\n\n // Track messages passed to structured output calls\n structuredOutputMessages: BaseMessage[][] = [];\n\n constructor(\n fields: {\n sleep?: number;\n responses?: BaseMessage[];\n thrownErrorString?: string;\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n structuredResponse?: Record<string, unknown>;\n } & BaseChatModelParams\n ) {\n super(fields);\n this.sleep = fields.sleep ?? this.sleep;\n this.responses = fields.responses;\n this.thrownErrorString = fields.thrownErrorString;\n this.idx = 0;\n this.toolStyle = fields.toolStyle ?? this.toolStyle;\n this.structuredResponse = fields.structuredResponse;\n this.structuredOutputMessages = [];\n }\n\n _llmType() {\n return \"fake\";\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.sleep !== undefined) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n const responses = this.responses?.length ? this.responses : messages;\n const msg = responses[this.idx % responses.length];\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: msg,\n },\n ],\n };\n this.idx += 1;\n\n if (typeof msg.content === \"string\") {\n await runManager?.handleLLMNewToken(msg.content);\n }\n return generation;\n }\n\n bindTools(tools: BindToolsInput[]): Runnable<any> {\n const toolDicts = [];\n const serverTools = [];\n for (const tool of tools) {\n if (!(\"name\" in tool)) {\n serverTools.push(tool);\n continue;\n }\n\n // NOTE: this is a simplified tool spec for testing purposes only\n if (this.toolStyle === \"openai\") {\n toolDicts.push({\n type: \"function\",\n function: {\n name: tool.name,\n },\n });\n } else if ([\"anthropic\", \"google\"].includes(this.toolStyle)) {\n toolDicts.push({\n name: tool.name,\n });\n } else if (this.toolStyle === \"bedrock\") {\n toolDicts.push({\n toolSpec: {\n name: tool.name,\n },\n });\n }\n }\n let toolsToBind: BindToolsInput[] = toolDicts;\n if (this.toolStyle === \"google\") {\n toolsToBind = [{ functionDeclarations: toolDicts }];\n }\n return this.withConfig({\n tools: [...toolsToBind, ...serverTools],\n } as BaseChatModelCallOptions);\n }\n\n withStructuredOutput<\n RunOutput extends Record<string, any> = Record<string, any>\n >(_: unknown): Runnable<any> {\n if (!this.structuredResponse) {\n throw new Error(\"No structured response provided\");\n }\n // Create a runnable that returns the proper structured format\n return RunnableLambda.from(async (messages: BaseMessage[]) => {\n if (this.sleep) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n\n // Store the messages that were sent to generate structured output\n this.structuredOutputMessages.push([...messages]);\n\n // Return in the format expected: { raw: BaseMessage, parsed: RunOutput }\n return this.structuredResponse as RunOutput;\n });\n }\n}\n\nexport class MemorySaverAssertImmutable extends MemorySaver {\n storageForCopies: 
Record<string, Record<string, Uint8Array>> = {};\n\n constructor() {\n super();\n this.storageForCopies = {};\n }\n\n async put(\n config: RunnableConfig,\n checkpoint: Checkpoint,\n metadata: CheckpointMetadata\n ): Promise<RunnableConfig> {\n const thread_id = config.configurable?.thread_id;\n this.storageForCopies[thread_id] ??= {};\n\n // assert checkpoint hasn't been modified since last written\n const saved = await this.get(config);\n if (saved) {\n const savedId = saved.id;\n if (this.storageForCopies[thread_id][savedId]) {\n const loaded = await this.serde.loadsTyped(\n \"json\",\n this.storageForCopies[thread_id][savedId]\n );\n\n expect(\n saved,\n `Checkpoint [${savedId}] has been modified since last written`\n ).toEqual(loaded);\n }\n }\n const [, serializedCheckpoint] = await this.serde.dumpsTyped(checkpoint);\n // save a copy of the checkpoint\n this.storageForCopies[thread_id][checkpoint.id] = serializedCheckpoint;\n\n return super.put(config, checkpoint, metadata);\n }\n}\n\ninterface ToolCall {\n name: string;\n args: Record<string, any>;\n id: string;\n type?: \"tool_call\";\n}\n\ninterface FakeToolCallingModelFields {\n toolCalls?: ToolCall[][];\n toolStyle?: \"openai\" | \"anthropic\";\n index?: number;\n structuredResponse?: any;\n}\n\n// Helper function to create checkpointer\nexport function createCheckpointer(): BaseCheckpointSaver {\n return new MemorySaver();\n}\n\n/**\n * Fake chat model for testing tool calling functionality\n */\nexport class FakeToolCallingModel extends BaseChatModel {\n toolCalls: ToolCall[][];\n\n toolStyle: \"openai\" | \"anthropic\";\n\n // Use a shared reference object so the index persists across bindTools calls\n private indexRef: { current: number };\n\n structuredResponse?: any;\n\n private tools: StructuredTool[] = [];\n\n constructor({\n toolCalls = [],\n toolStyle = \"openai\",\n index = 0,\n structuredResponse,\n indexRef,\n ...rest\n }: FakeToolCallingModelFields & { indexRef?: { current: number } } = {}) {\n super(rest);\n this.toolCalls = toolCalls;\n this.toolStyle = toolStyle;\n // Share the same index reference across instances\n this.indexRef = indexRef ?? 
{ current: index };\n this.structuredResponse = structuredResponse;\n }\n\n // Getter/setter for backwards compatibility\n get index(): number {\n return this.indexRef.current;\n }\n\n set index(value: number) {\n this.indexRef.current = value;\n }\n\n _llmType(): string {\n return \"fake-tool-calling\";\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n bindTools(\n tools: StructuredTool[]\n ):\n | FakeToolCallingModel\n | RunnableBinding<\n any,\n any,\n any & { tool_choice?: ToolChoice | undefined }\n > {\n const newInstance = new FakeToolCallingModel({\n toolCalls: this.toolCalls,\n toolStyle: this.toolStyle,\n structuredResponse: this.structuredResponse,\n // Pass the same indexRef so all instances share the same index state\n indexRef: this.indexRef,\n });\n newInstance.tools = [...this.tools, ...tools];\n return newInstance;\n }\n\n withStructuredOutput(_schema: any) {\n return new RunnableLambda({\n func: async () => {\n return this.structuredResponse;\n },\n });\n }\n\n async _generate(\n messages: BaseMessage[],\n _options?: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const lastMessage = messages[messages.length - 1];\n let content = lastMessage.content as string;\n\n // Handle prompt concatenation\n if (messages.length > 1) {\n const parts = messages.map((m) => m.content).filter(Boolean);\n content = parts.join(\"-\");\n }\n\n // Reset index at the start of a new conversation (only human message)\n // This allows the model to be reused across multiple agent.invoke() calls\n const isStartOfConversation =\n messages.length === 1 ||\n (messages.length === 2 && messages.every(HumanMessage.isInstance));\n if (isStartOfConversation && this.index !== 0) {\n this.index = 0;\n }\n\n const currentToolCalls = this.toolCalls[this.index] || [];\n const messageId = this.index.toString();\n\n // Move to next set of tool calls for subsequent invocations\n this.index = (this.index + 1) % Math.max(1, this.toolCalls.length);\n\n const message = new AIMessage({\n content,\n id: messageId,\n tool_calls:\n currentToolCalls.length > 0\n ? 
currentToolCalls.map((tc) => ({\n ...tc,\n type: \"tool_call\" as const,\n }))\n : undefined,\n });\n\n return {\n generations: [\n {\n text: content,\n message,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class SearchAPI extends StructuredTool {\n name = \"search_api\";\n\n description = \"A simple API that returns the input string.\";\n\n schema = z.object({\n query: z.string().describe(\"The query to search for.\"),\n });\n\n async _call(input: z.infer<typeof this.schema>) {\n if (input?.query === \"error\") {\n throw new Error(\"Error\");\n }\n return `result for ${input?.query}`;\n }\n}\n"],"mappings":";;;;;;;;;;;AAuUA,IAAa,uBAAb,MAAa,6BAA6B,cAAc;CACtD;CAEA;CAGA,AAAQ;CAER;CAEA,AAAQ,QAA0B,CAAE;CAEpC,YAAY,EACV,YAAY,CAAE,GACd,YAAY,UACZ,QAAQ,GACR,oBACA,SACA,GAAG,MAC6D,GAAG,CAAE,GAAE;EACvE,MAAM,KAAK;EACX,KAAK,YAAY;EACjB,KAAK,YAAY;EAEjB,KAAK,WAAW,YAAY,EAAE,SAAS,MAAO;EAC9C,KAAK,qBAAqB;CAC3B;CAGD,IAAI,QAAgB;AAClB,SAAO,KAAK,SAAS;CACtB;CAED,IAAI,MAAMA,OAAe;EACvB,KAAK,SAAS,UAAU;CACzB;CAED,WAAmB;AACjB,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO,CAAE;CACV;CAED,UACEC,OAOI;EACJ,MAAM,cAAc,IAAI,qBAAqB;GAC3C,WAAW,KAAK;GAChB,WAAW,KAAK;GAChB,oBAAoB,KAAK;GAEzB,UAAU,KAAK;EAChB;EACD,YAAY,QAAQ,CAAC,GAAG,KAAK,OAAO,GAAG,KAAM;AAC7C,SAAO;CACR;CAED,qBAAqBC,SAAc;AACjC,SAAO,IAAI,eAAe,EACxB,MAAM,YAAY;AAChB,UAAO,KAAK;EACb,EACF;CACF;CAED,MAAM,UACJC,UACAC,UACAC,aACqB;EACrB,MAAM,cAAc,SAAS,SAAS,SAAS;EAC/C,IAAI,UAAU,YAAY;AAG1B,MAAI,SAAS,SAAS,GAAG;GACvB,MAAM,QAAQ,SAAS,IAAI,CAAC,MAAM,EAAE,QAAQ,CAAC,OAAO,QAAQ;GAC5D,UAAU,MAAM,KAAK,IAAI;EAC1B;EAID,MAAM,wBACJ,SAAS,WAAW,KACnB,SAAS,WAAW,KAAK,SAAS,MAAM,aAAa,WAAW;AACnE,MAAI,yBAAyB,KAAK,UAAU,GAC1C,KAAK,QAAQ;EAGf,MAAM,mBAAmB,KAAK,UAAU,KAAK,UAAU,CAAE;EACzD,MAAM,YAAY,KAAK,MAAM,UAAU;EAGvC,KAAK,SAAS,KAAK,QAAQ,KAAK,KAAK,IAAI,GAAG,KAAK,UAAU,OAAO;EAElE,MAAM,UAAU,IAAI,UAAU;GAC5B;GACA,IAAI;GACJ,YACE,iBAAiB,SAAS,IACtB,iBAAiB,IAAI,CAAC,QAAQ;IAC5B,GAAG;IACH,MAAM;GACP,GAAE,GACH;EACP;AAED,SAAO;GACL,aAAa,CACX;IACE,MAAM;IACN;GACD,CACF;GACD,WAAW,CAAE;EACd;CACF;AACF"}
+
{"version":3,"file":"utils.js","names":["value: number","tools: StructuredTool[]","_schema: any","messages: BaseMessage[]","_options?: this[\"ParsedCallOptions\"]","_runManager?: CallbackManagerForLLMRun"],"sources":["../../../src/agents/tests/utils.ts"],"sourcesContent":["/* eslint-disable @typescript-eslint/no-explicit-any */\n/* eslint-disable import/no-extraneous-dependencies */\nimport { expect } from \"vitest\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BaseChatModelCallOptions,\n BindToolsInput,\n ToolChoice,\n} from \"@langchain/core/language_models/chat_models\";\nimport { StructuredTool } from \"@langchain/core/tools\";\nimport {\n BaseMessage,\n AIMessage,\n HumanMessage,\n BaseMessageFields,\n AIMessageFields,\n ToolMessage,\n ToolMessageFields,\n} from \"@langchain/core/messages\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport {\n Runnable,\n RunnableConfig,\n RunnableLambda,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\nimport {\n MemorySaver,\n Checkpoint,\n CheckpointMetadata,\n type BaseCheckpointSaver,\n} from \"@langchain/langgraph-checkpoint\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { z } from \"zod/v3\";\n\nexport class _AnyIdAIMessage extends AIMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"AIMessage\"];\n }\n\n constructor(fields: AIMessageFields | string) {\n let fieldsWithJestMatcher: Partial<AIMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as AIMessageFields);\n }\n}\n\nexport class _AnyIdHumanMessage extends HumanMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"HumanMessage\"];\n }\n\n constructor(fields: BaseMessageFields | string) {\n let fieldsWithJestMatcher: Partial<BaseMessageFields> = {\n id: expect.any(String) as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as BaseMessageFields);\n }\n}\n\nexport class _AnyIdToolMessage extends ToolMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"ToolMessage\"];\n }\n\n constructor(fields: ToolMessageFields) {\n const fieldsWithJestMatcher: Partial<ToolMessageFields> = {\n id: expect.any(String) as unknown as string,\n ...fields,\n };\n super(fieldsWithJestMatcher as ToolMessageFields);\n }\n}\n\nexport class FakeConfigurableModel extends BaseChatModel {\n _queuedMethodOperations: Record<string, any> = {};\n\n _chatModel: LanguageModelLike;\n\n constructor(\n fields: {\n model: LanguageModelLike;\n } & BaseChatModelParams\n ) {\n super(fields);\n this._chatModel = fields.model;\n }\n\n _llmType() {\n return \"fake_configurable\";\n }\n\n async _generate(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n throw new Error(\"Not implemented\");\n }\n\n async _model() {\n return this._chatModel;\n }\n\n bindTools(tools: BindToolsInput[]) {\n const modelWithTools = new FakeConfigurableModel({\n model: (this._chatModel as 
FakeToolCallingChatModel).bindTools(tools),\n });\n modelWithTools._queuedMethodOperations.bindTools = tools;\n return modelWithTools;\n }\n}\n\nexport class FakeToolCallingChatModel extends BaseChatModel {\n sleep?: number = 50;\n\n responses?: BaseMessage[];\n\n thrownErrorString?: string;\n\n idx: number;\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n structuredResponse?: Record<string, unknown>;\n\n // Track messages passed to structured output calls\n structuredOutputMessages: BaseMessage[][] = [];\n\n constructor(\n fields: {\n sleep?: number;\n responses?: BaseMessage[];\n thrownErrorString?: string;\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n structuredResponse?: Record<string, unknown>;\n } & BaseChatModelParams\n ) {\n super(fields);\n this.sleep = fields.sleep ?? this.sleep;\n this.responses = fields.responses;\n this.thrownErrorString = fields.thrownErrorString;\n this.idx = 0;\n this.toolStyle = fields.toolStyle ?? this.toolStyle;\n this.structuredResponse = fields.structuredResponse;\n this.structuredOutputMessages = [];\n }\n\n _llmType() {\n return \"fake\";\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.sleep !== undefined) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n const responses = this.responses?.length ? this.responses : messages;\n const msg = responses[this.idx % responses.length];\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: msg,\n },\n ],\n };\n this.idx += 1;\n\n if (typeof msg.content === \"string\") {\n await runManager?.handleLLMNewToken(msg.content);\n }\n return generation;\n }\n\n bindTools(tools: BindToolsInput[]): Runnable<any> {\n const toolDicts = [];\n const serverTools = [];\n for (const tool of tools) {\n if (!(\"name\" in tool)) {\n serverTools.push(tool);\n continue;\n }\n\n // NOTE: this is a simplified tool spec for testing purposes only\n if (this.toolStyle === \"openai\") {\n toolDicts.push({\n type: \"function\",\n function: {\n name: tool.name,\n },\n });\n } else if ([\"anthropic\", \"google\"].includes(this.toolStyle)) {\n toolDicts.push({\n name: tool.name,\n });\n } else if (this.toolStyle === \"bedrock\") {\n toolDicts.push({\n toolSpec: {\n name: tool.name,\n },\n });\n }\n }\n let toolsToBind: BindToolsInput[] = toolDicts;\n if (this.toolStyle === \"google\") {\n toolsToBind = [{ functionDeclarations: toolDicts }];\n }\n return this.withConfig({\n tools: [...toolsToBind, ...serverTools],\n } as BaseChatModelCallOptions);\n }\n\n withStructuredOutput<\n RunOutput extends Record<string, any> = Record<string, any>\n >(_: unknown): Runnable<any> {\n if (!this.structuredResponse) {\n throw new Error(\"No structured response provided\");\n }\n // Create a runnable that returns the proper structured format\n return RunnableLambda.from(async (messages: BaseMessage[]) => {\n if (this.sleep) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n\n // Store the messages that were sent to generate structured output\n this.structuredOutputMessages.push([...messages]);\n\n // Return in the format expected: { raw: BaseMessage, parsed: RunOutput }\n return this.structuredResponse as RunOutput;\n });\n }\n}\n\nexport class MemorySaverAssertImmutable extends MemorySaver {\n storageForCopies: 
Record<string, Record<string, Uint8Array>> = {};\n\n constructor() {\n super();\n this.storageForCopies = {};\n }\n\n async put(\n config: RunnableConfig,\n checkpoint: Checkpoint,\n metadata: CheckpointMetadata\n ): Promise<RunnableConfig> {\n const thread_id = config.configurable?.thread_id;\n this.storageForCopies[thread_id] ??= {};\n\n // assert checkpoint hasn't been modified since last written\n const saved = await this.get(config);\n if (saved) {\n const savedId = saved.id;\n if (this.storageForCopies[thread_id][savedId]) {\n const loaded = await this.serde.loadsTyped(\n \"json\",\n this.storageForCopies[thread_id][savedId]\n );\n\n expect(\n saved,\n `Checkpoint [${savedId}] has been modified since last written`\n ).toEqual(loaded);\n }\n }\n const [, serializedCheckpoint] = await this.serde.dumpsTyped(checkpoint);\n // save a copy of the checkpoint\n this.storageForCopies[thread_id][checkpoint.id] = serializedCheckpoint;\n\n return super.put(config, checkpoint, metadata);\n }\n}\n\ninterface ToolCall {\n name: string;\n args: Record<string, any>;\n id: string;\n type?: \"tool_call\";\n}\n\ninterface FakeToolCallingModelFields {\n toolCalls?: ToolCall[][];\n toolStyle?: \"openai\" | \"anthropic\";\n index?: number;\n structuredResponse?: any;\n}\n\n// Helper function to create checkpointer\nexport function createCheckpointer(): BaseCheckpointSaver {\n return new MemorySaver();\n}\n\n/**\n * Fake chat model for testing tool calling functionality\n */\nexport class FakeToolCallingModel extends BaseChatModel {\n toolCalls: ToolCall[][];\n\n toolStyle: \"openai\" | \"anthropic\";\n\n // Use a shared reference object so the index persists across bindTools calls\n private indexRef: { current: number };\n\n structuredResponse?: any;\n\n private tools: StructuredTool[] = [];\n\n constructor({\n toolCalls = [],\n toolStyle = \"openai\",\n index = 0,\n structuredResponse,\n indexRef,\n ...rest\n }: FakeToolCallingModelFields & { indexRef?: { current: number } } = {}) {\n super(rest);\n this.toolCalls = toolCalls;\n this.toolStyle = toolStyle;\n // Share the same index reference across instances\n this.indexRef = indexRef ?? 
{ current: index };\n this.structuredResponse = structuredResponse;\n }\n\n // Getter/setter for backwards compatibility\n get index(): number {\n return this.indexRef.current;\n }\n\n set index(value: number) {\n this.indexRef.current = value;\n }\n\n _llmType(): string {\n return \"fake-tool-calling\";\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n bindTools(\n tools: StructuredTool[]\n ):\n | FakeToolCallingModel\n | RunnableBinding<\n any,\n any,\n any & { tool_choice?: ToolChoice | undefined }\n > {\n const newInstance = new FakeToolCallingModel({\n toolCalls: this.toolCalls,\n toolStyle: this.toolStyle,\n structuredResponse: this.structuredResponse,\n // Pass the same indexRef so all instances share the same index state\n indexRef: this.indexRef,\n });\n newInstance.tools = [...this.tools, ...tools];\n return newInstance;\n }\n\n withStructuredOutput(_schema: any) {\n return new RunnableLambda({\n func: async () => {\n return this.structuredResponse;\n },\n });\n }\n\n async _generate(\n messages: BaseMessage[],\n _options?: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const lastMessage = messages[messages.length - 1];\n let content = lastMessage.content as string;\n\n // Handle prompt concatenation\n if (messages.length > 1) {\n const parts = messages.map((m) => m.content).filter(Boolean);\n content = parts\n .map((part) => {\n if (typeof part === \"string\") {\n return part;\n } else if (typeof part === \"object\" && \"text\" in part) {\n return part.text;\n } else if (Array.isArray(part)) {\n return part\n .map((p) => {\n if (typeof p === \"string\") {\n return p;\n } else if (typeof p === \"object\" && \"text\" in p) {\n return p.text;\n }\n return \"\";\n })\n .join(\"-\");\n } else {\n return JSON.stringify(part);\n }\n })\n .join(\"-\");\n }\n\n // Reset index at the start of a new conversation (only human message)\n // This allows the model to be reused across multiple agent.invoke() calls\n const isStartOfConversation =\n messages.length === 1 ||\n (messages.length === 2 && messages.every(HumanMessage.isInstance));\n if (isStartOfConversation && this.index !== 0) {\n this.index = 0;\n }\n\n const currentToolCalls = this.toolCalls[this.index] || [];\n const messageId = this.index.toString();\n\n // Move to next set of tool calls for subsequent invocations\n this.index = (this.index + 1) % Math.max(1, this.toolCalls.length);\n\n const message = new AIMessage({\n content,\n id: messageId,\n tool_calls:\n currentToolCalls.length > 0\n ? 
currentToolCalls.map((tc) => ({\n ...tc,\n type: \"tool_call\" as const,\n }))\n : undefined,\n });\n\n return {\n generations: [\n {\n text: content,\n message,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class SearchAPI extends StructuredTool {\n name = \"search_api\";\n\n description = \"A simple API that returns the input string.\";\n\n schema = z.object({\n query: z.string().describe(\"The query to search for.\"),\n });\n\n async _call(input: z.infer<typeof this.schema>) {\n if (input?.query === \"error\") {\n throw new Error(\"Error\");\n }\n return `result for ${input?.query}`;\n }\n}\n"],"mappings":";;;;;;;;;;;AAuUA,IAAa,uBAAb,MAAa,6BAA6B,cAAc;CACtD;CAEA;CAGA,AAAQ;CAER;CAEA,AAAQ,QAA0B,CAAE;CAEpC,YAAY,EACV,YAAY,CAAE,GACd,YAAY,UACZ,QAAQ,GACR,oBACA,SACA,GAAG,MAC6D,GAAG,CAAE,GAAE;EACvE,MAAM,KAAK;EACX,KAAK,YAAY;EACjB,KAAK,YAAY;EAEjB,KAAK,WAAW,YAAY,EAAE,SAAS,MAAO;EAC9C,KAAK,qBAAqB;CAC3B;CAGD,IAAI,QAAgB;AAClB,SAAO,KAAK,SAAS;CACtB;CAED,IAAI,MAAMA,OAAe;EACvB,KAAK,SAAS,UAAU;CACzB;CAED,WAAmB;AACjB,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO,CAAE;CACV;CAED,UACEC,OAOI;EACJ,MAAM,cAAc,IAAI,qBAAqB;GAC3C,WAAW,KAAK;GAChB,WAAW,KAAK;GAChB,oBAAoB,KAAK;GAEzB,UAAU,KAAK;EAChB;EACD,YAAY,QAAQ,CAAC,GAAG,KAAK,OAAO,GAAG,KAAM;AAC7C,SAAO;CACR;CAED,qBAAqBC,SAAc;AACjC,SAAO,IAAI,eAAe,EACxB,MAAM,YAAY;AAChB,UAAO,KAAK;EACb,EACF;CACF;CAED,MAAM,UACJC,UACAC,UACAC,aACqB;EACrB,MAAM,cAAc,SAAS,SAAS,SAAS;EAC/C,IAAI,UAAU,YAAY;AAG1B,MAAI,SAAS,SAAS,GAAG;GACvB,MAAM,QAAQ,SAAS,IAAI,CAAC,MAAM,EAAE,QAAQ,CAAC,OAAO,QAAQ;GAC5D,UAAU,MACP,IAAI,CAAC,SAAS;AACb,QAAI,OAAO,SAAS,SAClB,QAAO;aACE,OAAO,SAAS,YAAY,UAAU,KAC/C,QAAO,KAAK;aACH,MAAM,QAAQ,KAAK,CAC5B,QAAO,KACJ,IAAI,CAAC,MAAM;AACV,SAAI,OAAO,MAAM,SACf,QAAO;cACE,OAAO,MAAM,YAAY,UAAU,EAC5C,QAAO,EAAE;AAEX,YAAO;IACR,EAAC,CACD,KAAK,IAAI;QAEZ,QAAO,KAAK,UAAU,KAAK;GAE9B,EAAC,CACD,KAAK,IAAI;EACb;EAID,MAAM,wBACJ,SAAS,WAAW,KACnB,SAAS,WAAW,KAAK,SAAS,MAAM,aAAa,WAAW;AACnE,MAAI,yBAAyB,KAAK,UAAU,GAC1C,KAAK,QAAQ;EAGf,MAAM,mBAAmB,KAAK,UAAU,KAAK,UAAU,CAAE;EACzD,MAAM,YAAY,KAAK,MAAM,UAAU;EAGvC,KAAK,SAAS,KAAK,QAAQ,KAAK,KAAK,IAAI,GAAG,KAAK,UAAU,OAAO;EAElE,MAAM,UAAU,IAAI,UAAU;GAC5B;GACA,IAAI;GACJ,YACE,iBAAiB,SAAS,IACtB,iBAAiB,IAAI,CAAC,QAAQ;IAC5B,GAAG;IACH,MAAM;GACP,GAAE,GACH;EACP;AAED,SAAO;GACL,aAAa,CACX;IACE,MAAM;IACN;GACD,CACF;GACD,WAAW,CAAE;EACd;CACF;AACF"}