langchain 1.2.3 → 1.2.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +31 -0
- package/dist/agents/ReactAgent.cjs +3 -2
- package/dist/agents/ReactAgent.cjs.map +1 -1
- package/dist/agents/ReactAgent.js +3 -2
- package/dist/agents/ReactAgent.js.map +1 -1
- package/dist/agents/middleware/contextEditing.d.cts.map +1 -1
- package/dist/agents/middleware/contextEditing.d.ts.map +1 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.cts.map +1 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.ts.map +1 -1
- package/dist/agents/middleware/hitl.d.cts.map +1 -1
- package/dist/agents/middleware/llmToolSelector.d.ts +4 -4
- package/dist/agents/middleware/llmToolSelector.d.ts.map +1 -1
- package/dist/agents/middleware/modelCallLimit.d.cts.map +1 -1
- package/dist/agents/middleware/summarization.cjs +64 -2
- package/dist/agents/middleware/summarization.cjs.map +1 -1
- package/dist/agents/middleware/summarization.d.cts.map +1 -1
- package/dist/agents/middleware/summarization.d.ts +7 -7
- package/dist/agents/middleware/summarization.d.ts.map +1 -1
- package/dist/agents/middleware/summarization.js +65 -3
- package/dist/agents/middleware/summarization.js.map +1 -1
- package/dist/agents/middleware/todoListMiddleware.d.cts.map +1 -1
- package/dist/agents/middleware/toolCallLimit.d.cts.map +1 -1
- package/dist/agents/middleware/types.d.cts.map +1 -1
- package/dist/agents/responses.cjs +16 -6
- package/dist/agents/responses.cjs.map +1 -1
- package/dist/agents/responses.d.cts.map +1 -1
- package/dist/agents/responses.js +16 -6
- package/dist/agents/responses.js.map +1 -1
- package/dist/agents/runtime.d.cts +1 -1
- package/dist/agents/runtime.d.cts.map +1 -1
- package/dist/agents/runtime.d.ts +1 -1
- package/dist/agents/runtime.d.ts.map +1 -1
- package/dist/agents/types.d.cts.map +1 -1
- package/dist/chat_models/universal.cjs +3 -2
- package/dist/chat_models/universal.cjs.map +1 -1
- package/dist/chat_models/universal.js +3 -2
- package/dist/chat_models/universal.js.map +1 -1
- package/dist/hub/index.cjs +8 -1
- package/dist/hub/index.cjs.map +1 -1
- package/dist/hub/index.d.cts +9 -0
- package/dist/hub/index.d.cts.map +1 -1
- package/dist/hub/index.d.ts +9 -0
- package/dist/hub/index.d.ts.map +1 -1
- package/dist/hub/index.js +8 -1
- package/dist/hub/index.js.map +1 -1
- package/dist/hub/node.cjs +8 -1
- package/dist/hub/node.cjs.map +1 -1
- package/dist/hub/node.d.cts +9 -0
- package/dist/hub/node.d.cts.map +1 -1
- package/dist/hub/node.d.ts +9 -0
- package/dist/hub/node.d.ts.map +1 -1
- package/dist/hub/node.js +8 -1
- package/dist/hub/node.js.map +1 -1
- package/dist/load/index.cjs +5 -2
- package/dist/load/index.cjs.map +1 -1
- package/dist/load/index.d.cts +3 -1
- package/dist/load/index.d.cts.map +1 -1
- package/dist/load/index.d.ts +3 -1
- package/dist/load/index.d.ts.map +1 -1
- package/dist/load/index.js +5 -2
- package/dist/load/index.js.map +1 -1
- package/package.json +5 -5
package/dist/agents/types.d.cts.map
@@ -1 +1 @@
- {"version":3,"file":"types.d.cts","names":[...],"sources":["../../src/agents/types.d.ts"],"sourcesContent":[...],"mappings":"...SAoBK,EAYfM..."}
+ {"version":3,"file":"types.d.cts","names":[...],"sources":["../../src/agents/types.d.ts"],"sourcesContent":[...],"mappings":"...SAgCVM,EAAAA..."}

[Regenerated single-line sourcemap. The "version", "file", "names", "sources", and embedded "sourcesContent" fields (the full TypeScript source of src/agents/types.d.ts, including the Interrupt, BuiltInState, ToolCall, ToolResult, ExecutedToolCall types and the extensively documented CreateAgentParams with its createAgent examples) are identical between 1.2.3 and 1.2.5; the only change is one short segment of the "mappings" string, shown above.]
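The "sourcesContent" summarized above is the public CreateAgentParams documentation. For orientation, a minimal sketch pieced together from its own embedded examples (model string, a zod-schema tool, a string system prompt); the top-level `langchain` imports follow those examples and are an assumption about the package's exports:

```ts
import { z } from "zod";
import { createAgent, tool } from "langchain";

// Assembled from the doc-comment examples embedded in the sourcemap above.
const weatherTool = tool(() => "Sunny!", {
  name: "get_weather",
  description: "Get the weather for a location",
  schema: z.object({
    location: z.string().describe("The location to get weather for"),
  }),
});

const agent = createAgent({
  model: "anthropic:claude-3-7-sonnet-latest",
  tools: [weatherTool],
  systemPrompt: "You are a helpful assistant.",
});
```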
package/dist/chat_models/universal.cjs
@@ -104,8 +104,9 @@ async function getChatModelByClassName(className) {
     return module$1[config.className];
   } catch (e) {
     const err = e;
-    if ("code" in err && err.code?.toString().includes("ERR_MODULE_NOT_FOUND") && "message" in err) {
-      const attemptedPackage = err.message.split("Error: Cannot find package '")[1].split("'")[0];
+    if ("code" in err && err.code?.toString().includes("ERR_MODULE_NOT_FOUND") && "message" in err && typeof err.message === "string") {
+      const msg = err.message.startsWith("Error: ") ? err.message.slice(7) : err.message;
+      const attemptedPackage = msg.split("Cannot find package '")[1].split("'")[0];
       throw new Error(`Unable to import ${attemptedPackage}. Please install with \`npm install ${attemptedPackage}\` or \`pnpm install ${attemptedPackage}\``);
     }
     throw e;
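The added `typeof err.message === "string"` guard and the `"Error: "` prefix strip make the parsing tolerant of ERR_MODULE_NOT_FOUND messages that may or may not carry a leading `"Error: "` prefix; 1.2.3 split only on the prefixed form. A minimal sketch of the 1.2.5 extraction logic (the function name and the `?.` guard are mine, not the package's):

```ts
// Hedged sketch of the new parsing, not the shipped implementation.
function extractMissingPackage(err: unknown): string | undefined {
  if (!(err instanceof Error) || typeof err.message !== "string") return undefined;
  // Some messages arrive as "Error: Cannot find package '...'", others without the prefix.
  const msg = err.message.startsWith("Error: ")
    ? err.message.slice("Error: ".length) // .slice(7) in the shipped diff
    : err.message;
  // The shipped code indexes [1] directly; `?.` is added here as a defensive extra.
  return msg.split("Cannot find package '")[1]?.split("'")[0];
}

// extractMissingPackage(new Error("Cannot find package '@langchain/openai' imported from /app/main.js"))
//   → "@langchain/openai"
```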
package/dist/chat_models/universal.cjs.map
@@ -1 +1 @@
- {"version":3,"file":"universal.cjs","names":[...],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":[...],"mappings":...}

[Regenerated single-line sourcemap accompanying the universal.cjs change above. Its embedded "sourcesContent" carries the full TypeScript source of src/chat_models/universal.ts: the MODEL_PROVIDER_CONFIG table mapping providers to integration packages and class names (openai → @langchain/openai/ChatOpenAI, anthropic → @langchain/anthropic/ChatAnthropic, etc.), getChatModelByClassName with the pre-1.2.5 parsing (split on "Error: Cannot find package '"), _initChatModelHelper, _inferModelProvider, the ConfigurableModel class, and the initChatModel overloads with their docstring examples. The extract is cut off partway through the old (-) side; the new (+) side is not shown.]
await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-3-5-sonnet-20240620\",\n * foo_modelProvider: \"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Initialize a model with a custom profile\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const model = await initChatModel(\"gpt-4o-mini\", {\n * profile: {\n * maxInputTokens: 100000,\n * },\n * });\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... 
-> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, profile, ...params } =\n {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const [provider, ...remainingParts] = model.split(\":\");\n const modelComponents =\n remainingParts.length === 0\n ? [provider]\n : [provider, remainingParts.join(\":\")];\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] } to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n let configurableModel: ConfigurableModel<RunInput, CallOptions>;\n\n if (configurableFieldsCopy === undefined) {\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n profile,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n profile,\n });\n }\n\n // Initialize the model instance to make sure a profile is available\n await configurableModel._getModelInstance();\n return 
configurableModel;\n}\n"],"mappings":";;;;;;;;;;;;;;AAmDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;CACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;CACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;AACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;AAaD,eAAsB,wBAAwBA,WAAmB;CAE/D,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,KAC1D,CAAC,GAAGC,SAAO,KAAKA,SAAO,cAAc,UACtC;AAED,KAAI,CAAC,cACH,QAAO;CAGT,MAAM,GAAG,OAAO,GAAG;AACnB,KAAI;EACF,MAAMC,WAAS,MAAM,OAAO,OAAO;AACnC,SAAOA,SAAO,OAAO;CACtB,SAAQC,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,KACb;GACA,MAAM,mBAAmB,IAAI,QAC1B,MAAM,+BAA+B,CAAC,GACtC,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,CAAC,iBAAiB,EAAE,iBAAiB,oCAAsB,EACxC,iBAAiB,qBAAqB,EAAE,iBAAiB,EAAE,CAAC;EAElF;AACD,QAAM;CACP;AACF;AAED,eAAe,qBACbC,OACAC,eAEAC,SAA8B,CAAE,GACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,CAAC,4CAA4C,EAAE,MAAM,0CAA0C,CAAC;CAIpG,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,CAAC,6BAA6B,EAAE,kBAAkB,sCAAsC,EAAE,WAAW;CAExG;CAED,MAAM,EAAE,eAAe,QAAS,GAAG,cAAc,GAAG;CACpD,MAAM,gBAAgB,MAAM,wBAAwB,OAAO,UAAU;AACrE,QAAO,IAAI,cAAc;EAAE;EAAO,GAAG;CAAc;AACpD;;;;;;;;;;;;AAaD,SAAgB,oBAAoBC,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP,QAAO;AAEV;;;;;;AA+BD,IAAa,oBAAb,MAAa,0BAGHC,2DAA2C;CACnD,WAAmB;AACjB,SAAO;CACR;CAED,eAAe,CAAC,aAAa,aAAc;CAG3C,iBAAuC,CAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,CAAE;;CAGjD,AAAQ,sCAAsB,IAAI;;CAMlC,AAAQ;CAER,YAAYC,QAAiC;EAC3C,MAAM,OAAO;EACb,KAAK,iBAAiB,OAAO,iBAAiB,CAAE;AAEhD,MAAI,OAAO,uBAAuB,OAChC,KAAK,sBAAsB;OAE3B,KAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,eACD;AAGH,MAAI,OAAO,cACT,KAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa,CAAC,CAAC;OAE7B,KAAK,gBAAgB;EAGvB,KAAK,0BACH,OAAO,0BAA0B,KAAK;EAExC,KAAK,WAAW,OAAO,WAAW;CACnC;CAED,MAAM,kBACJC,QAGA;EAEA,MAAM,WAAW,KAAK,UAAU,UAAU,CAAE,EAAC;EAC7C,MAAM,cAAc,KAAK,oBAAoB,IAAI,SAAS;AAC1D,MAAI,YACF,QAAO;EAIT,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;EAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;AAGD,OAAK,MAAM,CAAC,QAAQ,KAAK,IAAI,OAAO,QAAQ,KAAK,wBAAwB,CACvE,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,YAG7C,mBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;EAKvE,KAAK,oBAAoB,IAAI,UAAU,iBAAiB;AACxD,SAAO;CACR;CAED,MAAM,UACJC,UACAC,SACAC,YACqB;EACrB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;AACnD,SAAO,MAAM,UAAU,UAAU,WAAW,CAAE,GAAE,WAAW;CAC5D;CAED,AAAS,UACPC,OAEAC,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,YAAY,CAAC,OAAO,MAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,uBAA8D,CAC5D,QACA,GAAG,SACmD;EACtD,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,uBAAuB,CAAC,QAAQ,GAAG,IAAK;AAC5D,SAAO,IAAI
,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,aAAaL,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,CAAE;EAE/C,IAAIM,cAAmC,CAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;GAC/D,YAAY,eAAe;EAC5B;AAGH,MAAI,KAAK,wBAAwB,OAC/B,cAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,OAAO,CAAC,CAAC,IAAI,KACvC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;CACR;CAED,cAAcC,KAAaC,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;CAC5D;;;;;;CAOD,WACER,QACwD;EACxD,MAAMS,eAA+B,EAAE,GAAI,UAAU,CAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAMC,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,MAAM,eAAe,CACnE;EAED,gBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,CAAE,EAAC,CAAC,OAC9C,CAAC,CAAC,EAAE,KACF,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;EAED,MAAM,uBAAuB,IAAI,kBAAyC;GACxE,eAAe;IAAE,GAAG,KAAK;IAAgB,GAAG;GAAa;GACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,mBAAoB,IAC7B,KAAK;GACT,cAAc,KAAK;GACnB,wBAAwB,KAAK;EAC9B;AAED,SAAO,IAAIC,2CAAuD;GAChE,QAAQ;GACR,OAAO;EACR;CACF;CAED,MAAM,OACJC,OACAC,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,sDAAsB,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;CACnC;CAED,MAAM,OACJD,OACAC,SACiD;EACjD,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,mBAAmB,IAAIC,sDAAwB;GACnD,WAAW,MAAM,MAAM,OAAO,OAAO,QAAQ;GAC7C,QAAQ;EACT;EACD,MAAM,iBAAiB;AACvB,SAAOC,qDAAuB,mBAAmB,iBAAiB;CACnE;CAoBD,MAAM,MACJC,QACAC,SACAC,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;CAClD;CAED,OAAO,UACLC,WACAC,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,WAAW,OAAO;CAC1C;CAED,OAAO,UACLR,OACAS,SACAC,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;EAC7B,EAAC;CACH;CAiBD,aACEV,OACAW,SAIAC,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,kBAAkB,QAAQ;GACxD,MAAM,sDAAsB,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,aACxB,MAAM;EAET;AACD,SAAOT,qDAAuB,mBAAmB,kBAAkB,CAAC;CACrE;;;;;;CAOD,IAAI,UAAwB;AAC1B,MAAI,KAAK,SACP,QAAO,KAAK;EAEd,MAAM,WAAW,KAAK,UAAU,CAAE,EAAC;EACnC,MAAM,WAAW,KAAK,oBAAoB,IAAI,SAAS;AACvD,SAAO,UAAU,WAAW,CAAE;CAC/B;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsRD,eAAsB,cAIpBU,OAEAC,QAMmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,eAAe,QAAS,GAAG,QAAQ,GACzE;EACE,cAAc;EACd,GAAI,UAAU,CAAE;CACjB;AACH,KAAI,kBAAkB,UAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,CAAC,UAAU,GAAG,eAAe,GAAG,MAAM,MAAM,IAAI;EACtD,MAAM,kBACJ,eAAe,WAAW,IACtB,CAAC,QAAS,IACV,CAAC,UAAU,eAAe,KAAK,IAAI,AAAC;AAC1C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,EAEvE,CAAC,eAAe,MAAM,GAAG;CAE5B;CACD,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,kBAAmB,IACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,QACvC,yBAAyB,CAAC,SAAS,eAAgB;AAErD,KAAI,gBAAgB,2BAA2B,QAC7C,QAAQ,KACN,CAAC,gBAAgB,EAAE,aAAa,oIAAoD,CAEnE,CAClB;CAIH,MAAMC,aAAkC,EAAE,GAAG,OAAQ;CAErD,IAAIC;AAEJ,KAAI,2BAA2B,QAC7B,oBAAoB,IAAI,kBAAyC;EAC/D,eAAe;GACb,GAAG;GACH;GACA;EACD;EACD;EACA;CACD;MACI;AACL,MAAI,OACF,WAAW,QAAQ;AAErB,MAAI,eACF,WAAW,gBAAgB;EAE7B,oBAAoB,IAAI,kBAAyC;GAC/D,eAAe;GACf;GACA,oBAAoB;GACpB;EACD;CACF;CAGD,MAAM,kBAAkB,mBAAmB;AAC3C,QAAO;AACR"}
+
{"version":3,"file":"universal.cjs","names":["className: string","config","module","e: unknown","model: string","modelProvider?: string","params: Record<string, any>","modelName: string","BaseChatModel","fields: ConfigurableModelFields","config?: RunnableConfig","messages: BaseMessage[]","options?: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","tools: BindToolsInput[]","params?: Record<string, any>","modelParams: Record<string, any>","str: string","prefix: string","mergedConfig: RunnableConfig","remainingConfig: RunnableConfig","RunnableBinding","input: RunInput","options?: CallOptions","AsyncGeneratorWithSetup","IterableReadableStream","inputs: RunInput[]","options?: Partial<CallOptions> | Partial<CallOptions>[]","batchOptions?: RunnableBatchOptions","generator: AsyncGenerator<RunInput>","options: CallOptions","options?: Partial<CallOptions>","streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">","options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n }","streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">","model?: string","fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }","paramsCopy: Record<string, any>","configurableModel: ConfigurableModel<RunInput, CallOptions>"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput\n extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {}\n\nexport interface ConfigurableChatModelCallOptions\n extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: 
\"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n className: \"ChatGoogleGenerativeAI\",\n },\n ollama: {\n package: \"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n mistral: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(className: string) {\n // Find the provider config that matches the class name\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, config]) => config.className === className\n );\n\n if (!providerEntry) {\n return undefined;\n }\n\n const [, config] = providerEntry;\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err &&\n typeof err.message === \"string\"\n ) {\n const msg = err.message.startsWith(\"Error: \")\n ? err.message.slice(\"Error: \".length)\n : err.message;\n const attemptedPackage = msg\n .split(\"Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n const ProviderClass = await getChatModelByClassName(config.className);\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n /**\n * Overrides the profiling information for the model. 
If not provided,\n * the profile will be inferred from the inner model instance.\n */\n profile?: ModelProfile;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n }\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n /** @internal */\n private _modelInstanceCache = new Map<\n string,\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n >();\n\n /** @internal */\n private _profile?: ModelProfile;\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n\n this._profile = fields.profile ?? undefined;\n }\n\n async _getModelInstance(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n // Check cache first\n const cacheKey = JSON.stringify(config ?? {});\n const cachedModel = this._modelInstanceCache.get(cacheKey);\n if (cachedModel) {\n return cachedModel;\n }\n\n // Initialize model with merged params\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations in sequence\n for (const [method, args] of Object.entries(this._queuedMethodOperations)) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n\n // Cache and return the initialized model\n this._modelInstanceCache.set(cacheKey, initializedModel);\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._getModelInstance(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._getModelInstance(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n 
async function* wrappedGenerator() {\n const model = await outerThis._getModelInstance(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n\n /**\n * Return profiling information for the model.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n */\n get profile(): ModelProfile {\n if (this._profile) {\n return this._profile;\n }\n const cacheKey = JSON.stringify({});\n const instance = this._modelInstanceCache.get(cacheKey);\n return instance?.profile ?? {};\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. 
\"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {ModelProfile} [fields.profile] - Overrides the profiling information for the model. If not provided,\n * the profile will be inferred from the inner model instance.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = 
await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-3-5-sonnet-20240620\",\n * foo_modelProvider: \"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Initialize a model with a custom profile\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const model = await initChatModel(\"gpt-4o-mini\", {\n * profile: {\n * maxInputTokens: 100000,\n * },\n * });\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... 
-> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, profile, ...params } =\n {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const [provider, ...remainingParts] = model.split(\":\");\n const modelComponents =\n remainingParts.length === 0\n ? [provider]\n : [provider, remainingParts.join(\":\")];\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] } to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n let configurableModel: ConfigurableModel<RunInput, CallOptions>;\n\n if (configurableFieldsCopy === undefined) {\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n profile,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n profile,\n });\n }\n\n // Initialize the model instance to make sure a profile is available\n await configurableModel._getModelInstance();\n return 
configurableModel;\n}\n"],"mappings":";;;;;;;;;;;;;;AAmDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;CACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;CACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;AACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;AAaD,eAAsB,wBAAwBA,WAAmB;CAE/D,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,KAC1D,CAAC,GAAGC,SAAO,KAAKA,SAAO,cAAc,UACtC;AAED,KAAI,CAAC,cACH,QAAO;CAGT,MAAM,GAAG,OAAO,GAAG;AACnB,KAAI;EACF,MAAMC,WAAS,MAAM,OAAO,OAAO;AACnC,SAAOA,SAAO,OAAO;CACtB,SAAQC,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,OACb,OAAO,IAAI,YAAY,UACvB;GACA,MAAM,MAAM,IAAI,QAAQ,WAAW,UAAU,GACzC,IAAI,QAAQ,MAAM,EAAiB,GACnC,IAAI;GACR,MAAM,mBAAmB,IACtB,MAAM,wBAAwB,CAAC,GAC/B,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,CAAC,iBAAiB,EAAE,iBAAiB,oCAAsB,EACxC,iBAAiB,qBAAqB,EAAE,iBAAiB,EAAE,CAAC;EAElF;AACD,QAAM;CACP;AACF;AAED,eAAe,qBACbC,OACAC,eAEAC,SAA8B,CAAE,GACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,CAAC,4CAA4C,EAAE,MAAM,0CAA0C,CAAC;CAIpG,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,CAAC,6BAA6B,EAAE,kBAAkB,sCAAsC,EAAE,WAAW;CAExG;CAED,MAAM,EAAE,eAAe,QAAS,GAAG,cAAc,GAAG;CACpD,MAAM,gBAAgB,MAAM,wBAAwB,OAAO,UAAU;AACrE,QAAO,IAAI,cAAc;EAAE;EAAO,GAAG;CAAc;AACpD;;;;;;;;;;;;AAaD,SAAgB,oBAAoBC,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP,QAAO;AAEV;;;;;;AA+BD,IAAa,oBAAb,MAAa,0BAGHC,2DAA2C;CACnD,WAAmB;AACjB,SAAO;CACR;CAED,eAAe,CAAC,aAAa,aAAc;CAG3C,iBAAuC,CAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,CAAE;;CAGjD,AAAQ,sCAAsB,IAAI;;CAMlC,AAAQ;CAER,YAAYC,QAAiC;EAC3C,MAAM,OAAO;EACb,KAAK,iBAAiB,OAAO,iBAAiB,CAAE;AAEhD,MAAI,OAAO,uBAAuB,OAChC,KAAK,sBAAsB;OAE3B,KAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,eACD;AAGH,MAAI,OAAO,cACT,KAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa,CAAC,CAAC;OAE7B,KAAK,gBAAgB;EAGvB,KAAK,0BACH,OAAO,0BAA0B,KAAK;EAExC,KAAK,WAAW,OAAO,WAAW;CACnC;CAED,MAAM,kBACJC,QAGA;EAEA,MAAM,WAAW,KAAK,UAAU,UAAU,CAAE,EAAC;EAC7C,MAAM,cAAc,KAAK,oBAAoB,IAAI,SAAS;AAC1D,MAAI,YACF,QAAO;EAIT,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;EAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;AAGD,OAAK,MAAM,CAAC,QAAQ,KAAK,IAAI,OAAO,QAAQ,KAAK,wBAAwB,CACvE,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,YAG7C,mBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;EAKvE,KAAK,oBAAoB,IAAI,UAAU,iBAAiB;AACxD,SAAO;CACR;CAED,MAAM,UACJC,UACAC,SACAC,YACqB;EACrB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;AACnD,SAAO,MAAM,UAAU,UAAU,WAAW,CAAE,GAAE,WAAW;CAC5D;CAED,AAAS,UACPC,OAEAC,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,YAAY,CAAC,OAAO,MAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,uBAA8D,CAC5D,QACA,GAAG,SACmD;EACtD,
MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,uBAAuB,CAAC,QAAQ,GAAG,IAAK;AAC5D,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,aAAaL,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,CAAE;EAE/C,IAAIM,cAAmC,CAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;GAC/D,YAAY,eAAe;EAC5B;AAGH,MAAI,KAAK,wBAAwB,OAC/B,cAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,OAAO,CAAC,CAAC,IAAI,KACvC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;CACR;CAED,cAAcC,KAAaC,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;CAC5D;;;;;;CAOD,WACER,QACwD;EACxD,MAAMS,eAA+B,EAAE,GAAI,UAAU,CAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAMC,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,MAAM,eAAe,CACnE;EAED,gBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,CAAE,EAAC,CAAC,OAC9C,CAAC,CAAC,EAAE,KACF,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;EAED,MAAM,uBAAuB,IAAI,kBAAyC;GACxE,eAAe;IAAE,GAAG,KAAK;IAAgB,GAAG;GAAa;GACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,mBAAoB,IAC7B,KAAK;GACT,cAAc,KAAK;GACnB,wBAAwB,KAAK;EAC9B;AAED,SAAO,IAAIC,2CAAuD;GAChE,QAAQ;GACR,OAAO;EACR;CACF;CAED,MAAM,OACJC,OACAC,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,sDAAsB,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;CACnC;CAED,MAAM,OACJD,OACAC,SACiD;EACjD,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,mBAAmB,IAAIC,sDAAwB;GACnD,WAAW,MAAM,MAAM,OAAO,OAAO,QAAQ;GAC7C,QAAQ;EACT;EACD,MAAM,iBAAiB;AACvB,SAAOC,qDAAuB,mBAAmB,iBAAiB;CACnE;CAoBD,MAAM,MACJC,QACAC,SACAC,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;CAClD;CAED,OAAO,UACLC,WACAC,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,WAAW,OAAO;CAC1C;CAED,OAAO,UACLR,OACAS,SACAC,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;EAC7B,EAAC;CACH;CAiBD,aACEV,OACAW,SAIAC,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,kBAAkB,QAAQ;GACxD,MAAM,sDAAsB,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,aACxB,MAAM;EAET;AACD,SAAOT,qDAAuB,mBAAmB,kBAAkB,CAAC;CACrE;;;;;;CAOD,IAAI,UAAwB;AAC1B,MAAI,KAAK,SACP,QAAO,KAAK;EAEd,MAAM,WAAW,KAAK,UAAU,CAAE,EAAC;EACnC,MAAM,WAAW,KAAK,oBAAoB,IAAI,SAAS;AACvD,SAAO,UAAU,WAAW,CAAE;CAC/B;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsRD,eAAsB,cAIpBU,OAEAC,QAMmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,eAAe,QAAS,GAAG,QAAQ,GACzE;EACE,cAAc;EACd,GAAI,UAAU,CAAE;CACjB;AACH,KAAI,kBAAkB,UAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,CAAC,UAAU,GAAG,eAAe,GAAG,MAAM,MAAM,IAAI;EACtD,MAAM,kBACJ,eAAe,WAAW,IACtB,CAAC,QAAS,IACV,CAAC,UAAU,eAAe,KAAK,IAAI,AAAC;AAC1C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,EAEvE,CAAC,eAAe,MAAM,GAAG;CAE5B;CACD,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,kBAAmB,IACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,QACvC,yBAAyB,CAAC,SAAS,eAAgB;AAErD,KAAI,gBAAgB,2BAA2B,QAC7C,QAAQ,KACN,CAAC,gBAAgB,EAAE,aAAa,oIAAoD,CAEnE,CAClB;CAIH,MAAMC,aAAkC,EAAE,GAAG,OAAQ;CAErD,IAAIC;AAEJ,KAAI,2BAA2B,QAC7B,oBAAoB,IAAI,kBAAyC;EAC/D,eAAe;GACb,GAAG;GACH;GACA;EACD;EACD;EACA;CACD;MACI;AACL,MAAI,OACF,WAAW,QAAQ;AAErB,MAAI,eACF,WAAW,gBAAgB;EAE7B,oBAAoB,IAAI,kBAAyC;GAC/D,eAAe;GACf;GACA,oBAAoB;GACpB;EACD;CACF;CAGD,MAAM,kBAAkB,mBAAmB;AAC3C,QAAO;AACR"}
@@ -104,8 +104,9 @@ async function getChatModelByClassName(className) {
     return module[config.className];
   } catch (e) {
     const err = e;
-    if ("code" in err && err.code?.toString().includes("ERR_MODULE_NOT_FOUND") && "message" in err) {
-      const attemptedPackage = err.message.split("Error: Cannot find package '")[1].split("'")[0];
+    if ("code" in err && err.code?.toString().includes("ERR_MODULE_NOT_FOUND") && "message" in err && typeof err.message === "string") {
+      const msg = err.message.startsWith("Error: ") ? err.message.slice(7) : err.message;
+      const attemptedPackage = msg.split("Cannot find package '")[1].split("'")[0];
       throw new Error(`Unable to import ${attemptedPackage}. Please install with \`npm install ${attemptedPackage}\` or \`pnpm install ${attemptedPackage}\``);
     }
     throw e;
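For context on the hunk above: in 1.2.3 the missing-integration error handler assumed the module-not-found message always began with `Error: Cannot find package '…'`, so messages without the `Error: ` prefix (or non-string messages) misparsed the package name. 1.2.5 first guards that `err.message` is a string, then strips an optional `Error: ` prefix before splitting on the stable part of the message. Below is a minimal standalone sketch of that parsing logic under those assumptions; `parseAttemptedPackage` is a hypothetical helper name for illustration, not part of the langchain API.

```typescript
// Hypothetical helper sketching the hardened parsing introduced in 1.2.5.
function parseAttemptedPackage(err: unknown): string | undefined {
  const e = err as Error & { code?: string | number };
  if (
    e.code?.toString().includes("ERR_MODULE_NOT_FOUND") &&
    typeof e.message === "string" // guard added in 1.2.5
  ) {
    // Node sometimes prefixes the message with "Error: "; 1.2.3 baked that
    // prefix into the split delimiter, so un-prefixed messages broke.
    const msg = e.message.startsWith("Error: ")
      ? e.message.slice("Error: ".length)
      : e.message;
    // Optional chaining is added in this sketch; the shipped code indexes
    // the split result directly.
    return msg.split("Cannot find package '")[1]?.split("'")[0];
  }
  return undefined;
}

// Both message shapes now resolve to "@langchain/anthropic"; the 1.2.3
// logic only handled the first.
const withPrefix = {
  code: "ERR_MODULE_NOT_FOUND",
  message:
    "Error: Cannot find package '@langchain/anthropic' imported from /app",
};
const withoutPrefix = { ...withPrefix, message: withPrefix.message.slice(7) };
console.log(parseAttemptedPackage(withPrefix)); // "@langchain/anthropic"
console.log(parseAttemptedPackage(withoutPrefix)); // "@langchain/anthropic"
```

The design choice is to normalize the message once and split on the invariant substring, rather than enumerating every prefix variant in the delimiter, which keeps the extraction tolerant of runtime formatting differences.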
@@ -1 +1 @@
-
{"version":3,"file":"universal.js","names":["className: string","config","e: unknown","model: string","modelProvider?: string","params: Record<string, any>","modelName: string","fields: ConfigurableModelFields","config?: RunnableConfig","messages: BaseMessage[]","options?: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","tools: BindToolsInput[]","params?: Record<string, any>","modelParams: Record<string, any>","str: string","prefix: string","mergedConfig: RunnableConfig","remainingConfig: RunnableConfig","input: RunInput","options?: CallOptions","inputs: RunInput[]","options?: Partial<CallOptions> | Partial<CallOptions>[]","batchOptions?: RunnableBatchOptions","generator: AsyncGenerator<RunInput>","options: CallOptions","options?: Partial<CallOptions>","streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">","options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n }","streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">","model?: string","fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }","paramsCopy: Record<string, any>","configurableModel: ConfigurableModel<RunInput, CallOptions>"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput\n extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {}\n\nexport interface ConfigurableChatModelCallOptions\n extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: \"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n 
className: \"ChatGoogleGenerativeAI\",\n },\n ollama: {\n package: \"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n mistral: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(className: string) {\n // Find the provider config that matches the class name\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, config]) => config.className === className\n );\n\n if (!providerEntry) {\n return undefined;\n }\n\n const [, config] = providerEntry;\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err\n ) {\n const attemptedPackage = err.message\n .split(\"Error: Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n const ProviderClass = await getChatModelByClassName(config.className);\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n /**\n * Overrides the profiling information for the model. 
If not provided,\n * the profile will be inferred from the inner model instance.\n */\n profile?: ModelProfile;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n }\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n /** @internal */\n private _modelInstanceCache = new Map<\n string,\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n >();\n\n /** @internal */\n private _profile?: ModelProfile;\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n\n this._profile = fields.profile ?? undefined;\n }\n\n async _getModelInstance(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n // Check cache first\n const cacheKey = JSON.stringify(config ?? {});\n const cachedModel = this._modelInstanceCache.get(cacheKey);\n if (cachedModel) {\n return cachedModel;\n }\n\n // Initialize model with merged params\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations in sequence\n for (const [method, args] of Object.entries(this._queuedMethodOperations)) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n\n // Cache and return the initialized model\n this._modelInstanceCache.set(cacheKey, initializedModel);\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._getModelInstance(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._getModelInstance(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n 
async function* wrappedGenerator() {\n const model = await outerThis._getModelInstance(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n\n /**\n * Return profiling information for the model.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n */\n get profile(): ModelProfile {\n if (this._profile) {\n return this._profile;\n }\n const cacheKey = JSON.stringify({});\n const instance = this._modelInstanceCache.get(cacheKey);\n return instance?.profile ?? {};\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. 
\"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {ModelProfile} [fields.profile] - Overrides the profiling information for the model. If not provided,\n * the profile will be inferred from the inner model instance.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = 
await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-3-5-sonnet-20240620\",\n * foo_modelProvider: \"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Initialize a model with a custom profile\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const model = await initChatModel(\"gpt-4o-mini\", {\n * profile: {\n * maxInputTokens: 100000,\n * },\n * });\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... 
-> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, profile, ...params } =\n {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const [provider, ...remainingParts] = model.split(\":\");\n const modelComponents =\n remainingParts.length === 0\n ? [provider]\n : [provider, remainingParts.join(\":\")];\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] } to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n let configurableModel: ConfigurableModel<RunInput, CallOptions>;\n\n if (configurableFieldsCopy === undefined) {\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n profile,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n profile,\n });\n }\n\n // Initialize the model instance to make sure a profile is available\n await configurableModel._getModelInstance();\n return 
configurableModel;\n}\n"],"mappings":";;;;;;;;;;;;;;AAmDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;CACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;CACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;AACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;AAaD,eAAsB,wBAAwBA,WAAmB;CAE/D,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,KAC1D,CAAC,GAAGC,SAAO,KAAKA,SAAO,cAAc,UACtC;AAED,KAAI,CAAC,cACH,QAAO;CAGT,MAAM,GAAG,OAAO,GAAG;AACnB,KAAI;EACF,MAAM,SAAS,MAAM,OAAO,OAAO;AACnC,SAAO,OAAO,OAAO;CACtB,SAAQC,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,KACb;GACA,MAAM,mBAAmB,IAAI,QAC1B,MAAM,+BAA+B,CAAC,GACtC,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,CAAC,iBAAiB,EAAE,iBAAiB,oCAAsB,EACxC,iBAAiB,qBAAqB,EAAE,iBAAiB,EAAE,CAAC;EAElF;AACD,QAAM;CACP;AACF;AAED,eAAe,qBACbC,OACAC,eAEAC,SAA8B,CAAE,GACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,CAAC,4CAA4C,EAAE,MAAM,0CAA0C,CAAC;CAIpG,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,CAAC,6BAA6B,EAAE,kBAAkB,sCAAsC,EAAE,WAAW;CAExG;CAED,MAAM,EAAE,eAAe,QAAS,GAAG,cAAc,GAAG;CACpD,MAAM,gBAAgB,MAAM,wBAAwB,OAAO,UAAU;AACrE,QAAO,IAAI,cAAc;EAAE;EAAO,GAAG;CAAc;AACpD;;;;;;;;;;;;AAaD,SAAgB,oBAAoBC,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP,QAAO;AAEV;;;;;;AA+BD,IAAa,oBAAb,MAAa,0BAGH,cAA2C;CACnD,WAAmB;AACjB,SAAO;CACR;CAED,eAAe,CAAC,aAAa,aAAc;CAG3C,iBAAuC,CAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,CAAE;;CAGjD,AAAQ,sCAAsB,IAAI;;CAMlC,AAAQ;CAER,YAAYC,QAAiC;EAC3C,MAAM,OAAO;EACb,KAAK,iBAAiB,OAAO,iBAAiB,CAAE;AAEhD,MAAI,OAAO,uBAAuB,OAChC,KAAK,sBAAsB;OAE3B,KAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,eACD;AAGH,MAAI,OAAO,cACT,KAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa,CAAC,CAAC;OAE7B,KAAK,gBAAgB;EAGvB,KAAK,0BACH,OAAO,0BAA0B,KAAK;EAExC,KAAK,WAAW,OAAO,WAAW;CACnC;CAED,MAAM,kBACJC,QAGA;EAEA,MAAM,WAAW,KAAK,UAAU,UAAU,CAAE,EAAC;EAC7C,MAAM,cAAc,KAAK,oBAAoB,IAAI,SAAS;AAC1D,MAAI,YACF,QAAO;EAIT,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;EAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;AAGD,OAAK,MAAM,CAAC,QAAQ,KAAK,IAAI,OAAO,QAAQ,KAAK,wBAAwB,CACvE,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,YAG7C,mBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;EAKvE,KAAK,oBAAoB,IAAI,UAAU,iBAAiB;AACxD,SAAO;CACR;CAED,MAAM,UACJC,UACAC,SACAC,YACqB;EACrB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;AACnD,SAAO,MAAM,UAAU,UAAU,WAAW,CAAE,GAAE,WAAW;CAC5D;CAED,AAAS,UACPC,OAEAC,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,YAAY,CAAC,OAAO,MAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,uBAA8D,CAC5D,QACA,GAAG,SACmD;EACtD,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,uBAAuB,CAAC,QAAQ,GAAG,IAAK;AAC5D,SAAO,IAAI,kBA
AyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,aAAaL,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,CAAE;EAE/C,IAAIM,cAAmC,CAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;GAC/D,YAAY,eAAe;EAC5B;AAGH,MAAI,KAAK,wBAAwB,OAC/B,cAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,OAAO,CAAC,CAAC,IAAI,KACvC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;CACR;CAED,cAAcC,KAAaC,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;CAC5D;;;;;;CAOD,WACER,QACwD;EACxD,MAAMS,eAA+B,EAAE,GAAI,UAAU,CAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAMC,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,MAAM,eAAe,CACnE;EAED,gBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,CAAE,EAAC,CAAC,OAC9C,CAAC,CAAC,EAAE,KACF,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;EAED,MAAM,uBAAuB,IAAI,kBAAyC;GACxE,eAAe;IAAE,GAAG,KAAK;IAAgB,GAAG;GAAa;GACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,mBAAoB,IAC7B,KAAK;GACT,cAAc,KAAK;GACnB,wBAAwB,KAAK;EAC9B;AAED,SAAO,IAAI,gBAAuD;GAChE,QAAQ;GACR,OAAO;EACR;CACF;CAED,MAAM,OACJC,OACAC,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,SAAS,aAAa,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;CACnC;CAED,MAAM,OACJD,OACAC,SACiD;EACjD,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,mBAAmB,IAAI,wBAAwB;GACnD,WAAW,MAAM,MAAM,OAAO,OAAO,QAAQ;GAC7C,QAAQ;EACT;EACD,MAAM,iBAAiB;AACvB,SAAO,uBAAuB,mBAAmB,iBAAiB;CACnE;CAoBD,MAAM,MACJC,QACAC,SACAC,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;CAClD;CAED,OAAO,UACLC,WACAC,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,SAAS,aAAa,QAAQ;EAEpC,OAAO,MAAM,UAAU,WAAW,OAAO;CAC1C;CAED,OAAO,UACLN,OACAO,SACAC,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,SAAS,aAAa,QAAQ;EAEpC,OAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;EAC7B,EAAC;CACH;CAiBD,aACER,OACAS,SAIAC,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,kBAAkB,QAAQ;GACxD,MAAM,SAAS,aAAa,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,aACxB,MAAM;EAET;AACD,SAAO,uBAAuB,mBAAmB,kBAAkB,CAAC;CACrE;;;;;;CAOD,IAAI,UAAwB;AAC1B,MAAI,KAAK,SACP,QAAO,KAAK;EAEd,MAAM,WAAW,KAAK,UAAU,CAAE,EAAC;EACnC,MAAM,WAAW,KAAK,oBAAoB,IAAI,SAAS;AACvD,SAAO,UAAU,WAAW,CAAE;CAC/B;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsRD,eAAsB,cAIpBC,OAEAC,QAMmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,eAAe,QAAS,GAAG,QAAQ,GACzE;EACE,cAAc;EACd,GAAI,UAAU,CAAE;CACjB;AACH,KAAI,kBAAkB,UAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,CAAC,UAAU,GAAG,eAAe,GAAG,MAAM,MAAM,IAAI;EACtD,MAAM,kBACJ,eAAe,WAAW,IACtB,CAAC,QAAS,IACV,CAAC,UAAU,eAAe,KAAK,IAAI,AAAC;AAC1C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,EAEvE,CAAC,eAAe,MAAM,GAAG;CAE5B;CACD,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,kBAAmB,IACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,QACvC,yBAAyB,CAAC,SAAS,eAAgB;AAErD,KAAI,gBAAgB,2BAA2B,QAC7C,QAAQ,KACN,CAAC,gBAAgB,EAAE,aAAa,oIAAoD,CAEnE,CAClB;CAIH,MAAMC,aAAkC,EAAE,GAAG,OAAQ;CAErD,IAAIC;AAEJ,KAAI,2BAA2B,QAC7B,oBAAoB,IAAI,kBAAyC;EAC/D,eAAe;GACb,GAAG;GACH;GACA;EACD;EACD;EACA;CACD;MACI;AACL,MAAI,OACF,WAAW,QAAQ;AAErB,MAAI,eACF,WAAW,gBAAgB;EAE7B,oBAAoB,IAAI,kBAAyC;GAC/D,eAAe;GACf;GACA,oBAAoB;GACpB;EACD;CACF;CAGD,MAAM,kBAAkB,mBAAmB;AAC3C,QAAO;AACR"}
|
+
{"version":3,"file":"universal.js","names":["className: string","config","e: unknown","model: string","modelProvider?: string","params: Record<string, any>","modelName: string","fields: ConfigurableModelFields","config?: RunnableConfig","messages: BaseMessage[]","options?: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","tools: BindToolsInput[]","params?: Record<string, any>","modelParams: Record<string, any>","str: string","prefix: string","mergedConfig: RunnableConfig","remainingConfig: RunnableConfig","input: RunInput","options?: CallOptions","inputs: RunInput[]","options?: Partial<CallOptions> | Partial<CallOptions>[]","batchOptions?: RunnableBatchOptions","generator: AsyncGenerator<RunInput>","options: CallOptions","options?: Partial<CallOptions>","streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">","options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n }","streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">","model?: string","fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }","paramsCopy: Record<string, any>","configurableModel: ConfigurableModel<RunInput, CallOptions>"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput\n extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {}\n\nexport interface ConfigurableChatModelCallOptions\n extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: \"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n 
className: \"ChatGoogleGenerativeAI\",\n },\n ollama: {\n package: \"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n mistral: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(className: string) {\n // Find the provider config that matches the class name\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, config]) => config.className === className\n );\n\n if (!providerEntry) {\n return undefined;\n }\n\n const [, config] = providerEntry;\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err &&\n typeof err.message === \"string\"\n ) {\n const msg = err.message.startsWith(\"Error: \")\n ? err.message.slice(\"Error: \".length)\n : err.message;\n const attemptedPackage = msg\n .split(\"Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n const ProviderClass = await getChatModelByClassName(config.className);\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n /**\n * Overrides the profiling information for the model. 
If not provided,\n * the profile will be inferred from the inner model instance.\n */\n profile?: ModelProfile;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n }\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n /** @internal */\n private _modelInstanceCache = new Map<\n string,\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n >();\n\n /** @internal */\n private _profile?: ModelProfile;\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n\n this._profile = fields.profile ?? undefined;\n }\n\n async _getModelInstance(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n // Check cache first\n const cacheKey = JSON.stringify(config ?? {});\n const cachedModel = this._modelInstanceCache.get(cacheKey);\n if (cachedModel) {\n return cachedModel;\n }\n\n // Initialize model with merged params\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations in sequence\n for (const [method, args] of Object.entries(this._queuedMethodOperations)) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n\n // Cache and return the initialized model\n this._modelInstanceCache.set(cacheKey, initializedModel);\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._getModelInstance(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._getModelInstance(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n 
async function* wrappedGenerator() {\n const model = await outerThis._getModelInstance(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n\n /**\n * Return profiling information for the model.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n */\n get profile(): ModelProfile {\n if (this._profile) {\n return this._profile;\n }\n const cacheKey = JSON.stringify({});\n const instance = this._modelInstanceCache.get(cacheKey);\n return instance?.profile ?? {};\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. 
\"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {ModelProfile} [fields.profile] - Overrides the profiling information for the model. If not provided,\n * the profile will be inferred from the inner model instance.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = 
await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-3-5-sonnet-20240620\",\n * foo_modelProvider: \"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Initialize a model with a custom profile\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const model = await initChatModel(\"gpt-4o-mini\", {\n * profile: {\n * maxInputTokens: 100000,\n * },\n * });\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... 
-> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, profile, ...params } =\n {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const [provider, ...remainingParts] = model.split(\":\");\n const modelComponents =\n remainingParts.length === 0\n ? [provider]\n : [provider, remainingParts.join(\":\")];\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] } to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n let configurableModel: ConfigurableModel<RunInput, CallOptions>;\n\n if (configurableFieldsCopy === undefined) {\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n profile,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n profile,\n });\n }\n\n // Initialize the model instance to make sure a profile is available\n await configurableModel._getModelInstance();\n return 
configurableModel;\n}\n"],"mappings":";;;;;;;;;;;;;;AAmDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;CACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;CACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;AACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;AAaD,eAAsB,wBAAwBA,WAAmB;CAE/D,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,KAC1D,CAAC,GAAGC,SAAO,KAAKA,SAAO,cAAc,UACtC;AAED,KAAI,CAAC,cACH,QAAO;CAGT,MAAM,GAAG,OAAO,GAAG;AACnB,KAAI;EACF,MAAM,SAAS,MAAM,OAAO,OAAO;AACnC,SAAO,OAAO,OAAO;CACtB,SAAQC,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,OACb,OAAO,IAAI,YAAY,UACvB;GACA,MAAM,MAAM,IAAI,QAAQ,WAAW,UAAU,GACzC,IAAI,QAAQ,MAAM,EAAiB,GACnC,IAAI;GACR,MAAM,mBAAmB,IACtB,MAAM,wBAAwB,CAAC,GAC/B,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,CAAC,iBAAiB,EAAE,iBAAiB,oCAAsB,EACxC,iBAAiB,qBAAqB,EAAE,iBAAiB,EAAE,CAAC;EAElF;AACD,QAAM;CACP;AACF;AAED,eAAe,qBACbC,OACAC,eAEAC,SAA8B,CAAE,GACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,CAAC,4CAA4C,EAAE,MAAM,0CAA0C,CAAC;CAIpG,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,CAAC,6BAA6B,EAAE,kBAAkB,sCAAsC,EAAE,WAAW;CAExG;CAED,MAAM,EAAE,eAAe,QAAS,GAAG,cAAc,GAAG;CACpD,MAAM,gBAAgB,MAAM,wBAAwB,OAAO,UAAU;AACrE,QAAO,IAAI,cAAc;EAAE;EAAO,GAAG;CAAc;AACpD;;;;;;;;;;;;AAaD,SAAgB,oBAAoBC,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP,QAAO;AAEV;;;;;;AA+BD,IAAa,oBAAb,MAAa,0BAGH,cAA2C;CACnD,WAAmB;AACjB,SAAO;CACR;CAED,eAAe,CAAC,aAAa,aAAc;CAG3C,iBAAuC,CAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,CAAE;;CAGjD,AAAQ,sCAAsB,IAAI;;CAMlC,AAAQ;CAER,YAAYC,QAAiC;EAC3C,MAAM,OAAO;EACb,KAAK,iBAAiB,OAAO,iBAAiB,CAAE;AAEhD,MAAI,OAAO,uBAAuB,OAChC,KAAK,sBAAsB;OAE3B,KAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,eACD;AAGH,MAAI,OAAO,cACT,KAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa,CAAC,CAAC;OAE7B,KAAK,gBAAgB;EAGvB,KAAK,0BACH,OAAO,0BAA0B,KAAK;EAExC,KAAK,WAAW,OAAO,WAAW;CACnC;CAED,MAAM,kBACJC,QAGA;EAEA,MAAM,WAAW,KAAK,UAAU,UAAU,CAAE,EAAC;EAC7C,MAAM,cAAc,KAAK,oBAAoB,IAAI,SAAS;AAC1D,MAAI,YACF,QAAO;EAIT,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;EAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;AAGD,OAAK,MAAM,CAAC,QAAQ,KAAK,IAAI,OAAO,QAAQ,KAAK,wBAAwB,CACvE,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,YAG7C,mBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;EAKvE,KAAK,oBAAoB,IAAI,UAAU,iBAAiB;AACxD,SAAO;CACR;CAED,MAAM,UACJC,UACAC,SACAC,YACqB;EACrB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;AACnD,SAAO,MAAM,UAAU,UAAU,WAAW,CAAE,GAAE,WAAW;CAC5D;CAED,AAAS,UACPC,OAEAC,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,YAAY,CAAC,OAAO,MAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,uBAA8D,CAC5D,QACA,GAAG,SACmD;EACtD,MAAM
,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,uBAAuB,CAAC,QAAQ,GAAG,IAAK;AAC5D,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,aAAaL,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,CAAE;EAE/C,IAAIM,cAAmC,CAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;GAC/D,YAAY,eAAe;EAC5B;AAGH,MAAI,KAAK,wBAAwB,OAC/B,cAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,OAAO,CAAC,CAAC,IAAI,KACvC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;CACR;CAED,cAAcC,KAAaC,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;CAC5D;;;;;;CAOD,WACER,QACwD;EACxD,MAAMS,eAA+B,EAAE,GAAI,UAAU,CAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAMC,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,MAAM,eAAe,CACnE;EAED,gBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,CAAE,EAAC,CAAC,OAC9C,CAAC,CAAC,EAAE,KACF,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;EAED,MAAM,uBAAuB,IAAI,kBAAyC;GACxE,eAAe;IAAE,GAAG,KAAK;IAAgB,GAAG;GAAa;GACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,mBAAoB,IAC7B,KAAK;GACT,cAAc,KAAK;GACnB,wBAAwB,KAAK;EAC9B;AAED,SAAO,IAAI,gBAAuD;GAChE,QAAQ;GACR,OAAO;EACR;CACF;CAED,MAAM,OACJC,OACAC,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,SAAS,aAAa,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;CACnC;CAED,MAAM,OACJD,OACAC,SACiD;EACjD,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,mBAAmB,IAAI,wBAAwB;GACnD,WAAW,MAAM,MAAM,OAAO,OAAO,QAAQ;GAC7C,QAAQ;EACT;EACD,MAAM,iBAAiB;AACvB,SAAO,uBAAuB,mBAAmB,iBAAiB;CACnE;CAoBD,MAAM,MACJC,QACAC,SACAC,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;CAClD;CAED,OAAO,UACLC,WACAC,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,SAAS,aAAa,QAAQ;EAEpC,OAAO,MAAM,UAAU,WAAW,OAAO;CAC1C;CAED,OAAO,UACLN,OACAO,SACAC,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,SAAS,aAAa,QAAQ;EAEpC,OAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;EAC7B,EAAC;CACH;CAiBD,aACER,OACAS,SAIAC,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,kBAAkB,QAAQ;GACxD,MAAM,SAAS,aAAa,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,aACxB,MAAM;EAET;AACD,SAAO,uBAAuB,mBAAmB,kBAAkB,CAAC;CACrE;;;;;;CAOD,IAAI,UAAwB;AAC1B,MAAI,KAAK,SACP,QAAO,KAAK;EAEd,MAAM,WAAW,KAAK,UAAU,CAAE,EAAC;EACnC,MAAM,WAAW,KAAK,oBAAoB,IAAI,SAAS;AACvD,SAAO,UAAU,WAAW,CAAE;CAC/B;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsRD,eAAsB,cAIpBC,OAEAC,QAMmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,eAAe,QAAS,GAAG,QAAQ,GACzE;EACE,cAAc;EACd,GAAI,UAAU,CAAE;CACjB;AACH,KAAI,kBAAkB,UAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,CAAC,UAAU,GAAG,eAAe,GAAG,MAAM,MAAM,IAAI;EACtD,MAAM,kBACJ,eAAe,WAAW,IACtB,CAAC,QAAS,IACV,CAAC,UAAU,eAAe,KAAK,IAAI,AAAC;AAC1C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,EAEvE,CAAC,eAAe,MAAM,GAAG;CAE5B;CACD,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,kBAAmB,IACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,QACvC,yBAAyB,CAAC,SAAS,eAAgB;AAErD,KAAI,gBAAgB,2BAA2B,QAC7C,QAAQ,KACN,CAAC,gBAAgB,EAAE,aAAa,oIAAoD,CAEnE,CAClB;CAIH,MAAMC,aAAkC,EAAE,GAAG,OAAQ;CAErD,IAAIC;AAEJ,KAAI,2BAA2B,QAC7B,oBAAoB,IAAI,kBAAyC;EAC/D,eAAe;GACb,GAAG;GACH;GACA;EACD;EACD;EACA;CACD;MACI;AACL,MAAI,OACF,WAAW,QAAQ;AAErB,MAAI,eACF,WAAW,gBAAgB;EAE7B,oBAAoB,IAAI,kBAAyC;GAC/D,eAAe;GACf;GACA,oBAAoB;GACpB;EACD;CACF;CAGD,MAAM,kBAAkB,mBAAmB;AAC3C,QAAO;AACR"}
|
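The updated `sourcesContent` embedded in the source map above shows two related additions to the universal chat model: `initChatModel` accepts a `profile` override, and `ConfigurableModel` exposes a `profile` getter that falls back to the cached inner model instance. A minimal sketch of the documented usage (the `maxInputTokens` field is taken from the embedded doc example; top-level await is assumed):

```typescript
import { initChatModel } from "langchain/chat_models/universal";

// Override the profiling information instead of inferring it from the
// underlying model instance.
const model = await initChatModel("gpt-4o-mini", {
  profile: {
    maxInputTokens: 100000,
  },
});

// The getter returns the override when set; otherwise it returns the inner
// model's profile (or an empty object before any instance is cached).
console.log(model.profile);
```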
package/dist/hub/index.cjs
CHANGED
@@ -16,12 +16,19 @@ const require_base = require('./base.cjs');
  * for non-OpenAI models. If you are running in Node or another environment that supports dynamic imports,
  * you may instead import this function from "langchain/hub/node" and pass "includeModel: true" instead
  * of specifying this parameter.
+ * @param options.secrets A map of secrets to use when loading, e.g.
+ * {'OPENAI_API_KEY': 'sk-...'}`.
+ * If a secret is not found in the map, it will be loaded from the
+ * environment if `secrets_from_env` is `True`. Should only be needed when
+ * `includeModel` is `true`.
+ * @param options.secretsFromEnv Whether to load secrets from environment variables.
+ * Use with caution and only with trusted prompts.
  * @returns
  */
 async function pull(ownerRepoCommit, options) {
   const promptObject = await require_base.basePull(ownerRepoCommit, options);
   try {
-    const loadedPrompt = await require_load_index.load(JSON.stringify(promptObject.manifest),
+    const loadedPrompt = await require_load_index.load(JSON.stringify(promptObject.manifest), options?.secrets, require_base.generateOptionalImportMap(options?.modelClass), require_base.generateModelImportMap(options?.modelClass), options?.secretsFromEnv);
     return require_base.bindOutputSchema(loadedPrompt);
   } catch (e) {
     if (options?.includeModel) throw new Error([
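Taken together with the option types recorded in the source map below (`secrets?: Record<string, string>`, `secretsFromEnv?: boolean`), the new wiring can be exercised like this — a sketch that reuses the `modelClass` example from the embedded error message; "my-prompt" and the key value are placeholders:

```typescript
import { pull } from "langchain/hub";
import { ChatAnthropic } from "@langchain/anthropic";

const prompt = await pull("my-prompt", {
  includeModel: true,
  modelClass: ChatAnthropic,
  // Secrets are looked up in this map first...
  secrets: { ANTHROPIC_API_KEY: "sk-..." },
  // ...and only fall back to environment variables when this flag is true.
  // The docstring advises enabling the fallback only for trusted prompts.
  secretsFromEnv: false,
});
```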
package/dist/hub/index.cjs.map
CHANGED
@@ -1 +1 @@
- (single-line source map for index.cjs; truncated in the registry diff view)
+ (regenerated single-line source map: `names` and the embedded `sourcesContent` for src/hub/index.ts now carry the `secrets?: Record<string, string>` and `secretsFromEnv?: boolean` options on `pull` plus the five-argument `load(...)` call shown in the index.cjs hunk above, and `mappings` is rebuilt to match)
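For callers that load a serialized manifest directly, the compiled hunk above implies `load` now threads the same two values through as its second and fifth arguments. A sketch under the assumption that the import-map parameters remain optional and may be left undefined (`manifestJson` is a hypothetical serialized prompt manifest):

```typescript
import { load } from "langchain/load";

declare const manifestJson: string; // hypothetical serialized prompt manifest

const prompt = await load(
  manifestJson,
  { OPENAI_API_KEY: "sk-..." }, // secrets map, consulted first
  undefined, // optional import map (as produced by the hub helpers)
  undefined, // model import map
  false // secretsFromEnv: skip the process.env fallback
);
```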