@langchain/classic 1.0.14 → 1.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +11 -0
  2. package/dist/agents/chat/outputParser.d.ts +2 -2
  3. package/dist/agents/chat/outputParser.d.ts.map +1 -1
  4. package/dist/agents/react/index.d.ts +2 -2
  5. package/dist/agents/react/index.d.ts.map +1 -1
  6. package/dist/agents/structured_chat/index.d.ts +2 -2
  7. package/dist/agents/structured_chat/index.d.ts.map +1 -1
  8. package/dist/agents/tool_calling/index.d.ts +2 -2
  9. package/dist/agents/tool_calling/index.d.ts.map +1 -1
  10. package/dist/chat_models/universal.cjs +12 -6
  11. package/dist/chat_models/universal.cjs.map +1 -1
  12. package/dist/chat_models/universal.d.cts +4 -2
  13. package/dist/chat_models/universal.d.cts.map +1 -1
  14. package/dist/chat_models/universal.d.ts +4 -2
  15. package/dist/chat_models/universal.d.ts.map +1 -1
  16. package/dist/chat_models/universal.js +12 -6
  17. package/dist/chat_models/universal.js.map +1 -1
  18. package/dist/evaluation/agents/trajectory.d.cts +2 -2
  19. package/dist/evaluation/agents/trajectory.d.cts.map +1 -1
  20. package/dist/evaluation/comparison/pairwise.d.cts +3 -3
  21. package/dist/evaluation/comparison/pairwise.d.cts.map +1 -1
  22. package/dist/evaluation/criteria/criteria.d.cts +3 -3
  23. package/dist/evaluation/criteria/criteria.d.cts.map +1 -1
  24. package/dist/experimental/prompts/custom_format.d.cts.map +1 -1
  25. package/dist/experimental/prompts/handlebars.d.cts.map +1 -1
  26. package/dist/hub/node.cjs +27 -1
  27. package/dist/hub/node.cjs.map +1 -1
  28. package/dist/hub/node.d.cts +10 -1
  29. package/dist/hub/node.d.cts.map +1 -1
  30. package/dist/hub/node.d.ts +10 -1
  31. package/dist/hub/node.d.ts.map +1 -1
  32. package/dist/hub/node.js +27 -2
  33. package/dist/hub/node.js.map +1 -1
  34. package/dist/retrievers/matryoshka_retriever.d.cts.map +1 -1
  35. package/package.json +8 -8
package/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  # @langchain/classic
 
+ ## 1.0.15
+
+ ### Patch Changes
+
+ - [#9763](https://github.com/langchain-ai/langchainjs/pull/9763) [`8f0757f`](https://github.com/langchain-ai/langchainjs/commit/8f0757f06b2ed9fe810f636333fc71ffcedb3feb) Thanks [@AdamParker19](https://github.com/AdamParker19)! - fix(langchain): resolve className collision in MODEL_PROVIDER_CONFIG
+
+   Refactored `getChatModelByClassName` to accept an optional `modelProvider` parameter for direct lookup, avoiding the className collision issue where multiple providers share the same className (e.g., `google-vertexai` and `google-vertexai-web` both use `"ChatVertexAI"`). When `modelProvider` is provided, the function uses direct config lookup instead of searching by className. Backward compatibility is maintained for existing callers that only pass `className`. This eliminates the duplicated import logic that was previously in `_initChatModelHelper`.
+
+ - Updated dependencies [[`0870ca0`](https://github.com/langchain-ai/langchainjs/commit/0870ca0719dacd8a555b3341e581d6c15cd6faf3), [`cf46089`](https://github.com/langchain-ai/langchainjs/commit/cf46089d250b1ec87f99956f5cd87e2615ac25c5)]:
+   - @langchain/openai@1.2.5
+
  ## 1.0.14
 
  ### Patch Changes
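To illustrate the collision the changelog entry describes: both `google-vertexai` and `google-vertexai-web` map to the className `"ChatVertexAI"`, so a search by className alone can only ever resolve to whichever entry appears first. A minimal sketch in TypeScript (the two-entry config below is a subset of the real `MODEL_PROVIDER_CONFIG` shown in the `universal.cjs` diff further down; the `console.log` harness is illustrative, not part of the package):

```typescript
// Subset of MODEL_PROVIDER_CONFIG from dist/chat_models/universal.cjs.
const MODEL_PROVIDER_CONFIG = {
  "google-vertexai": {
    package: "@langchain/google-vertexai",
    className: "ChatVertexAI",
  },
  "google-vertexai-web": {
    package: "@langchain/google-vertexai-web",
    className: "ChatVertexAI",
  },
} as const;

// Old lookup: search by className. Both entries match "ChatVertexAI",
// so this always returns the first one; "google-vertexai-web" is unreachable.
const byClassName = Object.entries(MODEL_PROVIDER_CONFIG).find(
  ([, config]) => config.className === "ChatVertexAI"
);
console.log(byClassName?.[0]); // "google-vertexai"

// New lookup: when the provider key is known, index the config directly.
const byProvider = MODEL_PROVIDER_CONFIG["google-vertexai-web"];
console.log(byProvider.package); // "@langchain/google-vertexai-web"
```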
package/dist/agents/chat/outputParser.d.ts CHANGED
@@ -51,12 +51,12 @@ declare class ChatAgentOutputParser extends AgentActionOutputParser {
  * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.
  */
  parse(text: string): Promise<{
- tool?: undefined;
- toolInput?: undefined;
  returnValues: {
  output: string;
  };
  log: string;
+ tool?: undefined;
+ toolInput?: undefined;
  } | {
  returnValues?: undefined;
  tool: any;
package/dist/agents/chat/outputParser.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"outputParser.d.ts","names":["AgentActionOutputParser","FINAL_ANSWER_ACTION","ChatAgentOutputParser","Promise"],"sources":["../../../src/agents/chat/outputParser.d.ts"],"sourcesContent":["import { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends the AgentActionOutputParser to parse the output of\n * the ChatAgent in LangChain. It checks if the output text contains the\n * final answer action or a JSON response, and parses it accordingly.\n * @example\n * ```typescript\n * const prompt = ChatPromptTemplate.fromMessages([\n * [\n * \"ai\",\n * `{PREFIX}\n * {FORMAT_INSTRUCTIONS}\n * {SUFFIX}`,\n * ],\n * [\"human\", \"Question: {input}\"],\n * ]);\n * const runnableAgent = RunnableSequence.from([\n * {\n * input: (i: { input: string; steps: AgentStep[] }) => i.input,\n * agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>\n * formatLogToString(i.steps),\n * },\n * prompt,\n * new OpenAI({ temperature: 0 }),\n * new ChatAgentOutputParser(),\n * ]);\n *\n * const executor = AgentExecutor.fromAgentAndTools({\n * agent: runnableAgent,\n * tools: [new SerpAPI(), new Calculator()],\n * });\n *\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport declare class ChatAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n /**\n * Parses the output text from the MRKL chain into an agent action or\n * agent finish. If the text contains the final answer action or does not\n * contain an action, it returns an AgentFinish with the output and log.\n * If the text contains a JSON response, it returns the tool, toolInput,\n * and log.\n * @param text The output text from the MRKL chain.\n * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.\n */\n parse(text: string): Promise<{\n tool?: undefined;\n toolInput?: undefined;\n returnValues: {\n output: string;\n };\n log: string;\n } | {\n returnValues?: undefined;\n tool: any;\n toolInput: any;\n log: string;\n }>;\n /**\n * Returns the format instructions used in the output parser for the\n * ChatAgent class.\n * @returns The format instructions as a string.\n */\n getFormatInstructions(): string;\n}\n//# sourceMappingURL=outputParser.d.ts.map"],"mappings":";;;;;AAuCA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAAqBE,qBAAAA,SAA8BF,uBAAuB;;;;;;;;;;;uBAWjDG"}
+ {"version":3,"file":"outputParser.d.ts","names":["AgentActionOutputParser","FINAL_ANSWER_ACTION","ChatAgentOutputParser","Promise"],"sources":["../../../src/agents/chat/outputParser.d.ts"],"sourcesContent":["import { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends the AgentActionOutputParser to parse the output of\n * the ChatAgent in LangChain. It checks if the output text contains the\n * final answer action or a JSON response, and parses it accordingly.\n * @example\n * ```typescript\n * const prompt = ChatPromptTemplate.fromMessages([\n * [\n * \"ai\",\n * `{PREFIX}\n * {FORMAT_INSTRUCTIONS}\n * {SUFFIX}`,\n * ],\n * [\"human\", \"Question: {input}\"],\n * ]);\n * const runnableAgent = RunnableSequence.from([\n * {\n * input: (i: { input: string; steps: AgentStep[] }) => i.input,\n * agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>\n * formatLogToString(i.steps),\n * },\n * prompt,\n * new OpenAI({ temperature: 0 }),\n * new ChatAgentOutputParser(),\n * ]);\n *\n * const executor = AgentExecutor.fromAgentAndTools({\n * agent: runnableAgent,\n * tools: [new SerpAPI(), new Calculator()],\n * });\n *\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport declare class ChatAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n /**\n * Parses the output text from the MRKL chain into an agent action or\n * agent finish. If the text contains the final answer action or does not\n * contain an action, it returns an AgentFinish with the output and log.\n * If the text contains a JSON response, it returns the tool, toolInput,\n * and log.\n * @param text The output text from the MRKL chain.\n * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n returnValues?: undefined;\n tool: any;\n toolInput: any;\n log: string;\n }>;\n /**\n * Returns the format instructions used in the output parser for the\n * ChatAgent class.\n * @returns The format instructions as a string.\n */\n getFormatInstructions(): string;\n}\n//# sourceMappingURL=outputParser.d.ts.map"],"mappings":";;;;;AAuCA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAAqBE,qBAAAA,SAA8BF,uBAAuB;;;;;;;;;;;uBAWjDG"}
package/dist/agents/react/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { AgentRunnableSequence } from "../agent.js";
  import { BasePromptTemplate } from "@langchain/core/prompts";
  import { ToolInterface } from "@langchain/core/tools";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
- import * as _langchain_core_agents0 from "@langchain/core/agents";
+ import * as _langchain_core_agents5 from "@langchain/core/agents";
  import { AgentStep } from "@langchain/core/agents";
 
  //#region src/agents/react/index.d.ts
@@ -75,7 +75,7 @@ declare function createReactAgent({
  streamRunnable
  }: CreateReactAgentParams): Promise<AgentRunnableSequence<{
  steps: AgentStep[];
- }, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
+ }, _langchain_core_agents5.AgentAction | _langchain_core_agents5.AgentFinish>>;
  //#endregion
  export { CreateReactAgentParams, createReactAgent };
  //# sourceMappingURL=index.d.ts.map
package/dist/agents/react/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAEzBH;EAEEF,GAAAA,EAFFE,0BAEEF;EAKCC;EAAkB,KAAA,EALnBD,aAKmB,EAAA;EAkDNM;;;;EAAuCI,MAAAA,EAlDnDT,kBAkDmDS;EAAkBL;;;;EAAiCD,cAAAA,CAAAA,EAAAA,OAAAA;CAARU;AAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFR,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
+ {"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents5","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAEzBH;EAEEF,GAAAA,EAFFE,0BAEEF;EAKCC;EAAkB,KAAA,EALnBD,aAKmB,EAAA;EAkDNM;;;;EAAuCI,MAAAA,EAlDnDT,kBAkDmDS;EAAkBL;;;;EAAiCD,cAAAA,CAAAA,EAAAA,OAAAA;CAARU;AAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFR,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
package/dist/agents/structured_chat/index.d.ts CHANGED
@@ -5,7 +5,7 @@ import { StructuredChatOutputParserWithRetries } from "./outputParser.js";
  import { BaseMessagePromptTemplate, BasePromptTemplate, ChatPromptTemplate } from "@langchain/core/prompts";
  import { StructuredToolInterface } from "@langchain/core/tools";
  import { BaseLanguageModelInterface, ToolDefinition } from "@langchain/core/language_models/base";
- import * as _langchain_core_agents1 from "@langchain/core/agents";
+ import * as _langchain_core_agents0 from "@langchain/core/agents";
  import { AgentStep } from "@langchain/core/agents";
 
  //#region src/agents/structured_chat/index.d.ts
@@ -176,7 +176,7 @@ declare function createStructuredChatAgent({
  streamRunnable
  }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{
  steps: AgentStep[];
- }, _langchain_core_agents1.AgentAction | _langchain_core_agents1.AgentFinish>>;
+ }, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
  //#endregion
  export { CreateStructuredChatAgentParams, StructuredChatAgent, StructuredChatAgentInput, StructuredChatCreatePromptArgs, createStructuredChatAgent };
  //# sourceMappingURL=index.d.ts.map
package/dist/agents/structured_chat/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseLanguageModelInterface","ToolDefinition","BasePromptTemplate","BaseMessagePromptTemplate","ChatPromptTemplate","AgentStep","Optional","Agent","AgentArgs","AgentRunnableSequence","OutputParserArgs","AgentInput","StructuredChatOutputParserWithRetries","StructuredChatCreatePromptArgs","StructuredChatAgentInput","StructuredChatAgent","Promise","CreateStructuredChatAgentParams","createStructuredChatAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents1","AgentAction","AgentFinish"],"sources":["../../../src/agents/structured_chat/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { type BaseLanguageModelInterface, type ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessagePromptTemplate, ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { Optional } from \"../../types/type-utils.js\";\nimport { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { StructuredChatOutputParserWithRetries } from \"./outputParser.js\";\n/**\n * Interface for arguments used to create a prompt for a\n * StructuredChatAgent.\n */\nexport interface StructuredChatCreatePromptArgs {\n /** String to put after the list of tools. */\n suffix?: string;\n /** String to put before the list of tools. */\n prefix?: string;\n /** String to use directly as the human message template. */\n humanMessageTemplate?: string;\n /** List of input variables the final prompt will expect. */\n inputVariables?: string[];\n /** List of historical prompts from memory. */\n memoryPrompts?: BaseMessagePromptTemplate[];\n}\n/**\n * Type for input data for creating a StructuredChatAgent, with the\n * 'outputParser' property made optional.\n */\nexport type StructuredChatAgentInput = Optional<AgentInput, \"outputParser\">;\n/**\n * Agent that interoperates with Structured Tools using React logic.\n * @augments Agent\n */\nexport declare class StructuredChatAgent extends Agent {\n static lc_name(): string;\n lc_namespace: string[];\n constructor(input: StructuredChatAgentInput);\n _agentType(): \"structured-chat-zero-shot-react-description\";\n observationPrefix(): string;\n llmPrefix(): string;\n _stop(): string[];\n /**\n * Validates that all provided tools have a description. Throws an error\n * if any tool lacks a description.\n * @param tools Array of StructuredTool instances to validate.\n */\n static validateTools(tools: StructuredToolInterface[]): void;\n /**\n * Returns a default output parser for the StructuredChatAgent. If an LLM\n * is provided, it creates an output parser with retry logic from the LLM.\n * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.\n * @returns An instance of StructuredChatOutputParserWithRetries.\n */\n static getDefaultOutputParser(fields?: OutputParserArgs & {\n toolNames: string[];\n }): StructuredChatOutputParserWithRetries;\n /**\n * Constructs the agent's scratchpad from a list of steps. 
If the agent's\n * scratchpad is not empty, it prepends a message indicating that the\n * agent has not seen any previous work.\n * @param steps Array of AgentStep instances to construct the scratchpad from.\n * @returns A Promise that resolves to a string representing the agent's scratchpad.\n */\n constructScratchPad(steps: AgentStep[]): Promise<string>;\n /**\n * Creates a string representation of the schemas of the provided tools.\n * @param tools Array of StructuredTool instances to create the schemas string from.\n * @returns A string representing the schemas of the provided tools.\n */\n static createToolSchemasString(tools: StructuredToolInterface[]): string;\n /**\n * Create prompt in the style of the agent.\n *\n * @param tools - List of tools the agent will have access to, used to format the prompt.\n * @param args - Arguments to create the prompt with.\n * @param args.suffix - String to put after the list of tools.\n * @param args.prefix - String to put before the list of tools.\n * @param args.inputVariables List of input variables the final prompt will expect.\n * @param args.memoryPrompts List of historical prompts from memory.\n */\n static createPrompt(tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate<any, any>;\n /**\n * Creates a StructuredChatAgent from an LLM and a list of tools.\n * Validates the tools, creates a prompt, and sets up an LLM chain for the\n * agent.\n * @param llm BaseLanguageModel instance to create the agent from.\n * @param tools Array of StructuredTool instances to create the agent from.\n * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.\n * @returns A new instance of StructuredChatAgent.\n */\n static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;\n}\n/**\n * Params used by the createStructuredChatAgent function.\n */\nexport type CreateStructuredChatAgentParams = {\n /** LLM to use as the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: (StructuredToolInterface | ToolDefinition)[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent aimed at supporting tools with multiple inputs.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createStructuredChatAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/structured-chat-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createStructuredChatAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createStructuredChatAgent({ llm, tools, prompt, streamRunnable }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;;;;AAaA;AAgBA;AAKqBgB,UArBJF,8BAAAA,CAqBuB;EAGjBC;EAUSf,MAAAA,CAAAA,EAAAA,MAAAA;EAOWW;EAEnCE,MAAAA,CAAAA,EAAAA,MAAAA;EAQuBP;EAAcW,oBAAAA,CAAAA,EAAAA,MAAAA;EAMHjB;EAWXA,cAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkCc;EAAiCT,aAAAA,CAAAA,EA1D9ED,yBA0D8EC,EAAAA;;;;;;AA/CjDG,KALrCO,wBAAAA,GAA2BR,QAKUC,CALDI,UAKCJ,EAAAA,cAAAA,CAAAA;AAAK;AA8DtD;;;AAIsCN,cAlEjBc,mBAAAA,SAA4BR,KAAAA,CAkEXN;EAK1BC,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAkB,YAAA,EAAA,MAAA,EAAA;EA+DNgB,WAAAA,CAAAA,KAAAA,EAnIDJ,wBAmI0B;EAAGK,UAAAA,CAAAA,CAAAA,EAAAA,6CAAAA;EAAKC,iBAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAOC,SAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAQC,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkBL;;;;;EAAkCD,OAAAA,aAAAA,CAAAA,KAAAA,EAzH5FjB,uBAyH4FiB,EAAAA,CAAAA,EAAAA,IAAAA;EAAO;;;;;;yCAlHxFN;;MAEnCE;;;;;;;;6BAQuBP,cAAcW;;;;;;wCAMHjB;;;;;;;;;;;6BAWXA,kCAAkCc,iCAAiCT;;;;;;;;;;8BAUlEJ,mCAAmCD,kCAAkCc,iCAAiCL,YAAYO;;;;;KAKtIE,+BAAAA;;OAEHjB;;UAEGD,0BAA0BE;;;;;UAK1BC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA+DYgB,yBAAAA;;;;;GAAkED,kCAAkCD,QAAQP;SACzHJ;GAASkB,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
+ {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseLanguageModelInterface","ToolDefinition","BasePromptTemplate","BaseMessagePromptTemplate","ChatPromptTemplate","AgentStep","Optional","Agent","AgentArgs","AgentRunnableSequence","OutputParserArgs","AgentInput","StructuredChatOutputParserWithRetries","StructuredChatCreatePromptArgs","StructuredChatAgentInput","StructuredChatAgent","Promise","CreateStructuredChatAgentParams","createStructuredChatAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish"],"sources":["../../../src/agents/structured_chat/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { type BaseLanguageModelInterface, type ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessagePromptTemplate, ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { Optional } from \"../../types/type-utils.js\";\nimport { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { StructuredChatOutputParserWithRetries } from \"./outputParser.js\";\n/**\n * Interface for arguments used to create a prompt for a\n * StructuredChatAgent.\n */\nexport interface StructuredChatCreatePromptArgs {\n /** String to put after the list of tools. */\n suffix?: string;\n /** String to put before the list of tools. */\n prefix?: string;\n /** String to use directly as the human message template. */\n humanMessageTemplate?: string;\n /** List of input variables the final prompt will expect. */\n inputVariables?: string[];\n /** List of historical prompts from memory. */\n memoryPrompts?: BaseMessagePromptTemplate[];\n}\n/**\n * Type for input data for creating a StructuredChatAgent, with the\n * 'outputParser' property made optional.\n */\nexport type StructuredChatAgentInput = Optional<AgentInput, \"outputParser\">;\n/**\n * Agent that interoperates with Structured Tools using React logic.\n * @augments Agent\n */\nexport declare class StructuredChatAgent extends Agent {\n static lc_name(): string;\n lc_namespace: string[];\n constructor(input: StructuredChatAgentInput);\n _agentType(): \"structured-chat-zero-shot-react-description\";\n observationPrefix(): string;\n llmPrefix(): string;\n _stop(): string[];\n /**\n * Validates that all provided tools have a description. Throws an error\n * if any tool lacks a description.\n * @param tools Array of StructuredTool instances to validate.\n */\n static validateTools(tools: StructuredToolInterface[]): void;\n /**\n * Returns a default output parser for the StructuredChatAgent. If an LLM\n * is provided, it creates an output parser with retry logic from the LLM.\n * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.\n * @returns An instance of StructuredChatOutputParserWithRetries.\n */\n static getDefaultOutputParser(fields?: OutputParserArgs & {\n toolNames: string[];\n }): StructuredChatOutputParserWithRetries;\n /**\n * Constructs the agent's scratchpad from a list of steps. 
If the agent's\n * scratchpad is not empty, it prepends a message indicating that the\n * agent has not seen any previous work.\n * @param steps Array of AgentStep instances to construct the scratchpad from.\n * @returns A Promise that resolves to a string representing the agent's scratchpad.\n */\n constructScratchPad(steps: AgentStep[]): Promise<string>;\n /**\n * Creates a string representation of the schemas of the provided tools.\n * @param tools Array of StructuredTool instances to create the schemas string from.\n * @returns A string representing the schemas of the provided tools.\n */\n static createToolSchemasString(tools: StructuredToolInterface[]): string;\n /**\n * Create prompt in the style of the agent.\n *\n * @param tools - List of tools the agent will have access to, used to format the prompt.\n * @param args - Arguments to create the prompt with.\n * @param args.suffix - String to put after the list of tools.\n * @param args.prefix - String to put before the list of tools.\n * @param args.inputVariables List of input variables the final prompt will expect.\n * @param args.memoryPrompts List of historical prompts from memory.\n */\n static createPrompt(tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate<any, any>;\n /**\n * Creates a StructuredChatAgent from an LLM and a list of tools.\n * Validates the tools, creates a prompt, and sets up an LLM chain for the\n * agent.\n * @param llm BaseLanguageModel instance to create the agent from.\n * @param tools Array of StructuredTool instances to create the agent from.\n * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.\n * @returns A new instance of StructuredChatAgent.\n */\n static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;\n}\n/**\n * Params used by the createStructuredChatAgent function.\n */\nexport type CreateStructuredChatAgentParams = {\n /** LLM to use as the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: (StructuredToolInterface | ToolDefinition)[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent aimed at supporting tools with multiple inputs.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createStructuredChatAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/structured-chat-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createStructuredChatAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createStructuredChatAgent({ llm, tools, prompt, streamRunnable }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;;;;AAaA;AAgBA;AAKqBgB,UArBJF,8BAAAA,CAqBuB;EAGjBC;EAUSf,MAAAA,CAAAA,EAAAA,MAAAA;EAOWW;EAEnCE,MAAAA,CAAAA,EAAAA,MAAAA;EAQuBP;EAAcW,oBAAAA,CAAAA,EAAAA,MAAAA;EAMHjB;EAWXA,cAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkCc;EAAiCT,aAAAA,CAAAA,EA1D9ED,yBA0D8EC,EAAAA;;;;;;AA/CjDG,KALrCO,wBAAAA,GAA2BR,QAKUC,CALDI,UAKCJ,EAAAA,cAAAA,CAAAA;AAAK;AA8DtD;;;AAIsCN,cAlEjBc,mBAAAA,SAA4BR,KAAAA,CAkEXN;EAK1BC,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAkB,YAAA,EAAA,MAAA,EAAA;EA+DNgB,WAAAA,CAAAA,KAAAA,EAnIDJ,wBAmI0B;EAAGK,UAAAA,CAAAA,CAAAA,EAAAA,6CAAAA;EAAKC,iBAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAOC,SAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAQC,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkBL;;;;;EAAkCD,OAAAA,aAAAA,CAAAA,KAAAA,EAzH5FjB,uBAyH4FiB,EAAAA,CAAAA,EAAAA,IAAAA;EAAO;;;;;;yCAlHxFN;;MAEnCE;;;;;;;;6BAQuBP,cAAcW;;;;;;wCAMHjB;;;;;;;;;;;6BAWXA,kCAAkCc,iCAAiCT;;;;;;;;;;8BAUlEJ,mCAAmCD,kCAAkCc,iCAAiCL,YAAYO;;;;;KAKtIE,+BAAAA;;OAEHjB;;UAEGD,0BAA0BE;;;;;UAK1BC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA+DYgB,yBAAAA;;;;;GAAkED,kCAAkCD,QAAQP;SACzHJ;GAASkB,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
package/dist/agents/tool_calling/index.d.ts CHANGED
@@ -3,7 +3,7 @@ import { AgentRunnableSequence } from "../agent.js";
  import { ChatPromptTemplate } from "@langchain/core/prompts";
  import { StructuredToolInterface } from "@langchain/core/tools";
  import { LanguageModelLike, ToolDefinition } from "@langchain/core/language_models/base";
- import * as _langchain_core_agents5 from "@langchain/core/agents";
+ import * as _langchain_core_agents1 from "@langchain/core/agents";
 
  //#region src/agents/tool_calling/index.d.ts
  /**
@@ -83,7 +83,7 @@ declare function createToolCallingAgent({
  streamRunnable
  }: CreateToolCallingAgentParams): AgentRunnableSequence<{
  steps: ToolsAgentStep[];
- }, _langchain_core_agents5.AgentFinish | _langchain_core_agents5.AgentAction[]>;
+ }, _langchain_core_agents1.AgentFinish | _langchain_core_agents1.AgentAction[]>;
  //#endregion
  export { CreateToolCallingAgentParams, createToolCallingAgent };
  //# sourceMappingURL=index.d.ts.map
package/dist/agents/tool_calling/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents5","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"@langchain/core/agents\").AgentFinish | import(\"@langchain/core/agents\").AgentAction[]>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAM/BJ;;;;AAIqB;EAyDNK,GAAAA,EA7DfL,iBA6DeK;EAAyBC;EAAKC,KAAAA,EA3D3CR,uBA2D2CQ,EAAAA,GA3DfN,cA2DeM,EAAAA;EAAOC;EAAQC,MAAAA,EAzDzDX,kBAyDyDW;EAAkBL;;;;EAA+BF,cAAAA,CAAAA,EAAAA,OAAAA;AAAqB,CAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHG,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAcO,uBAAAA,CACWC,WAAAA,GAAWD,uBAAAA,CAAoCE,WAAAA"}
+ {"version":3,"file":"index.d.ts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents1","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"@langchain/core/agents\").AgentFinish | import(\"@langchain/core/agents\").AgentAction[]>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAM/BJ;;;;AAIqB;EAyDNK,GAAAA,EA7DfL,iBA6DeK;EAAyBC;EAAKC,KAAAA,EA3D3CR,uBA2D2CQ,EAAAA,GA3DfN,cA2DeM,EAAAA;EAAOC;EAAQC,MAAAA,EAzDzDX,kBAyDyDW;EAAkBL;;;;EAA+BF,cAAAA,CAAAA,EAAAA,OAAAA;AAAqB,CAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHG,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAcO,uBAAAA,CACWC,WAAAA,GAAWD,uBAAAA,CAAoCE,WAAAA"}
package/dist/chat_models/universal.cjs CHANGED
@@ -87,14 +87,20 @@ const MODEL_PROVIDER_CONFIG = {
  };
  const SUPPORTED_PROVIDERS = Object.keys(MODEL_PROVIDER_CONFIG);
  /**
- * Helper function to get a chat model class by its class name
+ * Helper function to get a chat model class by its class name or model provider.
  * @param className The class name (e.g., "ChatOpenAI", "ChatAnthropic")
+ * @param modelProvider Optional model provider key for direct lookup (e.g., "google-vertexai-web").
+ * When provided, uses direct lookup to avoid className collision issues.
  * @returns The imported model class or undefined if not found
  */
- async function getChatModelByClassName(className) {
- const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(([, config$1]) => config$1.className === className);
- if (!providerEntry) return void 0;
- const [, config] = providerEntry;
+ async function getChatModelByClassName(className, modelProvider) {
+ let config;
+ if (modelProvider) config = MODEL_PROVIDER_CONFIG[modelProvider];
+ else {
+ const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(([, c]) => c.className === className);
+ config = providerEntry ? providerEntry[1] : void 0;
+ }
+ if (!config) return void 0;
  try {
  const module$1 = await import(config.package);
  return module$1[config.className];
@@ -117,7 +123,7 @@ async function _initChatModelHelper(model, modelProvider, params = {}) {
  throw new Error(`Unsupported { modelProvider: ${modelProviderCopy} }.\n\nSupported model providers are: ${supported}`);
  }
  const { modelProvider: _unused,...passedParams } = params;
- const ProviderClass = await getChatModelByClassName(config.className);
+ const ProviderClass = await getChatModelByClassName(config.className, modelProviderCopy);
  return new ProviderClass({
  model,
  ...passedParams
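Based on the refactored signature above, a caller that knows the provider key can now disambiguate colliding classNames, while className-only calls keep the old search behavior. A hedged usage sketch (the deep-import path below is an assumption made for illustration; per this diff, `getChatModelByClassName` is exported from the universal chat-model module and resolves to the imported class, or `undefined` if no config matches):

```typescript
// Import path is an assumption; the function lives in
// dist/chat_models/universal per the diff above.
import { getChatModelByClassName } from "@langchain/classic/chat_models/universal";

// Backward-compatible call: className-only search. With colliding
// classNames this resolves to the first matching provider config.
const VertexAI = await getChatModelByClassName("ChatVertexAI");

// New call: the optional provider key forces a direct config lookup,
// so the web variant is reachable despite sharing its className.
const VertexAIWeb = await getChatModelByClassName(
  "ChatVertexAI",
  "google-vertexai-web"
);
```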
package/dist/chat_models/universal.cjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"universal.cjs","names":["className: string","config","module","e: unknown","model: string","modelProvider?: string","params: Record<string, any>","modelName: string","BaseChatModel","fields: ConfigurableModelFields","config?: RunnableConfig","messages: BaseMessage[]","options?: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","tools: BindToolsInput[]","params?: Record<string, any>","modelParams: Record<string, any>","str: string","prefix: string","mergedConfig: RunnableConfig","remainingConfig: RunnableConfig","RunnableBinding","input: RunInput","options?: CallOptions","AsyncGeneratorWithSetup","IterableReadableStream","inputs: RunInput[]","options?: Partial<CallOptions> | Partial<CallOptions>[]","batchOptions?: RunnableBatchOptions","generator: AsyncGenerator<RunInput>","options: CallOptions","options?: Partial<CallOptions>","streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">","options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n }","streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">","model?: string","fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n }","paramsCopy: Record<string, any>"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput\n extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {}\n\nexport interface ConfigurableChatModelCallOptions\n extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: \"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n className: \"ChatGoogleGenerativeAI\",\n },\n ollama: {\n package: 
\"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(className: string) {\n // Find the provider config that matches the class name\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, config]) => config.className === className\n );\n\n if (!providerEntry) {\n return undefined;\n }\n\n const [, config] = providerEntry;\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err &&\n typeof err.message === \"string\"\n ) {\n const msg = err.message.startsWith(\"Error: \")\n ? err.message.slice(\"Error: \".length)\n : err.message;\n const attemptedPackage = msg\n .split(\"Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n const ProviderClass = await getChatModelByClassName(config.className);\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n }\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line 
@typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n }\n\n async _model(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations\n const queuedMethodOperationsEntries = Object.entries(\n this._queuedMethodOperations\n );\n if (queuedMethodOperationsEntries.length > 0) {\n for (const [method, args] of queuedMethodOperationsEntries) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n }\n\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._model(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._model(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._model(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._model(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._model(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n async function* wrappedGenerator() {\n 
const model = await outerThis._model(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. \"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. 
Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-3-5-sonnet-20240620\",\n * foo_modelProvider: 
\"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... 
-> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, ...params } = {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const modelComponents = model.split(\":\", 2);\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] } to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n if (configurableFieldsCopy === undefined) {\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n });\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;AAkDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;CACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;CACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;AACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;AAaD,eAAsB,wBAAwBA,WAAmB;CAE/D,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,KAC1D,CAAC,GAAGC,SAAO,KAAKA,SAAO,cAAc,UACtC;AAED,KAAI,CAAC,cACH,QAAO;CAGT,MAAM,GAAG,OAAO,GAAG;AACnB,KAAI;EACF,MAAMC,WAAS,MAAM,OAAO,OAAO;AACnC,SAAOA,SAAO,OAAO;CACtB,SAAQC,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,OACb,OAAO,IAAI,YAAY,UACvB;GACA,MAAM,MAAM,IAAI,QAAQ,WAAW,UAAU,GACzC,IAAI,QAAQ,MAAM,EAAiB,GACnC,IAAI;GACR,MAAM,mBAAmB,IACtB,MAAM,wBAAwB,CAAC,GAC/B,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,CAAC,iBAAiB,EAAE,iBAAiB,oCAAsB,EACxC,iBAAiB,qBAAqB,EAAE,iBAAiB,EAAE,CAAC;EAElF;AACD,QAAM;CACP;AACF;AAED,eAAe,qBACbC,OACAC,eAEAC,SAA8B,CAAE,GACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,CAAC,4CAA4C,EAAE,MAAM,0CAA0C,CAAC;CAIpG,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,CAAC,6BAA6B,EAAE,kBAAkB,sCAAsC,EAAE,WAAW;CAExG;CAED,MAAM,EAAE,eAAe,QAAS,GAAG,cAAc,GAAG;CACpD,MAAM,gBAAgB,MAAM,wBAAwB,OAAO,UAAU;AACrE,QAAO,IAAI,cAAc;EAAE;EAAO,GAAG;CAAc;AACpD;;;;;;;;;;;;AAaD,SAAgB,oBAAoBC,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP,QAAO;AAEV;;;;;;AA0BD,IAAa,oBAAb,MAAa,0BAIHC,2DAA2C;CACnD,WAAmB;AACjB,SAAO;CACR;CAED,eAAe,CAAC,aAAa,aAAc;CAG3C,iBAAuC,CAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,CAAE;CAEjD,YAAYC,QAAiC;EAC3C,MAAM,OAAO;EACb,KAAK,iBAAiB,OAAO,iBAAiB,CAAE;AAEhD,MAAI,OAAO,uBAAuB,OAChC,KAAK,sBAAsB;OAE3B,KAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,eACD;AAGH,MAAI,OAAO,cACT,KAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa,CAAC,CAAC;OAE7B,KAAK,gBAAgB;EAGvB,KAAK,0BACH,OAAO,0BAA0B,KAAK;CACzC;CAED,MAAM,OACJC,QAGA;EACA,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;EAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;EAGD,MAAM,gCAAgC,OAAO,QAC3C,KAAK,wBACN;AACD,MAAI,8BAA8B,SAAS,GACzC;QAAK,MAAM,CAAC,QAAQ,KAAK,IAAI,8BAC3B,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,YAG7C,mBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;EAEtE;AAGH,SAAO;CACR;CAED,MAAM,UACJC,UACAC,SACAC,YACqB;EACrB,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;AACxC,SAAO,MAAM,UAAU,UAAU,WAAW,CAAE,GAAE,WAAW;CAC5D;CAED,AAAS,UACPC,OAEAC,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,YAAY,CAAC,OAAO,MAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,uBAA8D,CAC5D,QACA,GAAG,SACmD;EACtD,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,uBAAuB,CAAC,QAAQ,GAAG,IAAK;AAC5D,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,aAAaL,QAA8C;EACzD,MAAM,eAAe,
QAAQ,gBAAgB,CAAE;EAE/C,IAAIM,cAAmC,CAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;GAC/D,YAAY,eAAe;EAC5B;AAGH,MAAI,KAAK,wBAAwB,OAC/B,cAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,OAAO,CAAC,CAAC,IAAI,KACvC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;CACR;CAED,cAAcC,KAAaC,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;CAC5D;;;;;;CAOD,WACER,QACwD;EACxD,MAAMS,eAA+B,EAAE,GAAI,UAAU,CAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAMC,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,MAAM,eAAe,CACnE;EAED,gBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,CAAE,EAAC,CAAC,OAC9C,CAAC,CAAC,EAAE,KACF,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;EAED,MAAM,uBAAuB,IAAI,kBAAyC;GACxE,eAAe;IAAE,GAAG,KAAK;IAAgB,GAAG;GAAa;GACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,mBAAoB,IAC7B,KAAK;GACT,cAAc,KAAK;GACnB,wBAAwB,KAAK;EAC9B;AAED,SAAO,IAAIC,2CAAuD;GAChE,QAAQ;GACR,OAAO;EACR;CACF;CAED,MAAM,OACJC,OACAC,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,sDAAsB,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;CACnC;CAED,MAAM,OACJD,OACAC,SACiD;EACjD,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,mBAAmB,IAAIC,sDAAwB;GACnD,WAAW,MAAM,MAAM,OAAO,OAAO,QAAQ;GAC7C,QAAQ;EACT;EACD,MAAM,iBAAiB;AACvB,SAAOC,qDAAuB,mBAAmB,iBAAiB;CACnE;CAoBD,MAAM,MACJC,QACAC,SACAC,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;CAClD;CAED,OAAO,UACLC,WACAC,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,WAAW,OAAO;CAC1C;CAED,OAAO,UACLR,OACAS,SACAC,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;EAC7B,EAAC;CACH;CAiBD,aACEV,OACAW,SAIAC,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,OAAO,QAAQ;GAC7C,MAAM,sDAAsB,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,aACxB,MAAM;EAET;AACD,SAAOT,qDAAuB,mBAAmB,kBAAkB,CAAC;CACrE;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2QD,eAAsB,cAKpBU,OAEAC,QAKmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,cAAe,GAAG,QAAQ,GAAG;EACnE,cAAc;EACd,GAAI,UAAU,CAAE;CACjB;AACD,KAAI,kBAAkB,UAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,kBAAkB,MAAM,MAAM,KAAK,EAAE;AAC3C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,EAEvE,CAAC,eAAe,MAAM,GAAG;CAE5B;CACD,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,kBAAmB,IACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,QACvC,yBAAyB,CAAC,SAAS,eAAgB;AAErD,KAAI,gBAAgB,2BAA2B,QAC7C,QAAQ,KACN,CAAC,gBAAgB,EAAE,aAAa,oIAAoD,CAEnE,CAClB;CAIH,MAAMC,aAAkC,EAAE,GAAG,OAAQ;AAErD,KAAI,2BAA2B,OAC7B,QAAO,IAAI,kBAAyC;EAClD,eAAe;GACb,GAAG;GACH;GACA;EACD;EACD;CACD;MACI;AACL,MAAI,OACF,WAAW,QAAQ;AAErB,MAAI,eACF,WAAW,gBAAgB;AAE7B,SAAO,IAAI,kBAAyC;GAClD,eAAe;GACf;GACA,oBAAoB;EACrB;CACF;AACF"}
+ {"version":3,"file":"universal.cjs","names":["className: string","modelProvider?: string","config: ModelProviderConfig | undefined","module","e: unknown","model: string","params: Record<string, any>","modelName: string","BaseChatModel","fields: ConfigurableModelFields","config?: RunnableConfig","messages: BaseMessage[]","options?: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","tools: BindToolsInput[]","params?: Record<string, any>","modelParams: Record<string, any>","str: string","prefix: string","mergedConfig: RunnableConfig","remainingConfig: RunnableConfig","RunnableBinding","input: RunInput","options?: CallOptions","AsyncGeneratorWithSetup","IterableReadableStream","inputs: RunInput[]","options?: Partial<CallOptions> | Partial<CallOptions>[]","batchOptions?: RunnableBatchOptions","generator: AsyncGenerator<RunInput>","options: CallOptions","options?: Partial<CallOptions>","streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">","options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n }","streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">","model?: string","fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n }","paramsCopy: Record<string, any>"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput\n extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {}\n\nexport interface ConfigurableChatModelCallOptions\n extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: \"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n className: \"ChatGoogleGenerativeAI\",\n 
},\n ollama: {\n package: \"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name or model provider.\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @param modelProvider Optional model provider key for direct lookup (e.g., \"google-vertexai-web\").\n * When provided, uses direct lookup to avoid className collision issues.\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(\n className: string,\n modelProvider?: string\n) {\n let config: ModelProviderConfig | undefined;\n\n if (modelProvider) {\n // Direct lookup by modelProvider key - avoids className collision\n // (e.g., google-vertexai and google-vertexai-web both use \"ChatVertexAI\")\n config = MODEL_PROVIDER_CONFIG[\n modelProvider as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig | undefined;\n } else {\n // Fallback to className lookup for backward compatibility\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, c]) => c.className === className\n );\n config = providerEntry\n ? (providerEntry[1] as ModelProviderConfig)\n : undefined;\n }\n\n if (!config) {\n return undefined;\n }\n\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err &&\n typeof err.message === \"string\"\n ) {\n const msg = err.message.startsWith(\"Error: \")\n ? err.message.slice(\"Error: \".length)\n : err.message;\n const attemptedPackage = msg\n .split(\"Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n // Pass modelProviderCopy to use direct lookup and avoid className collision\n const ProviderClass = await getChatModelByClassName(\n config.className,\n modelProviderCopy\n );\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n 
}\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n }\n\n async _model(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations\n const queuedMethodOperationsEntries = Object.entries(\n this._queuedMethodOperations\n );\n if (queuedMethodOperationsEntries.length > 0) {\n for (const [method, args] of queuedMethodOperationsEntries) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n }\n\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._model(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._model(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._model(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._model(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._model(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n async function* wrappedGenerator() {\n 
const model = await outerThis._model(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. \"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. 
Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-3-5-sonnet-20240620\",\n * foo_modelProvider: 
\"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-3-5-sonnet-20240620\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... 
-> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends\n ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, ...params } = {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const modelComponents = model.split(\":\", 2);\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] } to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n if (configurableFieldsCopy === undefined) {\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n });\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;AAkDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;CACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;CACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;CACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;CACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;CACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;CACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;CACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;CACxB;AACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;;;AAeD,eAAsB,wBACpBA,WACAC,eACA;CACA,IAAIC;AAEJ,KAAI,eAGF,SAAS,sBACP;MAEG;EAEL,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,KAC1D,CAAC,GAAG,EAAE,KAAK,EAAE,cAAc,UAC5B;EACD,SAAS,gBACJ,cAAc,KACf;CACL;AAED,KAAI,CAAC,OACH,QAAO;AAGT,KAAI;EACF,MAAMC,WAAS,MAAM,OAAO,OAAO;AACnC,SAAOA,SAAO,OAAO;CACtB,SAAQC,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,OACb,OAAO,IAAI,YAAY,UACvB;GACA,MAAM,MAAM,IAAI,QAAQ,WAAW,UAAU,GACzC,IAAI,QAAQ,MAAM,EAAiB,GACnC,IAAI;GACR,MAAM,mBAAmB,IACtB,MAAM,wBAAwB,CAAC,GAC/B,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,CAAC,iBAAiB,EAAE,iBAAiB,oCAAsB,EACxC,iBAAiB,qBAAqB,EAAE,iBAAiB,EAAE,CAAC;EAElF;AACD,QAAM;CACP;AACF;AAED,eAAe,qBACbC,OACAJ,eAEAK,SAA8B,CAAE,GACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,CAAC,4CAA4C,EAAE,MAAM,0CAA0C,CAAC;CAIpG,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,CAAC,6BAA6B,EAAE,kBAAkB,sCAAsC,EAAE,WAAW;CAExG;CAED,MAAM,EAAE,eAAe,QAAS,GAAG,cAAc,GAAG;CAEpD,MAAM,gBAAgB,MAAM,wBAC1B,OAAO,WACP,kBACD;AACD,QAAO,IAAI,cAAc;EAAE;EAAO,GAAG;CAAc;AACpD;;;;;;;;;;;;AAaD,SAAgB,oBAAoBC,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP,QAAO;AAEV;;;;;;AA0BD,IAAa,oBAAb,MAAa,0BAIHC,2DAA2C;CACnD,WAAmB;AACjB,SAAO;CACR;CAED,eAAe,CAAC,aAAa,aAAc;CAG3C,iBAAuC,CAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,CAAE;CAEjD,YAAYC,QAAiC;EAC3C,MAAM,OAAO;EACb,KAAK,iBAAiB,OAAO,iBAAiB,CAAE;AAEhD,MAAI,OAAO,uBAAuB,OAChC,KAAK,sBAAsB;OAE3B,KAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,eACD;AAGH,MAAI,OAAO,cACT,KAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa,CAAC,CAAC;OAE7B,KAAK,gBAAgB;EAGvB,KAAK,0BACH,OAAO,0BAA0B,KAAK;CACzC;CAED,MAAM,OACJC,QAGA;EACA,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;EAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;EAGD,MAAM,gCAAgC,OAAO,QAC3C,KAAK,wBACN;AACD,MAAI,8BAA8B,SAAS,GACzC;QAAK,MAAM,CAAC,QAAQ,KAAK,IAAI,8BAC3B,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,YAG7C,mBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;EAEtE;AAGH,SAAO;CACR;CAED,MAAM,UACJC,UACAC,SACAC,YACqB;EACrB,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;AACxC,SAAO,MAAM,UAAU,UAAU,WAAW,CAAE,GAAE,WAAW;CAC5D;CAED,AAAS,UACPC,OAEAC,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,YAAY,CAAC,OAAO,MAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;EACzB;CACF;CAGD,uBAA8D,CAC5D,QACA,GAAG,SACmD;EACtD,MAAM,sBAAsB,EAAE,GAAG,KAAK,wBAAyB;EAC/D,oBAAoB,uBAAuB,CAAC,QAAQ,GAAG,IAAK;AAC5D,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;G
ACnB,wBAAwB;EACzB;CACF;CAGD,aAAaL,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,CAAE;EAE/C,IAAIM,cAAmC,CAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;GAC/D,YAAY,eAAe;EAC5B;AAGH,MAAI,KAAK,wBAAwB,OAC/B,cAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,OAAO,CAAC,CAAC,IAAI,KACvC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;CACR;CAED,cAAcC,KAAaC,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;CAC5D;;;;;;CAOD,WACER,QACwD;EACxD,MAAMS,eAA+B,EAAE,GAAI,UAAU,CAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAMC,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,OAAO,CAAC,CAAC,EAAE,KAAK,MAAM,eAAe,CACnE;EAED,gBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,CAAE,EAAC,CAAC,OAC9C,CAAC,CAAC,EAAE,KACF,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;EAED,MAAM,uBAAuB,IAAI,kBAAyC;GACxE,eAAe;IAAE,GAAG,KAAK;IAAgB,GAAG;GAAa;GACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,mBAAoB,IAC7B,KAAK;GACT,cAAc,KAAK;GACnB,wBAAwB,KAAK;EAC9B;AAED,SAAO,IAAIC,2CAAuD;GAChE,QAAQ;GACR,OAAO;EACR;CACF;CAED,MAAM,OACJC,OACAC,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,sDAAsB,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;CACnC;CAED,MAAM,OACJD,OACAC,SACiD;EACjD,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,mBAAmB,IAAIC,sDAAwB;GACnD,WAAW,MAAM,MAAM,OAAO,OAAO,QAAQ;GAC7C,QAAQ;EACT;EACD,MAAM,iBAAiB;AACvB,SAAOC,qDAAuB,mBAAmB,iBAAiB;CACnE;CAoBD,MAAM,MACJC,QACAC,SACAC,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;CAClD;CAED,OAAO,UACLC,WACAC,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,WAAW,OAAO;CAC1C;CAED,OAAO,UACLR,OACAS,SACAC,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,OAAO,QAAQ;EACxC,MAAM,sDAAsB,QAAQ;EAEpC,OAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;EAC7B,EAAC;CACH;CAiBD,aACEV,OACAW,SAIAC,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,OAAO,QAAQ;GAC7C,MAAM,sDAAsB,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,aACxB,MAAM;EAET;AACD,SAAOT,qDAAuB,mBAAmB,kBAAkB,CAAC;CACrE;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2QD,eAAsB,cAKpBU,OAEAC,QAKmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,cAAe,GAAG,QAAQ,GAAG;EACnE,cAAc;EACd,GAAI,UAAU,CAAE;CACjB;AACD,KAAI,kBAAkB,UAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,kBAAkB,MAAM,MAAM,KAAK,EAAE;AAC3C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,EAEvE,CAAC,eAAe,MAAM,GAAG;CAE5B;CACD,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,kBAAmB,IACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,QACvC,yBAAyB,CAAC,SAAS,eAAgB;AAErD,KAAI,gBAAgB,2BAA2B,QAC7C,QAAQ,KACN,CAAC,gBAAgB,EAAE,aAAa,oIAAoD,CAEnE,CAClB;CAIH,MAAMC,aAAkC,EAAE,GAAG,OAAQ;AAErD,KAAI,2BAA2B,OAC7B,QAAO,IAAI,kBAAyC;EAClD,eAAe;GACb,GAAG;GACH;GACA;EACD;EACD;CACD;MACI;AACL,MAAI,OACF,WAAW,QAAQ;AAErB,MAAI,eACF,WAAW,gBAAgB;AAE7B,SAAO,IAAI,kBAAyC;GAClD,eAAe;GACf;GACA,oBAAoB;EACrB;CACF;AACF"}
package/dist/chat_models/universal.d.cts CHANGED
@@ -88,11 +88,13 @@ declare const MODEL_PROVIDER_CONFIG: {
  };
  type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;
  /**
- * Helper function to get a chat model class by its class name
+ * Helper function to get a chat model class by its class name or model provider.
  * @param className The class name (e.g., "ChatOpenAI", "ChatAnthropic")
+ * @param modelProvider Optional model provider key for direct lookup (e.g., "google-vertexai-web").
+ * When provided, uses direct lookup to avoid className collision issues.
  * @returns The imported model class or undefined if not found
  */
- declare function getChatModelByClassName(className: string): Promise<any>;
+ declare function getChatModelByClassName(className: string, modelProvider?: string): Promise<any>;
  /**
  * Attempts to infer the model provider based on the given model name.
  *
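As context for the hunk above, here is a minimal usage sketch of the new two-argument signature. Only the call shape comes from the diff; the import subpath is an assumption based on the declaration file's location under dist/chat_models/.

```ts
// Minimal sketch of the new optional parameter. The import path below is an
// assumption, not confirmed by the diff.
import { getChatModelByClassName } from "@langchain/classic/chat_models/universal";

// "google-vertexai" and "google-vertexai-web" both declare the className
// "ChatVertexAI", so a className-only search is ambiguous. Passing the
// provider key requests a direct config lookup instead.
const ChatVertexAIWeb = await getChatModelByClassName(
  "ChatVertexAI",
  "google-vertexai-web"
);
```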
package/dist/chat_models/universal.d.cts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"universal.d.cts","names":["BaseLanguageModelInput","ToolDefinition","BaseChatModel","BaseChatModelParams","BindToolsInput","BaseChatModelCallOptions","BaseMessage","AIMessageChunk","MessageStructure","RunnableBatchOptions","RunnableBinding","RunnableConfig","RunnableToolLike","IterableReadableStream","LogStreamCallbackHandlerInput","RunLogPatch","StreamEvent","StructuredToolInterface","CallbackManagerForLLMRun","ChatResult","EventStreamCallbackHandlerInput","Omit","ConfigurableChatModelCallOptions","Record","MODEL_PROVIDER_CONFIG","ChatModelProvider","getChatModelByClassName","Promise","_inferModelProvider","ConfigurableModelFields","ConfigurableModel","RunInput","CallOptions","Partial","Error","AsyncGenerator","Uint8Array","InitChatModelFields","ConfigurableFields","initChatModel"],"sources":["../../src/chat_models/universal.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { BaseChatModel, BaseChatModelParams, BindToolsInput, type BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { BaseMessage, type AIMessageChunk, MessageStructure } from \"@langchain/core/messages\";\nimport { type RunnableBatchOptions, RunnableBinding, type RunnableConfig, type RunnableToolLike } from \"@langchain/core/runnables\";\nimport { IterableReadableStream } from \"@langchain/core/utils/stream\";\nimport { type LogStreamCallbackHandlerInput, type RunLogPatch, type StreamEvent } from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\ninterface EventStreamCallbackHandlerInput extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {\n}\nexport interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {\n tools?: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[];\n}\nexport declare const MODEL_PROVIDER_CONFIG: {\n readonly openai: {\n readonly package: \"@langchain/openai\";\n readonly className: \"ChatOpenAI\";\n };\n readonly anthropic: {\n readonly package: \"@langchain/anthropic\";\n readonly className: \"ChatAnthropic\";\n };\n readonly azure_openai: {\n readonly package: \"@langchain/openai\";\n readonly className: \"AzureChatOpenAI\";\n };\n readonly cohere: {\n readonly package: \"@langchain/cohere\";\n readonly className: \"ChatCohere\";\n };\n readonly \"google-vertexai\": {\n readonly package: \"@langchain/google-vertexai\";\n readonly className: \"ChatVertexAI\";\n };\n readonly \"google-vertexai-web\": {\n readonly package: \"@langchain/google-vertexai-web\";\n readonly className: \"ChatVertexAI\";\n };\n readonly \"google-genai\": {\n readonly package: \"@langchain/google-genai\";\n readonly className: \"ChatGoogleGenerativeAI\";\n };\n readonly ollama: {\n readonly package: \"@langchain/ollama\";\n readonly className: \"ChatOllama\";\n };\n readonly mistralai: {\n readonly package: \"@langchain/mistralai\";\n readonly className: \"ChatMistralAI\";\n };\n readonly groq: {\n readonly package: \"@langchain/groq\";\n readonly className: \"ChatGroq\";\n };\n readonly cerebras: {\n readonly package: \"@langchain/cerebras\";\n readonly className: \"ChatCerebras\";\n };\n readonly bedrock: {\n readonly package: \"@langchain/aws\";\n readonly className: \"ChatBedrockConverse\";\n };\n readonly deepseek: {\n 
readonly package: \"@langchain/deepseek\";\n readonly className: \"ChatDeepSeek\";\n };\n readonly xai: {\n readonly package: \"@langchain/xai\";\n readonly className: \"ChatXAI\";\n };\n readonly fireworks: {\n readonly package: \"@langchain/community/chat_models/fireworks\";\n readonly className: \"ChatFireworks\";\n readonly hasCircularDependency: true;\n };\n readonly together: {\n readonly package: \"@langchain/community/chat_models/togetherai\";\n readonly className: \"ChatTogetherAI\";\n readonly hasCircularDependency: true;\n };\n readonly perplexity: {\n readonly package: \"@langchain/community/chat_models/perplexity\";\n readonly className: \"ChatPerplexity\";\n readonly hasCircularDependency: true;\n };\n};\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\n/**\n * Helper function to get a chat model class by its class name\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @returns The imported model class or undefined if not found\n */\nexport declare function getChatModelByClassName(className: string): Promise<any>;\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport declare function _inferModelProvider(modelName: string): string | undefined;\ninterface ConfigurableModelFields extends BaseChatModelParams {\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n queuedMethodOperations?: Record<string, any>;\n}\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport declare class ConfigurableModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string;\n lc_namespace: string[];\n _defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\";\n /**\n * @default \"\"\n */\n _configPrefix: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n _queuedMethodOperations: Record<string, any>;\n constructor(fields: ConfigurableModelFields);\n _model(config?: RunnableConfig): Promise<BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>>;\n _generate(messages: BaseMessage[], options?: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n bindTools(tools: BindToolsInput[], params?: Record<string, any>): ConfigurableModel<RunInput, CallOptions>;\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"];\n _modelParams(config?: RunnableConfig): Record<string, any>;\n _removePrefix(str: string, prefix: string): string;\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to 
bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(config?: RunnableConfig): RunnableBinding<RunInput, AIMessageChunk, CallOptions>;\n invoke(input: RunInput, options?: CallOptions): Promise<AIMessageChunk>;\n stream(input: RunInput, options?: CallOptions): Promise<IterableReadableStream<AIMessageChunk>>;\n batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {\n returnExceptions?: false;\n }): Promise<AIMessageChunk[]>;\n batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {\n returnExceptions: true;\n }): Promise<(AIMessageChunk | Error)[]>;\n batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(AIMessageChunk | Error)[]>;\n transform(generator: AsyncGenerator<RunInput>, options: CallOptions): AsyncGenerator<AIMessageChunk>;\n streamLog(input: RunInput, options?: Partial<CallOptions>, streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">): AsyncGenerator<RunLogPatch>;\n streamEvents(input: RunInput, options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n }, streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">): IterableReadableStream<StreamEvent>;\n streamEvents(input: RunInput, options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n }, streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">): IterableReadableStream<Uint8Array>;\n}\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\nexport type ConfigurableFields = \"any\" | string[];\nexport declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model: string, fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n}): Promise<ConfigurableModel<RunInput, CallOptions>>;\nexport declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model: never, options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n}): Promise<ConfigurableModel<RunInput, CallOptions>>;\nexport declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model?: string, options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n}): Promise<ConfigurableModel<RunInput, CallOptions>>;\nexport {};\n//# 
sourceMappingURL=universal.d.ts.map"],"mappings":";;;;;;;;;;;UASUoB,+BAAAA,SAAwCC,KAAKP;AAA7CM,UAEOE,gCAAAA,SAAyCjB,wBAFRgB,CAAAA;EAEjCC,KAAAA,CAAAA,EAAAA,CACJL,uBADIK,GACsBC,MADU,CAAA,MAAA,EAAA,OAAA,CAAA,GACgBtB,cADhB,GACiCW,gBADjC,CAAA,EAAA;;AACVW,cAElBC,qBAFkBD,EAAAA;EAA0BtB,SAAAA,MAAAA,EAAAA;IAAiBW,SAAAA,OAAAA,EAAAA,mBAAAA;IADxBP,SAAAA,SAAAA,EAAAA,YAAAA;EAAwB,CAAA;EAG7DmB,SAAAA,SAAAA,EAAAA;IAyETC,SAAAA,OAAiB,EAAA,sBAAgBD;IAMrBE,SAAAA,SAAAA,EAAAA,eAA4CC;EAY5CC,CAAAA;EACdC,SAAAA,YAAAA,EAAAA;IACUN,SAAAA,OAAAA,EAAAA,mBAAAA;IAaSA,SAAAA,SAAAA,EAAAA,iBAAAA;EAdapB,CAAAA;EAAmB,SAAA,MAAA,EAAA;IAqBxC2B,SAAAA,OAAiB,EAAA,mBAAAE;IAAkBhC,SAAAA,SAAAA,EAAAA,YAAAA;EAAyBA,CAAAA;EAA4CsB,SAAAA,iBAAAA,EAAAA;IAAmCA,SAAAA,OAAAA,EAAAA,4BAAAA;IAAwDU,SAAAA,SAAAA,EAAAA,cAAAA;EAAazB,CAAAA;EAGhNgB,SAAAA,qBAAAA,EAAAA;IAaQA,SAAAA,OAAAA,EAAAA,gCAAAA;IACLM,SAAAA,SAAAA,EAAAA,cAAAA;EACJlB,CAAAA;EAAuCN,SAAAA,cAAAA,EAAAA;IAAyCG,SAAAA,OAAAA,EAAAA,yBAAAA;IAAfD,SAAAA,SAAAA,EAAAA,wBAAAA;EAAxCL,CAAAA;EAARyB,SAAAA,MAAAA,EAAAA;IACbrB,SAAAA,OAAAA,EAAAA,mBAAAA;IAAiEY,SAAAA,SAAAA,EAAAA,YAAAA;EAAmCC,CAAAA;EAARQ,SAAAA,SAAAA,EAAAA;IAC/FvB,SAAAA,OAAAA,EAAAA,sBAAAA;IAA2BmB,SAAAA,SAAAA,EAAAA,eAAAA;EAAwCQ,CAAAA;EAAUC,SAAAA,IAAAA,EAAAA;IAA5BF,SAAAA,OAAAA,EAAAA,iBAAAA;IAC5C5B,SAAAA,SAAAA,EAAAA,UAAAA;EACAS,CAAAA;EAAiBY,SAAAA,QAAAA,EAAAA;IAOnBZ,SAAAA,OAAAA,EAAAA,qBAAAA;IAAiCoB,SAAAA,SAAAA,EAAAA,cAAAA;EAAUxB,CAAAA;EAAgByB,SAAAA,OAAAA,EAAAA;IAA1CtB,SAAAA,OAAAA,EAAAA,gBAAAA;IACvBqB,SAAAA,SAAAA,EAAAA,qBAAAA;EAAoBC,CAAAA;EAAsBzB,SAAAA,QAAAA,EAAAA;IAARoB,SAAAA,OAAAA,EAAAA,qBAAAA;IAClCI,SAAAA,SAAAA,EAAAA,cAAAA;EAAoBC,CAAAA;EAA6CzB,SAAAA,GAAAA,EAAAA;IAAvBM,SAAAA,OAAAA,EAAAA,gBAAAA;IAARc,SAAAA,SAAAA,EAAAA,SAAAA;EAClCI,CAAAA;EAA8BC,SAAAA,SAAAA,EAAAA;IAARC,SAAAA,OAAAA,EAAAA,4CAAAA;IAA+BD,SAAAA,SAAAA,EAAAA,eAAAA;IAARC,SAAAA,qBAAAA,EAAAA,IAAAA;EAAuCxB,CAAAA;EAEtFF,SAAAA,QAAAA,EAAAA;IAARoB,SAAAA,OAAAA,EAAAA,6CAAAA;IACUI,SAAAA,SAAAA,EAAAA,gBAAAA;IAA8BC,SAAAA,qBAAAA,EAAAA,IAAAA;EAARC,CAAAA;EAA+BD,SAAAA,UAAAA,EAAAA;IAARC,SAAAA,OAAAA,EAAAA,6CAAAA;IAAuCxB,SAAAA,SAAAA,EAAAA,gBAAAA;IAErFF,SAAAA,qBAAAA,EAAAA,IAAAA;EAAiB2B,CAAAA;CAA1BP;AACUI,KA9ENN,iBAAAA,GA8EMM,MAAAA,OA9E2BP,qBA8E3BO;;;;;;AAAoHxB,iBAxE9GmB,uBAAAA,CAwE8GnB,SAAAA,EAAAA,MAAAA,CAAAA,EAxElEoB,OAwEkEpB,CAAAA,GAAAA,CAAAA;;;;;;;;;;;;AAEvDc,iBA9DvDO,mBAAAA,CA8DuDP,SAAAA,EAAAA,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;UA7DrEQ,uBAAAA,SAAgC1B,mBA6DuGY,CAAAA;EAAfoB,aAAAA,CAAAA,EA5D9GZ,MA4D8GY,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAC1GJ;;;EAEIX,kBAAAA,CAAAA,EAAAA,MAAAA,EAAAA,GAAAA,KAAAA;EAALC;;;EACCU,YAAAA,CAAAA,EAAAA,MAAAA;EAA2BC;;;;EAGgDI,sBAAAA,CAAAA,EAtDtEb,MAsDsEa,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;;;AA/CoH;AAiDvN;AAKA;AACA;AAAuDpC,cAvDlC8B,iBAuDkC9B,CAAAA,iBAvDCA,sBAuDDA,GAvD0BA,sBAuD1BA,EAAAA,oBAvDsEsB,gCAuDtEtB,GAvDyGsB,gCAuDzGtB,CAAAA,SAvDmJE,aAuDnJF,CAvDiKgC,WAuDjKhC,EAvD8KO,cAuD9KP,CAAAA,CAAAA;EAAyBA,QAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAA4CsB,YAAAA,EAAAA,MAAAA,EAAAA;EAAmCA,cAAAA,CAAAA,EApD1IC,MAoD0ID,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAAkEC;;;EAIzLS,mBAAAA,EAAAA,MAAAA,EAAAA,GAAAA,KAAAA;EAA5BF;;AAAD;EACaS,aAAAA,EAAAA,MAAa;EAAkBvC;;;;EAA0KuB,uBAAAA,EA5CpMA,MA4CoMA,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAARU,WAAAA,CAAAA,MAAAA,EA3CjMJ,uBA2CiMI;EAI3LF,MAAAA,CAAAA,MAAAA,CAAAA,EA9CVpB,cA8CUoB,CAAAA,EA9COJ,OA8CPI,CA9Ce7B,aA8Cf6B,CA9C6B1B,wBA8C7B0B,EA9CuDxB,cA8CvDwB,CA9CsEvB,gBA8CtEuB,CAAAA,CAAAA,CAAAA;EAAUC,SAAAA,CAAAA,QAAAA,EA7ChB1B,WA6CgB0B,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EA7CiDd,wBA6CjDc,CAAAA,EA7C4EL,OA6C5EK,CA7CoFb,UA6CpFa,CAAAA;EAA5BF,SAAAA,CAAAA,KAAAA,EA5CS1B,cA4CT0B,EAAAA,EAAAA,MAAAA,CAAAA,EA5CoC
P,MA4CpCO,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,CAAAA,EA5C0DA,iBA4C1DA,CA5C4EC,QA4C5ED,EA5CsFE,WA4CtFF,CAAAA;EAARH,oBAAAA,EA3CsBzB,aA2CtByB,CAAAA,sBAAAA,CAAAA;EAAO,YAAA,CAAA,MAAA,CAAA,EA1CehB,cA0Cf,CAAA,EA1CgCY,MA0ChC,CAAA,MAAA,EAAA,GAAA,CAAA;EACagB,aAAAA,CAAAA,GAAa,EAAA,MAAAR,EAAAA,MAAAC,EAAAA,MAAAA,CAAAA,EAAAA,MAAA;EAAkBhC;;;;;EAAoKiC,UAAAA,CAAAA,MAAAA,CAAAA,EApCnMtB,cAoCmMsB,CAAAA,EApClLvB,eAoCkLuB,CApClKF,QAoCkKE,EApCxJ1B,cAoCwJ0B,EApCxID,WAoCwIC,CAAAA;EAElMK,MAAAA,CAAAA,KAAAA,EArCPP,QAqCOO,EAAAA,OAAAA,CAAAA,EArCaN,WAqCbM,CAAAA,EArC2BX,OAqC3BW,CArCmC/B,cAqCnC+B,CAAAA;EAEKP,MAAAA,CAAAA,KAAAA,EAtCZA,QAsCYA,EAAAA,OAAAA,CAAAA,EAtCQC,WAsCRD,CAAAA,EAtCsBJ,OAsCtBI,CAtC8BlB,sBAsC9BkB,CAtCqDxB,cAsCrDwB,CAAAA,CAAAA;EAAUC,KAAAA,CAAAA,MAAAA,EArCtBD,QAqCsBC,EAAAA,EAAAA,OAAAA,CAAAA,EArCAC,OAqCAD,CArCQA,WAqCRA,CAAAA,GArCuBC,OAqCvBD,CArC+BA,WAqC/BA,CAAAA,EAAAA,EAAAA,YAApCL,CAAoCK,EArC8DvB,oBAqC9DuB,GAAAA;IAA5BF,gBAAAA,CAAAA,EAAAA,KAAAA;EAARH,CAAAA,CAAAA,EAnCIA,OAmCJA,CAnCYpB,cAmCZoB,EAAAA,CAAAA;EAAO,KAAA,CAAA,MAAA,EAlCOI,QAkCP,EAAA,EAAA,OAAA,CAAA,EAlC6BE,OAkC7B,CAlCqCD,WAkCrC,CAAA,GAlCoDC,OAkCpD,CAlC4DD,WAkC5D,CAAA,EAAA,EAAA,aAAA,EAlC2FvB,oBAkC3F,GAAA;;MAhCHkB,SAASpB,iBAAiB2B;gBAChBH,sBAAsBE,QAAQD,eAAeC,QAAQD,+BAA+BvB,uBAAuBkB,SAASpB,iBAAiB2B;uBAC9HC,eAAeJ,oBAAoBC,cAAcG,eAAe5B;mBACpEwB,oBAAoBE,QAAQD,8BAA8BX,KAAKP,8CAA8CqB,eAAepB;sBACzHgB,mBAAmBE,QAAQD;;qBAE5BX,KAAKD,gDAAgDP,uBAAuBG;sBAC3Ee,mBAAmBE,QAAQD;;;qBAG5BX,KAAKD,gDAAgDP,uBAAuBuB;;UAElFC,mBAAAA,SAA4BJ,QAAQV;;;;;KAKzCe,kBAAAA;iBACYC,+BAA+BvC,yBAAyBA,4CAA4CsB,mCAAmCA,0DAA0DW,QAAQV;;;;IAI7NI,QAAQG,kBAAkBC,UAAUC;iBAChBO,+BAA+BvC,yBAAyBA,4CAA4CsB,mCAAmCA,0DAA0DW,QAAQV;;;;IAI7NI,QAAQG,kBAAkBC,UAAUC;iBAChBO,+BAA+BvC,yBAAyBA,4CAA4CsB,mCAAmCA,4DAA4DW,QAAQV;;uBAE1Me;;IAErBX,QAAQG,kBAAkBC,UAAUC"}
+ {"version":3,"file":"universal.d.cts","names":["BaseLanguageModelInput","ToolDefinition","BaseChatModel","BaseChatModelParams","BindToolsInput","BaseChatModelCallOptions","BaseMessage","AIMessageChunk","MessageStructure","RunnableBatchOptions","RunnableBinding","RunnableConfig","RunnableToolLike","IterableReadableStream","LogStreamCallbackHandlerInput","RunLogPatch","StreamEvent","StructuredToolInterface","CallbackManagerForLLMRun","ChatResult","EventStreamCallbackHandlerInput","Omit","ConfigurableChatModelCallOptions","Record","MODEL_PROVIDER_CONFIG","ChatModelProvider","getChatModelByClassName","Promise","_inferModelProvider","ConfigurableModelFields","ConfigurableModel","RunInput","CallOptions","Partial","Error","AsyncGenerator","Uint8Array","InitChatModelFields","ConfigurableFields","initChatModel"],"sources":["../../src/chat_models/universal.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { BaseChatModel, BaseChatModelParams, BindToolsInput, type BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { BaseMessage, type AIMessageChunk, MessageStructure } from \"@langchain/core/messages\";\nimport { type RunnableBatchOptions, RunnableBinding, type RunnableConfig, type RunnableToolLike } from \"@langchain/core/runnables\";\nimport { IterableReadableStream } from \"@langchain/core/utils/stream\";\nimport { type LogStreamCallbackHandlerInput, type RunLogPatch, type StreamEvent } from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\ninterface EventStreamCallbackHandlerInput extends Omit<LogStreamCallbackHandlerInput, \"_schemaFormat\"> {\n}\nexport interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {\n tools?: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[];\n}\nexport declare const MODEL_PROVIDER_CONFIG: {\n readonly openai: {\n readonly package: \"@langchain/openai\";\n readonly className: \"ChatOpenAI\";\n };\n readonly anthropic: {\n readonly package: \"@langchain/anthropic\";\n readonly className: \"ChatAnthropic\";\n };\n readonly azure_openai: {\n readonly package: \"@langchain/openai\";\n readonly className: \"AzureChatOpenAI\";\n };\n readonly cohere: {\n readonly package: \"@langchain/cohere\";\n readonly className: \"ChatCohere\";\n };\n readonly \"google-vertexai\": {\n readonly package: \"@langchain/google-vertexai\";\n readonly className: \"ChatVertexAI\";\n };\n readonly \"google-vertexai-web\": {\n readonly package: \"@langchain/google-vertexai-web\";\n readonly className: \"ChatVertexAI\";\n };\n readonly \"google-genai\": {\n readonly package: \"@langchain/google-genai\";\n readonly className: \"ChatGoogleGenerativeAI\";\n };\n readonly ollama: {\n readonly package: \"@langchain/ollama\";\n readonly className: \"ChatOllama\";\n };\n readonly mistralai: {\n readonly package: \"@langchain/mistralai\";\n readonly className: \"ChatMistralAI\";\n };\n readonly groq: {\n readonly package: \"@langchain/groq\";\n readonly className: \"ChatGroq\";\n };\n readonly cerebras: {\n readonly package: \"@langchain/cerebras\";\n readonly className: \"ChatCerebras\";\n };\n readonly bedrock: {\n readonly package: \"@langchain/aws\";\n readonly className: \"ChatBedrockConverse\";\n };\n readonly deepseek: {\n 
readonly package: \"@langchain/deepseek\";\n readonly className: \"ChatDeepSeek\";\n };\n readonly xai: {\n readonly package: \"@langchain/xai\";\n readonly className: \"ChatXAI\";\n };\n readonly fireworks: {\n readonly package: \"@langchain/community/chat_models/fireworks\";\n readonly className: \"ChatFireworks\";\n readonly hasCircularDependency: true;\n };\n readonly together: {\n readonly package: \"@langchain/community/chat_models/togetherai\";\n readonly className: \"ChatTogetherAI\";\n readonly hasCircularDependency: true;\n };\n readonly perplexity: {\n readonly package: \"@langchain/community/chat_models/perplexity\";\n readonly className: \"ChatPerplexity\";\n readonly hasCircularDependency: true;\n };\n};\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\n/**\n * Helper function to get a chat model class by its class name or model provider.\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @param modelProvider Optional model provider key for direct lookup (e.g., \"google-vertexai-web\").\n * When provided, uses direct lookup to avoid className collision issues.\n * @returns The imported model class or undefined if not found\n */\nexport declare function getChatModelByClassName(className: string, modelProvider?: string): Promise<any>;\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport declare function _inferModelProvider(modelName: string): string | undefined;\ninterface ConfigurableModelFields extends BaseChatModelParams {\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n queuedMethodOperations?: Record<string, any>;\n}\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport declare class ConfigurableModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string;\n lc_namespace: string[];\n _defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\";\n /**\n * @default \"\"\n */\n _configPrefix: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n _queuedMethodOperations: Record<string, any>;\n constructor(fields: ConfigurableModelFields);\n _model(config?: RunnableConfig): Promise<BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>>;\n _generate(messages: BaseMessage[], options?: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n bindTools(tools: BindToolsInput[], params?: Record<string, any>): ConfigurableModel<RunInput, CallOptions>;\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"];\n _modelParams(config?: 
RunnableConfig): Record<string, any>;\n _removePrefix(str: string, prefix: string): string;\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(config?: RunnableConfig): RunnableBinding<RunInput, AIMessageChunk, CallOptions>;\n invoke(input: RunInput, options?: CallOptions): Promise<AIMessageChunk>;\n stream(input: RunInput, options?: CallOptions): Promise<IterableReadableStream<AIMessageChunk>>;\n batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {\n returnExceptions?: false;\n }): Promise<AIMessageChunk[]>;\n batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {\n returnExceptions: true;\n }): Promise<(AIMessageChunk | Error)[]>;\n batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(AIMessageChunk | Error)[]>;\n transform(generator: AsyncGenerator<RunInput>, options: CallOptions): AsyncGenerator<AIMessageChunk>;\n streamLog(input: RunInput, options?: Partial<CallOptions>, streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">): AsyncGenerator<RunLogPatch>;\n streamEvents(input: RunInput, options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n }, streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">): IterableReadableStream<StreamEvent>;\n streamEvents(input: RunInput, options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n }, streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">): IterableReadableStream<Uint8Array>;\n}\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\nexport type ConfigurableFields = \"any\" | string[];\nexport declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model: string, fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n}): Promise<ConfigurableModel<RunInput, CallOptions>>;\nexport declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model: never, options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n}): Promise<ConfigurableModel<RunInput, CallOptions>>;\nexport declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model?: string, options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n}): Promise<ConfigurableModel<RunInput, CallOptions>>;\nexport {};\n//# 
sourceMappingURL=universal.d.ts.map"],"mappings":";;;;;;;;;;;UASUoB,+BAAAA,SAAwCC,KAAKP;AAA7CM,UAEOE,gCAAAA,SAAyCjB,wBAFRgB,CAAAA;EAEjCC,KAAAA,CAAAA,EAAAA,CACJL,uBADIK,GACsBC,MADU,CAAA,MAAA,EAAA,OAAA,CAAA,GACgBtB,cADhB,GACiCW,gBADjC,CAAA,EAAA;;AACVW,cAElBC,qBAFkBD,EAAAA;EAA0BtB,SAAAA,MAAAA,EAAAA;IAAiBW,SAAAA,OAAAA,EAAAA,mBAAAA;IADxBP,SAAAA,SAAAA,EAAAA,YAAAA;EAAwB,CAAA;EAG7DmB,SAAAA,SAAAA,EAAAA;IAyETC,SAAAA,OAAiB,EAAA,sBAAgBD;IAQrBE,SAAAA,SAAAA,EAAAA,eAAoEC;EAYpEC,CAAAA;EACdC,SAAAA,YAAAA,EAAAA;IACUN,SAAAA,OAAAA,EAAAA,mBAAAA;IAaSA,SAAAA,SAAAA,EAAAA,iBAAAA;EAdapB,CAAAA;EAAmB,SAAA,MAAA,EAAA;IAqBxC2B,SAAAA,OAAiB,EAAA,mBAAAE;IAAkBhC,SAAAA,SAAAA,EAAAA,YAAAA;EAAyBA,CAAAA;EAA4CsB,SAAAA,iBAAAA,EAAAA;IAAmCA,SAAAA,OAAAA,EAAAA,4BAAAA;IAAwDU,SAAAA,SAAAA,EAAAA,cAAAA;EAAazB,CAAAA;EAGhNgB,SAAAA,qBAAAA,EAAAA;IAaQA,SAAAA,OAAAA,EAAAA,gCAAAA;IACLM,SAAAA,SAAAA,EAAAA,cAAAA;EACJlB,CAAAA;EAAuCN,SAAAA,cAAAA,EAAAA;IAAyCG,SAAAA,OAAAA,EAAAA,yBAAAA;IAAfD,SAAAA,SAAAA,EAAAA,wBAAAA;EAAxCL,CAAAA;EAARyB,SAAAA,MAAAA,EAAAA;IACbrB,SAAAA,OAAAA,EAAAA,mBAAAA;IAAiEY,SAAAA,SAAAA,EAAAA,YAAAA;EAAmCC,CAAAA;EAARQ,SAAAA,SAAAA,EAAAA;IAC/FvB,SAAAA,OAAAA,EAAAA,sBAAAA;IAA2BmB,SAAAA,SAAAA,EAAAA,eAAAA;EAAwCQ,CAAAA;EAAUC,SAAAA,IAAAA,EAAAA;IAA5BF,SAAAA,OAAAA,EAAAA,iBAAAA;IAC5C5B,SAAAA,SAAAA,EAAAA,UAAAA;EACAS,CAAAA;EAAiBY,SAAAA,QAAAA,EAAAA;IAOnBZ,SAAAA,OAAAA,EAAAA,qBAAAA;IAAiCoB,SAAAA,SAAAA,EAAAA,cAAAA;EAAUxB,CAAAA;EAAgByB,SAAAA,OAAAA,EAAAA;IAA1CtB,SAAAA,OAAAA,EAAAA,gBAAAA;IACvBqB,SAAAA,SAAAA,EAAAA,qBAAAA;EAAoBC,CAAAA;EAAsBzB,SAAAA,QAAAA,EAAAA;IAARoB,SAAAA,OAAAA,EAAAA,qBAAAA;IAClCI,SAAAA,SAAAA,EAAAA,cAAAA;EAAoBC,CAAAA;EAA6CzB,SAAAA,GAAAA,EAAAA;IAAvBM,SAAAA,OAAAA,EAAAA,gBAAAA;IAARc,SAAAA,SAAAA,EAAAA,SAAAA;EAClCI,CAAAA;EAA8BC,SAAAA,SAAAA,EAAAA;IAARC,SAAAA,OAAAA,EAAAA,4CAAAA;IAA+BD,SAAAA,SAAAA,EAAAA,eAAAA;IAARC,SAAAA,qBAAAA,EAAAA,IAAAA;EAAuCxB,CAAAA;EAEtFF,SAAAA,QAAAA,EAAAA;IAARoB,SAAAA,OAAAA,EAAAA,6CAAAA;IACUI,SAAAA,SAAAA,EAAAA,gBAAAA;IAA8BC,SAAAA,qBAAAA,EAAAA,IAAAA;EAARC,CAAAA;EAA+BD,SAAAA,UAAAA,EAAAA;IAARC,SAAAA,OAAAA,EAAAA,6CAAAA;IAAuCxB,SAAAA,SAAAA,EAAAA,gBAAAA;IAErFF,SAAAA,qBAAAA,EAAAA,IAAAA;EAAiB2B,CAAAA;CAA1BP;AACUI,KAhFNN,iBAAAA,GAgFMM,MAAAA,OAhF2BP,qBAgF3BO;;;;;;;;AAA2GJ,iBAxErGD,uBAAAA,CAwEqGC,SAAAA,EAAAA,MAAAA,EAAAA,aAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAxEjCA,OAwEiCA,CAAAA,GAAAA,CAAAA;;;;;;;;;;;;AAEKQ,iBA9D1GP,mBAAAA,CA8D0GO,SAAAA,EAAAA,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;UA7DxHN,uBAAAA,SAAgC1B,mBA8DlB4B,CAAAA;EAA2BC,aAAAA,CAAAA,EA7D/BT,MA6D+BS,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAARC;;;EAEwDjB,kBAAAA,CAAAA,EAAAA,MAAAA,EAAAA,GAAAA,KAAAA;EAAvBH;;;EACjCoB,YAAAA,CAAAA,EAAAA,MAAAA;EAGfb;;;;EA/C8KlB,sBAAAA,CAAAA,EAP7KqB,MAO6KrB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;AAAa;AAiDvN;AAKA;AACA;;;AAA4HoB,cAvDvGQ,iBAuDuGR,CAAAA,iBAvDpEtB,sBAuDoEsB,GAvD3CtB,sBAuD2CsB,EAAAA,oBAvDCA,gCAuDDA,GAvDoCA,gCAuDpCA,CAAAA,SAvD8EpB,aAuD9EoB,CAvD4FU,WAuD5FV,EAvDyGf,cAuDzGe,CAAAA,CAAAA;EAAmCA,QAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAkEC,YAAAA,EAAAA,MAAAA,EAAAA;EAARU,cAAAA,CAAAA,EApDpMV,MAoDoMU,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAI3LF;;;EAA1BJ,mBAAAA,EAAAA,MAAAA,EAAAA,GAAAA,KAAAA;EAAO;AACX;;EAAgF3B,aAAAA,EAAAA,MAAAA;EAA4CsB;;;;EAI9FS,uBAAAA,EAhDDR,MAgDCQ,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAAUC,WAAAA,CAAAA,MAAAA,EA/ChBH,uBA+CgBG;EAA5BF,MAAAA,CAAAA,MAAAA,CAAAA,EA9CQnB,cA8CRmB,CAAAA,EA9CyBH,OA8CzBG,CA9CiC5B,aA8CjC4B,CA9C+CzB,wBA8C/CyB,EA9CyEvB,cA8CzEuB,CA9CwFtB,gBA8CxFsB,CAAAA,CAAAA,CAAAA;EAARH,SAAAA,CAAAA,QAAAA,EA7CoBrB,WA6CpBqB,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,EAAAA,UAAAA,CAAAA,EA7CqFT,wBA6CrFS,CAAAA,EA7CgHA,OA6ChHA,CA7CwHR,UA6CxHQ,CAAAA;EAAO,SAAA,CAAA,KAAA,EA5CUvB,cA4CV,EAAA,EAAA
,MAAA,CAAA,EA5CqCmB,MA4CrC,CAAA,MAAA,EAAA,GAAA,CAAA,CAAA,EA5C2DO,iBA4C3D,CA5C6EC,QA4C7E,EA5CuFC,WA4CvF,CAAA;EACaO,oBAAa,EA5CXrC,aA4CW8B,CAAAA,sBAAA,CAAA;EAAkBhC,YAAAA,CAAAA,MAAAA,CAAAA,EA3C7BW,cA2C6BX,CAAAA,EA3CZuB,MA2CYvB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA;EAAyBA,aAAAA,CAAAA,GAAAA,EAAAA,MAAAA,EAAAA,MAAAA,EAAAA,MAAAA,CAAAA,EAAAA,MAAAA;EAA4CsB;;;;;EAI9FS,UAAAA,CAAAA,MAAAA,CAAAA,EAxCNpB,cAwCMoB,CAAAA,EAxCWrB,eAwCXqB,CAxC2BA,QAwC3BA,EAxCqCxB,cAwCrCwB,EAxCqDC,WAwCrDD,CAAAA;EAAUC,MAAAA,CAAAA,KAAAA,EAvCtBD,QAuCsBC,EAAAA,OAAAA,CAAAA,EAvCFA,WAuCEA,CAAAA,EAvCYL,OAuCZK,CAvCoBzB,cAuCpByB,CAAAA;EAA5BF,MAAAA,CAAAA,KAAAA,EAtCMC,QAsCND,EAAAA,OAAAA,CAAAA,EAtC0BE,WAsC1BF,CAAAA,EAtCwCH,OAsCxCG,CAtCgDjB,sBAsChDiB,CAtCuEvB,cAsCvEuB,CAAAA,CAAAA;EAARH,KAAAA,CAAAA,MAAAA,EArCcI,QAqCdJ,EAAAA,EAAAA,OAAAA,CAAAA,EArCoCM,OAqCpCN,CArC4CK,WAqC5CL,CAAAA,GArC2DM,OAqC3DN,CArCmEK,WAqCnEL,CAAAA,EAAAA,EAAAA,aAAAA,EArCkGlB,oBAqClGkB,GAAAA;IAAO,gBAAA,CAAA,EAAA,KAAA;MAnCHA,QAAQpB;gBACEwB,sBAAsBE,QAAQD,eAAeC,QAAQD,+BAA+BvB;;MAE9FkB,SAASpB,iBAAiB2B;gBAChBH,sBAAsBE,QAAQD,eAAeC,QAAQD,+BAA+BvB,uBAAuBkB,SAASpB,iBAAiB2B;uBAC9HC,eAAeJ,oBAAoBC,cAAcG,eAAe5B;mBACpEwB,oBAAoBE,QAAQD,8BAA8BX,KAAKP,8CAA8CqB,eAAepB;sBACzHgB,mBAAmBE,QAAQD;;qBAE5BX,KAAKD,gDAAgDP,uBAAuBG;sBAC3Ee,mBAAmBE,QAAQD;;;qBAG5BX,KAAKD,gDAAgDP,uBAAuBuB;;UAElFC,mBAAAA,SAA4BJ,QAAQV;;;;;KAKzCe,kBAAAA;iBACYC,+BAA+BvC,yBAAyBA,4CAA4CsB,mCAAmCA,0DAA0DW,QAAQV;;;;IAI7NI,QAAQG,kBAAkBC,UAAUC;iBAChBO,+BAA+BvC,yBAAyBA,4CAA4CsB,mCAAmCA,0DAA0DW,QAAQV;;;;IAI7NI,QAAQG,kBAAkBC,UAAUC;iBAChBO,+BAA+BvC,yBAAyBA,4CAA4CsB,mCAAmCA,4DAA4DW,QAAQV;;uBAE1Me;;IAErBX,QAAQG,kBAAkBC,UAAUC"}
package/dist/chat_models/universal.d.ts CHANGED
@@ -88,11 +88,13 @@ declare const MODEL_PROVIDER_CONFIG: {
  };
  type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;
  /**
- * Helper function to get a chat model class by its class name
+ * Helper function to get a chat model class by its class name or model provider.
  * @param className The class name (e.g., "ChatOpenAI", "ChatAnthropic")
+ * @param modelProvider Optional model provider key for direct lookup (e.g., "google-vertexai-web").
+ * When provided, uses direct lookup to avoid className collision issues.
  * @returns The imported model class or undefined if not found
  */
- declare function getChatModelByClassName(className: string): Promise<any>;
+ declare function getChatModelByClassName(className: string, modelProvider?: string): Promise<any>;
  /**
  * Attempts to infer the model provider based on the given model name.
  *
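The hunk above applies the same doc-comment change to the ESM declaration file. For intuition about why the optional key matters, the following simplified, hypothetical sketch (not the package's verbatim implementation) contrasts the two lookup strategies the doc comment describes, over entries shaped like MODEL_PROVIDER_CONFIG:

```ts
// Hypothetical sketch of keyed lookup vs. className scan -- an illustration
// of the collision described in the doc comment, not the package's own code.
type ProviderConfig = { package: string; className: string };

// Two entries from the typings above share a className.
const CONFIG: Record<string, ProviderConfig> = {
  "google-vertexai": {
    package: "@langchain/google-vertexai",
    className: "ChatVertexAI",
  },
  "google-vertexai-web": {
    package: "@langchain/google-vertexai-web",
    className: "ChatVertexAI",
  },
};

function resolveConfig(
  className: string,
  modelProvider?: string
): ProviderConfig | undefined {
  // Direct keyed lookup: unambiguous even when classNames collide.
  if (modelProvider) return CONFIG[modelProvider];
  // Fallback scan: both entries match "ChatVertexAI", so this can only
  // ever return whichever entry is enumerated first.
  return Object.values(CONFIG).find((c) => c.className === className);
}

console.log(resolveConfig("ChatVertexAI")?.package); // "@langchain/google-vertexai"
console.log(resolveConfig("ChatVertexAI", "google-vertexai-web")?.package); // "@langchain/google-vertexai-web"
```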