langchain 1.2.35 → 1.2.36
This diff shows the published contents of two package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between versions as they appear in that registry.
- package/CHANGELOG.md +11 -0
- package/dist/agents/ReactAgent.cjs +1 -1
- package/dist/agents/ReactAgent.cjs.map +1 -1
- package/dist/agents/ReactAgent.d.cts.map +1 -1
- package/dist/agents/ReactAgent.d.ts.map +1 -1
- package/dist/agents/ReactAgent.js +1 -1
- package/dist/agents/ReactAgent.js.map +1 -1
- package/dist/agents/nodes/AgentNode.cjs +8 -3
- package/dist/agents/nodes/AgentNode.cjs.map +1 -1
- package/dist/agents/nodes/AgentNode.js +8 -3
- package/dist/agents/nodes/AgentNode.js.map +1 -1
- package/dist/agents/responses.cjs +7 -26
- package/dist/agents/responses.cjs.map +1 -1
- package/dist/agents/responses.js +8 -27
- package/dist/agents/responses.js.map +1 -1
- package/dist/agents/tests/utils.cjs.map +1 -1
- package/dist/agents/tests/utils.d.cts +1 -0
- package/dist/agents/tests/utils.d.cts.map +1 -1
- package/dist/agents/tests/utils.d.ts +1 -0
- package/dist/agents/tests/utils.d.ts.map +1 -1
- package/dist/agents/tests/utils.js.map +1 -1
- package/dist/chat_models/universal.cjs +4 -0
- package/dist/chat_models/universal.cjs.map +1 -1
- package/dist/chat_models/universal.d.cts.map +1 -1
- package/dist/chat_models/universal.d.ts.map +1 -1
- package/dist/chat_models/universal.js +4 -0
- package/dist/chat_models/universal.js.map +1 -1
- package/package.json +3 -3
package/dist/agents/tests/utils.cjs.map
@@ -1 +1 @@
- [single-line sourcemap JSON for utils.cjs]
+ [single-line sourcemap JSON for utils.cjs, regenerated: the embedded sourcesContent now imports ModelProfile from "@langchain/core/language_models/profile" and adds a `profile` getter to FakeToolCallingChatModel returning { toolCalling: true, structuredOutput: true }; the mappings shift accordingly]
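All of the regenerated test-utility maps trace back to that one source change, recoverable from the new map's `sourcesContent`. A minimal sketch of what the new `profile` getter exposes and how a caller might branch on it (the `supportsStructuredOutput` helper is illustrative, not part of the package):

```ts
import type { ModelProfile } from "@langchain/core/language_models/profile";

// Recovered from the embedded source: FakeToolCallingChatModel now returns
// this profile, advertising tool-calling and structured-output support.
const fakeModelProfile: ModelProfile = {
  toolCalling: true,
  structuredOutput: true,
};

// Illustrative helper: agent code can consult a model's profile and only
// request native structured output when the model declares support for it.
function supportsStructuredOutput(profile?: ModelProfile): boolean {
  return profile?.structuredOutput === true;
}

console.log(supportsStructuredOutput(fakeModelProfile)); // true
```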
package/dist/agents/tests/utils.d.cts
@@ -5,6 +5,7 @@ import { Runnable, RunnableBinding, RunnableConfig, RunnableLambda } from "@lang
 import { StructuredTool } from "@langchain/core/tools";
 import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { ChatResult } from "@langchain/core/outputs";
+import { ModelProfile } from "@langchain/core/language_models/profile";
 import { BaseCheckpointSaver } from "@langchain/langgraph-checkpoint";
 import { z } from "zod/v3";
 
package/dist/agents/tests/utils.d.cts.map
@@ -1 +1 @@
- {"version":3,"file":"utils.d.cts","names":[],"sources":["../../../src/agents/tests/utils.ts"],"mappings":"
+ [single-line declaration map for utils.d.cts, regenerated with mappings covering the new profile getter]
package/dist/agents/tests/utils.d.ts
@@ -7,6 +7,7 @@ import { z } from "zod/v3";
 import { LanguageModelLike } from "@langchain/core/language_models/base";
 import { ChatResult } from "@langchain/core/outputs";
 import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
+import { ModelProfile } from "@langchain/core/language_models/profile";
 
 //#region src/agents/tests/utils.d.ts
 interface ToolCall$1 {
package/dist/agents/tests/utils.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"utils.d.ts","names":[],"sources":["../../../src/agents/tests/utils.ts"],"mappings":"
+ [single-line declaration map for utils.d.ts, regenerated with mappings covering the new profile getter]
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"utils.js","names":[],"sources":["../../../src/agents/tests/utils.ts"],"sourcesContent":["/* eslint-disable @typescript-eslint/no-explicit-any */\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BaseChatModelCallOptions,\n BindToolsInput,\n ToolChoice,\n} from \"@langchain/core/language_models/chat_models\";\nimport { StructuredTool } from \"@langchain/core/tools\";\nimport {\n BaseMessage,\n AIMessage,\n HumanMessage,\n BaseMessageFields,\n AIMessageFields,\n ToolMessage,\n ToolMessageFields,\n} from \"@langchain/core/messages\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport {\n Runnable,\n RunnableConfig,\n RunnableLambda,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\nimport {\n MemorySaver,\n Checkpoint,\n CheckpointMetadata,\n type BaseCheckpointSaver,\n} from \"@langchain/langgraph-checkpoint\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { z } from \"zod/v3\";\n\n/**\n * Custom asymmetric matcher that matches any string value.\n * Works with both Jest and Vitest's toEqual() assertions.\n */\nclass AnyString {\n asymmetricMatch(other: unknown): boolean {\n return typeof other === \"string\";\n }\n\n toString(): string {\n return \"Any<String>\";\n }\n\n toAsymmetricMatcher(): string {\n return \"Any<String>\";\n }\n}\n\nexport class _AnyIdAIMessage extends AIMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"AIMessage\"];\n }\n\n constructor(fields: AIMessageFields | string) {\n let fieldsWithJestMatcher: Partial<AIMessageFields> = {\n id: new AnyString() as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as AIMessageFields);\n }\n}\n\nexport class _AnyIdHumanMessage extends HumanMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"HumanMessage\"];\n }\n\n constructor(fields: BaseMessageFields | string) {\n let fieldsWithJestMatcher: Partial<BaseMessageFields> = {\n id: new AnyString() as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as BaseMessageFields);\n }\n}\n\nexport class _AnyIdToolMessage extends ToolMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"ToolMessage\"];\n }\n\n constructor(fields: ToolMessageFields) {\n const fieldsWithJestMatcher: Partial<ToolMessageFields> = {\n id: new AnyString() as unknown as string,\n ...fields,\n };\n super(fieldsWithJestMatcher as ToolMessageFields);\n }\n}\n\nexport class FakeConfigurableModel extends BaseChatModel {\n _queuedMethodOperations: Record<string, any> = {};\n\n _chatModel: LanguageModelLike;\n\n constructor(\n fields: {\n model: LanguageModelLike;\n } & BaseChatModelParams\n ) {\n super(fields);\n this._chatModel = fields.model;\n }\n\n _llmType() {\n return \"fake_configurable\";\n }\n\n async _generate(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n throw new Error(\"Not implemented\");\n }\n\n async _model() {\n return this._chatModel;\n }\n\n bindTools(tools: BindToolsInput[]) 
{\n const modelWithTools = new FakeConfigurableModel({\n model: (this._chatModel as FakeToolCallingChatModel).bindTools(tools),\n });\n modelWithTools._queuedMethodOperations.bindTools = tools;\n return modelWithTools;\n }\n}\n\nexport class FakeToolCallingChatModel extends BaseChatModel {\n sleep?: number = 50;\n\n responses?: BaseMessage[];\n\n thrownErrorString?: string;\n\n idx: number;\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n structuredResponse?: Record<string, unknown>;\n\n // Track messages passed to structured output calls\n structuredOutputMessages: BaseMessage[][] = [];\n\n constructor(\n fields: {\n sleep?: number;\n responses?: BaseMessage[];\n thrownErrorString?: string;\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n structuredResponse?: Record<string, unknown>;\n } & BaseChatModelParams\n ) {\n super(fields);\n this.sleep = fields.sleep ?? this.sleep;\n this.responses = fields.responses;\n this.thrownErrorString = fields.thrownErrorString;\n this.idx = 0;\n this.toolStyle = fields.toolStyle ?? this.toolStyle;\n this.structuredResponse = fields.structuredResponse;\n this.structuredOutputMessages = [];\n }\n\n _llmType() {\n return \"fake\";\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.sleep !== undefined) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n const responses = this.responses?.length ? this.responses : messages;\n const msg = responses[this.idx % responses.length];\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: msg,\n },\n ],\n };\n this.idx += 1;\n\n if (typeof msg.content === \"string\") {\n await runManager?.handleLLMNewToken(msg.content);\n }\n return generation;\n }\n\n bindTools(tools: BindToolsInput[]): Runnable<any> {\n const toolDicts = [];\n const serverTools = [];\n for (const tool of tools) {\n if (!(\"name\" in tool)) {\n serverTools.push(tool);\n continue;\n }\n\n // NOTE: this is a simplified tool spec for testing purposes only\n if (this.toolStyle === \"openai\") {\n toolDicts.push({\n type: \"function\",\n function: {\n name: tool.name,\n },\n });\n } else if ([\"anthropic\", \"google\"].includes(this.toolStyle)) {\n toolDicts.push({\n name: tool.name,\n });\n } else if (this.toolStyle === \"bedrock\") {\n toolDicts.push({\n toolSpec: {\n name: tool.name,\n },\n });\n }\n }\n let toolsToBind: BindToolsInput[] = toolDicts;\n if (this.toolStyle === \"google\") {\n toolsToBind = [{ functionDeclarations: toolDicts }];\n }\n return this.withConfig({\n tools: [...toolsToBind, ...serverTools],\n } as BaseChatModelCallOptions);\n }\n\n withStructuredOutput<\n RunOutput extends Record<string, any> = Record<string, any>,\n >(_: unknown): Runnable<any> {\n if (!this.structuredResponse) {\n throw new Error(\"No structured response provided\");\n }\n // Create a runnable that returns the proper structured format\n return RunnableLambda.from(async (messages: BaseMessage[]) => {\n if (this.sleep) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n\n // Store the messages that were sent to generate structured output\n this.structuredOutputMessages.push([...messages]);\n\n // Return in the format expected: { raw: BaseMessage, parsed: RunOutput }\n return this.structuredResponse as RunOutput;\n });\n }\n}\n\nexport 
class MemorySaverAssertImmutable extends MemorySaver {\n storageForCopies: Record<string, Record<string, Uint8Array>> = {};\n\n constructor() {\n super();\n this.storageForCopies = {};\n }\n\n async put(\n config: RunnableConfig,\n checkpoint: Checkpoint,\n metadata: CheckpointMetadata\n ): Promise<RunnableConfig> {\n const thread_id = config.configurable?.thread_id;\n this.storageForCopies[thread_id] ??= {};\n\n // assert checkpoint hasn't been modified since last written\n const saved = await this.get(config);\n if (saved) {\n const savedId = saved.id;\n if (this.storageForCopies[thread_id][savedId]) {\n const [, serializedSaved] = await this.serde.dumpsTyped(saved);\n const serializedCopy = this.storageForCopies[thread_id][savedId];\n\n // Compare Uint8Array contents by converting to string\n const savedStr = new TextDecoder().decode(serializedSaved);\n const copyStr = new TextDecoder().decode(serializedCopy);\n if (savedStr !== copyStr) {\n throw new Error(\n `Checkpoint [${savedId}] has been modified since last written`\n );\n }\n }\n }\n const [, serializedCheckpoint] = await this.serde.dumpsTyped(checkpoint);\n // save a copy of the checkpoint\n this.storageForCopies[thread_id][checkpoint.id] = serializedCheckpoint;\n\n return super.put(config, checkpoint, metadata);\n }\n}\n\ninterface ToolCall {\n name: string;\n args: Record<string, any>;\n id: string;\n type?: \"tool_call\";\n}\n\ninterface FakeToolCallingModelFields {\n toolCalls?: ToolCall[][];\n toolStyle?: \"openai\" | \"anthropic\";\n index?: number;\n structuredResponse?: any;\n}\n\n// Helper function to create checkpointer\nexport function createCheckpointer(): BaseCheckpointSaver {\n return new MemorySaver();\n}\n\n/**\n * Fake chat model for testing tool calling functionality\n */\nexport class FakeToolCallingModel extends BaseChatModel {\n toolCalls: ToolCall[][];\n\n toolStyle: \"openai\" | \"anthropic\";\n\n // Use a shared reference object so the index persists across bindTools calls\n private indexRef: { current: number };\n\n structuredResponse?: any;\n\n private tools: StructuredTool[] = [];\n\n constructor({\n toolCalls = [],\n toolStyle = \"openai\",\n index = 0,\n structuredResponse,\n indexRef,\n ...rest\n }: FakeToolCallingModelFields & { indexRef?: { current: number } } = {}) {\n super(rest);\n this.toolCalls = toolCalls;\n this.toolStyle = toolStyle;\n // Share the same index reference across instances\n this.indexRef = indexRef ?? 
{ current: index };\n this.structuredResponse = structuredResponse;\n }\n\n // Getter/setter for backwards compatibility\n get index(): number {\n return this.indexRef.current;\n }\n\n set index(value: number) {\n this.indexRef.current = value;\n }\n\n _llmType(): string {\n return \"fake-tool-calling\";\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n bindTools(\n tools: StructuredTool[]\n ):\n | FakeToolCallingModel\n | RunnableBinding<\n any,\n any,\n any & { tool_choice?: ToolChoice | undefined }\n > {\n const newInstance = new FakeToolCallingModel({\n toolCalls: this.toolCalls,\n toolStyle: this.toolStyle,\n structuredResponse: this.structuredResponse,\n // Pass the same indexRef so all instances share the same index state\n indexRef: this.indexRef,\n });\n newInstance.tools = [...this.tools, ...tools];\n return newInstance;\n }\n\n withStructuredOutput(_schema: any) {\n return new RunnableLambda({\n func: async () => {\n return this.structuredResponse;\n },\n });\n }\n\n async _generate(\n messages: BaseMessage[],\n _options?: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const lastMessage = messages[messages.length - 1];\n let content = lastMessage.content as string;\n\n // Handle prompt concatenation\n if (messages.length > 1) {\n const parts = messages.map((m) => m.content).filter(Boolean);\n content = parts\n .map((part) => {\n if (typeof part === \"string\") {\n return part;\n } else if (typeof part === \"object\" && \"text\" in part) {\n return part.text;\n } else if (Array.isArray(part)) {\n return part\n .map((p) => {\n if (typeof p === \"string\") {\n return p;\n } else if (typeof p === \"object\" && \"text\" in p) {\n return p.text;\n }\n return \"\";\n })\n .join(\"-\");\n } else {\n return JSON.stringify(part);\n }\n })\n .join(\"-\");\n }\n\n // Reset index at the start of a new conversation (only human message)\n // This allows the model to be reused across multiple agent.invoke() calls\n const isStartOfConversation =\n messages.length === 1 ||\n (messages.length === 2 && messages.every(HumanMessage.isInstance));\n if (isStartOfConversation && this.index !== 0) {\n this.index = 0;\n }\n\n const currentToolCalls = this.toolCalls[this.index] || [];\n const messageId = this.index.toString();\n\n // Move to next set of tool calls for subsequent invocations\n this.index = (this.index + 1) % Math.max(1, this.toolCalls.length);\n\n const message = new AIMessage({\n content,\n id: messageId,\n tool_calls:\n currentToolCalls.length > 0\n ? 
currentToolCalls.map((tc) => ({\n ...tc,\n type: \"tool_call\" as const,\n }))\n : undefined,\n });\n\n return {\n generations: [\n {\n text: content,\n message,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class SearchAPI extends StructuredTool {\n name = \"search_api\";\n\n description = \"A simple API that returns the input string.\";\n\n schema = z.object({\n query: z.string().describe(\"The query to search for.\"),\n });\n\n async _call(input: z.infer<typeof this.schema>) {\n if (input?.query === \"error\") {\n throw new Error(\"Error\");\n }\n return `result for ${input?.query}`;\n }\n}\n"],"mappings":";;;;;;;;;;AAyVA,IAAa,uBAAb,MAAa,6BAA6B,cAAc;CACtD;CAEA;CAGA;CAEA;CAEA,QAAkC,EAAE;CAEpC,YAAY,EACV,YAAY,EAAE,EACd,YAAY,UACZ,QAAQ,GACR,oBACA,UACA,GAAG,SACgE,EAAE,EAAE;AACvE,QAAM,KAAK;AACX,OAAK,YAAY;AACjB,OAAK,YAAY;AAEjB,OAAK,WAAW,YAAY,EAAE,SAAS,OAAO;AAC9C,OAAK,qBAAqB;;CAI5B,IAAI,QAAgB;AAClB,SAAO,KAAK,SAAS;;CAGvB,IAAI,MAAM,OAAe;AACvB,OAAK,SAAS,UAAU;;CAG1B,WAAmB;AACjB,SAAO;;CAGT,oBAAoB;AAClB,SAAO,EAAE;;CAGX,UACE,OAOI;EACJ,MAAM,cAAc,IAAI,qBAAqB;GAC3C,WAAW,KAAK;GAChB,WAAW,KAAK;GAChB,oBAAoB,KAAK;GAEzB,UAAU,KAAK;GAChB,CAAC;AACF,cAAY,QAAQ,CAAC,GAAG,KAAK,OAAO,GAAG,MAAM;AAC7C,SAAO;;CAGT,qBAAqB,SAAc;AACjC,SAAO,IAAI,eAAe,EACxB,MAAM,YAAY;AAChB,UAAO,KAAK;KAEf,CAAC;;CAGJ,MAAM,UACJ,UACA,UACA,aACqB;EAErB,IAAI,UADgB,SAAS,SAAS,SAAS,GACrB;AAG1B,MAAI,SAAS,SAAS,EAEpB,WADc,SAAS,KAAK,MAAM,EAAE,QAAQ,CAAC,OAAO,QAAQ,CAEzD,KAAK,SAAS;AACb,OAAI,OAAO,SAAS,SAClB,QAAO;YACE,OAAO,SAAS,YAAY,UAAU,KAC/C,QAAO,KAAK;YACH,MAAM,QAAQ,KAAK,CAC5B,QAAO,KACJ,KAAK,MAAM;AACV,QAAI,OAAO,MAAM,SACf,QAAO;aACE,OAAO,MAAM,YAAY,UAAU,EAC5C,QAAO,EAAE;AAEX,WAAO;KACP,CACD,KAAK,IAAI;OAEZ,QAAO,KAAK,UAAU,KAAK;IAE7B,CACD,KAAK,IAAI;AAQd,OAFE,SAAS,WAAW,KACnB,SAAS,WAAW,KAAK,SAAS,MAAM,aAAa,WAAW,KACtC,KAAK,UAAU,EAC1C,MAAK,QAAQ;EAGf,MAAM,mBAAmB,KAAK,UAAU,KAAK,UAAU,EAAE;EACzD,MAAM,YAAY,KAAK,MAAM,UAAU;AAGvC,OAAK,SAAS,KAAK,QAAQ,KAAK,KAAK,IAAI,GAAG,KAAK,UAAU,OAAO;EAElE,MAAM,UAAU,IAAI,UAAU;GAC5B;GACA,IAAI;GACJ,YACE,iBAAiB,SAAS,IACtB,iBAAiB,KAAK,QAAQ;IAC5B,GAAG;IACH,MAAM;IACP,EAAE,GACH,KAAA;GACP,CAAC;AAEF,SAAO;GACL,aAAa,CACX;IACE,MAAM;IACN;IACD,CACF;GACD,WAAW,EAAE;GACd"}
|
|
1
|
+
{"version":3,"file":"utils.js","names":[],"sources":["../../../src/agents/tests/utils.ts"],"sourcesContent":["/* eslint-disable @typescript-eslint/no-explicit-any */\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BaseChatModelCallOptions,\n BindToolsInput,\n ToolChoice,\n} from \"@langchain/core/language_models/chat_models\";\nimport type { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { StructuredTool } from \"@langchain/core/tools\";\nimport {\n BaseMessage,\n AIMessage,\n HumanMessage,\n BaseMessageFields,\n AIMessageFields,\n ToolMessage,\n ToolMessageFields,\n} from \"@langchain/core/messages\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport {\n Runnable,\n RunnableConfig,\n RunnableLambda,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\nimport {\n MemorySaver,\n Checkpoint,\n CheckpointMetadata,\n type BaseCheckpointSaver,\n} from \"@langchain/langgraph-checkpoint\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport { z } from \"zod/v3\";\n\n/**\n * Custom asymmetric matcher that matches any string value.\n * Works with both Jest and Vitest's toEqual() assertions.\n */\nclass AnyString {\n asymmetricMatch(other: unknown): boolean {\n return typeof other === \"string\";\n }\n\n toString(): string {\n return \"Any<String>\";\n }\n\n toAsymmetricMatcher(): string {\n return \"Any<String>\";\n }\n}\n\nexport class _AnyIdAIMessage extends AIMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"AIMessage\"];\n }\n\n constructor(fields: AIMessageFields | string) {\n let fieldsWithJestMatcher: Partial<AIMessageFields> = {\n id: new AnyString() as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as AIMessageFields);\n }\n}\n\nexport class _AnyIdHumanMessage extends HumanMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"HumanMessage\"];\n }\n\n constructor(fields: BaseMessageFields | string) {\n let fieldsWithJestMatcher: Partial<BaseMessageFields> = {\n id: new AnyString() as unknown as string,\n };\n if (typeof fields === \"string\") {\n fieldsWithJestMatcher = {\n content: fields,\n ...fieldsWithJestMatcher,\n };\n } else {\n fieldsWithJestMatcher = {\n ...fields,\n ...fieldsWithJestMatcher,\n };\n }\n super(fieldsWithJestMatcher as BaseMessageFields);\n }\n}\n\nexport class _AnyIdToolMessage extends ToolMessage {\n get lc_id() {\n return [\"langchain_core\", \"messages\", \"ToolMessage\"];\n }\n\n constructor(fields: ToolMessageFields) {\n const fieldsWithJestMatcher: Partial<ToolMessageFields> = {\n id: new AnyString() as unknown as string,\n ...fields,\n };\n super(fieldsWithJestMatcher as ToolMessageFields);\n }\n}\n\nexport class FakeConfigurableModel extends BaseChatModel {\n _queuedMethodOperations: Record<string, any> = {};\n\n _chatModel: LanguageModelLike;\n\n constructor(\n fields: {\n model: LanguageModelLike;\n } & BaseChatModelParams\n ) {\n super(fields);\n this._chatModel = fields.model;\n }\n\n _llmType() {\n return \"fake_configurable\";\n }\n\n async _generate(\n _messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n throw new Error(\"Not implemented\");\n }\n\n async 
_model() {\n return this._chatModel;\n }\n\n bindTools(tools: BindToolsInput[]) {\n const modelWithTools = new FakeConfigurableModel({\n model: (this._chatModel as FakeToolCallingChatModel).bindTools(tools),\n });\n modelWithTools._queuedMethodOperations.bindTools = tools;\n return modelWithTools;\n }\n}\n\nexport class FakeToolCallingChatModel extends BaseChatModel {\n sleep?: number = 50;\n\n responses?: BaseMessage[];\n\n thrownErrorString?: string;\n\n idx: number;\n\n toolStyle: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\" = \"openai\";\n\n structuredResponse?: Record<string, unknown>;\n\n // Track messages passed to structured output calls\n structuredOutputMessages: BaseMessage[][] = [];\n\n constructor(\n fields: {\n sleep?: number;\n responses?: BaseMessage[];\n thrownErrorString?: string;\n toolStyle?: \"openai\" | \"anthropic\" | \"bedrock\" | \"google\";\n structuredResponse?: Record<string, unknown>;\n } & BaseChatModelParams\n ) {\n super(fields);\n this.sleep = fields.sleep ?? this.sleep;\n this.responses = fields.responses;\n this.thrownErrorString = fields.thrownErrorString;\n this.idx = 0;\n this.toolStyle = fields.toolStyle ?? this.toolStyle;\n this.structuredResponse = fields.structuredResponse;\n this.structuredOutputMessages = [];\n }\n\n _llmType() {\n return \"fake\";\n }\n\n get profile(): ModelProfile {\n return {\n toolCalling: true,\n structuredOutput: true,\n };\n }\n\n async _generate(\n messages: BaseMessage[],\n _options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n if (this.thrownErrorString) {\n throw new Error(this.thrownErrorString);\n }\n if (this.sleep !== undefined) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n const responses = this.responses?.length ? 
this.responses : messages;\n const msg = responses[this.idx % responses.length];\n const generation: ChatResult = {\n generations: [\n {\n text: \"\",\n message: msg,\n },\n ],\n };\n this.idx += 1;\n\n if (typeof msg.content === \"string\") {\n await runManager?.handleLLMNewToken(msg.content);\n }\n return generation;\n }\n\n bindTools(tools: BindToolsInput[]): Runnable<any> {\n const toolDicts = [];\n const serverTools = [];\n for (const tool of tools) {\n if (!(\"name\" in tool)) {\n serverTools.push(tool);\n continue;\n }\n\n // NOTE: this is a simplified tool spec for testing purposes only\n if (this.toolStyle === \"openai\") {\n toolDicts.push({\n type: \"function\",\n function: {\n name: tool.name,\n },\n });\n } else if ([\"anthropic\", \"google\"].includes(this.toolStyle)) {\n toolDicts.push({\n name: tool.name,\n });\n } else if (this.toolStyle === \"bedrock\") {\n toolDicts.push({\n toolSpec: {\n name: tool.name,\n },\n });\n }\n }\n let toolsToBind: BindToolsInput[] = toolDicts;\n if (this.toolStyle === \"google\") {\n toolsToBind = [{ functionDeclarations: toolDicts }];\n }\n return this.withConfig({\n tools: [...toolsToBind, ...serverTools],\n } as BaseChatModelCallOptions);\n }\n\n withStructuredOutput<\n RunOutput extends Record<string, any> = Record<string, any>,\n >(_: unknown): Runnable<any> {\n if (!this.structuredResponse) {\n throw new Error(\"No structured response provided\");\n }\n // Create a runnable that returns the proper structured format\n return RunnableLambda.from(async (messages: BaseMessage[]) => {\n if (this.sleep) {\n await new Promise((resolve) => setTimeout(resolve, this.sleep));\n }\n\n // Store the messages that were sent to generate structured output\n this.structuredOutputMessages.push([...messages]);\n\n // Return in the format expected: { raw: BaseMessage, parsed: RunOutput }\n return this.structuredResponse as RunOutput;\n });\n }\n}\n\nexport class MemorySaverAssertImmutable extends MemorySaver {\n storageForCopies: Record<string, Record<string, Uint8Array>> = {};\n\n constructor() {\n super();\n this.storageForCopies = {};\n }\n\n async put(\n config: RunnableConfig,\n checkpoint: Checkpoint,\n metadata: CheckpointMetadata\n ): Promise<RunnableConfig> {\n const thread_id = config.configurable?.thread_id;\n this.storageForCopies[thread_id] ??= {};\n\n // assert checkpoint hasn't been modified since last written\n const saved = await this.get(config);\n if (saved) {\n const savedId = saved.id;\n if (this.storageForCopies[thread_id][savedId]) {\n const [, serializedSaved] = await this.serde.dumpsTyped(saved);\n const serializedCopy = this.storageForCopies[thread_id][savedId];\n\n // Compare Uint8Array contents by converting to string\n const savedStr = new TextDecoder().decode(serializedSaved);\n const copyStr = new TextDecoder().decode(serializedCopy);\n if (savedStr !== copyStr) {\n throw new Error(\n `Checkpoint [${savedId}] has been modified since last written`\n );\n }\n }\n }\n const [, serializedCheckpoint] = await this.serde.dumpsTyped(checkpoint);\n // save a copy of the checkpoint\n this.storageForCopies[thread_id][checkpoint.id] = serializedCheckpoint;\n\n return super.put(config, checkpoint, metadata);\n }\n}\n\ninterface ToolCall {\n name: string;\n args: Record<string, any>;\n id: string;\n type?: \"tool_call\";\n}\n\ninterface FakeToolCallingModelFields {\n toolCalls?: ToolCall[][];\n toolStyle?: \"openai\" | \"anthropic\";\n index?: number;\n structuredResponse?: any;\n}\n\n// Helper function to create checkpointer\nexport 
function createCheckpointer(): BaseCheckpointSaver {\n return new MemorySaver();\n}\n\n/**\n * Fake chat model for testing tool calling functionality\n */\nexport class FakeToolCallingModel extends BaseChatModel {\n toolCalls: ToolCall[][];\n\n toolStyle: \"openai\" | \"anthropic\";\n\n // Use a shared reference object so the index persists across bindTools calls\n private indexRef: { current: number };\n\n structuredResponse?: any;\n\n private tools: StructuredTool[] = [];\n\n constructor({\n toolCalls = [],\n toolStyle = \"openai\",\n index = 0,\n structuredResponse,\n indexRef,\n ...rest\n }: FakeToolCallingModelFields & { indexRef?: { current: number } } = {}) {\n super(rest);\n this.toolCalls = toolCalls;\n this.toolStyle = toolStyle;\n // Share the same index reference across instances\n this.indexRef = indexRef ?? { current: index };\n this.structuredResponse = structuredResponse;\n }\n\n // Getter/setter for backwards compatibility\n get index(): number {\n return this.indexRef.current;\n }\n\n set index(value: number) {\n this.indexRef.current = value;\n }\n\n _llmType(): string {\n return \"fake-tool-calling\";\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n bindTools(\n tools: StructuredTool[]\n ):\n | FakeToolCallingModel\n | RunnableBinding<\n any,\n any,\n any & { tool_choice?: ToolChoice | undefined }\n > {\n const newInstance = new FakeToolCallingModel({\n toolCalls: this.toolCalls,\n toolStyle: this.toolStyle,\n structuredResponse: this.structuredResponse,\n // Pass the same indexRef so all instances share the same index state\n indexRef: this.indexRef,\n });\n newInstance.tools = [...this.tools, ...tools];\n return newInstance;\n }\n\n withStructuredOutput(_schema: any) {\n return new RunnableLambda({\n func: async () => {\n return this.structuredResponse;\n },\n });\n }\n\n async _generate(\n messages: BaseMessage[],\n _options?: this[\"ParsedCallOptions\"],\n _runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const lastMessage = messages[messages.length - 1];\n let content = lastMessage.content as string;\n\n // Handle prompt concatenation\n if (messages.length > 1) {\n const parts = messages.map((m) => m.content).filter(Boolean);\n content = parts\n .map((part) => {\n if (typeof part === \"string\") {\n return part;\n } else if (typeof part === \"object\" && \"text\" in part) {\n return part.text;\n } else if (Array.isArray(part)) {\n return part\n .map((p) => {\n if (typeof p === \"string\") {\n return p;\n } else if (typeof p === \"object\" && \"text\" in p) {\n return p.text;\n }\n return \"\";\n })\n .join(\"-\");\n } else {\n return JSON.stringify(part);\n }\n })\n .join(\"-\");\n }\n\n // Reset index at the start of a new conversation (only human message)\n // This allows the model to be reused across multiple agent.invoke() calls\n const isStartOfConversation =\n messages.length === 1 ||\n (messages.length === 2 && messages.every(HumanMessage.isInstance));\n if (isStartOfConversation && this.index !== 0) {\n this.index = 0;\n }\n\n const currentToolCalls = this.toolCalls[this.index] || [];\n const messageId = this.index.toString();\n\n // Move to next set of tool calls for subsequent invocations\n this.index = (this.index + 1) % Math.max(1, this.toolCalls.length);\n\n const message = new AIMessage({\n content,\n id: messageId,\n tool_calls:\n currentToolCalls.length > 0\n ? 
currentToolCalls.map((tc) => ({\n ...tc,\n type: \"tool_call\" as const,\n }))\n : undefined,\n });\n\n return {\n generations: [\n {\n text: content,\n message,\n },\n ],\n llmOutput: {},\n };\n }\n}\n\nexport class SearchAPI extends StructuredTool {\n name = \"search_api\";\n\n description = \"A simple API that returns the input string.\";\n\n schema = z.object({\n query: z.string().describe(\"The query to search for.\"),\n });\n\n async _call(input: z.infer<typeof this.schema>) {\n if (input?.query === \"error\") {\n throw new Error(\"Error\");\n }\n return `result for ${input?.query}`;\n }\n}\n"],"mappings":";;;;;;;;;;AAiWA,IAAa,uBAAb,MAAa,6BAA6B,cAAc;CACtD;CAEA;CAGA;CAEA;CAEA,QAAkC,EAAE;CAEpC,YAAY,EACV,YAAY,EAAE,EACd,YAAY,UACZ,QAAQ,GACR,oBACA,UACA,GAAG,SACgE,EAAE,EAAE;AACvE,QAAM,KAAK;AACX,OAAK,YAAY;AACjB,OAAK,YAAY;AAEjB,OAAK,WAAW,YAAY,EAAE,SAAS,OAAO;AAC9C,OAAK,qBAAqB;;CAI5B,IAAI,QAAgB;AAClB,SAAO,KAAK,SAAS;;CAGvB,IAAI,MAAM,OAAe;AACvB,OAAK,SAAS,UAAU;;CAG1B,WAAmB;AACjB,SAAO;;CAGT,oBAAoB;AAClB,SAAO,EAAE;;CAGX,UACE,OAOI;EACJ,MAAM,cAAc,IAAI,qBAAqB;GAC3C,WAAW,KAAK;GAChB,WAAW,KAAK;GAChB,oBAAoB,KAAK;GAEzB,UAAU,KAAK;GAChB,CAAC;AACF,cAAY,QAAQ,CAAC,GAAG,KAAK,OAAO,GAAG,MAAM;AAC7C,SAAO;;CAGT,qBAAqB,SAAc;AACjC,SAAO,IAAI,eAAe,EACxB,MAAM,YAAY;AAChB,UAAO,KAAK;KAEf,CAAC;;CAGJ,MAAM,UACJ,UACA,UACA,aACqB;EAErB,IAAI,UADgB,SAAS,SAAS,SAAS,GACrB;AAG1B,MAAI,SAAS,SAAS,EAEpB,WADc,SAAS,KAAK,MAAM,EAAE,QAAQ,CAAC,OAAO,QAAQ,CAEzD,KAAK,SAAS;AACb,OAAI,OAAO,SAAS,SAClB,QAAO;YACE,OAAO,SAAS,YAAY,UAAU,KAC/C,QAAO,KAAK;YACH,MAAM,QAAQ,KAAK,CAC5B,QAAO,KACJ,KAAK,MAAM;AACV,QAAI,OAAO,MAAM,SACf,QAAO;aACE,OAAO,MAAM,YAAY,UAAU,EAC5C,QAAO,EAAE;AAEX,WAAO;KACP,CACD,KAAK,IAAI;OAEZ,QAAO,KAAK,UAAU,KAAK;IAE7B,CACD,KAAK,IAAI;AAQd,OAFE,SAAS,WAAW,KACnB,SAAS,WAAW,KAAK,SAAS,MAAM,aAAa,WAAW,KACtC,KAAK,UAAU,EAC1C,MAAK,QAAQ;EAGf,MAAM,mBAAmB,KAAK,UAAU,KAAK,UAAU,EAAE;EACzD,MAAM,YAAY,KAAK,MAAM,UAAU;AAGvC,OAAK,SAAS,KAAK,QAAQ,KAAK,KAAK,IAAI,GAAG,KAAK,UAAU,OAAO;EAElE,MAAM,UAAU,IAAI,UAAU;GAC5B;GACA,IAAI;GACJ,YACE,iBAAiB,SAAS,IACtB,iBAAiB,KAAK,QAAQ;IAC5B,GAAG;IACH,MAAM;IACP,EAAE,GACH,KAAA;GACP,CAAC;AAEF,SAAO;GACL,aAAa,CACX;IACE,MAAM;IACN;IACD,CACF;GACD,WAAW,EAAE;GACd"}
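The rewritten `utils.cjs.map` above embeds the test-helper source, whose `FakeToolCallingModel` keeps its invocation index in a shared reference object so that the copies returned by `bindTools` all advance the same counter. A minimal standalone sketch of that sharing pattern (the `Counter` and `makeClone` names are illustrative, not langchain APIs):

```typescript
// Illustrative sketch only: `Counter`/`makeClone` are hypothetical names.
interface IndexRef {
  current: number;
}

class Counter {
  // Every clone receives the same ref object, so advancing the index in one
  // instance is visible in all of them (as bindTools does in the test model).
  constructor(private indexRef: IndexRef = { current: 0 }) {}

  makeClone(): Counter {
    return new Counter(this.indexRef);
  }

  next(): number {
    const value = this.indexRef.current;
    this.indexRef.current += 1;
    return value;
  }
}

const base = new Counter();
const clone = base.makeClone();
console.log(base.next()); // 0
console.log(clone.next()); // 1, because the clone shares the same index state
```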
@@ -188,6 +188,10 @@ var ConfigurableModel = class ConfigurableModel extends _langchain_core_language
     else this._configPrefix = "";
     this._queuedMethodOperations = fields.queuedMethodOperations ?? this._queuedMethodOperations;
     this._profile = fields.profile ?? void 0;
+    this.metadata = {
+      ...this.metadata,
+      ls_integration: "langchain_init_chat_model"
+    };
   }
   async _getModelInstance(config) {
     const cacheKey = this._getCacheKey(config);
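This hunk is the functional change in the compiled `universal` module: models constructed through `initChatModel` now merge an `ls_integration` tag into their metadata, presumably so downstream tracing can identify universally initialized models. Because the existing metadata is spread first, the tag takes precedence over any caller-supplied value of the same key. A plain-object sketch of that merge semantics (the `tagMetadata` helper is hypothetical):

```typescript
// Sketch of the metadata merge performed in the constructor above.
// Spreading the existing metadata first means the ls_integration tag
// always wins over a previously set value for that key.
type Metadata = Record<string, unknown>;

function tagMetadata(existing: Metadata | undefined): Metadata {
  return {
    ...existing,
    ls_integration: "langchain_init_chat_model",
  };
}

console.log(tagMetadata({ team: "search" }));
// { team: "search", ls_integration: "langchain_init_chat_model" }
console.log(tagMetadata({ ls_integration: "custom" }));
// { ls_integration: "langchain_init_chat_model" } -- caller value overridden
```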
@@ -1 +1 @@
-
{"version":3,"file":"universal.cjs","names":["BaseChatModel","RunnableBinding","AsyncGeneratorWithSetup","IterableReadableStream"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput extends Omit<\n LogStreamCallbackHandlerInput,\n \"_schemaFormat\"\n> {}\n\nexport interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: \"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n className: \"ChatGoogleGenerativeAI\",\n },\n ollama: {\n package: \"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n mistral: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n 
MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name or model provider.\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @param modelProvider Optional model provider key for direct lookup (e.g., \"google-vertexai-web\").\n * When provided, uses direct lookup to avoid className collision issues.\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(\n className: string,\n modelProvider?: string\n) {\n let config: ModelProviderConfig | undefined;\n\n if (modelProvider) {\n // Direct lookup by modelProvider key - avoids className collision\n // (e.g., google-vertexai and google-vertexai-web both use \"ChatVertexAI\")\n config = MODEL_PROVIDER_CONFIG[\n modelProvider as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig | undefined;\n } else {\n // Fallback to className lookup for backward compatibility\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, c]) => c.className === className\n );\n config = providerEntry\n ? (providerEntry[1] as ModelProviderConfig)\n : undefined;\n }\n\n if (!config) {\n return undefined;\n }\n\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err &&\n typeof err.message === \"string\"\n ) {\n const msg = err.message.startsWith(\"Error: \")\n ? err.message.slice(\"Error: \".length)\n : err.message;\n const attemptedPackage = msg\n .split(\"Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n // Pass modelProviderCopy to use direct lookup and avoid className collision\n const ProviderClass = await getChatModelByClassName(\n config.className,\n modelProviderCopy\n );\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n /**\n * Overrides the profiling information for the model. 
If not provided,\n * the profile will be inferred from the inner model instance.\n */\n profile?: ModelProfile;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n }\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n /** @internal */\n private _modelInstanceCache = new Map<\n string,\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n >();\n\n /** @internal */\n private _profile?: ModelProfile;\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n\n this._profile = fields.profile ?? undefined;\n }\n\n async _getModelInstance(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n // Check cache first\n const cacheKey = this._getCacheKey(config);\n const cachedModel = this._modelInstanceCache.get(cacheKey);\n if (cachedModel) {\n return cachedModel;\n }\n\n // Initialize model with merged params\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations in sequence\n for (const [method, args] of Object.entries(this._queuedMethodOperations)) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n\n // Cache and return the initialized model\n this._modelInstanceCache.set(cacheKey, initializedModel);\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._getModelInstance(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._getModelInstance(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n 
async function* wrappedGenerator() {\n const model = await outerThis._getModelInstance(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n\n /**\n * Return profiling information for the model.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n */\n get profile(): ModelProfile {\n if (this._profile) {\n return this._profile;\n }\n const cacheKey = this._getCacheKey({});\n const instance = this._modelInstanceCache.get(cacheKey);\n return instance?.profile ?? {};\n }\n\n /** @internal */\n _getCacheKey(config?: RunnableConfig): string {\n let toStringify = config ?? {};\n if (toStringify.configurable) {\n const { configurable } = toStringify;\n const filtered: Record<string, unknown> = {};\n for (const [k, v] of Object.entries(configurable)) {\n if (!k.startsWith(\"__pregel_\")) {\n filtered[k] = v;\n }\n }\n toStringify = { ...toStringify, configurable: filtered };\n }\n return JSON.stringify(toStringify);\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * 
@template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. \"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {ModelProfile} [fields.profile] - Overrides the profiling information for the model. If not provided,\n * the profile will be inferred from the inner model instance.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-sonnet-4-5-20250929\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * 
```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-sonnet-4-5-20250929\",\n * foo_modelProvider: \"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-sonnet-4-5-20250929\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Initialize a model with a custom profile\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const model = await initChatModel(\"gpt-4o-mini\", {\n * profile: {\n * maxInputTokens: 100000,\n * },\n * });\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider 
from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... -> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, profile, ...params } =\n {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const [provider, ...remainingParts] = model.split(\":\");\n const modelComponents =\n remainingParts.length === 0\n ? [provider]\n : [provider, remainingParts.join(\":\")];\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] 
} to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n let configurableModel: ConfigurableModel<RunInput, CallOptions>;\n\n if (configurableFieldsCopy === undefined) {\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n profile,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n profile,\n });\n }\n\n // Initialize the model instance to make sure a profile is available\n await configurableModel._getModelInstance();\n return configurableModel;\n}\n"],"mappings":";;;;;;;;;;;;;AAoDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;EACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;EACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;EACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;EACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;EACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;EACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;EACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;EACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;EACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;EACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;EACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;EACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;EACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;EACxB;CACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;;;AAeD,eAAsB,wBACpB,WACA,eACA;CACA,IAAI;AAEJ,KAAI,cAGF,UAAS,sBACP;MAEG;EAEL,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,MACzD,GAAG,OAAO,EAAE,cAAc,UAC5B;AACD,WAAS,gBACJ,cAAc,KACf,KAAA;;AAGN,KAAI,CAAC,OACH;AAGF,KAAI;AAEF,UADe,MAAM,OAAO,OAAO,UACrB,OAAO;UACd,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,OACb,OAAO,IAAI,YAAY,UACvB;GAIA,MAAM,oBAHM,IAAI,QAAQ,WAAW,UAAU,GACzC,IAAI,QAAQ,MAAM,EAAiB,GACnC,IAAI,SAEL,MAAM,wBAAwB,CAAC,GAC/B,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,oBAAoB,iBAAiB,sCAClB,iBAAiB,uBAAuB,iBAAiB,IAC7E;;AAEH,QAAM;;;AAIV,eAAe,qBACb,OACA,eAEA,SAA8B,EAAE,EACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,+CAA+C,MAAM,4CACtD;CAGH,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,gCAAgC,kBAAkB,wCAAwC,YAC3F;;CAGH,MAAM,EAAE,eAAe,SAAS,GAAG,iBAAiB;AAMpD,QAAO,KAJe,OAAM,wBAC1B,OAAO,WACP,kBACD,GACwB;EAAE;EAAO,GAAG;EAAc,CAAC;;;;;;;;;;;;;AActD,SAAgB,oBAAoB,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP;;;;;;;AAiCJ,IAAa,oBAAb,MAAa,0BAIHA,4CAAAA,cAA2C;CACnD,WAAmB;AACjB,SAAO;;CAGT,eAAe,CAAC,aAAa,cAAc;CAG3C,iBAAuC,EAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,EAAE;;CAGjD,sCAA8B,IAAI,KAG/B;;CAGH;CAEA,YAAY,QAAiC;AAC3C,QAAM,OAAO;AACb,OAAK,iBAAiB,OAAO,iBAAiB,EAAE;AAEhD,MAAI,OAAO,uBAAuB,MAChC,MAAK,sBAAsB;MAE3B,MAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,gBACD;AAGH,MAAI,OAAO,aACT,MAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa;MAE3B,MAAK,gBAAgB;AAGvB,OAAK,0BACH,OAAO,0BAA0B,KAAK;AAExC,OAAK,WAAW,OAA
O,WAAW,KAAA;;CAGpC,MAAM,kBACJ,QAGA;EAEA,MAAM,WAAW,KAAK,aAAa,OAAO;EAC1C,MAAM,cAAc,KAAK,oBAAoB,IAAI,SAAS;AAC1D,MAAI,YACF,QAAO;EAIT,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;GAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;AAGD,OAAK,MAAM,CAAC,QAAQ,SAAS,OAAO,QAAQ,KAAK,wBAAwB,CACvE,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,WAG7C,oBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;AAKvE,OAAK,oBAAoB,IAAI,UAAU,iBAAiB;AACxD,SAAO;;CAGT,MAAM,UACJ,UACA,SACA,YACqB;AAErB,UADc,MAAM,KAAK,kBAAkB,QAAQ,EACtC,UAAU,UAAU,WAAW,EAAE,EAAE,WAAW;;CAG7D,UACE,OAEA,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,yBAAyB;AAC/D,sBAAoB,YAAY,CAAC,OAAO,OAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;GACzB,CAAC;;CAIJ,wBACE,QACA,GAAG,SACmD;EACtD,MAAM,sBAAsB,EAAE,GAAG,KAAK,yBAAyB;AAC/D,sBAAoB,uBAAuB,CAAC,QAAQ,GAAG,KAAK;AAC5D,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;GACzB,CAAC;;CAIJ,aAAa,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,EAAE;EAE/C,IAAI,cAAmC,EAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;AAC/D,eAAY,eAAe;;AAI/B,MAAI,KAAK,wBAAwB,MAC/B,eAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,QAAQ,CAAC,SACnC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;;CAGT,cAAc,KAAa,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;;;;;;;CAQ7D,WACE,QACwD;EACxD,MAAM,eAA+B,EAAE,GAAI,UAAU,EAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAM,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,QAAQ,CAAC,OAAO,MAAM,eAAe,CACnE;AAED,kBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,EAAE,CAAC,CAAC,QAC7C,CAAC,OACA,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;AAWD,SAAO,IAAIC,0BAAAA,gBAAuD;GAChE,QAAQ;GACR,OAX2B,IAAI,kBAAyC;IACxE,eAAe;KAAE,GAAG,KAAK;KAAgB,GAAG;KAAa;IACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,oBAAoB,GAC7B,KAAK;IACT,cAAc,KAAK;IACnB,wBAAwB,KAAK;IAC9B,CAAC;GAKD,CAAC;;CAGJ,MAAM,OACJ,OACA,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;;CAGpC,MAAM,OACJ,OACA,SACiD;EAEjD,MAAM,mBAAmB,IAAIC,6BAAAA,wBAAwB;GACnD,WAAW,OAFC,MAAM,KAAK,kBAAkB,QAAQ,EAE1B,OAAO,OAAO,QAAQ;GAC7C,QAAQ;GACT,CAAC;AACF,QAAM,iBAAiB;AACvB,SAAOC,6BAAAA,uBAAuB,mBAAmB,iBAAiB;;CAqBpE,MAAM,MACJ,QACA,SACA,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;;CAGnD,OAAO,UACL,WACA,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;AAEpC,SAAO,MAAM,UAAU,WAAW,OAAO;;CAG3C,OAAO,UACL,OACA,SACA,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;AAEpC,SAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC7B,CAAC;;CAkBJ,aACE,OACA,SAIA,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,kBAAkB,QAAQ;GACxD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,YACxB,OAAM;;AAGV,SAAOA,6BAAAA,uBAAuB,mBAAmB,kBAAkB,CAAC;;;;;;;CAQtE,IAAI,UAAwB;AAC1B,MAAI,KAAK,SACP,QAAO,KAAK;EAEd,MAAM,WAAW,KAAK,aAAa,EAAE,CAAC;AAEtC,SADiB,KAAK,oBAAoB,IAAI,SAAS,EACtC,WAAW,EAAE;;;CAIhC,aAAa,QAAiC;EAC5C,IAAI,cAAc,UAAU,EAAE;AAC9B,MAAI,YAAY,cAAc;GAC5B,MAAM,EAAE,iBAAiB;GACzB,MAAM,WAAoC,EAAE;AAC5C,QAAK,MAAM,CAAC,GAAG,MAAM,OAAO,QAAQ,aAAa,CAC/C,KAAI,CAAC,EAAE,WAAW,YAAY,CAC5B,UAAS,KAAK;AAGlB,iBAAc;IAAE,GAAG;IAAa,cAAc;IAAU;;AAE1D,SAAO,KAAK,UAAU,YAAY;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2RtC,eAAsB,cAKpB,OAEA,QAMmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,eAAe,SAAS,GAAG,WACjE;EACE,cAAc;EACd,GAAI,UAAU,EAAE;EACjB;AACH,KAAI,kBAAkB,KAAA,KAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,CAAC,UAAU,GAAG,kBAAkB,MAAM,MAAM,IAAI;EACtD,MAAM,kBACJ,eAAe,WAAW,IACtB,CAAC,SAAS,GACV,CAAC,UAAU,eAAe,KAAK,IAAI,CAAC;AAC1C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,CAEvE,EAAC,eAAe,SAAS;;CAG7B,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,mBAAmB,GACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,KAAA,EACvC,0BAAyB,CAAC,SAAS,gBAAgB;AAErD,KAAI,gBAAgB,2BAA2B,KAAA,EAC7C,SAAQ,KACN,mBAAmB,aAAa,sIAGjC;CAIH,MAAM,aAAkC,EAAE,GAAG,QAAQ;CAErD,IAAI;AAEJ,KAAI,2BAA2B,KAAA,EAC7B,qBAAoB,IAAI,kBAAyC;EAC/D,eAAe;GACb,GAAG;GACH;GACA;GACD;EACD;EACA;EACD,CAAC;MACG;AACL,MAAI,MACF,YAAW,QAAQ;AAErB,MAAI,cACF,YAAW,gBAAgB;AAE7B,sBAAoB,IAAI,kBAAyC;GAC/D,eAAe;GACf;GACA,oBAAoB;GACpB;GACD,CAAC;;AAIJ,OAAM,kBAAkB,mBAAmB;AAC3C,QAAO"}
+
{"version":3,"file":"universal.cjs","names":["BaseChatModel","RunnableBinding","AsyncGeneratorWithSetup","IterableReadableStream"],"sources":["../../src/chat_models/universal.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n ToolDefinition,\n} from \"@langchain/core/language_models/base\";\nimport {\n BaseChatModel,\n BaseChatModelParams,\n BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n BaseMessage,\n type AIMessageChunk,\n MessageStructure,\n} from \"@langchain/core/messages\";\nimport {\n type RunnableBatchOptions,\n RunnableBinding,\n type RunnableConfig,\n type RunnableToolLike,\n ensureConfig,\n} from \"@langchain/core/runnables\";\nimport {\n AsyncGeneratorWithSetup,\n IterableReadableStream,\n} from \"@langchain/core/utils/stream\";\nimport {\n type LogStreamCallbackHandlerInput,\n type RunLogPatch,\n type StreamEvent,\n} from \"@langchain/core/tracers/log_stream\";\nimport { type StructuredToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { ChatResult } from \"@langchain/core/outputs\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\n\n// TODO: remove once `EventStreamCallbackHandlerInput` is exposed in core\ninterface EventStreamCallbackHandlerInput extends Omit<\n LogStreamCallbackHandlerInput,\n \"_schemaFormat\"\n> {}\n\nexport interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {\n tools?: (\n | StructuredToolInterface\n | Record<string, unknown>\n | ToolDefinition\n | RunnableToolLike\n )[];\n}\n\n// Configuration map for model providers\nexport const MODEL_PROVIDER_CONFIG = {\n openai: {\n package: \"@langchain/openai\",\n className: \"ChatOpenAI\",\n },\n anthropic: {\n package: \"@langchain/anthropic\",\n className: \"ChatAnthropic\",\n },\n azure_openai: {\n package: \"@langchain/openai\",\n className: \"AzureChatOpenAI\",\n },\n cohere: {\n package: \"@langchain/cohere\",\n className: \"ChatCohere\",\n },\n \"google-vertexai\": {\n package: \"@langchain/google-vertexai\",\n className: \"ChatVertexAI\",\n },\n \"google-vertexai-web\": {\n package: \"@langchain/google-vertexai-web\",\n className: \"ChatVertexAI\",\n },\n \"google-genai\": {\n package: \"@langchain/google-genai\",\n className: \"ChatGoogleGenerativeAI\",\n },\n ollama: {\n package: \"@langchain/ollama\",\n className: \"ChatOllama\",\n },\n mistralai: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n mistral: {\n package: \"@langchain/mistralai\",\n className: \"ChatMistralAI\",\n },\n groq: {\n package: \"@langchain/groq\",\n className: \"ChatGroq\",\n },\n cerebras: {\n package: \"@langchain/cerebras\",\n className: \"ChatCerebras\",\n },\n bedrock: {\n package: \"@langchain/aws\",\n className: \"ChatBedrockConverse\",\n },\n deepseek: {\n package: \"@langchain/deepseek\",\n className: \"ChatDeepSeek\",\n },\n xai: {\n package: \"@langchain/xai\",\n className: \"ChatXAI\",\n },\n fireworks: {\n package: \"@langchain/community/chat_models/fireworks\",\n className: \"ChatFireworks\",\n hasCircularDependency: true,\n },\n together: {\n package: \"@langchain/community/chat_models/togetherai\",\n className: \"ChatTogetherAI\",\n hasCircularDependency: true,\n },\n perplexity: {\n package: \"@langchain/community/chat_models/perplexity\",\n className: \"ChatPerplexity\",\n hasCircularDependency: true,\n },\n} as const;\n\nconst SUPPORTED_PROVIDERS = Object.keys(\n 
MODEL_PROVIDER_CONFIG\n) as (keyof typeof MODEL_PROVIDER_CONFIG)[];\nexport type ChatModelProvider = keyof typeof MODEL_PROVIDER_CONFIG;\ntype ModelProviderConfig = {\n package: string;\n className: string;\n hasCircularDependency?: boolean;\n};\n\n/**\n * Helper function to get a chat model class by its class name or model provider.\n * @param className The class name (e.g., \"ChatOpenAI\", \"ChatAnthropic\")\n * @param modelProvider Optional model provider key for direct lookup (e.g., \"google-vertexai-web\").\n * When provided, uses direct lookup to avoid className collision issues.\n * @returns The imported model class or undefined if not found\n */\nexport async function getChatModelByClassName(\n className: string,\n modelProvider?: string\n) {\n let config: ModelProviderConfig | undefined;\n\n if (modelProvider) {\n // Direct lookup by modelProvider key - avoids className collision\n // (e.g., google-vertexai and google-vertexai-web both use \"ChatVertexAI\")\n config = MODEL_PROVIDER_CONFIG[\n modelProvider as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig | undefined;\n } else {\n // Fallback to className lookup for backward compatibility\n const providerEntry = Object.entries(MODEL_PROVIDER_CONFIG).find(\n ([, c]) => c.className === className\n );\n config = providerEntry\n ? (providerEntry[1] as ModelProviderConfig)\n : undefined;\n }\n\n if (!config) {\n return undefined;\n }\n\n try {\n const module = await import(config.package);\n return module[config.className];\n } catch (e: unknown) {\n const err = e as Error;\n if (\n \"code\" in err &&\n err.code?.toString().includes(\"ERR_MODULE_NOT_FOUND\") &&\n \"message\" in err &&\n typeof err.message === \"string\"\n ) {\n const msg = err.message.startsWith(\"Error: \")\n ? err.message.slice(\"Error: \".length)\n : err.message;\n const attemptedPackage = msg\n .split(\"Cannot find package '\")[1]\n .split(\"'\")[0];\n throw new Error(\n `Unable to import ${attemptedPackage}. 
Please install with ` +\n `\\`npm install ${attemptedPackage}\\` or \\`pnpm install ${attemptedPackage}\\``\n );\n }\n throw e;\n }\n}\n\nasync function _initChatModelHelper(\n model: string,\n modelProvider?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params: Record<string, any> = {}\n): Promise<BaseChatModel> {\n const modelProviderCopy = modelProvider || _inferModelProvider(model);\n if (!modelProviderCopy) {\n throw new Error(\n `Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`\n );\n }\n\n const config = MODEL_PROVIDER_CONFIG[\n modelProviderCopy as keyof typeof MODEL_PROVIDER_CONFIG\n ] as ModelProviderConfig;\n if (!config) {\n const supported = SUPPORTED_PROVIDERS.join(\", \");\n throw new Error(\n `Unsupported { modelProvider: ${modelProviderCopy} }.\\n\\nSupported model providers are: ${supported}`\n );\n }\n\n const { modelProvider: _unused, ...passedParams } = params;\n // Pass modelProviderCopy to use direct lookup and avoid className collision\n const ProviderClass = await getChatModelByClassName(\n config.className,\n modelProviderCopy\n );\n return new ProviderClass({ model, ...passedParams });\n}\n\n/**\n * Attempts to infer the model provider based on the given model name.\n *\n * @param {string} modelName - The name of the model to infer the provider for.\n * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.\n *\n * @example\n * _inferModelProvider(\"gpt-4\"); // returns \"openai\"\n * _inferModelProvider(\"claude-2\"); // returns \"anthropic\"\n * _inferModelProvider(\"unknown-model\"); // returns undefined\n */\nexport function _inferModelProvider(modelName: string): string | undefined {\n if (\n modelName.startsWith(\"gpt-3\") ||\n modelName.startsWith(\"gpt-4\") ||\n modelName.startsWith(\"gpt-5\") ||\n modelName.startsWith(\"o1\") ||\n modelName.startsWith(\"o3\") ||\n modelName.startsWith(\"o4\")\n ) {\n return \"openai\";\n } else if (modelName.startsWith(\"claude\")) {\n return \"anthropic\";\n } else if (modelName.startsWith(\"command\")) {\n return \"cohere\";\n } else if (modelName.startsWith(\"accounts/fireworks\")) {\n return \"fireworks\";\n } else if (modelName.startsWith(\"gemini\")) {\n return \"google-vertexai\";\n } else if (modelName.startsWith(\"amazon.\")) {\n return \"bedrock\";\n } else if (modelName.startsWith(\"mistral\")) {\n return \"mistralai\";\n } else if (modelName.startsWith(\"sonar\") || modelName.startsWith(\"pplx\")) {\n return \"perplexity\";\n } else {\n return undefined;\n }\n}\n\ninterface ConfigurableModelFields extends BaseChatModelParams {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n defaultConfig?: Record<string, any>;\n /**\n * @default \"any\"\n */\n configurableFields?: string[] | \"any\";\n /**\n * @default \"\"\n */\n configPrefix?: string;\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n queuedMethodOperations?: Record<string, any>;\n /**\n * Overrides the profiling information for the model. 
If not provided,\n * the profile will be inferred from the inner model instance.\n */\n profile?: ModelProfile;\n}\n\n/**\n * Internal class used to create chat models.\n *\n * @internal\n */\nexport class ConfigurableModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n> extends BaseChatModel<CallOptions, AIMessageChunk> {\n _llmType(): string {\n return \"chat_model\";\n }\n\n lc_namespace = [\"langchain\", \"chat_models\"];\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _defaultConfig?: Record<string, any> = {};\n\n /**\n * @default \"any\"\n */\n _configurableFields: string[] | \"any\" = \"any\";\n\n /**\n * @default \"\"\n */\n _configPrefix: string;\n\n /**\n * Methods which should be called after the model is initialized.\n * The key will be the method name, and the value will be the arguments.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _queuedMethodOperations: Record<string, any> = {};\n\n /** @internal */\n private _modelInstanceCache = new Map<\n string,\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n >();\n\n /** @internal */\n private _profile?: ModelProfile;\n\n constructor(fields: ConfigurableModelFields) {\n super(fields);\n this._defaultConfig = fields.defaultConfig ?? {};\n\n if (fields.configurableFields === \"any\") {\n this._configurableFields = \"any\";\n } else {\n this._configurableFields = fields.configurableFields ?? [\n \"model\",\n \"modelProvider\",\n ];\n }\n\n if (fields.configPrefix) {\n this._configPrefix = fields.configPrefix.endsWith(\"_\")\n ? fields.configPrefix\n : `${fields.configPrefix}_`;\n } else {\n this._configPrefix = \"\";\n }\n\n this._queuedMethodOperations =\n fields.queuedMethodOperations ?? this._queuedMethodOperations;\n\n this._profile = fields.profile ?? undefined;\n\n this.metadata = {\n ...this.metadata,\n ls_integration: \"langchain_init_chat_model\",\n };\n }\n\n async _getModelInstance(\n config?: RunnableConfig\n ): Promise<\n BaseChatModel<BaseChatModelCallOptions, AIMessageChunk<MessageStructure>>\n > {\n // Check cache first\n const cacheKey = this._getCacheKey(config);\n const cachedModel = this._modelInstanceCache.get(cacheKey);\n if (cachedModel) {\n return cachedModel;\n }\n\n // Initialize model with merged params\n const params = { ...this._defaultConfig, ...this._modelParams(config) };\n let initializedModel = await _initChatModelHelper(\n params.model,\n params.modelProvider,\n params\n );\n\n // Apply queued method operations in sequence\n for (const [method, args] of Object.entries(this._queuedMethodOperations)) {\n if (\n method in initializedModel &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (initializedModel as any)[method] === \"function\"\n ) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n initializedModel = await (initializedModel as any)[method](...args);\n }\n }\n\n // Cache and return the initialized model\n this._modelInstanceCache.set(cacheKey, initializedModel);\n return initializedModel;\n }\n\n async _generate(\n messages: BaseMessage[],\n options?: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const model = await this._getModelInstance(options);\n return model._generate(messages, options ?? 
{}, runManager);\n }\n\n override bindTools(\n tools: BindToolsInput[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n params?: Record<string, any>\n ): ConfigurableModel<RunInput, CallOptions> {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.bindTools = [tools, params];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n });\n }\n\n // Extract the input types from the `BaseModel` class.\n withStructuredOutput: BaseChatModel[\"withStructuredOutput\"] = (\n schema,\n ...args\n ): ReturnType<BaseChatModel[\"withStructuredOutput\"]> => {\n const newQueuedOperations = { ...this._queuedMethodOperations };\n newQueuedOperations.withStructuredOutput = [schema, ...args];\n return new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: this._defaultConfig,\n configurableFields: this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: newQueuedOperations,\n }) as unknown as ReturnType<BaseChatModel[\"withStructuredOutput\"]>;\n };\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _modelParams(config?: RunnableConfig): Record<string, any> {\n const configurable = config?.configurable ?? {};\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let modelParams: Record<string, any> = {};\n\n for (const [key, value] of Object.entries(configurable)) {\n if (key.startsWith(this._configPrefix)) {\n const strippedKey = this._removePrefix(key, this._configPrefix);\n modelParams[strippedKey] = value;\n }\n }\n\n if (this._configurableFields !== \"any\") {\n modelParams = Object.fromEntries(\n Object.entries(modelParams).filter(([key]) =>\n this._configurableFields.includes(key)\n )\n );\n }\n\n return modelParams;\n }\n\n _removePrefix(str: string, prefix: string): string {\n return str.startsWith(prefix) ? str.slice(prefix.length) : str;\n }\n\n /**\n * Bind config to a Runnable, returning a new Runnable.\n * @param {RunnableConfig | undefined} [config] - The config to bind.\n * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.\n */\n withConfig(\n config?: RunnableConfig\n ): RunnableBinding<RunInput, AIMessageChunk, CallOptions> {\n const mergedConfig: RunnableConfig = { ...(config || {}) };\n const modelParams = this._modelParams(mergedConfig);\n\n const remainingConfig: RunnableConfig = Object.fromEntries(\n Object.entries(mergedConfig).filter(([k]) => k !== \"configurable\")\n );\n\n remainingConfig.configurable = Object.fromEntries(\n Object.entries(mergedConfig.configurable || {}).filter(\n ([k]) =>\n this._configPrefix &&\n !Object.keys(modelParams).includes(\n this._removePrefix(k, this._configPrefix)\n )\n )\n );\n\n const newConfigurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: { ...this._defaultConfig, ...modelParams },\n configurableFields: Array.isArray(this._configurableFields)\n ? 
[...this._configurableFields]\n : this._configurableFields,\n configPrefix: this._configPrefix,\n queuedMethodOperations: this._queuedMethodOperations,\n });\n\n return new RunnableBinding<RunInput, AIMessageChunk, CallOptions>({\n config: mergedConfig,\n bound: newConfigurableModel,\n });\n }\n\n async invoke(\n input: RunInput,\n options?: CallOptions\n ): Promise<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n return model.invoke(input, config);\n }\n\n async stream(\n input: RunInput,\n options?: CallOptions\n ): Promise<IterableReadableStream<AIMessageChunk>> {\n const model = await this._getModelInstance(options);\n const wrappedGenerator = new AsyncGeneratorWithSetup({\n generator: await model.stream(input, options),\n config: options,\n });\n await wrappedGenerator.setup;\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);\n }\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions?: false }\n ): Promise<AIMessageChunk[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions & { returnExceptions: true }\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]>;\n\n async batch(\n inputs: RunInput[],\n options?: Partial<CallOptions> | Partial<CallOptions>[],\n batchOptions?: RunnableBatchOptions\n ): Promise<(AIMessageChunk | Error)[]> {\n // We can super this since the base runnable implementation of\n // `.batch` will call `.invoke` on each input.\n return super.batch(inputs, options, batchOptions);\n }\n\n async *transform(\n generator: AsyncGenerator<RunInput>,\n options: CallOptions\n ): AsyncGenerator<AIMessageChunk> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.transform(generator, config);\n }\n\n async *streamLog(\n input: RunInput,\n options?: Partial<CallOptions>,\n streamOptions?: Omit<LogStreamCallbackHandlerInput, \"autoClose\">\n ): AsyncGenerator<RunLogPatch> {\n const model = await this._getModelInstance(options);\n const config = ensureConfig(options);\n\n yield* model.streamLog(input, config, {\n ...streamOptions,\n _schemaFormat: \"original\",\n includeNames: streamOptions?.includeNames,\n includeTypes: streamOptions?.includeTypes,\n includeTags: streamOptions?.includeTags,\n excludeNames: streamOptions?.excludeNames,\n excludeTypes: streamOptions?.excludeTypes,\n excludeTags: streamOptions?.excludeTags,\n });\n }\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & { version: \"v1\" | \"v2\" },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding: \"text/event-stream\";\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<Uint8Array>;\n\n streamEvents(\n input: RunInput,\n options: Partial<CallOptions> & {\n version: \"v1\" | \"v2\";\n encoding?: \"text/event-stream\" | undefined;\n },\n streamOptions?: Omit<EventStreamCallbackHandlerInput, \"autoClose\">\n ): IterableReadableStream<StreamEvent | Uint8Array> {\n const outerThis = this;\n 
async function* wrappedGenerator() {\n const model = await outerThis._getModelInstance(options);\n const config = ensureConfig(options);\n const eventStream = model.streamEvents(input, config, streamOptions);\n\n for await (const chunk of eventStream) {\n yield chunk;\n }\n }\n return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());\n }\n\n /**\n * Return profiling information for the model.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n */\n get profile(): ModelProfile {\n if (this._profile) {\n return this._profile;\n }\n const cacheKey = this._getCacheKey({});\n const instance = this._modelInstanceCache.get(cacheKey);\n return instance?.profile ?? {};\n }\n\n /** @internal */\n _getCacheKey(config?: RunnableConfig): string {\n let toStringify = config ?? {};\n if (toStringify.configurable) {\n const { configurable } = toStringify;\n const filtered: Record<string, unknown> = {};\n for (const [k, v] of Object.entries(configurable)) {\n if (!k.startsWith(\"__pregel_\")) {\n filtered[k] = v;\n }\n }\n toStringify = { ...toStringify, configurable: filtered };\n }\n return JSON.stringify(toStringify);\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport interface InitChatModelFields extends Partial<Record<string, any>> {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n}\n\nexport type ConfigurableFields = \"any\" | string[];\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model: never,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: never;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n options?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: ConfigurableFields;\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>>;\n\n// ################################# FOR CONTRIBUTORS #################################\n//\n// If adding support for a new provider, please append the provider\n// name to the supported list in the docstring below.\n//\n// ####################################################################################\n\n/**\n * Initialize a ChatModel from the model name and provider.\n * Must have the integration package corresponding to the model provider installed.\n *\n * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.\n * 
@template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.\n *\n * @param {string | ChatModelProvider} [model] - The name of the model, e.g. \"gpt-4\", \"claude-3-opus-20240229\".\n * Can be prefixed with the model provider, e.g. \"openai:gpt-4\", \"anthropic:claude-3-opus-20240229\".\n * @param {Object} [fields] - Additional configuration options.\n * @param {string} [fields.modelProvider] - The model provider. Supported values include:\n * - openai (@langchain/openai)\n * - anthropic (@langchain/anthropic)\n * - azure_openai (@langchain/openai)\n * - google-vertexai (@langchain/google-vertexai)\n * - google-vertexai-web (@langchain/google-vertexai-web)\n * - google-genai (@langchain/google-genai)\n * - bedrock (@langchain/aws)\n * - cohere (@langchain/cohere)\n * - fireworks (@langchain/community/chat_models/fireworks)\n * - together (@langchain/community/chat_models/togetherai)\n * - mistralai (@langchain/mistralai)\n * - groq (@langchain/groq)\n * - ollama (@langchain/ollama)\n * - perplexity (@langchain/community/chat_models/perplexity)\n * - cerebras (@langchain/cerebras)\n * - deepseek (@langchain/deepseek)\n * - xai (@langchain/xai)\n * @param {string[] | \"any\"} [fields.configurableFields] - Which model parameters are configurable:\n * - undefined: No configurable fields.\n * - \"any\": All fields are configurable. (See Security Note in description)\n * - string[]: Specified fields are configurable.\n * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.\n * @param {ModelProfile} [fields.profile] - Overrides the profiling information for the model. If not provided,\n * the profile will be inferred from the inner model instance.\n * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.\n * @returns {Promise<ConfigurableModel<RunInput, CallOptions>>} A class which extends BaseChatModel.\n * @throws {Error} If modelProvider cannot be inferred or isn't supported.\n * @throws {Error} If the model provider integration package is not installed.\n *\n * @example Initialize non-configurable models\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const gpt4 = await initChatModel(\"openai:gpt-4\", {\n * temperature: 0.25,\n * });\n * const gpt4Result = await gpt4.invoke(\"what's your name\");\n *\n * const claude = await initChatModel(\"anthropic:claude-3-opus-20240229\", {\n * temperature: 0.25,\n * });\n * const claudeResult = await claude.invoke(\"what's your name\");\n *\n * const gemini = await initChatModel(\"gemini-1.5-pro\", {\n * modelProvider: \"google-vertexai\",\n * temperature: 0.25,\n * });\n * const geminiResult = await gemini.invoke(\"what's your name\");\n * ```\n *\n * @example Create a partially configurable model with no default model\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModel = await initChatModel(undefined, {\n * temperature: 0,\n * configurableFields: [\"model\", \"apiKey\"],\n * });\n *\n * const gpt4Result = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"gpt-4\",\n * },\n * });\n *\n * const claudeResult = await configurableModel.invoke(\"what's your name\", {\n * configurable: {\n * model: \"claude-sonnet-4-5-20250929\",\n * },\n * });\n * ```\n *\n * @example Create a fully configurable model with a default model and a config prefix\n * 
```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const configurableModelWithDefault = await initChatModel(\"gpt-4\", {\n * modelProvider: \"openai\",\n * configurableFields: \"any\",\n * configPrefix: \"foo\",\n * temperature: 0,\n * });\n *\n * const openaiResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const claudeResult = await configurableModelWithDefault.invoke(\n * \"what's your name\",\n * {\n * configurable: {\n * foo_model: \"claude-sonnet-4-5-20250929\",\n * foo_modelProvider: \"anthropic\",\n * foo_temperature: 0.6,\n * foo_apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Bind tools to a configurable model:\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n * import { z } from \"zod/v3\";\n * import { tool } from \"@langchain/core/tools\";\n *\n * const getWeatherTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current weather in a given location\"),\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * }\n * );\n *\n * const getPopulationTool = tool(\n * (input) => {\n * // Do something with the input\n * return JSON.stringify(input);\n * },\n * {\n * schema: z\n * .object({\n * location: z\n * .string()\n * .describe(\"The city and state, e.g. San Francisco, CA\"),\n * })\n * .describe(\"Get the current population in a given location\"),\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * }\n * );\n *\n * const configurableModel = await initChatModel(\"gpt-4\", {\n * configurableFields: [\"model\", \"modelProvider\", \"apiKey\"],\n * temperature: 0,\n * });\n *\n * const configurableModelWithTools = configurableModel.bindTools([\n * getWeatherTool,\n * getPopulationTool,\n * ]);\n *\n * const configurableToolResult = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * apiKey: process.env.OPENAI_API_KEY,\n * },\n * }\n * );\n *\n * const configurableToolResult2 = await configurableModelWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\",\n * {\n * configurable: {\n * model: \"claude-sonnet-4-5-20250929\",\n * apiKey: process.env.ANTHROPIC_API_KEY,\n * },\n * }\n * );\n * ```\n *\n * @example Initialize a model with a custom profile\n * ```typescript\n * import { initChatModel } from \"langchain/chat_models/universal\";\n *\n * const model = await initChatModel(\"gpt-4o-mini\", {\n * profile: {\n * maxInputTokens: 100000,\n * },\n * });\n *\n * @description\n * This function initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * Security Note: Setting `configurableFields` to \"any\" means fields like apiKey, baseUrl, etc.\n * can be altered at runtime, potentially redirecting model requests to a different service/user.\n * Make sure that if you're accepting untrusted configurations, you enumerate the\n * `configurableFields` explicitly.\n *\n * The function will attempt to infer the model provider 
from the model name if not specified.\n * Certain model name prefixes are associated with specific providers:\n * - gpt-3... or gpt-4... -> openai\n * - claude... -> anthropic\n * - amazon.... -> bedrock\n * - gemini... -> google-vertexai\n * - command... -> cohere\n * - accounts/fireworks... -> fireworks\n *\n * @since 0.2.11\n * @version 0.2.11\n */\nexport async function initChatModel<\n RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,\n CallOptions extends ConfigurableChatModelCallOptions =\n ConfigurableChatModelCallOptions,\n>(\n model?: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n fields?: Partial<Record<string, any>> & {\n modelProvider?: string;\n configurableFields?: string[] | \"any\";\n configPrefix?: string;\n profile?: ModelProfile;\n }\n): Promise<ConfigurableModel<RunInput, CallOptions>> {\n // eslint-disable-next-line prefer-const\n let { configurableFields, configPrefix, modelProvider, profile, ...params } =\n {\n configPrefix: \"\",\n ...(fields ?? {}),\n };\n if (modelProvider === undefined && model?.includes(\":\")) {\n const [provider, ...remainingParts] = model.split(\":\");\n const modelComponents =\n remainingParts.length === 0\n ? [provider]\n : [provider, remainingParts.join(\":\")];\n if (SUPPORTED_PROVIDERS.includes(modelComponents[0] as ChatModelProvider)) {\n // eslint-disable-next-line no-param-reassign\n [modelProvider, model] = modelComponents;\n }\n }\n let configurableFieldsCopy = Array.isArray(configurableFields)\n ? [...configurableFields]\n : configurableFields;\n\n if (!model && configurableFieldsCopy === undefined) {\n configurableFieldsCopy = [\"model\", \"modelProvider\"];\n }\n if (configPrefix && configurableFieldsCopy === undefined) {\n console.warn(\n `{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +\n `{ configurableFields: [...] 
} to specify the model params that are ` +\n `configurable.`\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const paramsCopy: Record<string, any> = { ...params };\n\n let configurableModel: ConfigurableModel<RunInput, CallOptions>;\n\n if (configurableFieldsCopy === undefined) {\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: {\n ...paramsCopy,\n model,\n modelProvider,\n },\n configPrefix,\n profile,\n });\n } else {\n if (model) {\n paramsCopy.model = model;\n }\n if (modelProvider) {\n paramsCopy.modelProvider = modelProvider;\n }\n configurableModel = new ConfigurableModel<RunInput, CallOptions>({\n defaultConfig: paramsCopy,\n configPrefix,\n configurableFields: configurableFieldsCopy,\n profile,\n });\n }\n\n // Initialize the model instance to make sure a profile is available\n await configurableModel._getModelInstance();\n return configurableModel;\n}\n"],"mappings":";;;;;;;;;;;;;AAoDA,MAAa,wBAAwB;CACnC,QAAQ;EACN,SAAS;EACT,WAAW;EACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACZ;CACD,cAAc;EACZ,SAAS;EACT,WAAW;EACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;EACZ;CACD,mBAAmB;EACjB,SAAS;EACT,WAAW;EACZ;CACD,uBAAuB;EACrB,SAAS;EACT,WAAW;EACZ;CACD,gBAAgB;EACd,SAAS;EACT,WAAW;EACZ;CACD,QAAQ;EACN,SAAS;EACT,WAAW;EACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;EACZ;CACD,MAAM;EACJ,SAAS;EACT,WAAW;EACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACZ;CACD,SAAS;EACP,SAAS;EACT,WAAW;EACZ;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACZ;CACD,KAAK;EACH,SAAS;EACT,WAAW;EACZ;CACD,WAAW;EACT,SAAS;EACT,WAAW;EACX,uBAAuB;EACxB;CACD,UAAU;EACR,SAAS;EACT,WAAW;EACX,uBAAuB;EACxB;CACD,YAAY;EACV,SAAS;EACT,WAAW;EACX,uBAAuB;EACxB;CACF;AAED,MAAM,sBAAsB,OAAO,KACjC,sBACD;;;;;;;;AAeD,eAAsB,wBACpB,WACA,eACA;CACA,IAAI;AAEJ,KAAI,cAGF,UAAS,sBACP;MAEG;EAEL,MAAM,gBAAgB,OAAO,QAAQ,sBAAsB,CAAC,MACzD,GAAG,OAAO,EAAE,cAAc,UAC5B;AACD,WAAS,gBACJ,cAAc,KACf,KAAA;;AAGN,KAAI,CAAC,OACH;AAGF,KAAI;AAEF,UADe,MAAM,OAAO,OAAO,UACrB,OAAO;UACd,GAAY;EACnB,MAAM,MAAM;AACZ,MACE,UAAU,OACV,IAAI,MAAM,UAAU,CAAC,SAAS,uBAAuB,IACrD,aAAa,OACb,OAAO,IAAI,YAAY,UACvB;GAIA,MAAM,oBAHM,IAAI,QAAQ,WAAW,UAAU,GACzC,IAAI,QAAQ,MAAM,EAAiB,GACnC,IAAI,SAEL,MAAM,wBAAwB,CAAC,GAC/B,MAAM,IAAI,CAAC;AACd,SAAM,IAAI,MACR,oBAAoB,iBAAiB,sCAClB,iBAAiB,uBAAuB,iBAAiB,IAC7E;;AAEH,QAAM;;;AAIV,eAAe,qBACb,OACA,eAEA,SAA8B,EAAE,EACR;CACxB,MAAM,oBAAoB,iBAAiB,oBAAoB,MAAM;AACrE,KAAI,CAAC,kBACH,OAAM,IAAI,MACR,+CAA+C,MAAM,4CACtD;CAGH,MAAM,SAAS,sBACb;AAEF,KAAI,CAAC,QAAQ;EACX,MAAM,YAAY,oBAAoB,KAAK,KAAK;AAChD,QAAM,IAAI,MACR,gCAAgC,kBAAkB,wCAAwC,YAC3F;;CAGH,MAAM,EAAE,eAAe,SAAS,GAAG,iBAAiB;AAMpD,QAAO,KAJe,OAAM,wBAC1B,OAAO,WACP,kBACD,GACwB;EAAE;EAAO,GAAG;EAAc,CAAC;;;;;;;;;;;;;AActD,SAAgB,oBAAoB,WAAuC;AACzE,KACE,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,QAAQ,IAC7B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,IAC1B,UAAU,WAAW,KAAK,CAE1B,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,qBAAqB,CACnD,QAAO;UACE,UAAU,WAAW,SAAS,CACvC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,UAAU,CACxC,QAAO;UACE,UAAU,WAAW,QAAQ,IAAI,UAAU,WAAW,OAAO,CACtE,QAAO;KAEP;;;;;;;AAiCJ,IAAa,oBAAb,MAAa,0BAIHA,4CAAAA,cAA2C;CACnD,WAAmB;AACjB,SAAO;;CAGT,eAAe,CAAC,aAAa,cAAc;CAG3C,iBAAuC,EAAE;;;;CAKzC,sBAAwC;;;;CAKxC;;;;;CAOA,0BAA+C,EAAE;;CAGjD,sCAA8B,IAAI,KAG/B;;CAGH;CAEA,YAAY,QAAiC;AAC3C,QAAM,OAAO;AACb,OAAK,iBAAiB,OAAO,iBAAiB,EAAE;AAEhD,MAAI,OAAO,uBAAuB,MAChC,MAAK,sBAAsB;MAE3B,MAAK,sBAAsB,OAAO,sBAAsB,CACtD,SACA,gBACD;AAGH,MAAI,OAAO,aACT,MAAK,gBAAgB,OAAO,aAAa,SAAS,IAAI,GAClD,OAAO,eACP,GAAG,OAAO,aAAa;MAE3B,MAAK,gBAAgB;AAGvB,OAAK,0BACH,OAAO,0BAA0B,KAAK;AAExC,OAAK,WAAW,OAA
O,WAAW,KAAA;AAElC,OAAK,WAAW;GACd,GAAG,KAAK;GACR,gBAAgB;GACjB;;CAGH,MAAM,kBACJ,QAGA;EAEA,MAAM,WAAW,KAAK,aAAa,OAAO;EAC1C,MAAM,cAAc,KAAK,oBAAoB,IAAI,SAAS;AAC1D,MAAI,YACF,QAAO;EAIT,MAAM,SAAS;GAAE,GAAG,KAAK;GAAgB,GAAG,KAAK,aAAa,OAAO;GAAE;EACvE,IAAI,mBAAmB,MAAM,qBAC3B,OAAO,OACP,OAAO,eACP,OACD;AAGD,OAAK,MAAM,CAAC,QAAQ,SAAS,OAAO,QAAQ,KAAK,wBAAwB,CACvE,KACE,UAAU,oBAEV,OAAQ,iBAAyB,YAAY,WAG7C,oBAAmB,MAAO,iBAAyB,QAAQ,GAAG,KAAK;AAKvE,OAAK,oBAAoB,IAAI,UAAU,iBAAiB;AACxD,SAAO;;CAGT,MAAM,UACJ,UACA,SACA,YACqB;AAErB,UADc,MAAM,KAAK,kBAAkB,QAAQ,EACtC,UAAU,UAAU,WAAW,EAAE,EAAE,WAAW;;CAG7D,UACE,OAEA,QAC0C;EAC1C,MAAM,sBAAsB,EAAE,GAAG,KAAK,yBAAyB;AAC/D,sBAAoB,YAAY,CAAC,OAAO,OAAO;AAC/C,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;GACzB,CAAC;;CAIJ,wBACE,QACA,GAAG,SACmD;EACtD,MAAM,sBAAsB,EAAE,GAAG,KAAK,yBAAyB;AAC/D,sBAAoB,uBAAuB,CAAC,QAAQ,GAAG,KAAK;AAC5D,SAAO,IAAI,kBAAyC;GAClD,eAAe,KAAK;GACpB,oBAAoB,KAAK;GACzB,cAAc,KAAK;GACnB,wBAAwB;GACzB,CAAC;;CAIJ,aAAa,QAA8C;EACzD,MAAM,eAAe,QAAQ,gBAAgB,EAAE;EAE/C,IAAI,cAAmC,EAAE;AAEzC,OAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,aAAa,CACrD,KAAI,IAAI,WAAW,KAAK,cAAc,EAAE;GACtC,MAAM,cAAc,KAAK,cAAc,KAAK,KAAK,cAAc;AAC/D,eAAY,eAAe;;AAI/B,MAAI,KAAK,wBAAwB,MAC/B,eAAc,OAAO,YACnB,OAAO,QAAQ,YAAY,CAAC,QAAQ,CAAC,SACnC,KAAK,oBAAoB,SAAS,IAAI,CACvC,CACF;AAGH,SAAO;;CAGT,cAAc,KAAa,QAAwB;AACjD,SAAO,IAAI,WAAW,OAAO,GAAG,IAAI,MAAM,OAAO,OAAO,GAAG;;;;;;;CAQ7D,WACE,QACwD;EACxD,MAAM,eAA+B,EAAE,GAAI,UAAU,EAAE,EAAG;EAC1D,MAAM,cAAc,KAAK,aAAa,aAAa;EAEnD,MAAM,kBAAkC,OAAO,YAC7C,OAAO,QAAQ,aAAa,CAAC,QAAQ,CAAC,OAAO,MAAM,eAAe,CACnE;AAED,kBAAgB,eAAe,OAAO,YACpC,OAAO,QAAQ,aAAa,gBAAgB,EAAE,CAAC,CAAC,QAC7C,CAAC,OACA,KAAK,iBACL,CAAC,OAAO,KAAK,YAAY,CAAC,SACxB,KAAK,cAAc,GAAG,KAAK,cAAc,CAC1C,CACJ,CACF;AAWD,SAAO,IAAIC,0BAAAA,gBAAuD;GAChE,QAAQ;GACR,OAX2B,IAAI,kBAAyC;IACxE,eAAe;KAAE,GAAG,KAAK;KAAgB,GAAG;KAAa;IACzD,oBAAoB,MAAM,QAAQ,KAAK,oBAAoB,GACvD,CAAC,GAAG,KAAK,oBAAoB,GAC7B,KAAK;IACT,cAAc,KAAK;IACnB,wBAAwB,KAAK;IAC9B,CAAC;GAKD,CAAC;;CAGJ,MAAM,OACJ,OACA,SACyB;EACzB,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;AACpC,SAAO,MAAM,OAAO,OAAO,OAAO;;CAGpC,MAAM,OACJ,OACA,SACiD;EAEjD,MAAM,mBAAmB,IAAIC,6BAAAA,wBAAwB;GACnD,WAAW,OAFC,MAAM,KAAK,kBAAkB,QAAQ,EAE1B,OAAO,OAAO,QAAQ;GAC7C,QAAQ;GACT,CAAC;AACF,QAAM,iBAAiB;AACvB,SAAOC,6BAAAA,uBAAuB,mBAAmB,iBAAiB;;CAqBpE,MAAM,MACJ,QACA,SACA,cACqC;AAGrC,SAAO,MAAM,MAAM,QAAQ,SAAS,aAAa;;CAGnD,OAAO,UACL,WACA,SACgC;EAChC,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;AAEpC,SAAO,MAAM,UAAU,WAAW,OAAO;;CAG3C,OAAO,UACL,OACA,SACA,eAC6B;EAC7B,MAAM,QAAQ,MAAM,KAAK,kBAAkB,QAAQ;EACnD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;AAEpC,SAAO,MAAM,UAAU,OAAO,QAAQ;GACpC,GAAG;GACH,eAAe;GACf,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC5B,cAAc,eAAe;GAC7B,cAAc,eAAe;GAC7B,aAAa,eAAe;GAC7B,CAAC;;CAkBJ,aACE,OACA,SAIA,eACkD;EAClD,MAAM,YAAY;EAClB,gBAAgB,mBAAmB;GACjC,MAAM,QAAQ,MAAM,UAAU,kBAAkB,QAAQ;GACxD,MAAM,UAAA,GAAA,0BAAA,cAAsB,QAAQ;GACpC,MAAM,cAAc,MAAM,aAAa,OAAO,QAAQ,cAAc;AAEpE,cAAW,MAAM,SAAS,YACxB,OAAM;;AAGV,SAAOA,6BAAAA,uBAAuB,mBAAmB,kBAAkB,CAAC;;;;;;;CAQtE,IAAI,UAAwB;AAC1B,MAAI,KAAK,SACP,QAAO,KAAK;EAEd,MAAM,WAAW,KAAK,aAAa,EAAE,CAAC;AAEtC,SADiB,KAAK,oBAAoB,IAAI,SAAS,EACtC,WAAW,EAAE;;;CAIhC,aAAa,QAAiC;EAC5C,IAAI,cAAc,UAAU,EAAE;AAC9B,MAAI,YAAY,cAAc;GAC5B,MAAM,EAAE,iBAAiB;GACzB,MAAM,WAAoC,EAAE;AAC5C,QAAK,MAAM,CAAC,GAAG,MAAM,OAAO,QAAQ,aAAa,CAC/C,KAAI,CAAC,EAAE,WAAW,YAAY,CAC5B,UAAS,KAAK;AAGlB,iBAAc;IAAE,GAAG;IAAa,cAAc;IAAU;;AAE1D,SAAO,KAAK,UAAU,YAAY;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2RtC,eAAsB,cAKpB,OAEA,QAMmD;CAEnD,IAAI,EAAE,oBAAoB,cAAc,eAAe,SAAS,GAAG,WACjE;EACE,cAAc;EACd,GAAI,UAAU,EAAE;EACjB;AACH,KAAI,kBAAkB,KAAA,KAAa,OAAO,SAAS,IAAI,EAAE;EACvD,MAAM,CAAC,UAAU,GAAG,kBAAkB,MAAM,MAAM,IAAI;EACtD,MAAM,kBACJ,eAAe,WAAW,IACtB,CAAC,SAAS,GACV,CAAC,UAAU,eAAe,KAAK,IAAI,CAAC;AAC1C,MAAI,oBAAoB,SAAS,gBAAgB,GAAwB,CAEvE,EAAC,eAAe,SAAS;;CAG7B,IAAI,yBAAyB,MAAM,QAAQ,mBAAmB,GAC1D,CAAC,GAAG,mBAAmB,GACvB;AAEJ,KAAI,CAAC,SAAS,2BAA2B,KAAA,EACvC,0BAAyB,CAAC,SAAS,gBAAgB;AAErD,KAAI,gBAAgB,2BAA2B,KAAA,EAC7C,SAAQ,KACN,mBAAmB,aAAa,sIAGjC;CAIH,MAAM,aAAkC,EAAE,GAAG,QAAQ;CAErD,IAAI;AAEJ,KAAI,2BAA2B,KAAA,EAC7B,qBAAoB,IAAI,kBAAyC;EAC/D,eAAe;GACb,GAAG;GACH;GACA;GACD;EACD;EACA;EACD,CAAC;MACG;AACL,MAAI,MACF,YAAW,QAAQ;AAErB,MAAI,cACF,YAAW,gBAAgB;AAE7B,sBAAoB,IAAI,kBAAyC;GAC/D,eAAe;GACf;GACA,oBAAoB;GACpB;GACD,CAAC;;AAIJ,OAAM,kBAAkB,mBAAmB;AAC3C,QAAO"}
@@ -1 +1 @@
-
{"version":3,"file":"universal.d.cts","names":[],"sources":["../../src/chat_models/universal.ts"],"mappings":";;;;;;;;;;;;UAqCU,+BAAA,SAAwC,IAAA,CAChD,6BAAA;AAAA,UAIe,gCAAA,SAAyC,wBAAA;EACxD,KAAA,IACI,uBAAA,GACA,MAAA,oBACA,cAAA,GACA,gBAAA;AAAA;AAAA,cAKO,qBAAA;EAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;KAiFD,iBAAA,gBAAiC,qBAAA;;;;;;AAA7C;;iBAcsB,uBAAA,CACpB,SAAA,UACA,aAAA,YAAsB,OAAA;;;AAFxB;;;;;;;;;iBA+FgB,mBAAA,CAAoB,SAAA;AAAA,UA6B1B,uBAAA,SAAgC,mBAAA;EAExC,aAAA,GAAgB,MAAA;EA/BkB;;AA2BnC;EAQC,kBAAA;;;;EAIA,YAAA;EAVwC;;;;EAgBxC,sBAAA,GAAyB,MAAA;EAdT;;;;EAmBhB,OAAA,GAAU,YAAA;AAAA;;;;AAQZ;;cAAa,iBAAA,kBACM,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,UACM,aAAA,CAAc,WAAA,EAAa,cAAA;EACnC,QAAA,CAAA;EAIA,YAAA;EAGA,cAAA,GAAiB,MAAA;EATf;;;EAcF,mBAAA;EAYyB;;;EAPzB,aAAA;
+
{"version":3,"file":"universal.d.cts","names":[],"sources":["../../src/chat_models/universal.ts"],"mappings":";;;;;;;;;;;;UAqCU,+BAAA,SAAwC,IAAA,CAChD,6BAAA;AAAA,UAIe,gCAAA,SAAyC,wBAAA;EACxD,KAAA,IACI,uBAAA,GACA,MAAA,oBACA,cAAA,GACA,gBAAA;AAAA;AAAA,cAKO,qBAAA;EAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;KAiFD,iBAAA,gBAAiC,qBAAA;;;;;;AAA7C;;iBAcsB,uBAAA,CACpB,SAAA,UACA,aAAA,YAAsB,OAAA;;;AAFxB;;;;;;;;;iBA+FgB,mBAAA,CAAoB,SAAA;AAAA,UA6B1B,uBAAA,SAAgC,mBAAA;EAExC,aAAA,GAAgB,MAAA;EA/BkB;;AA2BnC;EAQC,kBAAA;;;;EAIA,YAAA;EAVwC;;;;EAgBxC,sBAAA,GAAyB,MAAA;EAdT;;;;EAmBhB,OAAA,GAAU,YAAA;AAAA;;;;AAQZ;;cAAa,iBAAA,kBACM,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,UACM,aAAA,CAAc,WAAA,EAAa,cAAA;EACnC,QAAA,CAAA;EAIA,YAAA;EAGA,cAAA,GAAiB,MAAA;EATf;;;EAcF,mBAAA;EAYyB;;;EAPzB,aAAA;EAqDyD;;;;EA9CzD,uBAAA,EAAyB,MAAA;EAmFV;EAAA,QAhFP,mBAAA;EAiFL;EAAA,QA3EK,QAAA;EAER,WAAA,CAAY,MAAA,EAAQ,uBAAA;EAgCd,iBAAA,CACJ,MAAA,GAAS,cAAA,GACR,OAAA,CACD,aAAA,CAAc,wBAAA,EAA0B,cAAA,CAAe,gBAAA;EAkCnD,SAAA,CACJ,QAAA,EAAU,WAAA,IACV,OAAA,8BACA,UAAA,GAAa,wBAAA,GACZ,OAAA,CAAQ,UAAA;EAKF,SAAA,CACP,KAAA,EAAO,cAAA,IAEP,MAAA,GAAS,MAAA,gBACR,iBAAA,CAAkB,QAAA,EAAU,WAAA;EAY/B,oBAAA,EAAsB,aAAA;EAetB,YAAA,CAAa,MAAA,GAAS,cAAA,GAAiB,MAAA;EAuBvC,aAAA,CAAc,GAAA,UAAa,MAAA;EAUhB;;;;;EADX,UAAA,CACE,MAAA,GAAS,cAAA,GACR,eAAA,CAAgB,QAAA,EAAU,cAAA,EAAgB,WAAA;EAiCvC,MAAA,CACJ,KAAA,EAAO,QAAA,EACP,OAAA,GAAU,WAAA,GACT,OAAA,CAAQ,cAAA;EAML,MAAA,CACJ,KAAA,EAAO,QAAA,EACP,OAAA,GAAU,WAAA,GACT,OAAA,CAAQ,sBAAA,CAAuB,cAAA;EAU5B,KAAA,CACJ,MAAA,EAAQ,QAAA,IACR,OAAA,GAAU,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,KACzC,YAAA,GAAe,oBAAA;IAAyB,gBAAA;EAAA,IACvC,OAAA,CAAQ,cAAA;EAEL,KAAA,CACJ,MAAA,EAAQ,QAAA,IACR,OAAA,GAAU,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,KACzC,YAAA,GAAe,oBAAA;IAAyB,gBAAA;EAAA,IACvC,OAAA,EAAS,cAAA,GAAiB,KAAA;EAEvB,KAAA,CACJ,MAAA,EAAQ,QAAA,IACR,OAAA,GAAU,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,KACzC,YAAA,GAAe,oBAAA,GACd,OAAA,EAAS,cAAA,GAAiB,KAAA;EAYtB,SAAA,CACL,SAAA,EAAW,cAAA,CAAe,QAAA,GAC1B,OAAA,EAAS,WAAA,GACR,cAAA,CAAe,cAAA;EAOX,SAAA,CACL,KAAA,EAAO,QAAA,EACP,OAAA,GAAU,OAAA,CAAQ,WAAA,GAClB,aAAA,GAAgB,IAAA,CAAK,6BAAA,iBACpB,cAAA,CAAe,WAAA;EAgBlB,YAAA,CACE,KAAA,EAAO,QAAA,EACP,OAAA,EAAS,OAAA,CAAQ,WAAA;IAAiB,OAAA;EAAA,GAClC,aAAA,GAAgB,IAAA,CAAK,+BAAA,iBACpB,sBAAA,CAAuB,WAAA;EAE1B,YAAA,CACE,KAAA,EAAO,QAAA,EACP,OAAA,EAAS,OAAA,CAAQ,WAAA;IACf,OAAA;IACA,QAAA;EAAA,GAEF,aAAA,GAAgB,IAAA,CAAK,+BAAA,iBACpB,sBAAA,CAAuB,UAAA;EA/Dd;;;;;EAAA,IA2FR,OAAA,CAAA,GAAW,YAAA;EAzFZ;EAmGH,YAAA,CAAa,MAAA,GAAS,cAAA;AAAA;AAAA,UAiBP,mBAAA,SAA4B,OAAA,CAAQ,MAAA;EACnD,aAAA;EACA,kBAAA;EACA,YAAA;AAAA;AAAA,KAGU,kBAAA;AAAA,iBAEU,aAAA,kBACH,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,CAAA,CAEF,KAAA,UAEA,MAAA,GAAS,OAAA,CAAQ,MAAA;EACf,aAAA;EACA,kBAAA;EACA,YAAA;AAAA,IAED,OAAA,CAAQ,iBAAA,CAAkB,QAAA,EAAU,WAAA;AAAA,iBAEjB,aAAA,kBACH,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,CAAA,CAEF,KAAA,SAEA,OAAA,GAAU,OAAA,CAAQ,MAAA;EAChB,aAAA;EACA,kBAAA;EACA,YAAA;EACA,OAAA,GAAU,YAAA;AAAA,IAEX,OAAA,CAAQ,iBAAA,CAAkB,QAAA,EAAU,WAAA;AAAA,iBAEjB,aAAA,kBACH,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,CAAA,CAEF,KAAA,WAEA,OAAA,GAAU,OAAA,CAAQ,MAAA;EAChB,aAAA;EACA,kBAAA,GAAqB,kBAAA;EACrB,YAAA;EACA,OAAA,GAAU,YAAA;AAAA,IAEX,OAAA,CAAQ,iBAAA,CAAkB,QAAA,EAAU,WAAA"}
@@ -1 +1 @@
-
{"version":3,"file":"universal.d.ts","names":[],"sources":["../../src/chat_models/universal.ts"],"mappings":";;;;;;;;;;;;UAqCU,+BAAA,SAAwC,IAAA,CAChD,6BAAA;AAAA,UAIe,gCAAA,SAAyC,wBAAA;EACxD,KAAA,IACI,uBAAA,GACA,MAAA,oBACA,cAAA,GACA,gBAAA;AAAA;AAAA,cAKO,qBAAA;EAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;KAiFD,iBAAA,gBAAiC,qBAAA;;;;;;AAA7C;;iBAcsB,uBAAA,CACpB,SAAA,UACA,aAAA,YAAsB,OAAA;;;AAFxB;;;;;;;;;iBA+FgB,mBAAA,CAAoB,SAAA;AAAA,UA6B1B,uBAAA,SAAgC,mBAAA;EAExC,aAAA,GAAgB,MAAA;EA/BkB;;AA2BnC;EAQC,kBAAA;;;;EAIA,YAAA;EAVwC;;;;EAgBxC,sBAAA,GAAyB,MAAA;EAdT;;;;EAmBhB,OAAA,GAAU,YAAA;AAAA;;;;AAQZ;;cAAa,iBAAA,kBACM,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,UACM,aAAA,CAAc,WAAA,EAAa,cAAA;EACnC,QAAA,CAAA;EAIA,YAAA;EAGA,cAAA,GAAiB,MAAA;EATf;;;EAcF,mBAAA;EAYyB;;;EAPzB,aAAA;
+
{"version":3,"file":"universal.d.ts","names":[],"sources":["../../src/chat_models/universal.ts"],"mappings":";;;;;;;;;;;;UAqCU,+BAAA,SAAwC,IAAA,CAChD,6BAAA;AAAA,UAIe,gCAAA,SAAyC,wBAAA;EACxD,KAAA,IACI,uBAAA,GACA,MAAA,oBACA,cAAA,GACA,gBAAA;AAAA;AAAA,cAKO,qBAAA;EAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;KAiFD,iBAAA,gBAAiC,qBAAA;;;;;;AAA7C;;iBAcsB,uBAAA,CACpB,SAAA,UACA,aAAA,YAAsB,OAAA;;;AAFxB;;;;;;;;;iBA+FgB,mBAAA,CAAoB,SAAA;AAAA,UA6B1B,uBAAA,SAAgC,mBAAA;EAExC,aAAA,GAAgB,MAAA;EA/BkB;;AA2BnC;EAQC,kBAAA;;;;EAIA,YAAA;EAVwC;;;;EAgBxC,sBAAA,GAAyB,MAAA;EAdT;;;;EAmBhB,OAAA,GAAU,YAAA;AAAA;;;;AAQZ;;cAAa,iBAAA,kBACM,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,UACM,aAAA,CAAc,WAAA,EAAa,cAAA;EACnC,QAAA,CAAA;EAIA,YAAA;EAGA,cAAA,GAAiB,MAAA;EATf;;;EAcF,mBAAA;EAYyB;;;EAPzB,aAAA;EAqDyD;;;;EA9CzD,uBAAA,EAAyB,MAAA;EAmFV;EAAA,QAhFP,mBAAA;EAiFL;EAAA,QA3EK,QAAA;EAER,WAAA,CAAY,MAAA,EAAQ,uBAAA;EAgCd,iBAAA,CACJ,MAAA,GAAS,cAAA,GACR,OAAA,CACD,aAAA,CAAc,wBAAA,EAA0B,cAAA,CAAe,gBAAA;EAkCnD,SAAA,CACJ,QAAA,EAAU,WAAA,IACV,OAAA,8BACA,UAAA,GAAa,wBAAA,GACZ,OAAA,CAAQ,UAAA;EAKF,SAAA,CACP,KAAA,EAAO,cAAA,IAEP,MAAA,GAAS,MAAA,gBACR,iBAAA,CAAkB,QAAA,EAAU,WAAA;EAY/B,oBAAA,EAAsB,aAAA;EAetB,YAAA,CAAa,MAAA,GAAS,cAAA,GAAiB,MAAA;EAuBvC,aAAA,CAAc,GAAA,UAAa,MAAA;EAUhB;;;;;EADX,UAAA,CACE,MAAA,GAAS,cAAA,GACR,eAAA,CAAgB,QAAA,EAAU,cAAA,EAAgB,WAAA;EAiCvC,MAAA,CACJ,KAAA,EAAO,QAAA,EACP,OAAA,GAAU,WAAA,GACT,OAAA,CAAQ,cAAA;EAML,MAAA,CACJ,KAAA,EAAO,QAAA,EACP,OAAA,GAAU,WAAA,GACT,OAAA,CAAQ,sBAAA,CAAuB,cAAA;EAU5B,KAAA,CACJ,MAAA,EAAQ,QAAA,IACR,OAAA,GAAU,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,KACzC,YAAA,GAAe,oBAAA;IAAyB,gBAAA;EAAA,IACvC,OAAA,CAAQ,cAAA;EAEL,KAAA,CACJ,MAAA,EAAQ,QAAA,IACR,OAAA,GAAU,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,KACzC,YAAA,GAAe,oBAAA;IAAyB,gBAAA;EAAA,IACvC,OAAA,EAAS,cAAA,GAAiB,KAAA;EAEvB,KAAA,CACJ,MAAA,EAAQ,QAAA,IACR,OAAA,GAAU,OAAA,CAAQ,WAAA,IAAe,OAAA,CAAQ,WAAA,KACzC,YAAA,GAAe,oBAAA,GACd,OAAA,EAAS,cAAA,GAAiB,KAAA;EAYtB,SAAA,CACL,SAAA,EAAW,cAAA,CAAe,QAAA,GAC1B,OAAA,EAAS,WAAA,GACR,cAAA,CAAe,cAAA;EAOX,SAAA,CACL,KAAA,EAAO,QAAA,EACP,OAAA,GAAU,OAAA,CAAQ,WAAA,GAClB,aAAA,GAAgB,IAAA,CAAK,6BAAA,iBACpB,cAAA,CAAe,WAAA;EAgBlB,YAAA,CACE,KAAA,EAAO,QAAA,EACP,OAAA,EAAS,OAAA,CAAQ,WAAA;IAAiB,OAAA;EAAA,GAClC,aAAA,GAAgB,IAAA,CAAK,+BAAA,iBACpB,sBAAA,CAAuB,WAAA;EAE1B,YAAA,CACE,KAAA,EAAO,QAAA,EACP,OAAA,EAAS,OAAA,CAAQ,WAAA;IACf,OAAA;IACA,QAAA;EAAA,GAEF,aAAA,GAAgB,IAAA,CAAK,+BAAA,iBACpB,sBAAA,CAAuB,UAAA;EA/Dd;;;;;EAAA,IA2FR,OAAA,CAAA,GAAW,YAAA;EAzFZ;EAmGH,YAAA,CAAa,MAAA,GAAS,cAAA;AAAA;AAAA,UAiBP,mBAAA,SAA4B,OAAA,CAAQ,MAAA;EACnD,aAAA;EACA,kBAAA;EACA,YAAA;AAAA;AAAA,KAGU,kBAAA;AAAA,iBAEU,aAAA,kBACH,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,CAAA,CAEF,KAAA,UAEA,MAAA,GAAS,OAAA,CAAQ,MAAA;EACf,aAAA;EACA,kBAAA;EACA,YAAA;AAAA,IAED,OAAA,CAAQ,iBAAA,CAAkB,QAAA,EAAU,WAAA;AAAA,iBAEjB,aAAA,kBACH,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,CAAA,CAEF,KAAA,SAEA,OAAA,GAAU,OAAA,CAAQ,MAAA;EAChB,aAAA;EACA,kBAAA;EACA,YAAA;EACA,OAAA,GAAU,YAAA;AAAA,IAEX,OAAA,CAAQ,iBAAA,CAAkB,QAAA,EAAU,WAAA;AAAA,iBAEjB,aAAA,kBACH,sBAAA,GAAyB,sBAAA,sBACtB,gCAAA,GAClB,gCAAA,CAAA,CAEF,KAAA,WAEA,OAAA,GAAU,OAAA,CAAQ,MAAA;EAChB,aAAA;EACA,kBAAA,GAAqB,kBAAA;EACrB,YAAA;EACA,OAAA,GAAU,YAAA;AAAA,IAEX,OAAA,CAAQ,iBAAA,CAAkB,QAAA,EAAU,WAAA"}
@@ -187,6 +187,10 @@ var ConfigurableModel = class ConfigurableModel extends BaseChatModel {
     else this._configPrefix = "";
     this._queuedMethodOperations = fields.queuedMethodOperations ?? this._queuedMethodOperations;
     this._profile = fields.profile ?? void 0;
+    this.metadata = {
+      ...this.metadata,
+      ls_integration: "langchain_init_chat_model"
+    };
   }
   async _getModelInstance(config) {
     const cacheKey = this._getCacheKey(config);
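The one runtime change in the hunk above is that `ConfigurableModel` instances created via `initChatModel` now merge an `ls_integration` marker into their `metadata`, which callback handlers and tracing backends receive with each run. A minimal sketch of how this surfaces to callers, assuming the import path shown in the package's own docstrings; the model string and temperature are illustrative, not part of the diff:

```typescript
import { initChatModel } from "langchain/chat_models/universal";

// Illustrative model choice; any supported "provider:model" string works.
const model = await initChatModel("openai:gpt-4o-mini", { temperature: 0 });

// As of 1.2.36 the ConfigurableModel constructor merges this marker into
// `metadata`, so traces can attribute runs to initChatModel-created models.
console.log(model.metadata?.ls_integration); // "langchain_init_chat_model"
```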