@langchain/core 1.0.4 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +14 -0
  2. package/dist/agents.d.ts.map +1 -1
  3. package/dist/caches/base.d.ts.map +1 -1
  4. package/dist/callbacks/base.d.ts.map +1 -1
  5. package/dist/language_models/base.cjs +8 -0
  6. package/dist/language_models/base.cjs.map +1 -1
  7. package/dist/language_models/base.d.cts +7 -0
  8. package/dist/language_models/base.d.cts.map +1 -1
  9. package/dist/language_models/base.d.ts +7 -0
  10. package/dist/language_models/base.d.ts.map +1 -1
  11. package/dist/language_models/base.js +8 -0
  12. package/dist/language_models/base.js.map +1 -1
  13. package/dist/language_models/profile.cjs +12 -0
  14. package/dist/language_models/profile.cjs.map +1 -0
  15. package/dist/language_models/profile.d.cts +174 -0
  16. package/dist/language_models/profile.d.cts.map +1 -0
  17. package/dist/language_models/profile.d.ts +174 -0
  18. package/dist/language_models/profile.d.ts.map +1 -0
  19. package/dist/language_models/profile.js +6 -0
  20. package/dist/language_models/profile.js.map +1 -0
  21. package/dist/load/import_map.cjs +2 -0
  22. package/dist/load/import_map.cjs.map +1 -1
  23. package/dist/load/import_map.js +2 -0
  24. package/dist/load/import_map.js.map +1 -1
  25. package/dist/runnables/config.cjs +5 -3
  26. package/dist/runnables/config.cjs.map +1 -1
  27. package/dist/runnables/config.js +5 -3
  28. package/dist/runnables/config.js.map +1 -1
  29. package/dist/utils/async_caller.d.ts.map +1 -1
  30. package/dist/utils/types/zod.cjs +21 -14
  31. package/dist/utils/types/zod.cjs.map +1 -1
  32. package/dist/utils/types/zod.js +21 -14
  33. package/dist/utils/types/zod.js.map +1 -1
  34. package/package.json +14 -2
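
Taken together, the headline change in this release pair is a new `language_models/profile` module exporting a `ModelProfile` interface, wired into the import map and `package.json` exports, plus a `profile` getter on `BaseLanguageModel` (visible in the `base.js` sources below) that returns an empty object by default. A minimal sketch of the new type in use; the `@langchain/core/language_models/profile` subpath is an assumption based on the exports change, and the particular flag values are illustrative:

```typescript
import type { ModelProfile } from "@langchain/core/language_models/profile";

// Hypothetical profile for a multimodal, tool-calling chat model.
// Every field on ModelProfile is optional, so a missing flag means
// "unknown", not "unsupported".
const profile: ModelProfile = {
  maxInputTokens: 128000,
  maxOutputTokens: 4096,
  imageInputs: true,
  toolCalling: true,
  structuredOutput: true,
};

if (profile.toolCalling) {
  // Safe to bind tools to this model.
}
```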
package/dist/language_models/base.js.map
@@ -1 +1 @@
- {"version":3,"file":"base.js","names":["modelName: string","modelName?: string","tool: unknown","params: BaseLangChainParams","content: MessageContent","textContent: string","input: BaseLanguageModelInput","params: Record<string, any>","_data: SerializedLLM"],"sources":["../../src/language_models/base.ts"],"sourcesContent":["import type { Tiktoken, TiktokenModel } from \"js-tiktoken/lite\";\nimport type { ZodType as ZodTypeV3 } from \"zod/v3\";\nimport type { $ZodType as ZodTypeV4 } from \"zod/v4/core\";\n\nimport { type BaseCache, InMemoryCache } from \"../caches/base.js\";\nimport {\n type BasePromptValueInterface,\n StringPromptValue,\n ChatPromptValue,\n} from \"../prompt_values.js\";\nimport {\n type BaseMessage,\n type BaseMessageLike,\n type MessageContent,\n} from \"../messages/base.js\";\nimport { coerceMessageLikeToMessage } from \"../messages/utils.js\";\nimport { type LLMResult } from \"../outputs.js\";\nimport { CallbackManager, Callbacks } from \"../callbacks/manager.js\";\nimport { AsyncCaller, AsyncCallerParams } from \"../utils/async_caller.js\";\nimport { encodingForModel } from \"../utils/tiktoken.js\";\nimport { Runnable, type RunnableInterface } from \"../runnables/base.js\";\nimport { RunnableConfig } from \"../runnables/config.js\";\nimport { JSONSchema } from \"../utils/json_schema.js\";\nimport {\n InferInteropZodOutput,\n InteropZodObject,\n InteropZodType,\n} from \"../utils/types/zod.js\";\n\n// https://www.npmjs.com/package/js-tiktoken\n\nexport const getModelNameForTiktoken = (modelName: string): TiktokenModel => {\n if (modelName.startsWith(\"gpt-5\")) {\n return \"gpt-5\" as TiktokenModel;\n }\n\n if (modelName.startsWith(\"gpt-3.5-turbo-16k\")) {\n return \"gpt-3.5-turbo-16k\";\n }\n\n if (modelName.startsWith(\"gpt-3.5-turbo-\")) {\n return \"gpt-3.5-turbo\";\n }\n\n if (modelName.startsWith(\"gpt-4-32k\")) {\n return \"gpt-4-32k\";\n }\n\n if (modelName.startsWith(\"gpt-4-\")) {\n return \"gpt-4\";\n }\n\n if (modelName.startsWith(\"gpt-4o\")) {\n return \"gpt-4o\";\n }\n\n return modelName as TiktokenModel;\n};\n\nexport const getEmbeddingContextSize = (modelName?: string): number => {\n switch (modelName) {\n case \"text-embedding-ada-002\":\n return 8191;\n default:\n return 2046;\n }\n};\n\n/**\n * Get the context window size (max input tokens) for a given model.\n *\n * Context window sizes are sourced from official model documentation:\n * - OpenAI: https://platform.openai.com/docs/models\n * - Anthropic: https://docs.anthropic.com/claude/docs/models-overview\n * - Google: https://ai.google.dev/gemini/docs/models/gemini\n *\n * @param modelName - The name of the model\n * @returns The context window size in tokens\n */\nexport const getModelContextSize = (modelName: string): number => {\n const normalizedName = getModelNameForTiktoken(modelName) as string;\n\n switch (normalizedName) {\n // GPT-5 series\n case \"gpt-5\":\n case \"gpt-5-turbo\":\n case \"gpt-5-turbo-preview\":\n return 400000;\n\n // GPT-4o series\n case \"gpt-4o\":\n case \"gpt-4o-mini\":\n case \"gpt-4o-2024-05-13\":\n case \"gpt-4o-2024-08-06\":\n return 128000;\n\n // GPT-4 Turbo series\n case \"gpt-4-turbo\":\n case \"gpt-4-turbo-preview\":\n case \"gpt-4-turbo-2024-04-09\":\n case \"gpt-4-0125-preview\":\n case \"gpt-4-1106-preview\":\n return 128000;\n\n // GPT-4 series\n case \"gpt-4-32k\":\n case \"gpt-4-32k-0314\":\n case \"gpt-4-32k-0613\":\n return 32768;\n case \"gpt-4\":\n case \"gpt-4-0314\":\n case \"gpt-4-0613\":\n return 8192;\n\n // GPT-3.5 Turbo series\n case 
\"gpt-3.5-turbo-16k\":\n case \"gpt-3.5-turbo-16k-0613\":\n return 16384;\n case \"gpt-3.5-turbo\":\n case \"gpt-3.5-turbo-0301\":\n case \"gpt-3.5-turbo-0613\":\n case \"gpt-3.5-turbo-1106\":\n case \"gpt-3.5-turbo-0125\":\n return 4096;\n\n // Legacy GPT-3 models\n case \"text-davinci-003\":\n case \"text-davinci-002\":\n return 4097;\n case \"text-davinci-001\":\n return 2049;\n case \"text-curie-001\":\n case \"text-babbage-001\":\n case \"text-ada-001\":\n return 2048;\n\n // Code models\n case \"code-davinci-002\":\n case \"code-davinci-001\":\n return 8000;\n case \"code-cushman-001\":\n return 2048;\n\n // Claude models (Anthropic)\n case \"claude-3-5-sonnet-20241022\":\n case \"claude-3-5-sonnet-20240620\":\n case \"claude-3-opus-20240229\":\n case \"claude-3-sonnet-20240229\":\n case \"claude-3-haiku-20240307\":\n case \"claude-2.1\":\n return 200000;\n case \"claude-2.0\":\n case \"claude-instant-1.2\":\n return 100000;\n\n // Gemini models (Google)\n case \"gemini-1.5-pro\":\n case \"gemini-1.5-pro-latest\":\n case \"gemini-1.5-flash\":\n case \"gemini-1.5-flash-latest\":\n return 1000000; // 1M tokens\n case \"gemini-pro\":\n case \"gemini-pro-vision\":\n return 32768;\n\n default:\n return 4097;\n }\n};\n\n/**\n * Whether or not the input matches the OpenAI tool definition.\n * @param {unknown} tool The input to check.\n * @returns {boolean} Whether the input is an OpenAI tool definition.\n */\nexport function isOpenAITool(tool: unknown): tool is ToolDefinition {\n if (typeof tool !== \"object\" || !tool) return false;\n if (\n \"type\" in tool &&\n tool.type === \"function\" &&\n \"function\" in tool &&\n typeof tool.function === \"object\" &&\n tool.function &&\n \"name\" in tool.function &&\n \"parameters\" in tool.function\n ) {\n return true;\n }\n return false;\n}\n\ninterface CalculateMaxTokenProps {\n prompt: string;\n modelName: TiktokenModel;\n}\n\nexport const calculateMaxTokens = async ({\n prompt,\n modelName,\n}: CalculateMaxTokenProps) => {\n let numTokens;\n\n try {\n numTokens = (\n await encodingForModel(getModelNameForTiktoken(modelName))\n ).encode(prompt).length;\n } catch {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\"\n );\n\n // fallback to approximate calculation if tiktoken is not available\n // each token is ~4 characters: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them#\n numTokens = Math.ceil(prompt.length / 4);\n }\n\n const maxTokens = getModelContextSize(modelName);\n return maxTokens - numTokens;\n};\n\nconst getVerbosity = () => false;\n\nexport type SerializedLLM = {\n _model: string;\n _type: string;\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n} & Record<string, any>;\n\nexport interface BaseLangChainParams {\n verbose?: boolean;\n callbacks?: Callbacks;\n tags?: string[];\n metadata?: Record<string, unknown>;\n}\n\n/**\n * Base class for language models, chains, tools.\n */\nexport abstract class BaseLangChain<\n RunInput,\n RunOutput,\n CallOptions extends RunnableConfig = RunnableConfig\n >\n extends Runnable<RunInput, RunOutput, CallOptions>\n implements BaseLangChainParams\n{\n /**\n * Whether to print out response text.\n */\n verbose: boolean;\n\n callbacks?: Callbacks;\n\n tags?: string[];\n\n metadata?: Record<string, unknown>;\n\n get lc_attributes(): { [key: string]: undefined } | undefined {\n return {\n callbacks: undefined,\n verbose: undefined,\n };\n }\n\n constructor(params: BaseLangChainParams) {\n super(params);\n 
this.verbose = params.verbose ?? getVerbosity();\n this.callbacks = params.callbacks;\n this.tags = params.tags ?? [];\n this.metadata = params.metadata ?? {};\n }\n}\n\n/**\n * Base interface for language model parameters.\n * A subclass of {@link BaseLanguageModel} should have a constructor that\n * takes in a parameter that extends this interface.\n */\nexport interface BaseLanguageModelParams\n extends AsyncCallerParams,\n BaseLangChainParams {\n /**\n * @deprecated Use `callbacks` instead\n */\n callbackManager?: CallbackManager;\n\n cache?: BaseCache | boolean;\n}\n\nexport interface BaseLanguageModelTracingCallOptions {\n /**\n * Describes the format of structured outputs.\n * This should be provided if an output is considered to be structured\n */\n ls_structured_output_format?: {\n /**\n * An object containing the method used for structured output (e.g., \"jsonMode\").\n */\n kwargs: { method: string };\n /**\n * The JSON schema describing the expected output structure.\n */\n schema?: JSONSchema;\n };\n}\n\nexport interface BaseLanguageModelCallOptions\n extends RunnableConfig,\n BaseLanguageModelTracingCallOptions {\n /**\n * Stop tokens to use for this call.\n * If not provided, the default stop tokens for the model will be used.\n */\n stop?: string[];\n}\n\nexport interface FunctionDefinition {\n /**\n * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain\n * underscores and dashes, with a maximum length of 64.\n */\n name: string;\n\n /**\n * The parameters the functions accepts, described as a JSON Schema object. See the\n * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for\n * examples, and the\n * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for\n * documentation about the format.\n *\n * To describe a function that accepts no parameters, provide the value\n * `{\"type\": \"object\", \"properties\": {}}`.\n */\n parameters: Record<string, unknown> | JSONSchema;\n\n /**\n * A description of what the function does, used by the model to choose when and\n * how to call the function.\n */\n description?: string;\n}\n\nexport interface ToolDefinition {\n type: \"function\";\n function: FunctionDefinition;\n}\n\nexport type FunctionCallOption = {\n name: string;\n};\n\nexport interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {\n function_call?: FunctionCallOption;\n functions?: FunctionDefinition[];\n}\n\nexport type BaseLanguageModelInput =\n | BasePromptValueInterface\n | string\n | BaseMessageLike[];\n\nexport type StructuredOutputType = InferInteropZodOutput<InteropZodObject>;\n\nexport type StructuredOutputMethodOptions<IncludeRaw extends boolean = false> =\n {\n name?: string;\n method?: \"functionCalling\" | \"jsonMode\" | \"jsonSchema\" | string;\n includeRaw?: IncludeRaw;\n /** Whether to use strict mode. Currently only supported by OpenAI models. 
*/\n strict?: boolean;\n };\n\n/** @deprecated Use StructuredOutputMethodOptions instead */\nexport type StructuredOutputMethodParams<\n RunOutput,\n IncludeRaw extends boolean = false\n> = {\n /** @deprecated Pass schema in as the first argument */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n schema: InteropZodType<RunOutput> | Record<string, any>;\n name?: string;\n method?: \"functionCalling\" | \"jsonMode\";\n includeRaw?: IncludeRaw;\n};\n\nexport interface BaseLanguageModelInterface<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any,\n CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions\n> extends RunnableInterface<BaseLanguageModelInput, RunOutput, CallOptions> {\n get callKeys(): string[];\n\n generatePrompt(\n promptValues: BasePromptValueInterface[],\n options?: string[] | CallOptions,\n callbacks?: Callbacks\n ): Promise<LLMResult>;\n\n _modelType(): string;\n\n _llmType(): string;\n\n getNumTokens(content: MessageContent): Promise<number>;\n\n /**\n * Get the identifying parameters of the LLM.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _identifyingParams(): Record<string, any>;\n\n serialize(): SerializedLLM;\n}\n\nexport type LanguageModelOutput = BaseMessage | string;\n\nexport type LanguageModelLike = Runnable<\n BaseLanguageModelInput,\n LanguageModelOutput\n>;\n\n/**\n * Base class for language models.\n */\nexport abstract class BaseLanguageModel<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any,\n CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions\n >\n extends BaseLangChain<BaseLanguageModelInput, RunOutput, CallOptions>\n implements\n BaseLanguageModelParams,\n BaseLanguageModelInterface<RunOutput, CallOptions>\n{\n /**\n * Keys that the language model accepts as call options.\n */\n get callKeys(): string[] {\n return [\"stop\", \"timeout\", \"signal\", \"tags\", \"metadata\", \"callbacks\"];\n }\n\n /**\n * The async caller should be used by subclasses to make any async calls,\n * which will thus benefit from the concurrency and retry logic.\n */\n caller: AsyncCaller;\n\n cache?: BaseCache;\n\n constructor({\n callbacks,\n callbackManager,\n ...params\n }: BaseLanguageModelParams) {\n const { cache, ...rest } = params;\n super({\n callbacks: callbacks ?? callbackManager,\n ...rest,\n });\n if (typeof cache === \"object\") {\n this.cache = cache;\n } else if (cache) {\n this.cache = InMemoryCache.global();\n } else {\n this.cache = undefined;\n }\n this.caller = new AsyncCaller(params ?? 
{});\n }\n\n abstract generatePrompt(\n promptValues: BasePromptValueInterface[],\n options?: string[] | CallOptions,\n callbacks?: Callbacks\n ): Promise<LLMResult>;\n\n abstract _modelType(): string;\n\n abstract _llmType(): string;\n\n private _encoding?: Tiktoken;\n\n /**\n * Get the number of tokens in the content.\n * @param content The content to get the number of tokens for.\n * @returns The number of tokens in the content.\n */\n async getNumTokens(content: MessageContent) {\n // Extract text content from MessageContent\n let textContent: string;\n if (typeof content === \"string\") {\n textContent = content;\n } else {\n /**\n * Content is an array of ContentBlock\n *\n * ToDo(@christian-bromann): This is a temporary fix to get the number of tokens for the content.\n * We need to find a better way to do this.\n * @see https://github.com/langchain-ai/langchainjs/pull/8341#pullrequestreview-2933713116\n */\n textContent = content\n .map((item) => {\n if (typeof item === \"string\") return item;\n if (item.type === \"text\" && \"text\" in item) return item.text;\n return \"\";\n })\n .join(\"\");\n }\n\n // fallback to approximate calculation if tiktoken is not available\n let numTokens = Math.ceil(textContent.length / 4);\n\n if (!this._encoding) {\n try {\n this._encoding = await encodingForModel(\n \"modelName\" in this\n ? getModelNameForTiktoken(this.modelName as string)\n : \"gpt2\"\n );\n } catch (error) {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\",\n error\n );\n }\n }\n\n if (this._encoding) {\n try {\n numTokens = this._encoding.encode(textContent).length;\n } catch (error) {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\",\n error\n );\n }\n }\n\n return numTokens;\n }\n\n protected static _convertInputToPromptValue(\n input: BaseLanguageModelInput\n ): BasePromptValueInterface {\n if (typeof input === \"string\") {\n return new StringPromptValue(input);\n } else if (Array.isArray(input)) {\n return new ChatPromptValue(input.map(coerceMessageLikeToMessage));\n } else {\n return input;\n }\n }\n\n /**\n * Get the identifying parameters of the LLM.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _identifyingParams(): Record<string, any> {\n return {};\n }\n\n /**\n * Create a unique cache key for a specific call to a specific language model.\n * @param callOptions Call options for the model\n * @returns A unique cache key.\n */\n _getSerializedCacheKeyParametersForCall(\n // TODO: Fix when we remove the RunnableLambda backwards compatibility shim.\n { config, ...callOptions }: CallOptions & { config?: RunnableConfig }\n ): string {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const params: Record<string, any> = {\n ...this._identifyingParams(),\n ...callOptions,\n _type: this._llmType(),\n _model: this._modelType(),\n };\n const filteredEntries = Object.entries(params).filter(\n ([_, value]) => value !== undefined\n );\n const serializedEntries = filteredEntries\n .map(([key, value]) => `${key}:${JSON.stringify(value)}`)\n .sort()\n .join(\",\");\n return serializedEntries;\n }\n\n /**\n * @deprecated\n * Return a json-like object representing this LLM.\n */\n serialize(): SerializedLLM {\n return {\n ...this._identifyingParams(),\n _type: this._llmType(),\n _model: this._modelType(),\n };\n }\n\n /**\n * @deprecated\n * Load an LLM from a json-like object describing it.\n */\n static async deserialize(_data: SerializedLLM): 
Promise<BaseLanguageModel> {\n throw new Error(\"Use .toJSON() instead\");\n }\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV3<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV3<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV4<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV4<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n /**\n * Model wrapper that returns outputs formatted to match the given schema.\n *\n * @template {BaseLanguageModelInput} RunInput The input type for the Runnable, expected to be the same input for the LLM.\n * @template {Record<string, any>} RunOutput The output type for the Runnable, expected to be a Zod schema object for structured output validation.\n *\n * @param {InteropZodType<RunOutput>} schema The schema for the structured output. Either as a Zod schema or a valid JSON schema object.\n * If a Zod schema is passed, the returned attributes will be validated, whereas with JSON schema they will not be.\n * @param {string} name The name of the function to call.\n * @param {\"functionCalling\" | \"jsonMode\"} [method=functionCalling] The method to use for getting the structured output. Defaults to \"functionCalling\".\n * @param {boolean | undefined} [includeRaw=false] Whether to include the raw output in the result. 
Defaults to false.\n * @returns {Runnable<RunInput, RunOutput> | Runnable<RunInput, { raw: BaseMessage; parsed: RunOutput }>} A new runnable that calls the LLM with structured output.\n */\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n {\n raw: BaseMessage;\n parsed: RunOutput;\n }\n >;\n}\n\n/**\n * Shared interface for token usage\n * return type from LLM calls.\n */\nexport interface TokenUsage {\n completionTokens?: number;\n promptTokens?: number;\n totalTokens?: number;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AA+BA,MAAa,0BAA0B,CAACA,cAAqC;AAC3E,KAAI,UAAU,WAAW,QAAQ,CAC/B,QAAO;AAGT,KAAI,UAAU,WAAW,oBAAoB,CAC3C,QAAO;AAGT,KAAI,UAAU,WAAW,iBAAiB,CACxC,QAAO;AAGT,KAAI,UAAU,WAAW,YAAY,CACnC,QAAO;AAGT,KAAI,UAAU,WAAW,SAAS,CAChC,QAAO;AAGT,KAAI,UAAU,WAAW,SAAS,CAChC,QAAO;AAGT,QAAO;AACR;AAED,MAAa,0BAA0B,CAACC,cAA+B;AACrE,SAAQ,WAAR;EACE,KAAK,yBACH,QAAO;EACT,QACE,QAAO;CACV;AACF;;;;;;;;;;;;AAaD,MAAa,sBAAsB,CAACD,cAA8B;CAChE,MAAM,iBAAiB,wBAAwB,UAAU;AAEzD,SAAQ,gBAAR;EAEE,KAAK;EACL,KAAK;EACL,KAAK,sBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,oBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,qBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK,iBACH,QAAO;EACT,KAAK;EACL,KAAK;EACL,KAAK,aACH,QAAO;EAGT,KAAK;EACL,KAAK,yBACH,QAAO;EACT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,qBACH,QAAO;EAGT,KAAK;EACL,KAAK,mBACH,QAAO;EACT,KAAK,mBACH,QAAO;EACT,KAAK;EACL,KAAK;EACL,KAAK,eACH,QAAO;EAGT,KAAK;EACL,KAAK,mBACH,QAAO;EACT,KAAK,mBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,aACH,QAAO;EACT,KAAK;EACL,KAAK,qBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,0BACH,QAAO;EACT,KAAK;EACL,KAAK,oBACH,QAAO;EAET,QACE,QAAO;CACV;AACF;;;;;;AAOD,SAAgB,aAAaE,MAAuC;AAClE,KAAI,OAAO,SAAS,YAAY,CAAC,KAAM,QAAO;AAC9C,KACE,UAAU,QACV,KAAK,SAAS,cACd,cAAc,QACd,OAAO,KAAK,aAAa,YACzB,KAAK,YACL,UAAU,KAAK,YACf,gBAAgB,KAAK,SAErB,QAAO;AAET,QAAO;AACR;AAOD,MAAa,qBAAqB,OAAO,EACvC,QACA,WACuB,KAAK;CAC5B,IAAI;AAEJ,KAAI;EACF,aACE,MAAM,iBAAiB,wBAAwB,UAAU,CAAC,EAC1D,OAAO,OAAO,CAAC;CAClB,QAAO;EACN,QAAQ,KACN,0EACD;EAID,YAAY,KAAK,KAAK,OAAO,SAAS,EAAE;CACzC;CAED,MAAM,YAAY,oBAAoB,UAAU;AAChD,QAAO,YAAY;AACpB;AAED,MAAM,eAAe,MAAM;;;;AAkB3B,IAAsB,gBAAtB,cAKU,SAEV;;;;CAIE;CAEA;CAEA;CAEA;CAEA,IAAI,gBAA0D;AAC5D,SAAO;GACL,WAAW;GACX,SAAS;EACV;CACF;CAED,YAAYC,QAA6B;EACvC,MAAM,OAAO;EACb,KAAK,UAAU,OAAO,WAAW,cAAc;EAC/C,KAAK,YAAY,OAAO;EACxB,KAAK,OAAO,OAAO,QAAQ,CAAE;EAC7B,KAAK,WAAW,OAAO,YAAY,CAAE;CACtC;AACF;;;;AAwJD,IAAsB,oBAAtB,cAKU,cAIV;;;;CAIE,IAAI,WAAqB;AACvB,SAAO;GAAC;GAAQ;GAAW;GAAU;GAAQ;GAAY;EAAY;CACtE;;;;;CAMD;CAEA;CAEA,YAAY,EACV,WACA,gBACA,GAAG,QACqB,EAAE;EAC1B,MAAM,EAAE,MAAO,GAAG,MAAM,GAAG;EAC3B,MAAM;GACJ,WAAW,aAAa;GACxB,GAAG;EACJ,EAAC;AACF,MAAI,OAAO,UAAU,UACnB,KAAK,QAAQ;WACJ,OACT,KAAK,QAAQ,cAAc,QAAQ;OAEnC,KAAK,QAAQ;EAEf,KAAK,SAAS,IAAI,YAAY,UAAU,CAAE;CAC3C;CAYD,AAAQ;;;;;;CAOR,MAAM,aAAaC,SAAyB;EAE1C,IAAIC;AACJ,MAAI,OAAO,YAAY,UACrB,cAAc;;;;;;;;;EASd,cAAc,QACX,IAAI,CAAC,SAAS;AACb,OAAI,OAAO,SAAS,SAAU,QAAO;AACrC,OAAI,KAAK,SAAS,UAAU,UAAU,KAAM,QAAO,KAAK;AACxD,UAAO;EACR,EAAC,CACD,KAAK,GAAG;EAIb,IAAI,YAAY,KAAK,KAAK,YAAY,SAAS,EAAE;AAEjD,MAAI,CAAC,KAAK,UACR,KAAI;GACF,KAAK,YAAY,MAAM,iBACrB,eAAe,OACX,wBAAwB,KAAK,UAAoB,GACjD,OACL;EACF,SAAQ,OAAO;GACd,QAAQ,KACN,2EACA,MACD;EACF;AAGH,MAAI,KAAK
,UACP,KAAI;GACF,YAAY,KAAK,UAAU,OAAO,YAAY,CAAC;EAChD,SAAQ,OAAO;GACd,QAAQ,KACN,2EACA,MACD;EACF;AAGH,SAAO;CACR;CAED,OAAiB,2BACfC,OAC0B;AAC1B,MAAI,OAAO,UAAU,SACnB,QAAO,IAAI,kBAAkB;WACpB,MAAM,QAAQ,MAAM,CAC7B,QAAO,IAAI,gBAAgB,MAAM,IAAI,2BAA2B;MAEhE,QAAO;CAEV;;;;CAMD,qBAA0C;AACxC,SAAO,CAAE;CACV;;;;;;CAOD,wCAEE,EAAE,OAAQ,GAAG,aAAwD,EAC7D;EAER,MAAMC,SAA8B;GAClC,GAAG,KAAK,oBAAoB;GAC5B,GAAG;GACH,OAAO,KAAK,UAAU;GACtB,QAAQ,KAAK,YAAY;EAC1B;EACD,MAAM,kBAAkB,OAAO,QAAQ,OAAO,CAAC,OAC7C,CAAC,CAAC,GAAG,MAAM,KAAK,UAAU,OAC3B;EACD,MAAM,oBAAoB,gBACvB,IAAI,CAAC,CAAC,KAAK,MAAM,KAAK,GAAG,IAAI,CAAC,EAAE,KAAK,UAAU,MAAM,EAAE,CAAC,CACxD,MAAM,CACN,KAAK,IAAI;AACZ,SAAO;CACR;;;;;CAMD,YAA2B;AACzB,SAAO;GACL,GAAG,KAAK,oBAAoB;GAC5B,OAAO,KAAK,UAAU;GACtB,QAAQ,KAAK,YAAY;EAC1B;CACF;;;;;CAMD,aAAa,YAAYC,OAAkD;AACzE,QAAM,IAAI,MAAM;CACjB;AA6EF"}
+ {"version":3,"file":"base.js","names":["modelName: string","modelName?: string","tool: unknown","params: BaseLangChainParams","content: MessageContent","textContent: string","input: BaseLanguageModelInput","params: Record<string, any>","_data: SerializedLLM"],"sources":["../../src/language_models/base.ts"],"sourcesContent":["import type { Tiktoken, TiktokenModel } from \"js-tiktoken/lite\";\nimport type { ZodType as ZodTypeV3 } from \"zod/v3\";\nimport type { $ZodType as ZodTypeV4 } from \"zod/v4/core\";\n\nimport { type BaseCache, InMemoryCache } from \"../caches/base.js\";\nimport {\n type BasePromptValueInterface,\n StringPromptValue,\n ChatPromptValue,\n} from \"../prompt_values.js\";\nimport {\n type BaseMessage,\n type BaseMessageLike,\n type MessageContent,\n} from \"../messages/base.js\";\nimport { coerceMessageLikeToMessage } from \"../messages/utils.js\";\nimport { type LLMResult } from \"../outputs.js\";\nimport { CallbackManager, Callbacks } from \"../callbacks/manager.js\";\nimport { AsyncCaller, AsyncCallerParams } from \"../utils/async_caller.js\";\nimport { encodingForModel } from \"../utils/tiktoken.js\";\nimport { Runnable, type RunnableInterface } from \"../runnables/base.js\";\nimport { RunnableConfig } from \"../runnables/config.js\";\nimport { JSONSchema } from \"../utils/json_schema.js\";\nimport {\n InferInteropZodOutput,\n InteropZodObject,\n InteropZodType,\n} from \"../utils/types/zod.js\";\nimport { ModelProfile } from \"./profile.js\";\n\n// https://www.npmjs.com/package/js-tiktoken\n\nexport const getModelNameForTiktoken = (modelName: string): TiktokenModel => {\n if (modelName.startsWith(\"gpt-5\")) {\n return \"gpt-5\" as TiktokenModel;\n }\n\n if (modelName.startsWith(\"gpt-3.5-turbo-16k\")) {\n return \"gpt-3.5-turbo-16k\";\n }\n\n if (modelName.startsWith(\"gpt-3.5-turbo-\")) {\n return \"gpt-3.5-turbo\";\n }\n\n if (modelName.startsWith(\"gpt-4-32k\")) {\n return \"gpt-4-32k\";\n }\n\n if (modelName.startsWith(\"gpt-4-\")) {\n return \"gpt-4\";\n }\n\n if (modelName.startsWith(\"gpt-4o\")) {\n return \"gpt-4o\";\n }\n\n return modelName as TiktokenModel;\n};\n\nexport const getEmbeddingContextSize = (modelName?: string): number => {\n switch (modelName) {\n case \"text-embedding-ada-002\":\n return 8191;\n default:\n return 2046;\n }\n};\n\n/**\n * Get the context window size (max input tokens) for a given model.\n *\n * Context window sizes are sourced from official model documentation:\n * - OpenAI: https://platform.openai.com/docs/models\n * - Anthropic: https://docs.anthropic.com/claude/docs/models-overview\n * - Google: https://ai.google.dev/gemini/docs/models/gemini\n *\n * @param modelName - The name of the model\n * @returns The context window size in tokens\n */\nexport const getModelContextSize = (modelName: string): number => {\n const normalizedName = getModelNameForTiktoken(modelName) as string;\n\n switch (normalizedName) {\n // GPT-5 series\n case \"gpt-5\":\n case \"gpt-5-turbo\":\n case \"gpt-5-turbo-preview\":\n return 400000;\n\n // GPT-4o series\n case \"gpt-4o\":\n case \"gpt-4o-mini\":\n case \"gpt-4o-2024-05-13\":\n case \"gpt-4o-2024-08-06\":\n return 128000;\n\n // GPT-4 Turbo series\n case \"gpt-4-turbo\":\n case \"gpt-4-turbo-preview\":\n case \"gpt-4-turbo-2024-04-09\":\n case \"gpt-4-0125-preview\":\n case \"gpt-4-1106-preview\":\n return 128000;\n\n // GPT-4 series\n case \"gpt-4-32k\":\n case \"gpt-4-32k-0314\":\n case \"gpt-4-32k-0613\":\n return 32768;\n case \"gpt-4\":\n case \"gpt-4-0314\":\n case \"gpt-4-0613\":\n 
return 8192;\n\n // GPT-3.5 Turbo series\n case \"gpt-3.5-turbo-16k\":\n case \"gpt-3.5-turbo-16k-0613\":\n return 16384;\n case \"gpt-3.5-turbo\":\n case \"gpt-3.5-turbo-0301\":\n case \"gpt-3.5-turbo-0613\":\n case \"gpt-3.5-turbo-1106\":\n case \"gpt-3.5-turbo-0125\":\n return 4096;\n\n // Legacy GPT-3 models\n case \"text-davinci-003\":\n case \"text-davinci-002\":\n return 4097;\n case \"text-davinci-001\":\n return 2049;\n case \"text-curie-001\":\n case \"text-babbage-001\":\n case \"text-ada-001\":\n return 2048;\n\n // Code models\n case \"code-davinci-002\":\n case \"code-davinci-001\":\n return 8000;\n case \"code-cushman-001\":\n return 2048;\n\n // Claude models (Anthropic)\n case \"claude-3-5-sonnet-20241022\":\n case \"claude-3-5-sonnet-20240620\":\n case \"claude-3-opus-20240229\":\n case \"claude-3-sonnet-20240229\":\n case \"claude-3-haiku-20240307\":\n case \"claude-2.1\":\n return 200000;\n case \"claude-2.0\":\n case \"claude-instant-1.2\":\n return 100000;\n\n // Gemini models (Google)\n case \"gemini-1.5-pro\":\n case \"gemini-1.5-pro-latest\":\n case \"gemini-1.5-flash\":\n case \"gemini-1.5-flash-latest\":\n return 1000000; // 1M tokens\n case \"gemini-pro\":\n case \"gemini-pro-vision\":\n return 32768;\n\n default:\n return 4097;\n }\n};\n\n/**\n * Whether or not the input matches the OpenAI tool definition.\n * @param {unknown} tool The input to check.\n * @returns {boolean} Whether the input is an OpenAI tool definition.\n */\nexport function isOpenAITool(tool: unknown): tool is ToolDefinition {\n if (typeof tool !== \"object\" || !tool) return false;\n if (\n \"type\" in tool &&\n tool.type === \"function\" &&\n \"function\" in tool &&\n typeof tool.function === \"object\" &&\n tool.function &&\n \"name\" in tool.function &&\n \"parameters\" in tool.function\n ) {\n return true;\n }\n return false;\n}\n\ninterface CalculateMaxTokenProps {\n prompt: string;\n modelName: TiktokenModel;\n}\n\nexport const calculateMaxTokens = async ({\n prompt,\n modelName,\n}: CalculateMaxTokenProps) => {\n let numTokens;\n\n try {\n numTokens = (\n await encodingForModel(getModelNameForTiktoken(modelName))\n ).encode(prompt).length;\n } catch {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\"\n );\n\n // fallback to approximate calculation if tiktoken is not available\n // each token is ~4 characters: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them#\n numTokens = Math.ceil(prompt.length / 4);\n }\n\n const maxTokens = getModelContextSize(modelName);\n return maxTokens - numTokens;\n};\n\nconst getVerbosity = () => false;\n\nexport type SerializedLLM = {\n _model: string;\n _type: string;\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n} & Record<string, any>;\n\nexport interface BaseLangChainParams {\n verbose?: boolean;\n callbacks?: Callbacks;\n tags?: string[];\n metadata?: Record<string, unknown>;\n}\n\n/**\n * Base class for language models, chains, tools.\n */\nexport abstract class BaseLangChain<\n RunInput,\n RunOutput,\n CallOptions extends RunnableConfig = RunnableConfig\n >\n extends Runnable<RunInput, RunOutput, CallOptions>\n implements BaseLangChainParams\n{\n /**\n * Whether to print out response text.\n */\n verbose: boolean;\n\n callbacks?: Callbacks;\n\n tags?: string[];\n\n metadata?: Record<string, unknown>;\n\n get lc_attributes(): { [key: string]: undefined } | undefined {\n return {\n callbacks: undefined,\n verbose: undefined,\n };\n }\n\n constructor(params: 
BaseLangChainParams) {\n super(params);\n this.verbose = params.verbose ?? getVerbosity();\n this.callbacks = params.callbacks;\n this.tags = params.tags ?? [];\n this.metadata = params.metadata ?? {};\n }\n}\n\n/**\n * Base interface for language model parameters.\n * A subclass of {@link BaseLanguageModel} should have a constructor that\n * takes in a parameter that extends this interface.\n */\nexport interface BaseLanguageModelParams\n extends AsyncCallerParams,\n BaseLangChainParams {\n /**\n * @deprecated Use `callbacks` instead\n */\n callbackManager?: CallbackManager;\n\n cache?: BaseCache | boolean;\n}\n\nexport interface BaseLanguageModelTracingCallOptions {\n /**\n * Describes the format of structured outputs.\n * This should be provided if an output is considered to be structured\n */\n ls_structured_output_format?: {\n /**\n * An object containing the method used for structured output (e.g., \"jsonMode\").\n */\n kwargs: { method: string };\n /**\n * The JSON schema describing the expected output structure.\n */\n schema?: JSONSchema;\n };\n}\n\nexport interface BaseLanguageModelCallOptions\n extends RunnableConfig,\n BaseLanguageModelTracingCallOptions {\n /**\n * Stop tokens to use for this call.\n * If not provided, the default stop tokens for the model will be used.\n */\n stop?: string[];\n}\n\nexport interface FunctionDefinition {\n /**\n * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain\n * underscores and dashes, with a maximum length of 64.\n */\n name: string;\n\n /**\n * The parameters the functions accepts, described as a JSON Schema object. See the\n * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for\n * examples, and the\n * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for\n * documentation about the format.\n *\n * To describe a function that accepts no parameters, provide the value\n * `{\"type\": \"object\", \"properties\": {}}`.\n */\n parameters: Record<string, unknown> | JSONSchema;\n\n /**\n * A description of what the function does, used by the model to choose when and\n * how to call the function.\n */\n description?: string;\n}\n\nexport interface ToolDefinition {\n type: \"function\";\n function: FunctionDefinition;\n}\n\nexport type FunctionCallOption = {\n name: string;\n};\n\nexport interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {\n function_call?: FunctionCallOption;\n functions?: FunctionDefinition[];\n}\n\nexport type BaseLanguageModelInput =\n | BasePromptValueInterface\n | string\n | BaseMessageLike[];\n\nexport type StructuredOutputType = InferInteropZodOutput<InteropZodObject>;\n\nexport type StructuredOutputMethodOptions<IncludeRaw extends boolean = false> =\n {\n name?: string;\n method?: \"functionCalling\" | \"jsonMode\" | \"jsonSchema\" | string;\n includeRaw?: IncludeRaw;\n /** Whether to use strict mode. Currently only supported by OpenAI models. 
*/\n strict?: boolean;\n };\n\n/** @deprecated Use StructuredOutputMethodOptions instead */\nexport type StructuredOutputMethodParams<\n RunOutput,\n IncludeRaw extends boolean = false\n> = {\n /** @deprecated Pass schema in as the first argument */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n schema: InteropZodType<RunOutput> | Record<string, any>;\n name?: string;\n method?: \"functionCalling\" | \"jsonMode\";\n includeRaw?: IncludeRaw;\n};\n\nexport interface BaseLanguageModelInterface<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any,\n CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions\n> extends RunnableInterface<BaseLanguageModelInput, RunOutput, CallOptions> {\n get callKeys(): string[];\n\n generatePrompt(\n promptValues: BasePromptValueInterface[],\n options?: string[] | CallOptions,\n callbacks?: Callbacks\n ): Promise<LLMResult>;\n\n _modelType(): string;\n\n _llmType(): string;\n\n getNumTokens(content: MessageContent): Promise<number>;\n\n /**\n * Get the identifying parameters of the LLM.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _identifyingParams(): Record<string, any>;\n\n serialize(): SerializedLLM;\n}\n\nexport type LanguageModelOutput = BaseMessage | string;\n\nexport type LanguageModelLike = Runnable<\n BaseLanguageModelInput,\n LanguageModelOutput\n>;\n\n/**\n * Base class for language models.\n */\nexport abstract class BaseLanguageModel<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any,\n CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions\n >\n extends BaseLangChain<BaseLanguageModelInput, RunOutput, CallOptions>\n implements\n BaseLanguageModelParams,\n BaseLanguageModelInterface<RunOutput, CallOptions>\n{\n /**\n * Keys that the language model accepts as call options.\n */\n get callKeys(): string[] {\n return [\"stop\", \"timeout\", \"signal\", \"tags\", \"metadata\", \"callbacks\"];\n }\n\n /**\n * The async caller should be used by subclasses to make any async calls,\n * which will thus benefit from the concurrency and retry logic.\n */\n caller: AsyncCaller;\n\n cache?: BaseCache;\n\n constructor({\n callbacks,\n callbackManager,\n ...params\n }: BaseLanguageModelParams) {\n const { cache, ...rest } = params;\n super({\n callbacks: callbacks ?? callbackManager,\n ...rest,\n });\n if (typeof cache === \"object\") {\n this.cache = cache;\n } else if (cache) {\n this.cache = InMemoryCache.global();\n } else {\n this.cache = undefined;\n }\n this.caller = new AsyncCaller(params ?? 
{});\n }\n\n abstract generatePrompt(\n promptValues: BasePromptValueInterface[],\n options?: string[] | CallOptions,\n callbacks?: Callbacks\n ): Promise<LLMResult>;\n\n abstract _modelType(): string;\n\n abstract _llmType(): string;\n\n private _encoding?: Tiktoken;\n\n /**\n * Get the number of tokens in the content.\n * @param content The content to get the number of tokens for.\n * @returns The number of tokens in the content.\n */\n async getNumTokens(content: MessageContent) {\n // Extract text content from MessageContent\n let textContent: string;\n if (typeof content === \"string\") {\n textContent = content;\n } else {\n /**\n * Content is an array of ContentBlock\n *\n * ToDo(@christian-bromann): This is a temporary fix to get the number of tokens for the content.\n * We need to find a better way to do this.\n * @see https://github.com/langchain-ai/langchainjs/pull/8341#pullrequestreview-2933713116\n */\n textContent = content\n .map((item) => {\n if (typeof item === \"string\") return item;\n if (item.type === \"text\" && \"text\" in item) return item.text;\n return \"\";\n })\n .join(\"\");\n }\n\n // fallback to approximate calculation if tiktoken is not available\n let numTokens = Math.ceil(textContent.length / 4);\n\n if (!this._encoding) {\n try {\n this._encoding = await encodingForModel(\n \"modelName\" in this\n ? getModelNameForTiktoken(this.modelName as string)\n : \"gpt2\"\n );\n } catch (error) {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\",\n error\n );\n }\n }\n\n if (this._encoding) {\n try {\n numTokens = this._encoding.encode(textContent).length;\n } catch (error) {\n console.warn(\n \"Failed to calculate number of tokens, falling back to approximate count\",\n error\n );\n }\n }\n\n return numTokens;\n }\n\n protected static _convertInputToPromptValue(\n input: BaseLanguageModelInput\n ): BasePromptValueInterface {\n if (typeof input === \"string\") {\n return new StringPromptValue(input);\n } else if (Array.isArray(input)) {\n return new ChatPromptValue(input.map(coerceMessageLikeToMessage));\n } else {\n return input;\n }\n }\n\n /**\n * Get the identifying parameters of the LLM.\n */\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _identifyingParams(): Record<string, any> {\n return {};\n }\n\n /**\n * Create a unique cache key for a specific call to a specific language model.\n * @param callOptions Call options for the model\n * @returns A unique cache key.\n */\n _getSerializedCacheKeyParametersForCall(\n // TODO: Fix when we remove the RunnableLambda backwards compatibility shim.\n { config, ...callOptions }: CallOptions & { config?: RunnableConfig }\n ): string {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const params: Record<string, any> = {\n ...this._identifyingParams(),\n ...callOptions,\n _type: this._llmType(),\n _model: this._modelType(),\n };\n const filteredEntries = Object.entries(params).filter(\n ([_, value]) => value !== undefined\n );\n const serializedEntries = filteredEntries\n .map(([key, value]) => `${key}:${JSON.stringify(value)}`)\n .sort()\n .join(\",\");\n return serializedEntries;\n }\n\n /**\n * @deprecated\n * Return a json-like object representing this LLM.\n */\n serialize(): SerializedLLM {\n return {\n ...this._identifyingParams(),\n _type: this._llmType(),\n _model: this._modelType(),\n };\n }\n\n /**\n * @deprecated\n * Load an LLM from a json-like object describing it.\n */\n static async deserialize(_data: SerializedLLM): 
Promise<BaseLanguageModel> {\n throw new Error(\"Use .toJSON() instead\");\n }\n\n /**\n * Return profiling information for the model.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n */\n get profile(): ModelProfile {\n return {};\n }\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV3<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV3<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV4<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | ZodTypeV4<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n /**\n * Model wrapper that returns outputs formatted to match the given schema.\n *\n * @template {BaseLanguageModelInput} RunInput The input type for the Runnable, expected to be the same input for the LLM.\n * @template {Record<string, any>} RunOutput The output type for the Runnable, expected to be a Zod schema object for structured output validation.\n *\n * @param {InteropZodType<RunOutput>} schema The schema for the structured output. Either as a Zod schema or a valid JSON schema object.\n * If a Zod schema is passed, the returned attributes will be validated, whereas with JSON schema they will not be.\n * @param {string} name The name of the function to call.\n * @param {\"functionCalling\" | \"jsonMode\"} [method=functionCalling] The method to use for getting the structured output. Defaults to \"functionCalling\".\n * @param {boolean | undefined} [includeRaw=false] Whether to include the raw output in the result. 
Defaults to false.\n * @returns {Runnable<RunInput, RunOutput> | Runnable<RunInput, { raw: BaseMessage; parsed: RunOutput }>} A new runnable that calls the LLM with structured output.\n */\n withStructuredOutput?<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n schema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n {\n raw: BaseMessage;\n parsed: RunOutput;\n }\n >;\n}\n\n/**\n * Shared interface for token usage\n * return type from LLM calls.\n */\nexport interface TokenUsage {\n completionTokens?: number;\n promptTokens?: number;\n totalTokens?: number;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAgCA,MAAa,0BAA0B,CAACA,cAAqC;AAC3E,KAAI,UAAU,WAAW,QAAQ,CAC/B,QAAO;AAGT,KAAI,UAAU,WAAW,oBAAoB,CAC3C,QAAO;AAGT,KAAI,UAAU,WAAW,iBAAiB,CACxC,QAAO;AAGT,KAAI,UAAU,WAAW,YAAY,CACnC,QAAO;AAGT,KAAI,UAAU,WAAW,SAAS,CAChC,QAAO;AAGT,KAAI,UAAU,WAAW,SAAS,CAChC,QAAO;AAGT,QAAO;AACR;AAED,MAAa,0BAA0B,CAACC,cAA+B;AACrE,SAAQ,WAAR;EACE,KAAK,yBACH,QAAO;EACT,QACE,QAAO;CACV;AACF;;;;;;;;;;;;AAaD,MAAa,sBAAsB,CAACD,cAA8B;CAChE,MAAM,iBAAiB,wBAAwB,UAAU;AAEzD,SAAQ,gBAAR;EAEE,KAAK;EACL,KAAK;EACL,KAAK,sBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,oBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,qBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK,iBACH,QAAO;EACT,KAAK;EACL,KAAK;EACL,KAAK,aACH,QAAO;EAGT,KAAK;EACL,KAAK,yBACH,QAAO;EACT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,qBACH,QAAO;EAGT,KAAK;EACL,KAAK,mBACH,QAAO;EACT,KAAK,mBACH,QAAO;EACT,KAAK;EACL,KAAK;EACL,KAAK,eACH,QAAO;EAGT,KAAK;EACL,KAAK,mBACH,QAAO;EACT,KAAK,mBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,aACH,QAAO;EACT,KAAK;EACL,KAAK,qBACH,QAAO;EAGT,KAAK;EACL,KAAK;EACL,KAAK;EACL,KAAK,0BACH,QAAO;EACT,KAAK;EACL,KAAK,oBACH,QAAO;EAET,QACE,QAAO;CACV;AACF;;;;;;AAOD,SAAgB,aAAaE,MAAuC;AAClE,KAAI,OAAO,SAAS,YAAY,CAAC,KAAM,QAAO;AAC9C,KACE,UAAU,QACV,KAAK,SAAS,cACd,cAAc,QACd,OAAO,KAAK,aAAa,YACzB,KAAK,YACL,UAAU,KAAK,YACf,gBAAgB,KAAK,SAErB,QAAO;AAET,QAAO;AACR;AAOD,MAAa,qBAAqB,OAAO,EACvC,QACA,WACuB,KAAK;CAC5B,IAAI;AAEJ,KAAI;EACF,aACE,MAAM,iBAAiB,wBAAwB,UAAU,CAAC,EAC1D,OAAO,OAAO,CAAC;CAClB,QAAO;EACN,QAAQ,KACN,0EACD;EAID,YAAY,KAAK,KAAK,OAAO,SAAS,EAAE;CACzC;CAED,MAAM,YAAY,oBAAoB,UAAU;AAChD,QAAO,YAAY;AACpB;AAED,MAAM,eAAe,MAAM;;;;AAkB3B,IAAsB,gBAAtB,cAKU,SAEV;;;;CAIE;CAEA;CAEA;CAEA;CAEA,IAAI,gBAA0D;AAC5D,SAAO;GACL,WAAW;GACX,SAAS;EACV;CACF;CAED,YAAYC,QAA6B;EACvC,MAAM,OAAO;EACb,KAAK,UAAU,OAAO,WAAW,cAAc;EAC/C,KAAK,YAAY,OAAO;EACxB,KAAK,OAAO,OAAO,QAAQ,CAAE;EAC7B,KAAK,WAAW,OAAO,YAAY,CAAE;CACtC;AACF;;;;AAwJD,IAAsB,oBAAtB,cAKU,cAIV;;;;CAIE,IAAI,WAAqB;AACvB,SAAO;GAAC;GAAQ;GAAW;GAAU;GAAQ;GAAY;EAAY;CACtE;;;;;CAMD;CAEA;CAEA,YAAY,EACV,WACA,gBACA,GAAG,QACqB,EAAE;EAC1B,MAAM,EAAE,MAAO,GAAG,MAAM,GAAG;EAC3B,MAAM;GACJ,WAAW,aAAa;GACxB,GAAG;EACJ,EAAC;AACF,MAAI,OAAO,UAAU,UACnB,KAAK,QAAQ;WACJ,OACT,KAAK,QAAQ,cAAc,QAAQ;OAEnC,KAAK,QAAQ;EAEf,KAAK,SAAS,IAAI,YAAY,UAAU,CAAE;CAC3C;CAYD,AAAQ;;;;;;CAOR,MAAM,aAAaC,SAAyB;EAE1C,IAAIC;AACJ,MAAI,OAAO,YAAY,UACrB,cAAc;;;;;;;;;EASd,cAAc,QACX,IAAI,CAAC,SAAS;AACb,OAAI,OAAO,SAAS,SAAU,QAAO;AACrC,OAAI,KAAK,SAAS,UAAU,UAAU,KAAM,QAAO,KAAK;AACxD,UAAO;EACR,EAAC,CACD,KAAK,GAAG;EAIb,IAAI,YAAY,KAAK,KAAK,YAAY,SAAS,EAAE;AAEjD,MAAI,CAAC,KAAK,UACR,KAAI;GACF,KAAK,YAAY,MAAM,iBACrB,eAAe,OACX,wBAAwB,KAAK,UAAoB,GACjD,OACL;EACF,SAAQ,OAAO;GACd,QAAQ,KACN,2EACA,MACD;EACF;AAGH,MAAI,KAAK
,UACP,KAAI;GACF,YAAY,KAAK,UAAU,OAAO,YAAY,CAAC;EAChD,SAAQ,OAAO;GACd,QAAQ,KACN,2EACA,MACD;EACF;AAGH,SAAO;CACR;CAED,OAAiB,2BACfC,OAC0B;AAC1B,MAAI,OAAO,UAAU,SACnB,QAAO,IAAI,kBAAkB;WACpB,MAAM,QAAQ,MAAM,CAC7B,QAAO,IAAI,gBAAgB,MAAM,IAAI,2BAA2B;MAEhE,QAAO;CAEV;;;;CAMD,qBAA0C;AACxC,SAAO,CAAE;CACV;;;;;;CAOD,wCAEE,EAAE,OAAQ,GAAG,aAAwD,EAC7D;EAER,MAAMC,SAA8B;GAClC,GAAG,KAAK,oBAAoB;GAC5B,GAAG;GACH,OAAO,KAAK,UAAU;GACtB,QAAQ,KAAK,YAAY;EAC1B;EACD,MAAM,kBAAkB,OAAO,QAAQ,OAAO,CAAC,OAC7C,CAAC,CAAC,GAAG,MAAM,KAAK,UAAU,OAC3B;EACD,MAAM,oBAAoB,gBACvB,IAAI,CAAC,CAAC,KAAK,MAAM,KAAK,GAAG,IAAI,CAAC,EAAE,KAAK,UAAU,MAAM,EAAE,CAAC,CACxD,MAAM,CACN,KAAK,IAAI;AACZ,SAAO;CACR;;;;;CAMD,YAA2B;AACzB,SAAO;GACL,GAAG,KAAK,oBAAoB;GAC5B,OAAO,KAAK,UAAU;GACtB,QAAQ,KAAK,YAAY;EAC1B;CACF;;;;;CAMD,aAAa,YAAYC,OAAkD;AACzE,QAAM,IAAI,MAAM;CACjB;;;;;;CAOD,IAAI,UAAwB;AAC1B,SAAO,CAAE;CACV;AA6EF"}
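
The updated source embedded in this map shows the concrete `base.ts` change: a `get profile(): ModelProfile` accessor on `BaseLanguageModel` that returns `{}` in the base class, leaving subclasses to report real capabilities. A hedged consumer-side sketch built on that getter; the helper name and fallback value are illustrative, not part of the package:

```typescript
import type { BaseLanguageModel } from "@langchain/core/language_models/base";

// Hypothetical helper: derive a prompt budget from a model's profile.
// The base-class getter returns {}, so maxInputTokens may be undefined
// and a fallback is required.
function promptBudget(model: BaseLanguageModel, fallback = 4096): number {
  return model.profile.maxInputTokens ?? fallback;
}
```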
package/dist/language_models/profile.cjs
@@ -0,0 +1,12 @@
+
+ //#region src/language_models/profile.ts
+ var profile_exports = {};
+
+ //#endregion
+ Object.defineProperty(exports, 'profile_exports', {
+   enumerable: true,
+   get: function () {
+     return profile_exports;
+   }
+ });
+ //# sourceMappingURL=profile.cjs.map
package/dist/language_models/profile.cjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"profile.cjs","names":[],"sources":["../../src/language_models/profile.ts"],"sourcesContent":["/**\n * Represents the capabilities and constraints of a language model.\n *\n * This interface defines the various features and limitations that a model may have,\n * including input/output constraints, multimodal support, and advanced capabilities\n * like tool calling and structured output.\n */\nexport interface ModelProfile {\n /**\n * Maximum number of tokens that can be included in the input context window.\n *\n * This represents the total token budget for the model's input, including\n * the prompt, system messages, conversation history, and any other context.\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxInputTokens: 128000 // Model supports up to 128k tokens\n * };\n * ```\n */\n maxInputTokens?: number;\n\n /**\n * Whether the model supports image inputs.\n *\n * When `true`, the model can process images as part of its input, enabling\n * multimodal interactions where visual content can be analyzed alongside text.\n *\n * @see {@link imageUrlInputs} for URL-based image input support\n */\n imageInputs?: boolean;\n\n /**\n * Whether the model supports image URL inputs.\n *\n * When `true`, the model can accept URLs pointing to images rather than\n * requiring the image data to be embedded directly in the request. This can\n * be more efficient for large images or when images are already hosted.\n *\n * @see {@link imageInputs} for direct image input support\n */\n imageUrlInputs?: boolean;\n\n /**\n * Whether the model supports PDF document inputs.\n *\n * When `true`, the model can process PDF files as input, allowing it to\n * analyze document content, extract information, or answer questions about\n * PDF documents.\n */\n pdfInputs?: boolean;\n\n /**\n * Whether the model supports audio inputs.\n *\n * When `true`, the model can process audio data as input, enabling\n * capabilities like speech recognition, audio analysis, or multimodal\n * interactions involving sound.\n */\n audioInputs?: boolean;\n\n /**\n * Whether the model supports video inputs.\n *\n * When `true`, the model can process video data as input, enabling\n * capabilities like video analysis, scene understanding, or multimodal\n * interactions involving moving images.\n */\n videoInputs?: boolean;\n\n /**\n * Whether the model supports image content in tool messages.\n *\n * When `true`, tool responses can include images that the model can process\n * and reason about. This enables workflows where tools return visual data\n * that the model needs to interpret.\n */\n imageToolMessage?: boolean;\n\n /**\n * Whether the model supports PDF content in tool messages.\n *\n * When `true`, tool responses can include PDF documents that the model can\n * process and reason about. 
This enables workflows where tools return\n * document data that the model needs to interpret.\n */\n pdfToolMessage?: boolean;\n\n /**\n * Maximum number of tokens the model can generate in its output.\n *\n * This represents the upper limit on the length of the model's response.\n * The actual output may be shorter depending on the completion criteria\n * (e.g., natural stopping point, stop sequences).\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxOutputTokens: 4096 // Model can generate up to 4k tokens\n * };\n * ```\n */\n maxOutputTokens?: number;\n\n /**\n * Whether the model supports reasoning or chain-of-thought output.\n *\n * When `true`, the model can produce explicit reasoning steps or\n * chain-of-thought explanations as part of its output. This is useful\n * for understanding the model's decision-making process and improving\n * transparency in complex reasoning tasks.\n */\n reasoningOutput?: boolean;\n\n /**\n * Whether the model can generate image outputs.\n *\n * When `true`, the model can produce images as part of its response,\n * enabling capabilities like image generation, editing, or visual\n * content creation.\n */\n imageOutputs?: boolean;\n\n /**\n * Whether the model can generate audio outputs.\n *\n * When `true`, the model can produce audio data as part of its response,\n * enabling capabilities like text-to-speech, audio generation, or\n * sound synthesis.\n */\n audioOutputs?: boolean;\n\n /**\n * Whether the model can generate video outputs.\n *\n * When `true`, the model can produce video data as part of its response,\n * enabling capabilities like video generation, editing, or visual\n * content creation with motion.\n */\n videoOutputs?: boolean;\n\n /**\n * Whether the model supports tool calling (function calling).\n *\n * When `true`, the model can invoke external tools or functions during\n * its reasoning process. The model can decide which tools to call,\n * with what arguments, and can incorporate the tool results into its\n * final response.\n *\n * @see {@link toolChoice} for controlling tool selection behavior\n * @see {@link https://docs.langchain.com/oss/javascript/langchain/models#tool-calling}\n */\n toolCalling?: boolean;\n\n /**\n * Whether the model supports tool choice control.\n *\n * When `true`, the caller can specify how the model should select tools,\n * such as forcing the use of a specific tool, allowing any tool, or\n * preventing tool use entirely. This provides fine-grained control over\n * the model's tool-calling behavior.\n *\n * @see {@link toolCalling} for basic tool calling support\n */\n toolChoice?: boolean;\n\n /**\n * Whether the model supports structured output generation.\n *\n * When `true`, the model can generate responses that conform to a\n * specified schema or structure (e.g., JSON with a particular format).\n * This is useful for ensuring the model's output can be reliably parsed\n * and processed programmatically.\n *\n * @example\n * ```typescript\n * // Model can be instructed to return JSON matching a schema\n * const profile: ModelProfile = {\n * structuredOutput: true\n * };\n * ```\n */\n structuredOutput?: boolean;\n}\n"],"mappings":""}
package/dist/language_models/profile.d.cts
@@ -0,0 +1,174 @@
+ //#region src/language_models/profile.d.ts
+ /**
+  * Represents the capabilities and constraints of a language model.
+  *
+  * This interface defines the various features and limitations that a model may have,
+  * including input/output constraints, multimodal support, and advanced capabilities
+  * like tool calling and structured output.
+  */
+ interface ModelProfile {
+   /**
+    * Maximum number of tokens that can be included in the input context window.
+    *
+    * This represents the total token budget for the model's input, including
+    * the prompt, system messages, conversation history, and any other context.
+    *
+    * @example
+    * ```typescript
+    * const profile: ModelProfile = {
+    *   maxInputTokens: 128000 // Model supports up to 128k tokens
+    * };
+    * ```
+    */
+   maxInputTokens?: number;
+   /**
+    * Whether the model supports image inputs.
+    *
+    * When `true`, the model can process images as part of its input, enabling
+    * multimodal interactions where visual content can be analyzed alongside text.
+    *
+    * @see {@link imageUrlInputs} for URL-based image input support
+    */
+   imageInputs?: boolean;
+   /**
+    * Whether the model supports image URL inputs.
+    *
+    * When `true`, the model can accept URLs pointing to images rather than
+    * requiring the image data to be embedded directly in the request. This can
+    * be more efficient for large images or when images are already hosted.
+    *
+    * @see {@link imageInputs} for direct image input support
+    */
+   imageUrlInputs?: boolean;
+   /**
+    * Whether the model supports PDF document inputs.
+    *
+    * When `true`, the model can process PDF files as input, allowing it to
+    * analyze document content, extract information, or answer questions about
+    * PDF documents.
+    */
+   pdfInputs?: boolean;
+   /**
+    * Whether the model supports audio inputs.
+    *
+    * When `true`, the model can process audio data as input, enabling
+    * capabilities like speech recognition, audio analysis, or multimodal
+    * interactions involving sound.
+    */
+   audioInputs?: boolean;
+   /**
+    * Whether the model supports video inputs.
+    *
+    * When `true`, the model can process video data as input, enabling
+    * capabilities like video analysis, scene understanding, or multimodal
+    * interactions involving moving images.
+    */
+   videoInputs?: boolean;
+   /**
+    * Whether the model supports image content in tool messages.
+    *
+    * When `true`, tool responses can include images that the model can process
+    * and reason about. This enables workflows where tools return visual data
+    * that the model needs to interpret.
+    */
+   imageToolMessage?: boolean;
+   /**
+    * Whether the model supports PDF content in tool messages.
+    *
+    * When `true`, tool responses can include PDF documents that the model can
+    * process and reason about. This enables workflows where tools return
+    * document data that the model needs to interpret.
+    */
+   pdfToolMessage?: boolean;
+   /**
+    * Maximum number of tokens the model can generate in its output.
+    *
+    * This represents the upper limit on the length of the model's response.
+    * The actual output may be shorter depending on the completion criteria
+    * (e.g., natural stopping point, stop sequences).
+    *
+    * @example
+    * ```typescript
+    * const profile: ModelProfile = {
+    *   maxOutputTokens: 4096 // Model can generate up to 4k tokens
+    * };
+    * ```
+    */
+   maxOutputTokens?: number;
+   /**
+    * Whether the model supports reasoning or chain-of-thought output.
+    *
+    * When `true`, the model can produce explicit reasoning steps or
+    * chain-of-thought explanations as part of its output. This is useful
+    * for understanding the model's decision-making process and improving
+    * transparency in complex reasoning tasks.
+    */
+   reasoningOutput?: boolean;
+   /**
+    * Whether the model can generate image outputs.
+    *
+    * When `true`, the model can produce images as part of its response,
+    * enabling capabilities like image generation, editing, or visual
+    * content creation.
+    */
+   imageOutputs?: boolean;
+   /**
+    * Whether the model can generate audio outputs.
+    *
+    * When `true`, the model can produce audio data as part of its response,
+    * enabling capabilities like text-to-speech, audio generation, or
+    * sound synthesis.
+    */
+   audioOutputs?: boolean;
+   /**
+    * Whether the model can generate video outputs.
+    *
+    * When `true`, the model can produce video data as part of its response,
+    * enabling capabilities like video generation, editing, or visual
+    * content creation with motion.
+    */
+   videoOutputs?: boolean;
+   /**
+    * Whether the model supports tool calling (function calling).
+    *
+    * When `true`, the model can invoke external tools or functions during
+    * its reasoning process. The model can decide which tools to call,
+    * with what arguments, and can incorporate the tool results into its
+    * final response.
+    *
+    * @see {@link toolChoice} for controlling tool selection behavior
+    * @see {@link https://docs.langchain.com/oss/javascript/langchain/models#tool-calling}
141
+ */
142
+ toolCalling?: boolean;
143
+ /**
144
+ * Whether the model supports tool choice control.
145
+ *
146
+ * When `true`, the caller can specify how the model should select tools,
147
+ * such as forcing the use of a specific tool, allowing any tool, or
148
+ * preventing tool use entirely. This provides fine-grained control over
149
+ * the model's tool-calling behavior.
150
+ *
151
+ * @see {@link toolCalling} for basic tool calling support
152
+ */
153
+ toolChoice?: boolean;
154
+ /**
155
+ * Whether the model supports structured output generation.
156
+ *
157
+ * When `true`, the model can generate responses that conform to a
158
+ * specified schema or structure (e.g., JSON with a particular format).
159
+ * This is useful for ensuring the model's output can be reliably parsed
160
+ * and processed programmatically.
161
+ *
162
+ * @example
163
+ * ```typescript
164
+ * // Model can be instructed to return JSON matching a schema
165
+ * const profile: ModelProfile = {
166
+ * structuredOutput: true
167
+ * };
168
+ * ```
169
+ */
170
+ structuredOutput?: boolean;
171
+ }
172
+ //#endregion
173
+ export { ModelProfile };
174
+ //# sourceMappingURL=profile.d.cts.map
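For readers skimming the declaration file above, here is a minimal consumption sketch. It assumes the subpath entrypoint `@langchain/core/language_models/profile` inferred from the import-map changes later in this diff; the profile values and the `supportsImageInput` helper are illustrative user code, not APIs shipped by the package.

```typescript
// Minimal sketch, assuming the new subpath export resolves to profile.d.ts.
import type { ModelProfile } from "@langchain/core/language_models/profile";

// Hypothetical profile for a 128k-context multimodal model.
const exampleProfile: ModelProfile = {
  maxInputTokens: 128000,
  maxOutputTokens: 4096,
  imageInputs: true,
  imageUrlInputs: true,
  toolCalling: true,
  toolChoice: true,
  structuredOutput: true,
};

// User-defined capability gate; every field is optional, so absence is
// treated as "unsupported".
function supportsImageInput(profile: ModelProfile): boolean {
  return profile.imageInputs === true || profile.imageUrlInputs === true;
}

if (!supportsImageInput(exampleProfile)) {
  throw new Error("Selected model cannot accept image inputs");
}
```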
@@ -0,0 +1 @@
1
+ {"version":3,"file":"profile.d.cts","names":["ModelProfile"],"sources":["../../src/language_models/profile.d.ts"],"sourcesContent":["/**\n * Represents the capabilities and constraints of a language model.\n *\n * This interface defines the various features and limitations that a model may have,\n * including input/output constraints, multimodal support, and advanced capabilities\n * like tool calling and structured output.\n */\nexport interface ModelProfile {\n /**\n * Maximum number of tokens that can be included in the input context window.\n *\n * This represents the total token budget for the model's input, including\n * the prompt, system messages, conversation history, and any other context.\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxInputTokens: 128000 // Model supports up to 128k tokens\n * };\n * ```\n */\n maxInputTokens?: number;\n /**\n * Whether the model supports image inputs.\n *\n * When `true`, the model can process images as part of its input, enabling\n * multimodal interactions where visual content can be analyzed alongside text.\n *\n * @see {@link imageUrlInputs} for URL-based image input support\n */\n imageInputs?: boolean;\n /**\n * Whether the model supports image URL inputs.\n *\n * When `true`, the model can accept URLs pointing to images rather than\n * requiring the image data to be embedded directly in the request. This can\n * be more efficient for large images or when images are already hosted.\n *\n * @see {@link imageInputs} for direct image input support\n */\n imageUrlInputs?: boolean;\n /**\n * Whether the model supports PDF document inputs.\n *\n * When `true`, the model can process PDF files as input, allowing it to\n * analyze document content, extract information, or answer questions about\n * PDF documents.\n */\n pdfInputs?: boolean;\n /**\n * Whether the model supports audio inputs.\n *\n * When `true`, the model can process audio data as input, enabling\n * capabilities like speech recognition, audio analysis, or multimodal\n * interactions involving sound.\n */\n audioInputs?: boolean;\n /**\n * Whether the model supports video inputs.\n *\n * When `true`, the model can process video data as input, enabling\n * capabilities like video analysis, scene understanding, or multimodal\n * interactions involving moving images.\n */\n videoInputs?: boolean;\n /**\n * Whether the model supports image content in tool messages.\n *\n * When `true`, tool responses can include images that the model can process\n * and reason about. This enables workflows where tools return visual data\n * that the model needs to interpret.\n */\n imageToolMessage?: boolean;\n /**\n * Whether the model supports PDF content in tool messages.\n *\n * When `true`, tool responses can include PDF documents that the model can\n * process and reason about. 
This enables workflows where tools return\n * document data that the model needs to interpret.\n */\n pdfToolMessage?: boolean;\n /**\n * Maximum number of tokens the model can generate in its output.\n *\n * This represents the upper limit on the length of the model's response.\n * The actual output may be shorter depending on the completion criteria\n * (e.g., natural stopping point, stop sequences).\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxOutputTokens: 4096 // Model can generate up to 4k tokens\n * };\n * ```\n */\n maxOutputTokens?: number;\n /**\n * Whether the model supports reasoning or chain-of-thought output.\n *\n * When `true`, the model can produce explicit reasoning steps or\n * chain-of-thought explanations as part of its output. This is useful\n * for understanding the model's decision-making process and improving\n * transparency in complex reasoning tasks.\n */\n reasoningOutput?: boolean;\n /**\n * Whether the model can generate image outputs.\n *\n * When `true`, the model can produce images as part of its response,\n * enabling capabilities like image generation, editing, or visual\n * content creation.\n */\n imageOutputs?: boolean;\n /**\n * Whether the model can generate audio outputs.\n *\n * When `true`, the model can produce audio data as part of its response,\n * enabling capabilities like text-to-speech, audio generation, or\n * sound synthesis.\n */\n audioOutputs?: boolean;\n /**\n * Whether the model can generate video outputs.\n *\n * When `true`, the model can produce video data as part of its response,\n * enabling capabilities like video generation, editing, or visual\n * content creation with motion.\n */\n videoOutputs?: boolean;\n /**\n * Whether the model supports tool calling (function calling).\n *\n * When `true`, the model can invoke external tools or functions during\n * its reasoning process. The model can decide which tools to call,\n * with what arguments, and can incorporate the tool results into its\n * final response.\n *\n * @see {@link toolChoice} for controlling tool selection behavior\n * @see {@link https://docs.langchain.com/oss/javascript/langchain/models#tool-calling}\n */\n toolCalling?: boolean;\n /**\n * Whether the model supports tool choice control.\n *\n * When `true`, the caller can specify how the model should select tools,\n * such as forcing the use of a specific tool, allowing any tool, or\n * preventing tool use entirely. This provides fine-grained control over\n * the model's tool-calling behavior.\n *\n * @see {@link toolCalling} for basic tool calling support\n */\n toolChoice?: boolean;\n /**\n * Whether the model supports structured output generation.\n *\n * When `true`, the model can generate responses that conform to a\n * specified schema or structure (e.g., JSON with a particular format).\n * This is useful for ensuring the model's output can be reliably parsed\n * and processed programmatically.\n *\n * @example\n * ```typescript\n * // Model can be instructed to return JSON matching a schema\n * const profile: ModelProfile = {\n * structuredOutput: true\n * };\n * ```\n */\n structuredOutput?: boolean;\n}\n"],"mappings":";;AAOA;;;;;;UAAiBA,YAAAA"}
@@ -0,0 +1,174 @@
1
+ //#region src/language_models/profile.d.ts
2
+ /**
3
+ * Represents the capabilities and constraints of a language model.
4
+ *
5
+ * This interface defines the various features and limitations that a model may have,
6
+ * including input/output constraints, multimodal support, and advanced capabilities
7
+ * like tool calling and structured output.
8
+ */
9
+ interface ModelProfile {
10
+ /**
11
+ * Maximum number of tokens that can be included in the input context window.
12
+ *
13
+ * This represents the total token budget for the model's input, including
14
+ * the prompt, system messages, conversation history, and any other context.
15
+ *
16
+ * @example
17
+ * ```typescript
18
+ * const profile: ModelProfile = {
19
+ * maxInputTokens: 128000 // Model supports up to 128k tokens
20
+ * };
21
+ * ```
22
+ */
23
+ maxInputTokens?: number;
24
+ /**
25
+ * Whether the model supports image inputs.
26
+ *
27
+ * When `true`, the model can process images as part of its input, enabling
28
+ * multimodal interactions where visual content can be analyzed alongside text.
29
+ *
30
+ * @see {@link imageUrlInputs} for URL-based image input support
31
+ */
32
+ imageInputs?: boolean;
33
+ /**
34
+ * Whether the model supports image URL inputs.
35
+ *
36
+ * When `true`, the model can accept URLs pointing to images rather than
37
+ * requiring the image data to be embedded directly in the request. This can
38
+ * be more efficient for large images or when images are already hosted.
39
+ *
40
+ * @see {@link imageInputs} for direct image input support
41
+ */
42
+ imageUrlInputs?: boolean;
43
+ /**
44
+ * Whether the model supports PDF document inputs.
45
+ *
46
+ * When `true`, the model can process PDF files as input, allowing it to
47
+ * analyze document content, extract information, or answer questions about
48
+ * PDF documents.
49
+ */
50
+ pdfInputs?: boolean;
51
+ /**
52
+ * Whether the model supports audio inputs.
53
+ *
54
+ * When `true`, the model can process audio data as input, enabling
55
+ * capabilities like speech recognition, audio analysis, or multimodal
56
+ * interactions involving sound.
57
+ */
58
+ audioInputs?: boolean;
59
+ /**
60
+ * Whether the model supports video inputs.
61
+ *
62
+ * When `true`, the model can process video data as input, enabling
63
+ * capabilities like video analysis, scene understanding, or multimodal
64
+ * interactions involving moving images.
65
+ */
66
+ videoInputs?: boolean;
67
+ /**
68
+ * Whether the model supports image content in tool messages.
69
+ *
70
+ * When `true`, tool responses can include images that the model can process
71
+ * and reason about. This enables workflows where tools return visual data
72
+ * that the model needs to interpret.
73
+ */
74
+ imageToolMessage?: boolean;
75
+ /**
76
+ * Whether the model supports PDF content in tool messages.
77
+ *
78
+ * When `true`, tool responses can include PDF documents that the model can
79
+ * process and reason about. This enables workflows where tools return
80
+ * document data that the model needs to interpret.
81
+ */
82
+ pdfToolMessage?: boolean;
83
+ /**
84
+ * Maximum number of tokens the model can generate in its output.
85
+ *
86
+ * This represents the upper limit on the length of the model's response.
87
+ * The actual output may be shorter depending on the completion criteria
88
+ * (e.g., natural stopping point, stop sequences).
89
+ *
90
+ * @example
91
+ * ```typescript
92
+ * const profile: ModelProfile = {
93
+ * maxOutputTokens: 4096 // Model can generate up to 4k tokens
94
+ * };
95
+ * ```
96
+ */
97
+ maxOutputTokens?: number;
98
+ /**
99
+ * Whether the model supports reasoning or chain-of-thought output.
100
+ *
101
+ * When `true`, the model can produce explicit reasoning steps or
102
+ * chain-of-thought explanations as part of its output. This is useful
103
+ * for understanding the model's decision-making process and improving
104
+ * transparency in complex reasoning tasks.
105
+ */
106
+ reasoningOutput?: boolean;
107
+ /**
108
+ * Whether the model can generate image outputs.
109
+ *
110
+ * When `true`, the model can produce images as part of its response,
111
+ * enabling capabilities like image generation, editing, or visual
112
+ * content creation.
113
+ */
114
+ imageOutputs?: boolean;
115
+ /**
116
+ * Whether the model can generate audio outputs.
117
+ *
118
+ * When `true`, the model can produce audio data as part of its response,
119
+ * enabling capabilities like text-to-speech, audio generation, or
120
+ * sound synthesis.
121
+ */
122
+ audioOutputs?: boolean;
123
+ /**
124
+ * Whether the model can generate video outputs.
125
+ *
126
+ * When `true`, the model can produce video data as part of its response,
127
+ * enabling capabilities like video generation, editing, or visual
128
+ * content creation with motion.
129
+ */
130
+ videoOutputs?: boolean;
131
+ /**
132
+ * Whether the model supports tool calling (function calling).
133
+ *
134
+ * When `true`, the model can invoke external tools or functions during
135
+ * its reasoning process. The model can decide which tools to call,
136
+ * with what arguments, and can incorporate the tool results into its
137
+ * final response.
138
+ *
139
+ * @see {@link toolChoice} for controlling tool selection behavior
140
+ * @see {@link https://docs.langchain.com/oss/javascript/langchain/models#tool-calling}
141
+ */
142
+ toolCalling?: boolean;
143
+ /**
144
+ * Whether the model supports tool choice control.
145
+ *
146
+ * When `true`, the caller can specify how the model should select tools,
147
+ * such as forcing the use of a specific tool, allowing any tool, or
148
+ * preventing tool use entirely. This provides fine-grained control over
149
+ * the model's tool-calling behavior.
150
+ *
151
+ * @see {@link toolCalling} for basic tool calling support
152
+ */
153
+ toolChoice?: boolean;
154
+ /**
155
+ * Whether the model supports structured output generation.
156
+ *
157
+ * When `true`, the model can generate responses that conform to a
158
+ * specified schema or structure (e.g., JSON with a particular format).
159
+ * This is useful for ensuring the model's output can be reliably parsed
160
+ * and processed programmatically.
161
+ *
162
+ * @example
163
+ * ```typescript
164
+ * // Model can be instructed to return JSON matching a schema
165
+ * const profile: ModelProfile = {
166
+ * structuredOutput: true
167
+ * };
168
+ * ```
169
+ */
170
+ structuredOutput?: boolean;
171
+ }
172
+ //#endregion
173
+ export { ModelProfile };
174
+ //# sourceMappingURL=profile.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"profile.d.ts","names":["ModelProfile"],"sources":["../../src/language_models/profile.d.ts"],"sourcesContent":["/**\n * Represents the capabilities and constraints of a language model.\n *\n * This interface defines the various features and limitations that a model may have,\n * including input/output constraints, multimodal support, and advanced capabilities\n * like tool calling and structured output.\n */\nexport interface ModelProfile {\n /**\n * Maximum number of tokens that can be included in the input context window.\n *\n * This represents the total token budget for the model's input, including\n * the prompt, system messages, conversation history, and any other context.\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxInputTokens: 128000 // Model supports up to 128k tokens\n * };\n * ```\n */\n maxInputTokens?: number;\n /**\n * Whether the model supports image inputs.\n *\n * When `true`, the model can process images as part of its input, enabling\n * multimodal interactions where visual content can be analyzed alongside text.\n *\n * @see {@link imageUrlInputs} for URL-based image input support\n */\n imageInputs?: boolean;\n /**\n * Whether the model supports image URL inputs.\n *\n * When `true`, the model can accept URLs pointing to images rather than\n * requiring the image data to be embedded directly in the request. This can\n * be more efficient for large images or when images are already hosted.\n *\n * @see {@link imageInputs} for direct image input support\n */\n imageUrlInputs?: boolean;\n /**\n * Whether the model supports PDF document inputs.\n *\n * When `true`, the model can process PDF files as input, allowing it to\n * analyze document content, extract information, or answer questions about\n * PDF documents.\n */\n pdfInputs?: boolean;\n /**\n * Whether the model supports audio inputs.\n *\n * When `true`, the model can process audio data as input, enabling\n * capabilities like speech recognition, audio analysis, or multimodal\n * interactions involving sound.\n */\n audioInputs?: boolean;\n /**\n * Whether the model supports video inputs.\n *\n * When `true`, the model can process video data as input, enabling\n * capabilities like video analysis, scene understanding, or multimodal\n * interactions involving moving images.\n */\n videoInputs?: boolean;\n /**\n * Whether the model supports image content in tool messages.\n *\n * When `true`, tool responses can include images that the model can process\n * and reason about. This enables workflows where tools return visual data\n * that the model needs to interpret.\n */\n imageToolMessage?: boolean;\n /**\n * Whether the model supports PDF content in tool messages.\n *\n * When `true`, tool responses can include PDF documents that the model can\n * process and reason about. 
This enables workflows where tools return\n * document data that the model needs to interpret.\n */\n pdfToolMessage?: boolean;\n /**\n * Maximum number of tokens the model can generate in its output.\n *\n * This represents the upper limit on the length of the model's response.\n * The actual output may be shorter depending on the completion criteria\n * (e.g., natural stopping point, stop sequences).\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxOutputTokens: 4096 // Model can generate up to 4k tokens\n * };\n * ```\n */\n maxOutputTokens?: number;\n /**\n * Whether the model supports reasoning or chain-of-thought output.\n *\n * When `true`, the model can produce explicit reasoning steps or\n * chain-of-thought explanations as part of its output. This is useful\n * for understanding the model's decision-making process and improving\n * transparency in complex reasoning tasks.\n */\n reasoningOutput?: boolean;\n /**\n * Whether the model can generate image outputs.\n *\n * When `true`, the model can produce images as part of its response,\n * enabling capabilities like image generation, editing, or visual\n * content creation.\n */\n imageOutputs?: boolean;\n /**\n * Whether the model can generate audio outputs.\n *\n * When `true`, the model can produce audio data as part of its response,\n * enabling capabilities like text-to-speech, audio generation, or\n * sound synthesis.\n */\n audioOutputs?: boolean;\n /**\n * Whether the model can generate video outputs.\n *\n * When `true`, the model can produce video data as part of its response,\n * enabling capabilities like video generation, editing, or visual\n * content creation with motion.\n */\n videoOutputs?: boolean;\n /**\n * Whether the model supports tool calling (function calling).\n *\n * When `true`, the model can invoke external tools or functions during\n * its reasoning process. The model can decide which tools to call,\n * with what arguments, and can incorporate the tool results into its\n * final response.\n *\n * @see {@link toolChoice} for controlling tool selection behavior\n * @see {@link https://docs.langchain.com/oss/javascript/langchain/models#tool-calling}\n */\n toolCalling?: boolean;\n /**\n * Whether the model supports tool choice control.\n *\n * When `true`, the caller can specify how the model should select tools,\n * such as forcing the use of a specific tool, allowing any tool, or\n * preventing tool use entirely. This provides fine-grained control over\n * the model's tool-calling behavior.\n *\n * @see {@link toolCalling} for basic tool calling support\n */\n toolChoice?: boolean;\n /**\n * Whether the model supports structured output generation.\n *\n * When `true`, the model can generate responses that conform to a\n * specified schema or structure (e.g., JSON with a particular format).\n * This is useful for ensuring the model's output can be reliably parsed\n * and processed programmatically.\n *\n * @example\n * ```typescript\n * // Model can be instructed to return JSON matching a schema\n * const profile: ModelProfile = {\n * structuredOutput: true\n * };\n * ```\n */\n structuredOutput?: boolean;\n}\n"],"mappings":";;AAOA;;;;;;UAAiBA,YAAAA"}
@@ -0,0 +1,6 @@
1
+ //#region src/language_models/profile.ts
2
+ var profile_exports = {};
3
+
4
+ //#endregion
5
+ export { profile_exports };
6
+ //# sourceMappingURL=profile.js.map
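Because `ModelProfile` is a type-only interface, the compiled runtime module above is just an empty `profile_exports` object; a type-only import is the natural way to consume it. A short sketch (entrypoint path again inferred from the import-map wiring further below):

```typescript
// `import type` is erased at compile time, matching the empty runtime module.
import type { ModelProfile } from "@langchain/core/language_models/profile";

const profile: ModelProfile = { maxOutputTokens: 4096, reasoningOutput: true };
```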
@@ -0,0 +1 @@
1
+ {"version":3,"file":"profile.js","names":[],"sources":["../../src/language_models/profile.ts"],"sourcesContent":["/**\n * Represents the capabilities and constraints of a language model.\n *\n * This interface defines the various features and limitations that a model may have,\n * including input/output constraints, multimodal support, and advanced capabilities\n * like tool calling and structured output.\n */\nexport interface ModelProfile {\n /**\n * Maximum number of tokens that can be included in the input context window.\n *\n * This represents the total token budget for the model's input, including\n * the prompt, system messages, conversation history, and any other context.\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxInputTokens: 128000 // Model supports up to 128k tokens\n * };\n * ```\n */\n maxInputTokens?: number;\n\n /**\n * Whether the model supports image inputs.\n *\n * When `true`, the model can process images as part of its input, enabling\n * multimodal interactions where visual content can be analyzed alongside text.\n *\n * @see {@link imageUrlInputs} for URL-based image input support\n */\n imageInputs?: boolean;\n\n /**\n * Whether the model supports image URL inputs.\n *\n * When `true`, the model can accept URLs pointing to images rather than\n * requiring the image data to be embedded directly in the request. This can\n * be more efficient for large images or when images are already hosted.\n *\n * @see {@link imageInputs} for direct image input support\n */\n imageUrlInputs?: boolean;\n\n /**\n * Whether the model supports PDF document inputs.\n *\n * When `true`, the model can process PDF files as input, allowing it to\n * analyze document content, extract information, or answer questions about\n * PDF documents.\n */\n pdfInputs?: boolean;\n\n /**\n * Whether the model supports audio inputs.\n *\n * When `true`, the model can process audio data as input, enabling\n * capabilities like speech recognition, audio analysis, or multimodal\n * interactions involving sound.\n */\n audioInputs?: boolean;\n\n /**\n * Whether the model supports video inputs.\n *\n * When `true`, the model can process video data as input, enabling\n * capabilities like video analysis, scene understanding, or multimodal\n * interactions involving moving images.\n */\n videoInputs?: boolean;\n\n /**\n * Whether the model supports image content in tool messages.\n *\n * When `true`, tool responses can include images that the model can process\n * and reason about. This enables workflows where tools return visual data\n * that the model needs to interpret.\n */\n imageToolMessage?: boolean;\n\n /**\n * Whether the model supports PDF content in tool messages.\n *\n * When `true`, tool responses can include PDF documents that the model can\n * process and reason about. 
This enables workflows where tools return\n * document data that the model needs to interpret.\n */\n pdfToolMessage?: boolean;\n\n /**\n * Maximum number of tokens the model can generate in its output.\n *\n * This represents the upper limit on the length of the model's response.\n * The actual output may be shorter depending on the completion criteria\n * (e.g., natural stopping point, stop sequences).\n *\n * @example\n * ```typescript\n * const profile: ModelProfile = {\n * maxOutputTokens: 4096 // Model can generate up to 4k tokens\n * };\n * ```\n */\n maxOutputTokens?: number;\n\n /**\n * Whether the model supports reasoning or chain-of-thought output.\n *\n * When `true`, the model can produce explicit reasoning steps or\n * chain-of-thought explanations as part of its output. This is useful\n * for understanding the model's decision-making process and improving\n * transparency in complex reasoning tasks.\n */\n reasoningOutput?: boolean;\n\n /**\n * Whether the model can generate image outputs.\n *\n * When `true`, the model can produce images as part of its response,\n * enabling capabilities like image generation, editing, or visual\n * content creation.\n */\n imageOutputs?: boolean;\n\n /**\n * Whether the model can generate audio outputs.\n *\n * When `true`, the model can produce audio data as part of its response,\n * enabling capabilities like text-to-speech, audio generation, or\n * sound synthesis.\n */\n audioOutputs?: boolean;\n\n /**\n * Whether the model can generate video outputs.\n *\n * When `true`, the model can produce video data as part of its response,\n * enabling capabilities like video generation, editing, or visual\n * content creation with motion.\n */\n videoOutputs?: boolean;\n\n /**\n * Whether the model supports tool calling (function calling).\n *\n * When `true`, the model can invoke external tools or functions during\n * its reasoning process. The model can decide which tools to call,\n * with what arguments, and can incorporate the tool results into its\n * final response.\n *\n * @see {@link toolChoice} for controlling tool selection behavior\n * @see {@link https://docs.langchain.com/oss/javascript/langchain/models#tool-calling}\n */\n toolCalling?: boolean;\n\n /**\n * Whether the model supports tool choice control.\n *\n * When `true`, the caller can specify how the model should select tools,\n * such as forcing the use of a specific tool, allowing any tool, or\n * preventing tool use entirely. This provides fine-grained control over\n * the model's tool-calling behavior.\n *\n * @see {@link toolCalling} for basic tool calling support\n */\n toolChoice?: boolean;\n\n /**\n * Whether the model supports structured output generation.\n *\n * When `true`, the model can generate responses that conform to a\n * specified schema or structure (e.g., JSON with a particular format).\n * This is useful for ensuring the model's output can be reliably parsed\n * and processed programmatically.\n *\n * @example\n * ```typescript\n * // Model can be instructed to return JSON matching a schema\n * const profile: ModelProfile = {\n * structuredOutput: true\n * };\n * ```\n */\n structuredOutput?: boolean;\n}\n"],"mappings":""}
@@ -36,6 +36,7 @@ const require_utils_tiktoken = require('../utils/tiktoken.cjs');
36
36
  const require_language_models_base = require('../language_models/base.cjs');
37
37
  const require_language_models_chat_models = require('../language_models/chat_models.cjs');
38
38
  const require_language_models_llms = require('../language_models/llms.cjs');
39
+ const require_language_models_profile = require('../language_models/profile.cjs');
39
40
  const require_runnables_index = require('../runnables/index.cjs');
40
41
  const require_utils_json_patch = require('../utils/json_patch.cjs');
41
42
  const require_output_parsers_index = require('../output_parsers/index.cjs');
@@ -74,6 +75,7 @@ require_rolldown_runtime.__export(import_map_exports, {
74
75
  language_models__base: () => require_language_models_base.base_exports,
75
76
  language_models__chat_models: () => require_language_models_chat_models.chat_models_exports,
76
77
  language_models__llms: () => require_language_models_llms.llms_exports,
78
+ language_models__profile: () => require_language_models_profile.profile_exports,
77
79
  load__serializable: () => require_load_serializable.serializable_exports,
78
80
  memory: () => require_memory.memory_exports,
79
81
  messages: () => require_messages_index.messages_exports,
@@ -1 +1 @@
1
- {"version":3,"file":"import_map.cjs","names":[],"sources":["../../src/load/import_map.ts"],"sourcesContent":["// Auto-generated by import-map plugin. Do not edit manually.\n\nexport * as index from \"../index.js\";\nexport * as agents from \"../agents.js\";\nexport * as caches from \"../caches/base.js\";\nexport * as callbacks__base from \"../callbacks/base.js\";\nexport * as callbacks__manager from \"../callbacks/manager.js\";\nexport * as callbacks__promises from \"../callbacks/promises.js\";\nexport * as chat_history from \"../chat_history.js\";\nexport * as documents from \"../documents/index.js\";\nexport * as document_loaders__base from \"../document_loaders/base.js\";\nexport * as document_loaders__langsmith from \"../document_loaders/langsmith.js\";\nexport * as embeddings from \"../embeddings.js\";\nexport * as example_selectors from \"../example_selectors/index.js\";\nexport * as indexing from \"../indexing/index.js\";\nexport * as language_models__base from \"../language_models/base.js\";\nexport * as language_models__chat_models from \"../language_models/chat_models.js\";\nexport * as language_models__llms from \"../language_models/llms.js\";\nexport * as load__serializable from \"../load/serializable.js\";\nexport * as memory from \"../memory.js\";\nexport * as messages from \"../messages/index.js\";\nexport * as messages__tool from \"../messages/tool.js\";\nexport * as output_parsers from \"../output_parsers/index.js\";\nexport * as output_parsers__openai_tools from \"../output_parsers/openai_tools/index.js\";\nexport * as output_parsers__openai_functions from \"../output_parsers/openai_functions/index.js\";\nexport * as outputs from \"../outputs.js\";\nexport * as prompts from \"../prompts/index.js\";\nexport * as prompt_values from \"../prompt_values.js\";\nexport * as runnables from \"../runnables/index.js\";\nexport * as runnables__graph from \"../runnables/graph.js\";\nexport * as retrievers from \"../retrievers/index.js\";\nexport * as retrievers__document_compressors from \"../retrievers/document_compressors/base.js\";\nexport * as singletons from \"../singletons/index.js\";\nexport * as stores from \"../stores.js\";\nexport * as structured_query from \"../structured_query/index.js\";\nexport * as tools from \"../tools/index.js\";\nexport * as tracers__base from \"../tracers/base.js\";\nexport * as tracers__console from \"../tracers/console.js\";\nexport * as tracers__log_stream from \"../tracers/log_stream.js\";\nexport * as tracers__run_collector from \"../tracers/run_collector.js\";\nexport * as tracers__tracer_langchain from \"../tracers/tracer_langchain.js\";\nexport * as types__stream from \"../types/stream.js\";\nexport * as utils__async_caller from \"../utils/async_caller.js\";\nexport * as utils__chunk_array from \"../utils/chunk_array.js\";\nexport * as utils__env from \"../utils/env.js\";\nexport * as utils__event_source_parse from \"../utils/event_source_parse.js\";\nexport * as utils__format from \"../utils/format.js\";\nexport * as utils__function_calling from \"../utils/function_calling.js\";\nexport * as utils__hash from \"../utils/hash.js\";\nexport * as utils__json_patch from \"../utils/json_patch.js\";\nexport * as utils__json_schema from \"../utils/json_schema.js\";\nexport * as utils__math from \"../utils/math.js\";\nexport * as utils__stream from \"../utils/stream.js\";\nexport * as utils__testing from \"../utils/testing/index.js\";\nexport * as utils__tiktoken from \"../utils/tiktoken.js\";\nexport * as utils__types from 
\"../utils/types/index.js\";\nexport * as vectorstores from \"../vectorstores.js\";\n"],"mappings":""}
1
+ {"version":3,"file":"import_map.cjs","names":[],"sources":["../../src/load/import_map.ts"],"sourcesContent":["// Auto-generated by import-map plugin. Do not edit manually.\n\nexport * as index from \"../index.js\";\nexport * as agents from \"../agents.js\";\nexport * as caches from \"../caches/base.js\";\nexport * as callbacks__base from \"../callbacks/base.js\";\nexport * as callbacks__manager from \"../callbacks/manager.js\";\nexport * as callbacks__promises from \"../callbacks/promises.js\";\nexport * as chat_history from \"../chat_history.js\";\nexport * as documents from \"../documents/index.js\";\nexport * as document_loaders__base from \"../document_loaders/base.js\";\nexport * as document_loaders__langsmith from \"../document_loaders/langsmith.js\";\nexport * as embeddings from \"../embeddings.js\";\nexport * as example_selectors from \"../example_selectors/index.js\";\nexport * as indexing from \"../indexing/index.js\";\nexport * as language_models__base from \"../language_models/base.js\";\nexport * as language_models__chat_models from \"../language_models/chat_models.js\";\nexport * as language_models__llms from \"../language_models/llms.js\";\nexport * as language_models__profile from \"../language_models/profile.js\";\nexport * as load__serializable from \"../load/serializable.js\";\nexport * as memory from \"../memory.js\";\nexport * as messages from \"../messages/index.js\";\nexport * as messages__tool from \"../messages/tool.js\";\nexport * as output_parsers from \"../output_parsers/index.js\";\nexport * as output_parsers__openai_tools from \"../output_parsers/openai_tools/index.js\";\nexport * as output_parsers__openai_functions from \"../output_parsers/openai_functions/index.js\";\nexport * as outputs from \"../outputs.js\";\nexport * as prompts from \"../prompts/index.js\";\nexport * as prompt_values from \"../prompt_values.js\";\nexport * as runnables from \"../runnables/index.js\";\nexport * as runnables__graph from \"../runnables/graph.js\";\nexport * as retrievers from \"../retrievers/index.js\";\nexport * as retrievers__document_compressors from \"../retrievers/document_compressors/base.js\";\nexport * as singletons from \"../singletons/index.js\";\nexport * as stores from \"../stores.js\";\nexport * as structured_query from \"../structured_query/index.js\";\nexport * as tools from \"../tools/index.js\";\nexport * as tracers__base from \"../tracers/base.js\";\nexport * as tracers__console from \"../tracers/console.js\";\nexport * as tracers__log_stream from \"../tracers/log_stream.js\";\nexport * as tracers__run_collector from \"../tracers/run_collector.js\";\nexport * as tracers__tracer_langchain from \"../tracers/tracer_langchain.js\";\nexport * as types__stream from \"../types/stream.js\";\nexport * as utils__async_caller from \"../utils/async_caller.js\";\nexport * as utils__chunk_array from \"../utils/chunk_array.js\";\nexport * as utils__env from \"../utils/env.js\";\nexport * as utils__event_source_parse from \"../utils/event_source_parse.js\";\nexport * as utils__format from \"../utils/format.js\";\nexport * as utils__function_calling from \"../utils/function_calling.js\";\nexport * as utils__hash from \"../utils/hash.js\";\nexport * as utils__json_patch from \"../utils/json_patch.js\";\nexport * as utils__json_schema from \"../utils/json_schema.js\";\nexport * as utils__math from \"../utils/math.js\";\nexport * as utils__stream from \"../utils/stream.js\";\nexport * as utils__testing from \"../utils/testing/index.js\";\nexport * as utils__tiktoken from 
\"../utils/tiktoken.js\";\nexport * as utils__types from \"../utils/types/index.js\";\nexport * as vectorstores from \"../vectorstores.js\";\n"],"mappings":""}
@@ -36,6 +36,7 @@ import { tiktoken_exports } from "../utils/tiktoken.js";
36
36
  import { base_exports as base_exports$4 } from "../language_models/base.js";
37
37
  import { chat_models_exports } from "../language_models/chat_models.js";
38
38
  import { llms_exports } from "../language_models/llms.js";
39
+ import { profile_exports } from "../language_models/profile.js";
39
40
  import { runnables_exports } from "../runnables/index.js";
40
41
  import { json_patch_exports } from "../utils/json_patch.js";
41
42
  import { output_parsers_exports } from "../output_parsers/index.js";
@@ -74,6 +75,7 @@ __export(import_map_exports, {
74
75
  language_models__base: () => base_exports$4,
75
76
  language_models__chat_models: () => chat_models_exports,
76
77
  language_models__llms: () => llms_exports,
78
+ language_models__profile: () => profile_exports,
77
79
  load__serializable: () => serializable_exports,
78
80
  memory: () => memory_exports,
79
81
  messages: () => messages_exports,
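These import-map entries register the profile module with the package's load/serialization layer under the `language_models__profile` key (the `__` separator mirrors a `/` in the entrypoint path). As a closing illustration, a consumer-side guard built on the documented `toolCalling` and `toolChoice` flags; all names below are hypothetical user code, not package APIs:

```typescript
import type { ModelProfile } from "@langchain/core/language_models/profile";

// User-land guard: refuse to bind tools to a model whose profile does not
// advertise tool-calling support; warn when forced tool choice is unavailable.
function assertToolCapable(profile: ModelProfile): void {
  if (!profile.toolCalling) {
    throw new Error("Model profile does not advertise tool calling");
  }
  if (!profile.toolChoice) {
    console.warn("toolChoice unsupported; forced tool selection unavailable");
  }
}

assertToolCapable({ toolCalling: true, toolChoice: false }); // warns only
```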