@langchain/deepseek 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @langchain/deepseek
 
+ ## 1.0.1
+
+ ### Patch Changes
+
+ - [#9387](https://github.com/langchain-ai/langchainjs/pull/9387) [`ac0d4fe`](https://github.com/langchain-ai/langchainjs/commit/ac0d4fe3807e05eb2185ae8a36da69498e6163d4) Thanks [@hntrl](https://github.com/hntrl)! - Add `ModelProfile` and `.profile` properties to ChatModel
+
+ - Updated dependencies [[`04bd55c`](https://github.com/langchain-ai/langchainjs/commit/04bd55c63d8a0cb56f85da0b61a6bd6169b383f3), [`ac0d4fe`](https://github.com/langchain-ai/langchainjs/commit/ac0d4fe3807e05eb2185ae8a36da69498e6163d4), [`39dbe63`](https://github.com/langchain-ai/langchainjs/commit/39dbe63e3d8390bb90bb8b17f00755fa648c5651), [`dfbe45f`](https://github.com/langchain-ai/langchainjs/commit/dfbe45f3cfade7a1dbe15b2d702a8e9f8e5ac93a)]:
+   - @langchain/openai@1.1.1
+
  ## 1.0.0
 
  This release updates the package for compatibility with LangChain v1.0. See the v1.0 [release notes](https://docs.langchain.com/oss/javascript/releases/langchain-v1) for details on what's new.
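
The headline change in this patch is the new `profile` getter on `ChatDeepSeek`, whose compiled output appears in the diffs below. As a quick orientation, here is a minimal sketch of reading the property, based on the JSDoc `@example` shipped in this release (it assumes `DEEPSEEK_API_KEY` is set, since the constructor throws without an API key; the commented values come from that example and may differ per model):

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

// Reading the capability profile introduced in 1.0.1.
// Assumes DEEPSEEK_API_KEY is set in the environment.
const model = new ChatDeepSeek({ model: "deepseek-chat" });
const profile = model.profile;
console.log(profile.maxInputTokens); // 128000, per the JSDoc example
console.log(profile.imageInputs); // false, per the JSDoc example
```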
package/dist/chat_models.cjs CHANGED
@@ -1,4 +1,5 @@
  const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
+ const require_profiles = require('./profiles.cjs');
  const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
  const __langchain_openai = require_rolldown_runtime.__toESM(require("@langchain/openai"));
 
@@ -391,6 +392,26 @@ var ChatDeepSeek = class extends __langchain_openai.ChatOpenAICompletions {
  langChainMessage.additional_kwargs.reasoning_content = message.reasoning_content;
  return langChainMessage;
  }
+ /**
+  * Return profiling information for the model.
+  *
+  * Provides information about the model's capabilities and constraints,
+  * including token limits, multimodal support, and advanced features like
+  * tool calling and structured output.
+  *
+  * @returns {ModelProfile} An object describing the model's capabilities and constraints
+  *
+  * @example
+  * ```typescript
+  * const model = new ChatDeepSeek({ model: "deepseek-chat" });
+  * const profile = model.profile;
+  * console.log(profile.maxInputTokens); // 128000
+  * console.log(profile.imageInputs); // false
+  * ```
+  */
+ get profile() {
+   return require_profiles.default[this.model] ?? {};
+ }
  withStructuredOutput(outputSchema, config) {
  const ensuredConfig = { ...config };
  if (ensuredConfig?.method === void 0) ensuredConfig.method = "functionCalling";
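
The getter itself is a plain lookup into the generated `profiles.cjs` table, keyed by model name and falling back to `{}` for unknown models. A hedged sketch of the shape that lookup implies — the field names come from the JSDoc example above, while the table contents and the `ModelProfileSketch` type below are illustrative stand-ins, not the package's actual data:

```typescript
// Hypothetical stand-in for the generated profiles table; the real
// ModelProfile type lives in @langchain/core/language_models/profile.
interface ModelProfileSketch {
  maxInputTokens?: number;
  imageInputs?: boolean;
}

const PROFILES: Record<string, ModelProfileSketch> = {
  // Placeholder entry; values mirror the JSDoc example, not published data.
  "deepseek-chat": { maxInputTokens: 128000, imageInputs: false },
};

function profileFor(model: string): ModelProfileSketch {
  // Mirrors `require_profiles.default[this.model] ?? {}`: unknown model
  // names resolve to an empty profile rather than throwing.
  return PROFILES[model] ?? {};
}

console.log(profileFor("deepseek-chat").maxInputTokens); // 128000
console.log(profileFor("some-future-model")); // {}
```

The tail of the hunk also shows `withStructuredOutput` defaulting `method` to `"functionCalling"`; per the comment in the TypeScript source embedded in the source map, DeepSeek does not support JSON-schema structured output yet.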
package/dist/chat_models.cjs.map CHANGED
@@ -1 +1 @@
- [single-line source map for chat_models.cjs; full JSON omitted]
+ [regenerated single-line source map: adds "PROFILES" to the names table and covers the new `profile` getter in the embedded chat_models.ts source; full JSON omitted]
package/dist/chat_models.d.cts CHANGED
@@ -1,6 +1,7 @@
  import * as _langchain_core_messages0 from "@langchain/core/messages";
  import { BaseMessage } from "@langchain/core/messages";
  import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
+ import { ModelProfile } from "@langchain/core/language_models/profile";
  import { Runnable } from "@langchain/core/runnables";
  import { InteropZodType } from "@langchain/core/utils/types";
  import { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from "@langchain/openai";
@@ -405,8 +406,26 @@ declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions
  constructor(fields?: Partial<ChatDeepSeekInput>);
  protected _convertCompletionsDeltaToBaseMessageChunk(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): _langchain_core_messages0.AIMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.ChatMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.FunctionMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.HumanMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.SystemMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.ToolMessageChunk<_langchain_core_messages0.MessageStructure>;
+ delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): _langchain_core_messages0.BaseMessageChunk<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>;
  protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>;
+ /**
+  * Return profiling information for the model.
+  *
+  * Provides information about the model's capabilities and constraints,
+  * including token limits, multimodal support, and advanced features like
+  * tool calling and structured output.
+  *
+  * @returns {ModelProfile} An object describing the model's capabilities and constraints
+  *
+  * @example
+  * ```typescript
+  * const model = new ChatDeepSeek({ model: "deepseek-chat" });
+  * const profile = model.profile;
+  * console.log(profile.maxInputTokens); // 128000
+  * console.log(profile.imageInputs); // false
+  * ```
+  */
+ get profile(): ModelProfile;
  withStructuredOutput<
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>
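
The declaration types the getter as `ModelProfile`, so capability checks become statically visible to consumers. A small hedged sketch of that gating pattern (again assuming `DEEPSEEK_API_KEY` is set; `imageInputs` is one of the fields named in the JSDoc example):

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

const llm = new ChatDeepSeek({ model: "deepseek-chat" });

// Branch on the declared profile instead of hard-coding per-model
// knowledge into application code.
if (llm.profile.imageInputs) {
  // Safe to attach image content blocks to messages.
} else {
  console.log("Model does not accept image inputs; sending text only.");
}
```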
package/dist/chat_models.d.cts.map CHANGED
@@ -1 +1 @@
- [single-line declaration map for chat_models.d.cts; full JSON omitted]
+ [regenerated single-line declaration map: adds "ModelProfile" and "BaseMessageChunk" to the names table and covers the new `profile` getter; full JSON omitted]
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").BaseMessageChunk<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | 
Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n"],"mappings":";;;;;;;;;UAMiBU,uBAAAA,SAAgCJ;YACnCK;;AADGD,UAGAE,iBAAAA,SAA0BJ,gBAHH,CAAA;EAAA;;;AAA8B;EAGrDI,MAAAA,CAAAA,EAAAA,MAAAA;EAAiB;;;EAoBT,KApBkBJ,CAAAA,EAAAA,MAAAA;EAAgB;AAiY3D;;;;EAQkD,IAAzBO,CAAAA,EA1XdF,KA0XcE,CAAAA,MAAAA,CAAAA;EAAO;;;;EAG0R,aAAAE,CAAAA,EAxXtSJ,KAwXsSI,CAAAA,MAAAA,CAApHG;EAAgB;;;EACT,SAAAH,CAAAA,EAAAA,OAAAA;EAAgD;;;EAqBjO,WAAgBN,CAAAA,EAAAA,MAAAA;EAAM;;;;EAEgB,SAAmBX,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;AAnCtB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1Cc,YAAAA,SAAqBP,sBAAsBG;;;;;;;;uBAQvCK,QAAQH;;;SAGtBD,kCAAkCF,YAAAA,CAAaO,yGAAgQC,yBAAAA,CAApHG,iBAAzHH,yBAAAA,CAA6KC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;6DAChPV,YAAAA,CAAaY,oCAAoCZ,YAAAA,CAAaa,iBAAiBnB,YAAHc,yBAAAA,CAAkDC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;;;;;;;;;;;;;iBAkB/NjB;;;oBAGGS,sBAAsBA,mCAAmCN,eAAekB;;IAEvFZ,8BAA8BV,uCAAuCG,SAASJ,wBAAwBuB;;;oBAGvFZ,sBAAsBA,mCAAmCN,eAAekB;;IAEvFZ,8BAA8BV,sCAAsCG,SAASJ;SACvEG;YACGoB;;;;oBAIMZ,sBAAsBA,mCAAmCN,eAAekB;;IAEvFZ,8BAA8BV,yCAAyCG,SAASJ,wBAAwBuB,aAAanB,SAASJ;SACxHG;YACGoB"}
@@ -2,6 +2,7 @@ import { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIC
2
2
  import * as _langchain_core_messages0 from "@langchain/core/messages";
3
3
  import { BaseMessage } from "@langchain/core/messages";
4
4
  import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
5
+ import { ModelProfile } from "@langchain/core/language_models/profile";
5
6
  import { Runnable } from "@langchain/core/runnables";
6
7
  import { InteropZodType } from "@langchain/core/utils/types";
7
8
 
@@ -405,8 +406,26 @@ declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions
405
406
  constructor(fields?: Partial<ChatDeepSeekInput>);
406
407
  protected _convertCompletionsDeltaToBaseMessageChunk(
407
408
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
408
- delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): _langchain_core_messages0.AIMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.ChatMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.FunctionMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.HumanMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.SystemMessageChunk<_langchain_core_messages0.MessageStructure> | _langchain_core_messages0.ToolMessageChunk<_langchain_core_messages0.MessageStructure>;
409
+ delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): _langchain_core_messages0.BaseMessageChunk<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>;
409
410
  protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>;
411
+ /**
412
+ * Return profiling information for the model.
413
+ *
414
+ * Provides information about the model's capabilities and constraints,
415
+ * including token limits, multimodal support, and advanced features like
416
+ * tool calling and structured output.
417
+ *
418
+ * @returns {ModelProfile} An object describing the model's capabilities and constraints
419
+ *
420
+ * @example
421
+ * ```typescript
422
+ * const model = new ChatDeepSeek({ model: "deepseek-chat" });
423
+ * const profile = model.profile;
424
+ * console.log(profile.maxInputTokens); // 128000
425
+ * console.log(profile.imageInputs); // false
426
+ * ```
427
+ */
428
+ get profile(): ModelProfile;
410
429
  withStructuredOutput<
411
430
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
412
431
  RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>
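
The two declaration hunks above are the whole type-level surface of this release: a new `ModelProfile` import from `@langchain/core/language_models/profile` and a `get profile(): ModelProfile` accessor on `ChatDeepSeek`. As a rough consumer-side sketch (not part of the diff): the `maxInputTokens` and `imageInputs` field names are taken from the JSDoc example above, and both are treated as possibly missing because the runtime falls back to an empty profile for unrecognized model names.

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

// Requires DEEPSEEK_API_KEY in the environment, as in the JSDoc examples.
const model = new ChatDeepSeek({ model: "deepseek-chat" });
const profile = model.profile;

// Guard a prompt against the advertised context window. When the model name
// is not in the bundled table the profile is empty, so the check degrades to
// "allow" instead of throwing.
function fitsContext(promptTokens: number): boolean {
  return profile.maxInputTokens === undefined
    ? true
    : promptTokens <= profile.maxInputTokens;
}

if (!profile.imageInputs) {
  // e.g. strip image content blocks before invoking deepseek-chat
}
```
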
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.d.ts","names":["BaseLanguageModelInput","StructuredOutputMethodOptions","BaseMessage","Runnable","InteropZodType","ChatOpenAICallOptions","ChatOpenAICompletions","ChatOpenAIFields","OpenAIClient","ChatDeepSeekCallOptions","Record","ChatDeepSeekInput","Array","ChatDeepSeek","Partial","ChatCompletionChunk","_langchain_core_messages0","MessageStructure","AIMessageChunk","ChatMessageChunk","FunctionMessageChunk","HumanMessageChunk","SystemMessageChunk","ToolMessageChunk","ChatCompletionMessage","ChatCompletion","MessageType","RunOutput"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from \"@langchain/openai\";\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").AIMessageChunk<import(\"@langchain/core/messages\").MessageStructure> | import(\"@langchain/core/messages\").ChatMessageChunk<import(\"@langchain/core/messages\").MessageStructure> | import(\"@langchain/core/messages\").FunctionMessageChunk<import(\"@langchain/core/messages\").MessageStructure> | import(\"@langchain/core/messages\").HumanMessageChunk<import(\"@langchain/core/messages\").MessageStructure> | import(\"@langchain/core/messages\").SystemMessageChunk<import(\"@langchain/core/messages\").MessageStructure> | import(\"@langchain/core/messages\").ToolMessageChunk<import(\"@langchain/core/messages\").MessageStructure>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n 
withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n"],"mappings":";;;;;;;;UAKiBS,uBAAAA,SAAgCJ;YACnCK;;AADGD,UAGAE,iBAAAA,SAA0BJ,gBAHH,CAAA;EAAA;;;AAA8B;EAGrDI,MAAAA,CAAAA,EAAAA,MAAAA;EAAiB;;;EAoBT,KApBkBJ,CAAAA,EAAAA,MAAAA;EAAgB;AAiY3D;;;;EAQkD,IAAzBO,CAAAA,EA1XdF,KA0XcE,CAAAA,MAAAA,CAAAA;EAAO;;;;EAGoL,aAAAE,CAAAA,EAxXhMJ,KAwXgMI,CAAAA,MAAAA,CAA+IC;EAAgB;;;EAA2D,SAAAD,CAAAA,EAAAA,OAAAA;EAAgK;;;EAAyD,WAAAA,CAAAA,EAAAA,MAAAA;EAA+J;;;;EACzlB,SAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAZ9I;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1CH,YAAAA,SAAqBP,sBAAsBG;;;;;;;;uBAQvCK,QAAQH;;;SAGtBD,kCAAkCF,YAAAA,CAAaO,yGAA8MC,yBAAAA,CAAlEE,eAAzHF,yBAAAA,CAA2KC,gBAAAA,IAA2HD,yBAAAA,CAApEG,iBAA3FH,yBAAAA,CAA+IC,gBAAAA,IAA+HD,yBAAAA,CAAxEI,qBAA3FJ,yBAAAA,CAAmJC,gBAAAA,IAA4HD,yBAAAA,CAArEK,kBAA3FL,yBAAAA,CAAgJC,gBAAAA,IAA6HD,yBAAAA,CAAtEM,mBAA3FN,yBAAAA,CAAiJC,gBAAAA,IAA2HD,yBAAAA,CAApEO,iBAA3FP,yBAAAA,CAA+IC,gBAAAA;6DACvtBT,YAAAA,CAAagB,oCAAoChB,YAAAA,CAAaiB,iBAAiBvB,YAAHc,yBAAAA,CAAkDC,gBAAAA,EAAgBD,yBAAAA,CAAqCU,WAAAA;;;oBAG5NhB,sBAAsBA,mCAAmCN,eAAeuB;;IAEvFjB,8BAA8BT,uCAAuCE,SAASH,wBAAwB2B;;;oBAGvFjB,sBAAsBA,mCAAmCN,eAAeuB;;IAEvFjB,8BAA8BT,sCAAsCE,SAASH;SACvEE;YACGyB;;;;oBAIMjB,sBAAsBA,mCAAmCN,eAAeuB;;IAEvFjB,8BAA8BT,yCAAyCE,SAASH,wBAAwB2B,aAAaxB,SAASH;SACxHE;YACGyB"}
1
+ {"version":3,"file":"chat_models.d.ts","names":["BaseLanguageModelInput","StructuredOutputMethodOptions","ModelProfile","BaseMessage","Runnable","InteropZodType","ChatOpenAICallOptions","ChatOpenAICompletions","ChatOpenAIFields","OpenAIClient","ChatDeepSeekCallOptions","Record","ChatDeepSeekInput","Array","ChatDeepSeek","Partial","ChatCompletionChunk","_langchain_core_messages0","MessageStructure","MessageType","BaseMessageChunk","ChatCompletionMessage","ChatCompletion","RunOutput"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from \"@langchain/openai\";\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").BaseMessageChunk<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | 
Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n"],"mappings":";;;;;;;;;UAMiBU,uBAAAA,SAAgCJ;YACnCK;;AADGD,UAGAE,iBAAAA,SAA0BJ,gBAHH,CAAA;EAAA;;;AAA8B;EAGrDI,MAAAA,CAAAA,EAAAA,MAAAA;EAAiB;;;EAoBT,KApBkBJ,CAAAA,EAAAA,MAAAA;EAAgB;AAiY3D;;;;EAQkD,IAAzBO,CAAAA,EA1XdF,KA0XcE,CAAAA,MAAAA,CAAAA;EAAO;;;;EAG0R,aAAAE,CAAAA,EAxXtSJ,KAwXsSI,CAAAA,MAAAA,CAApHG;EAAgB;;;EACT,SAAAH,CAAAA,EAAAA,OAAAA;EAAgD;;;EAqBjO,WAAgBN,CAAAA,EAAAA,MAAAA;EAAM;;;;EAEgB,SAAmBX,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;AAnCtB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1Cc,YAAAA,SAAqBP,sBAAsBG;;;;;;;;uBAQvCK,QAAQH;;;SAGtBD,kCAAkCF,YAAAA,CAAaO,yGAAgQC,yBAAAA,CAApHG,iBAAzHH,yBAAAA,CAA6KC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;6DAChPV,YAAAA,CAAaY,oCAAoCZ,YAAAA,CAAaa,iBAAiBnB,YAAHc,yBAAAA,CAAkDC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;;;;;;;;;;;;;iBAkB/NjB;;;oBAGGS,sBAAsBA,mCAAmCN,eAAekB;;IAEvFZ,8BAA8BV,uCAAuCG,SAASJ,wBAAwBuB;;;oBAGvFZ,sBAAsBA,mCAAmCN,eAAekB;;IAEvFZ,8BAA8BV,sCAAsCG,SAASJ;SACvEG;YACGoB;;;;oBAIMZ,sBAAsBA,mCAAmCN,eAAekB;;IAEvFZ,8BAA8BV,yCAAyCG,SAASJ,wBAAwBuB,aAAanB,SAASJ;SACxHG;YACGoB"}
@@ -1,3 +1,4 @@
1
+ import profiles_default from "./profiles.js";
1
2
  import { getEnvironmentVariable } from "@langchain/core/utils/env";
2
3
  import { ChatOpenAICompletions } from "@langchain/openai";
3
4
 
@@ -390,6 +391,26 @@ var ChatDeepSeek = class extends ChatOpenAICompletions {
390
391
  langChainMessage.additional_kwargs.reasoning_content = message.reasoning_content;
391
392
  return langChainMessage;
392
393
  }
394
+ /**
395
+ * Return profiling information for the model.
396
+ *
397
+ * Provides information about the model's capabilities and constraints,
398
+ * including token limits, multimodal support, and advanced features like
399
+ * tool calling and structured output.
400
+ *
401
+ * @returns {ModelProfile} An object describing the model's capabilities and constraints
402
+ *
403
+ * @example
404
+ * ```typescript
405
+ * const model = new ChatDeepSeek({ model: "deepseek-chat" });
406
+ * const profile = model.profile;
407
+ * console.log(profile.maxInputTokens); // 128000
408
+ * console.log(profile.imageInputs); // false
409
+ * ```
410
+ */
411
+ get profile() {
412
+ return profiles_default[this.model] ?? {};
413
+ }
393
414
  withStructuredOutput(outputSchema, config) {
394
415
  const ensuredConfig = { ...config };
395
416
  if (ensuredConfig?.method === void 0) ensuredConfig.method = "functionCalling";
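
The runtime half of the change is above: the getter indexes a table bundled as `./profiles.js` by model name and falls back to an empty object. The table itself is not shown in this diff, so the sketch below is only the shape implied by `profiles_default[this.model] ?? {}`; the `deepseek-chat` values echo the JSDoc example and are illustrative, not copied from the published module.

```typescript
// Hypothetical profiles module; the real profiles.js in the published
// package may differ in both keys and values.
import type { ModelProfile } from "@langchain/core/language_models/profile";

const profiles: Record<string, ModelProfile> = {
  "deepseek-chat": {
    maxInputTokens: 128000, // value shown in the JSDoc example
    imageInputs: false, // value shown in the JSDoc example
  },
};

export default profiles;

// Unknown model names resolve to an empty profile, mirroring the getter's
// `?? {}` fallback:
const unknownModelProfile: ModelProfile = profiles["some-other-model"] ?? {};
```

The surrounding `withStructuredOutput` context is unchanged by this release: `method` still defaults to `"functionCalling"` when not specified.
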
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.js","names":["fields?: Partial<ChatDeepSeekInput>","delta: Record<string, any>","rawResponse: OpenAIClient.ChatCompletionChunk","defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"","message: OpenAIClient.ChatCompletionMessage","rawResponse: OpenAIClient.ChatCompletion","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport {\n ChatOpenAICallOptions,\n ChatOpenAICompletions,\n ChatOpenAIFields,\n OpenAIClient,\n} from \"@langchain/openai\";\n\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\n\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name() {\n return \"ChatDeepSeek\";\n }\n\n _llmType() {\n return \"deepseek\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"DEEPSEEK_API_KEY\",\n };\n }\n\n lc_serializable = true;\n\n lc_namespace = [\"langchain\", \"chat_models\", \"deepseek\"];\n\n constructor(fields?: Partial<ChatDeepSeekInput>) {\n const apiKey = fields?.apiKey || getEnvironmentVariable(\"DEEPSEEK_API_KEY\");\n if (!apiKey) {\n throw new Error(\n `Deepseek API key not found. 
Please set the DEEPSEEK_API_KEY environment variable or pass the key into \"apiKey\" field.`\n );\n }\n\n super({\n ...fields,\n apiKey,\n configuration: {\n baseURL: \"https://api.deepseek.com\",\n ...fields?.configuration,\n },\n });\n }\n\n protected override _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>,\n rawResponse: OpenAIClient.ChatCompletionChunk,\n defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"\n ) {\n const messageChunk = super._convertCompletionsDeltaToBaseMessageChunk(\n delta,\n rawResponse,\n defaultRole\n );\n messageChunk.additional_kwargs.reasoning_content = delta.reasoning_content;\n return messageChunk;\n }\n\n protected override _convertCompletionsMessageToBaseMessage(\n message: OpenAIClient.ChatCompletionMessage,\n rawResponse: OpenAIClient.ChatCompletion\n ) {\n const langChainMessage = super._convertCompletionsMessageToBaseMessage(\n message,\n rawResponse\n );\n langChainMessage.additional_kwargs.reasoning_content =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (message as any).reasoning_content;\n return langChainMessage;\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n const ensuredConfig = { ...config };\n // Deepseek does not support json schema yet\n if (ensuredConfig?.method === undefined) {\n ensuredConfig.method = \"functionCalling\";\n }\n return super.withStructuredOutput<RunOutput>(outputSchema, ensuredConfig);\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqZA,IAAa,eAAb,cAAkC,sBAA+C;CAC/E,OAAO,UAAU;AACf,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,mBACT;CACF;CAED,kBAAkB;CAElB,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,YAAYA,QAAqC;EAC/C,MAAM,SAAS,QAAQ,UAAU,uBAAuB,mBAAmB;AAC3E,MAAI,CAAC,OACH,OAAM,IAAI,MACR,CAAC,qHAAqH,CAAC;EAI3H,MAAM;GACJ,GAAG;GACH;GACA,eAAe;IACb,SAAS;IACT,GAAG,QAAQ;GACZ;EACF,EAAC;CACH;CAED,AAAmB,2CAEjBC,OACAC,aACAC,aAOA;EACA,MAAM,eAAe,MAAM,2CACzB,OACA,aACA,YACD;EACD,aAAa,kBAAkB,oBAAoB,MAAM;AACzD,SAAO;CACR;CAED,AAAmB,wCACjBC,SACAC,aACA;EACA,MAAM,mBAAmB,MAAM,wCAC7B,SACA,YACD;EACD,iBAAiB,kBAAkB,oBAEhC,QAAgB;AACnB,SAAO;CACR;CAqCD,qBAIEC,cAIAC,QAMI;EACJ,MAAM,gBAAgB,EAAE,GAAG,OAAQ;AAEnC,MAAI,eAAe,WAAW,QAC5B,cAAc,SAAS;AAEzB,SAAO,MAAM,qBAAgC,cAAc,cAAc;CAC1E;AACF"}
+ {"version":3,"file":"chat_models.js","names":["fields?: Partial<ChatDeepSeekInput>","delta: Record<string, any>","rawResponse: OpenAIClient.ChatCompletionChunk","defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"","message: OpenAIClient.ChatCompletionMessage","rawResponse: OpenAIClient.ChatCompletion","PROFILES","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport {\n ChatOpenAICallOptions,\n ChatOpenAICompletions,\n ChatOpenAIFields,\n OpenAIClient,\n} from \"@langchain/openai\";\nimport PROFILES from \"./profiles.js\";\n\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\n\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name() {\n return \"ChatDeepSeek\";\n }\n\n _llmType() {\n return \"deepseek\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"DEEPSEEK_API_KEY\",\n };\n }\n\n lc_serializable = true;\n\n lc_namespace = [\"langchain\", \"chat_models\", \"deepseek\"];\n\n constructor(fields?: Partial<ChatDeepSeekInput>) {\n const apiKey = fields?.apiKey || getEnvironmentVariable(\"DEEPSEEK_API_KEY\");\n if (!apiKey) {\n throw new Error(\n `Deepseek API key not found. 
Please set the DEEPSEEK_API_KEY environment variable or pass the key into \"apiKey\" field.`\n );\n }\n\n super({\n ...fields,\n apiKey,\n configuration: {\n baseURL: \"https://api.deepseek.com\",\n ...fields?.configuration,\n },\n });\n }\n\n protected override _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>,\n rawResponse: OpenAIClient.ChatCompletionChunk,\n defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"\n ) {\n const messageChunk = super._convertCompletionsDeltaToBaseMessageChunk(\n delta,\n rawResponse,\n defaultRole\n );\n messageChunk.additional_kwargs.reasoning_content = delta.reasoning_content;\n return messageChunk;\n }\n\n protected override _convertCompletionsMessageToBaseMessage(\n message: OpenAIClient.ChatCompletionMessage,\n rawResponse: OpenAIClient.ChatCompletion\n ) {\n const langChainMessage = super._convertCompletionsMessageToBaseMessage(\n message,\n rawResponse\n );\n langChainMessage.additional_kwargs.reasoning_content =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (message as any).reasoning_content;\n return langChainMessage;\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n const ensuredConfig = { ...config };\n // Deepseek does not support json schema yet\n if (ensuredConfig?.method === undefined) {\n ensuredConfig.method = \"functionCalling\";\n }\n return super.withStructuredOutput<RunOutput>(outputSchema, ensuredConfig);\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuZA,IAAa,eAAb,cAAkC,sBAA+C;CAC/E,OAAO,UAAU;AACf,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,mBACT;CACF;CAED,kBAAkB;CAElB,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,YAAYA,QAAqC;EAC/C,MAAM,SAAS,QAAQ,UAAU,uBAAuB,mBAAmB;AAC3E,MAAI,CAAC,OACH,OAAM,IAAI,MACR,CAAC,qHAAqH,CAAC;EAI3H,MAAM;GACJ,GAAG;GACH;GACA,eAAe;IACb,SAAS;IACT,GAAG,QAAQ;GACZ;EACF,EAAC;CACH;CAED,AAAmB,2CAEjBC,OACAC,aACAC,aAOA;EACA,MAAM,eAAe,MAAM,2CACzB,OACA,aACA,YACD;EACD,aAAa,kBAAkB,oBAAoB,MAAM;AACzD,SAAO;CACR;CAED,AAAmB,wCACjBC,SACAC,aACA;EACA,MAAM,mBAAmB,MAAM,wCAC7B,SACA,YACD;EACD,iBAAiB,kBAAkB,oBAEhC,QAAgB;AACnB,SAAO;CACR;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,iBAAS,KAAK,UAAU,CAAE;CAClC;CAqCD,qBAIEC,cAIAC,QAMI;EACJ,MAAM,gBAAgB,EAAE,GAAG,OAAQ;AAEnC,MAAI,eAAe,WAAW,QAC5B,cAAc,SAAS;AAEzB,SAAO,MAAM,qBAAgC,cAAc,cAAc;CAC1E;AACF"}
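The four hunks that follow add the generated profile table in both module formats (a CommonJS `profiles.cjs` and an ESM `profiles.js`, each with its sourcemap). The new `profile` getter above resolves against this table with `PROFILES[this.model] ?? {}`. A minimal sketch of what that fallback means for callers — the `apiKey` placeholder and the unlisted model name are illustrative only, not part of the package:

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

// A model name present in the generated table returns its full profile.
const reasoner = new ChatDeepSeek({ model: "deepseek-reasoner", apiKey: "sk-..." });
console.log(reasoner.profile.reasoningOutput); // true
console.log(reasoner.profile.maxOutputTokens); // 128000

// A model name missing from the table falls through `?? {}` to an empty
// profile, so capability fields read as undefined instead of throwing.
const unlisted = new ChatDeepSeek({ model: "my-custom-deployment", apiKey: "sk-..." });
console.log(unlisted.profile.maxInputTokens); // undefined
```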
@@ -0,0 +1,37 @@
+
+ //#region src/profiles.ts
+ const PROFILES = {
+   "deepseek-chat": {
+     maxInputTokens: 128e3,
+     imageInputs: false,
+     audioInputs: false,
+     pdfInputs: false,
+     videoInputs: false,
+     maxOutputTokens: 8192,
+     reasoningOutput: false,
+     imageOutputs: false,
+     audioOutputs: false,
+     videoOutputs: false,
+     toolCalling: true,
+     structuredOutput: false
+   },
+   "deepseek-reasoner": {
+     maxInputTokens: 128e3,
+     imageInputs: false,
+     audioInputs: false,
+     pdfInputs: false,
+     videoInputs: false,
+     maxOutputTokens: 128e3,
+     reasoningOutput: true,
+     imageOutputs: false,
+     audioOutputs: false,
+     videoOutputs: false,
+     toolCalling: true,
+     structuredOutput: false
+   }
+ };
+ var profiles_default = PROFILES;
+
+ //#endregion
+ exports.default = profiles_default;
+ //# sourceMappingURL=profiles.cjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"profiles.cjs","names":["PROFILES: Record<string, ModelProfile>"],"sources":["../src/profiles.ts"],"sourcesContent":["/**\n * This file was automatically generated by an automated script. Do not edit manually.\n */\nimport type { ModelProfile } from \"@langchain/core/language_models/profile\";\nconst PROFILES: Record<string, ModelProfile> = {\n \"deepseek-chat\": {\n maxInputTokens: 128000,\n imageInputs: false,\n audioInputs: false,\n pdfInputs: false,\n videoInputs: false,\n maxOutputTokens: 8192,\n reasoningOutput: false,\n imageOutputs: false,\n audioOutputs: false,\n videoOutputs: false,\n toolCalling: true,\n structuredOutput: false,\n },\n \"deepseek-reasoner\": {\n maxInputTokens: 128000,\n imageInputs: false,\n audioInputs: false,\n pdfInputs: false,\n videoInputs: false,\n maxOutputTokens: 128000,\n reasoningOutput: true,\n imageOutputs: false,\n audioOutputs: false,\n videoOutputs: false,\n toolCalling: true,\n structuredOutput: false,\n },\n};\nexport default PROFILES;\n"],"mappings":";;AAIA,MAAMA,WAAyC;CAC7C,iBAAiB;EACf,gBAAgB;EAChB,aAAa;EACb,aAAa;EACb,WAAW;EACX,aAAa;EACb,iBAAiB;EACjB,iBAAiB;EACjB,cAAc;EACd,cAAc;EACd,cAAc;EACd,aAAa;EACb,kBAAkB;CACnB;CACD,qBAAqB;EACnB,gBAAgB;EAChB,aAAa;EACb,aAAa;EACb,WAAW;EACX,aAAa;EACb,iBAAiB;EACjB,iBAAiB;EACjB,cAAc;EACd,cAAc;EACd,cAAc;EACd,aAAa;EACb,kBAAkB;CACnB;AACF;AACD,uBAAe"}
@@ -0,0 +1,36 @@
+ //#region src/profiles.ts
+ const PROFILES = {
+   "deepseek-chat": {
+     maxInputTokens: 128e3,
+     imageInputs: false,
+     audioInputs: false,
+     pdfInputs: false,
+     videoInputs: false,
+     maxOutputTokens: 8192,
+     reasoningOutput: false,
+     imageOutputs: false,
+     audioOutputs: false,
+     videoOutputs: false,
+     toolCalling: true,
+     structuredOutput: false
+   },
+   "deepseek-reasoner": {
+     maxInputTokens: 128e3,
+     imageInputs: false,
+     audioInputs: false,
+     pdfInputs: false,
+     videoInputs: false,
+     maxOutputTokens: 128e3,
+     reasoningOutput: true,
+     imageOutputs: false,
+     audioOutputs: false,
+     videoOutputs: false,
+     toolCalling: true,
+     structuredOutput: false
+   }
+ };
+ var profiles_default = PROFILES;
+
+ //#endregion
+ export { profiles_default as default };
+ //# sourceMappingURL=profiles.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"profiles.js","names":["PROFILES: Record<string, ModelProfile>"],"sources":["../src/profiles.ts"],"sourcesContent":["/**\n * This file was automatically generated by an automated script. Do not edit manually.\n */\nimport type { ModelProfile } from \"@langchain/core/language_models/profile\";\nconst PROFILES: Record<string, ModelProfile> = {\n \"deepseek-chat\": {\n maxInputTokens: 128000,\n imageInputs: false,\n audioInputs: false,\n pdfInputs: false,\n videoInputs: false,\n maxOutputTokens: 8192,\n reasoningOutput: false,\n imageOutputs: false,\n audioOutputs: false,\n videoOutputs: false,\n toolCalling: true,\n structuredOutput: false,\n },\n \"deepseek-reasoner\": {\n maxInputTokens: 128000,\n imageInputs: false,\n audioInputs: false,\n pdfInputs: false,\n videoInputs: false,\n maxOutputTokens: 128000,\n reasoningOutput: true,\n imageOutputs: false,\n audioOutputs: false,\n videoOutputs: false,\n toolCalling: true,\n structuredOutput: false,\n },\n};\nexport default PROFILES;\n"],"mappings":";AAIA,MAAMA,WAAyC;CAC7C,iBAAiB;EACf,gBAAgB;EAChB,aAAa;EACb,aAAa;EACb,WAAW;EACX,aAAa;EACb,iBAAiB;EACjB,iBAAiB;EACjB,cAAc;EACd,cAAc;EACd,cAAc;EACd,aAAa;EACb,kBAAkB;CACnB;CACD,qBAAqB;EACnB,gBAAgB;EAChB,aAAa;EACb,aAAa;EACb,WAAW;EACX,aAAa;EACb,iBAAiB;EACjB,iBAAiB;EACjB,cAAc;EACd,cAAc;EACd,cAAc;EACd,aAAa;EACb,kBAAkB;CACnB;AACF;AACD,uBAAe"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@langchain/deepseek",
-   "version": "1.0.0",
+   "version": "1.0.1",
    "description": "Deepseek integration for LangChain.js",
    "type": "module",
    "author": "LangChain",
@@ -14,7 +14,7 @@
    },
    "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-deepseek",
    "dependencies": {
-     "@langchain/openai": "^1.0.0"
+     "@langchain/openai": "^1.1.1"
    },
    "peerDependencies": {
      "@langchain/core": "^1.0.0"
@@ -30,8 +30,8 @@
      "typescript": "~5.8.3",
      "vitest": "^3.2.4",
      "@langchain/eslint": "0.1.0",
-     "@langchain/core": "1.0.0",
-     "@langchain/standard-tests": "0.0.0"
+     "@langchain/core": "1.0.5",
+     "@langchain/standard-tests": "0.0.1"
    },
    "publishConfig": {
      "access": "public"
@@ -59,7 +59,8 @@
      "LICENSE"
    ],
    "scripts": {
-     "build": "pnpm --filter @langchain/build compile @langchain/deepseek",
+     "build": "turbo build:compile --filter @langchain/deepseek",
+     "build:compile": "pnpm --filter @langchain/build compile @langchain/deepseek",
      "lint:eslint": "eslint --cache src/",
      "lint:dpdm": "dpdm --skip-dynamic-imports circular --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
      "lint": "pnpm lint:eslint && pnpm lint:dpdm",
@@ -72,6 +73,8 @@
      "test:standard:int": "vitest run --mode standard-int",
      "test:standard": "pnpm test:standard:unit && pnpm test:standard:int",
      "format": "prettier --config .prettierrc --write \"src\"",
-     "format:check": "prettier --config .prettierrc --check \"src\""
+     "format:check": "prettier --config .prettierrc --check \"src\"",
+     "typegen": "pnpm run typegen:profiles",
+     "typegen:profiles": "pnpm --filter @langchain/model-profiles make --config profiles.toml"
    }
  }
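The script changes split compilation out of `build` (which now fans out through turbo to the new `build:compile` step) and add a `typegen` entry that regenerates `src/profiles.ts` from `profiles.toml`. In a monorepo checkout where those workspace packages are available, the new entry points would be run roughly as:

```bash
# Regenerate src/profiles.ts from profiles.toml (the table diffed above)
pnpm run typegen

# Build through turbo; turbo delegates to build:compile, which invokes
# the shared @langchain/build compile step for this package
pnpm run build
```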