@langchain/deepseek 1.0.6 → 1.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @langchain/deepseek
 
+ ## 1.0.7
+
+ ### Patch Changes
+
+ - [#9726](https://github.com/langchain-ai/langchainjs/pull/9726) [`1877454`](https://github.com/langchain-ai/langchainjs/commit/1877454e6a501eba7bf36fc088335eaea149c8ce) Thanks [@murataslan1](https://github.com/murataslan1)! - fix(deepseek): parse <think> tags in content stream
+
+ - Updated dependencies []:
+   - @langchain/openai@1.2.3
+
  ## 1.0.6
 
  ### Patch Changes
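
Note on the 1.0.7 patch above: the new parsing takes `<think>…</think>` spans that arrive inline in the streamed answer text and routes them into `additional_kwargs.reasoning_content`, matching how natively reported reasoning is already exposed. A minimal consumer-side sketch (not part of the published diff; the model name and prompt are illustrative):

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

// Assumes DEEPSEEK_API_KEY is set in the environment.
const llm = new ChatDeepSeek({ model: "deepseek-reasoner", temperature: 0 });

const stream = await llm.stream("Why is the sky blue?");
for await (const chunk of stream) {
  // Reasoning that would otherwise pass through as literal "<think>…" text
  // in `content` is surfaced here instead:
  if (chunk.additional_kwargs.reasoning_content) {
    console.log("[reasoning]", chunk.additional_kwargs.reasoning_content);
  }
  // The visible answer keeps streaming through `content`:
  if (typeof chunk.content === "string" && chunk.content) {
    console.log("[content]", chunk.content);
  }
}
```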
@@ -1,7 +1,9 @@
  const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
  const require_profiles = require('./profiles.cjs');
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
  const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
  const __langchain_openai = require_rolldown_runtime.__toESM(require("@langchain/openai"));
+ const __langchain_core_outputs = require_rolldown_runtime.__toESM(require("@langchain/core/outputs"));
 
  //#region src/chat_models.ts
  /**
@@ -391,6 +393,188 @@ var ChatDeepSeek = class extends __langchain_openai.ChatOpenAICompletions {
      };
      return messageChunk;
    }
+   async *_streamResponseChunks(messages, options, runManager) {
+     const stream = super._streamResponseChunks(messages, options, runManager);
+     let tokensBuffer = "";
+     let isThinking = false;
+     for await (const chunk of stream) {
+       if (chunk.message.additional_kwargs.reasoning_content) {
+         yield chunk;
+         continue;
+       }
+       const text = chunk.text;
+       if (!text) {
+         yield chunk;
+         continue;
+       }
+       tokensBuffer += text;
+       if (!isThinking && tokensBuffer.includes("<think>")) {
+         isThinking = true;
+         const thinkIndex = tokensBuffer.indexOf("<think>");
+         const beforeThink = tokensBuffer.substring(0, thinkIndex);
+         const afterThink = tokensBuffer.substring(thinkIndex + 7);
+         tokensBuffer = afterThink || "";
+         if (beforeThink) {
+           const newChunk = new __langchain_core_outputs.ChatGenerationChunk({
+             message: new __langchain_core_messages.AIMessageChunk({
+               content: beforeThink,
+               additional_kwargs: chunk.message.additional_kwargs,
+               response_metadata: chunk.message.response_metadata,
+               tool_calls: chunk.message.tool_calls,
+               tool_call_chunks: chunk.message.tool_call_chunks,
+               id: chunk.message.id
+             }),
+             text: beforeThink,
+             generationInfo: chunk.generationInfo
+           });
+           yield newChunk;
+         }
+       }
+       if (isThinking && tokensBuffer.includes("</think>")) {
+         isThinking = false;
+         const thinkEndIndex = tokensBuffer.indexOf("</think>");
+         const thoughtContent = tokensBuffer.substring(0, thinkEndIndex);
+         const afterThink = tokensBuffer.substring(thinkEndIndex + 8);
+         const reasoningChunk = new __langchain_core_outputs.ChatGenerationChunk({
+           message: new __langchain_core_messages.AIMessageChunk({
+             content: "",
+             additional_kwargs: {
+               ...chunk.message.additional_kwargs,
+               reasoning_content: thoughtContent
+             },
+             response_metadata: chunk.message.response_metadata,
+             tool_calls: chunk.message.tool_calls,
+             tool_call_chunks: chunk.message.tool_call_chunks,
+             id: chunk.message.id
+           }),
+           text: "",
+           generationInfo: chunk.generationInfo
+         });
+         yield reasoningChunk;
+         tokensBuffer = afterThink || "";
+         if (tokensBuffer) {
+           const contentChunk = new __langchain_core_outputs.ChatGenerationChunk({
+             message: new __langchain_core_messages.AIMessageChunk({
+               content: tokensBuffer,
+               additional_kwargs: chunk.message.additional_kwargs,
+               response_metadata: chunk.message.response_metadata,
+               tool_calls: chunk.message.tool_calls,
+               tool_call_chunks: chunk.message.tool_call_chunks,
+               id: chunk.message.id
+             }),
+             text: tokensBuffer,
+             generationInfo: chunk.generationInfo
+           });
+           yield contentChunk;
+           tokensBuffer = "";
+         }
+       } else if (isThinking) {
+         const possibleEndTag = "</think>";
+         let splitIndex = -1;
+         for (let i = 7; i >= 1; i--) if (tokensBuffer.endsWith(possibleEndTag.substring(0, i))) {
+           splitIndex = tokensBuffer.length - i;
+           break;
+         }
+         if (splitIndex !== -1) {
+           const safeToYield = tokensBuffer.substring(0, splitIndex);
+           if (safeToYield) {
+             const reasoningChunk = new __langchain_core_outputs.ChatGenerationChunk({
+               message: new __langchain_core_messages.AIMessageChunk({
+                 content: "",
+                 additional_kwargs: {
+                   ...chunk.message.additional_kwargs,
+                   reasoning_content: safeToYield
+                 },
+                 response_metadata: chunk.message.response_metadata,
+                 tool_calls: chunk.message.tool_calls,
+                 tool_call_chunks: chunk.message.tool_call_chunks,
+                 id: chunk.message.id
+               }),
+               text: "",
+               generationInfo: chunk.generationInfo
+             });
+             yield reasoningChunk;
+           }
+           tokensBuffer = tokensBuffer.substring(splitIndex);
+         } else if (tokensBuffer) {
+           const reasoningChunk = new __langchain_core_outputs.ChatGenerationChunk({
+             message: new __langchain_core_messages.AIMessageChunk({
+               content: "",
+               additional_kwargs: {
+                 ...chunk.message.additional_kwargs,
+                 reasoning_content: tokensBuffer
+               },
+               response_metadata: chunk.message.response_metadata,
+               tool_calls: chunk.message.tool_calls,
+               tool_call_chunks: chunk.message.tool_call_chunks,
+               id: chunk.message.id
+             }),
+             text: "",
+             generationInfo: chunk.generationInfo
+           });
+           yield reasoningChunk;
+           tokensBuffer = "";
+         }
+       } else {
+         const possibleStartTag = "<think>";
+         let splitIndex = -1;
+         for (let i = 6; i >= 1; i--) if (tokensBuffer.endsWith(possibleStartTag.substring(0, i))) {
+           splitIndex = tokensBuffer.length - i;
+           break;
+         }
+         if (splitIndex !== -1) {
+           const safeToYield = tokensBuffer.substring(0, splitIndex);
+           if (safeToYield) {
+             const contentChunk = new __langchain_core_outputs.ChatGenerationChunk({
+               message: new __langchain_core_messages.AIMessageChunk({
+                 content: safeToYield,
+                 additional_kwargs: chunk.message.additional_kwargs,
+                 response_metadata: chunk.message.response_metadata,
+                 tool_calls: chunk.message.tool_calls,
+                 tool_call_chunks: chunk.message.tool_call_chunks,
+                 id: chunk.message.id
+               }),
+               text: safeToYield,
+               generationInfo: chunk.generationInfo
+             });
+             yield contentChunk;
+           }
+           tokensBuffer = tokensBuffer.substring(splitIndex);
+         } else if (tokensBuffer) {
+           const contentChunk = new __langchain_core_outputs.ChatGenerationChunk({
+             message: new __langchain_core_messages.AIMessageChunk({
+               content: tokensBuffer,
+               additional_kwargs: chunk.message.additional_kwargs,
+               response_metadata: chunk.message.response_metadata,
+               tool_calls: chunk.message.tool_calls,
+               tool_call_chunks: chunk.message.tool_call_chunks,
+               id: chunk.message.id
+             }),
+             text: tokensBuffer,
+             generationInfo: chunk.generationInfo
+           });
+           yield contentChunk;
+           tokensBuffer = "";
+         }
+       }
+     }
+     if (tokensBuffer) if (isThinking) {
+       const reasoningChunk = new __langchain_core_outputs.ChatGenerationChunk({
+         message: new __langchain_core_messages.AIMessageChunk({
+           content: "",
+           additional_kwargs: { reasoning_content: tokensBuffer }
+         }),
+         text: ""
+       });
+       yield reasoningChunk;
+     } else {
+       const contentChunk = new __langchain_core_outputs.ChatGenerationChunk({
+         message: new __langchain_core_messages.AIMessageChunk({ content: tokensBuffer }),
+         text: tokensBuffer
+       });
+       yield contentChunk;
+     }
+   }
    _convertCompletionsMessageToBaseMessage(message, rawResponse) {
      const langChainMessage = super._convertCompletionsMessageToBaseMessage(message, rawResponse);
      langChainMessage.additional_kwargs.reasoning_content = message.reasoning_content;
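
The subtlest part of the generator added above is handling tag fragments that straddle chunk boundaries (for example, a chunk ending in `</thi`). The loop it inlines can be sketched as a standalone helper; this is for illustration only and is not part of the published code, and the helper name is hypothetical:

```typescript
// Hypothetical helper mirroring the inlined loop above: returns the index at
// which a trailing partial occurrence of `tag` starts, or -1 if the buffer
// does not end with any prefix of the tag and can be flushed in full.
function splitBeforePartialTag(buffer: string, tag: string): number {
  // Greedy: try the longest proper prefix of the tag first, down to 1 char.
  for (let i = tag.length - 1; i >= 1; i--) {
    if (buffer.endsWith(tag.substring(0, i))) {
      return buffer.length - i;
    }
  }
  return -1;
}

// Example: the buffer ends with a fragment of "</think>", so everything before
// index 16 is safe to emit while "</thi" stays buffered for the next chunk.
console.log(splitBeforePartialTag("Paris is the cap</thi", "</think>")); // 16
```

The stream yields the safe portion as either answer content or `reasoning_content`, depending on whether it is currently inside a `<think>` block, and keeps the fragment buffered until the next chunk shows whether it completes the tag.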
@@ -1 +1 @@
- {"version":3,"file":"chat_models.cjs","names":["ChatOpenAICompletions","fields?: Partial<ChatDeepSeekInput>","delta: Record<string, any>","rawResponse: OpenAIClient.ChatCompletionChunk","defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"","message: OpenAIClient.ChatCompletionMessage","rawResponse: OpenAIClient.ChatCompletion","PROFILES","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport {\n ChatOpenAICallOptions,\n ChatOpenAICompletions,\n ChatOpenAIFields,\n OpenAIClient,\n} from \"@langchain/openai\";\nimport PROFILES from \"./profiles.js\";\n\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\n\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name() {\n return \"ChatDeepSeek\";\n }\n\n _llmType() {\n return \"deepseek\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"DEEPSEEK_API_KEY\",\n };\n }\n\n lc_serializable = true;\n\n lc_namespace = [\"langchain\", \"chat_models\", \"deepseek\"];\n\n constructor(fields?: Partial<ChatDeepSeekInput>) {\n const apiKey = fields?.apiKey || getEnvironmentVariable(\"DEEPSEEK_API_KEY\");\n if (!apiKey) {\n throw new Error(\n `Deepseek API key not found. 
Please set the DEEPSEEK_API_KEY environment variable or pass the key into \"apiKey\" field.`\n );\n }\n\n super({\n ...fields,\n apiKey,\n configuration: {\n baseURL: \"https://api.deepseek.com\",\n ...fields?.configuration,\n },\n });\n }\n\n protected override _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>,\n rawResponse: OpenAIClient.ChatCompletionChunk,\n defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"\n ) {\n const messageChunk = super._convertCompletionsDeltaToBaseMessageChunk(\n delta,\n rawResponse,\n defaultRole\n );\n messageChunk.additional_kwargs.reasoning_content = delta.reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n messageChunk.response_metadata = {\n ...messageChunk.response_metadata,\n model_provider: \"deepseek\",\n };\n return messageChunk;\n }\n\n protected override _convertCompletionsMessageToBaseMessage(\n message: OpenAIClient.ChatCompletionMessage,\n rawResponse: OpenAIClient.ChatCompletion\n ) {\n const langChainMessage = super._convertCompletionsMessageToBaseMessage(\n message,\n rawResponse\n );\n langChainMessage.additional_kwargs.reasoning_content =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (message as any).reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n langChainMessage.response_metadata = {\n ...langChainMessage.response_metadata,\n model_provider: \"deepseek\",\n };\n return langChainMessage;\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n const ensuredConfig = { ...config };\n // Deepseek does not support json schema yet\n if (ensuredConfig?.method === undefined) {\n ensuredConfig.method = \"functionCalling\";\n }\n return super.withStructuredOutput<RunOutput>(outputSchema, ensuredConfig);\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuZA,IAAa,eAAb,cAAkCA,yCAA+C;CAC/E,OAAO,UAAU;AACf,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,mBACT;CACF;CAED,kBAAkB;CAElB,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,YAAYC,QAAqC;EAC/C,MAAM,SAAS,QAAQ,iEAAiC,mBAAmB;AAC3E,MAAI,CAAC,OACH,OAAM,IAAI,MACR,CAAC,qHAAqH,CAAC;EAI3H,MAAM;GACJ,GAAG;GACH;GACA,eAAe;IACb,SAAS;IACT,GAAG,QAAQ;GACZ;EACF,EAAC;CACH;CAED,AAAmB,2CAEjBC,OACAC,aACAC,aAOA;EACA,MAAM,eAAe,MAAM,2CACzB,OACA,aACA,YACD;EACD,aAAa,kBAAkB,oBAAoB,MAAM;EAEzD,aAAa,oBAAoB;GAC/B,GAAG,aAAa;GAChB,gBAAgB;EACjB;AACD,SAAO;CACR;CAED,AAAmB,wCACjBC,SACAC,aACA;EACA,MAAM,mBAAmB,MAAM,wCAC7B,SACA,YACD;EACD,iBAAiB,kBAAkB,oBAEhC,QAAgB;EAEnB,iBAAiB,oBAAoB;GACnC,GAAG,iBAAiB;GACpB,gBAAgB;EACjB;AACD,SAAO;CACR;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,yBAAS,KAAK,UAAU,CAAE;CAClC;CAqCD,qBAIEC,cAIAC,QAMI;EACJ,MAAM,gBAAgB,EAAE,GAAG,OAAQ;AAEnC,MAAI,eAAe,WAAW,QAC5B,cAAc,SAAS;AAEzB,SAAO,MAAM,qBAAgC,cAAc,cAAc;CAC1E;AACF"}
+ {"version":3,"file":"chat_models.cjs","names":["ChatOpenAICompletions","fields?: Partial<ChatDeepSeekInput>","delta: Record<string, any>","rawResponse: OpenAIClient.ChatCompletionChunk","defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"","messages: BaseMessage[]","options: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","ChatGenerationChunk","AIMessageChunk","message: OpenAIClient.ChatCompletionMessage","rawResponse: OpenAIClient.ChatCompletion","PROFILES","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage, AIMessageChunk } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport {\n ChatOpenAICallOptions,\n ChatOpenAICompletions,\n ChatOpenAIFields,\n OpenAIClient,\n} from \"@langchain/openai\";\nimport { ChatGenerationChunk } from \"@langchain/core/outputs\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport PROFILES from \"./profiles.js\";\n\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\n\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name() {\n return \"ChatDeepSeek\";\n }\n\n _llmType() {\n return \"deepseek\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"DEEPSEEK_API_KEY\",\n };\n }\n\n lc_serializable = true;\n\n lc_namespace = [\"langchain\", \"chat_models\", \"deepseek\"];\n\n constructor(fields?: Partial<ChatDeepSeekInput>) {\n const apiKey = fields?.apiKey || getEnvironmentVariable(\"DEEPSEEK_API_KEY\");\n if (!apiKey) {\n throw new Error(\n `Deepseek API key not found. 
Please set the DEEPSEEK_API_KEY environment variable or pass the key into \"apiKey\" field.`\n );\n }\n\n super({\n ...fields,\n apiKey,\n configuration: {\n baseURL: \"https://api.deepseek.com\",\n ...fields?.configuration,\n },\n });\n }\n\n protected override _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>,\n rawResponse: OpenAIClient.ChatCompletionChunk,\n defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"\n ) {\n const messageChunk = super._convertCompletionsDeltaToBaseMessageChunk(\n delta,\n rawResponse,\n defaultRole\n );\n messageChunk.additional_kwargs.reasoning_content = delta.reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n messageChunk.response_metadata = {\n ...messageChunk.response_metadata,\n model_provider: \"deepseek\",\n };\n return messageChunk;\n }\n\n async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const stream = super._streamResponseChunks(messages, options, runManager);\n\n // State for parsing <think> tags\n let tokensBuffer = \"\";\n let isThinking = false;\n\n for await (const chunk of stream) {\n // If the model already provided reasoning_content natively, just yield it\n if (chunk.message.additional_kwargs.reasoning_content) {\n yield chunk;\n continue;\n }\n\n const text = chunk.text;\n if (!text) {\n yield chunk;\n continue;\n }\n\n // Append text to buffer to handle split tags\n tokensBuffer += text;\n\n // Check for <think> start tag\n if (!isThinking && tokensBuffer.includes(\"<think>\")) {\n isThinking = true;\n const thinkIndex = tokensBuffer.indexOf(\"<think>\");\n const beforeThink = tokensBuffer.substring(0, thinkIndex);\n const afterThink = tokensBuffer.substring(\n thinkIndex + \"<think>\".length\n );\n\n // We consumed up to <think>, so buffer becomes what's after\n tokensBuffer = afterThink || \"\"; // might be empty or part of thought\n\n if (beforeThink) {\n // Send the content before the tag\n const newChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: beforeThink,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: beforeThink,\n generationInfo: chunk.generationInfo,\n });\n yield newChunk;\n }\n }\n\n // Check for </think> end tag\n if (isThinking && tokensBuffer.includes(\"</think>\")) {\n isThinking = false;\n const thinkEndIndex = tokensBuffer.indexOf(\"</think>\");\n const thoughtContent = tokensBuffer.substring(0, thinkEndIndex);\n const afterThink = tokensBuffer.substring(\n thinkEndIndex + \"</think>\".length\n );\n\n // Yield the reasoning content\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: {\n ...chunk.message.additional_kwargs,\n reasoning_content: thoughtContent,\n },\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: \"\",\n generationInfo: chunk.generationInfo,\n });\n yield reasoningChunk;\n\n // Reset buffer to what's after </think>\n tokensBuffer = afterThink || \"\";\n\n // Yield the rest as 
normal content if any\n if (tokensBuffer) {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: tokensBuffer,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: tokensBuffer,\n generationInfo: chunk.generationInfo,\n });\n yield contentChunk;\n tokensBuffer = \"\"; // consumed\n }\n } else if (isThinking) {\n // We are inside thinking block.\n // Check partial </think> match\n const possibleEndTag = \"</think>\";\n let splitIndex = -1;\n\n // Check if buffer ends with a prefix of </think> - Greedy check (longest first)\n for (let i = possibleEndTag.length - 1; i >= 1; i--) {\n if (tokensBuffer.endsWith(possibleEndTag.substring(0, i))) {\n splitIndex = tokensBuffer.length - i;\n break;\n }\n }\n\n if (splitIndex !== -1) {\n const safeToYield = tokensBuffer.substring(0, splitIndex);\n if (safeToYield) {\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: {\n ...chunk.message.additional_kwargs,\n reasoning_content: safeToYield,\n },\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: \"\",\n generationInfo: chunk.generationInfo,\n });\n yield reasoningChunk;\n }\n tokensBuffer = tokensBuffer.substring(splitIndex); // keep partial tag\n } else {\n // content is safe to yield as reasoning\n if (tokensBuffer) {\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: {\n ...chunk.message.additional_kwargs,\n reasoning_content: tokensBuffer,\n },\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: \"\",\n generationInfo: chunk.generationInfo,\n });\n yield reasoningChunk;\n tokensBuffer = \"\";\n }\n }\n } else {\n // NOT thinking.\n // Check partial start tag \"<think>\" - Greedy check (longest first)\n const possibleStartTag = \"<think>\";\n let splitIndex = -1;\n for (let i = possibleStartTag.length - 1; i >= 1; i--) {\n if (tokensBuffer.endsWith(possibleStartTag.substring(0, i))) {\n splitIndex = tokensBuffer.length - i;\n break;\n }\n }\n\n if (splitIndex !== -1) {\n // Yield safe content\n const safeToYield = tokensBuffer.substring(0, splitIndex);\n if (safeToYield) {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: safeToYield,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: safeToYield,\n generationInfo: chunk.generationInfo,\n });\n yield contentChunk;\n }\n tokensBuffer = tokensBuffer.substring(splitIndex); // keep partial tag\n } else {\n // Yield all\n if (tokensBuffer) {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: tokensBuffer,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: tokensBuffer,\n generationInfo: 
chunk.generationInfo,\n });\n yield contentChunk;\n tokensBuffer = \"\";\n }\n }\n }\n }\n\n // Flush remaining buffer at end of stream\n if (tokensBuffer) {\n // If we were thinking, it's unclosed thought.\n if (isThinking) {\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: { reasoning_content: tokensBuffer },\n }),\n text: \"\",\n });\n yield reasoningChunk;\n } else {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: tokensBuffer,\n }),\n text: tokensBuffer,\n });\n yield contentChunk;\n }\n }\n }\n\n protected override _convertCompletionsMessageToBaseMessage(\n message: OpenAIClient.ChatCompletionMessage,\n rawResponse: OpenAIClient.ChatCompletion\n ) {\n const langChainMessage = super._convertCompletionsMessageToBaseMessage(\n message,\n rawResponse\n );\n langChainMessage.additional_kwargs.reasoning_content =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (message as any).reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n langChainMessage.response_metadata = {\n ...langChainMessage.response_metadata,\n model_provider: \"deepseek\",\n };\n return langChainMessage;\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n const ensuredConfig = { ...config };\n // Deepseek does not support json schema yet\n if (ensuredConfig?.method === undefined) {\n ensuredConfig.method = \"functionCalling\";\n }\n return super.withStructuredOutput<RunOutput>(outputSchema, ensuredConfig);\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAyZA,IAAa,eAAb,cAAkCA,yCAA+C;CAC/E,OAAO,UAAU;AACf,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,mBACT;CACF;CAED,kBAAkB;CAElB,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,YAAYC,QAAqC;EAC/C,MAAM,SAAS,QAAQ,iEAAiC,mBAAmB;AAC3E,MAAI,CAAC,OACH,OAAM,IAAI,MACR,CAAC,qHAAqH,CAAC;EAI3H,MAAM;GACJ,GAAG;GACH;GACA,eAAe;IACb,SAAS;IACT,GAAG,QAAQ;GACZ;EACF,EAAC;CACH;CAED,AAAmB,2CAEjBC,OACAC,aACAC,aAOA;EACA,MAAM,eAAe,MAAM,2CACzB,OACA,aACA,YACD;EACD,aAAa,kBAAkB,oBAAoB,MAAM;EAEzD,aAAa,oBAAoB;GAC/B,GAAG,aAAa;GAChB,gBAAgB;EACjB;AACD,SAAO;CACR;CAED,OAAO,sBACLC,UACAC,SACAC,YACqC;EACrC,MAAM,SAAS,MAAM,sBAAsB,UAAU,SAAS,WAAW;EAGzE,IAAI,eAAe;EACnB,IAAI,aAAa;AAEjB,aAAW,MAAM,SAAS,QAAQ;AAEhC,OAAI,MAAM,QAAQ,kBAAkB,mBAAmB;IACrD,MAAM;AACN;GACD;GAED,MAAM,OAAO,MAAM;AACnB,OAAI,CAAC,MAAM;IACT,MAAM;AACN;GACD;GAGD,gBAAgB;AAGhB,OAAI,CAAC,cAAc,aAAa,SAAS,UAAU,EAAE;IACnD,aAAa;IACb,MAAM,aAAa,aAAa,QAAQ,UAAU;IAClD,MAAM,cAAc,aAAa,UAAU,GAAG,WAAW;IACzD,MAAM,aAAa,aAAa,UAC9B,aAAa,EACd;IAGD,eAAe,cAAc;AAE7B,QAAI,aAAa;KAEf,MAAM,WAAW,IAAIC,6CAAoB;MACvC,SAAS,IAAIC,yCAAe;OAC1B,SAAS;OACT,mBAAmB,MAAM,QAAQ;OACjC,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;IACP;GACF;AAGD,OAAI,cAAc,aAAa,SAAS,WAAW,EAAE;IACnD,aAAa;IACb,MAAM,gBAAgB,aAAa,QAAQ,WAAW;IACtD,MAAM,iBAAiB,aAAa,UAAU,GAAG,cAAc;IAC/D,MAAM,aAAa,aAAa,UAC9B,gBAAgB,EACjB;IAGD,MAAM,iBAAiB,IAAID,6CAAoB;KAC7C,SAAS,IAAIC,yCAAe;MAC1B,SAAS;MACT,mBAAmB;OACjB,GAAG,MAAM,QAAQ;OACjB,mBAAmB;MACpB;MACD,mBAAmB,MAAM,QAAQ;MACjC,YAAY,MAAM,QAAQ;MAC1B,kBAAkB,MAAM,QAAQ;MAChC,IAAI,MAAM,QAAQ;KACnB;KACD,MAAM;KACN,gBAAgB,MAAM;IACvB;IACD,MAAM;IAGN,eAAe,cAAc;AAG7B,QAAI,cAAc;KAChB,MAAM,eAAe,IAAID,6CAAoB;MAC3C,SAAS,IAAIC,yCAAe;OAC1B,SAAS;OACT,mBAAmB,MAAM,QAAQ;OACjC,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;KACN,eAAe;IAChB;GACF,WAAU,YAAY;IAGrB,MAAM,iBAAiB;IACvB,IAAI,aAAa;AAGjB,SAAK,IAAI,IAAI,GAA2B,KAAK,GAAG,IAC9C,KAAI,aAAa,SAAS,eAAe,UAAU,GAAG,EAAE,CAAC,EAAE;KACzD,aAAa,aAAa,SAAS;AACnC;IACD;AAGH,QAAI,eAAe,IAAI;KACrB,MAAM,cAAc,aAAa,UAAU,GAAG,WAAW;AACzD,SAAI,aAAa;MACf,MAAM,iBAAiB,IAAID,6CAAoB;OAC7C,SAAS,IAAIC,yCAAe;QAC1B,SAAS;QACT,mBAAmB;SACjB,GAAG,MAAM,QAAQ;SACjB,mBAAmB;QACpB;QACD,mBAAmB,MAAM,QAAQ;QACjC,YAAY,MAAM,QAAQ;QAC1B,kBAAkB,MAAM,QAAQ;QAChC,IAAI,MAAM,QAAQ;OACnB;OACD,MAAM;OACN,gBAAgB,MAAM;MACvB;MACD,MAAM;KACP;KACD,eAAe,aAAa,UAAU,WAAW;IAClD,WAEK,cAAc;KAChB,MAAM,iBAAiB,IAAID,6CAAoB;MAC7C,SAAS,IAAIC,yCAAe;OAC1B,SAAS;OACT,mBAAmB;QACjB,GAAG,MAAM,QAAQ;QACjB,mBAAmB;OACpB;OACD,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;KACN,eAAe;IAChB;GAEJ,OAAM;IAGL,MAAM,mBAAmB;IACzB,IAAI,aAAa;AACjB,SAAK,IAAI,IAAI,GAA6B,KAAK,GAAG,IAChD,KAAI,aAAa,SAAS,iBAAiB,UAAU,GAAG,EAAE,CAAC,EAAE;KAC3D,aAAa,aAAa,SAAS;AACnC;IACD;AAGH,QAAI,eAAe,IAAI;KAErB,MAAM,cAAc,aAAa,UAAU,GAAG,WAAW;AACzD,SAAI,aAAa;MACf,MAAM,eAAe,IAAID,6CAAoB;OAC3C,SAAS,IAAIC,yCAAe;QAC1B,SAAS;QACT,mBAAmB,MAAM,QAAQ;QACjC,mBAAmB,MAAM,QAAQ;QACjC,YAAY,MAAM,QAAQ;QAC1B,kBAAkB,MAAM,QAAQ;QAChC,IAAI,MAAM,QAAQ;OACnB;OACD,MAAM;OACN,gBAAgB,MAAM;MACvB;MACD,MAAM;KACP;KACD,eA
Ae,aAAa,UAAU,WAAW;IAClD,WAEK,cAAc;KAChB,MAAM,eAAe,IAAID,6CAAoB;MAC3C,SAAS,IAAIC,yCAAe;OAC1B,SAAS;OACT,mBAAmB,MAAM,QAAQ;OACjC,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;KACN,eAAe;IAChB;GAEJ;EACF;AAGD,MAAI,aAEF,KAAI,YAAY;GACd,MAAM,iBAAiB,IAAID,6CAAoB;IAC7C,SAAS,IAAIC,yCAAe;KAC1B,SAAS;KACT,mBAAmB,EAAE,mBAAmB,aAAc;IACvD;IACD,MAAM;GACP;GACD,MAAM;EACP,OAAM;GACL,MAAM,eAAe,IAAID,6CAAoB;IAC3C,SAAS,IAAIC,yCAAe,EAC1B,SAAS,aACV;IACD,MAAM;GACP;GACD,MAAM;EACP;CAEJ;CAED,AAAmB,wCACjBC,SACAC,aACA;EACA,MAAM,mBAAmB,MAAM,wCAC7B,SACA,YACD;EACD,iBAAiB,kBAAkB,oBAEhC,QAAgB;EAEnB,iBAAiB,oBAAoB;GACnC,GAAG,iBAAiB;GACpB,gBAAgB;EACjB;AACD,SAAO;CACR;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,yBAAS,KAAK,UAAU,CAAE;CAClC;CAqCD,qBAIEC,cAIAC,QAMI;EACJ,MAAM,gBAAgB,EAAE,GAAG,OAAQ;AAEnC,MAAI,eAAe,WAAW,QAC5B,cAAc,SAAS;AAEzB,SAAO,MAAM,qBAAgC,cAAc,cAAc;CAC1E;AACF"}
@@ -5,6 +5,8 @@ import { ModelProfile } from "@langchain/core/language_models/profile";
  import { Runnable } from "@langchain/core/runnables";
  import { InteropZodType } from "@langchain/core/utils/types";
  import { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from "@langchain/openai";
+ import { ChatGenerationChunk } from "@langchain/core/outputs";
+ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 
  //#region src/chat_models.d.ts
  interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {
@@ -405,6 +407,7 @@ declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions
    lc_namespace: string[];
    constructor(fields?: Partial<ChatDeepSeekInput>);
    protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): _langchain_core_messages0.BaseMessageChunk<_langchain_core_messages0.MessageStructure<_langchain_core_messages0.MessageToolSet>, _langchain_core_messages0.MessageType>;
+   _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
    protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<_langchain_core_messages0.MessageStructure<_langchain_core_messages0.MessageToolSet>, _langchain_core_messages0.MessageType>;
    /**
     * Return profiling information for the model.
@@ -1 +1 @@
- {"version":3,"file":"chat_models.d.cts","names":["BaseLanguageModelInput","StructuredOutputMethodOptions","ModelProfile","BaseMessage","Runnable","InteropZodType","ChatOpenAICallOptions","ChatOpenAICompletions","ChatOpenAIFields","OpenAIClient","ChatDeepSeekCallOptions","Record","ChatDeepSeekInput","Array","ChatDeepSeek","RunOutput","Partial","ChatCompletionChunk","_langchain_core_messages0","MessageToolSet","MessageStructure","MessageType","BaseMessageChunk","ChatCompletionMessage","ChatCompletion"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from \"@langchain/openai\";\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").BaseMessageChunk<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, 
RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# sourceMappingURL=chat_models.d.ts.map"],"mappings":";;;;;;;;;UAMiBU,uBAAAA,SAAgCJ;YACnCK;;AADGD,UAGAE,iBAAAA,SAA0BJ,gBAHMF,CAAAA;EAGhCM;;;;EAA0C,MAAA,CAAA,EAAA,MAAA;EAiYtCE;;;EAQIE,KAAAA,CAAAA,EAAAA,MAAAA;EACuCL;;;;;EAAkWO,IAAAA,CAAAA,EA3XvZL,KA2XuZK,CAAAA,MAAAA,CAAAA;EACnWT;;;;EAA8IS,aAAAA,CAAAA,EAvXzLL,KAuXyLK,CAAAA,MAAwFG,CAAAA;EAAvJlB;;;EAmB7EQ,SAAAA,CAAAA,EAAAA,OAAAA;EAAkDI;;;EAA2Cd,WAAAA,CAAAA,EAAAA,MAAAA;EAAgDD;;;;EAC7IW,SAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;AA9BF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1CG,YAAAA,SAAqBP,sBAAsBG;;;;;;;;uBAQvCM,QAAQJ;8DAC+BD,kCAAkCF,YAAAA,CAAaQ,yGAAmTC,yBAAAA,CAAvKI,iBAAsHJ,yBAAAA,CAAlEE,iBAA7KF,yBAAAA,CAAiOC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;6DACxVZ,YAAAA,CAAac,oCAAoCd,YAAAA,CAAae,iBAAiBrB,YAAiHe,yBAAAA,CAAlEE,iBAAlDF,yBAAAA,CAAsGC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;;;;;;;;;;;;;;;;;;iBAkBlRnB;yCACwBS,sBAAsBA,mCAAmCN,eAAeU,aAAaJ,8BAA8BV,uCAAuCG,SAASJ,wBAAwBe;yCAC3LJ,sBAAsBA,mCAAmCN,eAAeU,aAAaJ,8BAA8BV,sCAAsCG,SAASJ;SAChMG;YACGY;;yCAE2BJ,sBAAsBA,mCAAmCN,eAAeU,aAAaJ,8BAA8BV,yCAAyCG,SAASJ,wBAAwBe,aAAaX,SAASJ;SACjPG;YACGY"}
1
+ {"version":3,"file":"chat_models.d.cts","names":["BaseLanguageModelInput","StructuredOutputMethodOptions","ModelProfile","BaseMessage","Runnable","InteropZodType","ChatOpenAICallOptions","ChatOpenAICompletions","ChatOpenAIFields","OpenAIClient","ChatGenerationChunk","CallbackManagerForLLMRun","ChatDeepSeekCallOptions","Record","ChatDeepSeekInput","Array","ChatDeepSeek","RunOutput","Partial","ChatCompletionChunk","_langchain_core_messages0","MessageToolSet","MessageStructure","MessageType","BaseMessageChunk","AsyncGenerator","ChatCompletionMessage","ChatCompletion"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from \"@langchain/openai\";\nimport { ChatGenerationChunk } from \"@langchain/core/outputs\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").BaseMessageChunk<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n _streamResponseChunks(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<RunOutput extends Record<string, any> = 
Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# sourceMappingURL=chat_models.d.ts.map"],"mappings":";;;;;;;;;;;UAQiBY,uBAAAA,SAAgCN;YACnCO;;AADGD,UAGAE,iBAAAA,SAA0BN,gBAHMF,CAAAA;EAGhCQ;;;;EAA0C,MAAA,CAAA,EAAA,MAAA;EAiYtCE;;;EAQIE,KAAAA,CAAAA,EAAAA,MAAAA;EACuCL;;;;;EAAkWO,IAAAA,CAAAA,EA3XvZL,KA2XuZK,CAAAA,MAAAA,CAAAA;EAC9XjB;;;;EAC2BM,aAAaiB,CAAAA,EAxXxDX,KAwXwDW,CAAAA,MAAAA,CAAAA;EAAoCjB;;;EAA6FW,SAAAA,CAAAA,EAAAA,OAAAA;EAA/DjB;;;EAmB7EU,WAAAA,CAAAA,EAAAA,MAAAA;EAAkDI;;;;EAA2FjB,SAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;AA9B/I;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1CgB,YAAAA,SAAqBT,sBAAsBK;;;;;;;;uBAQvCM,QAAQJ;8DAC+BD,kCAAkCJ,YAAAA,CAAaU,yGAAmTC,yBAAAA,CAAvKI,iBAAsHJ,yBAAAA,CAAlEE,iBAA7KF,yBAAAA,CAAiOC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;kCACnXpB,gEAAgEQ,2BAA2Bc,eAAef;6DAC/ED,YAAAA,CAAaiB,oCAAoCjB,YAAAA,CAAakB,iBAAiBxB,YAAiHiB,yBAAAA,CAAlEE,iBAAlDF,yBAAAA,CAAsGC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;;;;;;;;;;;;;;;;;;iBAkBlRrB;yCACwBW,sBAAsBA,mCAAmCR,eAAeY,aAAaJ,8BAA8BZ,uCAAuCG,SAASJ,wBAAwBiB;yCAC3LJ,sBAAsBA,mCAAmCR,eAAeY,aAAaJ,8BAA8BZ,sCAAsCG,SAASJ;SAChMG;YACGc;;yCAE2BJ,sBAAsBA,mCAAmCR,eAAeY,aAAaJ,8BAA8BZ,yCAAyCG,SAASJ,wBAAwBiB,aAAab,SAASJ;SACjPG;YACGc"}
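
The declaration diff above adds a public `_streamResponseChunks` override to `ChatDeepSeek`, which is the type-level footprint of the 1.0.7 fix: `<think>` tags that arrive inline in the content stream are now routed into `additional_kwargs.reasoning_content` instead of leaking into `content`. As a rough, hedged sketch of the consumer-visible effect (assuming `DEEPSEEK_API_KEY` is set and using the `deepseek-reasoner` model name from the package's own docs), streaming code like the following should see reasoning text arrive on the chunk's `additional_kwargs` rather than mixed into the message content:

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

// Assumes DEEPSEEK_API_KEY is set in the environment.
const llm = new ChatDeepSeek({ model: "deepseek-reasoner", temperature: 0 });

const stream = await llm.stream(`Translate "I love programming" into French.`);
for await (const chunk of stream) {
  // With 1.0.7, any <think>...</think> text emitted inline in the content
  // stream is surfaced here instead of appearing in `content`.
  const reasoning = chunk.additional_kwargs.reasoning_content;
  if (reasoning) {
    console.log("[reasoning]", reasoning);
  }
  if (chunk.content) {
    console.log("[content]", chunk.content);
  }
}
```

This is an illustrative sketch, not part of the package; the exact chunk shapes are documented in the class JSDoc reproduced in the sourcemap above.
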
@@ -1,10 +1,12 @@
1
- import { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from "@langchain/openai";
2
1
  import * as _langchain_core_messages0 from "@langchain/core/messages";
3
2
  import { BaseMessage } from "@langchain/core/messages";
3
+ import { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from "@langchain/openai";
4
+ import { ChatGenerationChunk } from "@langchain/core/outputs";
4
5
  import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
5
6
  import { ModelProfile } from "@langchain/core/language_models/profile";
6
7
  import { Runnable } from "@langchain/core/runnables";
7
8
  import { InteropZodType } from "@langchain/core/utils/types";
9
+ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
8
10
 
9
11
  //#region src/chat_models.d.ts
10
12
  interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {
@@ -405,6 +407,7 @@ declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions
405
407
  lc_namespace: string[];
406
408
  constructor(fields?: Partial<ChatDeepSeekInput>);
407
409
  protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): _langchain_core_messages0.BaseMessageChunk<_langchain_core_messages0.MessageStructure<_langchain_core_messages0.MessageToolSet>, _langchain_core_messages0.MessageType>;
410
+ _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
408
411
  protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<_langchain_core_messages0.MessageStructure<_langchain_core_messages0.MessageToolSet>, _langchain_core_messages0.MessageType>;
409
412
  /**
410
413
  * Return profiling information for the model.
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.d.ts","names":["BaseLanguageModelInput","StructuredOutputMethodOptions","ModelProfile","BaseMessage","Runnable","InteropZodType","ChatOpenAICallOptions","ChatOpenAICompletions","ChatOpenAIFields","OpenAIClient","ChatDeepSeekCallOptions","Record","ChatDeepSeekInput","Array","ChatDeepSeek","RunOutput","Partial","ChatCompletionChunk","_langchain_core_messages0","MessageToolSet","MessageStructure","MessageType","BaseMessageChunk","ChatCompletionMessage","ChatCompletion"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from \"@langchain/openai\";\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").BaseMessageChunk<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, 
RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# sourceMappingURL=chat_models.d.ts.map"],"mappings":";;;;;;;;;UAMiBU,uBAAAA,SAAgCJ;YACnCK;;AADGD,UAGAE,iBAAAA,SAA0BJ,gBAHMF,CAAAA;EAGhCM;;;;EAA0C,MAAA,CAAA,EAAA,MAAA;EAiYtCE;;;EAQIE,KAAAA,CAAAA,EAAAA,MAAAA;EACuCL;;;;;EAAkWO,IAAAA,CAAAA,EA3XvZL,KA2XuZK,CAAAA,MAAAA,CAAAA;EACnWT;;;;EAA8IS,aAAAA,CAAAA,EAvXzLL,KAuXyLK,CAAAA,MAAwFG,CAAAA;EAAvJlB;;;EAmB7EQ,SAAAA,CAAAA,EAAAA,OAAAA;EAAkDI;;;EAA2Cd,WAAAA,CAAAA,EAAAA,MAAAA;EAAgDD;;;;EAC7IW,SAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;AA9BF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1CG,YAAAA,SAAqBP,sBAAsBG;;;;;;;;uBAQvCM,QAAQJ;8DAC+BD,kCAAkCF,YAAAA,CAAaQ,yGAAmTC,yBAAAA,CAAvKI,iBAAsHJ,yBAAAA,CAAlEE,iBAA7KF,yBAAAA,CAAiOC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;6DACxVZ,YAAAA,CAAac,oCAAoCd,YAAAA,CAAae,iBAAiBrB,YAAiHe,yBAAAA,CAAlEE,iBAAlDF,yBAAAA,CAAsGC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;;;;;;;;;;;;;;;;;;iBAkBlRnB;yCACwBS,sBAAsBA,mCAAmCN,eAAeU,aAAaJ,8BAA8BV,uCAAuCG,SAASJ,wBAAwBe;yCAC3LJ,sBAAsBA,mCAAmCN,eAAeU,aAAaJ,8BAA8BV,sCAAsCG,SAASJ;SAChMG;YACGY;;yCAE2BJ,sBAAsBA,mCAAmCN,eAAeU,aAAaJ,8BAA8BV,yCAAyCG,SAASJ,wBAAwBe,aAAaX,SAASJ;SACjPG;YACGY"}
1
+ {"version":3,"file":"chat_models.d.ts","names":["BaseLanguageModelInput","StructuredOutputMethodOptions","ModelProfile","BaseMessage","Runnable","InteropZodType","ChatOpenAICallOptions","ChatOpenAICompletions","ChatOpenAIFields","OpenAIClient","ChatGenerationChunk","CallbackManagerForLLMRun","ChatDeepSeekCallOptions","Record","ChatDeepSeekInput","Array","ChatDeepSeek","RunOutput","Partial","ChatCompletionChunk","_langchain_core_messages0","MessageToolSet","MessageStructure","MessageType","BaseMessageChunk","AsyncGenerator","ChatCompletionMessage","ChatCompletion"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { ChatOpenAICallOptions, ChatOpenAICompletions, ChatOpenAIFields, OpenAIClient } from \"@langchain/openai\";\nimport { ChatGenerationChunk } from \"@langchain/core/outputs\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name(): string;\n _llmType(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_serializable: boolean;\n lc_namespace: string[];\n constructor(fields?: Partial<ChatDeepSeekInput>);\n protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: \"function\" | \"user\" | \"system\" | \"developer\" | \"assistant\" | \"tool\"): import(\"@langchain/core/messages\").BaseMessageChunk<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n _streamResponseChunks(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage, rawResponse: OpenAIClient.ChatCompletion): BaseMessage<import(\"@langchain/core/messages\").MessageStructure<import(\"@langchain/core/messages\").MessageToolSet>, import(\"@langchain/core/messages\").MessageType>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<RunOutput extends Record<string, any> = 
Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# sourceMappingURL=chat_models.d.ts.map"],"mappings":";;;;;;;;;;;UAQiBY,uBAAAA,SAAgCN;YACnCO;;AADGD,UAGAE,iBAAAA,SAA0BN,gBAHMF,CAAAA;EAGhCQ;;;;EAA0C,MAAA,CAAA,EAAA,MAAA;EAiYtCE;;;EAQIE,KAAAA,CAAAA,EAAAA,MAAAA;EACuCL;;;;;EAAkWO,IAAAA,CAAAA,EA3XvZL,KA2XuZK,CAAAA,MAAAA,CAAAA;EAC9XjB;;;;EAC2BM,aAAaiB,CAAAA,EAxXxDX,KAwXwDW,CAAAA,MAAAA,CAAAA;EAAoCjB;;;EAA6FW,SAAAA,CAAAA,EAAAA,OAAAA;EAA/DjB;;;EAmB7EU,WAAAA,CAAAA,EAAAA,MAAAA;EAAkDI;;;;EAA2FjB,SAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;AA9B/I;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAA1CgB,YAAAA,SAAqBT,sBAAsBK;;;;;;;;uBAQvCM,QAAQJ;8DAC+BD,kCAAkCJ,YAAAA,CAAaU,yGAAmTC,yBAAAA,CAAvKI,iBAAsHJ,yBAAAA,CAAlEE,iBAA7KF,yBAAAA,CAAiOC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;kCACnXpB,gEAAgEQ,2BAA2Bc,eAAef;6DAC/ED,YAAAA,CAAaiB,oCAAoCjB,YAAAA,CAAakB,iBAAiBxB,YAAiHiB,yBAAAA,CAAlEE,iBAAlDF,yBAAAA,CAAsGC,cAAAA,GAApCD,yBAAAA,CAAwFG,WAAAA;;;;;;;;;;;;;;;;;;iBAkBlRrB;yCACwBW,sBAAsBA,mCAAmCR,eAAeY,aAAaJ,8BAA8BZ,uCAAuCG,SAASJ,wBAAwBiB;yCAC3LJ,sBAAsBA,mCAAmCR,eAAeY,aAAaJ,8BAA8BZ,sCAAsCG,SAASJ;SAChMG;YACGc;;yCAE2BJ,sBAAsBA,mCAAmCR,eAAeY,aAAaJ,8BAA8BZ,yCAAyCG,SAASJ,wBAAwBiB,aAAab,SAASJ;SACjPG;YACGc"}
@@ -1,6 +1,8 @@
1
1
  import profiles_default from "./profiles.js";
2
+ import { AIMessageChunk } from "@langchain/core/messages";
2
3
  import { getEnvironmentVariable } from "@langchain/core/utils/env";
3
4
  import { ChatOpenAICompletions } from "@langchain/openai";
5
+ import { ChatGenerationChunk } from "@langchain/core/outputs";
4
6
 
5
7
  //#region src/chat_models.ts
6
8
  /**
@@ -390,6 +392,188 @@ var ChatDeepSeek = class extends ChatOpenAICompletions {
390
392
  };
391
393
  return messageChunk;
392
394
  }
395
+ async *_streamResponseChunks(messages, options, runManager) {
396
+ const stream = super._streamResponseChunks(messages, options, runManager);
397
+ let tokensBuffer = "";
398
+ let isThinking = false;
399
+ for await (const chunk of stream) {
400
+ if (chunk.message.additional_kwargs.reasoning_content) {
401
+ yield chunk;
402
+ continue;
403
+ }
404
+ const text = chunk.text;
405
+ if (!text) {
406
+ yield chunk;
407
+ continue;
408
+ }
409
+ tokensBuffer += text;
410
+ if (!isThinking && tokensBuffer.includes("<think>")) {
411
+ isThinking = true;
412
+ const thinkIndex = tokensBuffer.indexOf("<think>");
413
+ const beforeThink = tokensBuffer.substring(0, thinkIndex);
414
+ const afterThink = tokensBuffer.substring(thinkIndex + 7);
415
+ tokensBuffer = afterThink || "";
416
+ if (beforeThink) {
417
+ const newChunk = new ChatGenerationChunk({
418
+ message: new AIMessageChunk({
419
+ content: beforeThink,
420
+ additional_kwargs: chunk.message.additional_kwargs,
421
+ response_metadata: chunk.message.response_metadata,
422
+ tool_calls: chunk.message.tool_calls,
423
+ tool_call_chunks: chunk.message.tool_call_chunks,
424
+ id: chunk.message.id
425
+ }),
426
+ text: beforeThink,
427
+ generationInfo: chunk.generationInfo
428
+ });
429
+ yield newChunk;
430
+ }
431
+ }
432
+ if (isThinking && tokensBuffer.includes("</think>")) {
433
+ isThinking = false;
434
+ const thinkEndIndex = tokensBuffer.indexOf("</think>");
435
+ const thoughtContent = tokensBuffer.substring(0, thinkEndIndex);
436
+ const afterThink = tokensBuffer.substring(thinkEndIndex + 8);
437
+ const reasoningChunk = new ChatGenerationChunk({
438
+ message: new AIMessageChunk({
439
+ content: "",
440
+ additional_kwargs: {
441
+ ...chunk.message.additional_kwargs,
442
+ reasoning_content: thoughtContent
443
+ },
444
+ response_metadata: chunk.message.response_metadata,
445
+ tool_calls: chunk.message.tool_calls,
446
+ tool_call_chunks: chunk.message.tool_call_chunks,
447
+ id: chunk.message.id
448
+ }),
449
+ text: "",
450
+ generationInfo: chunk.generationInfo
451
+ });
452
+ yield reasoningChunk;
453
+ tokensBuffer = afterThink || "";
454
+ if (tokensBuffer) {
455
+ const contentChunk = new ChatGenerationChunk({
456
+ message: new AIMessageChunk({
457
+ content: tokensBuffer,
458
+ additional_kwargs: chunk.message.additional_kwargs,
459
+ response_metadata: chunk.message.response_metadata,
460
+ tool_calls: chunk.message.tool_calls,
461
+ tool_call_chunks: chunk.message.tool_call_chunks,
462
+ id: chunk.message.id
463
+ }),
464
+ text: tokensBuffer,
465
+ generationInfo: chunk.generationInfo
466
+ });
467
+ yield contentChunk;
468
+ tokensBuffer = "";
469
+ }
470
+ } else if (isThinking) {
471
+ const possibleEndTag = "</think>";
472
+ let splitIndex = -1;
473
+ for (let i = 7; i >= 1; i--) if (tokensBuffer.endsWith(possibleEndTag.substring(0, i))) {
474
+ splitIndex = tokensBuffer.length - i;
475
+ break;
476
+ }
477
+ if (splitIndex !== -1) {
478
+ const safeToYield = tokensBuffer.substring(0, splitIndex);
479
+ if (safeToYield) {
480
+ const reasoningChunk = new ChatGenerationChunk({
481
+ message: new AIMessageChunk({
482
+ content: "",
483
+ additional_kwargs: {
484
+ ...chunk.message.additional_kwargs,
485
+ reasoning_content: safeToYield
486
+ },
487
+ response_metadata: chunk.message.response_metadata,
488
+ tool_calls: chunk.message.tool_calls,
489
+ tool_call_chunks: chunk.message.tool_call_chunks,
490
+ id: chunk.message.id
491
+ }),
492
+ text: "",
493
+ generationInfo: chunk.generationInfo
494
+ });
495
+ yield reasoningChunk;
496
+ }
497
+ tokensBuffer = tokensBuffer.substring(splitIndex);
498
+ } else if (tokensBuffer) {
499
+ const reasoningChunk = new ChatGenerationChunk({
500
+ message: new AIMessageChunk({
501
+ content: "",
502
+ additional_kwargs: {
503
+ ...chunk.message.additional_kwargs,
504
+ reasoning_content: tokensBuffer
505
+ },
506
+ response_metadata: chunk.message.response_metadata,
507
+ tool_calls: chunk.message.tool_calls,
508
+ tool_call_chunks: chunk.message.tool_call_chunks,
509
+ id: chunk.message.id
510
+ }),
511
+ text: "",
512
+ generationInfo: chunk.generationInfo
513
+ });
514
+ yield reasoningChunk;
515
+ tokensBuffer = "";
516
+ }
517
+ } else {
518
+ const possibleStartTag = "<think>";
519
+ let splitIndex = -1;
520
+ for (let i = 6; i >= 1; i--) if (tokensBuffer.endsWith(possibleStartTag.substring(0, i))) {
521
+ splitIndex = tokensBuffer.length - i;
522
+ break;
523
+ }
524
+ if (splitIndex !== -1) {
525
+ const safeToYield = tokensBuffer.substring(0, splitIndex);
526
+ if (safeToYield) {
527
+ const contentChunk = new ChatGenerationChunk({
528
+ message: new AIMessageChunk({
529
+ content: safeToYield,
530
+ additional_kwargs: chunk.message.additional_kwargs,
531
+ response_metadata: chunk.message.response_metadata,
532
+ tool_calls: chunk.message.tool_calls,
533
+ tool_call_chunks: chunk.message.tool_call_chunks,
534
+ id: chunk.message.id
535
+ }),
536
+ text: safeToYield,
537
+ generationInfo: chunk.generationInfo
538
+ });
539
+ yield contentChunk;
540
+ }
541
+ tokensBuffer = tokensBuffer.substring(splitIndex);
542
+ } else if (tokensBuffer) {
543
+ const contentChunk = new ChatGenerationChunk({
544
+ message: new AIMessageChunk({
545
+ content: tokensBuffer,
546
+ additional_kwargs: chunk.message.additional_kwargs,
547
+ response_metadata: chunk.message.response_metadata,
548
+ tool_calls: chunk.message.tool_calls,
549
+ tool_call_chunks: chunk.message.tool_call_chunks,
550
+ id: chunk.message.id
551
+ }),
552
+ text: tokensBuffer,
553
+ generationInfo: chunk.generationInfo
554
+ });
555
+ yield contentChunk;
556
+ tokensBuffer = "";
557
+ }
558
+ }
559
+ }
560
+ if (tokensBuffer) if (isThinking) {
561
+ const reasoningChunk = new ChatGenerationChunk({
562
+ message: new AIMessageChunk({
563
+ content: "",
564
+ additional_kwargs: { reasoning_content: tokensBuffer }
565
+ }),
566
+ text: ""
567
+ });
568
+ yield reasoningChunk;
569
+ } else {
570
+ const contentChunk = new ChatGenerationChunk({
571
+ message: new AIMessageChunk({ content: tokensBuffer }),
572
+ text: tokensBuffer
573
+ });
574
+ yield contentChunk;
575
+ }
576
+ }
393
577
  _convertCompletionsMessageToBaseMessage(message, rawResponse) {
394
578
  const langChainMessage = super._convertCompletionsMessageToBaseMessage(message, rawResponse);
395
579
  langChainMessage.additional_kwargs.reasoning_content = message.reasoning_content;
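The `_streamResponseChunks` override added in this hunk buffers streamed text so that `<think>` … `</think>` spans are re-emitted as `reasoning_content` in `additional_kwargs` instead of leaking into visible content, and it holds back any trailing prefix of a tag so tags split across chunk boundaries are still caught. The sketch below is an illustrative, standalone re-implementation of that buffering idea only, not the package's API: the `ThinkTagSplitter` name and the simplified `{ content, reasoning }` output shape are assumptions, whereas the real code wraps each piece in `ChatGenerationChunk` / `AIMessageChunk`.

```typescript
// Illustrative sketch only (assumed helper and output shape).
type Piece = { content?: string; reasoning?: string };

class ThinkTagSplitter {
  private buffer = "";
  private thinking = false;

  // Feed one streamed text fragment; returns the pieces that are safe to emit now.
  push(text: string): Piece[] {
    this.buffer += text;
    const out: Piece[] = [];
    for (;;) {
      if (!this.thinking) {
        const start = this.buffer.indexOf("<think>");
        if (start !== -1) {
          if (start > 0) out.push({ content: this.buffer.slice(0, start) });
          this.buffer = this.buffer.slice(start + "<think>".length);
          this.thinking = true;
          continue;
        }
        // Hold back any trailing prefix of "<think>" in case the tag is split across chunks.
        const keep = this.trailingPrefix("<think>");
        const safe = this.buffer.slice(0, this.buffer.length - keep);
        if (safe) out.push({ content: safe });
        this.buffer = this.buffer.slice(this.buffer.length - keep);
        return out;
      }
      const end = this.buffer.indexOf("</think>");
      if (end !== -1) {
        if (end > 0) out.push({ reasoning: this.buffer.slice(0, end) });
        this.buffer = this.buffer.slice(end + "</think>".length);
        this.thinking = false;
        continue;
      }
      // Same hold-back trick for a partially received "</think>".
      const keep = this.trailingPrefix("</think>");
      const safe = this.buffer.slice(0, this.buffer.length - keep);
      if (safe) out.push({ reasoning: safe });
      this.buffer = this.buffer.slice(this.buffer.length - keep);
      return out;
    }
  }

  // Flush whatever remains at end of stream; an unclosed <think> counts as reasoning.
  flush(): Piece[] {
    if (!this.buffer) return [];
    const piece: Piece = this.thinking
      ? { reasoning: this.buffer }
      : { content: this.buffer };
    this.buffer = "";
    return [piece];
  }

  // Length of the longest proper prefix of `tag` that the buffer currently ends with.
  private trailingPrefix(tag: string): number {
    for (let i = tag.length - 1; i >= 1; i--) {
      if (this.buffer.endsWith(tag.slice(0, i))) return i;
    }
    return 0;
  }
}
```

The hold-back step mirrors the greedy longest-prefix check in the compiled code above: a fragment ending in "`</thi`" is retained until the next fragment shows whether it completes the tag or was ordinary text.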
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.js","names":["fields?: Partial<ChatDeepSeekInput>","delta: Record<string, any>","rawResponse: OpenAIClient.ChatCompletionChunk","defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"","message: OpenAIClient.ChatCompletionMessage","rawResponse: OpenAIClient.ChatCompletion","PROFILES","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport {\n ChatOpenAICallOptions,\n ChatOpenAICompletions,\n ChatOpenAIFields,\n OpenAIClient,\n} from \"@langchain/openai\";\nimport PROFILES from \"./profiles.js\";\n\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\n\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name() {\n return \"ChatDeepSeek\";\n }\n\n _llmType() {\n return \"deepseek\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"DEEPSEEK_API_KEY\",\n };\n }\n\n lc_serializable = true;\n\n lc_namespace = [\"langchain\", \"chat_models\", \"deepseek\"];\n\n constructor(fields?: Partial<ChatDeepSeekInput>) {\n const apiKey = fields?.apiKey || getEnvironmentVariable(\"DEEPSEEK_API_KEY\");\n if (!apiKey) {\n throw new Error(\n `Deepseek API key not found. 
Please set the DEEPSEEK_API_KEY environment variable or pass the key into \"apiKey\" field.`\n );\n }\n\n super({\n ...fields,\n apiKey,\n configuration: {\n baseURL: \"https://api.deepseek.com\",\n ...fields?.configuration,\n },\n });\n }\n\n protected override _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>,\n rawResponse: OpenAIClient.ChatCompletionChunk,\n defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"\n ) {\n const messageChunk = super._convertCompletionsDeltaToBaseMessageChunk(\n delta,\n rawResponse,\n defaultRole\n );\n messageChunk.additional_kwargs.reasoning_content = delta.reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n messageChunk.response_metadata = {\n ...messageChunk.response_metadata,\n model_provider: \"deepseek\",\n };\n return messageChunk;\n }\n\n protected override _convertCompletionsMessageToBaseMessage(\n message: OpenAIClient.ChatCompletionMessage,\n rawResponse: OpenAIClient.ChatCompletion\n ) {\n const langChainMessage = super._convertCompletionsMessageToBaseMessage(\n message,\n rawResponse\n );\n langChainMessage.additional_kwargs.reasoning_content =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (message as any).reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n langChainMessage.response_metadata = {\n ...langChainMessage.response_metadata,\n model_provider: \"deepseek\",\n };\n return langChainMessage;\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n const ensuredConfig = { ...config };\n // Deepseek does not support json schema yet\n if (ensuredConfig?.method === undefined) {\n ensuredConfig.method = \"functionCalling\";\n }\n return super.withStructuredOutput<RunOutput>(outputSchema, ensuredConfig);\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuZA,IAAa,eAAb,cAAkC,sBAA+C;CAC/E,OAAO,UAAU;AACf,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,mBACT;CACF;CAED,kBAAkB;CAElB,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,YAAYA,QAAqC;EAC/C,MAAM,SAAS,QAAQ,UAAU,uBAAuB,mBAAmB;AAC3E,MAAI,CAAC,OACH,OAAM,IAAI,MACR,CAAC,qHAAqH,CAAC;EAI3H,MAAM;GACJ,GAAG;GACH;GACA,eAAe;IACb,SAAS;IACT,GAAG,QAAQ;GACZ;EACF,EAAC;CACH;CAED,AAAmB,2CAEjBC,OACAC,aACAC,aAOA;EACA,MAAM,eAAe,MAAM,2CACzB,OACA,aACA,YACD;EACD,aAAa,kBAAkB,oBAAoB,MAAM;EAEzD,aAAa,oBAAoB;GAC/B,GAAG,aAAa;GAChB,gBAAgB;EACjB;AACD,SAAO;CACR;CAED,AAAmB,wCACjBC,SACAC,aACA;EACA,MAAM,mBAAmB,MAAM,wCAC7B,SACA,YACD;EACD,iBAAiB,kBAAkB,oBAEhC,QAAgB;EAEnB,iBAAiB,oBAAoB;GACnC,GAAG,iBAAiB;GACpB,gBAAgB;EACjB;AACD,SAAO;CACR;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,iBAAS,KAAK,UAAU,CAAE;CAClC;CAqCD,qBAIEC,cAIAC,QAMI;EACJ,MAAM,gBAAgB,EAAE,GAAG,OAAQ;AAEnC,MAAI,eAAe,WAAW,QAC5B,cAAc,SAAS;AAEzB,SAAO,MAAM,qBAAgC,cAAc,cAAc;CAC1E;AACF"}
1
+ {"version":3,"file":"chat_models.js","names":["fields?: Partial<ChatDeepSeekInput>","delta: Record<string, any>","rawResponse: OpenAIClient.ChatCompletionChunk","defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"","messages: BaseMessage[]","options: this[\"ParsedCallOptions\"]","runManager?: CallbackManagerForLLMRun","message: OpenAIClient.ChatCompletionMessage","rawResponse: OpenAIClient.ChatCompletion","PROFILES","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseMessage, AIMessageChunk } from \"@langchain/core/messages\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport {\n ChatOpenAICallOptions,\n ChatOpenAICompletions,\n ChatOpenAIFields,\n OpenAIClient,\n} from \"@langchain/openai\";\nimport { ChatGenerationChunk } from \"@langchain/core/outputs\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport PROFILES from \"./profiles.js\";\n\nexport interface ChatDeepSeekCallOptions extends ChatOpenAICallOptions {\n headers?: Record<string, string>;\n}\n\nexport interface ChatDeepSeekInput extends ChatOpenAIFields {\n /**\n * The Deepseek API key to use for requests.\n * @default process.env.DEEPSEEK_API_KEY\n */\n apiKey?: string;\n /**\n * The name of the model to use.\n */\n model?: string;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n * Alias for `stopSequences`\n */\n stop?: Array<string>;\n /**\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stopSequences?: Array<string>;\n /**\n * Whether or not to stream responses.\n */\n streaming?: boolean;\n /**\n * The temperature to use for sampling.\n */\n temperature?: number;\n /**\n * The maximum number of tokens that the model can process in a single response.\n * This limits ensures computational efficiency and resource management.\n */\n maxTokens?: number;\n}\n\n/**\n * Deepseek chat model integration.\n *\n * The Deepseek API is compatible to the OpenAI API with some limitations.\n *\n * Setup:\n * Install `@langchain/deepseek` and set an environment variable named `DEEPSEEK_API_KEY`.\n *\n * ```bash\n * npm install @langchain/deepseek\n * export DEEPSEEK_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_deepseek.ChatDeepSeek.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_deepseek.ChatDeepSeekCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * tools: [...],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatDeepSeek } from '@langchain/deepseek';\n *\n * const llm = new ChatDeepSeek({\n * model: \"deepseek-reasoner\",\n * temperature: 0,\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"tokenUsage\": {\n * \"completionTokens\": 82,\n * \"promptTokens\": 20,\n * \"totalTokens\": 102\n * },\n * \"finish_reason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"The\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" French\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" translation\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" of\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * 
\"content\": \" \\\"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"I\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \" love\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ...\n * AIMessageChunk {\n * \"content\": \".\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": null\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"The French translation of \\\"I love programming\\\" is \\\"J'aime programmer\\\". In this sentence, \\\"J'aime\\\" is the first person singular conjugation of the French verb \\\"aimer\\\" which means \\\"to love\\\", and \\\"programmer\\\" is the French infinitive for \\\"to program\\\". I hope this helps! Let me know if you have any other questions.\",\n * \"additional_kwargs\": {\n * \"reasoning_content\": \"...\",\n * },\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const llmForToolCalling = new ChatDeepSeek({\n * model: \"deepseek-chat\",\n * temperature: 0,\n * // other params...\n * });\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_cd34'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_68rf'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call',\n * id: 'call_f81z'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call',\n * id: 'call_8byt'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don't cats play poker in the wild?\",\n * punchline: 'Because there are too many cheetahs.'\n * }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions> {\n static lc_name() {\n return \"ChatDeepSeek\";\n }\n\n _llmType() {\n return \"deepseek\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"DEEPSEEK_API_KEY\",\n };\n }\n\n lc_serializable = true;\n\n lc_namespace = [\"langchain\", \"chat_models\", \"deepseek\"];\n\n constructor(fields?: Partial<ChatDeepSeekInput>) {\n const apiKey = fields?.apiKey || getEnvironmentVariable(\"DEEPSEEK_API_KEY\");\n if (!apiKey) {\n throw new Error(\n `Deepseek API key not found. 
Please set the DEEPSEEK_API_KEY environment variable or pass the key into \"apiKey\" field.`\n );\n }\n\n super({\n ...fields,\n apiKey,\n configuration: {\n baseURL: \"https://api.deepseek.com\",\n ...fields?.configuration,\n },\n });\n }\n\n protected override _convertCompletionsDeltaToBaseMessageChunk(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n delta: Record<string, any>,\n rawResponse: OpenAIClient.ChatCompletionChunk,\n defaultRole?:\n | \"function\"\n | \"user\"\n | \"system\"\n | \"developer\"\n | \"assistant\"\n | \"tool\"\n ) {\n const messageChunk = super._convertCompletionsDeltaToBaseMessageChunk(\n delta,\n rawResponse,\n defaultRole\n );\n messageChunk.additional_kwargs.reasoning_content = delta.reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n messageChunk.response_metadata = {\n ...messageChunk.response_metadata,\n model_provider: \"deepseek\",\n };\n return messageChunk;\n }\n\n async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const stream = super._streamResponseChunks(messages, options, runManager);\n\n // State for parsing <think> tags\n let tokensBuffer = \"\";\n let isThinking = false;\n\n for await (const chunk of stream) {\n // If the model already provided reasoning_content natively, just yield it\n if (chunk.message.additional_kwargs.reasoning_content) {\n yield chunk;\n continue;\n }\n\n const text = chunk.text;\n if (!text) {\n yield chunk;\n continue;\n }\n\n // Append text to buffer to handle split tags\n tokensBuffer += text;\n\n // Check for <think> start tag\n if (!isThinking && tokensBuffer.includes(\"<think>\")) {\n isThinking = true;\n const thinkIndex = tokensBuffer.indexOf(\"<think>\");\n const beforeThink = tokensBuffer.substring(0, thinkIndex);\n const afterThink = tokensBuffer.substring(\n thinkIndex + \"<think>\".length\n );\n\n // We consumed up to <think>, so buffer becomes what's after\n tokensBuffer = afterThink || \"\"; // might be empty or part of thought\n\n if (beforeThink) {\n // Send the content before the tag\n const newChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: beforeThink,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: beforeThink,\n generationInfo: chunk.generationInfo,\n });\n yield newChunk;\n }\n }\n\n // Check for </think> end tag\n if (isThinking && tokensBuffer.includes(\"</think>\")) {\n isThinking = false;\n const thinkEndIndex = tokensBuffer.indexOf(\"</think>\");\n const thoughtContent = tokensBuffer.substring(0, thinkEndIndex);\n const afterThink = tokensBuffer.substring(\n thinkEndIndex + \"</think>\".length\n );\n\n // Yield the reasoning content\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: {\n ...chunk.message.additional_kwargs,\n reasoning_content: thoughtContent,\n },\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: \"\",\n generationInfo: chunk.generationInfo,\n });\n yield reasoningChunk;\n\n // Reset buffer to what's after </think>\n tokensBuffer = afterThink || \"\";\n\n // Yield the rest as 
normal content if any\n if (tokensBuffer) {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: tokensBuffer,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: tokensBuffer,\n generationInfo: chunk.generationInfo,\n });\n yield contentChunk;\n tokensBuffer = \"\"; // consumed\n }\n } else if (isThinking) {\n // We are inside thinking block.\n // Check partial </think> match\n const possibleEndTag = \"</think>\";\n let splitIndex = -1;\n\n // Check if buffer ends with a prefix of </think> - Greedy check (longest first)\n for (let i = possibleEndTag.length - 1; i >= 1; i--) {\n if (tokensBuffer.endsWith(possibleEndTag.substring(0, i))) {\n splitIndex = tokensBuffer.length - i;\n break;\n }\n }\n\n if (splitIndex !== -1) {\n const safeToYield = tokensBuffer.substring(0, splitIndex);\n if (safeToYield) {\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: {\n ...chunk.message.additional_kwargs,\n reasoning_content: safeToYield,\n },\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: \"\",\n generationInfo: chunk.generationInfo,\n });\n yield reasoningChunk;\n }\n tokensBuffer = tokensBuffer.substring(splitIndex); // keep partial tag\n } else {\n // content is safe to yield as reasoning\n if (tokensBuffer) {\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: {\n ...chunk.message.additional_kwargs,\n reasoning_content: tokensBuffer,\n },\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: \"\",\n generationInfo: chunk.generationInfo,\n });\n yield reasoningChunk;\n tokensBuffer = \"\";\n }\n }\n } else {\n // NOT thinking.\n // Check partial start tag \"<think>\" - Greedy check (longest first)\n const possibleStartTag = \"<think>\";\n let splitIndex = -1;\n for (let i = possibleStartTag.length - 1; i >= 1; i--) {\n if (tokensBuffer.endsWith(possibleStartTag.substring(0, i))) {\n splitIndex = tokensBuffer.length - i;\n break;\n }\n }\n\n if (splitIndex !== -1) {\n // Yield safe content\n const safeToYield = tokensBuffer.substring(0, splitIndex);\n if (safeToYield) {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: safeToYield,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: safeToYield,\n generationInfo: chunk.generationInfo,\n });\n yield contentChunk;\n }\n tokensBuffer = tokensBuffer.substring(splitIndex); // keep partial tag\n } else {\n // Yield all\n if (tokensBuffer) {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: tokensBuffer,\n additional_kwargs: chunk.message.additional_kwargs,\n response_metadata: chunk.message.response_metadata,\n tool_calls: chunk.message.tool_calls,\n tool_call_chunks: chunk.message.tool_call_chunks,\n id: chunk.message.id,\n }),\n text: tokensBuffer,\n generationInfo: 
chunk.generationInfo,\n });\n yield contentChunk;\n tokensBuffer = \"\";\n }\n }\n }\n }\n\n // Flush remaining buffer at end of stream\n if (tokensBuffer) {\n // If we were thinking, it's unclosed thought.\n if (isThinking) {\n const reasoningChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: \"\",\n additional_kwargs: { reasoning_content: tokensBuffer },\n }),\n text: \"\",\n });\n yield reasoningChunk;\n } else {\n const contentChunk = new ChatGenerationChunk({\n message: new AIMessageChunk({\n content: tokensBuffer,\n }),\n text: tokensBuffer,\n });\n yield contentChunk;\n }\n }\n }\n\n protected override _convertCompletionsMessageToBaseMessage(\n message: OpenAIClient.ChatCompletionMessage,\n rawResponse: OpenAIClient.ChatCompletion\n ) {\n const langChainMessage = super._convertCompletionsMessageToBaseMessage(\n message,\n rawResponse\n );\n langChainMessage.additional_kwargs.reasoning_content =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (message as any).reasoning_content;\n // Override model_provider for DeepSeek-specific block translation\n langChainMessage.response_metadata = {\n ...langChainMessage.response_metadata,\n model_provider: \"deepseek\",\n };\n return langChainMessage;\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatDeepSeek({ model: \"deepseek-chat\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 128000\n * console.log(profile.imageInputs); // false\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n const ensuredConfig = { ...config };\n // Deepseek does not support json schema yet\n if (ensuredConfig?.method === undefined) {\n ensuredConfig.method = \"functionCalling\";\n }\n return super.withStructuredOutput<RunOutput>(outputSchema, ensuredConfig);\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAyZA,IAAa,eAAb,cAAkC,sBAA+C;CAC/E,OAAO,UAAU;AACf,SAAO;CACR;CAED,WAAW;AACT,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,mBACT;CACF;CAED,kBAAkB;CAElB,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,YAAYA,QAAqC;EAC/C,MAAM,SAAS,QAAQ,UAAU,uBAAuB,mBAAmB;AAC3E,MAAI,CAAC,OACH,OAAM,IAAI,MACR,CAAC,qHAAqH,CAAC;EAI3H,MAAM;GACJ,GAAG;GACH;GACA,eAAe;IACb,SAAS;IACT,GAAG,QAAQ;GACZ;EACF,EAAC;CACH;CAED,AAAmB,2CAEjBC,OACAC,aACAC,aAOA;EACA,MAAM,eAAe,MAAM,2CACzB,OACA,aACA,YACD;EACD,aAAa,kBAAkB,oBAAoB,MAAM;EAEzD,aAAa,oBAAoB;GAC/B,GAAG,aAAa;GAChB,gBAAgB;EACjB;AACD,SAAO;CACR;CAED,OAAO,sBACLC,UACAC,SACAC,YACqC;EACrC,MAAM,SAAS,MAAM,sBAAsB,UAAU,SAAS,WAAW;EAGzE,IAAI,eAAe;EACnB,IAAI,aAAa;AAEjB,aAAW,MAAM,SAAS,QAAQ;AAEhC,OAAI,MAAM,QAAQ,kBAAkB,mBAAmB;IACrD,MAAM;AACN;GACD;GAED,MAAM,OAAO,MAAM;AACnB,OAAI,CAAC,MAAM;IACT,MAAM;AACN;GACD;GAGD,gBAAgB;AAGhB,OAAI,CAAC,cAAc,aAAa,SAAS,UAAU,EAAE;IACnD,aAAa;IACb,MAAM,aAAa,aAAa,QAAQ,UAAU;IAClD,MAAM,cAAc,aAAa,UAAU,GAAG,WAAW;IACzD,MAAM,aAAa,aAAa,UAC9B,aAAa,EACd;IAGD,eAAe,cAAc;AAE7B,QAAI,aAAa;KAEf,MAAM,WAAW,IAAI,oBAAoB;MACvC,SAAS,IAAI,eAAe;OAC1B,SAAS;OACT,mBAAmB,MAAM,QAAQ;OACjC,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;IACP;GACF;AAGD,OAAI,cAAc,aAAa,SAAS,WAAW,EAAE;IACnD,aAAa;IACb,MAAM,gBAAgB,aAAa,QAAQ,WAAW;IACtD,MAAM,iBAAiB,aAAa,UAAU,GAAG,cAAc;IAC/D,MAAM,aAAa,aAAa,UAC9B,gBAAgB,EACjB;IAGD,MAAM,iBAAiB,IAAI,oBAAoB;KAC7C,SAAS,IAAI,eAAe;MAC1B,SAAS;MACT,mBAAmB;OACjB,GAAG,MAAM,QAAQ;OACjB,mBAAmB;MACpB;MACD,mBAAmB,MAAM,QAAQ;MACjC,YAAY,MAAM,QAAQ;MAC1B,kBAAkB,MAAM,QAAQ;MAChC,IAAI,MAAM,QAAQ;KACnB;KACD,MAAM;KACN,gBAAgB,MAAM;IACvB;IACD,MAAM;IAGN,eAAe,cAAc;AAG7B,QAAI,cAAc;KAChB,MAAM,eAAe,IAAI,oBAAoB;MAC3C,SAAS,IAAI,eAAe;OAC1B,SAAS;OACT,mBAAmB,MAAM,QAAQ;OACjC,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;KACN,eAAe;IAChB;GACF,WAAU,YAAY;IAGrB,MAAM,iBAAiB;IACvB,IAAI,aAAa;AAGjB,SAAK,IAAI,IAAI,GAA2B,KAAK,GAAG,IAC9C,KAAI,aAAa,SAAS,eAAe,UAAU,GAAG,EAAE,CAAC,EAAE;KACzD,aAAa,aAAa,SAAS;AACnC;IACD;AAGH,QAAI,eAAe,IAAI;KACrB,MAAM,cAAc,aAAa,UAAU,GAAG,WAAW;AACzD,SAAI,aAAa;MACf,MAAM,iBAAiB,IAAI,oBAAoB;OAC7C,SAAS,IAAI,eAAe;QAC1B,SAAS;QACT,mBAAmB;SACjB,GAAG,MAAM,QAAQ;SACjB,mBAAmB;QACpB;QACD,mBAAmB,MAAM,QAAQ;QACjC,YAAY,MAAM,QAAQ;QAC1B,kBAAkB,MAAM,QAAQ;QAChC,IAAI,MAAM,QAAQ;OACnB;OACD,MAAM;OACN,gBAAgB,MAAM;MACvB;MACD,MAAM;KACP;KACD,eAAe,aAAa,UAAU,WAAW;IAClD,WAEK,cAAc;KAChB,MAAM,iBAAiB,IAAI,oBAAoB;MAC7C,SAAS,IAAI,eAAe;OAC1B,SAAS;OACT,mBAAmB;QACjB,GAAG,MAAM,QAAQ;QACjB,mBAAmB;OACpB;OACD,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;KACN,eAAe;IAChB;GAEJ,OAAM;IAGL,MAAM,mBAAmB;IACzB,IAAI,aAAa;AACjB,SAAK,IAAI,IAAI,GAA6B,KAAK,GAAG,IAChD,KAAI,aAAa,SAAS,iBAAiB,UAAU,GAAG,EAAE,CAAC,EAAE;KAC3D,aAAa,aAAa,SAAS;AACnC;IACD;AAGH,QAAI,eAAe,IAAI;KAErB,MAAM,cAAc,aAAa,UAAU,GAAG,WAAW;AACzD,SAAI,aAAa;MACf,MAAM,eAAe,IAAI,oBAAoB;OAC3C,SAAS,IAAI,eAAe;QAC1B,SAAS;QACT,mBAAmB,MAAM,QAAQ;QACjC,mBAAmB,MAAM,QAAQ;QACjC,YAAY,MAAM,QAAQ;QAC1B,kBAAkB,MAAM,QAAQ;QAChC,IAAI,MAAM,QAAQ;OACnB;OACD,MAAM;OACN,gBAAgB,MAAM;MACvB;MACD,MAAM;KACP;KACD,eAAe,aAAa,UAAU,WA
AW;IAClD,WAEK,cAAc;KAChB,MAAM,eAAe,IAAI,oBAAoB;MAC3C,SAAS,IAAI,eAAe;OAC1B,SAAS;OACT,mBAAmB,MAAM,QAAQ;OACjC,mBAAmB,MAAM,QAAQ;OACjC,YAAY,MAAM,QAAQ;OAC1B,kBAAkB,MAAM,QAAQ;OAChC,IAAI,MAAM,QAAQ;MACnB;MACD,MAAM;MACN,gBAAgB,MAAM;KACvB;KACD,MAAM;KACN,eAAe;IAChB;GAEJ;EACF;AAGD,MAAI,aAEF,KAAI,YAAY;GACd,MAAM,iBAAiB,IAAI,oBAAoB;IAC7C,SAAS,IAAI,eAAe;KAC1B,SAAS;KACT,mBAAmB,EAAE,mBAAmB,aAAc;IACvD;IACD,MAAM;GACP;GACD,MAAM;EACP,OAAM;GACL,MAAM,eAAe,IAAI,oBAAoB;IAC3C,SAAS,IAAI,eAAe,EAC1B,SAAS,aACV;IACD,MAAM;GACP;GACD,MAAM;EACP;CAEJ;CAED,AAAmB,wCACjBC,SACAC,aACA;EACA,MAAM,mBAAmB,MAAM,wCAC7B,SACA,YACD;EACD,iBAAiB,kBAAkB,oBAEhC,QAAgB;EAEnB,iBAAiB,oBAAoB;GACnC,GAAG,iBAAiB;GACpB,gBAAgB;EACjB;AACD,SAAO;CACR;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,iBAAS,KAAK,UAAU,CAAE;CAClC;CAqCD,qBAIEC,cAIAC,QAMI;EACJ,MAAM,gBAAgB,EAAE,GAAG,OAAQ;AAEnC,MAAI,eAAe,WAAW,QAC5B,cAAc,SAAS;AAEzB,SAAO,MAAM,qBAAgC,cAAc,cAAc;CAC1E;AACF"}
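From the consumer side, the practical effect of this patch is that inline `<think>` spans no longer appear in the visible text of streamed chunks; they surface under `additional_kwargs.reasoning_content`, the same place the natively reported reasoning already lands. A minimal usage sketch follows, assuming `DEEPSEEK_API_KEY` is set and a model/deployment that produces reasoning either natively or as inline `<think>` tags (the model name and prompt are placeholders; actual chunk contents will vary):

```typescript
import { ChatDeepSeek } from "@langchain/deepseek";

const llm = new ChatDeepSeek({ model: "deepseek-reasoner", temperature: 0 });

let reasoning = "";
let answer = "";
for await (const chunk of await llm.stream("Why is the sky blue?")) {
  // Reasoning, whether reported natively or parsed out of <think>...</think>, arrives here.
  const rc = chunk.additional_kwargs.reasoning_content;
  if (typeof rc === "string") reasoning += rc;
  // chunk.content now carries only the user-visible answer text.
  if (typeof chunk.content === "string") answer += chunk.content;
}
console.log({ reasoning, answer });
```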
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@langchain/deepseek",
3
- "version": "1.0.6",
3
+ "version": "1.0.7",
4
4
  "description": "Deepseek integration for LangChain.js",
5
5
  "type": "module",
6
6
  "author": "LangChain",
@@ -28,9 +28,9 @@
28
28
  "prettier": "^3.5.0",
29
29
  "typescript": "~5.8.3",
30
30
  "vitest": "^3.2.4",
31
- "@langchain/core": "1.1.16",
32
31
  "@langchain/eslint": "0.1.1",
33
- "@langchain/standard-tests": "0.0.19",
32
+ "@langchain/core": "1.1.17",
33
+ "@langchain/standard-tests": "0.0.20",
34
34
  "@langchain/tsconfig": "0.0.1"
35
35
  },
36
36
  "publishConfig": {