@librechat/agents 3.1.77-dev.1 → 3.1.77

@@ -5,7 +5,7 @@ import { isAIMessage, AIMessage, AIMessageChunk } from '@langchain/core/messages
 import '@langchain/core/utils/function_calling';
 import { ChatDeepSeek as ChatDeepSeek$1 } from '@langchain/deepseek';
 import { getEndpoint, AzureChatOpenAI as AzureChatOpenAI$1, ChatOpenAI as ChatOpenAI$1, OpenAIClient, AzureChatOpenAICompletions, AzureChatOpenAIResponses, ChatOpenAIResponses, ChatOpenAICompletions, getHeadersWithUserAgent } from '@langchain/openai';
-import { isReasoningModel, _convertMessagesToOpenAIParams } from './utils/index.mjs';
+import { _convertMessagesToOpenAIParams, isReasoningModel } from './utils/index.mjs';
 import '../../common/enum.mjs';
 import 'nanoid';
 import '../../messages/core.mjs';
@@ -51,6 +51,47 @@ function normalizeHeaders(headers) {
     });
     return Object.fromEntries(output.entries());
 }
+function createUsageMetadata(usage) {
+    const usageMetadata = {
+        input_tokens: usage?.prompt_tokens ?? 0,
+        output_tokens: usage?.completion_tokens ?? 0,
+        total_tokens: usage?.total_tokens ?? 0,
+    };
+    if (usage == null) {
+        return usageMetadata;
+    }
+    const inputTokenDetails = {};
+    const outputTokenDetails = {};
+    let hasInputTokenDetails = false;
+    let hasOutputTokenDetails = false;
+    const audioInputTokens = usage.prompt_tokens_details?.audio_tokens;
+    const cachedInputTokens = usage.prompt_tokens_details?.cached_tokens;
+    const audioOutputTokens = usage.completion_tokens_details?.audio_tokens;
+    const reasoningOutputTokens = usage.completion_tokens_details?.reasoning_tokens;
+    if (audioInputTokens != null) {
+        inputTokenDetails.audio = audioInputTokens;
+        hasInputTokenDetails = true;
+    }
+    if (cachedInputTokens != null) {
+        inputTokenDetails.cache_read = cachedInputTokens;
+        hasInputTokenDetails = true;
+    }
+    if (audioOutputTokens != null) {
+        outputTokenDetails.audio = audioOutputTokens;
+        hasOutputTokenDetails = true;
+    }
+    if (reasoningOutputTokens != null) {
+        outputTokenDetails.reasoning = reasoningOutputTokens;
+        hasOutputTokenDetails = true;
+    }
+    if (hasInputTokenDetails) {
+        usageMetadata.input_token_details = inputTokenDetails;
+    }
+    if (hasOutputTokenDetails) {
+        usageMetadata.output_token_details = outputTokenDetails;
+    }
+    return usageMetadata;
+}
 function getExposedOpenAIClient(completions, responses, preferResponses) {
     const responsesClient = responses.client;
     if (responsesClient?.abortHandler != null) {
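
Note: the new `createUsageMetadata` helper maps an OpenAI-style `usage` payload onto LangChain's `UsageMetadata` shape, attaching cached/audio/reasoning token details only when the provider reports them. A minimal sketch of the mapping, using a hypothetical payload (not taken from the diff):

// Hypothetical usage payload, shaped like an OpenAI chat completions response.
const usage = {
    prompt_tokens: 120,
    completion_tokens: 80,
    total_tokens: 200,
    prompt_tokens_details: { cached_tokens: 64 },
    completion_tokens_details: { reasoning_tokens: 30 },
};
// createUsageMetadata(usage) returns:
// {
//     input_tokens: 120,
//     output_tokens: 80,
//     total_tokens: 200,
//     input_token_details: { cache_read: 64 },
//     output_token_details: { reasoning: 30 },
// }
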
@@ -791,6 +832,54 @@ class ChatDeepSeek extends ChatDeepSeek$1 {
     static lc_name() {
         return 'LibreChatDeepSeek';
     }
+    _convertDeepSeekMessages(messages) {
+        return _convertMessagesToOpenAIParams(messages, this.model, {
+            includeReasoningContent: true,
+        });
+    }
+    async _generate(messages, options, runManager) {
+        options.signal?.throwIfAborted();
+        const params = this.invocationParams(options);
+        if (params.stream === true) {
+            return super._generate(messages, options, runManager);
+        }
+        const messagesMapped = this._convertDeepSeekMessages(messages);
+        const response = await this.completionWithRetry({
+            ...params,
+            stream: false,
+            messages: messagesMapped,
+        }, {
+            signal: options.signal,
+            ...options.options,
+        });
+        const usageMetadata = createUsageMetadata(response.usage);
+        const generations = response.choices.map((part) => {
+            const text = part.message.content ?? '';
+            const generation = {
+                text,
+                message: this._convertCompletionsMessageToBaseMessage(part.message, response),
+            };
+            generation.generationInfo = {
+                finish_reason: part.finish_reason,
+                ...(part.logprobs != null ? { logprobs: part.logprobs } : {}),
+            };
+            if (isAIMessage(generation.message)) {
+                generation.message.usage_metadata = usageMetadata;
+            }
+            generation.message = new AIMessage(Object.fromEntries(Object.entries(generation.message).filter(([key]) => !key.startsWith('lc_'))));
+            return generation;
+        });
+        return {
+            generations,
+            llmOutput: {
+                tokenUsage: {
+                    promptTokens: usageMetadata.input_tokens,
+                    completionTokens: usageMetadata.output_tokens,
+                    totalTokens: usageMetadata.total_tokens,
+                },
+            },
+        };
+    }
     _getClientOptions(options) {
         if (!this.client) {
             const openAIEndpointConfig = {
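
Note: in the new non-streaming `_generate` path above, each message is rebuilt as a plain `AIMessage` with LangChain's internal `lc_`-prefixed serialization keys stripped. A small sketch of that filtering step on a hypothetical object:

// Hypothetical message-like object; lc_serializable stands in for the
// lc_-prefixed keys present on serializable LangChain messages.
const message = { content: 'hi', usage_metadata: { total_tokens: 5 }, lc_serializable: true };
const cleaned = Object.fromEntries(Object.entries(message).filter(([key]) => !key.startsWith('lc_')));
// cleaned → { content: 'hi', usage_metadata: { total_tokens: 5 } }
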
@@ -815,7 +904,234 @@ class ChatDeepSeek extends ChatDeepSeek$1 {
         return requestOptions;
     }
     async *_streamResponseChunks(messages, options, runManager) {
-        yield* delayStreamChunks(super._streamResponseChunks(messages, options, runManager), this._lc_stream_delay);
+        yield* delayStreamChunks(this._streamResponseChunksWithReasoning(messages, options, runManager), this._lc_stream_delay);
+    }
+    /** Parses raw `<think>` fallback tags across chunks and emits sanitized DeepSeek stream chunks. */
+    async *_streamResponseChunksWithReasoning(messages, options, runManager) {
+        const stream = this._streamResponseChunksFromReasoningMessages(messages, options);
+        const thinkStartTag = '<think>';
+        const thinkEndTag = '</think>';
+        let tokensBuffer = '';
+        let isThinking = false;
+        for await (const chunk of stream) {
+            if (options.signal?.aborted === true) {
+                throw new Error('AbortError');
+            }
+            const reasoningContent = chunk.message.additional_kwargs.reasoning_content;
+            if (reasoningContent != null && reasoningContent !== '') {
+                yield* this._yieldDeepSeekStreamChunk(chunk, runManager);
+                continue;
+            }
+            const text = chunk.text;
+            if (text === '') {
+                yield* this._yieldDeepSeekStreamChunk(chunk, runManager);
+                continue;
+            }
+            tokensBuffer += text;
+            while (tokensBuffer !== '') {
+                if (isThinking) {
+                    const thinkEndIndex = tokensBuffer.indexOf(thinkEndTag);
+                    if (thinkEndIndex !== -1) {
+                        const thoughtContent = tokensBuffer.substring(0, thinkEndIndex);
+                        if (thoughtContent !== '') {
+                            yield* this._yieldDeepSeekReasoningText(chunk, thoughtContent, runManager);
+                        }
+                        tokensBuffer = tokensBuffer.substring(thinkEndIndex + thinkEndTag.length);
+                        isThinking = false;
+                        continue;
+                    }
+                    const splitIndex = this._getDeepSeekPartialTagSplitIndex(tokensBuffer, thinkEndTag);
+                    if (splitIndex !== -1) {
+                        const safeToYield = tokensBuffer.substring(0, splitIndex);
+                        if (safeToYield !== '') {
+                            yield* this._yieldDeepSeekReasoningText(chunk, safeToYield, runManager);
+                        }
+                        tokensBuffer = tokensBuffer.substring(splitIndex);
+                        break;
+                    }
+                    yield* this._yieldDeepSeekReasoningText(chunk, tokensBuffer, runManager);
+                    tokensBuffer = '';
+                    break;
+                }
+                const thinkStartIndex = tokensBuffer.indexOf(thinkStartTag);
+                if (thinkStartIndex !== -1) {
+                    const beforeThink = tokensBuffer.substring(0, thinkStartIndex);
+                    if (beforeThink !== '') {
+                        yield* this._yieldDeepSeekStreamChunk(this._createDeepSeekStreamChunk(chunk, beforeThink), runManager);
+                    }
+                    tokensBuffer = tokensBuffer.substring(thinkStartIndex + thinkStartTag.length);
+                    isThinking = true;
+                    continue;
+                }
+                const splitIndex = this._getDeepSeekPartialTagSplitIndex(tokensBuffer, thinkStartTag);
+                if (splitIndex !== -1) {
+                    const safeToYield = tokensBuffer.substring(0, splitIndex);
+                    if (safeToYield !== '') {
+                        yield* this._yieldDeepSeekStreamChunk(this._createDeepSeekStreamChunk(chunk, safeToYield), runManager);
+                    }
+                    tokensBuffer = tokensBuffer.substring(splitIndex);
+                    break;
+                }
+                yield* this._yieldDeepSeekStreamChunk(this._createDeepSeekStreamChunk(chunk, tokensBuffer), runManager);
+                tokensBuffer = '';
+                break;
+            }
+        }
+        if (tokensBuffer === '') {
+            return;
+        }
+        if (isThinking) {
+            yield* this._yieldDeepSeekStreamChunk(new ChatGenerationChunk({
+                message: new AIMessageChunk({
+                    content: '',
+                    additional_kwargs: {
+                        reasoning_content: tokensBuffer,
+                    },
+                }),
+                text: '',
+            }), runManager);
+            return;
+        }
+        yield* this._yieldDeepSeekStreamChunk(new ChatGenerationChunk({
+            message: new AIMessageChunk({
+                content: tokensBuffer,
+            }),
+            text: tokensBuffer,
+        }), runManager);
+    }
+    async *_streamResponseChunksFromReasoningMessages(messages, options) {
+        const params = {
+            ...this.invocationParams(options, { streaming: true }),
+            stream: true,
+        };
+        const messagesMapped = this._convertDeepSeekMessages(messages);
+        const streamIterable = await this.completionWithRetry({
+            ...params,
+            messages: messagesMapped,
+        }, {
+            signal: options.signal,
+            ...options.options,
+        });
+        let defaultRole;
+        let usage;
+        for await (const data of streamIterable) {
+            if (options.signal?.aborted === true) {
+                throw new Error('AbortError');
+            }
+            if (data.usage != null) {
+                usage = data.usage;
+            }
+            if (data.choices.length === 0) {
+                continue;
+            }
+            const choice = data.choices[0];
+            const { delta } = choice;
+            const messageChunk = this._convertCompletionsDeltaToBaseMessageChunk(delta, data, defaultRole);
+            defaultRole = delta.role ?? defaultRole;
+            if (typeof messageChunk.content !== 'string') {
+                continue;
+            }
+            const messageText = messageChunk.content;
+            const newTokenIndices = {
+                prompt: options.promptIndex ?? 0,
+                completion: choice.index,
+            };
+            const generationInfo = { ...newTokenIndices };
+            if (choice.finish_reason != null) {
+                Object.assign(generationInfo, {
+                    finish_reason: choice.finish_reason,
+                    system_fingerprint: data.system_fingerprint,
+                    model_name: data.model,
+                    service_tier: data.service_tier,
+                });
+            }
+            if (this.logprobs === true) {
+                Object.assign(generationInfo, { logprobs: choice.logprobs });
+            }
+            const generationChunk = new ChatGenerationChunk({
+                message: messageChunk,
+                text: messageText,
+                generationInfo,
+            });
+            yield generationChunk;
+        }
+        if (usage != null) {
+            const usageMetadata = createUsageMetadata(usage);
+            const generationChunk = new ChatGenerationChunk({
+                message: new AIMessageChunk({
+                    content: '',
+                    response_metadata: {
+                        usage: { ...usage },
+                    },
+                    usage_metadata: usageMetadata,
+                }),
+                text: '',
+                generationInfo: {
+                    prompt: 0,
+                    completion: 0,
+                },
+            });
+            yield generationChunk;
+        }
+        if (options.signal?.aborted === true) {
+            throw new Error('AbortError');
+        }
+    }
+    _createDeepSeekStreamChunk(chunk, content, additionalKwargs, text = content) {
+        if (!(chunk.message instanceof AIMessageChunk)) {
+            return new ChatGenerationChunk({
+                message: new AIMessageChunk({
+                    content,
+                    additional_kwargs: additionalKwargs ?? chunk.message.additional_kwargs,
+                    response_metadata: chunk.message.response_metadata,
+                    id: chunk.message.id,
+                }),
+                text,
+                generationInfo: chunk.generationInfo,
+            });
+        }
+        const message = chunk.message;
+        return new ChatGenerationChunk({
+            message: new AIMessageChunk({
+                content,
+                additional_kwargs: additionalKwargs ?? message.additional_kwargs,
+                response_metadata: message.response_metadata,
+                tool_calls: message.tool_calls,
+                tool_call_chunks: message.tool_call_chunks,
+                id: message.id,
+            }),
+            text,
+            generationInfo: chunk.generationInfo,
+        });
+    }
+    _createDeepSeekReasoningStreamChunk(chunk, reasoningContent) {
+        return this._createDeepSeekStreamChunk(chunk, '', {
+            ...chunk.message.additional_kwargs,
+            reasoning_content: reasoningContent,
+        }, '');
+    }
+    async *_yieldDeepSeekReasoningText(chunk, reasoningContent, runManager) {
+        yield* this._yieldDeepSeekStreamChunk(this._createDeepSeekReasoningStreamChunk(chunk, reasoningContent), runManager);
+    }
+    async *_yieldDeepSeekStreamChunk(chunk, runManager) {
+        yield chunk;
+        await runManager?.handleLLMNewToken(chunk.text, this._getDeepSeekTokenIndices(chunk), undefined, undefined, undefined, { chunk });
+    }
+    _getDeepSeekTokenIndices(chunk) {
+        const prompt = chunk.generationInfo?.prompt;
+        const completion = chunk.generationInfo?.completion;
+        if (typeof prompt === 'number' && typeof completion === 'number') {
+            return { prompt, completion };
+        }
+        return undefined;
+    }
+    _getDeepSeekPartialTagSplitIndex(text, tag) {
+        for (let i = tag.length - 1; i >= 1; i--) {
+            if (text.endsWith(tag.substring(0, i))) {
+                return text.length - i;
+            }
+        }
+        return -1;
     }
 }
 class ChatMoonshot extends ChatOpenAI {
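
Note: the heart of the streaming change is the partial-tag buffering in `_getDeepSeekPartialTagSplitIndex`: before yielding buffered text, the parser checks whether the buffer ends with a proper prefix of `<think>` or `</think>` and, if so, holds that tail back until the next chunk resolves it. A minimal standalone sketch of the same technique, simplified to plain strings (no LangChain chunk objects, callbacks, or abort handling):

// Mirrors _getDeepSeekPartialTagSplitIndex: returns the index where `text`
// must be split because its tail could be the beginning of `tag`, or -1 if
// the tail cannot start the tag.
function partialTagSplitIndex(text, tag) {
    for (let i = tag.length - 1; i >= 1; i--) {
        if (text.endsWith(tag.substring(0, i))) {
            return text.length - i;
        }
    }
    return -1;
}

// Simplified version of the chunk loop: splits an incoming token stream into
// { type: 'text' | 'reasoning', value } segments around <think>...</think>.
function* splitThinkStream(chunks) {
    const START = '<think>';
    const END = '</think>';
    let buffer = '';
    let thinking = false;
    for (const chunk of chunks) {
        buffer += chunk;
        while (buffer !== '') {
            const tag = thinking ? END : START;
            const tagIndex = buffer.indexOf(tag);
            if (tagIndex !== -1) {
                // Complete tag in the buffer: emit what precedes it, flip modes.
                const before = buffer.substring(0, tagIndex);
                if (before !== '') {
                    yield { type: thinking ? 'reasoning' : 'text', value: before };
                }
                buffer = buffer.substring(tagIndex + tag.length);
                thinking = !thinking;
                continue;
            }
            // No complete tag: emit everything except a tail that might be the
            // start of one, and keep that tail buffered for the next chunk.
            const splitIndex = partialTagSplitIndex(buffer, tag);
            const safe = splitIndex === -1 ? buffer : buffer.substring(0, splitIndex);
            if (safe !== '') {
                yield { type: thinking ? 'reasoning' : 'text', value: safe };
            }
            buffer = splitIndex === -1 ? '' : buffer.substring(splitIndex);
            break;
        }
    }
    // Flush any leftover partial tag at end of stream, as the diff does.
    if (buffer !== '') {
        yield { type: thinking ? 'reasoning' : 'text', value: buffer };
    }
}

// Example: the closing tag arrives split across two chunks.
console.log([...splitThinkStream(['<think>plan</th', 'ink>answer'])]);
// → [ { type: 'reasoning', value: 'plan' }, { type: 'text', value: 'answer' } ]

Without the partial-prefix check, a tag split across chunks (as in the example) would leak fragments like `</th` into the visible text instead of being recognized as the end of the reasoning block.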