@librechat/agents 3.1.28 → 3.1.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/common/enum.cjs +1 -0
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/llm/openai/index.cjs +277 -1
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/providers.cjs +14 -13
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/esm/common/enum.mjs +1 -0
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/llm/openai/index.mjs +277 -2
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/providers.mjs +2 -1
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/types/common/enum.d.ts +2 -1
- package/dist/types/llm/openai/index.d.ts +13 -0
- package/dist/types/types/llm.d.ts +3 -1
- package/package.json +4 -3
- package/src/common/enum.ts +1 -0
- package/src/llm/openai/index.ts +347 -1
- package/src/llm/providers.ts +2 -0
- package/src/specs/deepseek.simple.test.ts +283 -0
- package/src/specs/moonshot.simple.test.ts +358 -0
- package/src/types/llm.ts +3 -0
- package/src/utils/llmConfig.ts +10 -0
|
@@ -1,16 +1,19 @@
|
|
|
1
1
|
import { AzureOpenAI as AzureOpenAIClient } from 'openai';
|
|
2
2
|
import { ChatXAI as OriginalChatXAI } from '@langchain/xai';
|
|
3
3
|
import { ChatGenerationChunk } from '@langchain/core/outputs';
|
|
4
|
+
import { AIMessage } from '@langchain/core/messages';
|
|
4
5
|
import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
|
|
5
6
|
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
|
|
6
7
|
import { OpenAIClient, ChatOpenAI as OriginalChatOpenAI, AzureChatOpenAI as OriginalAzureChatOpenAI } from '@langchain/openai';
|
|
7
8
|
import type { HeaderValue, HeadersLike } from './types';
|
|
8
9
|
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
|
|
9
10
|
import type { BaseMessage } from '@langchain/core/messages';
|
|
11
|
+
import type { ChatResult } from '@langchain/core/outputs';
|
|
10
12
|
import type { ChatXAIInput } from '@langchain/xai';
|
|
11
13
|
import type * as t from '@langchain/openai';
|
|
12
14
|
export declare function isHeaders(headers: unknown): headers is Headers;
|
|
13
15
|
export declare function normalizeHeaders(headers: HeadersLike): Record<string, HeaderValue | readonly HeaderValue[]>;
|
|
16
|
+
type OpenAICompletionParam = OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
|
|
14
17
|
type OpenAICoreRequestOptions = OpenAIClient.RequestOptions;
|
|
15
18
|
/**
|
|
16
19
|
* Formats a tool in either OpenAI format, or LangChain structured tool format
|
|
@@ -76,6 +79,9 @@ export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
|
|
|
76
79
|
export declare class ChatDeepSeek extends OriginalChatDeepSeek {
|
|
77
80
|
get exposedClient(): CustomOpenAIClient;
|
|
78
81
|
static lc_name(): 'LibreChatDeepSeek';
|
|
82
|
+
protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
|
|
83
|
+
_generate(messages: BaseMessage[], options: this['ParsedCallOptions'] | undefined, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
|
|
84
|
+
protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
|
|
79
85
|
protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
|
|
80
86
|
_streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
|
|
81
87
|
}
|
|
@@ -95,6 +101,13 @@ export interface XAIUsageMetadata extends OpenAIClient.Completions.CompletionUsa
|
|
|
95
101
|
};
|
|
96
102
|
num_sources_used?: number;
|
|
97
103
|
}
|
|
104
|
+
export declare class ChatMoonshot extends ChatOpenAI {
|
|
105
|
+
static lc_name(): 'LibreChatMoonshot';
|
|
106
|
+
protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
|
|
107
|
+
_generate(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
|
|
108
|
+
protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
|
|
109
|
+
_streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
|
|
110
|
+
}
|
|
98
111
|
export declare class ChatXAI extends OriginalChatXAI {
|
|
99
112
|
_lc_stream_delay?: number;
|
|
100
113
|
constructor(fields?: Partial<ChatXAIInput> & {
|
|
@@ -14,7 +14,7 @@ import type { AnthropicInput } from '@langchain/anthropic';
|
|
|
14
14
|
import type { Runnable } from '@langchain/core/runnables';
|
|
15
15
|
import type { OpenAI as OpenAIClient } from 'openai';
|
|
16
16
|
import type { ChatXAIInput } from '@langchain/xai';
|
|
17
|
-
import { AzureChatOpenAI, ChatDeepSeek, ChatOpenAI, ChatXAI } from '@/llm/openai';
|
|
17
|
+
import { AzureChatOpenAI, ChatDeepSeek, ChatMoonshot, ChatOpenAI, ChatXAI } from '@/llm/openai';
|
|
18
18
|
import { CustomChatGoogleGenerativeAI } from '@/llm/google';
|
|
19
19
|
import { CustomChatBedrockConverse } from '@/llm/bedrock';
|
|
20
20
|
import { CustomAnthropic } from '@/llm/anthropic';
|
|
@@ -80,6 +80,7 @@ export type ProviderOptionsMap = {
|
|
|
80
80
|
[Providers.OPENROUTER]: ChatOpenRouterCallOptions;
|
|
81
81
|
[Providers.BEDROCK]: BedrockConverseClientOptions;
|
|
82
82
|
[Providers.XAI]: XAIClientOptions;
|
|
83
|
+
[Providers.MOONSHOT]: OpenAIClientOptions;
|
|
83
84
|
};
|
|
84
85
|
export type ChatModelMap = {
|
|
85
86
|
[Providers.XAI]: ChatXAI;
|
|
@@ -93,6 +94,7 @@ export type ChatModelMap = {
|
|
|
93
94
|
[Providers.OPENROUTER]: ChatOpenRouter;
|
|
94
95
|
[Providers.BEDROCK]: CustomChatBedrockConverse;
|
|
95
96
|
[Providers.GOOGLE]: CustomChatGoogleGenerativeAI;
|
|
97
|
+
[Providers.MOONSHOT]: ChatMoonshot;
|
|
96
98
|
};
|
|
97
99
|
export type ChatModelConstructorMap = {
|
|
98
100
|
[P in Providers]: new (config: ProviderOptionsMap[P]) => ChatModelMap[P];
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@librechat/agents",
|
|
3
|
-
"version": "3.1.28",
|
|
3
|
+
"version": "3.1.30",
|
|
4
4
|
"main": "./dist/cjs/main.cjs",
|
|
5
5
|
"module": "./dist/esm/main.mjs",
|
|
6
6
|
"types": "./dist/types/index.d.ts",
|
|
@@ -109,10 +109,11 @@
|
|
|
109
109
|
"@langchain/openai": "0.5.18",
|
|
110
110
|
"@browserbasehq/stagehand": {
|
|
111
111
|
"openai": "$openai"
|
|
112
|
-
}
|
|
112
|
+
},
|
|
113
|
+
"fast-xml-parser": "5.3.4"
|
|
113
114
|
},
|
|
114
115
|
"dependencies": {
|
|
115
|
-
"@aws-sdk/client-bedrock-runtime": "^3.
|
|
116
|
+
"@aws-sdk/client-bedrock-runtime": "^3.980.0",
|
|
116
117
|
"@langchain/anthropic": "^0.3.26",
|
|
117
118
|
"@langchain/aws": "^0.1.15",
|
|
118
119
|
"@langchain/core": "^0.3.80",
|
package/src/common/enum.ts
CHANGED
package/src/llm/openai/index.ts
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { AzureOpenAI as AzureOpenAIClient } from 'openai';
|
|
2
|
-
import { AIMessageChunk } from '@langchain/core/messages';
|
|
3
2
|
import { ChatXAI as OriginalChatXAI } from '@langchain/xai';
|
|
4
3
|
import { ChatGenerationChunk } from '@langchain/core/outputs';
|
|
4
|
+
import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
|
|
5
5
|
import { ToolDefinition } from '@langchain/core/language_models/base';
|
|
6
6
|
import { isLangChainTool } from '@langchain/core/utils/function_calling';
|
|
7
7
|
import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
|
|
@@ -21,6 +21,7 @@ import type {
|
|
|
21
21
|
} from './types';
|
|
22
22
|
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
|
|
23
23
|
import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
|
|
24
|
+
import type { ChatResult, ChatGeneration } from '@langchain/core/outputs';
|
|
24
25
|
import type { ChatXAIInput } from '@langchain/xai';
|
|
25
26
|
import type * as t from '@langchain/openai';
|
|
26
27
|
import {
|
|
@@ -616,6 +617,111 @@ export class ChatDeepSeek extends OriginalChatDeepSeek {
|
|
|
616
617
|
static lc_name(): 'LibreChatDeepSeek' {
|
|
617
618
|
return 'LibreChatDeepSeek';
|
|
618
619
|
}
|
|
620
|
+
|
|
621
|
+
protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[] {
|
|
622
|
+
return _convertMessagesToOpenAIParams(messages, this.model, {
|
|
623
|
+
includeReasoningContent: true,
|
|
624
|
+
});
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
async _generate(
|
|
628
|
+
messages: BaseMessage[],
|
|
629
|
+
options: this['ParsedCallOptions'] | undefined,
|
|
630
|
+
runManager?: CallbackManagerForLLMRun
|
|
631
|
+
): Promise<ChatResult> {
|
|
632
|
+
const params = this.invocationParams(options);
|
|
633
|
+
|
|
634
|
+
if (params.stream === true) {
|
|
635
|
+
return super._generate(messages, options ?? {}, runManager);
|
|
636
|
+
}
|
|
637
|
+
|
|
638
|
+
const messagesMapped = this._convertMessages(messages);
|
|
639
|
+
const data = await this.completionWithRetry(
|
|
640
|
+
{
|
|
641
|
+
...params,
|
|
642
|
+
stream: false,
|
|
643
|
+
messages: messagesMapped,
|
|
644
|
+
},
|
|
645
|
+
{
|
|
646
|
+
signal: options?.signal,
|
|
647
|
+
...options?.options,
|
|
648
|
+
}
|
|
649
|
+
);
|
|
650
|
+
|
|
651
|
+
const { completion_tokens, prompt_tokens, total_tokens } = data.usage ?? {};
|
|
652
|
+
|
|
653
|
+
const generations = [];
|
|
654
|
+
for (const part of data.choices ?? []) {
|
|
655
|
+
const text = part.message.content ?? '';
|
|
656
|
+
const generation: ChatGeneration = {
|
|
657
|
+
text: typeof text === 'string' ? text : '',
|
|
658
|
+
message: this._convertResponseToMessage(part, data),
|
|
659
|
+
};
|
|
660
|
+
generation.generationInfo = {
|
|
661
|
+
...(part.finish_reason != null
|
|
662
|
+
? { finish_reason: part.finish_reason }
|
|
663
|
+
: {}),
|
|
664
|
+
...(part.logprobs ? { logprobs: part.logprobs } : {}),
|
|
665
|
+
};
|
|
666
|
+
generations.push(generation);
|
|
667
|
+
}
|
|
668
|
+
|
|
669
|
+
return {
|
|
670
|
+
generations,
|
|
671
|
+
llmOutput: {
|
|
672
|
+
tokenUsage: {
|
|
673
|
+
completionTokens: completion_tokens,
|
|
674
|
+
promptTokens: prompt_tokens,
|
|
675
|
+
totalTokens: total_tokens,
|
|
676
|
+
},
|
|
677
|
+
},
|
|
678
|
+
};
|
|
679
|
+
}
|
|
680
|
+
|
|
681
|
+
protected _convertResponseToMessage(
|
|
682
|
+
choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice,
|
|
683
|
+
data: OpenAIClient.Chat.Completions.ChatCompletion
|
|
684
|
+
): AIMessage {
|
|
685
|
+
const { message } = choice;
|
|
686
|
+
const rawToolCalls = message.tool_calls;
|
|
687
|
+
const toolCalls = rawToolCalls?.map((tc) => ({
|
|
688
|
+
id: tc.id,
|
|
689
|
+
name: tc.function.name,
|
|
690
|
+
args: JSON.parse(tc.function.arguments || '{}'),
|
|
691
|
+
type: 'tool_call' as const,
|
|
692
|
+
}));
|
|
693
|
+
|
|
694
|
+
const additional_kwargs: Record<string, unknown> = {};
|
|
695
|
+
if (rawToolCalls) {
|
|
696
|
+
additional_kwargs.tool_calls = rawToolCalls;
|
|
697
|
+
}
|
|
698
|
+
if (
|
|
699
|
+
'reasoning_content' in message &&
|
|
700
|
+
message.reasoning_content != null &&
|
|
701
|
+
message.reasoning_content !== ''
|
|
702
|
+
) {
|
|
703
|
+
additional_kwargs.reasoning_content = message.reasoning_content;
|
|
704
|
+
}
|
|
705
|
+
|
|
706
|
+
return new AIMessage({
|
|
707
|
+
content: message.content ?? '',
|
|
708
|
+
tool_calls: toolCalls,
|
|
709
|
+
additional_kwargs,
|
|
710
|
+
usage_metadata: data.usage
|
|
711
|
+
? {
|
|
712
|
+
input_tokens: data.usage.prompt_tokens,
|
|
713
|
+
output_tokens: data.usage.completion_tokens,
|
|
714
|
+
total_tokens: data.usage.total_tokens,
|
|
715
|
+
}
|
|
716
|
+
: undefined,
|
|
717
|
+
response_metadata: {
|
|
718
|
+
model_name: data.model,
|
|
719
|
+
system_fingerprint: data.system_fingerprint,
|
|
720
|
+
finish_reason: choice.finish_reason,
|
|
721
|
+
},
|
|
722
|
+
});
|
|
723
|
+
}
|
|
724
|
+
|
|
619
725
|
protected _getClientOptions(
|
|
620
726
|
options?: OpenAICoreRequestOptions
|
|
621
727
|
): OpenAICoreRequestOptions {
|
|
@@ -789,6 +895,246 @@ export interface XAIUsageMetadata
|
|
|
789
895
|
num_sources_used?: number;
|
|
790
896
|
}
|
|
791
897
|
|
|
898
|
+
export class ChatMoonshot extends ChatOpenAI {
|
|
899
|
+
static lc_name(): 'LibreChatMoonshot' {
|
|
900
|
+
return 'LibreChatMoonshot';
|
|
901
|
+
}
|
|
902
|
+
|
|
903
|
+
protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[] {
|
|
904
|
+
return _convertMessagesToOpenAIParams(messages, this.model, {
|
|
905
|
+
includeReasoningContent: true,
|
|
906
|
+
});
|
|
907
|
+
}
|
|
908
|
+
|
|
909
|
+
async _generate(
|
|
910
|
+
messages: BaseMessage[],
|
|
911
|
+
options: this['ParsedCallOptions'],
|
|
912
|
+
runManager?: CallbackManagerForLLMRun
|
|
913
|
+
): Promise<ChatResult> {
|
|
914
|
+
const params = this.invocationParams(options);
|
|
915
|
+
|
|
916
|
+
if (params.stream === true) {
|
|
917
|
+
return super._generate(messages, options, runManager);
|
|
918
|
+
}
|
|
919
|
+
|
|
920
|
+
const messagesMapped = this._convertMessages(messages);
|
|
921
|
+
const data = await this.completionWithRetry(
|
|
922
|
+
{
|
|
923
|
+
...params,
|
|
924
|
+
stream: false,
|
|
925
|
+
messages: messagesMapped,
|
|
926
|
+
},
|
|
927
|
+
{
|
|
928
|
+
signal: options.signal,
|
|
929
|
+
...options.options,
|
|
930
|
+
}
|
|
931
|
+
);
|
|
932
|
+
|
|
933
|
+
const { completion_tokens, prompt_tokens, total_tokens } = data.usage ?? {};
|
|
934
|
+
|
|
935
|
+
const generations = [];
|
|
936
|
+
for (const part of data.choices ?? []) {
|
|
937
|
+
const text = part.message.content ?? '';
|
|
938
|
+
const generation: ChatGeneration = {
|
|
939
|
+
text: typeof text === 'string' ? text : '',
|
|
940
|
+
message: this._convertResponseToMessage(part, data),
|
|
941
|
+
};
|
|
942
|
+
generation.generationInfo = {
|
|
943
|
+
...(part.finish_reason ? { finish_reason: part.finish_reason } : {}),
|
|
944
|
+
...(part.logprobs ? { logprobs: part.logprobs } : {}),
|
|
945
|
+
};
|
|
946
|
+
generations.push(generation);
|
|
947
|
+
}
|
|
948
|
+
|
|
949
|
+
return {
|
|
950
|
+
generations,
|
|
951
|
+
llmOutput: {
|
|
952
|
+
tokenUsage: {
|
|
953
|
+
completionTokens: completion_tokens,
|
|
954
|
+
promptTokens: prompt_tokens,
|
|
955
|
+
totalTokens: total_tokens,
|
|
956
|
+
},
|
|
957
|
+
},
|
|
958
|
+
};
|
|
959
|
+
}
|
|
960
|
+
|
|
961
|
+
protected _convertResponseToMessage(
|
|
962
|
+
choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice,
|
|
963
|
+
data: OpenAIClient.Chat.Completions.ChatCompletion
|
|
964
|
+
): AIMessage {
|
|
965
|
+
const { message } = choice;
|
|
966
|
+
const rawToolCalls = message.tool_calls;
|
|
967
|
+
const toolCalls = rawToolCalls?.map((tc) => ({
|
|
968
|
+
id: tc.id,
|
|
969
|
+
name: tc.function.name,
|
|
970
|
+
args: JSON.parse(tc.function.arguments || '{}'),
|
|
971
|
+
type: 'tool_call' as const,
|
|
972
|
+
}));
|
|
973
|
+
|
|
974
|
+
const additional_kwargs: Record<string, unknown> = {};
|
|
975
|
+
if (rawToolCalls) {
|
|
976
|
+
additional_kwargs.tool_calls = rawToolCalls;
|
|
977
|
+
}
|
|
978
|
+
if (
|
|
979
|
+
'reasoning_content' in message &&
|
|
980
|
+
message.reasoning_content != null &&
|
|
981
|
+
message.reasoning_content !== ''
|
|
982
|
+
) {
|
|
983
|
+
additional_kwargs.reasoning_content = message.reasoning_content;
|
|
984
|
+
}
|
|
985
|
+
|
|
986
|
+
return new AIMessage({
|
|
987
|
+
content: message.content ?? '',
|
|
988
|
+
tool_calls: toolCalls,
|
|
989
|
+
additional_kwargs,
|
|
990
|
+
usage_metadata: data.usage
|
|
991
|
+
? {
|
|
992
|
+
input_tokens: data.usage.prompt_tokens,
|
|
993
|
+
output_tokens: data.usage.completion_tokens,
|
|
994
|
+
total_tokens: data.usage.total_tokens,
|
|
995
|
+
}
|
|
996
|
+
: undefined,
|
|
997
|
+
response_metadata: {
|
|
998
|
+
model_name: data.model,
|
|
999
|
+
system_fingerprint: data.system_fingerprint,
|
|
1000
|
+
finish_reason: choice.finish_reason,
|
|
1001
|
+
},
|
|
1002
|
+
});
|
|
1003
|
+
}
|
|
1004
|
+
|
|
1005
|
+
async *_streamResponseChunks(
|
|
1006
|
+
messages: BaseMessage[],
|
|
1007
|
+
options: this['ParsedCallOptions'],
|
|
1008
|
+
runManager?: CallbackManagerForLLMRun
|
|
1009
|
+
): AsyncGenerator<ChatGenerationChunk> {
|
|
1010
|
+
const messagesMapped: OpenAICompletionParam[] =
|
|
1011
|
+
_convertMessagesToOpenAIParams(messages, this.model, {
|
|
1012
|
+
includeReasoningContent: true,
|
|
1013
|
+
});
|
|
1014
|
+
|
|
1015
|
+
const params = {
|
|
1016
|
+
...this.invocationParams(options, {
|
|
1017
|
+
streaming: true,
|
|
1018
|
+
}),
|
|
1019
|
+
messages: messagesMapped,
|
|
1020
|
+
stream: true as const,
|
|
1021
|
+
};
|
|
1022
|
+
let defaultRole: OpenAIRoleEnum | undefined;
|
|
1023
|
+
|
|
1024
|
+
const streamIterable = await this.completionWithRetry(params, options);
|
|
1025
|
+
let usage: OpenAIClient.Completions.CompletionUsage | undefined;
|
|
1026
|
+
for await (const data of streamIterable) {
|
|
1027
|
+
const choice = data.choices[0] as
|
|
1028
|
+
| Partial<OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice>
|
|
1029
|
+
| undefined;
|
|
1030
|
+
if (data.usage) {
|
|
1031
|
+
usage = data.usage;
|
|
1032
|
+
}
|
|
1033
|
+
if (!choice) {
|
|
1034
|
+
continue;
|
|
1035
|
+
}
|
|
1036
|
+
|
|
1037
|
+
const { delta } = choice;
|
|
1038
|
+
if (!delta) {
|
|
1039
|
+
continue;
|
|
1040
|
+
}
|
|
1041
|
+
const chunk = this._convertOpenAIDeltaToBaseMessageChunk(
|
|
1042
|
+
delta,
|
|
1043
|
+
data,
|
|
1044
|
+
defaultRole
|
|
1045
|
+
);
|
|
1046
|
+
if ('reasoning_content' in delta) {
|
|
1047
|
+
chunk.additional_kwargs.reasoning_content = delta.reasoning_content;
|
|
1048
|
+
}
|
|
1049
|
+
defaultRole = delta.role ?? defaultRole;
|
|
1050
|
+
const newTokenIndices = {
|
|
1051
|
+
prompt: (options as OpenAIChatCallOptions).promptIndex ?? 0,
|
|
1052
|
+
completion: choice.index ?? 0,
|
|
1053
|
+
};
|
|
1054
|
+
if (typeof chunk.content !== 'string') {
|
|
1055
|
+
// eslint-disable-next-line no-console
|
|
1056
|
+
console.log(
|
|
1057
|
+
'[WARNING]: Received non-string content from OpenAI. This is currently not supported.'
|
|
1058
|
+
);
|
|
1059
|
+
continue;
|
|
1060
|
+
}
|
|
1061
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
1062
|
+
const generationInfo: Record<string, any> = { ...newTokenIndices };
|
|
1063
|
+
if (choice.finish_reason != null) {
|
|
1064
|
+
generationInfo.finish_reason = choice.finish_reason;
|
|
1065
|
+
generationInfo.system_fingerprint = data.system_fingerprint;
|
|
1066
|
+
generationInfo.model_name = data.model;
|
|
1067
|
+
generationInfo.service_tier = data.service_tier;
|
|
1068
|
+
}
|
|
1069
|
+
if (this.logprobs == true) {
|
|
1070
|
+
generationInfo.logprobs = choice.logprobs;
|
|
1071
|
+
}
|
|
1072
|
+
const generationChunk = new ChatGenerationChunk({
|
|
1073
|
+
message: chunk,
|
|
1074
|
+
text: chunk.content,
|
|
1075
|
+
generationInfo,
|
|
1076
|
+
});
|
|
1077
|
+
yield generationChunk;
|
|
1078
|
+
if (this._lc_stream_delay != null) {
|
|
1079
|
+
await sleep(this._lc_stream_delay);
|
|
1080
|
+
}
|
|
1081
|
+
await runManager?.handleLLMNewToken(
|
|
1082
|
+
generationChunk.text || '',
|
|
1083
|
+
newTokenIndices,
|
|
1084
|
+
undefined,
|
|
1085
|
+
undefined,
|
|
1086
|
+
undefined,
|
|
1087
|
+
{ chunk: generationChunk }
|
|
1088
|
+
);
|
|
1089
|
+
}
|
|
1090
|
+
if (usage) {
|
|
1091
|
+
const inputTokenDetails = {
|
|
1092
|
+
...(usage.prompt_tokens_details?.audio_tokens != null && {
|
|
1093
|
+
audio: usage.prompt_tokens_details.audio_tokens,
|
|
1094
|
+
}),
|
|
1095
|
+
...(usage.prompt_tokens_details?.cached_tokens != null && {
|
|
1096
|
+
cache_read: usage.prompt_tokens_details.cached_tokens,
|
|
1097
|
+
}),
|
|
1098
|
+
};
|
|
1099
|
+
const outputTokenDetails = {
|
|
1100
|
+
...(usage.completion_tokens_details?.audio_tokens != null && {
|
|
1101
|
+
audio: usage.completion_tokens_details.audio_tokens,
|
|
1102
|
+
}),
|
|
1103
|
+
...(usage.completion_tokens_details?.reasoning_tokens != null && {
|
|
1104
|
+
reasoning: usage.completion_tokens_details.reasoning_tokens,
|
|
1105
|
+
}),
|
|
1106
|
+
};
|
|
1107
|
+
const generationChunk = new ChatGenerationChunk({
|
|
1108
|
+
message: new AIMessageChunk({
|
|
1109
|
+
content: '',
|
|
1110
|
+
response_metadata: {
|
|
1111
|
+
usage: { ...usage },
|
|
1112
|
+
},
|
|
1113
|
+
usage_metadata: {
|
|
1114
|
+
input_tokens: usage.prompt_tokens,
|
|
1115
|
+
output_tokens: usage.completion_tokens,
|
|
1116
|
+
total_tokens: usage.total_tokens,
|
|
1117
|
+
...(Object.keys(inputTokenDetails).length > 0 && {
|
|
1118
|
+
input_token_details: inputTokenDetails,
|
|
1119
|
+
}),
|
|
1120
|
+
...(Object.keys(outputTokenDetails).length > 0 && {
|
|
1121
|
+
output_token_details: outputTokenDetails,
|
|
1122
|
+
}),
|
|
1123
|
+
},
|
|
1124
|
+
}),
|
|
1125
|
+
text: '',
|
|
1126
|
+
});
|
|
1127
|
+
yield generationChunk;
|
|
1128
|
+
if (this._lc_stream_delay != null) {
|
|
1129
|
+
await sleep(this._lc_stream_delay);
|
|
1130
|
+
}
|
|
1131
|
+
}
|
|
1132
|
+
if (options.signal?.aborted === true) {
|
|
1133
|
+
throw new Error('AbortError');
|
|
1134
|
+
}
|
|
1135
|
+
}
|
|
1136
|
+
}
|
|
1137
|
+
|
|
792
1138
|
export class ChatXAI extends OriginalChatXAI {
|
|
793
1139
|
_lc_stream_delay?: number;
|
|
794
1140
|
|
package/src/llm/providers.ts
CHANGED
|
@@ -8,6 +8,7 @@ import type {
|
|
|
8
8
|
import {
|
|
9
9
|
AzureChatOpenAI,
|
|
10
10
|
ChatDeepSeek,
|
|
11
|
+
ChatMoonshot,
|
|
11
12
|
ChatOpenAI,
|
|
12
13
|
ChatXAI,
|
|
13
14
|
} from '@/llm/openai';
|
|
@@ -31,6 +32,7 @@ export const llmProviders: Partial<ChatModelConstructorMap> = {
|
|
|
31
32
|
[Providers.BEDROCK]: CustomChatBedrockConverse,
|
|
32
33
|
// [Providers.ANTHROPIC]: ChatAnthropic,
|
|
33
34
|
[Providers.GOOGLE]: CustomChatGoogleGenerativeAI,
|
|
35
|
+
[Providers.MOONSHOT]: ChatMoonshot,
|
|
34
36
|
};
|
|
35
37
|
|
|
36
38
|
export const manualToolStreamProviders = new Set<Providers | string>([
|