@lobehub/chat 1.53.11 → 1.54.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their public registries.
- package/CHANGELOG.md +58 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/modelProvider.json +0 -1
- package/locales/ar/setting.json +12 -9
- package/locales/bg-BG/modelProvider.json +0 -1
- package/locales/bg-BG/setting.json +12 -9
- package/locales/de-DE/modelProvider.json +0 -1
- package/locales/de-DE/setting.json +13 -10
- package/locales/en-US/modelProvider.json +0 -1
- package/locales/en-US/setting.json +12 -9
- package/locales/es-ES/modelProvider.json +0 -1
- package/locales/es-ES/setting.json +12 -9
- package/locales/fa-IR/modelProvider.json +0 -1
- package/locales/fa-IR/setting.json +12 -9
- package/locales/fr-FR/modelProvider.json +0 -1
- package/locales/fr-FR/setting.json +12 -9
- package/locales/it-IT/modelProvider.json +0 -1
- package/locales/it-IT/setting.json +13 -10
- package/locales/ja-JP/modelProvider.json +0 -1
- package/locales/ja-JP/setting.json +12 -9
- package/locales/ko-KR/modelProvider.json +0 -1
- package/locales/ko-KR/setting.json +12 -9
- package/locales/nl-NL/modelProvider.json +0 -1
- package/locales/nl-NL/setting.json +12 -9
- package/locales/pl-PL/modelProvider.json +0 -1
- package/locales/pl-PL/setting.json +12 -9
- package/locales/pt-BR/modelProvider.json +0 -1
- package/locales/pt-BR/setting.json +13 -10
- package/locales/ru-RU/modelProvider.json +0 -1
- package/locales/ru-RU/setting.json +12 -9
- package/locales/tr-TR/modelProvider.json +0 -1
- package/locales/tr-TR/setting.json +12 -9
- package/locales/vi-VN/modelProvider.json +0 -1
- package/locales/vi-VN/setting.json +12 -9
- package/locales/zh-CN/modelProvider.json +0 -1
- package/locales/zh-CN/setting.json +13 -10
- package/locales/zh-TW/modelProvider.json +0 -1
- package/locales/zh-TW/setting.json +12 -9
- package/package.json +1 -1
- package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/components/InfoTooltip/index.tsx +25 -0
- package/src/components/Loading/UpdateLoading/index.tsx +19 -0
- package/src/config/aiModels/index.ts +3 -0
- package/src/config/aiModels/nvidia.ts +155 -0
- package/src/config/aiModels/spark.ts +9 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/nvidia.ts +21 -0
- package/src/features/ChatInput/ActionBar/Params/ParamsControls.tsx +95 -0
- package/src/features/ChatInput/ActionBar/Params/index.tsx +47 -0
- package/src/features/ChatInput/ActionBar/config.ts +3 -2
- package/src/features/ChatInput/Mobile/index.tsx +1 -1
- package/src/features/ModelParamsControl/FrequencyPenalty.tsx +37 -0
- package/src/features/ModelParamsControl/PresencePenalty.tsx +35 -0
- package/src/features/ModelParamsControl/Temperature.tsx +71 -0
- package/src/features/ModelParamsControl/TopP.tsx +39 -0
- package/src/features/ModelParamsControl/index.ts +4 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
- package/src/libs/agent-runtime/ai360/index.ts +37 -21
- package/src/libs/agent-runtime/anthropic/index.ts +17 -5
- package/src/libs/agent-runtime/baichuan/index.ts +11 -2
- package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
- package/src/libs/agent-runtime/deepseek/index.ts +29 -13
- package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
- package/src/libs/agent-runtime/giteeai/index.ts +46 -30
- package/src/libs/agent-runtime/github/index.test.ts +0 -49
- package/src/libs/agent-runtime/github/index.ts +18 -6
- package/src/libs/agent-runtime/google/index.ts +17 -7
- package/src/libs/agent-runtime/groq/index.ts +43 -27
- package/src/libs/agent-runtime/higress/index.ts +45 -25
- package/src/libs/agent-runtime/huggingface/index.ts +20 -9
- package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
- package/src/libs/agent-runtime/internlm/index.ts +27 -12
- package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
- package/src/libs/agent-runtime/mistral/index.ts +24 -14
- package/src/libs/agent-runtime/moonshot/index.ts +28 -13
- package/src/libs/agent-runtime/novita/index.ts +35 -18
- package/src/libs/agent-runtime/nvidia/index.ts +44 -0
- package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
- package/src/libs/agent-runtime/ollama/index.ts +33 -5
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
- package/src/libs/agent-runtime/openai/index.ts +43 -27
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
- package/src/libs/agent-runtime/openrouter/index.ts +51 -33
- package/src/libs/agent-runtime/qwen/index.ts +45 -29
- package/src/libs/agent-runtime/sensenova/index.ts +24 -6
- package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
- package/src/libs/agent-runtime/stepfun/index.ts +42 -26
- package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
- package/src/libs/agent-runtime/togetherai/index.ts +19 -6
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/libs/agent-runtime/xai/index.ts +28 -13
- package/src/libs/agent-runtime/zeroone/index.ts +29 -13
- package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
- package/src/libs/agent-runtime/zhipu/index.ts +18 -6
- package/src/locales/default/setting.ts +12 -9
- package/src/types/user/settings/keyVaults.ts +1 -0
- package/src/features/ChatInput/ActionBar/Temperature.tsx +0 -49
- package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
- package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
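Nearly all of the agent-runtime changes below apply the same refactor: each provider's statically configured `models` transform is replaced by an async resolver that lazily imports `LOBE_DEFAULT_MODEL_LIST` from `@/config/aiModels`, fetches the live model list from the provider's OpenAI-compatible endpoint, and merges in known metadata (context window, display name, and the `functionCall`/`reasoning`/`vision` abilities). A condensed sketch of the shared shape, assembled from the hunks below — `ProviderModelCard` is an illustrative stand-in for each provider's own card interface, and the per-provider keyword heuristics are omitted:

import type { ChatModelCard } from '@/types/llm';

interface ProviderModelCard {
  id: string;
}

const models = async ({ client }: { client: { models: { list: () => Promise<unknown> } } }) => {
  // Lazy import avoids a static dependency on the bundled default model list.
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

  const modelsPage = await client.models.list() as any;
  const modelList: ProviderModelCard[] = modelsPage.data;

  return modelList
    .map((model) => {
      // Case-insensitive match against the defaults recovers known metadata.
      const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());

      return {
        contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
        displayName: knownModel?.displayName ?? undefined,
        enabled: knownModel?.enabled || false,
        functionCall: knownModel?.abilities?.functionCall || false,
        id: model.id,
        reasoning: knownModel?.abilities?.reasoning || false,
        vision: knownModel?.abilities?.vision || false,
      };
    })
    .filter(Boolean) as ChatModelCard[];
};

Providers whose list endpoints return richer metadata override individual fields: Groq uses `context_window`, Higress and Novita parse model descriptions, and Mistral reads `capabilities.function_calling` and `capabilities.vision` directly.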
package/src/libs/agent-runtime/groq/index.ts
@@ -2,7 +2,7 @@ import { AgentRuntimeErrorType } from '../error';
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { …
+import type { ChatModelCard } from '@/types/llm';
 
 export interface GroqModelCard {
   context_window: number;
@@ -31,33 +31,49 @@ export const LobeGroq = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const functionCallKeywords = [
-        'tool',
-        'llama-3.3',
-        'llama-3.1',
-        'llama3-',
-        'mixtral-8x7b-32768',
-        'gemma2-9b-it',
-      ];
-
-      const reasoningKeywords = [
-        'deepseek-r1',
-      ];
-
-      const model = m as unknown as GroqModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const functionCallKeywords = [
+      'tool',
+      'llama-3.3',
+      'llama-3.1',
+      'llama3-',
+      'mixtral-8x7b-32768',
+      'gemma2-9b-it',
+    ];
+
+    const reasoningKeywords = [
+      'deepseek-r1',
+    ];
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: GroqModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_window,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Groq,
 });
package/src/libs/agent-runtime/higress/index.ts
@@ -1,11 +1,19 @@
 import { uniqueId } from 'lodash-es';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
-
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
- …
+import type { ChatModelCard } from '@/types/llm';
+
+export interface HigressModelCard {
+  context_length: number;
+  description: string;
+  id: string;
+  name: string;
+  top_provider: {
+    max_completion_tokens: number;
+  }
+}
 
 export const LobeHigressAI = LobeOpenAICompatibleFactory({
   constructorOptions: {
@@ -18,29 +26,41 @@ export const LobeHigressAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HIGRESS_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const model = m as any;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const modelsPage = await client.models.list() as any;
+    const modelList: HigressModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_length,
+          description: model.description,
+          displayName: model.name,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.includes('function calling')
+            || model.description.includes('tools')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          maxTokens: model.top_provider.max_completion_tokens,
+          reasoning:
+            model.description.includes('reasoning')
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.includes('vision')
+            || model.description.includes('multimodal')
+            || model.id.includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Higress,
 });
package/src/libs/agent-runtime/huggingface/index.ts
@@ -6,7 +6,6 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { convertIterableToStream } from '../utils/streams';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 
 export interface HuggingFaceModelCard {
@@ -56,6 +55,8 @@ export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
     chatCompletion: () => process.env.DEBUG_HUGGINGFACE_CHAT_COMPLETION === '1',
   },
   models: async () => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const visionKeywords = [
       'image-text-to-text',
       'multimodal',
@@ -79,16 +80,26 @@
 
     return modelList
       .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
         return {
-          contextWindowTokens: …
-          displayName: …
-          enabled: …
-          functionCall: …
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.tags.some(tag => tag.toLowerCase().includes('function-calling'))
+            || knownModel?.abilities?.functionCall
+            || false,
           id: model.id,
-          reasoning: …
- …
+          reasoning:
+            model.tags.some(tag => tag.toLowerCase().includes('reasoning'))
+            || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.tags.some(tag => visionKeywords.some(keyword => tag.toLowerCase().includes(keyword)))
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
package/src/libs/agent-runtime/hunyuan/index.ts
@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { …
+import type { ChatModelCard } from '@/types/llm';
 
 export interface HunyuanModelCard {
   id: string;
@@ -12,25 +12,41 @@ export const LobeHunyuanAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HUNYUAN_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const functionCallKeywords = [
-        'hunyuan-functioncall',
-        'hunyuan-turbo',
-        'hunyuan-pro',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const functionCallKeywords = [
+      'hunyuan-functioncall',
+      'hunyuan-turbo',
+      'hunyuan-pro',
+    ];
 
- …
+    const modelsPage = await client.models.list() as any;
+    const modelList: HunyuanModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Hunyuan,
 });
package/src/libs/agent-runtime/internlm/index.ts
@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { …
+import type { ChatModelCard } from '@/types/llm';
 
 export interface InternLMModelCard {
   id: string;
@@ -20,18 +20,33 @@ export const LobeInternLMAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const model = m as unknown as InternLMModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const modelsPage = await client.models.list() as any;
+    const modelList: InternLMModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.InternLM,
 });
package/src/libs/agent-runtime/lmstudio/index.ts
@@ -1,11 +1,45 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import type { ChatModelCard } from '@/types/llm';
+
+export interface LMStudioModelCard {
+  id: string;
+}
+
 export const LobeLMStudioAI = LobeOpenAICompatibleFactory({
   apiKey: 'placeholder-to-avoid-error',
   baseURL: 'http://127.0.0.1:1234/v1',
   debug: {
     chatCompletion: () => process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION === '1',
   },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: LMStudioModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
   provider: ModelProvider.LMStudio,
 });
package/src/libs/agent-runtime/mistral/index.ts
@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { …
+import type { ChatModelCard } from '@/types/llm';
 
 export interface MistralModelCard {
   capabilities: {
@@ -30,20 +30,30 @@ export const LobeMistralAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const model = m as unknown as MistralModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const modelsPage = await client.models.list() as any;
+    const modelList: MistralModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.max_context_length,
+          description: model.description,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall: model.capabilities.function_calling,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision: model.capabilities.vision,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Mistral,
 });
package/src/libs/agent-runtime/moonshot/index.ts
@@ -3,7 +3,7 @@ import OpenAI from 'openai';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { …
+import type { ChatModelCard } from '@/types/llm';
 
 export interface MoonshotModelCard {
   id: string;
@@ -24,19 +24,34 @@ export const LobeMoonshotAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MOONSHOT_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const model = m as unknown as MoonshotModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const modelsPage = await client.models.list() as any;
+    const modelList: MoonshotModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Moonshot,
 });
package/src/libs/agent-runtime/novita/index.ts
@@ -2,7 +2,7 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { NovitaModelCard } from './type';
 
-import { …
+import type { ChatModelCard } from '@/types/llm';
 
 export const LobeNovitaAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.novita.ai/v3/openai',
@@ -14,25 +14,42 @@ export const LobeNovitaAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_NOVITA_CHAT_COMPLETION === '1',
   },
-  models: {
- …
-      const reasoningKeywords = [
-        'deepseek-r1',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
- …
+    const reasoningKeywords = [
+      'deepseek-r1',
+    ];
 
- …
+    const modelsPage = await client.models.list() as any;
+    const modelList: NovitaModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_size,
+          description: model.description,
+          displayName: model.title,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.toLowerCase().includes('function calling')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            model.description.toLowerCase().includes('reasoning task')
+            || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Novita,
 });
package/src/libs/agent-runtime/nvidia/index.ts
@@ -0,0 +1,44 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+import type { ChatModelCard } from '@/types/llm';
+
+export interface NvidiaModelCard {
+  id: string;
+}
+
+export const LobeNvidiaAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://integrate.api.nvidia.com/v1',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_NVIDIA_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: NvidiaModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
+  provider: ModelProvider.Nvidia,
+});
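The nvidia/index.ts hunk above adds a brand-new provider module. Judging from the Ollama test updated below, classes produced by `LobeOpenAICompatibleFactory` expose a `models()` method that returns the merged card list; a hypothetical call against the new runtime, with constructor options assumed to match the other OpenAI-compatible providers:

// Hypothetical usage sketch, not part of the diff.
const nvidia = new LobeNvidiaAI({ apiKey: process.env.NVIDIA_API_KEY });
const cards = await nvidia.models(); // ChatModelCard[], enriched from LOBE_DEFAULT_MODEL_LIST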
package/src/libs/agent-runtime/ollama/index.test.ts
@@ -145,7 +145,26 @@ describe('LobeOllamaAI', () => {
       const models = await ollamaAI.models();
 
       expect(listMock).toHaveBeenCalled();
-      expect(models).toEqual([ …
+      expect(models).toEqual([
+        {
+          contextWindowTokens: undefined,
+          displayName: undefined,
+          enabled: false,
+          functionCall: false,
+          id: 'model-1',
+          reasoning: false,
+          vision: false
+        },
+        {
+          contextWindowTokens: undefined,
+          displayName: undefined,
+          enabled: false,
+          functionCall: false,
+          id: 'model-2',
+          reasoning: false,
+          vision: false
+        }
+      ]);
     });
   });
 