@lobehub/chat 1.53.10 → 1.53.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/locales/ar/modelProvider.json +2 -2
- package/locales/bg-BG/modelProvider.json +2 -2
- package/locales/de-DE/modelProvider.json +2 -2
- package/locales/en-US/modelProvider.json +2 -2
- package/locales/es-ES/modelProvider.json +2 -2
- package/locales/fa-IR/modelProvider.json +2 -2
- package/locales/fr-FR/modelProvider.json +2 -2
- package/locales/it-IT/modelProvider.json +2 -2
- package/locales/ja-JP/modelProvider.json +2 -2
- package/locales/ko-KR/modelProvider.json +2 -2
- package/locales/nl-NL/modelProvider.json +2 -2
- package/locales/pl-PL/modelProvider.json +2 -2
- package/locales/pt-BR/modelProvider.json +2 -2
- package/locales/ru-RU/modelProvider.json +2 -2
- package/locales/tr-TR/modelProvider.json +2 -2
- package/locales/vi-VN/modelProvider.json +2 -2
- package/locales/zh-CN/modelProvider.json +3 -3
- package/locales/zh-TW/modelProvider.json +2 -2
- package/package.json +1 -1
- package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +8 -8
- package/src/config/aiModels/spark.ts +9 -0
- package/src/libs/agent-runtime/ai360/index.ts +37 -21
- package/src/libs/agent-runtime/anthropic/index.ts +17 -5
- package/src/libs/agent-runtime/baichuan/index.ts +11 -2
- package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
- package/src/libs/agent-runtime/deepseek/index.ts +29 -13
- package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
- package/src/libs/agent-runtime/giteeai/index.ts +46 -30
- package/src/libs/agent-runtime/github/index.test.ts +0 -49
- package/src/libs/agent-runtime/github/index.ts +18 -6
- package/src/libs/agent-runtime/google/index.ts +17 -7
- package/src/libs/agent-runtime/groq/index.ts +43 -27
- package/src/libs/agent-runtime/higress/index.ts +45 -25
- package/src/libs/agent-runtime/huggingface/index.ts +20 -9
- package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
- package/src/libs/agent-runtime/internlm/index.ts +27 -12
- package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
- package/src/libs/agent-runtime/mistral/index.ts +24 -14
- package/src/libs/agent-runtime/moonshot/index.ts +28 -13
- package/src/libs/agent-runtime/novita/index.ts +35 -18
- package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
- package/src/libs/agent-runtime/ollama/index.ts +33 -5
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
- package/src/libs/agent-runtime/openai/index.ts +43 -27
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
- package/src/libs/agent-runtime/openrouter/index.ts +51 -33
- package/src/libs/agent-runtime/qwen/index.ts +45 -29
- package/src/libs/agent-runtime/sensenova/index.ts +24 -6
- package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
- package/src/libs/agent-runtime/stepfun/index.ts +42 -26
- package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
- package/src/libs/agent-runtime/togetherai/index.ts +19 -6
- package/src/libs/agent-runtime/xai/index.ts +28 -13
- package/src/libs/agent-runtime/zeroone/index.ts +29 -13
- package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
- package/src/libs/agent-runtime/zhipu/index.ts +18 -6
- package/src/locales/default/modelProvider.ts +1 -2
- package/src/server/manifest.ts +2 -2
- package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
- package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
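
Nearly every agent-runtime change in this release applies the same refactor: the static `LOBE_DEFAULT_MODEL_LIST` import is removed and replaced with a lazy `await import('@/config/aiModels')` inside an async `models()` resolver, and each provider's fetched model list is enriched by matching model IDs against the built-in catalog, with per-provider keyword heuristics filling in `functionCall`, `reasoning`, and `vision`. Below is a condensed sketch of that shape — not any single provider's exact code; the `RemoteModelCard` type and the keyword list are illustrative placeholders (real providers type their own card shapes, e.g. `GroqModelCard`):

```ts
import type { ChatModelCard } from '@/types/llm';

// Illustrative shape; each provider types this per its API.
interface RemoteModelCard {
  id: string;
}

const resolveModels = async (remote: RemoteModelCard[]): Promise<ChatModelCard[]> => {
  // Lazy import: the large default-model catalog is only loaded
  // when a provider actually lists models.
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

  // Illustrative heuristic list; providers ship their own keywords.
  const reasoningKeywords = ['deepseek-r1', 'qwq'];

  return remote
    .map((model) => {
      // Case-insensitive lookup against the built-in catalog.
      const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
        (m) => model.id.toLowerCase() === m.id.toLowerCase(),
      );

      return {
        contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
        displayName: knownModel?.displayName ?? undefined,
        enabled: knownModel?.enabled || false,
        functionCall: knownModel?.abilities?.functionCall || false,
        id: model.id,
        // Keyword heuristic first, catalog ability as fallback.
        reasoning:
          reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
          knownModel?.abilities?.reasoning ||
          false,
        vision: knownModel?.abilities?.vision || false,
      };
    })
    .filter(Boolean) as ChatModelCard[];
};
```

The dynamic import likely defers loading the sizable catalog module until model listing is requested, breaking the static dependency visible in the removed import lines of the diffs that follow.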
package/src/libs/agent-runtime/cloudflare/index.ts
@@ -12,7 +12,6 @@ import { debugStream } from '../utils/debugStream';
 import { StreamingResponse } from '../utils/response';
 import { createCallbacksTransformer } from '../utils/streams';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import { ChatModelCard } from '@/types/llm';
 
 export interface CloudflareModelCard {
@@ -113,6 +112,8 @@ export class LobeCloudflareAI implements LobeRuntimeAI {
   }
 
   async models(): Promise<ChatModelCard[]> {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const url = `${DEFAULT_BASE_URL_PREFIX}/client/v4/accounts/${this.accountID}/ai/models/search`;
     const response = await fetch(url, {
       headers: {
@@ -127,16 +128,30 @@ export class LobeCloudflareAI implements LobeRuntimeAI {
 
     return modelList
       .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
+
         return {
           contextWindowTokens: model.properties?.max_total_tokens
             ? Number(model.properties.max_total_tokens)
-            :
-          displayName:
-          enabled:
-          functionCall:
+            : knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? (model.properties?.["beta"] === "true" ? `${model.name} (Beta)` : undefined),
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.toLowerCase().includes('function call')
+            || model.properties?.["function_calling"] === "true"
+            || knownModel?.abilities?.functionCall
+            || false,
           id: model.name,
-          reasoning:
-
+          reasoning:
+            model.name.toLowerCase().includes('deepseek-r1')
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.name.toLowerCase().includes('vision')
+            || model.task?.name.toLowerCase().includes('image-to-text')
+            || model.description.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
package/src/libs/agent-runtime/deepseek/index.ts
@@ -3,7 +3,7 @@ import OpenAI from 'openai';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import {
+import type { ChatModelCard } from '@/types/llm';
 
 export interface DeepSeekModelCard {
   id: string;
@@ -59,19 +59,35 @@ export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
   },
-  models: {
-
-      const model = m as unknown as DeepSeekModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-
-
-
-
-
-
-
-
-
+    const modelsPage = await client.models.list() as any;
+    const modelList: DeepSeekModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            !model.id.toLowerCase().includes('reasoner')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            model.id.toLowerCase().includes('reasoner')
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.DeepSeek,
 });
package/src/libs/agent-runtime/fireworksai/index.ts
@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import {
+import type { ChatModelCard } from '@/types/llm';
 
 export interface FireworksAIModelCard {
   context_length: number;
@@ -15,25 +15,37 @@ export const LobeFireworksAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION === '1',
   },
-  models: {
-
-      const reasoningKeywords = [
-        'deepseek-r1',
-        'qwq',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-
+    const reasoningKeywords = [
+      'deepseek-r1',
+      'qwq',
+    ];
 
-
-
-
-
-
-
-
-
-
+    const modelsPage = await client.models.list() as any;
+    const modelList: FireworksAIModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_length,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.supports_tools
+            || model.id.toLowerCase().includes('function'),
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision: model.supports_image_input,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.FireworksAI,
 });
package/src/libs/agent-runtime/giteeai/index.ts
@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import {
+import type { ChatModelCard } from '@/types/llm';
 
 export interface GiteeAIModelCard {
   id: string;
@@ -12,35 +12,51 @@ export const LobeGiteeAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_GITEE_AI_CHAT_COMPLETION === '1',
   },
-  models: {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const functionCallKeywords = [
+      'qwen2.5',
+      'glm-4',
+    ];
+
+    const visionKeywords = [
+      'internvl',
+      'qwen2-vl',
+    ];
+
+    const reasoningKeywords = [
+      'deepseek-r1',
+      'qwq',
+    ];
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: GiteeAIModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('qwen2.5-coder')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.GiteeAI,
 });
package/src/libs/agent-runtime/github/index.test.ts
@@ -210,53 +210,4 @@ describe('LobeGithubAI', () => {
     });
   });
 });
-
-  describe('models', () => {
-    beforeEach(() => {});
-
-    it('should return a list of models', async () => {
-      // Arrange
-      const arr = [
-        {
-          id: 'azureml://registries/azureml-cohere/models/Cohere-command-r/versions/3',
-          name: 'Cohere-command-r',
-          friendly_name: 'Cohere Command R',
-          model_version: 3,
-          publisher: 'cohere',
-          model_family: 'cohere',
-          model_registry: 'azureml-cohere',
-          license: 'custom',
-          task: 'chat-completion',
-          description:
-            "Command R is a highly performant generative large language model, optimized for a variety of use cases including reasoning, summarization, and question answering. \n\nThe model is optimized to perform well in the following languages: English, French, Spanish, Italian, German, Brazilian Portuguese, Japanese, Korean, Simplified Chinese, and Arabic.\n\nPre-training data additionally included the following 13 languages: Russian, Polish, Turkish, Vietnamese, Dutch, Czech, Indonesian, Ukrainian, Romanian, Greek, Hindi, Hebrew, Persian.\n\n## Resources\n\nFor full details of this model, [release blog post](https://aka.ms/cohere-blog).\n\n## Model Architecture\n\nThis is an auto-regressive language model that uses an optimized transformer architecture. After pretraining, this model uses supervised fine-tuning (SFT) and preference training to align model behavior to human preferences for helpfulness and safety.\n\n### Tool use capabilities\n\nCommand R has been specifically trained with conversational tool use capabilities. These have been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template will likely reduce performance, but we encourage experimentation.\n\nCommand R's tool use functionality takes a conversation as input (with an optional user-system preamble), along with a list of available tools. The model will then generate a json-formatted list of actions to execute on a subset of those tools. Command R may use one of its supplied tools more than once.\n\nThe model has been trained to recognise a special directly_answer tool, which it uses to indicate that it doesn't want to use any of its other tools. The ability to abstain from calling a specific tool can be useful in a range of situations, such as greeting a user, or asking clarifying questions. We recommend including the directly_answer tool, but it can be removed or renamed if required.\n\n### Grounded Generation and RAG Capabilities\n\nCommand R has been specifically trained with grounded generation capabilities. This means that it can generate responses based on a list of supplied document snippets, and it will include grounding spans (citations) in its response indicating the source of the information. This can be used to enable behaviors such as grounded summarization and the final step of Retrieval Augmented Generation (RAG).This behavior has been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template may reduce performance, but we encourage experimentation.\n\nCommand R's grounded generation behavior takes a conversation as input (with an optional user-supplied system preamble, indicating task, context and desired output style), along with a list of retrieved document snippets. The document snippets should be chunks, rather than long documents, typically around 100-400 words per chunk. Document snippets consist of key-value pairs. The keys should be short descriptive strings, the values can be text or semi-structured.\n\nBy default, Command R will generate grounded responses by first predicting which documents are relevant, then predicting which ones it will cite, then generating an answer. Finally, it will then insert grounding spans into the answer. See below for an example. This is referred to as accurate grounded generation.\n\nThe model is trained with a number of other answering modes, which can be selected by prompt changes . A fast citation mode is supported in the tokenizer, which will directly generate an answer with grounding spans in it, without first writing the answer out in full. This sacrifices some grounding accuracy in favor of generating fewer tokens.\n\n### Code Capabilities\n\nCommand R has been optimized to interact with your code, by requesting code snippets, code explanations, or code rewrites. It might not perform well out-of-the-box for pure code completion. For better performance, we also recommend using a low temperature (and even greedy decoding) for code-generation related instructions.\n",
-          summary:
-            'Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.',
-          tags: ['rag', 'multilingual'],
-        },
-      ];
-      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
-        body: arr,
-      } as any);
-
-      // Act & Assert
-      const models = await instance.models();
-
-      const modelsCount = models.length;
-      expect(modelsCount).toBe(arr.length);
-
-      for (let i = 0; i < arr.length; i++) {
-        const model = models[i];
-        expect(model).toEqual({
-          contextWindowTokens: undefined,
-          description: arr[i].description,
-          displayName: arr[i].friendly_name,
-          enabled: false,
-          functionCall: true,
-          id: arr[i].name,
-          reasoning: false,
-          vision: false,
-        });
-      }
-    });
-  });
 });
package/src/libs/agent-runtime/github/index.ts
@@ -3,7 +3,6 @@ import { pruneReasoningPayload } from '../openai';
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 
 export interface GithubModelCard {
@@ -38,6 +37,8 @@ export const LobeGithubAI = LobeOpenAICompatibleFactory({
     invalidAPIKey: AgentRuntimeErrorType.InvalidGithubToken,
   },
   models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const functionCallKeywords = [
       'function',
       'tool',
@@ -58,15 +59,26 @@ export const LobeGithubAI = LobeOpenAICompatibleFactory({
 
     return modelList
       .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
+
         return {
-          contextWindowTokens:
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
           description: model.description,
           displayName: model.friendly_name,
-          enabled:
-          functionCall:
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.description.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
           id: model.name,
-          reasoning:
-
+          reasoning:
+            reasoningKeywords.some(keyword => model.name.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            visionKeywords.some(keyword => model.description.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
package/src/libs/agent-runtime/google/index.ts
@@ -8,7 +8,6 @@ import {
   SchemaType,
 } from '@google/generative-ai';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 import { imageUrlToBase64 } from '@/utils/imageToBase64';
 import { safeParseJSON } from '@/utils/safeParseJSON';
@@ -137,6 +136,8 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   }
 
   async models() {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const url = `${this.baseURL}/v1beta/models?key=${this.apiKey}`;
     const response = await fetch(url, {
       method: 'GET',
@@ -149,17 +150,26 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       .map((model) => {
         const modelName = model.name.replace(/^models\//, '');
 
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => modelName.toLowerCase() === m.id.toLowerCase());
+
         return {
           contextWindowTokens: model.inputTokenLimit + model.outputTokenLimit,
           displayName: model.displayName,
-          enabled:
-          functionCall:
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('thinking')
+            || knownModel?.abilities?.functionCall
+            || false,
           id: modelName,
-          reasoning:
+          reasoning:
+            modelName.toLowerCase().includes('thinking')
+            || knownModel?.abilities?.reasoning
+            || false,
           vision:
-            modelName.toLowerCase().includes('vision')
-            (modelName.toLowerCase().includes('gemini') &&
-
+            modelName.toLowerCase().includes('vision')
+            || (modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('gemini-1.0'))
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
      .filter(Boolean) as ChatModelCard[];
package/src/libs/agent-runtime/groq/index.ts
@@ -2,7 +2,7 @@ import { AgentRuntimeErrorType } from '../error';
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import {
+import type { ChatModelCard } from '@/types/llm';
 
 export interface GroqModelCard {
   context_window: number;
@@ -31,33 +31,49 @@ export const LobeGroq = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
   },
-  models: {
-
-      const functionCallKeywords = [
-        'tool',
-        'llama-3.3',
-        'llama-3.1',
-        'llama3-',
-        'mixtral-8x7b-32768',
-        'gemma2-9b-it',
-      ];
-
-      const reasoningKeywords = [
-        'deepseek-r1',
-      ];
-
-      const model = m as unknown as GroqModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-
-
-
-
-
-
-
-
-
+    const functionCallKeywords = [
+      'tool',
+      'llama-3.3',
+      'llama-3.1',
+      'llama3-',
+      'mixtral-8x7b-32768',
+      'gemma2-9b-it',
+    ];
+
+    const reasoningKeywords = [
+      'deepseek-r1',
+    ];
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: GroqModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_window,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Groq,
 });
package/src/libs/agent-runtime/higress/index.ts
@@ -1,11 +1,19 @@
 import { uniqueId } from 'lodash-es';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
-
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-
+import type { ChatModelCard } from '@/types/llm';
+
+export interface HigressModelCard {
+  context_length: number;
+  description: string;
+  id: string;
+  name: string;
+  top_provider: {
+    max_completion_tokens: number;
+  }
+}
 
 export const LobeHigressAI = LobeOpenAICompatibleFactory({
   constructorOptions: {
@@ -18,29 +26,41 @@ export const LobeHigressAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HIGRESS_CHAT_COMPLETION === '1',
   },
-  models: {
-
-      const model = m as any;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    const modelsPage = await client.models.list() as any;
+    const modelList: HigressModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_length,
+          description: model.description,
+          displayName: model.name,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.includes('function calling')
+            || model.description.includes('tools')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          maxTokens: model.top_provider.max_completion_tokens,
+          reasoning:
+            model.description.includes('reasoning')
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.includes('vision')
+            || model.description.includes('multimodal')
+            || model.id.includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Higress,
 });
package/src/libs/agent-runtime/huggingface/index.ts
@@ -6,7 +6,6 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { convertIterableToStream } from '../utils/streams';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { ChatModelCard } from '@/types/llm';
 
 export interface HuggingFaceModelCard {
@@ -56,6 +55,8 @@ export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
     chatCompletion: () => process.env.DEBUG_HUGGINGFACE_CHAT_COMPLETION === '1',
   },
   models: async () => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const visionKeywords = [
       'image-text-to-text',
       'multimodal',
@@ -79,16 +80,26 @@ export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
 
     return modelList
       .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
        return {
-          contextWindowTokens:
-          displayName:
-          enabled:
-          functionCall:
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.tags.some(tag => tag.toLowerCase().includes('function-calling'))
+            || knownModel?.abilities?.functionCall
+            || false,
           id: model.id,
-          reasoning:
-
-
-
+          reasoning:
+            model.tags.some(tag => tag.toLowerCase().includes('reasoning'))
+            || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.tags.some(tag => visionKeywords.some(keyword => tag.toLowerCase().includes(keyword)))
+            || knownModel?.abilities?.vision
+            || false,
         };
       })
      .filter(Boolean) as ChatModelCard[];
|