@lobehub/chat 1.53.10 → 1.53.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/modelProvider.json +2 -2
  4. package/locales/bg-BG/modelProvider.json +2 -2
  5. package/locales/de-DE/modelProvider.json +2 -2
  6. package/locales/en-US/modelProvider.json +2 -2
  7. package/locales/es-ES/modelProvider.json +2 -2
  8. package/locales/fa-IR/modelProvider.json +2 -2
  9. package/locales/fr-FR/modelProvider.json +2 -2
  10. package/locales/it-IT/modelProvider.json +2 -2
  11. package/locales/ja-JP/modelProvider.json +2 -2
  12. package/locales/ko-KR/modelProvider.json +2 -2
  13. package/locales/nl-NL/modelProvider.json +2 -2
  14. package/locales/pl-PL/modelProvider.json +2 -2
  15. package/locales/pt-BR/modelProvider.json +2 -2
  16. package/locales/ru-RU/modelProvider.json +2 -2
  17. package/locales/tr-TR/modelProvider.json +2 -2
  18. package/locales/vi-VN/modelProvider.json +2 -2
  19. package/locales/zh-CN/modelProvider.json +3 -3
  20. package/locales/zh-TW/modelProvider.json +2 -2
  21. package/package.json +1 -1
  22. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +8 -8
  23. package/src/config/aiModels/spark.ts +9 -0
  24. package/src/libs/agent-runtime/ai360/index.ts +37 -21
  25. package/src/libs/agent-runtime/anthropic/index.ts +17 -5
  26. package/src/libs/agent-runtime/baichuan/index.ts +11 -2
  27. package/src/libs/agent-runtime/cloudflare/index.ts +22 -7
  28. package/src/libs/agent-runtime/deepseek/index.ts +29 -13
  29. package/src/libs/agent-runtime/fireworksai/index.ts +30 -18
  30. package/src/libs/agent-runtime/giteeai/index.ts +46 -30
  31. package/src/libs/agent-runtime/github/index.test.ts +0 -49
  32. package/src/libs/agent-runtime/github/index.ts +18 -6
  33. package/src/libs/agent-runtime/google/index.ts +17 -7
  34. package/src/libs/agent-runtime/groq/index.ts +43 -27
  35. package/src/libs/agent-runtime/higress/index.ts +45 -25
  36. package/src/libs/agent-runtime/huggingface/index.ts +20 -9
  37. package/src/libs/agent-runtime/hunyuan/index.ts +34 -18
  38. package/src/libs/agent-runtime/internlm/index.ts +27 -12
  39. package/src/libs/agent-runtime/lmstudio/index.ts +34 -0
  40. package/src/libs/agent-runtime/mistral/index.ts +24 -14
  41. package/src/libs/agent-runtime/moonshot/index.ts +28 -13
  42. package/src/libs/agent-runtime/novita/index.ts +35 -18
  43. package/src/libs/agent-runtime/ollama/index.test.ts +20 -1
  44. package/src/libs/agent-runtime/ollama/index.ts +33 -5
  45. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +108 -0
  46. package/src/libs/agent-runtime/openai/index.ts +43 -27
  47. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +39 -11
  48. package/src/libs/agent-runtime/openrouter/index.ts +51 -33
  49. package/src/libs/agent-runtime/qwen/index.ts +45 -29
  50. package/src/libs/agent-runtime/sensenova/index.ts +24 -6
  51. package/src/libs/agent-runtime/siliconcloud/index.ts +50 -34
  52. package/src/libs/agent-runtime/stepfun/index.ts +42 -26
  53. package/src/libs/agent-runtime/tencentcloud/index.ts +44 -0
  54. package/src/libs/agent-runtime/togetherai/index.ts +19 -6
  55. package/src/libs/agent-runtime/xai/index.ts +28 -13
  56. package/src/libs/agent-runtime/zeroone/index.ts +29 -13
  57. package/src/libs/agent-runtime/zhipu/index.test.ts +0 -9
  58. package/src/libs/agent-runtime/zhipu/index.ts +18 -6
  59. package/src/locales/default/modelProvider.ts +1 -2
  60. package/src/server/manifest.ts +2 -2
  61. package/src/libs/agent-runtime/zhipu/authToken.test.ts +0 -18
  62. package/src/libs/agent-runtime/zhipu/authToken.ts +0 -22
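
Across the agent-runtime files below, the recurring change is the same: the static `models: { transformModel }` configuration is replaced by an async `models: async ({ client }) => ...` resolver that fetches the provider's live model list and merges each entry with metadata from `LOBE_DEFAULT_MODEL_LIST` (now loaded via dynamic import). Distilled into one standalone sketch for orientation — the helper name and the trimmed-down types here are illustrative, not exports of the package:

// Distilled sketch of the resolver shape the hunks below repeat per provider.
// `KnownModel`, `ResolvedModelCard`, and `resolveModels` are our names; the
// real code inlines this logic in each provider file.
import type OpenAI from 'openai';

interface KnownModel {
  abilities?: { functionCall?: boolean; reasoning?: boolean; vision?: boolean };
  contextWindowTokens?: number;
  displayName?: string;
  enabled?: boolean;
  id: string;
}

interface ResolvedModelCard {
  contextWindowTokens?: number;
  displayName?: string;
  enabled: boolean;
  functionCall: boolean;
  id: string;
  reasoning: boolean;
  vision: boolean;
}

const resolveModels = async (
  client: OpenAI,
  knownList: KnownModel[],
): Promise<ResolvedModelCard[]> => {
  // Live list from the provider's OpenAI-compatible /models endpoint.
  const modelsPage = await client.models.list();

  return modelsPage.data.map((model) => {
    // Case-insensitive match against the bundled defaults.
    const known = knownList.find((m) => m.id.toLowerCase() === model.id.toLowerCase());

    return {
      contextWindowTokens: known?.contextWindowTokens,
      displayName: known?.displayName,
      enabled: known?.enabled || false,
      functionCall: known?.abilities?.functionCall || false,
      id: model.id,
      reasoning: known?.abilities?.reasoning || false,
      vision: known?.abilities?.vision || false,
    };
  });
};

The per-provider hunks differ mainly in which extra signals (id keywords, API-reported capabilities, description text) are OR-ed into the three ability flags.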

package/src/libs/agent-runtime/hunyuan/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface HunyuanModelCard {
   id: string;
@@ -12,25 +12,41 @@ export const LobeHunyuanAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HUNYUAN_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const functionCallKeywords = [
-        'hunyuan-functioncall',
-        'hunyuan-turbo',
-        'hunyuan-pro',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const model = m as unknown as HunyuanModelCard;
+    const functionCallKeywords = [
+      'hunyuan-functioncall',
+      'hunyuan-turbo',
+      'hunyuan-pro',
+    ];
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('vision'),
-        id: model.id,
-        vision: model.id.toLowerCase().includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: HunyuanModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Hunyuan,
 });
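
A subtlety in the merged `functionCall` expression above: `&&` binds tighter than `||`, so the keyword test together with its `!...includes('vision')` guard is evaluated as one unit before falling back to the known-model ability. A parenthesized restatement with hypothetical sample values:

// Parenthesized restatement of the hunyuan functionCall expression above;
// `id` and `known` are hypothetical sample values, not data from the diff.
const id = 'hunyuan-turbo-vision';
const known: { abilities?: { functionCall?: boolean } } | undefined = undefined;

const keywords = ['hunyuan-functioncall', 'hunyuan-turbo', 'hunyuan-pro'];

// Equivalent to `a && b || c || false`, since && binds tighter than ||.
const functionCall =
  (keywords.some((k) => id.toLowerCase().includes(k)) && !id.toLowerCase().includes('vision'))
  || (known?.abilities?.functionCall ?? false);

console.log(functionCall); // false: the id matches 'hunyuan-turbo' but also carries 'vision'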

package/src/libs/agent-runtime/internlm/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface InternLMModelCard {
   id: string;
@@ -20,18 +20,33 @@ export const LobeInternLMAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as unknown as InternLMModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: true,
-        id: model.id,
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: InternLMModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.InternLM,
 });
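
The behavioral change for InternLM is easy to miss: the old transform hardcoded `functionCall: true` for every listed model, while the new resolver only reports what `LOBE_DEFAULT_MODEL_LIST` records, so ids absent from that list now come back with all ability flags false. A small sketch with assumed data:

// Hypothetical known-model data illustrating the InternLM change above;
// the entry id is an assumption, not taken from the package's config.
const knownList = [
  { abilities: { functionCall: true }, id: 'internlm2.5-latest' },
];

const resolveFunctionCall = (id: string) =>
  knownList.find((m) => id.toLowerCase() === m.id.toLowerCase())?.abilities?.functionCall || false;

console.log(resolveFunctionCall('internlm2.5-latest'));    // true  — present in the known list
console.log(resolveFunctionCall('internlm-experimental')); // false — would have been true before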

package/src/libs/agent-runtime/lmstudio/index.ts

@@ -1,11 +1,45 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import type { ChatModelCard } from '@/types/llm';
+
+export interface LMStudioModelCard {
+  id: string;
+}
+
 export const LobeLMStudioAI = LobeOpenAICompatibleFactory({
   apiKey: 'placeholder-to-avoid-error',
   baseURL: 'http://127.0.0.1:1234/v1',
   debug: {
     chatCompletion: () => process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION === '1',
   },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: LMStudioModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
   provider: ModelProvider.LMStudio,
 });
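
LM Studio gains model listing here for the first time. Its local server speaks the OpenAI wire protocol, so the factory's client can call `/models` against `http://127.0.0.1:1234/v1` with a dummy key. A minimal standalone sketch of the same call using the `openai` SDK directly, assuming a local LM Studio instance on its default port:

// Minimal sketch: list models from a local LM Studio server the way the
// factory's client does. Assumes LM Studio is running on the default port.
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: 'placeholder-to-avoid-error', // LM Studio ignores the key, but the SDK requires one
  baseURL: 'http://127.0.0.1:1234/v1',
});

const main = async () => {
  const page = await client.models.list();
  for (const model of page.data) {
    console.log(model.id); // e.g. the id of a locally loaded model
  }
};

main();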

package/src/libs/agent-runtime/mistral/index.ts

@@ -1,7 +1,7 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface MistralModelCard {
   capabilities: {
@@ -30,20 +30,30 @@ export const LobeMistralAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as unknown as MistralModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: model.max_context_length,
-        description: model.description,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: model.capabilities.function_calling,
-        id: model.id,
-        vision: model.capabilities.vision,
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: MistralModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.max_context_length,
+          description: model.description,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall: model.capabilities.function_calling,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision: model.capabilities.vision,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Mistral,
 });
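
Mistral is the outlier among these providers: its `/models` payload already carries structured `capabilities`, so `functionCall` and `vision` are taken from the API as-is and only `reasoning` falls back to the bundled list. A sketch with a hypothetical card payload:

// Hypothetical MistralModelCard payload showing which fields map directly;
// the values are invented for illustration.
const card = {
  capabilities: { function_calling: true, vision: false }, // as reported by the API
  description: 'Sample description',
  id: 'mistral-large-latest',
  max_context_length: 128_000,
};

const resolved = {
  contextWindowTokens: card.max_context_length,      // API value, not the local list
  functionCall: card.capabilities.function_calling,  // trusted as-is
  id: card.id,
  vision: card.capabilities.vision,                  // trusted as-is
  // `reasoning` is the only flag still sourced from LOBE_DEFAULT_MODEL_LIST
};

console.log(resolved);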

package/src/libs/agent-runtime/moonshot/index.ts

@@ -3,7 +3,7 @@ import OpenAI from 'openai';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export interface MoonshotModelCard {
   id: string;
@@ -24,19 +24,34 @@ export const LobeMoonshotAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MOONSHOT_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const model = m as unknown as MoonshotModelCard;
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      return {
-        contextWindowTokens: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.contextWindowTokens ?? undefined,
-        displayName: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.displayName ?? undefined,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: true,
-        id: model.id,
-        vision: model.id.toLowerCase().includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: MoonshotModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.id.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Moonshot,
 });
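
Note also that every rewritten resolver swaps the top-level `import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels'` for an `await import(...)` inside the function body. A plausible reading — the diff itself does not state the motivation — is that this defers loading the sizeable default-model table until a model list is actually requested:

// The lazy-load pattern shared by the rewritten resolvers. The rationale in
// this comment is our reading; the diff does not document why the import moved.
const models = async () => {
  // Resolved on first call instead of at module-evaluation time. Note that
  // '@/config/aiModels' is the package's own path alias, so this snippet is
  // meaningful inside the repo rather than as a standalone script.
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
  return LOBE_DEFAULT_MODEL_LIST;
};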

package/src/libs/agent-runtime/novita/index.ts

@@ -2,7 +2,7 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { NovitaModelCard } from './type';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
 
 export const LobeNovitaAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.novita.ai/v3/openai',
@@ -14,25 +14,42 @@ export const LobeNovitaAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_NOVITA_CHAT_COMPLETION === '1',
   },
-  models: {
-    transformModel: (m) => {
-      const reasoningKeywords = [
-        'deepseek-r1',
-      ];
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-      const model = m as unknown as NovitaModelCard;
+    const reasoningKeywords = [
+      'deepseek-r1',
+    ];
 
-      return {
-        contextWindowTokens: model.context_size,
-        description: model.description,
-        displayName: model.title,
-        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id === m.id)?.enabled || false,
-        functionCall: model.description.toLowerCase().includes('function calling'),
-        id: model.id,
-        reasoning: model.description.toLowerCase().includes('reasoning task') || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
-        vision: model.description.toLowerCase().includes('vision'),
-      };
-    },
+    const modelsPage = await client.models.list() as any;
+    const modelList: NovitaModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: model.context_size,
+          description: model.description,
+          displayName: model.title,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            model.description.toLowerCase().includes('function calling')
+            || knownModel?.abilities?.functionCall
+            || false,
+          id: model.id,
+          reasoning:
+            model.description.toLowerCase().includes('reasoning task')
+            || reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
+            || knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            model.description.toLowerCase().includes('vision')
+            || knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   },
   provider: ModelProvider.Novita,
 });
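
Novita infers capabilities by scanning the human-readable model description as well as the id. A sketch of that keyword logic against a hypothetical `NovitaModelCard` (the card's contents are invented for illustration):

// Hypothetical NovitaModelCard exercising the description/id scanning above.
const card = {
  context_size: 64_000,
  description: 'DeepSeek R1: strong at any reasoning task. Supports function calling.',
  id: 'deepseek/deepseek-r1',
  title: 'DeepSeek R1',
};

const desc = card.description.toLowerCase();
const reasoningKeywords = ['deepseek-r1'];

const flags = {
  functionCall: desc.includes('function calling'), // true via the description
  reasoning:
    desc.includes('reasoning task')
    || reasoningKeywords.some((k) => card.id.toLowerCase().includes(k)), // true either way
  vision: desc.includes('vision'), // false
};

console.log(flags); // { functionCall: true, reasoning: true, vision: false }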

package/src/libs/agent-runtime/ollama/index.test.ts

@@ -145,7 +145,26 @@ describe('LobeOllamaAI', () => {
       const models = await ollamaAI.models();
 
       expect(listMock).toHaveBeenCalled();
-      expect(models).toEqual([{ id: 'model-1' }, { id: 'model-2' }]);
+      expect(models).toEqual([
+        {
+          contextWindowTokens: undefined,
+          displayName: undefined,
+          enabled: false,
+          functionCall: false,
+          id: 'model-1',
+          reasoning: false,
+          vision: false
+        },
+        {
+          contextWindowTokens: undefined,
+          displayName: undefined,
+          enabled: false,
+          functionCall: false,
+          id: 'model-2',
+          reasoning: false,
+          vision: false
+        }
+      ]);
     });
   });
 
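
The updated expectation pins every resolved field, not just `id`, for models absent from `LOBE_DEFAULT_MODEL_LIST`. The mock setup lives outside this hunk; a sketch of the kind of stub such a test typically runs against (the wiring and names here are assumptions, not the package's actual test setup):

// Sketch of a vitest stub matching the expectation above; how the real test
// attaches listMock to the client is outside this hunk, so this is assumed.
import { vi } from 'vitest';

const listMock = vi.fn().mockResolvedValue({
  models: [{ name: 'model-1' }, { name: 'model-2' }],
});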

package/src/libs/agent-runtime/ollama/index.ts

@@ -2,7 +2,6 @@ import { Ollama, Tool } from 'ollama/browser';
 import { ClientOptions } from 'openai';
 
 import { OpenAIChatMessage } from '@/libs/agent-runtime';
-import { ChatModelCard } from '@/types/llm';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
@@ -20,6 +19,12 @@ import { OllamaStream, convertIterableToStream } from '../utils/streams';
 import { parseDataUri } from '../utils/uriParser';
 import { OllamaMessage } from './type';
 
+import { ChatModelCard } from '@/types/llm';
+
+export interface OllamaModelCard {
+  name: string;
+}
+
 export class LobeOllamaAI implements LobeRuntimeAI {
   private client: Ollama;
 
@@ -102,11 +107,34 @@ export class LobeOllamaAI implements LobeRuntimeAI {
     return await Promise.all(promises);
   }
 
-  async models(): Promise<ChatModelCard[]> {
+  async models() {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
     const list = await this.client.list();
-    return list.models.map((model) => ({
-      id: model.name,
-    }));
+
+    const modelList: OllamaModelCard[] = list.models;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall:
+            knownModel?.abilities?.functionCall
+            || false,
+          id: model.name,
+          reasoning:
+            knownModel?.abilities?.reasoning
+            || false,
+          vision:
+            knownModel?.abilities?.vision
+            || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
   }
 
   private invokeEmbeddingModel = async (payload: EmbeddingsPayload): Promise<Embeddings> => {
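
Unlike the OpenAI-compatible providers, Ollama's list comes from the `ollama/browser` client, whose cards carry `name` rather than `id`; the resolver above maps one onto the other. A minimal standalone sketch of the underlying call, assuming a local Ollama daemon on the default port:

// Minimal sketch of the underlying call: the ollama client's list() returns
// { models: [{ name, ... }] }, which the resolver above maps to `id`.
// Assumes a local Ollama daemon at the default address.
import { Ollama } from 'ollama/browser';

const client = new Ollama({ host: 'http://127.0.0.1:11434' });

const main = async () => {
  const list = await client.list();
  for (const model of list.models) {
    console.log(model.name); // e.g. 'llama3.2:latest'
  }
};

main();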

package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap

@@ -2,6 +2,24 @@
 
 exports[`LobeOpenAI > models > should get models 1`] = `
 [
+  {
+    "contextWindowTokens": undefined,
+    "displayName": "Whisper",
+    "enabled": false,
+    "functionCall": false,
+    "id": "whisper-1",
+    "reasoning": false,
+    "vision": false,
+  },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": undefined,
+    "enabled": false,
+    "functionCall": false,
+    "id": "davinci-002",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": 16385,
     "displayName": "GPT 3.5 Turbo",
@@ -11,6 +29,15 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "reasoning": false,
     "vision": false,
   },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": "DALL·E 2",
+    "enabled": false,
+    "functionCall": false,
+    "id": "dall-e-2",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": 16384,
     "displayName": "GPT 3.5 Turbo",
@@ -20,6 +47,24 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "reasoning": false,
     "vision": false,
   },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": undefined,
+    "enabled": false,
+    "functionCall": false,
+    "id": "tts-1-hd-1106",
+    "reasoning": false,
+    "vision": false,
+  },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": "TTS-1 HD",
+    "enabled": false,
+    "functionCall": false,
+    "id": "tts-1-hd",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": undefined,
     "displayName": undefined,
@@ -29,6 +74,15 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "reasoning": false,
     "vision": false,
   },
+  {
+    "contextWindowTokens": 8192,
+    "displayName": "Text Embedding 3 Large",
+    "enabled": false,
+    "functionCall": false,
+    "id": "text-embedding-3-large",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": undefined,
     "displayName": undefined,
@@ -92,6 +146,24 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "reasoning": false,
     "vision": false,
   },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": "TTS-1",
+    "enabled": false,
+    "functionCall": false,
+    "id": "tts-1",
+    "reasoning": false,
+    "vision": false,
+  },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": "DALL·E 3",
+    "enabled": false,
+    "functionCall": false,
+    "id": "dall-e-3",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": 16385,
     "displayName": "GPT-3.5 Turbo 1106",
@@ -110,6 +182,24 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "reasoning": false,
     "vision": false,
   },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": undefined,
+    "enabled": false,
+    "functionCall": false,
+    "id": "babbage-002",
+    "reasoning": false,
+    "vision": false,
+  },
+  {
+    "contextWindowTokens": undefined,
+    "displayName": undefined,
+    "enabled": false,
+    "functionCall": false,
+    "id": "tts-1-1106",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": 128000,
     "displayName": "GPT 4 Turbo with Vision Preview",
@@ -119,6 +209,15 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "reasoning": false,
     "vision": true,
   },
+  {
+    "contextWindowTokens": 8192,
+    "displayName": "Text Embedding 3 Small",
+    "enabled": false,
+    "functionCall": false,
+    "id": "text-embedding-3-small",
+    "reasoning": false,
+    "vision": false,
+  },
   {
     "contextWindowTokens": 128000,
     "displayName": "GPT 4 Turbo",
  "displayName": "GPT 4 Turbo",
@@ -126,6 +225,15 @@ exports[`LobeOpenAI > models > should get models 1`] = `
126
225
  "functionCall": true,
127
226
  "id": "gpt-4",
128
227
  "reasoning": false,
228
+ "vision": true,
229
+ },
230
+ {
231
+ "contextWindowTokens": undefined,
232
+ "displayName": undefined,
233
+ "enabled": false,
234
+ "functionCall": false,
235
+ "id": "text-embedding-ada-002",
236
+ "reasoning": false,
129
237
  "vision": false,
130
238
  },
131
239
  {