@lobehub/chat 1.42.1 → 1.42.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/CHANGELOG.md +50 -0
  2. package/README.ja-JP.md +6 -6
  3. package/README.md +6 -6
  4. package/README.zh-CN.md +6 -6
  5. package/changelog/v1.json +14 -0
  6. package/locales/ar/components.json +3 -0
  7. package/locales/ar/modelProvider.json +0 -16
  8. package/locales/ar/models.json +12 -0
  9. package/locales/ar/setting.json +1 -2
  10. package/locales/bg-BG/components.json +3 -0
  11. package/locales/bg-BG/modelProvider.json +0 -16
  12. package/locales/bg-BG/models.json +12 -0
  13. package/locales/bg-BG/setting.json +1 -2
  14. package/locales/de-DE/components.json +3 -0
  15. package/locales/de-DE/modelProvider.json +0 -16
  16. package/locales/de-DE/models.json +12 -0
  17. package/locales/de-DE/setting.json +1 -2
  18. package/locales/en-US/components.json +3 -0
  19. package/locales/en-US/modelProvider.json +0 -16
  20. package/locales/en-US/models.json +12 -0
  21. package/locales/en-US/setting.json +1 -2
  22. package/locales/es-ES/components.json +3 -0
  23. package/locales/es-ES/modelProvider.json +0 -16
  24. package/locales/es-ES/models.json +12 -0
  25. package/locales/es-ES/setting.json +1 -2
  26. package/locales/fa-IR/components.json +3 -0
  27. package/locales/fa-IR/modelProvider.json +0 -16
  28. package/locales/fa-IR/models.json +12 -0
  29. package/locales/fa-IR/setting.json +1 -2
  30. package/locales/fr-FR/components.json +3 -0
  31. package/locales/fr-FR/modelProvider.json +0 -16
  32. package/locales/fr-FR/models.json +12 -0
  33. package/locales/fr-FR/setting.json +1 -2
  34. package/locales/it-IT/components.json +3 -0
  35. package/locales/it-IT/modelProvider.json +0 -16
  36. package/locales/it-IT/models.json +12 -0
  37. package/locales/it-IT/setting.json +1 -2
  38. package/locales/ja-JP/components.json +3 -0
  39. package/locales/ja-JP/modelProvider.json +0 -16
  40. package/locales/ja-JP/models.json +12 -0
  41. package/locales/ja-JP/setting.json +1 -2
  42. package/locales/ko-KR/components.json +3 -0
  43. package/locales/ko-KR/modelProvider.json +0 -16
  44. package/locales/ko-KR/models.json +12 -0
  45. package/locales/ko-KR/setting.json +1 -2
  46. package/locales/nl-NL/components.json +3 -0
  47. package/locales/nl-NL/modelProvider.json +0 -16
  48. package/locales/nl-NL/models.json +12 -0
  49. package/locales/nl-NL/setting.json +1 -2
  50. package/locales/pl-PL/components.json +3 -0
  51. package/locales/pl-PL/modelProvider.json +0 -16
  52. package/locales/pl-PL/models.json +12 -0
  53. package/locales/pl-PL/setting.json +1 -2
  54. package/locales/pt-BR/components.json +3 -0
  55. package/locales/pt-BR/modelProvider.json +0 -16
  56. package/locales/pt-BR/models.json +12 -0
  57. package/locales/pt-BR/setting.json +1 -2
  58. package/locales/ru-RU/components.json +3 -0
  59. package/locales/ru-RU/modelProvider.json +0 -16
  60. package/locales/ru-RU/models.json +12 -0
  61. package/locales/ru-RU/setting.json +1 -2
  62. package/locales/tr-TR/components.json +3 -0
  63. package/locales/tr-TR/modelProvider.json +0 -16
  64. package/locales/tr-TR/models.json +12 -0
  65. package/locales/tr-TR/setting.json +1 -2
  66. package/locales/vi-VN/components.json +3 -0
  67. package/locales/vi-VN/modelProvider.json +0 -16
  68. package/locales/vi-VN/models.json +12 -0
  69. package/locales/vi-VN/setting.json +1 -2
  70. package/locales/zh-CN/common.json +1 -1
  71. package/locales/zh-CN/components.json +3 -0
  72. package/locales/zh-CN/modelProvider.json +0 -16
  73. package/locales/zh-CN/models.json +12 -0
  74. package/locales/zh-CN/providers.json +1 -1
  75. package/locales/zh-CN/setting.json +1 -2
  76. package/locales/zh-TW/components.json +3 -0
  77. package/locales/zh-TW/modelProvider.json +0 -16
  78. package/locales/zh-TW/models.json +12 -0
  79. package/locales/zh-TW/setting.json +1 -2
  80. package/next.config.ts +0 -1
  81. package/package.json +4 -4
  82. package/src/app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal/Form.tsx +5 -3
  83. package/src/{app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal → components}/MaxTokenSlider.tsx +4 -5
  84. package/src/components/ModelSelect/index.tsx +6 -3
  85. package/src/components/NProgress/index.tsx +9 -1
  86. package/src/config/modelProviders/openai.ts +15 -0
  87. package/src/config/modelProviders/openrouter.ts +15 -0
  88. package/src/const/auth.ts +1 -1
  89. package/src/database/server/models/__tests__/user.test.ts +11 -0
  90. package/src/database/server/models/user.ts +4 -0
  91. package/src/libs/agent-runtime/AgentRuntime.test.ts +10 -10
  92. package/src/libs/agent-runtime/AgentRuntime.ts +3 -3
  93. package/src/libs/agent-runtime/ollama/index.test.ts +4 -1
  94. package/src/libs/agent-runtime/ollama/index.ts +2 -2
  95. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +10 -0
  96. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +14 -3
  97. package/src/locales/default/components.ts +3 -0
  98. package/src/locales/default/setting.ts +0 -1
  99. package/src/server/modules/AgentRuntime/index.test.ts +8 -8
  100. package/src/server/modules/AgentRuntime/index.ts +5 -5
  101. package/src/services/__tests__/_auth.test.ts +5 -6
  102. package/src/services/__tests__/chat.test.ts +1 -0
  103. package/src/services/_auth.ts +3 -3
  104. package/src/services/chat.ts +7 -8
  105. package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +1 -0
  106. package/src/types/aiModel.ts +275 -0
  107. package/src/types/aiProvider.ts +148 -0
  108. package/src/types/llm.ts +3 -17
  109. package/src/utils/merge.test.ts +48 -0
  110. package/src/utils/merge.ts +39 -0
@@ -27,6 +27,21 @@ const OpenRouter: ModelProviderCard = {
27
27
  },
28
28
  releasedAt: '2024-09-12',
29
29
  },
30
+ {
31
+ contextWindowTokens: 200_000,
32
+ description:
33
+ 'o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。',
34
+ displayName: 'OpenAI o1',
35
+ enabled: true,
36
+ id: 'openai/o1',
37
+ maxOutput: 100_000,
38
+ pricing: {
39
+ input: 15,
40
+ output: 60,
41
+ },
42
+ releasedAt: '2024-12-17',
43
+ vision: true,
44
+ },
30
45
  {
31
46
  contextWindowTokens: 128_000,
32
47
  description:
package/src/const/auth.ts CHANGED
@@ -28,7 +28,7 @@ export interface JWTPayload {
28
28
  /**
29
29
  * Represents the endpoint of provider
30
30
  */
31
- endpoint?: string;
31
+ baseURL?: string;
32
32
 
33
33
  azureApiVersion?: string;
34
34
 
@@ -130,6 +130,17 @@ describe('UserModel', () => {
130
130
  });
131
131
  });
132
132
 
133
+ describe('getUserSettings', () => {
134
+ it('should get user settings', async () => {
135
+ await serverDB.insert(users).values({ id: userId });
136
+ await serverDB.insert(userSettings).values({ id: userId, general: { language: 'en-US' } });
137
+
138
+ const data = await userModel.getUserSettings();
139
+
140
+ expect(data).toMatchObject({ id: userId, general: { language: 'en-US' } });
141
+ });
142
+ });
143
+
133
144
  describe('deleteSetting', () => {
134
145
  it('should delete user settings', async () => {
135
146
  await serverDB.insert(users).values({ id: userId });
@@ -75,6 +75,10 @@ export class UserModel {
75
75
  };
76
76
  };
77
77
 
78
+ getUserSettings = async () => {
79
+ return this.db.query.userSettings.findFirst({ where: eq(userSettings.id, this.userId) });
80
+ };
81
+
78
82
  updateUser = async (value: Partial<UserItem>) => {
79
83
  return this.db
80
84
  .update(users)
@@ -75,8 +75,8 @@ describe('AgentRuntime', () => {
75
75
  describe('Azure OpenAI provider', () => {
76
76
  it('should initialize correctly', async () => {
77
77
  const jwtPayload = {
78
- apikey: 'user-azure-key',
79
- endpoint: 'user-azure-endpoint',
78
+ apiKey: 'user-azure-key',
79
+ baseURL: 'user-azure-endpoint',
80
80
  apiVersion: '2024-06-01',
81
81
  };
82
82
 
@@ -90,8 +90,8 @@ describe('AgentRuntime', () => {
90
90
  });
91
91
  it('should initialize with azureOpenAIParams correctly', async () => {
92
92
  const jwtPayload = {
93
- apikey: 'user-openai-key',
94
- endpoint: 'user-endpoint',
93
+ apiKey: 'user-openai-key',
94
+ baseURL: 'user-endpoint',
95
95
  apiVersion: 'custom-version',
96
96
  };
97
97
 
@@ -106,8 +106,8 @@ describe('AgentRuntime', () => {
106
106
 
107
107
  it('should initialize with AzureAI correctly', async () => {
108
108
  const jwtPayload = {
109
- apikey: 'user-azure-key',
110
- endpoint: 'user-azure-endpoint',
109
+ apiKey: 'user-azure-key',
110
+ baseURL: 'user-azure-endpoint',
111
111
  };
112
112
  const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Azure, {
113
113
  azure: jwtPayload,
@@ -171,7 +171,7 @@ describe('AgentRuntime', () => {
171
171
 
172
172
  describe('Ollama provider', () => {
173
173
  it('should initialize correctly', async () => {
174
- const jwtPayload: JWTPayload = { endpoint: 'user-ollama-url' };
174
+ const jwtPayload: JWTPayload = { baseURL: 'https://user-ollama-url' };
175
175
  const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Ollama, {
176
176
  ollama: jwtPayload,
177
177
  });
@@ -255,7 +255,7 @@ describe('AgentRuntime', () => {
255
255
 
256
256
  describe('AgentRuntime chat method', () => {
257
257
  it('should run correctly', async () => {
258
- const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
258
+ const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
259
259
  const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
260
260
  openai: jwtPayload,
261
261
  });
@@ -271,7 +271,7 @@ describe('AgentRuntime', () => {
271
271
  await runtime.chat(payload);
272
272
  });
273
273
  it('should handle options correctly', async () => {
274
- const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
274
+ const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
275
275
  const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
276
276
  openai: jwtPayload,
277
277
  });
@@ -300,7 +300,7 @@ describe('AgentRuntime', () => {
300
300
  });
301
301
 
302
302
  describe('callback', async () => {
303
- const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
303
+ const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
304
304
  const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
305
305
  openai: jwtPayload,
306
306
  });
@@ -133,7 +133,7 @@ class AgentRuntime {
133
133
  ai21: Partial<ClientOptions>;
134
134
  ai360: Partial<ClientOptions>;
135
135
  anthropic: Partial<ClientOptions>;
136
- azure: { apiVersion?: string; apikey?: string; endpoint?: string };
136
+ azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
137
137
  baichuan: Partial<ClientOptions>;
138
138
  bedrock: Partial<LobeBedrockAIParams>;
139
139
  cloudflare: Partial<LobeCloudflareParams>;
@@ -180,8 +180,8 @@ class AgentRuntime {
180
180
 
181
181
  case ModelProvider.Azure: {
182
182
  runtimeModel = new LobeAzureOpenAI(
183
- params.azure?.endpoint,
184
- params.azure?.apikey,
183
+ params.azure?.baseURL,
184
+ params.azure?.apiKey,
185
185
  params.azure?.apiVersion,
186
186
  );
187
187
  break;
@@ -29,7 +29,10 @@ describe('LobeOllamaAI', () => {
29
29
  try {
30
30
  new LobeOllamaAI({ baseURL: 'invalid-url' });
31
31
  } catch (e) {
32
- expect(e).toEqual(AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs));
32
+ expect(e).toEqual({
33
+ error: new TypeError('Invalid URL'),
34
+ errorType: 'InvalidOllamaArgs',
35
+ });
33
36
  }
34
37
  });
35
38
  });
@@ -22,8 +22,8 @@ export class LobeOllamaAI implements LobeRuntimeAI {
22
22
  constructor({ baseURL }: ClientOptions = {}) {
23
23
  try {
24
24
  if (baseURL) new URL(baseURL);
25
- } catch {
26
- throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs);
25
+ } catch (e) {
26
+ throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs, e);
27
27
  }
28
28
 
29
29
  this.client = new Ollama(!baseURL ? undefined : { host: baseURL });
@@ -12,6 +12,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
12
12
  "input": 0.5,
13
13
  "output": 1.5,
14
14
  },
15
+ "releasedAt": "2023-02-28",
15
16
  },
16
17
  {
17
18
  "id": "gpt-3.5-turbo-16k",
@@ -35,6 +36,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
35
36
  "input": 10,
36
37
  "output": 30,
37
38
  },
39
+ "releasedAt": "2024-01-23",
38
40
  },
39
41
  {
40
42
  "contextWindowTokens": 128000,
@@ -46,6 +48,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
46
48
  "input": 10,
47
49
  "output": 30,
48
50
  },
51
+ "releasedAt": "2024-01-23",
49
52
  },
50
53
  {
51
54
  "contextWindowTokens": 4096,
@@ -56,6 +59,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
56
59
  "input": 1.5,
57
60
  "output": 2,
58
61
  },
62
+ "releasedAt": "2023-08-24",
59
63
  },
60
64
  {
61
65
  "id": "gpt-3.5-turbo-0301",
@@ -73,6 +77,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
73
77
  "input": 1,
74
78
  "output": 2,
75
79
  },
80
+ "releasedAt": "2023-11-02",
76
81
  },
77
82
  {
78
83
  "contextWindowTokens": 128000,
@@ -84,6 +89,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
84
89
  "input": 10,
85
90
  "output": 30,
86
91
  },
92
+ "releasedAt": "2023-11-02",
87
93
  },
88
94
  {
89
95
  "contextWindowTokens": 128000,
@@ -91,6 +97,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
91
97
  "description": "GPT-4 视觉预览版,专为图像分析和处理任务设计。",
92
98
  "displayName": "GPT 4 Turbo with Vision Preview",
93
99
  "id": "gpt-4-vision-preview",
100
+ "releasedAt": "2023-11-02",
94
101
  "vision": true,
95
102
  },
96
103
  {
@@ -103,6 +110,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
103
110
  "input": 30,
104
111
  "output": 60,
105
112
  },
113
+ "releasedAt": "2023-06-27",
106
114
  },
107
115
  {
108
116
  "contextWindowTokens": 16385,
@@ -114,6 +122,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
114
122
  "input": 0.5,
115
123
  "output": 1.5,
116
124
  },
125
+ "releasedAt": "2024-01-23",
117
126
  },
118
127
  {
119
128
  "contextWindowTokens": 8192,
@@ -125,6 +134,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
125
134
  "input": 30,
126
135
  "output": 60,
127
136
  },
137
+ "releasedAt": "2023-06-12",
128
138
  },
129
139
  ]
130
140
  `;
@@ -1,3 +1,5 @@
1
+ import dayjs from 'dayjs';
2
+ import utc from 'dayjs/plugin/utc';
1
3
  import OpenAI, { ClientOptions } from 'openai';
2
4
  import { Stream } from 'openai/streaming';
3
5
 
@@ -18,6 +20,7 @@ import type {
18
20
  TextToSpeechOptions,
19
21
  TextToSpeechPayload,
20
22
  } from '../../types';
23
+ import { ChatStreamCallbacks } from '../../types';
21
24
  import { AgentRuntimeError } from '../createError';
22
25
  import { debugResponse, debugStream } from '../debugStream';
23
26
  import { desensitizeUrl } from '../desensitizeUrl';
@@ -25,7 +28,6 @@ import { handleOpenAIError } from '../handleOpenAIError';
25
28
  import { convertOpenAIMessages } from '../openaiHelpers';
26
29
  import { StreamingResponse } from '../response';
27
30
  import { OpenAIStream, OpenAIStreamOptions } from '../streams';
28
- import { ChatStreamCallbacks } from '../../types';
29
31
 
30
32
  // the model contains the following keywords is not a chat model, so we should filter them out
31
33
  export const CHAT_MODELS_BLOCK_LIST = [
@@ -248,7 +250,8 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
248
250
 
249
251
  if (responseMode === 'json') return Response.json(response);
250
252
 
251
- const transformHandler = chatCompletion?.handleTransformResponseToStream || transformResponseToStream;
253
+ const transformHandler =
254
+ chatCompletion?.handleTransformResponseToStream || transformResponseToStream;
252
255
  const stream = transformHandler(response as unknown as OpenAI.ChatCompletion);
253
256
 
254
257
  const streamHandler = chatCompletion?.handleStream || OpenAIStream;
@@ -278,7 +281,15 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
278
281
 
279
282
  const knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === item.id);
280
283
 
281
- if (knownModel) return knownModel;
284
+ if (knownModel) {
285
+ dayjs.extend(utc);
286
+
287
+ return {
288
+ ...knownModel,
289
+ releasedAt:
290
+ knownModel.releasedAt ?? dayjs.utc(item.created * 1000).format('YYYY-MM-DD'),
291
+ };
292
+ }
282
293
 
283
294
  return { id: item.id };
284
295
  })
@@ -70,6 +70,9 @@ export default {
70
70
  GoBack: {
71
71
  back: '返回',
72
72
  },
73
+ MaxTokenSlider: {
74
+ unlimited: '无限制',
75
+ },
73
76
  ModelSelect: {
74
77
  featureTag: {
75
78
  custom: '自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性',
@@ -86,7 +86,6 @@ export default {
86
86
  modalTitle: '自定义模型配置',
87
87
  tokens: {
88
88
  title: '最大 token 数',
89
- unlimited: '无限制',
90
89
  },
91
90
  vision: {
92
91
  extra:
@@ -70,23 +70,23 @@ vi.mock('@/config/llm', () => ({
70
70
  describe('initAgentRuntimeWithUserPayload method', () => {
71
71
  describe('should initialize with options correctly', () => {
72
72
  it('OpenAI provider: with apikey and endpoint', async () => {
73
- const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
73
+ const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
74
74
  const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.OpenAI, jwtPayload);
75
75
  expect(runtime).toBeInstanceOf(AgentRuntime);
76
76
  expect(runtime['_runtime']).toBeInstanceOf(LobeOpenAI);
77
- expect(runtime['_runtime'].baseURL).toBe(jwtPayload.endpoint);
77
+ expect(runtime['_runtime'].baseURL).toBe(jwtPayload.baseURL);
78
78
  });
79
79
 
80
80
  it('Azure AI provider: with apikey, endpoint and apiversion', async () => {
81
81
  const jwtPayload: JWTPayload = {
82
82
  apiKey: 'user-azure-key',
83
- endpoint: 'user-azure-endpoint',
83
+ baseURL: 'user-azure-endpoint',
84
84
  azureApiVersion: '2024-06-01',
85
85
  };
86
86
  const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Azure, jwtPayload);
87
87
  expect(runtime).toBeInstanceOf(AgentRuntime);
88
88
  expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);
89
- expect(runtime['_runtime'].baseURL).toBe(jwtPayload.endpoint);
89
+ expect(runtime['_runtime'].baseURL).toBe(jwtPayload.baseURL);
90
90
  });
91
91
 
92
92
  it('ZhiPu AI provider: with apikey', async () => {
@@ -130,11 +130,11 @@ describe('initAgentRuntimeWithUserPayload method', () => {
130
130
  });
131
131
 
132
132
  it('Ollama provider: with endpoint', async () => {
133
- const jwtPayload: JWTPayload = { endpoint: 'http://user-ollama-url' };
133
+ const jwtPayload: JWTPayload = { baseURL: 'http://user-ollama-url' };
134
134
  const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Ollama, jwtPayload);
135
135
  expect(runtime).toBeInstanceOf(AgentRuntime);
136
136
  expect(runtime['_runtime']).toBeInstanceOf(LobeOllamaAI);
137
- expect(runtime['_runtime']['baseURL']).toEqual(jwtPayload.endpoint);
137
+ expect(runtime['_runtime']['baseURL']).toEqual(jwtPayload.baseURL);
138
138
  });
139
139
 
140
140
  it('Perplexity AI provider: with apikey', async () => {
@@ -220,12 +220,12 @@ describe('initAgentRuntimeWithUserPayload method', () => {
220
220
  it('Unknown Provider: with apikey and endpoint, should initialize to OpenAi', async () => {
221
221
  const jwtPayload: JWTPayload = {
222
222
  apiKey: 'user-unknown-key',
223
- endpoint: 'user-unknown-endpoint',
223
+ baseURL: 'user-unknown-endpoint',
224
224
  };
225
225
  const runtime = await initAgentRuntimeWithUserPayload('unknown', jwtPayload);
226
226
  expect(runtime).toBeInstanceOf(AgentRuntime);
227
227
  expect(runtime['_runtime']).toBeInstanceOf(LobeOpenAI);
228
- expect(runtime['_runtime'].baseURL).toBe(jwtPayload.endpoint);
228
+ expect(runtime['_runtime'].baseURL).toBe(jwtPayload.baseURL);
229
229
  });
230
230
  });
231
231
 
@@ -38,23 +38,23 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
38
38
  }
39
39
 
40
40
  const apiKey = apiKeyManager.pick(payload?.apiKey || llmConfig[`${upperProvider}_API_KEY`]);
41
- const baseURL = payload?.endpoint || process.env[`${upperProvider}_PROXY_URL`];
41
+ const baseURL = payload?.baseURL || process.env[`${upperProvider}_PROXY_URL`];
42
42
 
43
43
  return baseURL ? { apiKey, baseURL } : { apiKey };
44
44
  }
45
45
 
46
46
  case ModelProvider.Ollama: {
47
- const baseURL = payload?.endpoint || process.env.OLLAMA_PROXY_URL;
47
+ const baseURL = payload?.baseURL || process.env.OLLAMA_PROXY_URL;
48
48
 
49
49
  return { baseURL };
50
50
  }
51
51
 
52
52
  case ModelProvider.Azure: {
53
53
  const { AZURE_API_KEY, AZURE_API_VERSION, AZURE_ENDPOINT } = llmConfig;
54
- const apikey = apiKeyManager.pick(payload?.apiKey || AZURE_API_KEY);
55
- const endpoint = payload?.endpoint || AZURE_ENDPOINT;
54
+ const apiKey = apiKeyManager.pick(payload?.apiKey || AZURE_API_KEY);
55
+ const baseURL = payload?.baseURL || AZURE_ENDPOINT;
56
56
  const apiVersion = payload?.azureApiVersion || AZURE_API_VERSION;
57
- return { apiVersion, apikey, endpoint };
57
+ return { apiKey, apiVersion, baseURL };
58
58
  }
59
59
 
60
60
  case ModelProvider.Bedrock: {
@@ -131,7 +131,7 @@ describe('getProviderAuthPayload', () => {
131
131
  expect(payload).toEqual({
132
132
  apiKey: mockAzureConfig.apiKey,
133
133
  azureApiVersion: mockAzureConfig.apiVersion,
134
- endpoint: mockAzureConfig.endpoint,
134
+ baseURL: mockAzureConfig.endpoint,
135
135
  });
136
136
  });
137
137
 
@@ -144,7 +144,7 @@ describe('getProviderAuthPayload', () => {
144
144
 
145
145
  const payload = getProviderAuthPayload(ModelProvider.Ollama);
146
146
  expect(payload).toEqual({
147
- endpoint: mockOllamaProxyUrl,
147
+ baseURL: mockOllamaProxyUrl,
148
148
  });
149
149
  });
150
150
 
@@ -152,8 +152,7 @@ describe('getProviderAuthPayload', () => {
152
152
  // 假设的 OpenAI 配置
153
153
  const mockOpenAIConfig = {
154
154
  apiKey: 'openai-api-key',
155
- baseURL: 'openai-baseURL',
156
- endpoint: 'openai-endpoint',
155
+ baseURL: 'openai-endpoint',
157
156
  useAzure: true,
158
157
  azureApiVersion: 'openai-azure-api-version',
159
158
  };
@@ -164,7 +163,7 @@ describe('getProviderAuthPayload', () => {
164
163
  const payload = getProviderAuthPayload(ModelProvider.OpenAI);
165
164
  expect(payload).toEqual({
166
165
  apiKey: mockOpenAIConfig.apiKey,
167
- endpoint: mockOpenAIConfig.baseURL,
166
+ baseURL: mockOpenAIConfig.baseURL,
168
167
  });
169
168
  });
170
169
 
@@ -181,7 +180,7 @@ describe('getProviderAuthPayload', () => {
181
180
  const payload = getProviderAuthPayload(ModelProvider.Stepfun);
182
181
  expect(payload).toEqual({
183
182
  apiKey: mockOpenAIConfig.apiKey,
184
- endpoint: mockOpenAIConfig.baseURL,
183
+ baseURL: mockOpenAIConfig.baseURL,
185
184
  });
186
185
  });
187
186
 
@@ -939,6 +939,7 @@ describe('AgentRuntimeOnClient', () => {
939
939
  },
940
940
  },
941
941
  } as UserSettingsState) as unknown as UserStore;
942
+
942
943
  const runtime = await initializeWithClientStore(ModelProvider.Azure, {});
943
944
  expect(runtime).toBeInstanceOf(AgentRuntime);
944
945
  expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);
@@ -45,14 +45,14 @@ export const getProviderAuthPayload = (provider: string) => {
45
45
  return {
46
46
  apiKey: azure.apiKey,
47
47
  azureApiVersion: azure.apiVersion,
48
- endpoint: azure.endpoint,
48
+ baseURL: azure.endpoint,
49
49
  };
50
50
  }
51
51
 
52
52
  case ModelProvider.Ollama: {
53
53
  const config = keyVaultsConfigSelectors.ollamaConfig(useUserStore.getState());
54
54
 
55
- return { endpoint: config?.baseURL };
55
+ return { baseURL: config?.baseURL };
56
56
  }
57
57
 
58
58
  case ModelProvider.Cloudflare: {
@@ -69,7 +69,7 @@ export const getProviderAuthPayload = (provider: string) => {
69
69
  useUserStore.getState(),
70
70
  );
71
71
 
72
- return { apiKey: config?.apiKey, endpoint: config?.baseURL };
72
+ return { apiKey: config?.apiKey, baseURL: config?.baseURL };
73
73
  }
74
74
  }
75
75
  };
@@ -94,21 +94,20 @@ export function initializeWithClientStore(provider: string, payload: any) {
94
94
  default:
95
95
  case ModelProvider.OpenAI: {
96
96
  providerOptions = {
97
- baseURL: providerAuthPayload?.endpoint,
97
+ baseURL: providerAuthPayload?.baseURL,
98
98
  };
99
99
  break;
100
100
  }
101
101
  case ModelProvider.Azure: {
102
102
  providerOptions = {
103
+ apiKey: providerAuthPayload?.apiKey,
103
104
  apiVersion: providerAuthPayload?.azureApiVersion,
104
- // That's a wired properity, but just remapped it
105
- apikey: providerAuthPayload?.apiKey,
106
105
  };
107
106
  break;
108
107
  }
109
108
  case ModelProvider.Google: {
110
109
  providerOptions = {
111
- baseURL: providerAuthPayload?.endpoint,
110
+ baseURL: providerAuthPayload?.baseURL,
112
111
  };
113
112
  break;
114
113
  }
@@ -125,27 +124,27 @@ export function initializeWithClientStore(provider: string, payload: any) {
125
124
  }
126
125
  case ModelProvider.Ollama: {
127
126
  providerOptions = {
128
- baseURL: providerAuthPayload?.endpoint,
127
+ baseURL: providerAuthPayload?.baseURL,
129
128
  };
130
129
  break;
131
130
  }
132
131
  case ModelProvider.Perplexity: {
133
132
  providerOptions = {
134
133
  apikey: providerAuthPayload?.apiKey,
135
- baseURL: providerAuthPayload?.endpoint,
134
+ baseURL: providerAuthPayload?.baseURL,
136
135
  };
137
136
  break;
138
137
  }
139
138
  case ModelProvider.Anthropic: {
140
139
  providerOptions = {
141
- baseURL: providerAuthPayload?.endpoint,
140
+ baseURL: providerAuthPayload?.baseURL,
142
141
  };
143
142
  break;
144
143
  }
145
144
  case ModelProvider.Groq: {
146
145
  providerOptions = {
147
146
  apikey: providerAuthPayload?.apiKey,
148
- baseURL: providerAuthPayload?.endpoint,
147
+ baseURL: providerAuthPayload?.baseURL,
149
148
  };
150
149
  break;
151
150
  }
@@ -51,6 +51,7 @@ describe('modelProviderSelectors', () => {
51
51
  const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
52
52
  expect(result).toEqual([
53
53
  'o1-mini',
54
+ 'o1-2024-12-17',
54
55
  'o1-preview',
55
56
  'gpt-4o-mini',
56
57
  'gpt-4o-2024-11-20',