@lobehub/chat 1.30.0 → 1.31.1

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
  # Changelog
 
+ ### [Version 1.31.1](https://github.com/lobehub/lobe-chat/compare/v1.31.0...v1.31.1)
+
+ <sup>Released on **2024-11-12**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Fix Windows always showing scrollbar.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Fix Windows always showing scrollbar, closes [#4502](https://github.com/lobehub/lobe-chat/issues/4502) ([780a2a9](https://github.com/lobehub/lobe-chat/commit/780a2a9))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ## [Version 1.31.0](https://github.com/lobehub/lobe-chat/compare/v1.30.0...v1.31.0)
+
+ <sup>Released on **2024-11-11**</sup>
+
+ #### ✨ Features
+
+ - **misc**: Add support xAI provider.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's improved
+
+ - **misc**: Add support xAI provider, closes [#4627](https://github.com/lobehub/lobe-chat/issues/4627) ([49e1e08](https://github.com/lobehub/lobe-chat/commit/49e1e08))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 1.30.0](https://github.com/lobehub/lobe-chat/compare/v1.29.6...v1.30.0)
 
  <sup>Released on **2024-11-11**</sup>
package/Dockerfile CHANGED
@@ -200,6 +200,8 @@ ENV \
   UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
   # Wenxin
   WENXIN_ACCESS_KEY="" WENXIN_SECRET_KEY="" WENXIN_MODEL_LIST="" \
+  # xAI
+  XAI_API_KEY="" XAI_MODEL_LIST="" \
   # 01.AI
   ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
   # Zhipu
@@ -235,6 +235,8 @@ ENV \
   UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
   # Wenxin
   WENXIN_ACCESS_KEY="" WENXIN_SECRET_KEY="" WENXIN_MODEL_LIST="" \
+  # xAI
+  XAI_API_KEY="" XAI_MODEL_LIST="" \
   # 01.AI
   ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
   # Zhipu
@@ -32,12 +32,12 @@ tags:
 
  ## Server-Side Database: A Convenient and Efficient Experience
 
- For users who want a convenient out-of-the-box experience, LobeChat supports PostgreSQL as a server-side database. With data managed through Dirzzle ORM and authentication handled by Clerk, LobeChat offers an efficient and reliable server-side data management solution.
+ For users who want a convenient out-of-the-box experience, LobeChat supports PostgreSQL as a server-side database. With data managed through Drizzle ORM and authentication handled by Clerk, LobeChat offers an efficient and reliable server-side data management solution.
 
  ### Server-Side Database Tech Stack
 
  - **DB**: PostgreSQL (Neon by default)
- - **ORM**: Dirzzle ORM
+ - **ORM**: Drizzle ORM
  - **Auth**: Clerk
  - **Server Router**: tRPC
 
@@ -51,4 +51,4 @@ tags:
 
  The server-side database option suits users who want a streamlined data-management workflow and a convenient experience. With a server-side database and user authentication, LobeChat can ensure both data security and efficiency. To learn how to configure the server-side database, see our [detailed documentation](/zh/docs/self-hosting/advanced/server-database).
 
- Whether you choose the local database or the server-side database, LobeChat delivers an outstanding user experience.
+ Whether you choose the local database or the server-side database, LobeChat delivers an outstanding user experience.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.30.0",
+   "version": "1.31.1",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -24,6 +24,8 @@ const FileList = memo(() => {
    const showFileList = useFileStore(fileChatSelectors.chatUploadFileListHasItem);
    const { styles } = useStyles();
 
+   if (!inputFilesList.length) return null;
+
    return (
      <Flexbox
        className={styles.container}
@@ -23,6 +23,7 @@ import {
    TaichuProviderCard,
    TogetherAIProviderCard,
    UpstageProviderCard,
+   XAIProviderCard,
    ZeroOneProviderCard,
    ZhiPuProviderCard,
  } from '@/config/modelProviders';
@@ -70,6 +71,7 @@ export const useProviderList = (): ProviderItem[] => {
    MistralProviderCard,
    Ai21ProviderCard,
    UpstageProviderCard,
+   XAIProviderCard,
    QwenProviderCard,
    WenxinProvider,
    HunyuanProviderCard,
package/src/config/llm.ts CHANGED
@@ -153,6 +153,10 @@ export const getLLMConfig = () => {
      SENSENOVA_ACCESS_KEY_ID: z.string().optional(),
      SENSENOVA_ACCESS_KEY_SECRET: z.string().optional(),
      SENSENOVA_MODEL_LIST: z.string().optional(),
+
+     ENABLED_XAI: z.boolean(),
+     XAI_API_KEY: z.string().optional(),
+     XAI_MODEL_LIST: z.string().optional(),
    },
    runtimeEnv: {
      API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -304,6 +308,10 @@ export const getLLMConfig = () => {
      SENSENOVA_ACCESS_KEY_ID: process.env.SENSENOVA_ACCESS_KEY_ID,
      SENSENOVA_ACCESS_KEY_SECRET: process.env.SENSENOVA_ACCESS_KEY_SECRET,
      SENSENOVA_MODEL_LIST: process.env.SENSENOVA_MODEL_LIST,
+
+     ENABLED_XAI: !!process.env.XAI_API_KEY,
+     XAI_API_KEY: process.env.XAI_API_KEY,
+     XAI_MODEL_LIST: process.env.XAI_MODEL_LIST,
    },
  });
};
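Worth noting in the hunk above: `ENABLED_XAI` has no environment variable of its own; it is derived as `!!process.env.XAI_API_KEY`, so setting the API key is all it takes to enable the provider server-side. A minimal TypeScript sketch of that pattern, with the `createEnv`/zod wiring omitted (variable names match the diff):

```ts
// Sketch only: LobeChat validates these through createEnv with zod,
// but the enable flag reduces to presence-of-key.
const XAI_API_KEY = process.env.XAI_API_KEY; // e.g. set via the Dockerfile ENV above
const XAI_MODEL_LIST = process.env.XAI_MODEL_LIST; // optional model-list string

const ENABLED_XAI: boolean = !!XAI_API_KEY; // true iff the key is set and non-empty
```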
@@ -31,6 +31,7 @@ import TaichuProvider from './taichu';
  import TogetherAIProvider from './togetherai';
  import UpstageProvider from './upstage';
  import WenxinProvider from './wenxin';
+ import XAIProvider from './xai';
  import ZeroOneProvider from './zeroone';
  import ZhiPuProvider from './zhipu';
 
@@ -53,6 +54,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
    PerplexityProvider.chatModels,
    AnthropicProvider.chatModels,
    HuggingFaceProvider.chatModels,
+   XAIProvider.chatModels,
    ZeroOneProvider.chatModels,
    StepfunProvider.chatModels,
    NovitaProvider.chatModels,
@@ -88,6 +90,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
    MistralProvider,
    Ai21Provider,
    UpstageProvider,
+   XAIProvider,
    QwenProvider,
    WenxinProvider,
    HunyuanProvider,
@@ -145,5 +148,6 @@ export { default as TaichuProviderCard } from './taichu';
  export { default as TogetherAIProviderCard } from './togetherai';
  export { default as UpstageProviderCard } from './upstage';
  export { default as WenxinProviderCard } from './wenxin';
+ export { default as XAIProviderCard } from './xai';
  export { default as ZeroOneProviderCard } from './zeroone';
  export { default as ZhiPuProviderCard } from './zhipu';
@@ -0,0 +1,29 @@
+ import { ModelProviderCard } from '@/types/llm';
+
+ // ref: https://x.ai/about
+ const XAI: ModelProviderCard = {
+   chatModels: [
+     {
+       description: 'Performance on par with Grok 2, but with greater efficiency, speed, and capability.',
+       displayName: 'Grok Beta',
+       enabled: true,
+       functionCall: true,
+       id: 'grok-beta',
+       pricing: {
+         input: 5,
+         output: 15,
+       },
+       tokens: 131_072,
+     },
+   ],
+   checkModel: 'grok-beta',
+   description:
+     'xAI is a company dedicated to building artificial intelligence to accelerate human scientific discovery. Our mission is to advance our collective understanding of the universe.',
+   id: 'xai',
+   modelList: { showModelFetcher: true },
+   modelsUrl: 'https://docs.x.ai/docs#models',
+   name: 'xAI',
+   url: 'https://console.x.ai',
+ };
+
+ export default XAI;
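The card's `pricing` values appear to be USD per million tokens (grok-beta's published rate was $5 input / $15 output per 1M tokens, which matches), and `tokens: 131_072` is the context window. A hypothetical helper, not part of LobeChat, showing how such a card could be used to estimate a request's cost under that assumption:

```ts
import type { ModelProviderCard } from '@/types/llm';

// Hypothetical helper (not in the package): estimate the USD cost of a call,
// assuming pricing.input / pricing.output are USD per 1M tokens.
const estimateCostUSD = (
  card: ModelProviderCard,
  modelId: string,
  inputTokens: number,
  outputTokens: number,
): number | undefined => {
  const model = card.chatModels.find((m) => m.id === modelId);
  if (!model?.pricing) return undefined;
  return (
    (inputTokens / 1_000_000) * (model.pricing.input ?? 0) +
    (outputTokens / 1_000_000) * (model.pricing.output ?? 0)
  );
};

// e.g. estimateCostUSD(XAI, 'grok-beta', 1000, 500) → 0.0125
```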
@@ -29,6 +29,7 @@ import {
    TogetherAIProviderCard,
    UpstageProviderCard,
    WenxinProviderCard,
+   XAIProviderCard,
    ZeroOneProviderCard,
    ZhiPuProviderCard,
    filterEnabledModels,
@@ -161,6 +162,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
      enabled: false,
      enabledModels: filterEnabledModels(WenxinProviderCard),
    },
+   xai: {
+     enabled: false,
+     enabledModels: filterEnabledModels(XAIProviderCard),
+   },
    zeroone: {
      enabled: false,
      enabledModels: filterEnabledModels(ZeroOneProviderCard),
@@ -42,6 +42,7 @@ import {
    TextToSpeechPayload,
  } from './types';
  import { LobeUpstageAI } from './upstage';
+ import { LobeXAI } from './xai';
  import { LobeZeroOneAI } from './zeroone';
  import { LobeZhipuAI } from './zhipu';
 
@@ -156,6 +157,7 @@ class AgentRuntime {
      taichu: Partial<ClientOptions>;
      togetherai: Partial<ClientOptions>;
      upstage: Partial<ClientOptions>;
+     xai: Partial<ClientOptions>;
      zeroone: Partial<ClientOptions>;
      zhipu: Partial<ClientOptions>;
    }>,
@@ -324,6 +326,11 @@ class AgentRuntime {
        break;
      }
 
+     case ModelProvider.XAI: {
+       runtimeModel = new LobeXAI(params.xai);
+       break;
+     }
+
      case ModelProvider.Cloudflare: {
        runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
        break;
@@ -53,6 +53,7 @@ export enum ModelProvider {
    TogetherAI = 'togetherai',
    Upstage = 'upstage',
    Wenxin = 'wenxin',
+   XAI = 'xai',
    ZeroOne = 'zeroone',
    ZhiPu = 'zhipu',
  }
@@ -0,0 +1,255 @@
+ // @vitest-environment node
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import {
+   ChatStreamCallbacks,
+   LobeOpenAICompatibleRuntime,
+   ModelProvider,
+ } from '@/libs/agent-runtime';
+
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeXAI } from './index';
+
+ const provider = ModelProvider.XAI;
+ const defaultBaseURL = 'https://api.x.ai/v1';
+
+ const bizErrorType = 'ProviderBizError';
+ const invalidErrorType = 'InvalidProviderAPIKey';
+
+ // Mock the console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+
+ let instance: LobeOpenAICompatibleRuntime;
+
+ beforeEach(() => {
+   instance = new LobeXAI({ apiKey: 'test' });
+
+   // Use vi.spyOn to mock the chat.completions.create method
+   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+     new ReadableStream() as any,
+   );
+ });
+
+ afterEach(() => {
+   vi.clearAllMocks();
+ });
+
+ describe('LobeXAI', () => {
+   describe('init', () => {
+     it('should correctly initialize with an API key', async () => {
+       const instance = new LobeXAI({ apiKey: 'test_api_key' });
+       expect(instance).toBeInstanceOf(LobeXAI);
+       expect(instance.baseURL).toEqual(defaultBaseURL);
+     });
+   });
+
+   describe('chat', () => {
+     describe('Error', () => {
+       it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+         // Arrange
+         const apiError = new OpenAI.APIError(
+           400,
+           {
+             status: 400,
+             error: {
+               message: 'Bad Request',
+             },
+           },
+           'Error message',
+           {},
+         );
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               error: { message: 'Bad Request' },
+               status: 400,
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+         try {
+           new LobeXAI({});
+         } catch (e) {
+           expect(e).toEqual({ errorType: invalidErrorType });
+         }
+       });
+
+       it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: {
+             message: 'api is undefined',
+           },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: { message: 'api is undefined' },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         instance = new LobeXAI({
+           apiKey: 'test',
+
+           baseURL: 'https://api.abc.com/v1',
+         });
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: 'https://api.***.com/v1',
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw an InvalidXAIAPIKey error type on 401 status code', async () => {
+         // Mock the API call to simulate a 401 error
+         const error = new Error('Unauthorized') as any;
+         error.status = 401;
+         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           // Expect the chat method to throw an error with InvalidXAIAPIKey
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: new Error('Unauthorized'),
+             errorType: invalidErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+         // Arrange
+         const genericError = new Error('Generic Error');
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             errorType: 'AgentRuntimeError',
+             provider,
+             error: {
+               name: genericError.name,
+               cause: genericError.cause,
+               message: genericError.message,
+               stack: genericError.stack,
+             },
+           });
+         }
+       });
+     });
+
+     describe('DEBUG', () => {
+       it('should call debugStream and return StreamingTextResponse when DEBUG_XAI_CHAT_COMPLETION is 1', async () => {
+         // Arrange
+         const mockProdStream = new ReadableStream() as any; // mocked prod stream
+         const mockDebugStream = new ReadableStream({
+           start(controller) {
+             controller.enqueue('Debug stream content');
+             controller.close();
+           },
+         }) as any;
+         mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+         // Mock the chat.completions.create return value, including a mocked tee method
+         (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+           tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+         });
+
+         // Save the original environment variable value
+         const originalDebugValue = process.env.DEBUG_XAI_CHAT_COMPLETION;
+
+         // Mock the environment variable
+         process.env.DEBUG_XAI_CHAT_COMPLETION = '1';
+         vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+         // Run the test: the call below should trigger debugStream when the
+         // debug flag is set; adjust as needed for the actual implementation
+         await instance.chat({
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'grok-beta',
+           stream: true,
+           temperature: 0,
+         });
+
+         // Verify that debugStream was called
+         expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+         // Restore the original environment variable value
+         process.env.DEBUG_XAI_CHAT_COMPLETION = originalDebugValue;
+       });
+     });
+   });
+ });
@@ -0,0 +1,10 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+ export const LobeXAI = LobeOpenAICompatibleFactory({
+   baseURL: 'https://api.x.ai/v1',
+   debug: {
+     chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
+   },
+   provider: ModelProvider.XAI,
+ });
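Because xAI's API is OpenAI-compatible, the entire runtime is this one factory call. A minimal usage sketch mirroring the tests above (the key is a placeholder, and the import path is assumed from the surrounding imports; in the app the key is resolved from `XAI_API_KEY` or the user's key vault):

```ts
import { LobeXAI } from '@/libs/agent-runtime/xai'; // path assumed from the imports above

const runtime = new LobeXAI({ apiKey: 'xai-placeholder-key' });

// Speaks the OpenAI-compatible chat API against https://api.x.ai/v1;
// setting DEBUG_XAI_CHAT_COMPLETION=1 logs the raw stream via debugStream.
const response = await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'grok-beta',
  stream: true,
  temperature: 0,
});
```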
@@ -33,6 +33,7 @@ import {
    TogetherAIProviderCard,
    UpstageProviderCard,
    WenxinProviderCard,
+   XAIProviderCard,
    ZeroOneProviderCard,
    ZhiPuProviderCard,
  } from '@/config/modelProviders';
@@ -146,6 +147,9 @@ export const getServerGlobalConfig = () => {
 
    ENABLED_HUGGINGFACE,
    HUGGINGFACE_MODEL_LIST,
+
+   ENABLED_XAI,
+   XAI_MODEL_LIST,
  } = getLLMConfig();
 
  const config: GlobalServerConfig = {
@@ -399,6 +403,14 @@ export const getServerGlobalConfig = () => {
      modelString: WENXIN_MODEL_LIST,
    }),
  },
+ xai: {
+   enabled: ENABLED_XAI,
+   enabledModels: extractEnabledModels(XAI_MODEL_LIST),
+   serverModelCards: transformToChatModelCards({
+     defaultChatModels: XAIProviderCard.chatModels,
+     modelString: XAI_MODEL_LIST,
+   }),
+ },
  zeroone: {
    enabled: ENABLED_ZEROONE,
    enabledModels: extractEnabledModels(ZEROONE_MODEL_LIST),
@@ -286,6 +286,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
      const apiKey = sensenovaAccessKeyID + ':' + sensenovaAccessKeySecret;
 
+     return { apiKey };
+   }
+   case ModelProvider.XAI: {
+     const { XAI_API_KEY } = getLLMConfig();
+
+     const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);
+
      return { apiKey };
    }
  }
@@ -65,6 +65,7 @@ export interface UserKeyVaults {
    togetherai?: OpenAICompatibleKeyVault;
    upstage?: OpenAICompatibleKeyVault;
    wenxin?: WenxinKeyVault;
+   xai?: OpenAICompatibleKeyVault;
    zeroone?: OpenAICompatibleKeyVault;
    zhipu?: OpenAICompatibleKeyVault;
  }
@@ -37,6 +37,23 @@ exports[`parseModelString > duplicate naming model 1`] = `
  }
  `;
 
+ exports[`parseModelString > empty string model 1`] = `
+ {
+   "add": [
+     {
+       "displayName": "gpt-4-turbo",
+       "id": "gpt-4-1106-preview",
+     },
+     {
+       "displayName": undefined,
+       "id": "claude-2",
+     },
+   ],
+   "removeAll": false,
+   "removed": [],
+ }
+ `;
+
  exports[`parseModelString > only add the model 1`] = `
  {
    "add": [
@@ -25,6 +25,11 @@ describe('parseModelString', () => {
    expect(result).toMatchSnapshot();
  });
 
+ it('empty string model', () => {
+   const result = parseModelString('gpt-4-1106-preview=gpt-4-turbo,, ,\n ,+claude-2');
+   expect(result).toMatchSnapshot();
+ });
+
  describe('extension capabilities', () => {
    it('with token', () => {
      const result = parseModelString('chatglm-6b=ChatGLM 6B<4096>');
@@ -34,6 +34,11 @@ export const parseModelString = (modelString: string = '', withDeploymentName =
      continue;
    }
 
+   // remove empty model name
+   if (!item.trim().length) {
+     continue;
+   }
+
    // Remove duplicate model entries.
    const existingIndex = models.findIndex(({ id: n }) => n === id);
    if (existingIndex !== -1) {
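This guard makes `XAI_MODEL_LIST`-style strings tolerant of stray commas and whitespace. As exercised by the new test and snapshot above, entries are comma-separated, `id=displayName` renames a model, a leading `+` adds one, and empty entries are now skipped rather than parsed into bogus models. A sketch of what the parse yields, with the expected shape taken from the snapshot:

```ts
// Input from the new test case: note the empty entries between commas.
const result = parseModelString('gpt-4-1106-preview=gpt-4-turbo,, ,\n ,+claude-2');

// Per the snapshot, this now parses cleanly instead of emitting empty models:
// {
//   add: [
//     { displayName: 'gpt-4-turbo', id: 'gpt-4-1106-preview' },
//     { displayName: undefined, id: 'claude-2' },
//   ],
//   removeAll: false,
//   removed: [],
// }
```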