@lobehub/chat 1.73.2 → 1.74.1
This diff shows the changes between these two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- package/.env.example +4 -0
- package/CHANGELOG.md +51 -0
- package/Dockerfile +5 -1
- package/Dockerfile.database +5 -1
- package/Dockerfile.pglite +3 -1
- package/README.md +4 -2
- package/README.zh-CN.md +4 -2
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +13 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +14 -0
- package/docs/usage/providers/infiniai.mdx +29 -0
- package/docs/usage/providers/infiniai.zh-CN.mdx +29 -0
- package/locales/ar/models.json +30 -0
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/models.json +30 -0
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/models.json +30 -0
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/models.json +30 -0
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/models.json +30 -0
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/models.json +30 -0
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/models.json +30 -0
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/models.json +30 -0
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/models.json +22 -0
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/models.json +30 -0
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/models.json +30 -0
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/models.json +30 -0
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/models.json +30 -0
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/models.json +30 -0
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/models.json +30 -0
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/models.json +30 -0
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/models.json +30 -0
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/models.json +19 -0
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +3 -3
- package/packages/web-crawler/src/utils/htmlToMarkdown.test.ts +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +5 -1
- package/src/config/aiModels/index.ts +6 -0
- package/src/config/aiModels/infiniai.ts +307 -0
- package/src/config/aiModels/search1api.ts +63 -0
- package/src/config/llm.ts +12 -0
- package/src/config/modelProviders/index.ts +8 -0
- package/src/config/modelProviders/infiniai.ts +184 -0
- package/src/config/modelProviders/search1api.ts +58 -0
- package/src/database/models/__tests__/knowledgeBase.test.ts +2 -0
- package/src/libs/agent-runtime/ai21/index.test.ts +8 -250
- package/src/libs/agent-runtime/ai360/index.test.ts +8 -250
- package/src/libs/agent-runtime/anthropic/index.ts +5 -1
- package/src/libs/agent-runtime/deepseek/index.test.ts +119 -335
- package/src/libs/agent-runtime/fireworksai/index.test.ts +8 -247
- package/src/libs/agent-runtime/giteeai/index.test.ts +8 -250
- package/src/libs/agent-runtime/github/index.test.ts +8 -207
- package/src/libs/agent-runtime/infiniai/index.ts +43 -0
- package/src/libs/agent-runtime/internlm/index.test.ts +8 -250
- package/src/libs/agent-runtime/lmstudio/index.test.ts +8 -247
- package/src/libs/agent-runtime/moonshot/index.test.ts +10 -243
- package/src/libs/agent-runtime/novita/index.test.ts +9 -221
- package/src/libs/agent-runtime/ollama/index.test.ts +4 -4
- package/src/libs/agent-runtime/openrouter/index.test.ts +12 -217
- package/src/libs/agent-runtime/ppio/index.test.ts +11 -220
- package/src/libs/agent-runtime/providerTestUtils.ts +6 -6
- package/src/libs/agent-runtime/qwen/index.test.ts +10 -242
- package/src/libs/agent-runtime/runtimeMap.ts +4 -0
- package/src/libs/agent-runtime/search1api/index.ts +64 -0
- package/src/libs/agent-runtime/sensenova/index.test.ts +10 -242
- package/src/libs/agent-runtime/spark/index.test.ts +7 -242
- package/src/libs/agent-runtime/stepfun/index.test.ts +7 -242
- package/src/libs/agent-runtime/taichu/index.test.ts +12 -220
- package/src/libs/agent-runtime/types/type.ts +2 -0
- package/src/libs/agent-runtime/upstage/index.test.ts +7 -250
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +2 -2
- package/src/libs/agent-runtime/xai/index.test.ts +8 -250
- package/src/services/chat.ts +1 -4
- package/src/types/user/settings/keyVaults.ts +2 -0
- package/src/utils/fetch/__tests__/parseToolCalls.test.ts +9 -11
- package/src/utils/server/jwt.test.ts +1 -1
- package/vitest.server.config.ts +3 -1
package/src/libs/agent-runtime/upstage/index.test.ts
CHANGED
@@ -1,255 +1,12 @@
 // @vitest-environment node
-import
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';

-import {
-  ChatStreamCallbacks,
-  LobeOpenAICompatibleRuntime,
-  ModelProvider,
-} from '@/libs/agent-runtime';
-
-import * as debugStreamModule from '../utils/debugStream';
 import { LobeUpstageAI } from './index';

-
-
-
-
-
-
-// Mock the console.error to avoid polluting test output
-vi.spyOn(console, 'error').mockImplementation(() => {});
-
-let instance: LobeOpenAICompatibleRuntime;
-
-beforeEach(() => {
-  instance = new LobeUpstageAI({ apiKey: 'test' });
-
-  // Use vi.spyOn to mock the chat.completions.create method
-  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-    new ReadableStream() as any,
-  );
-});
-
-afterEach(() => {
-  vi.clearAllMocks();
-});
-
-describe('LobeUpstageAI', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobeUpstageAI({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobeUpstageAI);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    describe('Error', () => {
-      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'solar-1-mini-chat',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeUpstageAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'solar-1-mini-chat',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeUpstageAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'solar-1-mini-chat',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidUpstageAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('Unauthorized') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'solar-1-mini-chat',
-            temperature: 0,
-          });
-        } catch (e) {
-          // Expect the chat method to throw an error with InvalidUpstageAPIKey
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('Unauthorized'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'solar-1-mini-chat',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_UPSTAGE_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // add the toReadableStream method
-
-        // Mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // Save the original environment variable value
-        const originalDebugValue = process.env.DEBUG_UPSTAGE_CHAT_COMPLETION;
-
-        // Mock the environment variable
-        process.env.DEBUG_UPSTAGE_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // Run the test
-        // Run your test function and make sure it calls debugStream when the condition is met
-        // Hypothetical test call; adjust to the actual setup as needed
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'solar-1-mini-chat',
-          stream: true,
-          temperature: 0,
-        });
-
-        // Verify that debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // Restore the original environment variable value
-        process.env.DEBUG_UPSTAGE_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
-  });
+testProvider({
+  Runtime: LobeUpstageAI,
+  provider: 'upstage',
+  defaultBaseURL: 'https://api.upstage.ai/v1/solar',
+  chatDebugEnv: 'DEBUG_UPSTAGE_CHAT_COMPLETION',
+  chatModel: 'solar-pro',
 });
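The hunk above is representative of most test changes in this release: each provider's copy-pasted init/error/debug suite is collapsed into a single call to the shared testProvider helper from providerTestUtils.ts. For orientation, a minimal sketch of what such a helper can look like; the option names come from the testProvider({ ... }) calls in this diff, but the body below is an assumption, not the package's actual implementation:

// Hypothetical sketch only — option names taken from this diff, internals assumed.
import { describe, expect, it } from 'vitest';

interface TestProviderOptions {
  Runtime: new (options: { apiKey?: string; baseURL?: string }) => { baseURL?: string };
  chatDebugEnv: string;
  chatModel: string;
  defaultBaseURL: string;
  provider: string;
}

export const testProvider = ({ Runtime, defaultBaseURL, provider }: TestProviderOptions) => {
  describe(`${provider} runtime`, () => {
    it('initializes with an API key and exposes the default baseURL', () => {
      const instance = new Runtime({ apiKey: 'test_api_key' });
      expect(instance).toBeInstanceOf(Runtime);
      expect(instance.baseURL).toEqual(defaultBaseURL);
    });
  });
};

In this diff the helper is always invoked with the same five options (Runtime, provider, defaultBaseURL, chatDebugEnv, chatModel), which is why each provider test file shrinks to roughly a dozen lines.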
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts
CHANGED
@@ -712,9 +712,9 @@ describe('LobeOpenAICompatibleFactory', () => {
        });
      } catch (e) {
        // Expect the chat method to throw an error with InvalidMoonshotAPIKey
-        expect(e).
+        expect(e).toMatchObject({
          endpoint: defaultBaseURL,
-          error
+          error,
          errorType: invalidErrorType,
          provider,
        });
package/src/libs/agent-runtime/xai/index.test.ts
CHANGED
@@ -1,255 +1,13 @@
 // @vitest-environment node
-import
-import {
+import { ModelProvider } from '@/libs/agent-runtime';
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';

-import {
-  ChatStreamCallbacks,
-  LobeOpenAICompatibleRuntime,
-  ModelProvider,
-} from '@/libs/agent-runtime';
-
-import * as debugStreamModule from '../utils/debugStream';
 import { LobeXAI } from './index';

-
-
-
-
-
-
-// Mock the console.error to avoid polluting test output
-vi.spyOn(console, 'error').mockImplementation(() => {});
-
-let instance: LobeOpenAICompatibleRuntime;
-
-beforeEach(() => {
-  instance = new LobeXAI({ apiKey: 'test' });
-
-  // Use vi.spyOn to mock the chat.completions.create method
-  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-    new ReadableStream() as any,
-  );
-});
-
-afterEach(() => {
-  vi.clearAllMocks();
-});
-
-describe('LobeXAI', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobeXAI({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobeXAI);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    describe('Error', () => {
-      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'grok-beta',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeXAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'grok-beta',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeXAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'grok-beta',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidXAIAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('Unauthorized') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'grok-beta',
-            temperature: 0,
-          });
-        } catch (e) {
-          // Expect the chat method to throw an error with InvalidXAIAPIKey
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('Unauthorized'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'grok-beta',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_XAI_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // add the toReadableStream method
-
-        // Mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // Save the original environment variable value
-        const originalDebugValue = process.env.DEBUG_XAI_CHAT_COMPLETION;
-
-        // Mock the environment variable
-        process.env.DEBUG_XAI_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // Run the test
-        // Run your test function and make sure it calls debugStream when the condition is met
-        // Hypothetical test call; adjust to the actual setup as needed
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'grok-beta',
-          stream: true,
-          temperature: 0,
-        });
-
-        // Verify that debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // Restore the original environment variable value
-        process.env.DEBUG_XAI_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
-  });
+testProvider({
+  Runtime: LobeXAI,
+  provider: ModelProvider.XAI,
+  defaultBaseURL: 'https://api.x.ai/v1',
+  chatDebugEnv: 'DEBUG_XAI_CHAT_COMPLETION',
+  chatModel: 'grok',
 });
package/src/services/chat.ts
CHANGED
@@ -365,10 +365,7 @@ class ChatService {
        smoothing:
          providerConfig?.settings?.smoothing ||
          // @deprecated in V2
-          providerConfig?.smoothing
-        // use smoothing when enable client fetch
-        // https://github.com/lobehub/lobe-chat/issues/3800
-        enableFetchOnClient,
+          providerConfig?.smoothing,
      });
    };

package/src/types/user/settings/keyVaults.ts
CHANGED
@@ -52,6 +52,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
   higress?: OpenAICompatibleKeyVault;
   huggingface?: OpenAICompatibleKeyVault;
   hunyuan?: OpenAICompatibleKeyVault;
+  infiniai?: OpenAICompatibleKeyVault;
   internlm?: OpenAICompatibleKeyVault;
   jina?: OpenAICompatibleKeyVault;
   lmstudio?: OpenAICompatibleKeyVault;
@@ -69,6 +70,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
   ppio?: OpenAICompatibleKeyVault;
   qwen?: OpenAICompatibleKeyVault;
   sambanova?: OpenAICompatibleKeyVault;
+  search1api?: OpenAICompatibleKeyVault;
   sensenova?: OpenAICompatibleKeyVault;
   siliconcloud?: OpenAICompatibleKeyVault;
   spark?: OpenAICompatibleKeyVault;
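The two new vault slots follow the same pattern as the neighbouring OpenAI-compatible providers. As a rough illustration only — the apiKey/baseURL fields are the usual OpenAICompatibleKeyVault shape, and the import path and values here are assumptions, not package defaults:

// Illustrative sketch; import path and values are placeholders.
import type { UserKeyVaults } from '@/types/user/settings/keyVaults';

const keyVaults: Partial<UserKeyVaults> = {
  infiniai: { apiKey: 'sk-placeholder' },
  search1api: { apiKey: 'sk-placeholder' },
};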
package/src/utils/fetch/__tests__/parseToolCalls.test.ts
CHANGED
@@ -107,17 +107,15 @@ describe('parseToolCalls', () => {
      try {
        parseToolCalls(origin, chunk as any);
      } catch (e) {
-        expect(e).
-
-
-
-
-
-
-
-
-          ]),
-        );
+        expect((e as any).issues).toMatchObject([
+          {
+            code: 'invalid_type',
+            expected: 'object',
+            received: 'undefined',
+            path: ['function'],
+            message: 'Required',
+          },
+        ]);
      }
    });
  });
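The rewritten assertion matches the issues array that zod attaches to its validation errors instead of comparing the whole error object. A small standalone illustration of that shape (assuming the validation behind parseToolCalls is zod-based, which the asserted issue format suggests; the schema below is invented for the example):

// Standalone illustration — reproduces the asserted issue shape with a plain zod schema.
import { z } from 'zod';

const schema = z.object({ function: z.object({ name: z.string() }) });
const result = schema.safeParse({});

if (!result.success) {
  // result.error.issues[0] matches:
  // { code: 'invalid_type', expected: 'object', received: 'undefined',
  //   path: ['function'], message: 'Required' }
  console.log(result.error.issues);
}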
package/src/utils/server/jwt.test.ts
CHANGED
@@ -56,7 +56,7 @@ describe('getJWTPayload', () => {
    try {
      await getJWTPayload(token);
    } catch (e) {
-      expect(e).toEqual(
+      expect((e as Error).message).toEqual('"exp" claim timestamp check failed');
    }
  });
});
package/vitest.server.config.ts
CHANGED
@@ -17,7 +17,9 @@ export default defineConfig({
    environment: 'node',
    include: ['src/database/models/**/**/*.test.ts', 'src/database/server/**/**/*.test.ts'],
    poolOptions: {
-
+      forks: {
+        singleFork: true,
+      },
    },
    setupFiles: './tests/setup-db.ts',
  },
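For reference, poolOptions.forks.singleFork is Vitest's switch for running every test file in one forked child process instead of parallel workers; here it presumably keeps the database suites from hitting the shared test database concurrently. The resulting block, reconstructed from the hunk above, reads:

// vitest.server.config.ts (resulting state of the edited block)
poolOptions: {
  forks: {
    singleFork: true,
  },
},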