@lobehub/chat 1.40.4 → 1.42.0
- package/CHANGELOG.md +52 -0
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/advanced/auth/next-auth/wechat.mdx +46 -0
- package/docs/self-hosting/advanced/auth/next-auth/wechat.zh-CN.mdx +43 -0
- package/package.json +3 -3
- package/src/app/(backend)/webapi/assistant/store/route.ts +2 -11
- package/src/app/(main)/discover/(detail)/provider/[slug]/features/ProviderConfig.tsx +7 -4
- package/src/config/app.ts +4 -0
- package/src/config/modelProviders/spark.ts +3 -6
- package/src/features/MobileTabBar/index.tsx +3 -2
- package/src/features/User/UserAvatar.tsx +2 -2
- package/src/features/User/UserPanel/useMenu.tsx +5 -20
- package/src/hooks/useInterceptingRoutes.test.ts +2 -16
- package/src/hooks/useInterceptingRoutes.ts +2 -18
- package/src/libs/agent-runtime/qwen/index.test.ts +13 -188
- package/src/libs/agent-runtime/qwen/index.ts +47 -126
- package/src/libs/agent-runtime/spark/index.test.ts +24 -28
- package/src/libs/agent-runtime/spark/index.ts +4 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +131 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +14 -3
- package/src/libs/agent-runtime/utils/streams/index.ts +1 -0
- package/src/libs/agent-runtime/utils/streams/spark.test.ts +199 -0
- package/src/libs/agent-runtime/utils/streams/spark.ts +134 -0
- package/src/libs/next-auth/sso-providers/index.ts +2 -0
- package/src/libs/next-auth/sso-providers/wechat.ts +24 -0
- package/src/server/modules/AssistantStore/index.test.ts +5 -5
- package/src/server/modules/AssistantStore/index.ts +39 -1
- package/src/server/modules/EdgeConfig/index.ts +23 -0
- package/src/server/services/discover/index.ts +2 -13
- package/src/types/discover.ts +20 -0
- package/src/app/@modal/(.)settings/modal/index.tsx +0 -45
- package/src/app/@modal/(.)settings/modal/layout.tsx +0 -47
- package/src/app/@modal/(.)settings/modal/loading.tsx +0 -5
- package/src/app/@modal/(.)settings/modal/page.tsx +0 -19
package/src/libs/agent-runtime/qwen/index.test.ts

@@ -2,8 +2,9 @@
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import
-import {
+import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { ModelProvider } from '@/libs/agent-runtime';
+import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
 
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeQwenAI } from './index';
@@ -16,7 +17,7 @@ const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
 
-let instance:
+let instance: LobeOpenAICompatibleRuntime;
 
 beforeEach(() => {
   instance = new LobeQwenAI({ apiKey: 'test' });
@@ -40,183 +41,7 @@ describe('LobeQwenAI', () => {
     });
   });
 
-  describe('models', () => {
-    it('should correctly list available models', async () => {
-      const instance = new LobeQwenAI({ apiKey: 'test_api_key' });
-      vi.spyOn(instance, 'models').mockResolvedValue(Qwen.chatModels);
-
-      const models = await instance.models();
-      expect(models).toEqual(Qwen.chatModels);
-    });
-  });
-
   describe('chat', () => {
-    describe('Params', () => {
-      it('should call llms with proper options', async () => {
-        const mockStream = new ReadableStream();
-        const mockResponse = Promise.resolve(mockStream);
-
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-        const result = await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 0.6,
-          top_p: 0.7,
-        });
-
-        // Assert
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          {
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
-            temperature: 0.6,
-            stream: true,
-            top_p: 0.7,
-            result_format: 'message',
-          },
-          { headers: { Accept: '*/*' } },
-        );
-        expect(result).toBeInstanceOf(Response);
-      });
-
-      it('should call vlms with proper options', async () => {
-        const mockStream = new ReadableStream();
-        const mockResponse = Promise.resolve(mockStream);
-
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-        const result = await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-vl-plus',
-          temperature: 0.6,
-          top_p: 0.7,
-        });
-
-        // Assert
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          {
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-vl-plus',
-            stream: true,
-          },
-          { headers: { Accept: '*/*' } },
-        );
-        expect(result).toBeInstanceOf(Response);
-      });
-
-      it('should transform non-streaming response to stream correctly', async () => {
-        const mockResponse = {
-          id: 'chatcmpl-fc539f49-51a8-94be-8061',
-          object: 'chat.completion',
-          created: 1719901794,
-          model: 'qwen-turbo',
-          choices: [
-            {
-              index: 0,
-              message: { role: 'assistant', content: 'Hello' },
-              finish_reason: 'stop',
-              logprobs: null,
-            },
-          ],
-        } as OpenAI.ChatCompletion;
-        vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-          mockResponse as any,
-        );
-
-        const result = await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 0.6,
-          stream: false,
-        });
-
-        const decoder = new TextDecoder();
-        const reader = result.body!.getReader();
-        const stream: string[] = [];
-
-        while (true) {
-          const { value, done } = await reader.read();
-          if (done) break;
-          stream.push(decoder.decode(value));
-        }
-
-        expect(stream).toEqual([
-          'id: chatcmpl-fc539f49-51a8-94be-8061\n',
-          'event: text\n',
-          'data: "Hello"\n\n',
-          'id: chatcmpl-fc539f49-51a8-94be-8061\n',
-          'event: stop\n',
-          'data: "stop"\n\n',
-        ]);
-
-        expect((await reader.read()).done).toBe(true);
-      });
-
-      it('should set temperature to undefined if temperature is 0 or >= 2', async () => {
-        const temperatures = [0, 2, 3];
-        const expectedTemperature = undefined;
-
-        for (const temp of temperatures) {
-          vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-            new ReadableStream() as any,
-          );
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
-            temperature: temp,
-          });
-          expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-            expect.objectContaining({
-              messages: expect.any(Array),
-              model: 'qwen-turbo',
-              temperature: expectedTemperature,
-            }),
-            expect.any(Object),
-          );
-        }
-      });
-
-      it('should set temperature to original temperature', async () => {
-        vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-          new ReadableStream() as any,
-        );
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 1.5,
-        });
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          expect.objectContaining({
-            messages: expect.any(Array),
-            model: 'qwen-turbo',
-            temperature: 1.5,
-          }),
-          expect.any(Object),
-        );
-      });
-
-      it('should set temperature to Float', async () => {
-        const createMock = vi.fn().mockResolvedValue(new ReadableStream() as any);
-        vi.spyOn(instance['client'].chat.completions, 'create').mockImplementation(createMock);
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 1,
-        });
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          expect.objectContaining({
-            messages: expect.any(Array),
-            model: 'qwen-turbo',
-            temperature: expect.any(Number),
-          }),
-          expect.any(Object),
-        );
-        const callArgs = createMock.mock.calls[0][0];
-        expect(Number.isInteger(callArgs.temperature)).toBe(false); // Temperature is always not an integer
-      });
-    });
-
     describe('Error', () => {
       it('should return QwenBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
@@ -238,7 +63,7 @@ describe('LobeQwenAI', () => {
       try {
         await instance.chat({
           messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
+          model: 'qwen-turbo-latest',
           temperature: 0.999,
         });
       } catch (e) {
@@ -278,7 +103,7 @@ describe('LobeQwenAI', () => {
       try {
         await instance.chat({
           messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
+          model: 'qwen-turbo-latest',
           temperature: 0.999,
         });
       } catch (e) {
@@ -304,7 +129,8 @@ describe('LobeQwenAI', () => {
 
       instance = new LobeQwenAI({
         apiKey: 'test',
-
+
+        baseURL: 'https://api.abc.com/v1',
       });
 
       vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
@@ -313,13 +139,12 @@ describe('LobeQwenAI', () => {
       try {
         await instance.chat({
           messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
+          model: 'qwen-turbo-latest',
           temperature: 0.999,
         });
       } catch (e) {
         expect(e).toEqual({
-
-          endpoint: defaultBaseURL,
+          endpoint: 'https://api.***.com/v1',
           error: {
             cause: { message: 'api is undefined' },
             stack: 'abc',
@@ -339,7 +164,7 @@ describe('LobeQwenAI', () => {
       try {
         await instance.chat({
           messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
+          model: 'qwen-turbo-latest',
           temperature: 0.999,
         });
       } catch (e) {
@@ -362,7 +187,7 @@ describe('LobeQwenAI', () => {
       try {
         await instance.chat({
           messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
+          model: 'qwen-turbo-latest',
           temperature: 0.999,
         });
       } catch (e) {
@@ -410,7 +235,7 @@ describe('LobeQwenAI', () => {
       // Hypothetical test invocation; adjust to the actual setup as needed
       await instance.chat({
         messages: [{ content: 'Hello', role: 'user' }],
-        model: 'qwen-turbo',
+        model: 'qwen-turbo-latest',
         stream: true,
         temperature: 0.999,
       });
package/src/libs/agent-runtime/qwen/index.ts

@@ -1,129 +1,50 @@
-import {
-import
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import Qwen from '@/config/modelProviders/qwen';
-
-import { LobeRuntimeAI } from '../BaseAI';
-import { AgentRuntimeErrorType } from '../error';
-import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
-import { AgentRuntimeError } from '../utils/createError';
-import { debugStream } from '../utils/debugStream';
-import { handleOpenAIError } from '../utils/handleOpenAIError';
-import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
-import { StreamingResponse } from '../utils/response';
 import { QwenAIStream } from '../utils/streams';
 
[… 45 removed lines truncated in the source diff …]
-        }
-
-        return StreamingResponse(QwenAIStream(prod, options?.callback), {
-          headers: options?.headers,
-        });
-      }
-
-      const stream = transformResponseToStream(response as unknown as OpenAI.ChatCompletion);
-
-      return StreamingResponse(QwenAIStream(stream, options?.callback), {
-        headers: options?.headers,
-      });
-    } catch (error) {
-      if ('status' in (error as any)) {
-        switch ((error as Response).status) {
-          case 401: {
-            throw AgentRuntimeError.chat({
-              endpoint: this.baseURL,
-              error: error as any,
-              errorType: AgentRuntimeErrorType.InvalidProviderAPIKey,
-              provider: ModelProvider.Qwen,
-            });
-          }
-
-          default: {
-            break;
-          }
-        }
-      }
-      const { errorResult, RuntimeError } = handleOpenAIError(error);
-      const errorType = RuntimeError || AgentRuntimeErrorType.ProviderBizError;
-
-      throw AgentRuntimeError.chat({
-        endpoint: this.baseURL,
-        error: errorResult,
-        errorType,
-        provider: ModelProvider.Qwen,
-      });
-    }
-  }
-
-  private buildCompletionParamsByModel(payload: ChatStreamPayload) {
-    const { model, temperature, top_p, stream, messages, tools } = payload;
-    const isVisionModel = model.startsWith('qwen-vl');
-
-    const params = {
-      ...payload,
-      messages,
-      result_format: 'message',
-      stream: !!tools?.length ? false : (stream ?? true),
-      temperature:
-        temperature === 0 || temperature >= 2 ? undefined : temperature === 1 ? 0.999 : temperature, // 'temperature' must be Float
-      top_p: top_p && top_p >= 1 ? 0.999 : top_p,
-    };
-
-    /* Qwen-vl models temporarily do not support parameters below. */
-    /* Notice: `top_p` imposes significant impact on the result,the default 1 or 0.999 is not a proper choice. */
-    return isVisionModel
-      ? omit(
-          params,
-          'presence_penalty',
-          'frequency_penalty',
-          'temperature',
-          'result_format',
-          'top_p',
-        )
-      : omit(params, 'frequency_penalty');
-  }
-}
+/*
+  QwenLegacyModels: A set of legacy Qwen models that do not support presence_penalty.
+  Currently, presence_penalty is only supported on Qwen commercial models and open-source models starting from Qwen 1.5 and later.
+*/
+export const QwenLegacyModels = new Set([
+  'qwen-72b-chat',
+  'qwen-14b-chat',
+  'qwen-7b-chat',
+  'qwen-1.8b-chat',
+  'qwen-1.8b-longcontext-chat',
+]);
+
+export const LobeQwenAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
+  chatCompletion: {
+    handlePayload: (payload) => {
+      const { model, presence_penalty, temperature, top_p, ...rest } = payload;
+
+      return {
+        ...rest,
+        frequency_penalty: undefined,
+        model,
+        presence_penalty:
+          QwenLegacyModels.has(model)
+            ? undefined
+            : presence_penalty !== undefined && presence_penalty >= -2 && presence_penalty <= 2
+              ? presence_penalty
+              : undefined,
+        stream: !payload.tools,
+        temperature:
+          temperature !== undefined && temperature >= 0 && temperature < 2 ? temperature : undefined,
+        ...(model.startsWith('qwen-vl')
+          ? {
+              top_p: top_p !== undefined && top_p > 0 && top_p <= 1 ? top_p : undefined,
+            }
+          : {
+              enable_search: true,
+              top_p: top_p !== undefined && top_p > 0 && top_p < 1 ? top_p : undefined,
+            }),
+      } as any;
+    },
+    handleStream: QwenAIStream,
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_QWEN_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.Qwen,
+});
package/src/libs/agent-runtime/spark/index.test.ts

@@ -2,20 +2,17 @@
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-
-
-  ModelProvider,
-} from '@/libs/agent-runtime';
+import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { ModelProvider } from '@/libs/agent-runtime';
+import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
 
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeSparkAI } from './index';
 
 const provider = ModelProvider.Spark;
 const defaultBaseURL = 'https://spark-api-open.xf-yun.com/v1';
-
-const
-const invalidErrorType = 'InvalidProviderAPIKey';
+const bizErrorType = AgentRuntimeErrorType.ProviderBizError;
+const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
 
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
@@ -46,7 +43,7 @@ describe('LobeSparkAI', () => {
 
   describe('chat', () => {
     describe('Error', () => {
-      it('should return
+      it('should return QwenBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
         const apiError = new OpenAI.APIError(
           400,
@@ -66,8 +63,8 @@ describe('LobeSparkAI', () => {
         try {
           await instance.chat({
             messages: [{ content: 'Hello', role: 'user' }],
-            model: '
-            temperature: 0,
+            model: 'max-32k',
+            temperature: 0.999,
           });
         } catch (e) {
           expect(e).toEqual({
@@ -82,7 +79,7 @@ describe('LobeSparkAI', () => {
        }
      });
 
-      it('should throw AgentRuntimeError with
+      it('should throw AgentRuntimeError with InvalidQwenAPIKey if no apiKey is provided', async () => {
        try {
          new LobeSparkAI({});
        } catch (e) {
@@ -90,7 +87,7 @@ describe('LobeSparkAI', () => {
        }
      });
 
-      it('should return
+      it('should return QwenBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
        // Arrange
        const errorInfo = {
          stack: 'abc',
@@ -106,8 +103,8 @@ describe('LobeSparkAI', () => {
         try {
           await instance.chat({
             messages: [{ content: 'Hello', role: 'user' }],
-            model: '
-            temperature: 0,
+            model: 'max-32k',
+            temperature: 0.999,
           });
         } catch (e) {
           expect(e).toEqual({
@@ -122,7 +119,7 @@ describe('LobeSparkAI', () => {
        }
      });
 
-      it('should return
+      it('should return QwenBizError with an cause response with desensitize Url', async () => {
        // Arrange
        const errorInfo = {
          stack: 'abc',
@@ -142,8 +139,8 @@ describe('LobeSparkAI', () => {
         try {
           await instance.chat({
             messages: [{ content: 'Hello', role: 'user' }],
-            model: '
-            temperature: 0,
+            model: 'max-32k',
+            temperature: 0.999,
           });
         } catch (e) {
           expect(e).toEqual({
@@ -158,23 +155,22 @@ describe('LobeSparkAI', () => {
        }
      });
 
-      it('should throw an
+      it('should throw an InvalidQwenAPIKey error type on 401 status code', async () => {
         // Mock the API call to simulate a 401 error
-        const error = new Error('
+        const error = new Error('InvalidApiKey') as any;
         error.status = 401;
         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
 
         try {
           await instance.chat({
             messages: [{ content: 'Hello', role: 'user' }],
-            model: '
-            temperature: 0,
+            model: 'max-32k',
+            temperature: 0.999,
           });
         } catch (e) {
-          // Expect the chat method to throw an error with InvalidSparkAPIKey
           expect(e).toEqual({
             endpoint: defaultBaseURL,
-            error: new Error('
+            error: new Error('InvalidApiKey'),
             errorType: invalidErrorType,
             provider,
           });
@@ -191,8 +187,8 @@ describe('LobeSparkAI', () => {
         try {
           await instance.chat({
             messages: [{ content: 'Hello', role: 'user' }],
-            model: '
-            temperature: 0,
+            model: 'max-32k',
+            temperature: 0.999,
           });
         } catch (e) {
           expect(e).toEqual({
@@ -239,9 +235,9 @@ describe('LobeSparkAI', () => {
       // Hypothetical test invocation; adjust to the actual setup as needed
       await instance.chat({
         messages: [{ content: 'Hello', role: 'user' }],
-        model: '
+        model: 'max-32k',
         stream: true,
-        temperature: 0,
+        temperature: 0.999,
       });
 
       // Verify that debugStream was called
package/src/libs/agent-runtime/spark/index.ts

@@ -1,9 +1,13 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { transformSparkResponseToStream, SparkAIStream } from '../utils/streams';
+
 export const LobeSparkAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://spark-api-open.xf-yun.com/v1',
   chatCompletion: {
+    handleStream: SparkAIStream,
+    handleTransformResponseToStream: transformSparkResponseToStream,
     noUserId: true,
   },
   debug: {
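`handleTransformResponseToStream` covers the non-streaming path: a one-shot `ChatCompletion` is replayed as a chunk stream so the same `SparkAIStream` handler serves both response modes. The real `transformSparkResponseToStream` lives in the newly added `package/src/libs/agent-runtime/utils/streams/spark.ts`, which this diff does not show; the sketch below only illustrates the general shape of such a transform (types assumed from the `openai` SDK, not the package's source):

```ts
import OpenAI from 'openai';

// Replay a non-streaming completion as a stream of completion chunks so a
// streaming handler can process both response modes uniformly.
export const transformResponseToStreamSketch = (response: OpenAI.ChatCompletion) =>
  new ReadableStream<OpenAI.ChatCompletionChunk>({
    start(controller) {
      const toChunk = (
        choices: OpenAI.ChatCompletionChunk.Choice[],
      ): OpenAI.ChatCompletionChunk => ({
        choices,
        created: response.created,
        id: response.id,
        model: response.model,
        object: 'chat.completion.chunk',
      });

      // 1) The whole message body as a single delta chunk.
      controller.enqueue(
        toChunk(
          response.choices.map((choice) => ({
            delta: { content: choice.message.content, role: choice.message.role },
            finish_reason: null,
            index: choice.index,
          })),
        ),
      );

      // 2) A terminal chunk carrying the finish_reason, then close.
      controller.enqueue(
        toChunk(
          response.choices.map((choice) => ({
            delta: {},
            finish_reason: choice.finish_reason,
            index: choice.index,
          })),
        ),
      );

      controller.close();
    },
  });
```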
|