@lobehub/chat 1.73.2 → 1.74.0
This diff covers publicly available package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective registries.
- package/.env.example +4 -0
- package/CHANGELOG.md +26 -0
- package/Dockerfile +5 -1
- package/Dockerfile.database +5 -1
- package/Dockerfile.pglite +3 -1
- package/README.md +1 -0
- package/README.zh-CN.md +1 -0
- package/changelog/v1.json +9 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +13 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +14 -0
- package/docs/usage/providers/infiniai.mdx +29 -0
- package/docs/usage/providers/infiniai.zh-CN.mdx +29 -0
- package/locales/ar/models.json +30 -0
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/models.json +30 -0
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/models.json +30 -0
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/models.json +30 -0
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/models.json +30 -0
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/models.json +30 -0
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/models.json +30 -0
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/models.json +30 -0
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/models.json +22 -0
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/models.json +30 -0
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/models.json +30 -0
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/models.json +30 -0
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/models.json +30 -0
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/models.json +30 -0
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/models.json +30 -0
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/models.json +30 -0
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/models.json +30 -0
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/models.json +19 -0
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +3 -3
- package/packages/web-crawler/src/utils/htmlToMarkdown.test.ts +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +5 -1
- package/src/config/aiModels/index.ts +6 -0
- package/src/config/aiModels/infiniai.ts +307 -0
- package/src/config/aiModels/search1api.ts +62 -0
- package/src/config/llm.ts +12 -0
- package/src/config/modelProviders/index.ts +8 -0
- package/src/config/modelProviders/infiniai.ts +184 -0
- package/src/config/modelProviders/search1api.ts +58 -0
- package/src/database/models/__tests__/knowledgeBase.test.ts +2 -0
- package/src/libs/agent-runtime/ai21/index.test.ts +8 -250
- package/src/libs/agent-runtime/ai360/index.test.ts +8 -250
- package/src/libs/agent-runtime/anthropic/index.ts +5 -1
- package/src/libs/agent-runtime/deepseek/index.test.ts +119 -335
- package/src/libs/agent-runtime/fireworksai/index.test.ts +8 -247
- package/src/libs/agent-runtime/giteeai/index.test.ts +8 -250
- package/src/libs/agent-runtime/github/index.test.ts +8 -207
- package/src/libs/agent-runtime/infiniai/index.ts +43 -0
- package/src/libs/agent-runtime/internlm/index.test.ts +8 -250
- package/src/libs/agent-runtime/lmstudio/index.test.ts +8 -247
- package/src/libs/agent-runtime/moonshot/index.test.ts +10 -243
- package/src/libs/agent-runtime/novita/index.test.ts +9 -221
- package/src/libs/agent-runtime/ollama/index.test.ts +4 -4
- package/src/libs/agent-runtime/openrouter/index.test.ts +12 -217
- package/src/libs/agent-runtime/ppio/index.test.ts +11 -220
- package/src/libs/agent-runtime/providerTestUtils.ts +6 -6
- package/src/libs/agent-runtime/qwen/index.test.ts +10 -242
- package/src/libs/agent-runtime/runtimeMap.ts +4 -0
- package/src/libs/agent-runtime/search1api/index.ts +64 -0
- package/src/libs/agent-runtime/sensenova/index.test.ts +10 -242
- package/src/libs/agent-runtime/spark/index.test.ts +7 -242
- package/src/libs/agent-runtime/stepfun/index.test.ts +7 -242
- package/src/libs/agent-runtime/taichu/index.test.ts +12 -220
- package/src/libs/agent-runtime/types/type.ts +2 -0
- package/src/libs/agent-runtime/upstage/index.test.ts +7 -250
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +2 -2
- package/src/libs/agent-runtime/xai/index.test.ts +8 -250
- package/src/services/chat.ts +1 -4
- package/src/types/user/settings/keyVaults.ts +2 -0
- package/src/utils/fetch/__tests__/parseToolCalls.test.ts +9 -11
- package/src/utils/server/jwt.test.ts +1 -1
- package/vitest.server.config.ts +3 -1
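At a glance: this release adds two OpenAI-compatible providers, InfiniAI and Search1API (runtime, model config, locales, docs), and rewrites nineteen provider test suites on top of the shared testProvider harness shown below. The two-line keyVaults change (src/types/user/settings/keyVaults.ts +2) is not expanded in this view; a sketch of what it plausibly adds, assuming the OpenAI-compatible vault shape the repo uses for its other providers (the field names are assumptions, not confirmed by this diff):

// Assumed vault shape, mirroring the repo's other OpenAI-compatible providers.
interface OpenAICompatibleKeyVault {
  apiKey?: string;
  baseURL?: string;
}

// Plausible additions to UserKeyVaults (names assumed, not shown in this view):
interface UserKeyVaultsAdditions {
  infiniai?: OpenAICompatibleKeyVault;
  search1api?: OpenAICompatibleKeyVault;
}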
package/src/libs/agent-runtime/deepseek/index.test.ts
@@ -1,10 +1,11 @@
-
+// @vitest-environment node
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { LobeDeepSeekAI, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
 import { ModelProvider } from '@/libs/agent-runtime';
 import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';
 
 import * as debugStreamModule from '../utils/debugStream';
 import models from './fixtures/models.json';
package/src/libs/agent-runtime/ppio/index.test.ts
@@ -12,8 +13,14 @@ import { LobePPIOAI } from './index';
 
 const provider = ModelProvider.PPIO;
 const defaultBaseURL = 'https://api.ppinfra.com/v3/openai';
-const bizErrorType = AgentRuntimeErrorType.ProviderBizError;
-const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
+
+testProvider({
+  Runtime: LobePPIOAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_PPIO_CHAT_COMPLETION',
+  chatModel: 'deepseek-r1',
+});
 
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
@@ -35,222 +42,6 @@ afterEach(() => {
 });
 
 describe('PPIO', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobePPIOAI({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobePPIOAI);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    describe('Error', () => {
-      it('should return Error with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'meta-llama/llama-3-8b-instruct',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError if no apiKey is provided', async () => {
-        try {
-          new LobePPIOAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return Error with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'meta-llama/llama-3-8b-instruct',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return Error with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobePPIOAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'meta-llama/llama-3-8b-instruct',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('InvalidApiKey') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'meta-llama/llama-3-8b-instruct',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('InvalidApiKey'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'meta-llama/llama-3-8b-instruct',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_PPIO_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // add a toReadableStream method
-
-        // mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // save the original env var value
-        const originalDebugValue = process.env.DEBUG_PPIO_CHAT_COMPLETION;
-
-        // mock the env var
-        process.env.DEBUG_PPIO_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // run the test
-        // run your test function and make sure it calls debugStream when the condition is met
-        // hypothetical test call; adjust to your actual setup as needed
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'meta-llama/llama-3-8b-instruct',
-          stream: true,
-          temperature: 0.999,
-        });
-
-        // verify that debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // restore the original env var value
-        process.env.DEBUG_PPIO_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
-  });
-
   describe('models', () => {
     it('should get models', async () => {
       // mock the models.list method
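The init, chat-error, and DEBUG cases deleted above are exactly what the shared harness now generates. A minimal sketch of the options shape testProvider appears to accept, inferred solely from the call sites and assertions visible in this diff (fields beyond these are not confirmed):

// Sketch only; inferred from the testProvider call sites in this diff.
interface TestProviderOptions {
  Runtime: new (options: { apiKey?: string; baseURL?: string }) => unknown;
  provider: string; // e.g. ModelProvider.PPIO
  defaultBaseURL: string; // asserted by the generated init test
  chatDebugEnv: string; // e.g. 'DEBUG_PPIO_CHAT_COMPLETION'
  chatModel: string; // model id used by the generated chat tests
  test?: { skipAPICall?: boolean }; // appears only in the qwen call site below
}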
package/src/libs/agent-runtime/providerTestUtils.ts
@@ -111,7 +111,7 @@ export const testProvider = ({
   }
 
   describe('Error', () => {
-    it('should return
+    it('should return ProviderBizError with an openai error response when OpenAI.APIError is thrown', async () => {
       // Arrange
       const apiError = new OpenAI.APIError(
         400,
@@ -147,7 +147,7 @@ export const testProvider = ({
       }
     });
 
-    it('should throw AgentRuntimeError with
+    it('should throw AgentRuntimeError with InvalidProviderAPIKey if no apiKey is provided', async () => {
       try {
         new Runtime({});
       } catch (e) {
@@ -155,7 +155,7 @@ export const testProvider = ({
       }
     });
 
-    it('should return
+    it('should return ProviderBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
       // Arrange
       const errorInfo = {
         cause: {
@@ -187,7 +187,7 @@ export const testProvider = ({
       }
     });
 
-    it('should return
+    it('should return ProviderBizError with an cause response with desensitize Url', async () => {
       // Arrange
       const errorInfo = {
         cause: { message: 'api is undefined' },
@@ -223,7 +223,7 @@ export const testProvider = ({
       }
     });
 
-    it(
+    it(`should throw an InvalidAPIKey error type on 401 status code`, async () => {
       // Mock the API call to simulate a 401 error
       const error = new Error('Unauthorized') as any;
       error.status = 401;
@@ -239,7 +239,7 @@ export const testProvider = ({
       // Expect the chat method to throw an error with InvalidHunyuanAPIKey
       expect(e).toEqual({
         endpoint: defaultBaseURL,
-        error:
+        error: error,
         errorType: invalidErrorType,
         provider,
       });
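These renames standardize the generated test titles on the provider-agnostic AgentRuntimeErrorType members. For reference, the error object the harness asserts for a 400 OpenAI.APIError keeps the shape the per-provider suites used; a sketch with the PPIO values from this diff (the enum members are assumed to serialize to these strings):

// Sketch of the asserted error shape; enum string values are an assumption.
const expectedBizError = {
  endpoint: 'https://api.ppinfra.com/v3/openai', // the provider's defaultBaseURL
  error: { error: { message: 'Bad Request' }, status: 400 },
  errorType: 'ProviderBizError', // AgentRuntimeErrorType.ProviderBizError
  provider: 'ppio', // ModelProvider.PPIO
};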
package/src/libs/agent-runtime/qwen/index.test.ts
@@ -1,251 +1,19 @@
 // @vitest-environment node
-import OpenAI from 'openai';
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
-import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
 import { ModelProvider } from '@/libs/agent-runtime';
-import {
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';
 
-import * as debugStreamModule from '../utils/debugStream';
 import { LobeQwenAI } from './index';
 
 const provider = ModelProvider.Qwen;
 const defaultBaseURL = 'https://dashscope.aliyuncs.com/compatible-mode/v1';
-const bizErrorType = AgentRuntimeErrorType.ProviderBizError;
-const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
-
-// Mock the console.error to avoid polluting test output
-vi.spyOn(console, 'error').mockImplementation(() => {});
-
-let instance: LobeOpenAICompatibleRuntime;
-
-beforeEach(() => {
-  instance = new LobeQwenAI({ apiKey: 'test' });
-
-  // use vi.spyOn to mock the chat.completions.create method
-  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-    new ReadableStream() as any,
-  );
-});
-
-afterEach(() => {
-  vi.clearAllMocks();
-});
-
-describe('LobeQwenAI', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobeQwenAI({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobeQwenAI);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    describe('Error', () => {
-      it('should return QwenBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo-latest',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with InvalidQwenAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeQwenAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return QwenBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo-latest',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return QwenBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeQwenAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo-latest',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidQwenAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('InvalidApiKey') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo-latest',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('InvalidApiKey'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo-latest',
-            temperature: 0.999,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_QWEN_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // add a toReadableStream method
-
-        // mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // save the original env var value
-        const originalDebugValue = process.env.DEBUG_QWEN_CHAT_COMPLETION;
-
-        // mock the env var
-        process.env.DEBUG_QWEN_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // run the test
-        // run your test function and make sure it calls debugStream when the condition is met
-        // hypothetical test call; adjust to your actual setup as needed
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo-latest',
-          stream: true,
-          temperature: 0.999,
-        });
-
-        // verify that debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
 
-        // restore the original env var value
-        process.env.DEBUG_QWEN_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
-  });
+testProvider({
+  Runtime: LobeQwenAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_QWEN_CHAT_COMPLETION',
+  chatModel: 'qwen-2.5',
+  test: {
+    skipAPICall: true,
+  },
 });
package/src/libs/agent-runtime/runtimeMap.ts
@@ -16,6 +16,7 @@ import { LobeGroq } from './groq';
 import { LobeHigressAI } from './higress';
 import { LobeHuggingFaceAI } from './huggingface';
 import { LobeHunyuanAI } from './hunyuan';
+import { LobeInfiniAI } from './infiniai';
 import { LobeInternLMAI } from './internlm';
 import { LobeJinaAI } from './jina';
 import { LobeLMStudioAI } from './lmstudio';
@@ -31,6 +32,7 @@ import { LobePerplexityAI } from './perplexity';
 import { LobePPIOAI } from './ppio';
 import { LobeQwenAI } from './qwen';
 import { LobeSambaNovaAI } from './sambanova';
+import { LobeSearch1API } from './search1api';
 import { LobeSenseNovaAI } from './sensenova';
 import { LobeSiliconCloudAI } from './siliconcloud';
 import { LobeSparkAI } from './spark';
@@ -66,6 +68,7 @@ export const providerRuntimeMap = {
   higress: LobeHigressAI,
   huggingface: LobeHuggingFaceAI,
   hunyuan: LobeHunyuanAI,
+  infiniai: LobeInfiniAI,
   internlm: LobeInternLMAI,
   jina: LobeJinaAI,
   lmstudio: LobeLMStudioAI,
@@ -81,6 +84,7 @@ export const providerRuntimeMap = {
   ppio: LobePPIOAI,
   qwen: LobeQwenAI,
   sambanova: LobeSambaNovaAI,
+  search1api: LobeSearch1API,
   sensenova: LobeSenseNovaAI,
   siliconcloud: LobeSiliconCloudAI,
   spark: LobeSparkAI,
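With these four insertions the new ids resolve through the same lookup as every other provider. A hypothetical consumer sketch (initRuntime is an illustrative helper, and the import path assumes the repo's @/ alias for src/; neither is part of this diff):

import { providerRuntimeMap } from '@/libs/agent-runtime/runtimeMap';

type ProviderId = keyof typeof providerRuntimeMap; // now includes 'infiniai' and 'search1api'

// Illustrative helper, not part of this diff.
const initRuntime = (provider: ProviderId, apiKey: string) => {
  const Runtime = providerRuntimeMap[provider] as new (options: { apiKey?: string }) => unknown;
  return new Runtime({ apiKey });
};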
package/src/libs/agent-runtime/search1api/index.ts
@@ -0,0 +1,64 @@
+import OpenAI from 'openai';
+
+import type { ChatModelCard } from '@/types/llm';
+
+import { ChatStreamPayload, ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export interface Search1APIModelCard {
+  id: string;
+}
+
+export const LobeSearch1API = LobeOpenAICompatibleFactory({
+  baseURL: 'https://api.search1api.com/v1',
+  chatCompletion: {
+    handlePayload: (payload: ChatStreamPayload) => {
+      const { presence_penalty, frequency_penalty, stream = true, temperature, ...res } = payload;
+
+      let param;
+
+      if (presence_penalty !== 0) {
+        param = { presence_penalty };
+      } else {
+        const defaultFrequencyPenalty = 1;
+
+        param = { frequency_penalty: frequency_penalty || defaultFrequencyPenalty };
+      }
+
+      return {
+        ...res,
+        ...param,
+        stream,
+        temperature: temperature >= 2 ? undefined : temperature,
+      } as OpenAI.ChatCompletionCreateParamsStreaming;
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_SEARCH1API_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+
+    const modelsPage = (await client.models.list()) as any;
+    const modelList: Search1APIModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+        );
+
+        return {
+          contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
+          displayName: knownModel?.displayName ?? undefined,
+          enabled: knownModel?.enabled || false,
+          functionCall: knownModel?.abilities?.functionCall || false,
+          id: model.id,
+          reasoning: knownModel?.abilities?.reasoning || false,
+          vision: knownModel?.abilities?.vision || false,
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
+  provider: ModelProvider.Search1API,
+});
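handlePayload is the one non-obvious part of the factory call above: it forwards only one of the two penalties (presence_penalty when non-zero, otherwise frequency_penalty with a default of 1) and drops temperature values of 2 or above. A self-contained re-statement of that normalization, for illustration only:

// Re-statement of the handlePayload normalization above; illustrative only.
type PenaltyParams = { frequency_penalty: number; presence_penalty: number; temperature: number };

const normalize = ({ presence_penalty, frequency_penalty, temperature }: PenaltyParams) => ({
  ...(presence_penalty !== 0
    ? { presence_penalty }
    : { frequency_penalty: frequency_penalty || 1 }),
  temperature: temperature >= 2 ? undefined : temperature,
});

console.log(normalize({ frequency_penalty: 0, presence_penalty: 0, temperature: 0.7 }));
// -> { frequency_penalty: 1, temperature: 0.7 }
console.log(normalize({ frequency_penalty: 0, presence_penalty: 0.5, temperature: 2 }));
// -> { presence_penalty: 0.5, temperature: undefined }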