@lobehub/chat 1.68.9 → 1.68.10
This diff shows the changes between publicly released versions of the package as they appear in their public registries; it is provided for informational purposes only.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/docs/usage/providers/ppio.mdx +5 -5
- package/docs/usage/providers/ppio.zh-CN.mdx +7 -7
- package/locales/ar/chat.json +5 -1
- package/locales/ar/models.json +6 -9
- package/locales/bg-BG/chat.json +5 -1
- package/locales/bg-BG/models.json +6 -9
- package/locales/de-DE/chat.json +5 -1
- package/locales/de-DE/models.json +6 -9
- package/locales/en-US/chat.json +5 -1
- package/locales/en-US/models.json +6 -9
- package/locales/es-ES/chat.json +5 -1
- package/locales/es-ES/models.json +6 -9
- package/locales/fa-IR/chat.json +5 -1
- package/locales/fa-IR/models.json +6 -9
- package/locales/fr-FR/chat.json +5 -1
- package/locales/fr-FR/models.json +6 -9
- package/locales/it-IT/chat.json +5 -1
- package/locales/it-IT/models.json +6 -9
- package/locales/ja-JP/chat.json +5 -1
- package/locales/ja-JP/models.json +6 -9
- package/locales/ko-KR/chat.json +5 -1
- package/locales/ko-KR/models.json +6 -9
- package/locales/nl-NL/chat.json +5 -1
- package/locales/nl-NL/models.json +6 -9
- package/locales/pl-PL/chat.json +5 -1
- package/locales/pl-PL/models.json +6 -9
- package/locales/pt-BR/chat.json +5 -1
- package/locales/pt-BR/models.json +6 -9
- package/locales/ru-RU/chat.json +5 -1
- package/locales/ru-RU/models.json +6 -9
- package/locales/tr-TR/chat.json +5 -1
- package/locales/tr-TR/models.json +6 -9
- package/locales/vi-VN/chat.json +5 -1
- package/locales/vi-VN/models.json +6 -9
- package/locales/zh-CN/chat.json +5 -1
- package/locales/zh-CN/models.json +6 -9
- package/locales/zh-TW/chat.json +5 -1
- package/locales/zh-TW/models.json +6 -9
- package/package.json +1 -1
- package/src/config/aiModels/perplexity.ts +36 -20
- package/src/config/modelProviders/ppio.ts +1 -1
- package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +27 -9
- package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +77 -35
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +253 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +65 -46
- package/src/libs/agent-runtime/baichuan/index.test.ts +58 -1
- package/src/libs/agent-runtime/groq/index.test.ts +36 -284
- package/src/libs/agent-runtime/mistral/index.test.ts +39 -300
- package/src/libs/agent-runtime/perplexity/index.test.ts +12 -10
- package/src/libs/agent-runtime/providerTestUtils.ts +58 -0
- package/src/libs/agent-runtime/togetherai/index.test.ts +7 -295
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +3 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +5 -2
- package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +89 -5
- package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -8
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +188 -84
- package/src/libs/agent-runtime/utils/streams/openai.ts +8 -17
- package/src/libs/agent-runtime/utils/usageConverter.test.ts +249 -0
- package/src/libs/agent-runtime/utils/usageConverter.ts +50 -0
- package/src/libs/agent-runtime/zeroone/index.test.ts +7 -294
- package/src/locales/default/chat.ts +4 -0
- package/src/types/message/base.ts +14 -4
- package/src/utils/filter.test.ts +0 -122
- package/src/utils/filter.ts +0 -29
package/src/libs/agent-runtime/mistral/index.test.ts

@@ -1,19 +1,22 @@
 // @vitest-environment node
-import
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { Mock, afterEach, beforeEach, expect, it, vi } from 'vitest';

-import {
+import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';

-import * as debugStreamModule from '../utils/debugStream';
 import { LobeMistralAI } from './index';

-
-
-
-
+testProvider({
+  provider: 'mistral',
+  defaultBaseURL: 'https://api.mistral.ai/v1',
+  chatModel: 'open-mistral-7b',
+  Runtime: LobeMistralAI,
+  chatDebugEnv: 'DEBUG_MISTRAL_CHAT_COMPLETION',

-
-
+  test: {
+    skipAPICall: true,
+  },
+});

 let instance: LobeOpenAICompatibleRuntime;

@@ -30,302 +33,38 @@ afterEach(() => {
   vi.clearAllMocks();
 });

-describe('LobeMistralAI', () => {
-
-
-
-
-
+describe('specific LobeMistralAI tests', () => {
+  it(`should call API with corresponding options`, async () => {
+    // Arrange
+    const mockStream = new ReadableStream();
+    const mockResponse = Promise.resolve(mockStream);
+
+    (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+    // Act
+    const result = await instance.chat({
+      max_tokens: 1024,
+      messages: [{ content: 'Hello', role: 'user' }],
+      model: 'open-mistral-7b',
+      temperature: 0.7,
+      top_p: 1,
     });
-  });
-
-  describe('chat', () => {
-    it('should return a StreamingTextResponse on successful API call', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
-        messages: [{ content: 'Hello', role: 'user' }],
-        model: 'open-mistral-7b',
-        temperature: 0,
-      });

-
-
-
-
-    it('should call Mistral API with supported options in streaming mode', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
+    // Assert
+    expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+      {
        max_tokens: 1024,
        messages: [{ content: 'Hello', role: 'user' }],
        model: 'open-mistral-7b',
-
-
-
-
-      // Assert
-      expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-        {
-          max_tokens: 1024,
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'open-mistral-7b',
-          stream: true,
-          temperature: 0.35,
-          top_p: 1,
+        stream: true,
+        stream_options: {
+          include_usage: true,
        },
-
-      );
-      expect(result).toBeInstanceOf(Response);
-    });
-
-    it('should call Mistral API without unsupported options', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
-        frequency_penalty: 0.5, // unsupported option
-        max_tokens: 1024,
-        messages: [{ content: 'Hello', role: 'user' }],
-        model: 'open-mistral-7b',
-        presence_penalty: 0.5, // unsupported option
-        temperature: 0.7,
+        temperature: 0.35,
        top_p: 1,
-      }
-
-
-
-        {
-          max_tokens: 1024,
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'open-mistral-7b',
-          stream: true,
-          temperature: 0.35,
-          top_p: 1,
-        },
-        { headers: { Accept: '*/*' } },
-      );
-      expect(result).toBeInstanceOf(Response);
-    });
-
-    describe('Error', () => {
-      it('should return MistralBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'open-mistral-7b',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with InvalidMistralAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeMistralAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return MistralBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'open-mistral-7b',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return MistralBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeMistralAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'open-mistral-7b',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidMistralAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('Unauthorized') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'gpt-3.5-turbo',
-            temperature: 0,
-          });
-        } catch (e) {
-          // Expect the chat method to throw an error with InvalidMoonshotAPIKey
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('Unauthorized'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'open-mistral-7b',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_MISTRAL_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
-
-        // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // 保存原始环境变量值
-        const originalDebugValue = process.env.DEBUG_MISTRAL_CHAT_COMPLETION;
-
-        // 模拟环境变量
-        process.env.DEBUG_MISTRAL_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // 执行测试
-        // 运行你的测试函数,确保它会在条件满足时调用 debugStream
-        // 假设的测试函数调用,你可能需要根据实际情况调整
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'open-mistral-7b',
-          temperature: 0,
-        });
-
-        // 验证 debugStream 被调用
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // 恢复原始环境变量值
-        process.env.DEBUG_MISTRAL_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
+      },
+      { headers: { Accept: '*/*' } },
+    );
+    expect(result).toBeInstanceOf(Response);
   });
 });
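The rewritten assertion above now expects every streamed chat request to carry `stream_options: { include_usage: true }`, so the OpenAI-compatible endpoint reports token usage on the final chunk. A minimal sketch of such a call with the `openai` Node SDK (the client setup, environment variable, and direct SDK usage are assumptions for illustration; the package itself routes this through its openaiCompatibleFactory):

```ts
import OpenAI from 'openai';

// Assumed client setup; the endpoint and model mirror the Mistral test fixtures above.
const client = new OpenAI({
  apiKey: process.env.MISTRAL_API_KEY,
  baseURL: 'https://api.mistral.ai/v1',
});

async function streamWithUsage() {
  const stream = await client.chat.completions.create({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'open-mistral-7b',
    stream: true,
    // With include_usage, the last streamed chunk carries prompt/completion token counts.
    stream_options: { include_usage: true },
  });

  for await (const chunk of stream) {
    if (chunk.usage) console.log('usage:', chunk.usage);
  }
}

streamWithUsage().catch(console.error);
```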
package/src/libs/agent-runtime/perplexity/index.test.ts

@@ -163,13 +163,13 @@ describe('LobePerplexityAI', () => {
       },
       {
         id: '506d64fb-e7f2-4d94-b80f-158369e9446d',
-        model: 'sonar-pro',
-        created:
+        model: 'sonar-reasoning-pro',
+        created: 1741250924,
         usage: {
-          prompt_tokens:
-          completion_tokens:
-          total_tokens:
-          citation_tokens:
+          prompt_tokens: 2,
+          completion_tokens: 685,
+          total_tokens: 687,
+          citation_tokens: 3058,
           num_search_queries: 1,
         },
         citations: [
@@ -185,11 +185,13 @@ describe('LobePerplexityAI', () => {
         choices: [
           {
             index: 0,
-            finish_reason:
+            finish_reason: 'stop',
             message: {
               role: 'assistant',
-              content:
+              content:
+                '<think>\n好的,我现在要处理用户的我需要确定这个查询的类型。用户显然是在询问当前的天气情况和预报,因此属于天气预报类型。接下来我要查看提供的搜索结果,看看这些来源是否能提供准确的信息。\n\n第一个来源是weather.com.cn的、西北风5~6级等。接着查看第二个结果[2]是中央气象台的详细分时数据,比如7月18日和21日的温度、降水、风速等信息。[3]来自中国气象局的气象预报显示有阴天和多云交替的情况,(如星期三03/05阴温暖但空气质量差。[6][7]则是杭州市气象台的最新天气预报发布情况:后半夜转多云明天白天继续多云的天气。\n\n现在要将这些信息整合起来形成连贯的回答。需要注意是否有矛盾的地方以及按照可信度部或东北部常见四至五级阵风;昼夜温差较大比如最高温可达20多摄氏度最低至10℃左右这样需要提醒注意衣物调整防寒保暖同时也指出空气质量在某些时段可能不佳特别是根据[5],AccuWeather提示空气质响出行健康的重点要素如空气指标并且保证引用每个相关数据都注明正确的出处编号避免遗漏重要细节同时保持回答简洁明了使用户一目了然.\n</think>\n\n杭州近期以阴到多云天气为主,夜间偶有小雨[1 未来三日预报\n- **今天傍晚至夜间**:局部小雨渐止转阴到多云\\[6\\] [7]\n- **明日(周六)** \n - 白天多云为主 \n - 温度区间16℃~22℃,西北风5~6级 \\[2\\] [3]\n- **后天(周日)**\n\\] [3]\n\n### *注意事项*\n1. **昼夜温差大**:早晚低温多在10°C以下需加外套防风保温;\n2. **空气污染警告** AccuWeather指出当地PM指数超标易引发达呼吸道不适建议尽量减少户外长时间活动时r/china/zjejiang/hangzhou" target="_blank">墨迹实况雷达图</a>获取临近降水动态.',
             },
+            delta: { role: 'assistant', content: '' },
           },
         ],
       },
@@ -238,8 +240,8 @@ describe('LobePerplexityAI', () => {
         'event: text',
         'data: "天和未来几天的"\n',
         'id: 506d64fb-e7f2-4d94-b80f-158369e9446d',
-        'event:
-        'data: {"
+        'event: usage',
+        'data: {"inputCitationTokens":3058,"inputTextTokens":2,"outputTextTokens":685,"totalInputTokens":3060,"totalOutputTokens":685,"totalTokens":3745}\n',
       ].map((line) => `${line}\n`),
     );

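The expected `usage` event above also shows how the converted totals appear to compose: citation tokens are added to the text prompt tokens to form the total input, and the grand total is input plus output, which is why the converted totalTokens (3745) exceeds the provider's raw total_tokens (687, text tokens only). A quick recomputation from the fixture numbers (plain arithmetic; the variable names follow the event payload, not any exported helper):

```ts
// Values taken from the Perplexity fixture above.
const inputTextTokens = 2;        // prompt_tokens
const inputCitationTokens = 3058; // citation_tokens
const outputTextTokens = 685;     // completion_tokens

const totalInputTokens = inputTextTokens + inputCitationTokens; // 3060
const totalOutputTokens = outputTextTokens;                     // 685
const totalTokens = totalInputTokens + totalOutputTokens;       // 3745

console.log({ totalInputTokens, totalOutputTokens, totalTokens });
```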
package/src/libs/agent-runtime/providerTestUtils.ts

@@ -13,6 +13,9 @@ interface TesstProviderParams {
   defaultBaseURL: string;
   invalidErrorType?: string;
   provider: string;
+  test?: {
+    skipAPICall?: boolean;
+  };
 }

 export const testProvider = ({
@@ -23,6 +26,7 @@ export const testProvider = ({
   Runtime,
   chatDebugEnv,
   chatModel,
+  test = {},
 }: TesstProviderParams) => {
   // Mock the console.error to avoid polluting test output
   vi.spyOn(console, 'error').mockImplementation(() => {});
@@ -52,6 +56,60 @@ export const testProvider = ({
   });

   describe('chat', () => {
+    it('should return a StreamingTextResponse on successful API call', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: chatModel,
+        temperature: 0,
+      });
+
+      // Assert
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    if (!test?.skipAPICall) {
+      it(`should call ${provider} API with corresponding options`, async () => {
+        // Arrange
+        const mockStream = new ReadableStream();
+        const mockResponse = Promise.resolve(mockStream);
+
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+        // Act
+        const result = await instance.chat({
+          max_tokens: 1024,
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: chatModel,
+          temperature: 0.7,
+          top_p: 1,
+        });
+
+        // Assert
+        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+          {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: chatModel,
+            stream: true,
+            stream_options: {
+              include_usage: true,
+            },
+            temperature: 0.7,
+            top_p: 1,
+          },
+          { headers: { Accept: '*/*' } },
+        );
+        expect(result).toBeInstanceOf(Response);
+      });
+    }
+
     describe('Error', () => {
       it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
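Combined with the Mistral hunk at the top of this diff, the new `test.skipAPICall` option lets a provider suite opt out of the shared request-shape assertion while keeping the rest of the generic tests; omitting it keeps that assertion enabled. A sketch of a suite for an entirely hypothetical provider written against this helper (all names below are illustrative, not real providers in the package):

```ts
import { testProvider } from '@/libs/agent-runtime/providerTestUtils';

// Hypothetical provider runtime; a real suite imports its own Runtime class.
import { LobeExampleAI } from './index';

testProvider({
  Runtime: LobeExampleAI,
  provider: 'example',
  defaultBaseURL: 'https://api.example.com/v1',
  chatModel: 'example-chat-model',
  chatDebugEnv: 'DEBUG_EXAMPLE_CHAT_COMPLETION',
  // No `test: { skipAPICall: true }` here, so the shared
  // "should call example API with corresponding options" test also runs.
});
```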
|