@lobehub/chat 1.68.8 → 1.68.10

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only.
Files changed (74)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/usage/providers/ppio.mdx +5 -5
  4. package/docs/usage/providers/ppio.zh-CN.mdx +7 -7
  5. package/locales/ar/chat.json +5 -1
  6. package/locales/ar/models.json +6 -9
  7. package/locales/bg-BG/chat.json +5 -1
  8. package/locales/bg-BG/models.json +6 -9
  9. package/locales/de-DE/chat.json +5 -1
  10. package/locales/de-DE/models.json +6 -9
  11. package/locales/en-US/chat.json +5 -1
  12. package/locales/en-US/models.json +6 -9
  13. package/locales/es-ES/chat.json +5 -1
  14. package/locales/es-ES/models.json +6 -9
  15. package/locales/fa-IR/chat.json +5 -1
  16. package/locales/fa-IR/models.json +6 -9
  17. package/locales/fr-FR/chat.json +5 -1
  18. package/locales/fr-FR/models.json +6 -9
  19. package/locales/it-IT/chat.json +5 -1
  20. package/locales/it-IT/models.json +6 -9
  21. package/locales/ja-JP/chat.json +5 -1
  22. package/locales/ja-JP/models.json +6 -9
  23. package/locales/ko-KR/chat.json +5 -1
  24. package/locales/ko-KR/models.json +6 -9
  25. package/locales/nl-NL/chat.json +5 -1
  26. package/locales/nl-NL/models.json +6 -9
  27. package/locales/pl-PL/chat.json +5 -1
  28. package/locales/pl-PL/models.json +6 -9
  29. package/locales/pt-BR/chat.json +5 -1
  30. package/locales/pt-BR/models.json +6 -9
  31. package/locales/ru-RU/chat.json +5 -1
  32. package/locales/ru-RU/models.json +6 -9
  33. package/locales/tr-TR/chat.json +5 -1
  34. package/locales/tr-TR/models.json +6 -9
  35. package/locales/vi-VN/chat.json +5 -1
  36. package/locales/vi-VN/models.json +6 -9
  37. package/locales/zh-CN/chat.json +5 -1
  38. package/locales/zh-CN/models.json +6 -9
  39. package/locales/zh-TW/chat.json +5 -1
  40. package/locales/zh-TW/models.json +6 -9
  41. package/package.json +3 -1
  42. package/src/config/aiModels/perplexity.ts +36 -20
  43. package/src/config/modelProviders/ppio.ts +1 -1
  44. package/src/database/client/migrations.json +8 -3
  45. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +27 -9
  46. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +77 -35
  47. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +253 -0
  48. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +65 -46
  49. package/src/libs/agent-runtime/baichuan/index.test.ts +58 -1
  50. package/src/libs/agent-runtime/groq/index.test.ts +36 -284
  51. package/src/libs/agent-runtime/mistral/index.test.ts +39 -300
  52. package/src/libs/agent-runtime/perplexity/index.test.ts +12 -10
  53. package/src/libs/agent-runtime/providerTestUtils.ts +58 -0
  54. package/src/libs/agent-runtime/togetherai/index.test.ts +7 -295
  55. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +3 -0
  56. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +5 -2
  57. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +89 -5
  58. package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -8
  59. package/src/libs/agent-runtime/utils/streams/openai.test.ts +188 -84
  60. package/src/libs/agent-runtime/utils/streams/openai.ts +8 -17
  61. package/src/libs/agent-runtime/utils/usageConverter.test.ts +249 -0
  62. package/src/libs/agent-runtime/utils/usageConverter.ts +50 -0
  63. package/src/libs/agent-runtime/zeroone/index.test.ts +7 -294
  64. package/src/libs/langchain/loaders/epub/__tests__/__snapshots__/index.test.ts.snap +238 -0
  65. package/src/libs/langchain/loaders/epub/__tests__/demo.epub +0 -0
  66. package/src/libs/langchain/loaders/epub/__tests__/index.test.ts +24 -0
  67. package/src/libs/langchain/loaders/epub/index.ts +21 -0
  68. package/src/libs/langchain/loaders/index.ts +9 -0
  69. package/src/libs/langchain/types.ts +2 -1
  70. package/src/locales/default/chat.ts +4 -0
  71. package/src/server/utils/tempFileManager.ts +70 -0
  72. package/src/types/message/base.ts +14 -4
  73. package/src/utils/filter.test.ts +0 -122
  74. package/src/utils/filter.ts +0 -29
package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts

@@ -11,43 +11,75 @@ export const getDetailsToken = (
   usage: ModelTokensUsage,
   modelCard?: LobeDefaultAiModelListItem,
 ) => {
-  const uncachedInputCredit = (
-    !!usage.inputTokens
-      ? calcCredit(usage.inputTokens - (usage.cachedTokens || 0), modelCard?.pricing?.input)
-      : 0
+  const inputTextTokens = usage.inputTextTokens || (usage as any).inputTokens || 0;
+  const totalInputTokens = usage.totalInputTokens || (usage as any).inputTokens || 0;
+
+  const totalOutputTokens = usage.totalOutputTokens || (usage as any).outputTokens || 0;
+
+  const outputReasoningTokens = usage.outputReasoningTokens || (usage as any).reasoningTokens || 0;
+
+  const outputTextTokens = usage.outputTextTokens
+    ? usage.outputTextTokens
+    : totalOutputTokens - outputReasoningTokens - (usage.outputAudioTokens || 0);
+
+  const inputWriteCacheTokens = usage.inputWriteCacheTokens || 0;
+  const inputCacheTokens = usage.inputCachedTokens || (usage as any).cachedTokens || 0;
+
+  const inputCacheMissTokens = usage?.inputCacheMissTokens
+    ? usage?.inputCacheMissTokens
+    : totalInputTokens - (inputCacheTokens || 0);
+
+  const inputCacheMissCredit = (
+    !!inputCacheMissTokens ? calcCredit(inputCacheMissTokens, modelCard?.pricing?.input) : 0
   ) as number;
 
-  const cachedInputCredit = (
-    !!usage.cachedTokens ? calcCredit(usage.cachedTokens, modelCard?.pricing?.cachedInput) : 0
+  const inputCachedCredit = (
+    !!inputCacheTokens ? calcCredit(inputCacheTokens, modelCard?.pricing?.cachedInput) : 0
   ) as number;
 
-  const totalOutput = (
-    !!usage.outputTokens ? calcCredit(usage.outputTokens, modelCard?.pricing?.output) : 0
+  const inputWriteCachedCredit = !!inputWriteCacheTokens
+    ? (calcCredit(inputWriteCacheTokens, modelCard?.pricing?.writeCacheInput) as number)
+    : 0;
+
+  const totalOutputCredit = (
+    !!totalOutputTokens ? calcCredit(totalOutputTokens, modelCard?.pricing?.output) : 0
   ) as number;
+  const totalInputCredit = (
+    !!totalInputTokens ? calcCredit(totalInputTokens, modelCard?.pricing?.output) : 0
+  ) as number;
+
+  const totalCredit =
+    inputCacheMissCredit + inputCachedCredit + inputWriteCachedCredit + totalOutputCredit;
 
-  const totalTokens = uncachedInputCredit + cachedInputCredit + totalOutput;
   return {
-    cachedInput: !!usage.cachedTokens
-      ? {
-          credit: cachedInputCredit,
-          token: usage.cachedTokens,
-        }
-      : undefined,
     inputAudio: !!usage.inputAudioTokens
       ? {
           credit: calcCredit(usage.inputAudioTokens, modelCard?.pricing?.audioInput),
           token: usage.inputAudioTokens,
         }
       : undefined,
-    inputText: !!usage.inputTokens
+    inputCacheMiss: !!inputCacheMissTokens
+      ? { credit: inputCacheMissCredit, token: inputCacheMissTokens }
+      : undefined,
+    inputCached: !!inputCacheTokens
+      ? { credit: inputCachedCredit, token: inputCacheTokens }
+      : undefined,
+    inputCachedWrite: !!inputWriteCacheTokens
+      ? { credit: inputWriteCachedCredit, token: inputWriteCacheTokens }
+      : undefined,
+    inputCitation: !!usage.inputCitationTokens
+      ? {
+          credit: calcCredit(usage.inputCitationTokens, modelCard?.pricing?.input),
+          token: usage.inputCitationTokens,
+        }
+      : undefined,
+    inputText: !!inputTextTokens
       ? {
-          credit: calcCredit(
-            usage.inputTokens - (usage.inputAudioTokens || 0),
-            modelCard?.pricing?.input,
-          ),
-          token: usage.inputTokens - (usage.inputAudioTokens || 0),
+          credit: calcCredit(inputTextTokens, modelCard?.pricing?.input),
+          token: inputTextTokens,
         }
       : undefined,
+
     outputAudio: !!usage.outputAudioTokens
       ? {
           credit: calcCredit(usage.outputAudioTokens, modelCard?.pricing?.audioOutput),
@@ -55,40 +87,27 @@ export const getDetailsToken = (
           token: usage.outputAudioTokens,
         }
       : undefined,
-
-    outputText: !!usage.outputTokens
+    outputReasoning: !!outputReasoningTokens
       ? {
-          credit: calcCredit(
-            usage.outputTokens - (usage.reasoningTokens || 0) - (usage.outputAudioTokens || 0),
-            modelCard?.pricing?.output,
-          ),
-          token: usage.outputTokens - (usage.reasoningTokens || 0) - (usage.outputAudioTokens || 0),
+          credit: calcCredit(outputReasoningTokens, modelCard?.pricing?.output),
+          token: outputReasoningTokens,
         }
       : undefined,
-    reasoning: !!usage.reasoningTokens
+    outputText: !!outputTextTokens
       ? {
-          credit: calcCredit(usage.reasoningTokens, modelCard?.pricing?.output),
-          token: usage.reasoningTokens,
+          credit: calcCredit(outputTextTokens, modelCard?.pricing?.output),
+          token: outputTextTokens,
         }
       : undefined,
 
-    totalOutput: !!usage.outputTokens
-      ? {
-          credit: totalOutput,
-          token: usage.outputTokens,
-        }
+    totalInput: !!totalInputTokens
+      ? { credit: totalInputCredit, token: totalInputTokens }
       : undefined,
-    totalTokens: !!usage.totalTokens
-      ? {
-          credit: totalTokens,
-          token: usage.totalTokens,
-        }
+    totalOutput: !!totalOutputTokens
+      ? { credit: totalOutputCredit, token: totalOutputTokens }
       : undefined,
-    uncachedInput: !!usage.inputTokens
-      ? {
-          credit: uncachedInputCredit,
-          token: usage.inputTokens - (usage.cachedTokens || 0),
-        }
+    totalTokens: !!usage.totalTokens
+      ? { credit: totalCredit, token: usage.totalTokens }
      : undefined,
   };
 };
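
Note on the tokens.ts rewrite above: getDetailsToken now reads the granular ModelTokensUsage fields (totalInputTokens, inputCachedTokens, outputReasoningTokens, and so on, extended in package/src/types/message/base.ts) and falls back through `(usage as any)` to the legacy flat fields (inputTokens, outputTokens, cachedTokens, reasoningTokens), so usage recorded by earlier versions still renders. A minimal sketch of that fallback chain, assuming a legacy-shaped payload; all numbers are invented for illustration:

// Legacy payload: only the old flat fields are present.
interface UsageLike {
  // legacy flat fields
  cachedTokens?: number;
  inputTokens?: number;
  outputTokens?: number;
  reasoningTokens?: number;
  totalTokens?: number;
  // new granular fields
  inputCacheMissTokens?: number;
  inputCachedTokens?: number;
  outputAudioTokens?: number;
  outputReasoningTokens?: number;
  outputTextTokens?: number;
  totalInputTokens?: number;
  totalOutputTokens?: number;
}

const usage: UsageLike = {
  cachedTokens: 800,
  inputTokens: 1200,
  outputTokens: 300,
  reasoningTokens: 120,
  totalTokens: 1500,
};

// Same fallback chain as in the diff, applied to the legacy payload:
const totalInputTokens = usage.totalInputTokens || usage.inputTokens || 0; // 1200
const inputCacheTokens = usage.inputCachedTokens || usage.cachedTokens || 0; // 800
const inputCacheMissTokens =
  usage.inputCacheMissTokens || totalInputTokens - inputCacheTokens; // 400
const totalOutputTokens = usage.totalOutputTokens || usage.outputTokens || 0; // 300
const outputReasoningTokens = usage.outputReasoningTokens || usage.reasoningTokens || 0; // 120
const outputTextTokens =
  usage.outputTextTokens ||
  totalOutputTokens - outputReasoningTokens - (usage.outputAudioTokens || 0); // 180

The displayed total credit also changes: per the diff, totalCredit is now the sum of the cache-miss, cached-read, cache-write, and output credits rather than uncached-input + cached-input + output.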
package/src/libs/agent-runtime/baichuan/index.test.ts

@@ -1,5 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '@/libs/agent-runtime';
+import { Mock } from 'vitest';
+
+import { LobeOpenAICompatibleRuntime, ModelProvider } from '@/libs/agent-runtime';
 
 import { testProvider } from '../providerTestUtils';
 import { LobeBaichuanAI } from './index';
@@ -10,4 +12,59 @@ testProvider({
   defaultBaseURL: 'https://api.baichuan-ai.com/v1',
   chatDebugEnv: 'DEBUG_BAICHUAN_CHAT_COMPLETION',
   chatModel: 'hunyuan-lite',
+  test: {
+    skipAPICall: true,
+  },
+});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeBaichuanAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('specific LobeBaichuanAI tests', () => {
+  it(`should call API with corresponding options`, async () => {
+    // Arrange
+    const mockStream = new ReadableStream();
+    const mockResponse = Promise.resolve(mockStream);
+
+    (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+    // Act
+    const result = await instance.chat({
+      max_tokens: 1024,
+      messages: [{ content: 'Hello', role: 'user' }],
+      model: 'open-mistral-7b',
+      temperature: 0.7,
+      stream: true,
+      top_p: 1,
+    });
+
+    // Assert
+    expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+      {
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'open-mistral-7b',
+        stream: true,
+        stream_options: {
+          include_usage: true,
+        },
+        temperature: 0.35,
+        top_p: 1,
+      },
+      { headers: { Accept: '*/*' } },
+    );
+    expect(result).toBeInstanceOf(Response);
+  });
 });
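
The testProvider helper used here is the shared suite added in package/src/libs/agent-runtime/providerTestUtils.ts (+58 lines in this release), which replaces the near-identical init/error/debug boilerplate deleted from the groq, mistral, togetherai, and zeroone test files elsewhere in this diff. The diff does not show its definition; the option shape below is inferred solely from the two call sites visible in this diff and is hypothetical:

// Hypothetical sketch, inferred from the baichuan and groq call sites;
// the real export lives in src/libs/agent-runtime/providerTestUtils.ts.
interface TestProviderOptions {
  Runtime: new (options: { apiKey?: string; baseURL?: string }) => unknown; // class under test, e.g. LobeGroq
  provider: string; // provider id, e.g. 'groq'
  defaultBaseURL: string; // expected instance.baseURL after construction
  chatDebugEnv: string; // env var that enables debugStream, e.g. 'DEBUG_GROQ_CHAT_COMPLETION'
  chatModel: string; // model id used by the generic chat test
  test?: {
    skipAPICall?: boolean; // opt out of the generic API-call assertion
  };
}

Baichuan opts out of the generic API-call assertion (test.skipAPICall: true) and keeps a provider-specific one instead: the assertion above expects temperature: 0.35 back for an input of 0.7, plus an injected stream_options.include_usage, which suggests the Baichuan runtime halves the incoming temperature and always requests usage data when streaming.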
package/src/libs/agent-runtime/groq/index.test.ts

@@ -1,17 +1,18 @@
 // @vitest-environment node
-import OpenAI from 'openai';
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';
 
-import * as debugStreamModule from '../utils/debugStream';
 import { LobeGroq } from './index';
 
-const provider = 'groq';
-const defaultBaseURL = 'https://api.groq.com/openai/v1';
-
-const bizErrorType = 'ProviderBizError';
-const invalidErrorType = 'InvalidProviderAPIKey';
+testProvider({
+  provider: 'groq',
+  defaultBaseURL: 'https://api.groq.com/openai/v1',
+  chatModel: 'mistralai/mistral-7b-instruct:free',
+  Runtime: LobeGroq,
+  chatDebugEnv: 'DEBUG_GROQ_CHAT_COMPLETION',
+});
 
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
@@ -31,295 +32,46 @@ afterEach(() => {
   vi.clearAllMocks();
 });
 
-describe('LobeGroqAI', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobeGroq({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobeGroq);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    it('should call chat with corresponding options', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
-        max_tokens: 1024,
-        messages: [{ content: 'Hello', role: 'user' }],
-        model: 'mistralai/mistral-7b-instruct:free',
-        temperature: 0.7,
-        top_p: 1,
-      });
-
-      // Assert
-      expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-        {
-          max_tokens: 1024,
-          stream: true,
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'mistralai/mistral-7b-instruct:free',
-          temperature: 0.7,
-          top_p: 1,
-        },
-        { headers: { Accept: '*/*' } },
-      );
-      expect(result).toBeInstanceOf(Response);
-    });
-
-    describe('handlePayload option', () => {
-      it('should set stream to false when payload contains tools', async () => {
-        const mockCreateMethod = vi
-          .spyOn(instance['client'].chat.completions, 'create')
-          .mockResolvedValue({
-            id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
-            object: 'chat.completion',
-            created: 1709125675,
-            model: 'mistralai/mistral-7b-instruct:free',
-            system_fingerprint: 'fp_86156a94a0',
-            choices: [
-              {
-                index: 0,
-                message: { role: 'assistant', content: 'hello', refusal: null },
-                logprobs: null,
-                finish_reason: 'stop',
-              },
-            ],
-          });
-
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
+describe('LobeGroqAI Temperature Tests', () => {
+  describe('handlePayload option', () => {
+    it('should set stream to false when payload contains tools', async () => {
+      const mockCreateMethod = vi
+        .spyOn(instance['client'].chat.completions, 'create')
+        .mockResolvedValue({
+          id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
+          object: 'chat.completion',
+          created: 1709125675,
           model: 'mistralai/mistral-7b-instruct:free',
-          temperature: 0,
-          tools: [
+          system_fingerprint: 'fp_86156a94a0',
+          choices: [
            {
-              type: 'function',
-              function: { name: 'tool1', description: '', parameters: {} },
+              index: 0,
+              message: { role: 'assistant', content: 'hello', refusal: null },
+              logprobs: null,
+              finish_reason: 'stop',
            },
          ],
        });
 
-        expect(mockCreateMethod).toHaveBeenCalledWith(
-          expect.objectContaining({ stream: false }),
-          expect.anything(),
-        );
-      });
-    });
-
-    describe('Error', () => {
-      it('should return OpenRouterBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'mistralai/mistral-7b-instruct:free',
+        temperature: 0,
+        tools: [
          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
+            type: 'function',
+            function: { name: 'tool1', description: '', parameters: {} },
          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with InvalidOpenRouterAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeGroq({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
+        ],
       });
 
-      it('should return OpenRouterBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return OpenRouterBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeGroq({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidOpenRouterAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('Unauthorized') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          // Expect the chat method to throw an error with InvalidMoonshotAPIKey
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('Unauthorized'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_OPENROUTER_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
-
-        // Mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // Save the original value of the environment variable
-        const originalDebugValue = process.env.DEBUG_GROQ_CHAT_COMPLETION;
-
-        // Mock the environment variable
-        process.env.DEBUG_GROQ_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // Run the test
-        // Call the function under test and make sure it invokes debugStream when the condition is met
-        // Hypothetical invocation; adjust to the actual situation as needed
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'mistralai/mistral-7b-instruct:free',
-          temperature: 0,
-        });
-
-        // Verify that debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // Restore the original value of the environment variable
-        process.env.DEBUG_GROQ_CHAT_COMPLETION = originalDebugValue;
-      });
+      expect(mockCreateMethod).toHaveBeenCalledWith(
+        expect.objectContaining({ stream: false }),
+        expect.anything(),
+      );
     });
   });
-});
 
-describe('LobeGroqAI Temperature Tests', () => {
   it('should set temperature to 0.7', async () => {
     await instance.chat({
       messages: [{ content: 'Hello', role: 'user' }],
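
The one Groq-specific behavior retained after the consolidation pins down the handlePayload rule: when the request carries tool definitions, stream is forced to false. A minimal sketch of that rule as the test implies it (the rationale, that streaming should be disabled for tool calls, is inferred from the test alone); the real implementation lives in src/libs/agent-runtime/groq/index.ts and is not shown in this diff:

// Hypothetical payload type for illustration; the runtime itself uses the
// OpenAI SDK request types.
interface GroqChatPayload {
  messages: { content: string; role: string }[];
  model: string;
  stream?: boolean;
  temperature?: number;
  tools?: {
    type: 'function';
    function: { name: string; description?: string; parameters?: object };
  }[];
}

// Sketch of the asserted behavior: disable streaming whenever tools are
// present, leave it enabled otherwise.
const handleGroqPayload = (payload: GroqChatPayload): GroqChatPayload => ({
  ...payload,
  stream: !payload.tools || payload.tools.length === 0,
});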