@lobehub/chat 0.132.2 → 0.133.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/.env.example +5 -0
  2. package/CHANGELOG.md +25 -0
  3. package/Dockerfile +3 -0
  4. package/docs/self-hosting/environment-variables/model-provider.mdx +8 -0
  5. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +10 -1
  6. package/locales/ar/common.json +1 -0
  7. package/locales/ar/error.json +6 -0
  8. package/locales/ar/setting.json +8 -0
  9. package/locales/de-DE/common.json +1 -0
  10. package/locales/de-DE/error.json +6 -0
  11. package/locales/de-DE/setting.json +8 -0
  12. package/locales/en-US/common.json +1 -0
  13. package/locales/en-US/error.json +6 -0
  14. package/locales/en-US/setting.json +8 -0
  15. package/locales/es-ES/common.json +1 -0
  16. package/locales/es-ES/error.json +6 -0
  17. package/locales/es-ES/setting.json +8 -0
  18. package/locales/fr-FR/common.json +1 -0
  19. package/locales/fr-FR/error.json +6 -0
  20. package/locales/fr-FR/setting.json +8 -0
  21. package/locales/it-IT/common.json +1 -0
  22. package/locales/it-IT/error.json +6 -0
  23. package/locales/it-IT/setting.json +8 -0
  24. package/locales/ja-JP/common.json +1 -0
  25. package/locales/ja-JP/error.json +6 -0
  26. package/locales/ja-JP/setting.json +8 -0
  27. package/locales/ko-KR/common.json +1 -0
  28. package/locales/ko-KR/error.json +6 -0
  29. package/locales/ko-KR/setting.json +8 -0
  30. package/locales/nl-NL/common.json +1 -0
  31. package/locales/nl-NL/error.json +6 -0
  32. package/locales/nl-NL/setting.json +8 -0
  33. package/locales/pl-PL/common.json +1 -0
  34. package/locales/pl-PL/error.json +6 -0
  35. package/locales/pl-PL/setting.json +8 -0
  36. package/locales/pt-BR/common.json +1 -0
  37. package/locales/pt-BR/error.json +6 -0
  38. package/locales/pt-BR/setting.json +8 -0
  39. package/locales/ru-RU/common.json +1 -0
  40. package/locales/ru-RU/error.json +6 -0
  41. package/locales/ru-RU/setting.json +8 -0
  42. package/locales/tr-TR/common.json +1 -0
  43. package/locales/tr-TR/error.json +6 -0
  44. package/locales/tr-TR/setting.json +8 -0
  45. package/locales/vi-VN/common.json +1 -0
  46. package/locales/vi-VN/error.json +6 -0
  47. package/locales/vi-VN/setting.json +8 -0
  48. package/locales/zh-CN/common.json +1 -0
  49. package/locales/zh-CN/error.json +6 -0
  50. package/locales/zh-CN/setting.json +8 -0
  51. package/locales/zh-TW/common.json +1 -0
  52. package/locales/zh-TW/error.json +6 -0
  53. package/locales/zh-TW/setting.json +8 -0
  54. package/package.json +1 -1
  55. package/src/app/api/chat/[provider]/agentRuntime.test.ts +52 -0
  56. package/src/app/api/chat/[provider]/agentRuntime.ts +13 -0
  57. package/src/app/api/config/route.ts +2 -0
  58. package/src/app/api/errorResponse.test.ts +15 -0
  59. package/src/app/api/errorResponse.ts +3 -0
  60. package/src/app/settings/llm/Mistral/index.tsx +52 -0
  61. package/src/app/settings/llm/index.tsx +2 -0
  62. package/src/config/modelProviders/index.ts +3 -0
  63. package/src/config/modelProviders/mistral.ts +34 -0
  64. package/src/config/server/provider.ts +8 -0
  65. package/src/const/settings.ts +4 -0
  66. package/src/features/Conversation/Error/APIKeyForm/Mistral.tsx +60 -0
  67. package/src/features/Conversation/Error/APIKeyForm/index.tsx +5 -0
  68. package/src/features/Conversation/Error/index.tsx +1 -0
  69. package/src/libs/agent-runtime/anthropic/index.test.ts +78 -0
  70. package/src/libs/agent-runtime/error.ts +3 -0
  71. package/src/libs/agent-runtime/index.ts +1 -0
  72. package/src/libs/agent-runtime/mistral/index.test.ts +378 -0
  73. package/src/libs/agent-runtime/mistral/index.ts +87 -0
  74. package/src/locales/default/common.ts +1 -0
  75. package/src/locales/default/error.ts +7 -0
  76. package/src/locales/default/setting.ts +8 -0
  77. package/src/services/_auth.test.ts +20 -0
  78. package/src/services/_auth.ts +4 -0
  79. package/src/store/global/slices/settings/selectors/modelProvider.ts +9 -0
  80. package/src/types/settings/modelProvider.ts +6 -0
@@ -0,0 +1,60 @@
1
+ import { Mistral } from '@lobehub/icons';
2
+ import { Input } from 'antd';
3
+ import { memo } from 'react';
4
+ import { useTranslation } from 'react-i18next';
5
+
6
+ import { ModelProvider } from '@/libs/agent-runtime';
7
+ import { useGlobalStore } from '@/store/global';
8
+ import { modelProviderSelectors } from '@/store/global/selectors';
9
+
10
+ import { FormAction } from '../style';
11
+
12
+ const MistralForm = memo(() => {
13
+ const { t } = useTranslation('error');
14
+ // const [showProxy, setShow] = useState(false);
15
+
16
+ const [apiKey, setConfig] = useGlobalStore((s) => [
17
+ modelProviderSelectors.mistralAPIKey(s),
18
+ s.setModelProviderConfig,
19
+ ]);
20
+
21
+ return (
22
+ <FormAction
23
+ avatar={<Mistral size={56} />}
24
+ description={t('unlock.apikey.Mistral.description')}
25
+ title={t('unlock.apikey.Mistral.title')}
26
+ >
27
+ <Input.Password
28
+ autoComplete={'new-password'}
29
+ onChange={(e) => {
30
+ setConfig(ModelProvider.Mistral, { apiKey: e.target.value });
31
+ }}
32
+ placeholder={'*********************************'}
33
+ type={'block'}
34
+ value={apiKey}
35
+ />
36
+ {/*{showProxy ? (*/}
37
+ {/* <Input*/}
38
+ {/* onChange={(e) => {*/}
39
+ {/* setConfig({ endpoint: e.target.value });*/}
40
+ {/* }}*/}
41
+ {/* placeholder={'https://api.mistral.ai/v1'}*/}
42
+ {/* type={'block'}*/}
43
+ {/* value={proxyUrl}*/}
44
+ {/* />*/}
45
+ {/*) : (*/}
46
+ {/* <Button*/}
47
+ {/* icon={<Icon icon={Network} />}*/}
48
+ {/* onClick={() => {*/}
49
+ {/* setShow(true);*/}
50
+ {/* }}*/}
51
+ {/* type={'text'}*/}
52
+ {/* >*/}
53
+ {/* {t('unlock.apikey.addProxyUrl')}*/}
54
+ {/* </Button>*/}
55
+ {/*)}*/}
56
+ </FormAction>
57
+ );
58
+ });
59
+
60
+ export default MistralForm;
@@ -9,6 +9,7 @@ import { useChatStore } from '@/store/chat';
9
9
  import AnthropicForm from './Anthropic';
10
10
  import BedrockForm from './Bedrock';
11
11
  import GoogleForm from './Google';
12
+ import MistralForm from './Mistral';
12
13
  import MoonshotForm from './Moonshot';
13
14
  import OpenAIForm from './OpenAI';
14
15
  import PerplexityForm from './Perplexity';
@@ -38,6 +39,10 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
38
39
  return <ZhipuForm />;
39
40
  }
40
41
 
42
+ case ModelProvider.Mistral: {
43
+ return <MistralForm />;
44
+ }
45
+
41
46
  case ModelProvider.Moonshot: {
42
47
  return <MoonshotForm />;
43
48
  }
@@ -64,6 +64,7 @@ const ErrorMessageExtra = memo<{ data: ChatMessage }>(({ data }) => {
64
64
 
65
65
  case AgentRuntimeErrorType.InvalidBedrockCredentials:
66
66
  case AgentRuntimeErrorType.InvalidZhipuAPIKey:
67
+ case AgentRuntimeErrorType.InvalidMistralAPIKey:
67
68
  case AgentRuntimeErrorType.InvalidMoonshotAPIKey:
68
69
  case AgentRuntimeErrorType.InvalidGoogleAPIKey:
69
70
  case AgentRuntimeErrorType.InvalidPerplexityAPIKey:
@@ -31,6 +31,7 @@ describe('LobeAnthropicAI', () => {
31
31
  });
32
32
 
33
33
  describe('chat', () => {
34
+
34
35
  it('should return a StreamingTextResponse on successful API call', async () => {
35
36
  const result = await instance.chat({
36
37
  messages: [{ content: 'Hello', role: 'user' }],
@@ -41,6 +42,7 @@ describe('LobeAnthropicAI', () => {
41
42
  // Assert
42
43
  expect(result).toBeInstanceOf(Response);
43
44
  });
45
+
44
46
  it('should handle text messages correctly', async () => {
45
47
  // Arrange
46
48
  const mockStream = new ReadableStream({
@@ -73,6 +75,7 @@ describe('LobeAnthropicAI', () => {
73
75
  })
74
76
  expect(result).toBeInstanceOf(Response);
75
77
  });
78
+
76
79
  it('should handle system prompt correctly', async () => {
77
80
  // Arrange
78
81
  const mockStream = new ReadableStream({
@@ -107,6 +110,81 @@ describe('LobeAnthropicAI', () => {
107
110
  })
108
111
  expect(result).toBeInstanceOf(Response);
109
112
  });
113
+
114
+ it('should call Anthropic API with supported options in streaming mode', async () => {
115
+ // Arrange
116
+ const mockStream = new ReadableStream({
117
+ start(controller) {
118
+ controller.enqueue('Hello, world!');
119
+ controller.close();
120
+ },
121
+ });
122
+ const mockResponse = Promise.resolve(mockStream);
123
+ (instance['client'].messages.create as Mock).mockResolvedValue(mockResponse);
124
+
125
+ // Act
126
+ const result = await instance.chat({
127
+ max_tokens: 2048,
128
+ messages: [
129
+ { content: 'Hello', role: 'user' },
130
+ ],
131
+ model: 'claude-instant-1.2',
132
+ temperature: 0.5,
133
+ top_p: 1,
134
+ });
135
+
136
+ // Assert
137
+ expect(instance['client'].messages.create).toHaveBeenCalledWith({
138
+ max_tokens: 2048,
139
+ messages: [
140
+ { content: 'Hello', role: 'user' },
141
+ ],
142
+ model: 'claude-instant-1.2',
143
+ stream: true,
144
+ temperature: 0.5,
145
+ top_p: 1,
146
+ })
147
+ expect(result).toBeInstanceOf(Response);
148
+ });
149
+
150
+ it('should call Anthropic API without unsupported options', async () => {
151
+ // Arrange
152
+ const mockStream = new ReadableStream({
153
+ start(controller) {
154
+ controller.enqueue('Hello, world!');
155
+ controller.close();
156
+ },
157
+ });
158
+ const mockResponse = Promise.resolve(mockStream);
159
+ (instance['client'].messages.create as Mock).mockResolvedValue(mockResponse);
160
+
161
+ // Act
162
+ const result = await instance.chat({
163
+ frequency_penalty: 0.5, // Unsupported option
164
+ max_tokens: 2048,
165
+ messages: [
166
+ { content: 'Hello', role: 'user' },
167
+ ],
168
+ model: 'claude-instant-1.2',
169
+ presence_penalty: 0.5,
170
+ temperature: 0.5,
171
+ top_p: 1,
172
+ });
173
+
174
+ // Assert
175
+ expect(instance['client'].messages.create).toHaveBeenCalledWith({
176
+ max_tokens: 2048,
177
+ messages: [
178
+ { content: 'Hello', role: 'user' },
179
+ ],
180
+ model: 'claude-instant-1.2',
181
+ stream: true,
182
+ temperature: 0.5,
183
+ top_p: 1,
184
+ })
185
+ expect(result).toBeInstanceOf(Response);
186
+ });
187
+
110
188
  it('should call debugStream in DEBUG mode', async () => {
111
189
  // Arrange
112
190
  const mockProdStream = new ReadableStream({
@@ -20,6 +20,9 @@ export const AgentRuntimeErrorType = {
20
20
  InvalidBedrockCredentials: 'InvalidBedrockCredentials',
21
21
  BedrockBizError: 'BedrockBizError',
22
22
 
23
+ InvalidMistralAPIKey: 'InvalidMistralAPIKey',
24
+ MistralBizError: 'MistralBizError',
25
+
23
26
  InvalidMoonshotAPIKey: 'InvalidMoonshotAPIKey',
24
27
  MoonshotBizError: 'MoonshotBizError',
25
28
 
@@ -4,6 +4,7 @@ export * from './BaseAI';
4
4
  export { LobeBedrockAI } from './bedrock';
5
5
  export * from './error';
6
6
  export { LobeGoogleAI } from './google';
7
+ export { LobeMistralAI } from './mistral';
7
8
  export { LobeMoonshotAI } from './moonshot';
8
9
  export { LobeOllamaAI } from './ollama';
9
10
  export { LobeOpenAI } from './openai';
@@ -0,0 +1,378 @@
1
+ // @vitest-environment node
2
+ import OpenAI from 'openai';
3
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
+
5
+ import { ChatStreamCallbacks } from '@/libs/agent-runtime';
6
+
7
+ import * as debugStreamModule from '../utils/debugStream';
8
+ import { LobeMistralAI } from './index';
9
+
10
+ const provider = 'mistral';
11
+ const defaultBaseURL = 'https://api.mistral.ai/v1';
12
+ const bizErrorType = 'MistralBizError';
13
+ const invalidErrorType = 'InvalidMistralAPIKey';
14
+
15
+ // Mock the console.error to avoid polluting test output
16
+ vi.spyOn(console, 'error').mockImplementation(() => {});
17
+
18
+ let instance: LobeMistralAI;
19
+
20
+ beforeEach(() => {
21
+ instance = new LobeMistralAI({ apiKey: 'test' });
22
+
23
+ // 使用 vi.spyOn 来模拟 chat.completions.create 方法
24
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
25
+ new ReadableStream() as any,
26
+ );
27
+ });
28
+
29
+ afterEach(() => {
30
+ vi.clearAllMocks();
31
+ });
32
+
33
+ describe('LobeMistralAI', () => {
34
+ describe('init', () => {
35
+ it('should correctly initialize with an API key', async () => {
36
+ const instance = new LobeMistralAI({ apiKey: 'test_api_key' });
37
+ expect(instance).toBeInstanceOf(LobeMistralAI);
38
+ expect(instance.baseURL).toEqual(defaultBaseURL);
39
+ });
40
+ });
41
+
42
+ describe('chat', () => {
43
+ it('should return a StreamingTextResponse on successful API call', async () => {
44
+ // Arrange
45
+ const mockStream = new ReadableStream();
46
+ const mockResponse = Promise.resolve(mockStream);
47
+
48
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
49
+
50
+ // Act
51
+ const result = await instance.chat({
52
+ messages: [{ content: 'Hello', role: 'user' }],
53
+ model: 'open-mistral-7b',
54
+ temperature: 0,
55
+ });
56
+
57
+ // Assert
58
+ expect(result).toBeInstanceOf(Response);
59
+ });
60
+
61
+ it('should call Mistral API with supported options in streaming mode', async () => {
62
+ // Arrange
63
+ const mockStream = new ReadableStream();
64
+ const mockResponse = Promise.resolve(mockStream);
65
+
66
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
67
+
68
+ // Act
69
+ const result = await instance.chat({
70
+ max_tokens: 1024,
71
+ messages: [{ content: 'Hello', role: 'user' }],
72
+ model: 'open-mistral-7b',
73
+ temperature: 0.7,
74
+ top_p: 1,
75
+ });
76
+
77
+ // Assert
78
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
79
+ max_tokens: 1024,
80
+ messages: [{ content: 'Hello', role: 'user' }],
81
+ model: 'open-mistral-7b',
82
+ stream: true,
83
+ temperature: 0.7,
84
+ top_p: 1,
85
+ })
86
+ expect(result).toBeInstanceOf(Response);
87
+ });
88
+
89
+ it('should call Mistral API without unsupported options', async () => {
90
+ // Arrange
91
+ const mockStream = new ReadableStream();
92
+ const mockResponse = Promise.resolve(mockStream);
93
+
94
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
95
+
96
+ // Act
97
+ const result = await instance.chat({
98
+ frequency_penalty: 0.5, // unsupported option
99
+ max_tokens: 1024,
100
+ messages: [{ content: 'Hello', role: 'user' }],
101
+ model: 'open-mistral-7b',
102
+ presence_penalty: 0.5, // unsupported option
103
+ temperature: 0.7,
104
+ top_p: 1,
105
+ });
106
+
107
+ // Assert
108
+ expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
109
+ max_tokens: 1024,
110
+ messages: [{ content: 'Hello', role: 'user' }],
111
+ model: 'open-mistral-7b',
112
+ stream: true,
113
+ temperature: 0.7,
114
+ top_p: 1,
115
+ })
116
+ expect(result).toBeInstanceOf(Response);
117
+ });
118
+
119
+ describe('Error', () => {
120
+ it('should return MistralBizError with an openai error response when OpenAI.APIError is thrown', async () => {
121
+ // Arrange
122
+ const apiError = new OpenAI.APIError(
123
+ 400,
124
+ {
125
+ status: 400,
126
+ error: {
127
+ message: 'Bad Request',
128
+ },
129
+ },
130
+ 'Error message',
131
+ {},
132
+ );
133
+
134
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
135
+
136
+ // Act
137
+ try {
138
+ await instance.chat({
139
+ messages: [{ content: 'Hello', role: 'user' }],
140
+ model: 'open-mistral-7b',
141
+ temperature: 0,
142
+ });
143
+ } catch (e) {
144
+ expect(e).toEqual({
145
+ endpoint: defaultBaseURL,
146
+ error: {
147
+ error: { message: 'Bad Request' },
148
+ status: 400,
149
+ },
150
+ errorType: bizErrorType,
151
+ provider,
152
+ });
153
+ }
154
+ });
155
+
156
+ it('should throw AgentRuntimeError with InvalidMistralAPIKey if no apiKey is provided', async () => {
157
+ try {
158
+ new LobeMistralAI({});
159
+ } catch (e) {
160
+ expect(e).toEqual({ errorType: invalidErrorType });
161
+ }
162
+ });
163
+
164
+ it('should return MistralBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
165
+ // Arrange
166
+ const errorInfo = {
167
+ stack: 'abc',
168
+ cause: {
169
+ message: 'api is undefined',
170
+ },
171
+ };
172
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
173
+
174
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
175
+
176
+ // Act
177
+ try {
178
+ await instance.chat({
179
+ messages: [{ content: 'Hello', role: 'user' }],
180
+ model: 'open-mistral-7b',
181
+ temperature: 0,
182
+ });
183
+ } catch (e) {
184
+ expect(e).toEqual({
185
+ endpoint: defaultBaseURL,
186
+ error: {
187
+ cause: { message: 'api is undefined' },
188
+ stack: 'abc',
189
+ },
190
+ errorType: bizErrorType,
191
+ provider,
192
+ });
193
+ }
194
+ });
195
+
196
+ it('should return MistralBizError with a cause response with desensitized URL', async () => {
197
+ // Arrange
198
+ const errorInfo = {
199
+ stack: 'abc',
200
+ cause: { message: 'api is undefined' },
201
+ };
202
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
203
+
204
+ instance = new LobeMistralAI({
205
+ apiKey: 'test',
206
+
207
+ baseURL: 'https://api.abc.com/v1',
208
+ });
209
+
210
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
211
+
212
+ // Act
213
+ try {
214
+ await instance.chat({
215
+ messages: [{ content: 'Hello', role: 'user' }],
216
+ model: 'open-mistral-7b',
217
+ temperature: 0,
218
+ });
219
+ } catch (e) {
220
+ expect(e).toEqual({
221
+ endpoint: 'https://api.***.com/v1',
222
+ error: {
223
+ cause: { message: 'api is undefined' },
224
+ stack: 'abc',
225
+ },
226
+ errorType: bizErrorType,
227
+ provider,
228
+ });
229
+ }
230
+ });
231
+
232
+ it('should throw an InvalidMistralAPIKey error type on 401 status code', async () => {
233
+ // Mock the API call to simulate a 401 error
234
+ const error = new Error('Unauthorized') as any;
235
+ error.status = 401;
236
+ vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
237
+
238
+ try {
239
+ await instance.chat({
240
+ messages: [{ content: 'Hello', role: 'user' }],
241
+ model: 'gpt-3.5-turbo',
242
+ temperature: 0,
243
+ });
244
+ } catch (e) {
245
+ // Expect the chat method to throw an error with InvalidMistralAPIKey
246
+ expect(e).toEqual({
247
+ endpoint: defaultBaseURL,
248
+ error: new Error('Unauthorized'),
249
+ errorType: invalidErrorType,
250
+ provider,
251
+ });
252
+ }
253
+ });
254
+
255
+ it('should return AgentRuntimeError for non-OpenAI errors', async () => {
256
+ // Arrange
257
+ const genericError = new Error('Generic Error');
258
+
259
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
260
+
261
+ // Act
262
+ try {
263
+ await instance.chat({
264
+ messages: [{ content: 'Hello', role: 'user' }],
265
+ model: 'open-mistral-7b',
266
+ temperature: 0,
267
+ });
268
+ } catch (e) {
269
+ expect(e).toEqual({
270
+ endpoint: defaultBaseURL,
271
+ errorType: 'AgentRuntimeError',
272
+ provider,
273
+ error: {
274
+ name: genericError.name,
275
+ cause: genericError.cause,
276
+ message: genericError.message,
277
+ stack: genericError.stack,
278
+ },
279
+ });
280
+ }
281
+ });
282
+ });
283
+
284
+ describe('LobeMistralAI chat with callback and headers', () => {
285
+ it('should handle callback and headers correctly', async () => {
286
+ // 模拟 chat.completions.create 方法返回一个可读流
287
+ const mockCreateMethod = vi
288
+ .spyOn(instance['client'].chat.completions, 'create')
289
+ .mockResolvedValue(
290
+ new ReadableStream({
291
+ start(controller) {
292
+ controller.enqueue({
293
+ id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
294
+ object: 'chat.completion.chunk',
295
+ created: 1709125675,
296
+ model: 'open-mistral-7b',
297
+ system_fingerprint: 'fp_86156a94a0',
298
+ choices: [
299
+ { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
300
+ ],
301
+ });
302
+ controller.close();
303
+ },
304
+ }) as any,
305
+ );
306
+
307
+ // 准备 callback 和 headers
308
+ const mockCallback: ChatStreamCallbacks = {
309
+ onStart: vi.fn(),
310
+ onToken: vi.fn(),
311
+ };
312
+ const mockHeaders = { 'Custom-Header': 'TestValue' };
313
+
314
+ // 执行测试
315
+ const result = await instance.chat(
316
+ {
317
+ messages: [{ content: 'Hello', role: 'user' }],
318
+ model: 'open-mistral-7b',
319
+ temperature: 0,
320
+ },
321
+ { callback: mockCallback, headers: mockHeaders },
322
+ );
323
+
324
+ // 验证 callback 被调用
325
+ await result.text(); // 确保流被消费
326
+ expect(mockCallback.onStart).toHaveBeenCalled();
327
+ expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
328
+
329
+ // 验证 headers 被正确传递
330
+ expect(result.headers.get('Custom-Header')).toEqual('TestValue');
331
+
332
+ // 清理
333
+ mockCreateMethod.mockRestore();
334
+ });
335
+ });
336
+
337
+ describe('DEBUG', () => {
338
+ it('should call debugStream and return StreamingTextResponse when DEBUG_MISTRAL_CHAT_COMPLETION is 1', async () => {
339
+ // Arrange
340
+ const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
341
+ const mockDebugStream = new ReadableStream({
342
+ start(controller) {
343
+ controller.enqueue('Debug stream content');
344
+ controller.close();
345
+ },
346
+ }) as any;
347
+ mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
348
+
349
+ // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
350
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue({
351
+ tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
352
+ });
353
+
354
+ // 保存原始环境变量值
355
+ const originalDebugValue = process.env.DEBUG_MISTRAL_CHAT_COMPLETION;
356
+
357
+ // 模拟环境变量
358
+ process.env.DEBUG_MISTRAL_CHAT_COMPLETION = '1';
359
+ vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
360
+
361
+ // 执行测试
362
+ // 运行你的测试函数,确保它会在条件满足时调用 debugStream
363
+ // 假设的测试函数调用,你可能需要根据实际情况调整
364
+ await instance.chat({
365
+ messages: [{ content: 'Hello', role: 'user' }],
366
+ model: 'open-mistral-7b',
367
+ temperature: 0,
368
+ });
369
+
370
+ // 验证 debugStream 被调用
371
+ expect(debugStreamModule.debugStream).toHaveBeenCalled();
372
+
373
+ // 恢复原始环境变量值
374
+ process.env.DEBUG_MISTRAL_CHAT_COMPLETION = originalDebugValue;
375
+ });
376
+ });
377
+ });
378
+ });
@@ -0,0 +1,87 @@
1
+ import { OpenAIStream, StreamingTextResponse } from 'ai';
2
+ import OpenAI, { ClientOptions } from 'openai';
3
+
4
+ import { LobeRuntimeAI } from '../BaseAI';
5
+ import { AgentRuntimeErrorType } from '../error';
6
+ import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
7
+ import { AgentRuntimeError } from '../utils/createError';
8
+ import { debugStream } from '../utils/debugStream';
9
+ import { desensitizeUrl } from '../utils/desensitizeUrl';
10
+ import { handleOpenAIError } from '../utils/handleOpenAIError';
11
+
12
+ const DEFAULT_BASE_URL = 'https://api.mistral.ai/v1';
13
+
14
+ export class LobeMistralAI implements LobeRuntimeAI {
15
+ private client: OpenAI;
16
+
17
+ baseURL: string;
18
+
19
+ constructor({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
20
+ if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidMistralAPIKey);
21
+
22
+ this.client = new OpenAI({ apiKey, baseURL, ...res });
23
+ this.baseURL = this.client.baseURL;
24
+ }
25
+
26
+ async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
27
+ try {
28
+ // Pick supported properties from payload
29
+ const chatPayload = {
30
+ max_tokens: payload.max_tokens,
31
+ messages: payload.messages,
32
+ model: payload.model,
33
+ stream: true,
34
+ temperature: payload.temperature,
35
+ top_p: payload.top_p,
36
+ };
37
+ const response = await this.client.chat.completions.create(
38
+ chatPayload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
39
+ );
40
+ const [prod, debug] = response.tee();
41
+
42
+ if (process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1') {
43
+ debugStream(debug.toReadableStream()).catch(console.error);
44
+ }
45
+
46
+ return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
47
+ headers: options?.headers,
48
+ });
49
+ } catch (error) {
50
+ let desensitizedEndpoint = this.baseURL;
51
+
52
+ if (this.baseURL !== DEFAULT_BASE_URL) {
53
+ desensitizedEndpoint = desensitizeUrl(this.baseURL);
54
+ }
55
+
56
+ if ('status' in (error as any)) {
57
+ switch ((error as Response).status) {
58
+ case 401: {
59
+ throw AgentRuntimeError.chat({
60
+ endpoint: desensitizedEndpoint,
61
+ error: error as any,
62
+ errorType: AgentRuntimeErrorType.InvalidMistralAPIKey,
63
+ provider: ModelProvider.Mistral,
64
+ });
65
+ }
66
+
67
+ default: {
68
+ break;
69
+ }
70
+ }
71
+ }
72
+
73
+ const { errorResult, RuntimeError } = handleOpenAIError(error);
74
+
75
+ const errorType = RuntimeError || AgentRuntimeErrorType.MistralBizError;
76
+
77
+ throw AgentRuntimeError.chat({
78
+ endpoint: desensitizedEndpoint,
79
+ error: errorResult,
80
+ errorType,
81
+ provider: ModelProvider.Mistral,
82
+ });
83
+ }
84
+ }
85
+ }
86
+
87
+ export default LobeMistralAI;
@@ -104,6 +104,7 @@ export default {
104
104
  azure: 'Azure',
105
105
  bedrock: 'AWS Bedrock',
106
106
  google: 'Google',
107
+ mistral: 'Mistral AI',
107
108
  moonshot: 'Moonshot AI',
108
109
  ollama: 'Ollama',
109
110
  oneapi: 'One API',