@lobehub/chat 0.148.9 → 0.149.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/.env.example +1 -1
  2. package/CHANGELOG.md +50 -0
  3. package/docs/self-hosting/advanced/sso-providers/github.zh-CN.mdx +1 -3
  4. package/docs/self-hosting/environment-variables/model-provider.mdx +1 -1
  5. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +1 -1
  6. package/docs/self-hosting/examples/ollama.mdx +0 -1
  7. package/docs/self-hosting/examples/ollama.zh-CN.mdx +0 -2
  8. package/locales/ar/chat.json +4 -0
  9. package/locales/ar/error.json +1 -1
  10. package/locales/ar/modelProvider.json +27 -1
  11. package/locales/ar/welcome.json +1 -0
  12. package/locales/bg-BG/chat.json +4 -0
  13. package/locales/bg-BG/error.json +1 -1
  14. package/locales/bg-BG/modelProvider.json +27 -1
  15. package/locales/bg-BG/welcome.json +1 -0
  16. package/locales/de-DE/chat.json +4 -0
  17. package/locales/de-DE/error.json +1 -1
  18. package/locales/de-DE/modelProvider.json +27 -1
  19. package/locales/de-DE/welcome.json +1 -0
  20. package/locales/en-US/chat.json +4 -0
  21. package/locales/en-US/error.json +1 -1
  22. package/locales/en-US/modelProvider.json +27 -1
  23. package/locales/en-US/welcome.json +1 -0
  24. package/locales/es-ES/chat.json +4 -0
  25. package/locales/es-ES/error.json +1 -1
  26. package/locales/es-ES/modelProvider.json +27 -1
  27. package/locales/es-ES/welcome.json +1 -0
  28. package/locales/fr-FR/chat.json +4 -0
  29. package/locales/fr-FR/error.json +1 -1
  30. package/locales/fr-FR/modelProvider.json +27 -1
  31. package/locales/fr-FR/welcome.json +1 -0
  32. package/locales/it-IT/chat.json +4 -0
  33. package/locales/it-IT/error.json +1 -1
  34. package/locales/it-IT/modelProvider.json +26 -1
  35. package/locales/it-IT/welcome.json +1 -0
  36. package/locales/ja-JP/chat.json +4 -0
  37. package/locales/ja-JP/error.json +1 -1
  38. package/locales/ja-JP/modelProvider.json +27 -1
  39. package/locales/ja-JP/welcome.json +1 -0
  40. package/locales/ko-KR/chat.json +4 -0
  41. package/locales/ko-KR/error.json +1 -1
  42. package/locales/ko-KR/modelProvider.json +27 -1
  43. package/locales/ko-KR/welcome.json +1 -0
  44. package/locales/nl-NL/chat.json +4 -0
  45. package/locales/nl-NL/error.json +1 -1
  46. package/locales/nl-NL/modelProvider.json +27 -1
  47. package/locales/nl-NL/welcome.json +1 -0
  48. package/locales/pl-PL/chat.json +4 -0
  49. package/locales/pl-PL/error.json +1 -1
  50. package/locales/pl-PL/modelProvider.json +27 -1
  51. package/locales/pl-PL/welcome.json +1 -0
  52. package/locales/pt-BR/chat.json +4 -0
  53. package/locales/pt-BR/error.json +1 -1
  54. package/locales/pt-BR/modelProvider.json +27 -1
  55. package/locales/pt-BR/welcome.json +1 -0
  56. package/locales/ru-RU/chat.json +4 -0
  57. package/locales/ru-RU/error.json +1 -1
  58. package/locales/ru-RU/modelProvider.json +27 -1
  59. package/locales/ru-RU/welcome.json +1 -0
  60. package/locales/tr-TR/chat.json +4 -0
  61. package/locales/tr-TR/error.json +1 -1
  62. package/locales/tr-TR/modelProvider.json +27 -1
  63. package/locales/tr-TR/welcome.json +1 -0
  64. package/locales/vi-VN/chat.json +4 -0
  65. package/locales/vi-VN/error.json +1 -1
  66. package/locales/vi-VN/modelProvider.json +27 -1
  67. package/locales/vi-VN/welcome.json +1 -0
  68. package/locales/zh-CN/chat.json +5 -1
  69. package/locales/zh-CN/error.json +1 -1
  70. package/locales/zh-CN/modelProvider.json +27 -1
  71. package/locales/zh-CN/welcome.json +1 -0
  72. package/locales/zh-TW/chat.json +4 -0
  73. package/locales/zh-TW/error.json +1 -1
  74. package/locales/zh-TW/modelProvider.json +27 -1
  75. package/locales/zh-TW/welcome.json +1 -0
  76. package/package.json +2 -2
  77. package/src/app/api/chat/agentRuntime.test.ts +2 -2
  78. package/src/app/api/config/route.ts +2 -0
  79. package/src/app/settings/llm/Ollama/index.tsx +3 -6
  80. package/src/app/settings/llm/components/ProviderConfig/index.tsx +15 -14
  81. package/src/config/modelProviders/ollama.ts +38 -38
  82. package/src/const/settings/index.ts +1 -0
  83. package/src/features/Conversation/Error/{InvalidOllamaModel → OllamaBizError/InvalidOllamaModel}/index.tsx +1 -1
  84. package/src/features/Conversation/Error/OllamaBizError/SetupGuide.tsx +128 -0
  85. package/src/features/Conversation/Error/{OllamaBizError.tsx → OllamaBizError/index.tsx} +15 -2
  86. package/src/features/ModelSwitchPanel/index.tsx +0 -6
  87. package/src/libs/agent-runtime/ollama/index.ts +42 -57
  88. package/src/libs/agent-runtime/ollama/stream.ts +31 -0
  89. package/src/libs/agent-runtime/ollama/type.ts +8 -0
  90. package/src/libs/agent-runtime/types/chat.ts +0 -7
  91. package/src/libs/agent-runtime/zeroone/index.test.ts +16 -16
  92. package/src/locales/default/error.ts +2 -1
  93. package/src/locales/default/modelProvider.ts +29 -1
  94. package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +0 -1
  95. package/src/services/__tests__/chat.test.ts +1 -1
  96. package/src/services/ollama.ts +6 -9
  97. package/src/store/global/slices/settings/actions/llm.test.ts +0 -1
  98. package/src/store/global/slices/settings/selectors/modelProvider.test.ts +1 -1
  99. package/src/types/serverConfig.ts +1 -1
  100. package/src/libs/agent-runtime/ollama/index.test.ts +0 -365
  101. package/src/features/Conversation/Error/{InvalidOllamaModel → OllamaBizError/InvalidOllamaModel}/useDownloadMonitor.ts +0 -0
@@ -1,365 +0,0 @@
1
- // @vitest-environment node
2
- import OpenAI from 'openai';
3
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
-
5
- import { ChatStreamCallbacks, OpenAIChatMessage } from '@/libs/agent-runtime';
6
-
7
- import * as debugStreamModule from '../utils/debugStream';
8
- import { LobeOllamaAI } from './index';
9
-
10
- const provider = 'ollama';
11
- const defaultBaseURL = 'http://127.0.0.1:11434/v1';
12
- const bizErrorType = 'OllamaBizError';
13
- const invalidErrorType = 'InvalidOllamaArgs';
14
-
15
- // Mock the console.error to avoid polluting test output
16
- vi.spyOn(console, 'error').mockImplementation(() => {});
17
-
18
- let instance: LobeOllamaAI;
19
-
20
- beforeEach(() => {
21
- instance = new LobeOllamaAI({ apiKey: 'test' });
22
-
23
- // 使用 vi.spyOn 来模拟 chat.completions.create 方法
24
- vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
25
- new ReadableStream() as any,
26
- );
27
- });
28
-
29
- afterEach(() => {
30
- vi.clearAllMocks();
31
- });
32
-
33
- describe('LobeOllamaAI', () => {
34
- describe('init', () => {
35
- it('should correctly initialize with an API key', async () => {
36
- const instance = new LobeOllamaAI({ apiKey: 'test_api_key' });
37
- expect(instance).toBeInstanceOf(LobeOllamaAI);
38
- expect(instance.baseURL).toEqual(defaultBaseURL);
39
- });
40
- });
41
-
42
- describe('chat', () => {
43
- it('should return a StreamingTextResponse on successful API call', async () => {
44
- // Arrange
45
- const mockStream = new ReadableStream();
46
- const mockResponse = Promise.resolve(mockStream);
47
-
48
- (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
49
-
50
- // Act
51
- const result = await instance.chat({
52
- messages: [{ content: 'Hello', role: 'user' }],
53
- model: 'text-davinci-003',
54
- temperature: 0,
55
- });
56
-
57
- // Assert
58
- expect(result).toBeInstanceOf(Response);
59
- });
60
-
61
- describe('Error', () => {
62
- it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
63
- // Arrange
64
- const apiError = new OpenAI.APIError(
65
- 400,
66
- {
67
- status: 400,
68
- error: {
69
- message: 'Bad Request',
70
- },
71
- },
72
- 'Error message',
73
- {},
74
- );
75
-
76
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
77
-
78
- // Act
79
- try {
80
- await instance.chat({
81
- messages: [{ content: 'Hello', role: 'user' }],
82
- model: 'text-davinci-003',
83
- temperature: 0,
84
- });
85
- } catch (e) {
86
- expect(e).toEqual({
87
- endpoint: defaultBaseURL,
88
- error: {
89
- error: { message: 'Bad Request' },
90
- status: 400,
91
- },
92
- errorType: bizErrorType,
93
- provider,
94
- });
95
- }
96
- });
97
-
98
- it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
99
- try {
100
- new LobeOllamaAI({});
101
- } catch (e) {
102
- expect(e).toEqual({ errorType: invalidErrorType });
103
- }
104
- });
105
-
106
- it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
107
- // Arrange
108
- const errorInfo = {
109
- stack: 'abc',
110
- cause: {
111
- message: 'api is undefined',
112
- },
113
- };
114
- const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
115
-
116
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
117
-
118
- // Act
119
- try {
120
- await instance.chat({
121
- messages: [{ content: 'Hello', role: 'user' }],
122
- model: 'text-davinci-003',
123
- temperature: 0,
124
- });
125
- } catch (e) {
126
- expect(e).toEqual({
127
- endpoint: defaultBaseURL,
128
- error: {
129
- cause: { message: 'api is undefined' },
130
- stack: 'abc',
131
- },
132
- errorType: bizErrorType,
133
- provider,
134
- });
135
- }
136
- });
137
-
138
- it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
139
- // Arrange
140
- const errorInfo = {
141
- stack: 'abc',
142
- cause: { message: 'api is undefined' },
143
- };
144
- const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
145
-
146
- instance = new LobeOllamaAI({
147
- apiKey: 'test',
148
-
149
- baseURL: 'https://api.abc.com/v1',
150
- });
151
-
152
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
153
-
154
- // Act
155
- try {
156
- await instance.chat({
157
- messages: [{ content: 'Hello', role: 'user' }],
158
- model: 'gpt-3.5-turbo',
159
- temperature: 0,
160
- });
161
- } catch (e) {
162
- expect(e).toEqual({
163
- endpoint: 'https://api.***.com/v1',
164
- error: {
165
- cause: { message: 'api is undefined' },
166
- stack: 'abc',
167
- },
168
- errorType: bizErrorType,
169
- provider,
170
- });
171
- }
172
- });
173
-
174
- it('should throw an InvalidOllamaAPIKey error type on 401 status code', async () => {
175
- // Mock the API call to simulate a 401 error
176
- const error = new Error('Unauthorized') as any;
177
- error.status = 401;
178
- vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
179
-
180
- try {
181
- await instance.chat({
182
- messages: [{ content: 'Hello', role: 'user' }],
183
- model: 'gpt-3.5-turbo',
184
- temperature: 0,
185
- });
186
- } catch (e) {
187
- // Expect the chat method to throw an error with InvalidOllamaAPIKey
188
- expect(e).toEqual({
189
- endpoint: defaultBaseURL,
190
- error: new Error('Unauthorized'),
191
- errorType: invalidErrorType,
192
- provider,
193
- });
194
- }
195
- });
196
-
197
- it('should return AgentRuntimeError for non-OpenAI errors', async () => {
198
- // Arrange
199
- const genericError = new Error('Generic Error');
200
-
201
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
202
-
203
- // Act
204
- try {
205
- await instance.chat({
206
- messages: [{ content: 'Hello', role: 'user' }],
207
- model: 'text-davinci-003',
208
- temperature: 0,
209
- });
210
- } catch (e) {
211
- expect(e).toEqual({
212
- endpoint: defaultBaseURL,
213
- errorType: 'AgentRuntimeError',
214
- provider,
215
- error: {
216
- name: genericError.name,
217
- cause: genericError.cause,
218
- message: genericError.message,
219
- stack: genericError.stack,
220
- },
221
- });
222
- }
223
- });
224
- });
225
-
226
- describe('LobeOllamaAI chat with callback and headers', () => {
227
- it('should handle callback and headers correctly', async () => {
228
- // 模拟 chat.completions.create 方法返回一个可读流
229
- const mockCreateMethod = vi
230
- .spyOn(instance['client'].chat.completions, 'create')
231
- .mockResolvedValue(
232
- new ReadableStream({
233
- start(controller) {
234
- controller.enqueue({
235
- id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
236
- object: 'chat.completion.chunk',
237
- created: 1709125675,
238
- model: 'gpt-3.5-turbo-0125',
239
- system_fingerprint: 'fp_86156a94a0',
240
- choices: [
241
- { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
242
- ],
243
- });
244
- controller.close();
245
- },
246
- }) as any,
247
- );
248
-
249
- // 准备 callback 和 headers
250
- const mockCallback: ChatStreamCallbacks = {
251
- onStart: vi.fn(),
252
- onToken: vi.fn(),
253
- };
254
- const mockHeaders = { 'Custom-Header': 'TestValue' };
255
-
256
- // 执行测试
257
- const result = await instance.chat(
258
- {
259
- messages: [{ content: 'Hello', role: 'user' }],
260
- model: 'text-davinci-003',
261
- temperature: 0,
262
- },
263
- { callback: mockCallback, headers: mockHeaders },
264
- );
265
-
266
- // 验证 callback 被调用
267
- await result.text(); // 确保流被消费
268
- expect(mockCallback.onStart).toHaveBeenCalled();
269
- expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
270
-
271
- // 验证 headers 被正确传递
272
- expect(result.headers.get('Custom-Header')).toEqual('TestValue');
273
-
274
- // 清理
275
- mockCreateMethod.mockRestore();
276
- });
277
- });
278
-
279
- describe('DEBUG', () => {
280
- it('should call debugStream and return StreamingTextResponse when DEBUG_OLLAMA_CHAT_COMPLETION is 1', async () => {
281
- // Arrange
282
- const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
283
- const mockDebugStream = new ReadableStream({
284
- start(controller) {
285
- controller.enqueue('Debug stream content');
286
- controller.close();
287
- },
288
- }) as any;
289
- mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
290
-
291
- // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
292
- (instance['client'].chat.completions.create as Mock).mockResolvedValue({
293
- tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
294
- });
295
-
296
- // 保存原始环境变量值
297
- const originalDebugValue = process.env.DEBUG_OLLAMA_CHAT_COMPLETION;
298
-
299
- // 模拟环境变量
300
- process.env.DEBUG_OLLAMA_CHAT_COMPLETION = '1';
301
- vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
302
-
303
- // 执行测试
304
- // 运行你的测试函数,确保它会在条件满足时调用 debugStream
305
- // 假设的测试函数调用,你可能需要根据实际情况调整
306
- await instance.chat({
307
- messages: [{ content: 'Hello', role: 'user' }],
308
- model: 'text-davinci-003',
309
- temperature: 0,
310
- });
311
-
312
- // 验证 debugStream 被调用
313
- expect(debugStreamModule.debugStream).toHaveBeenCalled();
314
-
315
- // 恢复原始环境变量值
316
- process.env.DEBUG_OLLAMA_CHAT_COMPLETION = originalDebugValue;
317
- });
318
- });
319
- });
320
-
321
- describe('private method', () => {
322
- describe('convertContentToOllamaMessage', () => {
323
- it('should format message array content of UserMessageContentPart to match ollama api', () => {
324
- const message: OpenAIChatMessage = {
325
- role: 'user',
326
- content: [
327
- {
328
- type: 'text',
329
- text: 'Hello',
330
- },
331
- {
332
- type: 'image_url',
333
- image_url: {
334
- detail: 'auto',
335
- url: 'data:image/png;base64,iVBO...',
336
- },
337
- },
338
- ],
339
- };
340
-
341
- const ollamaMessage = instance['convertContentToOllamaMessage'](message);
342
-
343
- expect(ollamaMessage).toEqual({
344
- role: 'user',
345
- content: 'Hello',
346
- images: ['iVBO...'],
347
- });
348
- });
349
-
350
- it('should not affect string type message content', () => {
351
- const message: OpenAIChatMessage = {
352
- role: 'user',
353
- content: 'Hello',
354
- };
355
-
356
- const ollamaMessage = instance['convertContentToOllamaMessage'](message);
357
-
358
- expect(ollamaMessage).toEqual({
359
- role: 'user',
360
- content: 'Hello',
361
- });
362
- });
363
- });
364
- });
365
- });