@lobehub/chat 1.57.0 → 1.58.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docker-compose/local/docker-compose.yml +1 -0
  4. package/locales/ar/modelProvider.json +24 -0
  5. package/locales/ar/models.json +60 -0
  6. package/locales/ar/providers.json +12 -0
  7. package/locales/bg-BG/modelProvider.json +24 -0
  8. package/locales/bg-BG/models.json +60 -0
  9. package/locales/bg-BG/providers.json +12 -0
  10. package/locales/de-DE/modelProvider.json +24 -0
  11. package/locales/de-DE/models.json +60 -0
  12. package/locales/de-DE/providers.json +12 -0
  13. package/locales/en-US/modelProvider.json +24 -0
  14. package/locales/en-US/models.json +60 -0
  15. package/locales/en-US/providers.json +12 -0
  16. package/locales/es-ES/modelProvider.json +24 -0
  17. package/locales/es-ES/models.json +60 -0
  18. package/locales/es-ES/providers.json +12 -0
  19. package/locales/fa-IR/modelProvider.json +30 -0
  20. package/locales/fa-IR/models.json +60 -0
  21. package/locales/fa-IR/providers.json +12 -0
  22. package/locales/fr-FR/modelProvider.json +24 -0
  23. package/locales/fr-FR/models.json +60 -0
  24. package/locales/fr-FR/providers.json +12 -0
  25. package/locales/it-IT/modelProvider.json +24 -0
  26. package/locales/it-IT/models.json +60 -0
  27. package/locales/it-IT/providers.json +12 -0
  28. package/locales/ja-JP/modelProvider.json +24 -0
  29. package/locales/ja-JP/models.json +60 -0
  30. package/locales/ja-JP/providers.json +12 -0
  31. package/locales/ko-KR/modelProvider.json +24 -0
  32. package/locales/ko-KR/models.json +60 -0
  33. package/locales/ko-KR/providers.json +12 -0
  34. package/locales/nl-NL/modelProvider.json +24 -0
  35. package/locales/nl-NL/models.json +60 -0
  36. package/locales/nl-NL/providers.json +12 -0
  37. package/locales/pl-PL/modelProvider.json +24 -0
  38. package/locales/pl-PL/models.json +60 -0
  39. package/locales/pl-PL/providers.json +12 -0
  40. package/locales/pt-BR/modelProvider.json +24 -0
  41. package/locales/pt-BR/models.json +60 -0
  42. package/locales/pt-BR/providers.json +12 -0
  43. package/locales/ru-RU/modelProvider.json +24 -0
  44. package/locales/ru-RU/models.json +60 -0
  45. package/locales/ru-RU/providers.json +12 -0
  46. package/locales/tr-TR/modelProvider.json +30 -0
  47. package/locales/tr-TR/models.json +60 -0
  48. package/locales/tr-TR/providers.json +12 -0
  49. package/locales/vi-VN/modelProvider.json +24 -0
  50. package/locales/vi-VN/models.json +60 -0
  51. package/locales/vi-VN/providers.json +12 -0
  52. package/locales/zh-CN/modelProvider.json +24 -0
  53. package/locales/zh-CN/models.json +1112 -1052
  54. package/locales/zh-CN/providers.json +80 -68
  55. package/locales/zh-TW/modelProvider.json +24 -0
  56. package/locales/zh-TW/models.json +60 -0
  57. package/locales/zh-TW/providers.json +12 -0
  58. package/package.json +4 -2
  59. package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/File.tsx +1 -1
  60. package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/index.tsx +19 -9
  61. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/index.tsx +1 -2
  62. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +15 -1
  63. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Mobile/ChatHeader/index.tsx +0 -2
  64. package/src/app/[variants]/(main)/chat/(workspace)/features/AgentSettings/index.tsx +1 -1
  65. package/src/app/[variants]/(main)/chat/settings/page.tsx +95 -5
  66. package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +58 -0
  67. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +13 -2
  68. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +6 -8
  69. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/index.tsx +5 -6
  70. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx +4 -3
  71. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext.ts +2 -0
  72. package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx +6 -7
  73. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +1 -1
  74. package/src/config/aiModels/azureai.ts +18 -0
  75. package/src/config/aiModels/index.ts +3 -0
  76. package/src/config/modelProviders/azure.ts +2 -1
  77. package/src/config/modelProviders/azureai.ts +19 -0
  78. package/src/config/modelProviders/index.ts +3 -0
  79. package/src/database/server/models/aiProvider.ts +2 -0
  80. package/src/features/AgentSetting/AgentMeta/index.tsx +19 -9
  81. package/src/features/AgentSetting/AgentPrompt/index.tsx +32 -2
  82. package/src/features/AgentSetting/AgentSettings.tsx +4 -5
  83. package/src/features/AgentSetting/AgentSettingsProvider.tsx +17 -0
  84. package/src/features/AgentSetting/StoreUpdater.tsx +5 -2
  85. package/src/features/AgentSetting/index.tsx +1 -1
  86. package/src/features/AgentSetting/store/initialState.ts +2 -1
  87. package/src/hooks/useInterceptingRoutes.test.ts +1 -1
  88. package/src/hooks/useInterceptingRoutes.ts +1 -1
  89. package/src/layout/GlobalProvider/StoreInitialization.tsx +1 -1
  90. package/src/libs/agent-runtime/AgentRuntime.ts +13 -6
  91. package/src/libs/agent-runtime/azureai/index.ts +109 -0
  92. package/src/libs/agent-runtime/baichuan/index.test.ts +8 -250
  93. package/src/libs/agent-runtime/cloudflare/index.ts +22 -18
  94. package/src/libs/agent-runtime/index.ts +1 -0
  95. package/src/libs/agent-runtime/types/type.ts +1 -0
  96. package/src/libs/agent-runtime/utils/streams/__snapshots__/protocol.test.ts.snap +331 -0
  97. package/src/libs/agent-runtime/utils/streams/protocol.test.ts +137 -0
  98. package/src/libs/agent-runtime/utils/streams/protocol.ts +34 -0
  99. package/src/locales/default/modelProvider.ts +25 -0
  100. package/src/server/modules/AgentRuntime/index.ts +8 -1
  101. package/src/services/chat.ts +12 -3
  102. package/src/store/agent/slices/chat/action.test.ts +3 -3
  103. package/src/store/agent/slices/chat/action.ts +2 -5
  104. package/src/types/aiProvider.ts +1 -0
  105. package/src/types/user/settings/keyVaults.ts +1 -0
  106. package/src/app/[variants]/(main)/chat/settings/features/EditPage.tsx +0 -45
  107. package/src/features/AgentSetting/AgentSettingsStore.tsx +0 -14
  108. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/Image.tsx +0 -0
  109. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/index.tsx +0 -0
  110. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/style.ts +0 -0
  111. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/index.tsx +0 -0
  112. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/InputArea/Container.tsx +0 -0
  113. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/InputArea/index.tsx +0 -0
  114. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Send.tsx +0 -0
@@ -0,0 +1,331 @@
1
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
2
+
3
+ exports[`createSSEDataExtractor > real world data > should convert azure ai data 1`] = `
4
+ [
5
+ {
6
+ "choices": [
7
+ {
8
+ "delta": {
9
+ "content": "",
10
+ "reasoning_content": null,
11
+ "role": "assistant",
12
+ "tool_calls": null,
13
+ },
14
+ "finish_reason": null,
15
+ "index": 0,
16
+ "logprobs": null,
17
+ "matched_stop": null,
18
+ },
19
+ ],
20
+ "created": 1739714651,
21
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
22
+ "model": "DeepSeek-R1",
23
+ "object": "chat.completion.chunk",
24
+ "usage": null,
25
+ },
26
+ {
27
+ "choices": [
28
+ {
29
+ "delta": {
30
+ "content": "<think>",
31
+ "reasoning_content": null,
32
+ "role": null,
33
+ "tool_calls": null,
34
+ },
35
+ "finish_reason": null,
36
+ "index": 0,
37
+ "logprobs": null,
38
+ "matched_stop": null,
39
+ },
40
+ ],
41
+ "created": 1739714651,
42
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
43
+ "model": "DeepSeek-R1",
44
+ "object": "chat.completion.chunk",
45
+ "usage": null,
46
+ },
47
+ {
48
+ "choices": [
49
+ {
50
+ "delta": {
51
+ "content": "</think>",
52
+ "reasoning_content": null,
53
+ "role": null,
54
+ "tool_calls": null,
55
+ },
56
+ "finish_reason": null,
57
+ "index": 0,
58
+ "logprobs": null,
59
+ "matched_stop": null,
60
+ },
61
+ ],
62
+ "created": 1739714651,
63
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
64
+ "model": "DeepSeek-R1",
65
+ "object": "chat.completion.chunk",
66
+ "usage": null,
67
+ },
68
+ {
69
+ "choices": [
70
+ {
71
+ "delta": {
72
+ "content": "Hello",
73
+ "reasoning_content": null,
74
+ "role": null,
75
+ "tool_calls": null,
76
+ },
77
+ "finish_reason": null,
78
+ "index": 0,
79
+ "logprobs": null,
80
+ "matched_stop": null,
81
+ },
82
+ ],
83
+ "created": 1739714651,
84
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
85
+ "model": "DeepSeek-R1",
86
+ "object": "chat.completion.chunk",
87
+ "usage": null,
88
+ },
89
+ {
90
+ "choices": [
91
+ {
92
+ "delta": {
93
+ "content": "!",
94
+ "reasoning_content": null,
95
+ "role": null,
96
+ "tool_calls": null,
97
+ },
98
+ "finish_reason": null,
99
+ "index": 0,
100
+ "logprobs": null,
101
+ "matched_stop": null,
102
+ },
103
+ ],
104
+ "created": 1739714652,
105
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
106
+ "model": "DeepSeek-R1",
107
+ "object": "chat.completion.chunk",
108
+ "usage": null,
109
+ },
110
+ {
111
+ "choices": [
112
+ {
113
+ "delta": {
114
+ "content": " How",
115
+ "reasoning_content": null,
116
+ "role": null,
117
+ "tool_calls": null,
118
+ },
119
+ "finish_reason": null,
120
+ "index": 0,
121
+ "logprobs": null,
122
+ "matched_stop": null,
123
+ },
124
+ ],
125
+ "created": 1739714652,
126
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
127
+ "model": "DeepSeek-R1",
128
+ "object": "chat.completion.chunk",
129
+ "usage": null,
130
+ },
131
+ {
132
+ "choices": [
133
+ {
134
+ "delta": {
135
+ "content": " can",
136
+ "reasoning_content": null,
137
+ "role": null,
138
+ "tool_calls": null,
139
+ },
140
+ "finish_reason": null,
141
+ "index": 0,
142
+ "logprobs": null,
143
+ "matched_stop": null,
144
+ },
145
+ ],
146
+ "created": 1739714652,
147
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
148
+ "model": "DeepSeek-R1",
149
+ "object": "chat.completion.chunk",
150
+ "usage": null,
151
+ },
152
+ {
153
+ "choices": [
154
+ {
155
+ "delta": {
156
+ "content": " I",
157
+ "reasoning_content": null,
158
+ "role": null,
159
+ "tool_calls": null,
160
+ },
161
+ "finish_reason": null,
162
+ "index": 0,
163
+ "logprobs": null,
164
+ "matched_stop": null,
165
+ },
166
+ ],
167
+ "created": 1739714652,
168
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
169
+ "model": "DeepSeek-R1",
170
+ "object": "chat.completion.chunk",
171
+ "usage": null,
172
+ },
173
+ {
174
+ "choices": [
175
+ {
176
+ "delta": {
177
+ "content": " assist",
178
+ "reasoning_content": null,
179
+ "role": null,
180
+ "tool_calls": null,
181
+ },
182
+ "finish_reason": null,
183
+ "index": 0,
184
+ "logprobs": null,
185
+ "matched_stop": null,
186
+ },
187
+ ],
188
+ "created": 1739714652,
189
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
190
+ "model": "DeepSeek-R1",
191
+ "object": "chat.completion.chunk",
192
+ "usage": null,
193
+ },
194
+ {
195
+ "choices": [
196
+ {
197
+ "delta": {
198
+ "content": " you",
199
+ "reasoning_content": null,
200
+ "role": null,
201
+ "tool_calls": null,
202
+ },
203
+ "finish_reason": null,
204
+ "index": 0,
205
+ "logprobs": null,
206
+ "matched_stop": null,
207
+ },
208
+ ],
209
+ "created": 1739714652,
210
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
211
+ "model": "DeepSeek-R1",
212
+ "object": "chat.completion.chunk",
213
+ "usage": null,
214
+ },
215
+ {
216
+ "choices": [
217
+ {
218
+ "delta": {
219
+ "content": " today",
220
+ "reasoning_content": null,
221
+ "role": null,
222
+ "tool_calls": null,
223
+ },
224
+ "finish_reason": null,
225
+ "index": 0,
226
+ "logprobs": null,
227
+ "matched_stop": null,
228
+ },
229
+ ],
230
+ "created": 1739714652,
231
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
232
+ "model": "DeepSeek-R1",
233
+ "object": "chat.completion.chunk",
234
+ "usage": null,
235
+ },
236
+ {
237
+ "choices": [
238
+ {
239
+ "delta": {
240
+ "content": "?",
241
+ "reasoning_content": null,
242
+ "role": null,
243
+ "tool_calls": null,
244
+ },
245
+ "finish_reason": null,
246
+ "index": 0,
247
+ "logprobs": null,
248
+ "matched_stop": null,
249
+ },
250
+ ],
251
+ "created": 1739714652,
252
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
253
+ "model": "DeepSeek-R1",
254
+ "object": "chat.completion.chunk",
255
+ "usage": null,
256
+ },
257
+ {
258
+ "choices": [
259
+ {
260
+ "delta": {
261
+ "content": " ",
262
+ "reasoning_content": null,
263
+ "role": null,
264
+ "tool_calls": null,
265
+ },
266
+ "finish_reason": null,
267
+ "index": 0,
268
+ "logprobs": null,
269
+ "matched_stop": null,
270
+ },
271
+ ],
272
+ "created": 1739714652,
273
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
274
+ "model": "DeepSeek-R1",
275
+ "object": "chat.completion.chunk",
276
+ "usage": null,
277
+ },
278
+ {
279
+ "choices": [
280
+ {
281
+ "delta": {
282
+ "content": "😊",
283
+ "reasoning_content": null,
284
+ "role": null,
285
+ "tool_calls": null,
286
+ },
287
+ "finish_reason": null,
288
+ "index": 0,
289
+ "logprobs": null,
290
+ "matched_stop": null,
291
+ },
292
+ ],
293
+ "created": 1739714652,
294
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
295
+ "model": "DeepSeek-R1",
296
+ "object": "chat.completion.chunk",
297
+ "usage": null,
298
+ },
299
+ {
300
+ "choices": [
301
+ {
302
+ "delta": {
303
+ "content": "",
304
+ "reasoning_content": null,
305
+ "role": null,
306
+ "tool_calls": null,
307
+ },
308
+ "finish_reason": "stop",
309
+ "index": 0,
310
+ "logprobs": null,
311
+ "matched_stop": 1,
312
+ },
313
+ ],
314
+ "created": 1739714652,
315
+ "id": "1392a93d52c3483ea872d0ab2aaff7d7",
316
+ "model": "DeepSeek-R1",
317
+ "object": "chat.completion.chunk",
318
+ "usage": null,
319
+ },
320
+ {
321
+ "choices": [],
322
+ "id": "79fca0de792a4ffb8ec836442a2a42c0",
323
+ "model": "DeepSeek-R1",
324
+ "usage": {
325
+ "completion_tokens": 16,
326
+ "prompt_tokens": 4,
327
+ "total_tokens": 20,
328
+ },
329
+ },
330
+ ]
331
+ `;
@@ -0,0 +1,137 @@
1
import { describe, expect, it } from 'vitest';

import { createSSEDataExtractor } from './protocol';

describe('createSSEDataExtractor', () => {
  // Helper function to convert string to Uint8Array
  const stringToUint8Array = (str: string): Uint8Array => {
    return new TextEncoder().encode(str);
  };

  // Helper function to process chunks through transformer
  // (pipes one encoded chunk through the stream and collects every emitted value)
  const processChunk = async (transformer: TransformStream, chunk: Uint8Array) => {
    const results: any[] = [];
    const readable = new ReadableStream({
      start(controller) {
        controller.enqueue(chunk);
        controller.close();
      },
    });

    const writable = new WritableStream({
      write(chunk) {
        results.push(chunk);
      },
    });

    await readable.pipeThrough(transformer).pipeTo(writable);

    return results;
  };

  it('should correctly transform single SSE data line', async () => {
    const transformer = createSSEDataExtractor();
    const input = 'data: {"message": "hello"}\n';
    const chunk = stringToUint8Array(input);

    const results = await processChunk(transformer, chunk);

    expect(results).toEqual([{ message: 'hello' }]);
  });

  it('should handle multiple SSE data lines', async () => {
    const transformer = createSSEDataExtractor();
    const input = `data: {"message": "hello"}\ndata: {"message": "world"}\n`;
    const chunk = stringToUint8Array(input);

    const results = await processChunk(transformer, chunk);

    expect(results).toEqual([{ message: 'hello' }, { message: 'world' }]);
  });

  it('should ignore non-data lines', async () => {
    // `id:` and `event:` fields are valid SSE framing but carry no payload here
    const transformer = createSSEDataExtractor();
    const input = `id: 1\ndata: {"message": "hello"}\nevent: message\n`;
    const chunk = stringToUint8Array(input);

    const results = await processChunk(transformer, chunk);

    expect(results).toEqual([{ message: 'hello' }]);
  });

  it('should skip [DONE] heartbeat messages', async () => {
    const transformer = createSSEDataExtractor();
    const input = `data: {"message": "hello"}\ndata: [DONE]\ndata: {"message": "world"}\n`;
    const chunk = stringToUint8Array(input);

    const results = await processChunk(transformer, chunk);

    expect(results).toEqual([{ message: 'hello' }, { message: 'world' }]);
  });

  it('should handle invalid JSON gracefully', async () => {
    // a malformed payload must be dropped without breaking the rest of the stream
    const transformer = createSSEDataExtractor();
    const input = `data: {"message": "hello"}\ndata: invalid-json\ndata: {"message": "world"}\n`;
    const chunk = stringToUint8Array(input);

    const results = await processChunk(transformer, chunk);

    expect(results).toEqual([{ message: 'hello' }, { message: 'world' }]);
  });

  it('should handle empty data lines', async () => {
    const transformer = createSSEDataExtractor();
    const input = `data: \ndata: {"message": "hello"}\ndata: \n`;
    const chunk = stringToUint8Array(input);

    const results = await processChunk(transformer, chunk);

    expect(results).toEqual([{ message: 'hello' }]);
  });

  it('should process large chunks of data correctly', async () => {
    const transformer = createSSEDataExtractor();
    const messages = Array(100)
      .fill(null)
      .map((_, i) => `data: {"message": "message${i}"}\n`)
      .join('');
    const chunk = stringToUint8Array(messages);

    const results = await processChunk(transformer, chunk);

    expect(results).toHaveLength(100);
    expect(results[0]).toEqual({ message: 'message0' });
    expect(results[99]).toEqual({ message: 'message99' });
  });

  describe('real world data', () => {
    it('should convert azure ai data', async () => {
      // Raw SSE chunks from an Azure AI streaming completion ("DeepSeek-R1");
      // the expected parsed output lives in __snapshots__/protocol.test.ts.snap
      const chunks = [
        `data: {"choices":[{"delta":{"content":"","reasoning_content":null,"role":"assistant","tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714651,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"\u003cthink\u003e","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714651,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"\n\n","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714651,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"\u003c/think\u003e","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714651,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"\n\n","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714651,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"Hello","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714651,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"!","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" How","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" can","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" I","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" assist","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" you","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" today","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"?","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":" ","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"😊","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null,"matched_stop":null}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[{"delta":{"content":"","reasoning_content":null,"role":null,"tool_calls":null},"finish_reason":"stop","index":0,"logprobs":null,"matched_stop":1}],"created":1739714652,"id":"1392a93d52c3483ea872d0ab2aaff7d7","model":"DeepSeek-R1","object":"chat.completion.chunk","usage":null}\n`,
        `data: {"choices":[],"id":"79fca0de792a4ffb8ec836442a2a42c0","model":"DeepSeek-R1","usage":{"completion_tokens":16,"prompt_tokens":4,"total_tokens":20}}\n`,
        `data: [DONE]`,
      ];

      const transformer = createSSEDataExtractor();

      const results = await processChunk(transformer, stringToUint8Array(chunks.join('')));
      expect(results).matchSnapshot();
    });
  });
});
@@ -170,3 +170,37 @@ export const createFirstErrorHandleTransformer = (
170
170
  },
171
171
  });
172
172
  };
173
+
174
+ /**
175
+ * create a transformer to remove SSE format data
176
+ */
177
+ export const createSSEDataExtractor = () =>
178
+ new TransformStream({
179
+ transform(chunk: Uint8Array, controller) {
180
+ // 将 Uint8Array 转换为字符串
181
+ const text = new TextDecoder().decode(chunk, { stream: true });
182
+
183
+ // 处理多行数据的情况
184
+ const lines = text.split('\n');
185
+
186
+ for (const line of lines) {
187
+ // 只处理以 "data: " 开头的行
188
+ if (line.startsWith('data: ')) {
189
+ // 提取 "data: " 后面的实际数据
190
+ const jsonText = line.slice(6);
191
+
192
+ // 跳过心跳消息
193
+ if (jsonText === '[DONE]') continue;
194
+
195
+ try {
196
+ // 解析 JSON 数据
197
+ const data = JSON.parse(jsonText);
198
+ // 将解析后的数据传递给下一个处理器
199
+ controller.enqueue(data);
200
+ } catch {
201
+ console.warn('Failed to parse SSE data:', jsonText);
202
+ }
203
+ }
204
+ }
205
+ },
206
+ });
@@ -19,6 +19,25 @@ export default {
19
19
  title: 'API Key',
20
20
  },
21
21
  },
22
+ azureai: {
23
+ azureApiVersion: {
24
+ desc: 'Azure 的 API 版本,遵循 YYYY-MM-DD 格式,查阅[最新版本](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions)',
25
+ fetch: '获取列表',
26
+ title: 'Azure API Version',
27
+ },
28
+ endpoint: {
29
+ desc: '从 Azure AI 项目概述找到 Azure AI 模型推理终结点',
30
+ placeholder: 'https://ai-userxxxxxxxxxx.services.ai.azure.com/models',
31
+ title: 'Azure AI 终结点',
32
+ },
33
+ title: 'Azure OpenAI',
34
+ token: {
35
+ desc: '从 Azure AI 项目概述找到 API 密钥',
36
+ placeholder: 'Azure 密钥',
37
+ title: '密钥',
38
+ },
39
+ },
40
+
22
41
  bedrock: {
23
42
  accessKeyId: {
24
43
  desc: '填入 AWS Access Key Id',
@@ -98,6 +117,7 @@ export default {
98
117
  title: '代理地址',
99
118
  },
100
119
  sdkType: {
120
+ placeholder: 'openai/anthropic/azureai/ollama/...',
101
121
  required: '请选择 SDK 类型',
102
122
  title: '请求格式',
103
123
  },
@@ -211,6 +231,11 @@ export default {
211
231
  placeholder: '请输入 Azure 中的模型部署名称',
212
232
  title: '模型部署名称',
213
233
  },
234
+ deployName: {
235
+ extra: '发送请求时会将该字段作为模型 ID',
236
+ placeholder: '请输入模型实际部署的名称或 id',
237
+ title: '模型部署名称',
238
+ },
214
239
  displayName: {
215
240
  placeholder: '请输入模型的展示名称,例如 ChatGPT、GPT-4 等',
216
241
  title: '模型展示名称',
@@ -57,6 +57,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
57
57
  return { apiKey, apiVersion, baseURL };
58
58
  }
59
59
 
60
+ case ModelProvider.AzureAI: {
61
+ const { AZUREAI_ENDPOINT, AZUREAI_ENDPOINT_KEY } = llmConfig;
62
+ const apiKey = payload?.apiKey || AZUREAI_ENDPOINT_KEY;
63
+ const baseURL = payload?.baseURL || AZUREAI_ENDPOINT;
64
+ return { apiKey, baseURL };
65
+ }
66
+
60
67
  case ModelProvider.Bedrock: {
61
68
  const { AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID, AWS_REGION, AWS_SESSION_TOKEN } = llmConfig;
62
69
  let accessKeyId: string | undefined = AWS_ACCESS_KEY_ID;
@@ -100,7 +107,7 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
100
107
 
101
108
  return { apiKey };
102
109
  }
103
-
110
+
104
111
  case ModelProvider.TencentCloud: {
105
112
  const { TENCENT_CLOUD_API_KEY } = llmConfig;
106
113
 
@@ -50,7 +50,10 @@ const isCanUseFC = (model: string, provider: string) => {
50
50
  return aiModelSelectors.isModelSupportToolUse(model, provider)(useAiInfraStore.getState());
51
51
  };
52
52
 
53
- const findAzureDeploymentName = (model: string) => {
53
+ /**
54
+ * TODO: we need to update this function to auto find deploymentName
55
+ */
56
+ const findDeploymentName = (model: string) => {
54
57
  let deploymentId = model;
55
58
 
56
59
  // TODO: remove isDeprecatedEdition condition in V2.0
@@ -222,8 +225,14 @@ class ChatService {
222
225
  let model = res.model || DEFAULT_AGENT_CONFIG.model;
223
226
 
224
227
  // if the provider is Azure, get the deployment name as the request model
225
- if (provider === ModelProvider.Azure || provider === ModelProvider.Doubao) {
226
- model = findAzureDeploymentName(model);
228
+ const providersWithDeploymentName = [
229
+ ModelProvider.Azure,
230
+ ModelProvider.Doubao,
231
+ ModelProvider.AzureAI,
232
+ ] as string[];
233
+
234
+ if (providersWithDeploymentName.includes(provider)) {
235
+ model = findDeploymentName(model);
227
236
  }
228
237
 
229
238
  const payload = merge(
@@ -216,7 +216,7 @@ describe('AgentSlice', () => {
216
216
  model: 'gemini-pro',
217
217
  } as any);
218
218
 
219
- renderHook(() => result.current.useInitAgentStore(true));
219
+ renderHook(() => result.current.useInitInboxAgentStore(true));
220
220
 
221
221
  await waitFor(async () => {
222
222
  expect(result.current.agentMap[INBOX_SESSION_ID]).toEqual({ model: 'gemini-pro' });
@@ -230,7 +230,7 @@ describe('AgentSlice', () => {
230
230
  model: 'gemini-pro',
231
231
  } as any);
232
232
 
233
- renderHook(() => result.current.useInitAgentStore(false));
233
+ renderHook(() => result.current.useInitInboxAgentStore(false));
234
234
 
235
235
  await waitFor(async () => {
236
236
  expect(result.current.agentMap[INBOX_SESSION_ID]).toBeUndefined();
@@ -243,7 +243,7 @@ describe('AgentSlice', () => {
243
243
 
244
244
  vi.spyOn(globalService, 'getDefaultAgentConfig').mockRejectedValueOnce(new Error());
245
245
 
246
- renderHook(() => result.current.useInitAgentStore(true));
246
+ renderHook(() => result.current.useInitInboxAgentStore(true));
247
247
 
248
248
  await waitFor(async () => {
249
249
  expect(result.current.agentMap[INBOX_SESSION_ID]).toBeUndefined();
@@ -6,7 +6,6 @@ import { StateCreator } from 'zustand/vanilla';
6
6
 
7
7
  import { MESSAGE_CANCEL_FLAT } from '@/const/message';
8
8
  import { INBOX_SESSION_ID } from '@/const/session';
9
- import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
10
9
  import { useClientDataSWR, useOnlyFetchOnceSWR } from '@/libs/swr';
11
10
  import { agentService } from '@/services/agent';
12
11
  import { sessionService } from '@/services/session';
@@ -51,7 +50,7 @@ export interface AgentChatAction {
51
50
  updateAgentConfig: (config: DeepPartial<LobeAgentConfig>) => Promise<void>;
52
51
  useFetchAgentConfig: (id: string) => SWRResponse<LobeAgentConfig>;
53
52
  useFetchFilesAndKnowledgeBases: () => SWRResponse<KnowledgeItem[]>;
54
- useInitAgentStore: (
53
+ useInitInboxAgentStore: (
55
54
  isLogin: boolean | undefined,
56
55
  defaultAgentConfig?: DeepPartial<LobeAgentConfig>,
57
56
  ) => SWRResponse<DeepPartial<LobeAgentConfig>>;
@@ -164,12 +163,10 @@ export const createChatSlice: StateCreator<
164
163
  [FETCH_AGENT_CONFIG_KEY, sessionId],
165
164
  ([, id]: string[]) => sessionService.getSessionConfig(id),
166
165
  {
167
- fallbackData: DEFAULT_AGENT_CONFIG,
168
166
  onSuccess: (data) => {
169
167
  get().internal_dispatchAgentMap(sessionId, data, 'fetch');
170
168
  set({ activeAgentId: data.id }, false, 'updateActiveAgentId');
171
169
  },
172
- suspense: true,
173
170
  },
174
171
  ),
175
172
  useFetchFilesAndKnowledgeBases: () => {
@@ -183,7 +180,7 @@ export const createChatSlice: StateCreator<
183
180
  );
184
181
  },
185
182
 
186
- useInitAgentStore: (isLogin, defaultAgentConfig) =>
183
+ useInitInboxAgentStore: (isLogin, defaultAgentConfig) =>
187
184
  useOnlyFetchOnceSWR<DeepPartial<LobeAgentConfig>>(
188
185
  !!isLogin ? 'fetchInboxAgentConfig' : null,
189
186
  () => sessionService.getSessionConfig(INBOX_SESSION_ID),
@@ -16,6 +16,7 @@ export type AiProviderSourceType = (typeof AiProviderSourceEnum)[keyof typeof Ai
16
16
  export const AiProviderSDKEnum = {
17
17
  Anthropic: 'anthropic',
18
18
  Azure: 'azure',
19
+ AzureAI: 'azureai',
19
20
  Bedrock: 'bedrock',
20
21
  Cloudflare: 'cloudflare',
21
22
  Doubao: 'doubao',