@lobehub/chat 1.60.8 → 1.61.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. package/.github/ISSUE_TEMPLATE/1_bug_report.yml +2 -1
  2. package/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml +1 -0
  3. package/.github/workflows/docker-pglite.yml +161 -0
  4. package/CHANGELOG.md +50 -0
  5. package/Dockerfile.pglite +244 -0
  6. package/changelog/v1.json +17 -0
  7. package/locales/ar/error.json +1 -0
  8. package/locales/ar/modelProvider.json +7 -0
  9. package/locales/ar/models.json +3 -12
  10. package/locales/ar/providers.json +3 -0
  11. package/locales/bg-BG/error.json +1 -0
  12. package/locales/bg-BG/modelProvider.json +7 -0
  13. package/locales/bg-BG/models.json +3 -12
  14. package/locales/bg-BG/providers.json +3 -0
  15. package/locales/de-DE/error.json +1 -0
  16. package/locales/de-DE/modelProvider.json +7 -0
  17. package/locales/de-DE/models.json +3 -12
  18. package/locales/de-DE/providers.json +3 -0
  19. package/locales/en-US/error.json +1 -0
  20. package/locales/en-US/modelProvider.json +7 -0
  21. package/locales/en-US/models.json +3 -12
  22. package/locales/en-US/providers.json +3 -0
  23. package/locales/es-ES/error.json +1 -0
  24. package/locales/es-ES/modelProvider.json +7 -0
  25. package/locales/es-ES/models.json +3 -12
  26. package/locales/es-ES/providers.json +3 -0
  27. package/locales/fa-IR/error.json +1 -0
  28. package/locales/fa-IR/modelProvider.json +7 -0
  29. package/locales/fa-IR/models.json +3 -12
  30. package/locales/fa-IR/providers.json +3 -0
  31. package/locales/fr-FR/error.json +1 -0
  32. package/locales/fr-FR/modelProvider.json +7 -0
  33. package/locales/fr-FR/models.json +3 -12
  34. package/locales/fr-FR/providers.json +3 -0
  35. package/locales/it-IT/error.json +1 -0
  36. package/locales/it-IT/modelProvider.json +7 -0
  37. package/locales/it-IT/models.json +3 -12
  38. package/locales/it-IT/providers.json +3 -0
  39. package/locales/ja-JP/error.json +1 -0
  40. package/locales/ja-JP/modelProvider.json +7 -0
  41. package/locales/ja-JP/models.json +3 -12
  42. package/locales/ja-JP/providers.json +3 -0
  43. package/locales/ko-KR/error.json +1 -0
  44. package/locales/ko-KR/modelProvider.json +7 -0
  45. package/locales/ko-KR/models.json +3 -12
  46. package/locales/ko-KR/providers.json +3 -0
  47. package/locales/nl-NL/error.json +1 -0
  48. package/locales/nl-NL/modelProvider.json +7 -0
  49. package/locales/nl-NL/models.json +3 -12
  50. package/locales/nl-NL/providers.json +3 -0
  51. package/locales/pl-PL/error.json +1 -0
  52. package/locales/pl-PL/modelProvider.json +7 -0
  53. package/locales/pl-PL/models.json +3 -12
  54. package/locales/pl-PL/providers.json +3 -0
  55. package/locales/pt-BR/error.json +1 -0
  56. package/locales/pt-BR/modelProvider.json +7 -0
  57. package/locales/pt-BR/models.json +3 -12
  58. package/locales/pt-BR/providers.json +3 -0
  59. package/locales/ru-RU/error.json +1 -0
  60. package/locales/ru-RU/modelProvider.json +7 -0
  61. package/locales/ru-RU/models.json +3 -12
  62. package/locales/ru-RU/providers.json +3 -0
  63. package/locales/tr-TR/error.json +1 -0
  64. package/locales/tr-TR/modelProvider.json +7 -0
  65. package/locales/tr-TR/models.json +3 -12
  66. package/locales/tr-TR/providers.json +3 -0
  67. package/locales/vi-VN/error.json +1 -0
  68. package/locales/vi-VN/modelProvider.json +7 -0
  69. package/locales/vi-VN/models.json +3 -12
  70. package/locales/vi-VN/providers.json +3 -0
  71. package/locales/zh-CN/error.json +1 -0
  72. package/locales/zh-CN/modelProvider.json +7 -0
  73. package/locales/zh-CN/models.json +3 -12
  74. package/locales/zh-CN/providers.json +3 -0
  75. package/locales/zh-TW/error.json +1 -0
  76. package/locales/zh-TW/modelProvider.json +7 -0
  77. package/locales/zh-TW/models.json +3 -12
  78. package/locales/zh-TW/providers.json +3 -0
  79. package/package.json +2 -1
  80. package/src/app/(backend)/webapi/chat/vertexai/route.ts +35 -0
  81. package/src/app/[variants]/(main)/settings/provider/(detail)/huggingface/page.tsx +3 -3
  82. package/src/app/[variants]/(main)/settings/provider/(detail)/vertexai/page.tsx +67 -0
  83. package/src/config/aiModels/index.ts +3 -0
  84. package/src/config/aiModels/vertexai.ts +200 -0
  85. package/src/config/modelProviders/index.ts +3 -0
  86. package/src/config/modelProviders/vertexai.ts +22 -0
  87. package/src/database/client/db.ts +2 -1
  88. package/src/libs/agent-runtime/error.ts +1 -0
  89. package/src/libs/agent-runtime/google/index.ts +22 -4
  90. package/src/libs/agent-runtime/types/type.ts +1 -0
  91. package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts +236 -0
  92. package/src/libs/agent-runtime/utils/streams/vertex-ai.ts +75 -0
  93. package/src/libs/agent-runtime/vertexai/index.ts +23 -0
  94. package/src/locales/default/error.ts +1 -0
  95. package/src/locales/default/modelProvider.ts +7 -0
  96. package/src/types/user/settings/keyVaults.ts +1 -0
  97. package/src/utils/safeParseJSON.ts +1 -1
@@ -0,0 +1,236 @@
1
+ import { describe, expect, it, vi } from 'vitest';
2
+
3
+ import * as uuidModule from '@/utils/uuid';
4
+
5
+ import { VertexAIStream } from './vertex-ai';
6
+
7
+ describe('VertexAIStream', () => {
8
+ it('should transform Vertex AI stream to protocol stream', async () => {
9
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
10
+ const rawChunks = [
11
+ {
12
+ candidates: [
13
+ {
14
+ content: { role: 'model', parts: [{ text: '你好' }] },
15
+ safetyRatings: [
16
+ {
17
+ category: 'HARM_CATEGORY_HATE_SPEECH',
18
+ probability: 'NEGLIGIBLE',
19
+ probabilityScore: 0.06298828,
20
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
21
+ severityScore: 0.10986328,
22
+ },
23
+ {
24
+ category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
25
+ probability: 'NEGLIGIBLE',
26
+ probabilityScore: 0.05029297,
27
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
28
+ severityScore: 0.078125,
29
+ },
30
+ {
31
+ category: 'HARM_CATEGORY_HARASSMENT',
32
+ probability: 'NEGLIGIBLE',
33
+ probabilityScore: 0.19433594,
34
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
35
+ severityScore: 0.16015625,
36
+ },
37
+ {
38
+ category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
39
+ probability: 'NEGLIGIBLE',
40
+ probabilityScore: 0.059326172,
41
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
42
+ severityScore: 0.064453125,
43
+ },
44
+ ],
45
+ index: 0,
46
+ },
47
+ ],
48
+ usageMetadata: {},
49
+ modelVersion: 'gemini-1.5-flash-001',
50
+ },
51
+ {
52
+ candidates: [
53
+ {
54
+ content: { role: 'model', parts: [{ text: '! 😊' }] },
55
+ safetyRatings: [
56
+ {
57
+ category: 'HARM_CATEGORY_HATE_SPEECH',
58
+ probability: 'NEGLIGIBLE',
59
+ probabilityScore: 0.052734375,
60
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
61
+ severityScore: 0.08642578,
62
+ },
63
+ {
64
+ category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
65
+ probability: 'NEGLIGIBLE',
66
+ probabilityScore: 0.071777344,
67
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
68
+ severityScore: 0.095214844,
69
+ },
70
+ {
71
+ category: 'HARM_CATEGORY_HARASSMENT',
72
+ probability: 'NEGLIGIBLE',
73
+ probabilityScore: 0.1640625,
74
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
75
+ severityScore: 0.10498047,
76
+ },
77
+ {
78
+ category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
79
+ probability: 'NEGLIGIBLE',
80
+ probabilityScore: 0.075683594,
81
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
82
+ severityScore: 0.053466797,
83
+ },
84
+ ],
85
+ index: 0,
86
+ },
87
+ ],
88
+ modelVersion: 'gemini-1.5-flash-001',
89
+ },
90
+ ];
91
+
92
+ const mockGoogleStream = new ReadableStream({
93
+ start(controller) {
94
+ rawChunks.forEach((chunk) => controller.enqueue(chunk));
95
+
96
+ controller.close();
97
+ },
98
+ });
99
+
100
+ const onStartMock = vi.fn();
101
+ const onTextMock = vi.fn();
102
+ const onTokenMock = vi.fn();
103
+ const onToolCallMock = vi.fn();
104
+ const onCompletionMock = vi.fn();
105
+
106
+ const protocolStream = VertexAIStream(mockGoogleStream, {
107
+ onStart: onStartMock,
108
+ onText: onTextMock,
109
+ onToken: onTokenMock,
110
+ onToolCall: onToolCallMock,
111
+ onCompletion: onCompletionMock,
112
+ });
113
+
114
+ const decoder = new TextDecoder();
115
+ const chunks = [];
116
+
117
+ // @ts-ignore
118
+ for await (const chunk of protocolStream) {
119
+ chunks.push(decoder.decode(chunk, { stream: true }));
120
+ }
121
+
122
+ expect(chunks).toEqual([
123
+ // text
124
+ 'id: chat_1\n',
125
+ 'event: text\n',
126
+ `data: "你好"\n\n`,
127
+
128
+ // text
129
+ 'id: chat_1\n',
130
+ 'event: text\n',
131
+ `data: "! 😊"\n\n`,
132
+ ]);
133
+
134
+ expect(onStartMock).toHaveBeenCalledTimes(1);
135
+ expect(onTokenMock).toHaveBeenCalledTimes(2);
136
+ expect(onCompletionMock).toHaveBeenCalledTimes(1);
137
+ });
138
+
139
+ it('tool_calls', async () => {
140
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
141
+ const rawChunks = [
142
+ {
143
+ candidates: [
144
+ {
145
+ content: {
146
+ role: 'model',
147
+ parts: [
148
+ {
149
+ functionCall: {
150
+ name: 'realtime-weather____fetchCurrentWeather',
151
+ args: { city: '杭州' },
152
+ },
153
+ },
154
+ ],
155
+ },
156
+ finishReason: 'STOP',
157
+ safetyRatings: [
158
+ {
159
+ category: 'HARM_CATEGORY_HATE_SPEECH',
160
+ probability: 'NEGLIGIBLE',
161
+ probabilityScore: 0.09814453,
162
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
163
+ severityScore: 0.07470703,
164
+ },
165
+ {
166
+ category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
167
+ probability: 'NEGLIGIBLE',
168
+ probabilityScore: 0.1484375,
169
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
170
+ severityScore: 0.15136719,
171
+ },
172
+ {
173
+ category: 'HARM_CATEGORY_HARASSMENT',
174
+ probability: 'NEGLIGIBLE',
175
+ probabilityScore: 0.11279297,
176
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
177
+ severityScore: 0.10107422,
178
+ },
179
+ {
180
+ category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
181
+ probability: 'NEGLIGIBLE',
182
+ probabilityScore: 0.048828125,
183
+ severity: 'HARM_SEVERITY_NEGLIGIBLE',
184
+ severityScore: 0.05493164,
185
+ },
186
+ ],
187
+ index: 0,
188
+ },
189
+ ],
190
+ usageMetadata: { promptTokenCount: 95, candidatesTokenCount: 9, totalTokenCount: 104 },
191
+ modelVersion: 'gemini-1.5-flash-001',
192
+ },
193
+ ];
194
+
195
+ const mockGoogleStream = new ReadableStream({
196
+ start(controller) {
197
+ rawChunks.forEach((chunk) => controller.enqueue(chunk));
198
+
199
+ controller.close();
200
+ },
201
+ });
202
+
203
+ const onStartMock = vi.fn();
204
+ const onTextMock = vi.fn();
205
+ const onTokenMock = vi.fn();
206
+ const onToolCallMock = vi.fn();
207
+ const onCompletionMock = vi.fn();
208
+
209
+ const protocolStream = VertexAIStream(mockGoogleStream, {
210
+ onStart: onStartMock,
211
+ onText: onTextMock,
212
+ onToken: onTokenMock,
213
+ onToolCall: onToolCallMock,
214
+ onCompletion: onCompletionMock,
215
+ });
216
+
217
+ const decoder = new TextDecoder();
218
+ const chunks = [];
219
+
220
+ // @ts-ignore
221
+ for await (const chunk of protocolStream) {
222
+ chunks.push(decoder.decode(chunk, { stream: true }));
223
+ }
224
+
225
+ expect(chunks).toEqual([
226
+ // text
227
+ 'id: chat_1\n',
228
+ 'event: tool_calls\n',
229
+ `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0","index":0,"type":"function"}]\n\n`,
230
+ ]);
231
+
232
+ expect(onStartMock).toHaveBeenCalledTimes(1);
233
+ expect(onToolCallMock).toHaveBeenCalledTimes(1);
234
+ expect(onCompletionMock).toHaveBeenCalledTimes(1);
235
+ });
236
+ });
@@ -0,0 +1,75 @@
1
+ import { EnhancedGenerateContentResponse, GenerateContentResponse } from '@google/generative-ai';
2
+
3
+ import { nanoid } from '@/utils/uuid';
4
+
5
+ import { ChatStreamCallbacks } from '../../types';
6
+ import {
7
+ StreamProtocolChunk,
8
+ StreamStack,
9
+ createCallbacksTransformer,
10
+ createSSEProtocolTransformer,
11
+ generateToolCallId,
12
+ } from './protocol';
13
+
14
+ const transformVertexAIStream = (
15
+ chunk: GenerateContentResponse,
16
+ stack: StreamStack,
17
+ ): StreamProtocolChunk => {
18
+ // maybe need another structure to add support for multiple choices
19
+ const candidates = chunk.candidates;
20
+
21
+ if (!candidates)
22
+ return {
23
+ data: '',
24
+ id: stack?.id,
25
+ type: 'text',
26
+ };
27
+
28
+ const item = candidates[0];
29
+ if (item.content) {
30
+ const part = item.content.parts[0];
31
+
32
+ if (part.functionCall) {
33
+ const functionCall = part.functionCall;
34
+
35
+ return {
36
+ data: [
37
+ {
38
+ function: {
39
+ arguments: JSON.stringify(functionCall.args),
40
+ name: functionCall.name,
41
+ },
42
+ id: generateToolCallId(0, functionCall.name),
43
+ index: 0,
44
+ type: 'function',
45
+ },
46
+ ],
47
+ id: stack?.id,
48
+ type: 'tool_calls',
49
+ };
50
+ }
51
+
52
+ return {
53
+ data: part.text,
54
+ id: stack?.id,
55
+ type: 'text',
56
+ };
57
+ }
58
+
59
+ return {
60
+ data: '',
61
+ id: stack?.id,
62
+ type: 'stop',
63
+ };
64
+ };
65
+
66
+ export const VertexAIStream = (
67
+ rawStream: ReadableStream<EnhancedGenerateContentResponse>,
68
+ callbacks?: ChatStreamCallbacks,
69
+ ) => {
70
+ const streamStack: StreamStack = { id: 'chat_' + nanoid() };
71
+
72
+ return rawStream
73
+ .pipeThrough(createSSEProtocolTransformer(transformVertexAIStream, streamStack))
74
+ .pipeThrough(createCallbacksTransformer(callbacks));
75
+ };
@@ -0,0 +1,23 @@
1
+ import { VertexAI, VertexInit } from '@google-cloud/vertexai';
2
+
3
+ import { AgentRuntimeError, AgentRuntimeErrorType, LobeGoogleAI } from '@/libs/agent-runtime';
4
+
5
+ export class LobeVertexAI extends LobeGoogleAI {
6
+ static initFromVertexAI(params?: VertexInit) {
7
+ try {
8
+ const client = new VertexAI({ ...params });
9
+
10
+ return new LobeGoogleAI({ apiKey: 'avoid-error', client, isVertexAi: true });
11
+ } catch (e) {
12
+ const err = e as Error;
13
+
14
+ if (err.name === 'IllegalArgumentError') {
15
+ throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidVertexCredentials, {
16
+ message: err.message,
17
+ });
18
+ }
19
+
20
+ throw e;
21
+ }
22
+ }
23
+ }
@@ -106,6 +106,7 @@ export default {
106
106
  */
107
107
  OpenAIBizError: '请求 OpenAI 服务出错,请根据以下信息排查或重试',
108
108
 
109
+ InvalidVertexCredentials: 'Vertex 鉴权未通过,请检查鉴权凭证后重试',
109
110
  InvalidBedrockCredentials: 'Bedrock 鉴权未通过,请检查 AccessKeyId/SecretAccessKey 后重试',
110
111
  StreamChunkError:
111
112
  '流式请求的消息块解析错误,请检查当前 API 接口是否符合标准规范,或联系你的 API 供应商咨询',
@@ -325,6 +325,13 @@ export default {
325
325
  tooltip: '更新服务商基础配置',
326
326
  updateSuccess: '更新成功',
327
327
  },
328
+ vertexai: {
329
+ apiKey: {
330
+ desc: '填入你的 Vertex AI Keys',
331
+ placeholder: `{ "type": "service_account", "project_id": "xxx", "private_key_id": ... }`,
332
+ title: 'Vertex AI Keys',
333
+ },
334
+ },
328
335
  zeroone: {
329
336
  title: '01.AI 零一万物',
330
337
  },
@@ -68,6 +68,7 @@ export interface UserKeyVaults {
68
68
  tencentcloud?: OpenAICompatibleKeyVault;
69
69
  togetherai?: OpenAICompatibleKeyVault;
70
70
  upstage?: OpenAICompatibleKeyVault;
71
+ vertexai?: OpenAICompatibleKeyVault;
71
72
  vllm?: OpenAICompatibleKeyVault;
72
73
  volcengine?: OpenAICompatibleKeyVault;
73
74
  wenxin?: OpenAICompatibleKeyVault;
@@ -1,4 +1,4 @@
1
- export const safeParseJSON = <T = Record<string, any>>(text: string) => {
1
+ export const safeParseJSON = <T = Record<string, any>>(text?: string) => {
2
2
  if (typeof text !== 'string') return undefined;
3
3
 
4
4
  let json: T;