@lobehub/chat 1.70.10 → 1.71.0

Files changed (61)
  1. package/.github/ISSUE_TEMPLATE/1_bug_report.yml +1 -0
  2. package/.github/ISSUE_TEMPLATE/2_feature_request.yml +1 -0
  3. package/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml +1 -0
  4. package/.github/workflows/sync-database-schema.yml +25 -0
  5. package/CHANGELOG.md +42 -0
  6. package/README.md +1 -1
  7. package/README.zh-CN.md +1 -1
  8. package/changelog/v1.json +14 -0
  9. package/docs/developer/database-schema.dbml +569 -0
  10. package/locales/ar/models.json +3 -0
  11. package/locales/bg-BG/models.json +3 -0
  12. package/locales/de-DE/models.json +3 -0
  13. package/locales/en-US/models.json +3 -0
  14. package/locales/es-ES/models.json +3 -0
  15. package/locales/fa-IR/models.json +3 -0
  16. package/locales/fr-FR/models.json +3 -0
  17. package/locales/it-IT/models.json +3 -0
  18. package/locales/ja-JP/models.json +3 -0
  19. package/locales/ko-KR/models.json +3 -0
  20. package/locales/nl-NL/models.json +3 -0
  21. package/locales/pl-PL/models.json +3 -0
  22. package/locales/pt-BR/models.json +3 -0
  23. package/locales/ru-RU/models.json +3 -0
  24. package/locales/tr-TR/models.json +3 -0
  25. package/locales/vi-VN/models.json +3 -0
  26. package/locales/zh-CN/models.json +3 -0
  27. package/locales/zh-TW/models.json +3 -0
  28. package/package.json +6 -2
  29. package/scripts/dbmlWorkflow/index.ts +11 -0
  30. package/src/config/aiModels/google.ts +17 -0
  31. package/src/database/client/migrations.json +10 -0
  32. package/src/database/migrations/0016_add_message_index.sql +3 -0
  33. package/src/database/migrations/meta/0016_snapshot.json +4018 -0
  34. package/src/database/migrations/meta/_journal.json +7 -0
  35. package/src/database/schemas/message.ts +3 -0
  36. package/src/database/server/models/message.ts +20 -9
  37. package/src/database/server/models/user.test.ts +58 -0
  38. package/src/features/AlertBanner/CloudBanner.tsx +1 -1
  39. package/src/features/Conversation/Messages/Assistant/index.tsx +4 -1
  40. package/src/features/Conversation/Messages/User/index.tsx +4 -4
  41. package/src/libs/agent-runtime/google/index.ts +8 -2
  42. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +99 -0
  43. package/src/libs/agent-runtime/utils/streams/google-ai.ts +69 -23
  44. package/src/libs/agent-runtime/utils/streams/protocol.ts +2 -0
  45. package/src/services/chat.ts +33 -15
  46. package/src/services/file/client.ts +3 -1
  47. package/src/services/message/server.ts +2 -2
  48. package/src/services/message/type.ts +2 -2
  49. package/src/services/upload.ts +82 -1
  50. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +44 -4
  51. package/src/store/chat/slices/message/action.ts +3 -0
  52. package/src/store/file/slices/upload/action.ts +36 -13
  53. package/src/store/file/store.ts +2 -0
  54. package/src/tools/web-browsing/Render/PageContent/index.tsx +2 -2
  55. package/src/tools/web-browsing/Render/Search/SearchResult/SearchResultItem.tsx +1 -1
  56. package/src/types/files/upload.ts +7 -0
  57. package/src/types/message/base.ts +22 -1
  58. package/src/types/message/chat.ts +1 -6
  59. package/src/types/message/image.ts +11 -0
  60. package/src/types/message/index.ts +1 -0
  61. package/src/utils/fetch/fetchSSE.ts +24 -1
package/src/database/migrations/meta/_journal.json
@@ -112,6 +112,13 @@
       "when": 1739901891891,
       "tag": "0015_add_message_search_metadata",
       "breakpoints": true
+    },
+    {
+      "idx": 16,
+      "version": "7",
+      "when": 1741844738677,
+      "tag": "0016_add_message_index",
+      "breakpoints": true
     }
   ],
   "version": "6"
package/src/database/schemas/message.ts
@@ -73,6 +73,9 @@ export const messages = pgTable(
       table.clientId,
       table.userId,
     ),
+    topicIdIdx: index('messages_topic_id_idx').on(table.topicId),
+    parentIdIdx: index('messages_parent_id_idx').on(table.parentId),
+    quotaIdIdx: index('messages_quota_id_idx').on(table.quotaId),
   }),
 );

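These three b-tree indexes (generated into 0016_add_message_index.sql) cover message lookups that previously filtered on unindexed columns. A minimal sketch of the kind of drizzle query the first one now serves — illustrative only, not taken from this diff:

import { and, eq } from 'drizzle-orm';

import { messages } from '@/database/schemas';

// Fetching one topic's messages: the topicId predicate can now be answered
// from messages_topic_id_idx instead of scanning all of the user's rows.
// `db` stands in for whichever drizzle instance the caller already holds.
const getTopicMessages = (db: any, topicId: string, userId: string) =>
  db
    .select()
    .from(messages)
    .where(and(eq(messages.topicId, topicId), eq(messages.userId, userId)));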
package/src/database/server/models/message.ts
@@ -21,6 +21,7 @@ import {
   CreateMessageParams,
   MessageItem,
   ModelRankItem,
+  UpdateMessageParams,
 } from '@/types/message';
 import { merge } from '@/utils/merge';
 import { today } from '@/utils/time';
@@ -497,15 +498,25 @@ export class MessageModel {
   };
   // **************** Update *************** //

-  update = async (id: string, message: Partial<MessageItem>) => {
-    return this.db
-      .update(messages)
-      .set({
-        ...message,
-        // TODO: need a better way to handle this
-        role: message.role as any,
-      })
-      .where(and(eq(messages.id, id), eq(messages.userId, this.userId)));
+  update = async (id: string, { imageList, ...message }: Partial<UpdateMessageParams>) => {
+    return this.db.transaction(async (trx) => {
+      // 1. insert message files
+      if (imageList && imageList.length > 0) {
+        await trx
+          .insert(messagesFiles)
+          .values(imageList.map((file) => ({ fileId: file.id, messageId: id })));
+      }
+
+      return trx
+        .update(messages)
+        .set({
+          ...message,
+          // TODO: need a better way to handle this
+          // TODO: but I forget why 🤡
+          role: message.role as any,
+        })
+        .where(and(eq(messages.id, id), eq(messages.userId, this.userId)));
+    });
   };

   updatePluginState = async (id: string, state: Record<string, any>) => {
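The update signature moves from Partial<MessageItem> to Partial<UpdateMessageParams>. The type itself lives in src/types/message (files 57–60 above) and is not shown in this diff; a sketch of the shape its usage here implies — an assumption, not the real definition:

import { MessageItem } from '@/types/message';

// Hypothetical reconstruction: imageList entries only need an id (the file id
// written into messages_files inside the transaction above) plus display fields.
interface ChatImageItemSketch {
  alt?: string;
  id: string;
  url: string;
}

type UpdateMessageParamsSketch = Partial<MessageItem> & {
  imageList?: ChatImageItemSketch[];
};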
package/src/database/server/models/user.test.ts
@@ -0,0 +1,58 @@
+// @vitest-environment node
+import { TRPCError } from '@trpc/server';
+import { describe, expect, it, vi } from 'vitest';
+
+import { UserModel, UserNotFoundError } from '@/database/server/models/user';
+
+describe('UserNotFoundError', () => {
+  it('should extend TRPCError with correct code and message', () => {
+    const error = new UserNotFoundError();
+
+    expect(error).toBeInstanceOf(TRPCError);
+    expect(error.code).toBe('UNAUTHORIZED');
+    expect(error.message).toBe('user not found');
+  });
+});
+
+describe('UserModel', () => {
+  const mockDb = {
+    query: {
+      users: {
+        findFirst: vi.fn(),
+      },
+    },
+  };
+
+  const mockUserId = 'test-user-id';
+  const userModel = new UserModel(mockDb as any, mockUserId);
+
+  describe('getUserRegistrationDuration', () => {
+    it('should return default values when user not found', async () => {
+      mockDb.query.users.findFirst.mockResolvedValue(null);
+
+      const result = await userModel.getUserRegistrationDuration();
+
+      expect(result).toEqual({
+        createdAt: expect.any(String),
+        duration: 1,
+        updatedAt: expect.any(String),
+      });
+    });
+
+    it('should calculate duration correctly for existing user', async () => {
+      const createdAt = new Date('2024-01-01');
+      mockDb.query.users.findFirst.mockResolvedValue({
+        createdAt,
+      });
+
+      const result = await userModel.getUserRegistrationDuration();
+
+      expect(result).toEqual({
+        createdAt: '2024-01-01',
+        duration: expect.any(Number),
+        updatedAt: expect.any(String),
+      });
+      expect(result.duration).toBeGreaterThan(0);
+    });
+  });
+});
package/src/features/AlertBanner/CloudBanner.tsx
@@ -61,7 +61,7 @@ const CloudBanner = memo<{ mobile?: boolean }>(({ mobile }) => {
         <b>{t('alert.cloud.title', { name: LOBE_CHAT_CLOUD })}:</b>
         <span>
           {t(mobile ? 'alert.cloud.descOnMobile' : 'alert.cloud.desc', {
-            credit: new Intl.NumberFormat('en-US').format(450_000),
+            credit: new Intl.NumberFormat('en-US').format(500_000),
             name: LOBE_CHAT_CLOUD,
           })}
         </span>
package/src/features/Conversation/Messages/Assistant/index.tsx
@@ -2,6 +2,7 @@ import { ReactNode, memo } from 'react';
 import { Flexbox } from 'react-layout-kit';

 import { LOADING_FLAT } from '@/const/message';
+import ImageFileListViewer from '@/features/Conversation/Messages/User/ImageFileListViewer';
 import { useChatStore } from '@/store/chat';
 import { aiChatSelectors, chatSelectors } from '@/store/chat/selectors';
 import { ChatMessage } from '@/types/message';
@@ -17,7 +18,7 @@ export const AssistantMessage = memo<
   ChatMessage & {
     editableContent: ReactNode;
   }
->(({ id, tools, content, chunksList, search, ...props }) => {
+>(({ id, tools, content, chunksList, search, imageList, ...props }) => {
   const editing = useChatStore(chatSelectors.isMessageEditing(id));
   const generating = useChatStore(chatSelectors.isMessageGenerating(id));

@@ -28,6 +29,7 @@ export const AssistantMessage = memo<
   const isIntentUnderstanding = useChatStore(aiChatSelectors.isIntentUnderstanding(id));

   const showSearch = !!search && !!search.citations?.length;
+  const showImageItems = !!imageList && imageList.length > 0;

   // remove \n to avoid empty content
   // refs: https://github.com/lobehub/lobe-chat/pull/6153
@@ -64,6 +66,7 @@ export const AssistantMessage = memo<
           />
         )
       )}
+      {showImageItems && <ImageFileListViewer items={imageList} />}
       {tools && (
         <Flexbox gap={8}>
           {tools.map((toolCall, index) => (
package/src/features/Conversation/Messages/User/index.tsx
@@ -12,16 +12,16 @@ export const UserMessage = memo<
   ChatMessage & {
     editableContent: ReactNode;
   }
->(({ id, editableContent, content, ...res }) => {
+>(({ id, editableContent, content, imageList, fileList }) => {
   if (content === LOADING_FLAT) return <BubblesLoading />;

   return (
     <Flexbox gap={8} id={id}>
       {editableContent}
-      {res.imageList && res.imageList?.length > 0 && <ImageFileListViewer items={res.imageList} />}
-      {res.fileList && res.fileList?.length > 0 && (
+      {imageList && imageList?.length > 0 && <ImageFileListViewer items={imageList} />}
+      {fileList && fileList?.length > 0 && (
         <div style={{ marginTop: 8 }}>
-          <FileListViewer items={res.fileList} />
+          <FileListViewer items={fileList} />
         </div>
       )}
     </Flexbox>
package/src/libs/agent-runtime/google/index.ts
@@ -31,6 +31,9 @@ import { StreamingResponse } from '../utils/response';
 import { GoogleGenerativeAIStream, convertIterableToStream } from '../utils/streams';
 import { parseDataUri } from '../utils/uriParser';

+const modelsOffSafetySettings = new Set(['gemini-2.0-flash-exp']);
+const modelsWithModalities = new Set(['gemini-2.0-flash-exp']);
+
 export interface GoogleModelCard {
   displayName: string;
   inputTokenLimit: number;
@@ -50,8 +53,7 @@ enum HarmBlockThreshold {
 }

 function getThreshold(model: string): HarmBlockThreshold {
-  const useOFF = ['gemini-2.0-flash-exp'];
-  if (useOFF.includes(model)) {
+  if (modelsOffSafetySettings.has(model)) {
     return 'OFF' as HarmBlockThreshold; // https://discuss.ai.google.dev/t/59352
   }
   return HarmBlockThreshold.BLOCK_NONE;
@@ -94,6 +96,10 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       {
         generationConfig: {
           maxOutputTokens: payload.max_tokens,
+          // @ts-expect-error - Google SDK 0.24.0 doesn't type this property yet
+          response_modalities: modelsWithModalities.has(model)
+            ? ['Text', 'Image']
+            : undefined,
           temperature: payload.temperature,
           topP: payload.top_p,
         },
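Since both sets above contain only gemini-2.0-flash-exp, that is currently the sole model with safety settings forced OFF and image output requested. Roughly, the generation config such a request ends up carrying looks like this (illustrative values; response_modalities is passed through untyped, hence the @ts-expect-error):

// Assumed effective payload for a gemini-2.0-flash-exp request:
const generationConfig = {
  maxOutputTokens: 4096, // payload.max_tokens
  response_modalities: ['Text', 'Image'], // only when modelsWithModalities.has(model)
  temperature: 1, // payload.temperature
  topP: 1, // payload.top_p
};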
package/src/libs/agent-runtime/utils/streams/google-ai.test.ts
@@ -94,4 +94,103 @@ describe('GoogleGenerativeAIStream', () => {

     expect(chunks).toEqual([]);
   });
+
+  it('should handle image', async () => {
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+    const data = {
+      candidates: [
+        {
+          content: {
+            parts: [{ inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgoAA' } }],
+            role: 'model',
+          },
+          index: 0,
+        },
+      ],
+      usageMetadata: {
+        promptTokenCount: 6,
+        totalTokenCount: 6,
+        promptTokensDetails: [{ modality: 'TEXT', tokenCount: 6 }],
+      },
+      modelVersion: 'gemini-2.0-flash-exp',
+    };
+    const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
+      ({
+        text: () => text,
+        functionCall: () => functionCalls?.[0],
+        functionCalls: () => functionCalls,
+      }) as EnhancedGenerateContentResponse;
+
+    const mockGoogleStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue(data);
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      // image
+      'id: chat_1\n',
+      'event: base64_image\n',
+      `data: "data:image/png;base64,iVBORw0KGgoAA"\n\n`,
+    ]);
+  });
+
+  it('should handle token count', async () => {
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+    const data = {
+      candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
+      usageMetadata: {
+        promptTokenCount: 266,
+        totalTokenCount: 266,
+        promptTokensDetails: [
+          { modality: 'TEXT', tokenCount: 8 },
+          { modality: 'IMAGE', tokenCount: 258 },
+        ],
+      },
+      modelVersion: 'gemini-2.0-flash-exp',
+    };
+
+    const mockGoogleStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue(data);
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      // stop
+      'id: chat_1\n',
+      'event: stop\n',
+      `data: "STOP"\n\n`,
+      // usage
+      'id: chat_1\n',
+      'event: usage\n',
+      `data: {"inputImageTokens":258,"inputTextTokens":8,"totalInputTokens":266,"totalTokens":266}\n\n`,
+    ]);
+  });
 });
package/src/libs/agent-runtime/utils/streams/google-ai.ts
@@ -1,5 +1,6 @@
 import { EnhancedGenerateContentResponse } from '@google/generative-ai';

+import { ModelTokensUsage } from '@/types/message';
 import { GroundingSearch } from '@/types/search';
 import { nanoid } from '@/utils/uuid';

@@ -18,7 +19,7 @@ const transformGoogleGenerativeAIStream = (
   context: StreamContext,
 ): StreamProtocolChunk | StreamProtocolChunk[] => {
   // maybe need another structure to add support for multiple choices
-  const functionCalls = chunk.functionCalls();
+  const functionCalls = chunk.functionCalls?.();

   if (functionCalls) {
     return {
@@ -37,30 +38,75 @@
       type: 'tool_calls',
     };
   }
-  const text = chunk.text();

-  if (chunk.candidates && chunk.candidates[0].groundingMetadata) {
-    const { webSearchQueries, groundingSupports, groundingChunks } =
-      chunk.candidates[0].groundingMetadata;
-    console.log({ groundingChunks, groundingSupports, webSearchQueries });
+  const text = chunk.text?.();

-    return [
-      { data: text, id: context.id, type: 'text' },
-      {
-        data: {
-          citations: groundingChunks?.map((chunk) => ({
-            // the uri Google returns is a url Google has already rewritten, so the real favicon can't be resolved from it
-            // use the title as a substitute
-            favicon: chunk.web?.title,
-            title: chunk.web?.title,
-            url: chunk.web?.uri,
-          })),
-          searchQueries: webSearchQueries,
-        } as GroundingSearch,
-        id: context.id,
-        type: 'grounding',
-      },
-    ];
+  if (chunk.candidates) {
+    const candidate = chunk.candidates[0];
+
+    // return the grounding
+    if (candidate.groundingMetadata) {
+      const { webSearchQueries, groundingChunks } = candidate.groundingMetadata;
+
+      return [
+        { data: text, id: context.id, type: 'text' },
+        {
+          data: {
+            citations: groundingChunks?.map((chunk) => ({
+              // the uri Google returns is a url Google has already rewritten, so the real favicon can't be resolved from it
+              // use the title as a substitute
+              favicon: chunk.web?.title,
+              title: chunk.web?.title,
+              url: chunk.web?.uri,
+            })),
+            searchQueries: webSearchQueries,
+          } as GroundingSearch,
+          id: context.id,
+          type: 'grounding',
+        },
+      ];
+    }
+
+    if (candidate.finishReason) {
+      if (chunk.usageMetadata) {
+        const usage = chunk.usageMetadata;
+        return [
+          { data: candidate.finishReason, id: context?.id, type: 'stop' },
+          {
+            data: {
+              // TODO: Google SDK 0.24.0 doesn't ship promptTokensDetails types yet
+              inputImageTokens: (usage as any).promptTokensDetails?.find(
+                (i: any) => i.modality === 'IMAGE',
+              )?.tokenCount,
+              inputTextTokens: (usage as any).promptTokensDetails?.find(
+                (i: any) => i.modality === 'TEXT',
+              )?.tokenCount,
+              totalInputTokens: usage.promptTokenCount,
+              totalOutputTokens: usage.candidatesTokenCount,
+              totalTokens: usage.totalTokenCount,
+            } as ModelTokensUsage,
+            id: context?.id,
+            type: 'usage',
+          },
+        ];
+      }
+      return { data: candidate.finishReason, id: context?.id, type: 'stop' };
+    }
+
+    if (!!text?.trim()) return { data: text, id: context?.id, type: 'text' };
+
+    // streaming the image
+    if (Array.isArray(candidate.content.parts) && candidate.content.parts.length > 0) {
+      const part = candidate.content.parts[0];
+
+      if (part && part.inlineData && part.inlineData.data && part.inlineData.mimeType) {
+        return {
+          data: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`,
+          id: context.id,
+          type: 'base64_image',
+        };
+      }
+    }
   }

   return {
package/src/libs/agent-runtime/utils/streams/protocol.ts
@@ -32,6 +32,8 @@ export interface StreamProtocolChunk {
   id?: string;
   type: // pure text
     | 'text'
+    // base64 format image
+    | 'base64_image'
     // Tools use
     | 'tool_calls'
     // Model Thinking
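Consumers of the SSE stream branch on this type field; the tests above show the wire framing (event: base64_image followed by a JSON-quoted data URI). A minimal consumer-side sketch, with callback names of our own invention — the real handling lives in src/utils/fetch/fetchSSE.ts (file 61 above):

type Chunk = { data: unknown; id?: string; type: string };

function handleChunk(
  chunk: Chunk,
  callbacks: { onBase64Image?: (dataUri: string) => void; onText?: (text: string) => void },
) {
  switch (chunk.type) {
    case 'base64_image':
      // data carries a complete data URI, e.g. "data:image/png;base64,iVBOR..."
      callbacks.onBase64Image?.(chunk.data as string);
      break;
    case 'text':
      callbacks.onText?.(chunk.data as string);
      break;
  }
}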
package/src/services/chat.ts
@@ -438,6 +438,8 @@ class ChatService {
       provider: params.provider!,
     });

+    // remove plugins
+    delete params.plugins;
     await this.getChatCompletion(
       { ...params, messages: oaiMessages, tools },
       {
@@ -474,7 +476,7 @@ class ChatService {
     // handle content type for vision model
     // for the models with visual ability, add image url to content
     // refs: https://platform.openai.com/docs/guides/vision/quick-start
-    const getContent = (m: ChatMessage) => {
+    const getUserContent = (m: ChatMessage) => {
       // only if message doesn't have images and files, then return the plain content
       if ((!m.imageList || m.imageList.length === 0) && (!m.fileList || m.fileList.length === 0))
         return m.content;
@@ -490,27 +492,43 @@ class ChatService {
       ] as UserMessageContentPart[];
     };

+    const getAssistantContent = (m: ChatMessage) => {
+      // signature is a signal of anthropic thinking mode
+      const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;
+
+      if (shouldIncludeThinking) {
+        return [
+          {
+            signature: m.reasoning!.signature,
+            thinking: m.reasoning!.content,
+            type: 'thinking',
+          },
+          { text: m.content, type: 'text' },
+        ] as UserMessageContentPart[];
+      }
+      // only if message doesn't have images and files, then return the plain content
+
+      if (m.imageList && m.imageList.length > 0) {
+        return [
+          !!m.content ? { text: m.content, type: 'text' } : undefined,
+          ...m.imageList.map(
+            (i) => ({ image_url: { detail: 'auto', url: i.url }, type: 'image_url' }) as const,
+          ),
+        ].filter(Boolean) as UserMessageContentPart[];
+      }
+
+      return m.content;
+    };
+
     let postMessages = messages.map((m): OpenAIChatMessage => {
       const supportTools = isCanUseFC(model, provider);
       switch (m.role) {
         case 'user': {
-          return { content: getContent(m), role: m.role };
+          return { content: getUserContent(m), role: m.role };
         }

         case 'assistant': {
-          // signature is a signal of anthropic thinking mode
-          const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;
-
-          const content = shouldIncludeThinking
-            ? [
-                {
-                  signature: m.reasoning!.signature,
-                  thinking: m.reasoning!.content,
-                  type: 'thinking',
-                } as any,
-                { text: m.content, type: 'text' },
-              ]
-            : m.content;
+          const content = getAssistantContent(m);

           if (!supportTools) {
             return { content, role: m.role };
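For an assistant message that carries generated images, getAssistantContent now yields the same multi-part shape user messages use. An illustrative result (values invented):

// What getAssistantContent returns for an assistant turn with one image:
const content = [
  { text: 'Here is the generated image.', type: 'text' },
  { image_url: { detail: 'auto', url: 'https://example.com/files/abc.png' }, type: 'image_url' },
];
// An empty content string is filtered out, leaving only the image_url parts.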
package/src/services/file/client.ts
@@ -11,6 +11,8 @@ export class ClientService extends BaseClientService implements IFileService {
   }

   createFile: IFileService['createFile'] = async (file) => {
+    const { isExist } = await this.fileModel.checkHash(file.hash!);
+
     // save to local storage
     // we may want to save to a remote server later
     const res = await this.fileModel.create(
@@ -23,7 +25,7 @@ export class ClientService extends BaseClientService implements IFileService {
         size: file.size,
         url: file.url!,
       },
-      true,
+      !isExist,
     );

     // get file to base64 url
package/src/services/message/server.ts
@@ -64,8 +64,8 @@ export class ServerService implements IMessageService {
     return lambdaClient.message.updateMessagePlugin.mutate({ id, value: { arguments: args } });
   };

-  updateMessage: IMessageService['updateMessage'] = async (id, message) => {
-    return lambdaClient.message.update.mutate({ id, value: message });
+  updateMessage: IMessageService['updateMessage'] = async (id, value) => {
+    return lambdaClient.message.update.mutate({ id, value });
   };

   updateMessageTranslate: IMessageService['updateMessageTranslate'] = async (id, translate) => {
package/src/services/message/type.ts
@@ -8,7 +8,7 @@ import {
   ChatTranslate,
   CreateMessageParams,
   MessageItem,
-  ModelRankItem,
+  ModelRankItem, UpdateMessageParams,
 } from '@/types/message';

 /* eslint-disable typescript-sort-keys/interface */
@@ -33,7 +33,7 @@ export interface IMessageService {
   rankModels(): Promise<ModelRankItem[]>;
   getHeatmaps(): Promise<HeatmapsProps['data']>;
   updateMessageError(id: string, error: ChatMessageError): Promise<any>;
-  updateMessage(id: string, message: Partial<MessageItem>): Promise<any>;
+  updateMessage(id: string, message: Partial<UpdateMessageParams>): Promise<any>;
   updateMessageTTS(id: string, tts: Partial<ChatTTS> | false): Promise<any>;
   updateMessageTranslate(id: string, translate: Partial<ChatTranslate> | false): Promise<any>;
   updateMessagePluginState(id: string, value: Record<string, any>): Promise<any>;
package/src/services/upload.ts
@@ -1,14 +1,95 @@
+import dayjs from 'dayjs';
+import { sha256 } from 'js-sha256';
+
 import { fileEnv } from '@/config/file';
+import { isServerMode } from '@/const/version';
+import { parseDataUri } from '@/libs/agent-runtime/utils/uriParser';
 import { edgeClient } from '@/libs/trpc/client';
 import { API_ENDPOINTS } from '@/services/_url';
 import { clientS3Storage } from '@/services/file/ClientS3';
-import { FileMetadata } from '@/types/files';
+import { FileMetadata, UploadBase64ToS3Result } from '@/types/files';
 import { FileUploadState, FileUploadStatus } from '@/types/files/upload';
 import { uuid } from '@/utils/uuid';

 export const UPLOAD_NETWORK_ERROR = 'NetWorkError';

+interface UploadFileToS3Options {
+  directory?: string;
+  filename?: string;
+  onProgress?: (status: FileUploadStatus, state: FileUploadState) => void;
+}
+
 class UploadService {
+  /**
+   * uniform upload method for both server and client
+   */
+  uploadFileToS3 = async (
+    file: File,
+    options: UploadFileToS3Options = {},
+  ): Promise<FileMetadata> => {
+    const { directory, onProgress } = options;
+
+    if (isServerMode) {
+      return this.uploadWithProgress(file, { directory, onProgress });
+    } else {
+      const fileArrayBuffer = await file.arrayBuffer();
+
+      // 1. check file hash
+      const hash = sha256(fileArrayBuffer);
+
+      return this.uploadToClientS3(hash, file);
+    }
+  };
+
+  uploadBase64ToS3 = async (
+    base64Data: string,
+    options: UploadFileToS3Options = {},
+  ): Promise<UploadBase64ToS3Result> => {
+    // parse the base64 data
+    const { base64, mimeType, type } = parseDataUri(base64Data);
+
+    if (!base64 || !mimeType || type !== 'base64') {
+      throw new Error('Invalid base64 data for image');
+    }
+
+    // convert the base64 payload into a Blob
+    const byteCharacters = atob(base64);
+    const byteArrays = [];
+
+    // process in chunks to avoid memory issues
+    for (let offset = 0; offset < byteCharacters.length; offset += 1024) {
+      const slice = byteCharacters.slice(offset, offset + 1024);
+
+      const byteNumbers: number[] = Array.from({ length: slice.length });
+      for (let i = 0; i < slice.length; i++) {
+        byteNumbers[i] = slice.charCodeAt(i);
+      }
+
+      const byteArray = new Uint8Array(byteNumbers);
+      byteArrays.push(byteArray);
+    }
+
+    const blob = new Blob(byteArrays, { type: mimeType });
+
+    // work out the file extension
+    const fileExtension = mimeType.split('/')[1] || 'png';
+    const fileName = `${options.filename || `image_${dayjs().format('YYYY-MM-DD-hh-mm-ss')}`}.${fileExtension}`;
+
+    // create the File object
+    const file = new File([blob], fileName, { type: mimeType });
+
+    // upload through the uniform method above
+    const metadata = await this.uploadFileToS3(file, options);
+    const hash = sha256(await file.arrayBuffer());
+
+    return {
+      fileType: mimeType,
+      hash,
+      metadata,
+      size: file.size,
+    };
+  };
+
   uploadWithProgress = async (
     file: File,
     {
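Tying the pieces together: a base64_image chunk emitted by the Google stream can be persisted through this new path. A hedged usage sketch, assuming uploadService is this module's exported singleton:

// Persist a data URI received via the base64_image SSE event:
const result = await uploadService.uploadBase64ToS3('data:image/png;base64,iVBORw0KGgoAA', {
  filename: 'generated-image',
});
// => { fileType: 'image/png', hash: <sha256 of the bytes>, metadata, size }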