@lobehub/chat 1.84.26 → 1.85.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/development/database-schema.dbml +59 -1
  4. package/package.json +3 -2
  5. package/packages/file-loaders/package.json +5 -1
  6. package/packages/file-loaders/src/loadFile.ts +51 -1
  7. package/packages/file-loaders/src/loaders/docx/index.ts +16 -1
  8. package/packages/file-loaders/src/loaders/excel/index.ts +30 -2
  9. package/packages/file-loaders/src/loaders/pdf/__snapshots__/index.test.ts.snap +1 -1
  10. package/packages/file-loaders/src/loaders/pdf/index.ts +52 -12
  11. package/packages/file-loaders/src/loaders/pptx/index.ts +32 -1
  12. package/packages/file-loaders/src/loaders/text/index.test.ts +1 -1
  13. package/packages/file-loaders/src/loaders/text/index.ts +13 -1
  14. package/packages/file-loaders/test/__snapshots__/loaders.test.ts.snap +41 -0
  15. package/packages/file-loaders/test/loaders.test.ts +20 -0
  16. package/packages/file-loaders/test/setup.ts +17 -0
  17. package/packages/file-loaders/vitest.config.ts +14 -0
  18. package/src/config/aiModels/vertexai.ts +6 -6
  19. package/src/const/file.ts +8 -1
  20. package/src/database/client/migrations.json +23 -1
  21. package/src/database/migrations/0022_add_documents.sql +49 -0
  22. package/src/database/migrations/meta/0022_snapshot.json +5340 -0
  23. package/src/database/migrations/meta/_journal.json +7 -0
  24. package/src/database/models/_template.ts +1 -1
  25. package/src/database/models/document.ts +54 -0
  26. package/src/database/models/message.ts +25 -0
  27. package/src/database/repositories/tableViewer/index.test.ts +1 -1
  28. package/src/database/schemas/document.ts +104 -0
  29. package/src/database/schemas/index.ts +1 -0
  30. package/src/database/schemas/relations.ts +34 -2
  31. package/src/database/schemas/topic.ts +31 -8
  32. package/src/database/utils/idGenerator.ts +1 -0
  33. package/src/features/ChatInput/Desktop/FilePreview/FileItem/Content.tsx +1 -1
  34. package/src/features/ChatInput/Desktop/FilePreview/FileItem/index.tsx +10 -10
  35. package/src/features/ChatInput/components/UploadDetail/UploadStatus.tsx +2 -2
  36. package/src/features/Conversation/Actions/Error.tsx +2 -2
  37. package/src/libs/agent-runtime/google/index.ts +2 -1
  38. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +101 -6
  39. package/src/libs/agent-runtime/utils/streams/google-ai.ts +62 -38
  40. package/src/libs/agent-runtime/utils/streams/protocol.ts +24 -4
  41. package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts +109 -8
  42. package/src/libs/agent-runtime/utils/streams/vertex-ai.ts +68 -23
  43. package/src/libs/trpc/lambda/context.ts +7 -0
  44. package/src/prompts/files/file.ts +6 -4
  45. package/src/server/routers/lambda/__tests__/file.test.ts +213 -0
  46. package/src/server/routers/lambda/document.ts +36 -0
  47. package/src/server/routers/lambda/index.ts +2 -0
  48. package/src/server/services/document/index.ts +66 -0
  49. package/src/server/services/mcp/index.ts +0 -4
  50. package/src/services/rag.ts +4 -0
  51. package/src/store/chat/slices/aiChat/actions/__tests__/rag.test.ts +2 -2
  52. package/src/store/chat/slices/aiChat/actions/rag.ts +2 -3
  53. package/src/store/file/slices/chat/action.ts +3 -51
  54. package/src/types/document/index.ts +172 -0
  55. package/src/types/message/chat.ts +1 -0
  56. package/src/features/ChatInput/Desktop/FilePreview/FileItem/style.ts +0 -4
package/src/libs/agent-runtime/utils/streams/google-ai.ts
@@ -11,6 +11,7 @@ import {
   StreamToolCallChunkData,
   createCallbacksTransformer,
   createSSEProtocolTransformer,
+  createTokenSpeedCalculator,
   generateToolCallId,
 } from './protocol';
 
@@ -19,31 +20,62 @@ const transformGoogleGenerativeAIStream = (
   context: StreamContext,
 ): StreamProtocolChunk | StreamProtocolChunk[] => {
   // maybe need another structure to add support for multiple choices
+  const candidate = chunk.candidates?.[0];
+  const usage = chunk.usageMetadata;
+  const usageChunks: StreamProtocolChunk[] = [];
+  if (candidate?.finishReason && usage) {
+    const outputReasoningTokens = (usage as any).thoughtsTokenCount || undefined;
+    const totalOutputTokens = (usage.candidatesTokenCount ?? 0) + (outputReasoningTokens ?? 0);
+
+    usageChunks.push(
+      { data: candidate.finishReason, id: context?.id, type: 'stop' },
+      {
+        data: {
+          // TODO: Google SDK 0.24.0 don't have promptTokensDetails types
+          inputImageTokens: (usage as any).promptTokensDetails?.find(
+            (i: any) => i.modality === 'IMAGE',
+          )?.tokenCount,
+          inputTextTokens: (usage as any).promptTokensDetails?.find(
+            (i: any) => i.modality === 'TEXT',
+          )?.tokenCount,
+          outputReasoningTokens,
+          outputTextTokens: totalOutputTokens - (outputReasoningTokens ?? 0),
+          totalInputTokens: usage.promptTokenCount,
+          totalOutputTokens,
+          totalTokens: usage.totalTokenCount,
+        } as ModelTokensUsage,
+        id: context?.id,
+        type: 'usage',
+      },
+    );
+  }
+
   const functionCalls = chunk.functionCalls?.();
 
   if (functionCalls) {
-    return {
-      data: functionCalls.map(
-        (value, index): StreamToolCallChunkData => ({
-          function: {
-            arguments: JSON.stringify(value.args),
-            name: value.name,
-          },
-          id: generateToolCallId(index, value.name),
-          index: index,
-          type: 'function',
-        }),
-      ),
-      id: context.id,
-      type: 'tool_calls',
-    };
+    return [
+      {
+        data: functionCalls.map(
+          (value, index): StreamToolCallChunkData => ({
+            function: {
+              arguments: JSON.stringify(value.args),
+              name: value.name,
+            },
+            id: generateToolCallId(index, value.name),
+            index: index,
+            type: 'function',
+          }),
+        ),
+        id: context.id,
+        type: 'tool_calls',
+      },
+      ...usageChunks,
+    ];
   }
 
   const text = chunk.text?.();
 
-  if (chunk.candidates) {
-    const candidate = chunk.candidates[0];
-
+  if (candidate) {
     // return the grounding
     if (candidate.groundingMetadata) {
       const { webSearchQueries, groundingChunks } = candidate.groundingMetadata;
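Note: with the block above, the transformer now collects a stop chunk and a usage chunk into usageChunks whenever the final candidate carries a finish reason. A rough sketch of what that pair looks like, using the token counts from the new vertex-ai test fixture further down (the id value is illustrative):

// Sketch only: the two chunks gathered into `usageChunks` for
// usageMetadata { promptTokenCount: 19, candidatesTokenCount: 11, totalTokenCount: 30 }
// with no thoughtsTokenCount reported.
const usageChunks = [
  { data: 'STOP', id: 'chat_1', type: 'stop' },
  {
    data: {
      inputTextTokens: 19, // promptTokensDetails entry with modality 'TEXT'
      outputTextTokens: 11, // totalOutputTokens minus reasoning tokens
      totalInputTokens: 19, // promptTokenCount
      totalOutputTokens: 11, // candidatesTokenCount + thoughtsTokenCount (absent here)
      totalTokens: 30, // totalTokenCount
    },
    id: 'chat_1',
    type: 'usage',
  },
];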
@@ -64,31 +96,15 @@ const transformGoogleGenerativeAIStream = (
           id: context.id,
           type: 'grounding',
         },
+        ...usageChunks,
       ];
     }
 
     if (candidate.finishReason) {
       if (chunk.usageMetadata) {
-        const usage = chunk.usageMetadata;
         return [
           !!text ? { data: text, id: context?.id, type: 'text' } : undefined,
-          { data: candidate.finishReason, id: context?.id, type: 'stop' },
-          {
-            data: {
-              // TODO: Google SDK 0.24.0 don't have promptTokensDetails types
-              inputImageTokens: (usage as any).promptTokensDetails?.find(
-                (i: any) => i.modality === 'IMAGE',
-              )?.tokenCount,
-              inputTextTokens: (usage as any).promptTokensDetails?.find(
-                (i: any) => i.modality === 'TEXT',
-              )?.tokenCount,
-              totalInputTokens: usage.promptTokenCount,
-              totalOutputTokens: usage.candidatesTokenCount,
-              totalTokens: usage.totalTokenCount,
-            } as ModelTokensUsage,
-            id: context?.id,
-            type: 'usage',
-          },
+          ...usageChunks,
         ].filter(Boolean) as StreamProtocolChunk[];
       }
       return { data: candidate.finishReason, id: context?.id, type: 'stop' };
@@ -117,13 +133,21 @@
   };
 };
 
+export interface GoogleAIStreamOptions {
+  callbacks?: ChatStreamCallbacks;
+  inputStartAt?: number;
+}
+
 export const GoogleGenerativeAIStream = (
   rawStream: ReadableStream<EnhancedGenerateContentResponse>,
-  callbacks?: ChatStreamCallbacks,
+  { callbacks, inputStartAt }: GoogleAIStreamOptions = {},
 ) => {
   const streamStack: StreamContext = { id: 'chat_' + nanoid() };
 
   return rawStream
-    .pipeThrough(createSSEProtocolTransformer(transformGoogleGenerativeAIStream, streamStack))
+    .pipeThrough(
+      createTokenSpeedCalculator(transformGoogleGenerativeAIStream, { inputStartAt, streamStack }),
+    )
+    .pipeThrough(createSSEProtocolTransformer((c) => c, streamStack))
     .pipeThrough(createCallbacksTransformer(callbacks));
 };
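Note: GoogleGenerativeAIStream now takes a single GoogleAIStreamOptions object instead of positional callbacks. A minimal sketch of the new call shape (the rawStream value and the callback body are assumptions, not taken from this diff):

// Capture the request start time so the token-speed calculator can compute TPS from it.
const inputStartAt = Date.now();

const protocolStream = GoogleGenerativeAIStream(rawStream, {
  callbacks: { onText: (text) => console.log(text) },
  inputStartAt,
});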
package/src/libs/agent-runtime/utils/streams/protocol.ts
@@ -298,17 +298,37 @@ export const TOKEN_SPEED_CHUNK_ID = 'output_speed';
  */
 export const createTokenSpeedCalculator = (
   transformer: (chunk: any, stack: StreamContext) => StreamProtocolChunk | StreamProtocolChunk[],
-  { streamStack, inputStartAt }: { inputStartAt?: number; streamStack?: StreamContext } = {},
+  { inputStartAt, streamStack }: { inputStartAt?: number; streamStack?: StreamContext } = {},
 ) => {
   let outputStartAt: number | undefined;
+  let outputThinking: boolean | undefined;
 
   const process = (chunk: StreamProtocolChunk) => {
     let result = [chunk];
-    // if the chunk is the first text chunk, set as output start
-    if (!outputStartAt && chunk.type === 'text') outputStartAt = Date.now();
+    // if the chunk is the first text or reasoning chunk, set as output start
+    if (!outputStartAt && (chunk.type === 'text' || chunk.type === 'reasoning')) {
+      outputStartAt = Date.now();
+    }
+
+    /**
+     * Some providers may emit chunks whose content is an empty string before the real reasoning
+     * output, and their reasoning may be null, which would make the thinking-output check wrong,
+     * so null or empty strings are filtered out. They may also be special tokens, so the outputStartAt logic is left unchanged.
+     */
+    if (
+      outputThinking === undefined &&
+      (chunk.type === 'text' || chunk.type === 'reasoning') &&
+      typeof chunk.data === 'string' &&
+      chunk.data.length > 0
+    ) {
+      outputThinking = chunk.type === 'reasoning';
+    }
     // if the chunk is the stop chunk, set as output finish
     if (inputStartAt && outputStartAt && chunk.type === 'usage') {
-      const outputTokens = chunk.data?.totalOutputTokens || chunk.data?.outputTextTokens;
+      const totalOutputTokens = chunk.data?.totalOutputTokens || chunk.data?.outputTextTokens;
+      const reasoningTokens = chunk.data?.outputReasoningTokens || 0;
+      const outputTokens =
+        (outputThinking ?? false) ? totalOutputTokens : totalOutputTokens - reasoningTokens;
       result.push({
         data: {
           tps: (outputTokens / (Date.now() - outputStartAt)) * 1000,
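Note: the effect of the new outputThinking flag on the token-speed chunk is easiest to see with numbers. A worked example under assumed values:

// Assumed values: the usage chunk reports 120 total output tokens, 40 of them reasoning tokens,
// and no reasoning chunks were actually streamed to the client (outputThinking === false),
// so only the 80 visible tokens are counted; 4000 ms elapsed since the first output chunk.
const totalOutputTokens = 120;
const reasoningTokens = 40;
const outputThinking = false;
const elapsedMs = 4000;

const outputTokens = outputThinking ? totalOutputTokens : totalOutputTokens - reasoningTokens; // 80
const tps = (outputTokens / elapsedMs) * 1000; // 20 tokens per second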
package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts
@@ -103,10 +103,12 @@ describe('VertexAIStream', () => {
     const onCompletionMock = vi.fn();
 
     const protocolStream = VertexAIStream(mockGoogleStream, {
-      onStart: onStartMock,
-      onText: onTextMock,
-      onToolsCalling: onToolCallMock,
-      onCompletion: onCompletionMock,
+      callbacks: {
+        onStart: onStartMock,
+        onText: onTextMock,
+        onToolsCalling: onToolCallMock,
+        onCompletion: onCompletionMock,
+      },
     });
 
     const decoder = new TextDecoder();
@@ -136,6 +138,7 @@
 
   it('tool_calls', async () => {
     vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
     const rawChunks = [
       {
         candidates: [
@@ -204,10 +207,12 @@ describe('VertexAIStream', () => {
     const onCompletionMock = vi.fn();
 
     const protocolStream = VertexAIStream(mockGoogleStream, {
-      onStart: onStartMock,
-      onText: onTextMock,
-      onToolsCalling: onToolCallMock,
-      onCompletion: onCompletionMock,
+      callbacks: {
+        onStart: onStartMock,
+        onText: onTextMock,
+        onToolsCalling: onToolCallMock,
+        onCompletion: onCompletionMock,
+      },
     });
 
     const decoder = new TextDecoder();
@@ -223,10 +228,106 @@ describe('VertexAIStream', () => {
       'id: chat_1\n',
       'event: tool_calls\n',
       `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0","index":0,"type":"function"}]\n\n`,
+      'id: chat_1\n',
+      'event: stop\n',
+      'data: "STOP"\n\n',
+      'id: chat_1\n',
+      'event: usage\n',
+      'data: {"outputTextTokens":9,"totalInputTokens":95,"totalOutputTokens":9,"totalTokens":104}\n\n',
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
     expect(onToolCallMock).toHaveBeenCalledTimes(1);
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });
+
+  it('should handle stop with content', async () => {
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+    const data = [
+      {
+        candidates: [
+          {
+            content: { parts: [{ text: '234' }], role: 'model' },
+            safetyRatings: [
+              { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
+              { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
+              { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
+              { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
+            ],
+          },
+        ],
+        text: () => '234',
+        usageMetadata: {
+          promptTokenCount: 20,
+          totalTokenCount: 20,
+          promptTokensDetails: [{ modality: 'TEXT', tokenCount: 20 }],
+        },
+        modelVersion: 'gemini-2.0-flash-exp-image-generation',
+      },
+      {
+        text: () => '567890\n',
+        candidates: [
+          {
+            content: { parts: [{ text: '567890\n' }], role: 'model' },
+            finishReason: 'STOP',
+            safetyRatings: [
+              { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
+              { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
+              { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
+              { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
+            ],
+          },
+        ],
+        usageMetadata: {
+          promptTokenCount: 19,
+          candidatesTokenCount: 11,
+          totalTokenCount: 30,
+          promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
+          candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
+        },
+        modelVersion: 'gemini-2.0-flash-exp-image-generation',
+      },
+    ];
+
+    const mockGoogleStream = new ReadableStream({
+      start(controller) {
+        data.forEach((item) => {
+          controller.enqueue(item);
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = VertexAIStream(mockGoogleStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(
+      [
+        'id: chat_1',
+        'event: text',
+        'data: "234"\n',
+
+        'id: chat_1',
+        'event: text',
+        `data: "567890\\n"\n`,
+        // stop
+        'id: chat_1',
+        'event: stop',
+        `data: "STOP"\n`,
+        // usage
+        'id: chat_1',
+        'event: usage',
+        `data: {"inputTextTokens":19,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":11,"totalTokens":30}\n`,
+      ].map((i) => i + '\n'),
+    );
+  });
 });
package/src/libs/agent-runtime/utils/streams/vertex-ai.ts
@@ -1,27 +1,58 @@
 import { EnhancedGenerateContentResponse, GenerateContentResponse } from '@google/generative-ai';
 
+import { ModelTokensUsage } from '@/types/message';
 import { nanoid } from '@/utils/uuid';
 
-import { ChatStreamCallbacks } from '../../types';
+import { type GoogleAIStreamOptions } from './google-ai';
 import {
   StreamContext,
   StreamProtocolChunk,
   createCallbacksTransformer,
   createSSEProtocolTransformer,
+  createTokenSpeedCalculator,
   generateToolCallId,
 } from './protocol';
 
 const transformVertexAIStream = (
   chunk: GenerateContentResponse,
-  stack: StreamContext,
-): StreamProtocolChunk => {
+  context: StreamContext,
+): StreamProtocolChunk | StreamProtocolChunk[] => {
   // maybe need another structure to add support for multiple choices
-  const candidates = chunk.candidates;
+  const candidate = chunk.candidates?.[0];
+  const usage = chunk.usageMetadata;
+  const usageChunks: StreamProtocolChunk[] = [];
+  if (candidate?.finishReason && usage) {
+    const outputReasoningTokens = (usage as any).thoughtsTokenCount || undefined;
+    const totalOutputTokens = (usage.candidatesTokenCount ?? 0) + (outputReasoningTokens ?? 0);
+
+    usageChunks.push(
+      { data: candidate.finishReason, id: context?.id, type: 'stop' },
+      {
+        data: {
+          // TODO: Google SDK 0.24.0 don't have promptTokensDetails types
+          inputImageTokens: (usage as any).promptTokensDetails?.find(
+            (i: any) => i.modality === 'IMAGE',
+          )?.tokenCount,
+          inputTextTokens: (usage as any).promptTokensDetails?.find(
+            (i: any) => i.modality === 'TEXT',
+          )?.tokenCount,
+          outputReasoningTokens,
+          outputTextTokens: totalOutputTokens - (outputReasoningTokens ?? 0),
+          totalInputTokens: usage.promptTokenCount,
+          totalOutputTokens,
+          totalTokens: usage.totalTokenCount,
+        } as ModelTokensUsage,
+        id: context?.id,
+        type: 'usage',
+      },
+    );
+  }
 
+  const candidates = chunk.candidates;
   if (!candidates)
     return {
       data: '',
-      id: stack?.id,
+      id: context?.id,
       type: 'text',
     };
 
@@ -32,44 +63,58 @@ const transformVertexAIStream = (
     if (part.functionCall) {
       const functionCall = part.functionCall;
 
-      return {
-        data: [
-          {
-            function: {
-              arguments: JSON.stringify(functionCall.args),
-              name: functionCall.name,
+      return [
+        {
+          data: [
+            {
+              function: {
+                arguments: JSON.stringify(functionCall.args),
+                name: functionCall.name,
+              },
+              id: generateToolCallId(0, functionCall.name),
+              index: 0,
+              type: 'function',
             },
-            id: generateToolCallId(0, functionCall.name),
-            index: 0,
-            type: 'function',
-          },
-        ],
-        id: stack?.id,
-        type: 'tool_calls',
-      };
+          ],
+          id: context?.id,
+          type: 'tool_calls',
+        },
+        ...usageChunks,
+      ];
+    }
+
+    if (item.finishReason) {
+      if (chunk.usageMetadata) {
+        return [
+          !!part.text ? { data: part.text, id: context?.id, type: 'text' } : undefined,
+          ...usageChunks,
+        ].filter(Boolean) as StreamProtocolChunk[];
+      }
+      return { data: item.finishReason, id: context?.id, type: 'stop' };
     }
 
     return {
       data: part.text,
-      id: stack?.id,
+      id: context?.id,
       type: 'text',
     };
   }
 
   return {
     data: '',
-    id: stack?.id,
+    id: context?.id,
     type: 'stop',
   };
 };
 
 export const VertexAIStream = (
   rawStream: ReadableStream<EnhancedGenerateContentResponse>,
-  callbacks?: ChatStreamCallbacks,
+  { callbacks, inputStartAt }: GoogleAIStreamOptions = {},
 ) => {
   const streamStack: StreamContext = { id: 'chat_' + nanoid() };
 
   return rawStream
-    .pipeThrough(createSSEProtocolTransformer(transformVertexAIStream, streamStack))
+    .pipeThrough(createTokenSpeedCalculator(transformVertexAIStream, { inputStartAt, streamStack }))
+    .pipeThrough(createSSEProtocolTransformer((c) => c, streamStack))
    .pipeThrough(createCallbacksTransformer(callbacks));
 };
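Note: VertexAIStream reuses GoogleAIStreamOptions, so existing call sites that passed callbacks positionally need the same wrapping shown in the updated tests above. A minimal sketch (the stream and callback are assumptions, not taken from this diff):

const protocolStream = VertexAIStream(rawStream, {
  callbacks: { onText: (text) => console.log(text) },
  inputStartAt: Date.now(), // optional; enables the appended token-speed chunk
});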
package/src/libs/trpc/lambda/context.ts
@@ -57,6 +57,13 @@ export type LambdaContext = Awaited<ReturnType<typeof createContextInner>>;
  * @link https://trpc.io/docs/v11/context
  */
 export const createLambdaContext = async (request: NextRequest): Promise<LambdaContext> => {
+  // we have a special header to debug the api endpoint in development mode
+  // IT WON'T GO INTO PRODUCTION ANYMORE
+  const isDebugApi = request.headers.get('lobe-auth-dev-backend-api') === '1';
+  if (process.env.NODE_ENV === 'development' && isDebugApi) {
+    return { userId: process.env.MOCK_DEV_USER_ID };
+  }
+
   log('createLambdaContext called for request');
   // for API-response caching see https://trpc.io/docs/v11/caching
 
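Note: the new branch above only runs in development. A hedged sketch of how a request might opt into the mocked user context (the endpoint path below is hypothetical; only the header name and the MOCK_DEV_USER_ID env var come from this diff):

// Development only: with NODE_ENV=development and MOCK_DEV_USER_ID set on the server,
// a request carrying this header receives the mocked userId instead of real auth.
const res = await fetch('/trpc/lambda/someProcedure', {
  headers: { 'lobe-auth-dev-backend-api': '1' },
});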
package/src/prompts/files/file.ts
@@ -1,9 +1,11 @@
 import { ChatFileItem } from '@/types/message';
 
-const filePrompt = (item: ChatFileItem, addUrl: boolean) =>
-  addUrl
-    ? `<file id="${item.id}" name="${item.name}" type="${item.fileType}" size="${item.size}" url="${item.url}"></file>`
-    : `<file id="${item.id}" name="${item.name}" type="${item.fileType}" size="${item.size}"></file>`;
+const filePrompt = (item: ChatFileItem, addUrl: boolean) => {
+  const content = item.content || '';
+  return addUrl
+    ? `<file id="${item.id}" name="${item.name}" type="${item.fileType}" size="${item.size}" url="${item.url}">${content}</file>`
+    : `<file id="${item.id}" name="${item.name}" type="${item.fileType}" size="${item.size}">${content}</file>`;
+};
 
 export const filePrompts = (fileList: ChatFileItem[], addUrl: boolean) => {
   if (fileList.length === 0) return '';
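Note: with the change above, a file item's extracted content is now embedded in the body of the <file> tag rather than the tag being self-closing. An illustrative result (all field values are made up):

// Example output of filePrompt(item, true) for a hypothetical item whose loader extracted text:
const prompt =
  '<file id="file_123" name="report.pdf" type="pdf" size="20480" url="https://example.com/report.pdf">' +
  'First-page text extracted by the file loader…' +
  '</file>';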