@lobehub/chat 1.96.8 → 1.96.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
  # Changelog
 
+ ### [Version 1.96.10](https://github.com/lobehub/lobe-chat/compare/v1.96.9...v1.96.10)
+
+ <sup>Released on **2025-06-28**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix desktop chunk issue.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix desktop chunk issue, closes [#8280](https://github.com/lobehub/lobe-chat/issues/8280) ([c193e65](https://github.com/lobehub/lobe-chat/commit/c193e65))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.96.9](https://github.com/lobehub/lobe-chat/compare/v1.96.8...v1.96.9)
+
+ <sup>Released on **2025-06-23**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Google Gemini tools declarations.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Google Gemini tools declarations, closes [#8256](https://github.com/lobehub/lobe-chat/issues/8256) ([08f5d73](https://github.com/lobehub/lobe-chat/commit/08f5d73))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.96.8](https://github.com/lobehub/lobe-chat/compare/v1.96.7...v1.96.8)
 
  <sup>Released on **2025-06-23**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
  [
+ {
+ "children": {
+ "fixes": [
+ "Fix desktop chunk issue."
+ ]
+ },
+ "date": "2025-06-28",
+ "version": "1.96.10"
+ },
+ {
+ "children": {
+ "fixes": [
+ "Google Gemini tools declarations."
+ ]
+ },
+ "date": "2025-06-23",
+ "version": "1.96.9"
+ },
  {
  "children": {
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.96.8",
+ "version": "1.96.10",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -78,13 +78,6 @@ export const generateViewport = async (props: DynamicLayoutProps): ResolvingView
  };
 
  export const generateStaticParams = () => {
- // if in dev mode or in vercel preview mode, use ISR to speed up
- const isVercelPreview = process.env.VERCEL === '1' && process.env.VERCEL_ENV !== 'production';
-
- if (process.env.NODE_ENV !== 'production' || isVercelPreview) {
- return [];
- }
-
  const themes: ThemeAppearance[] = ['dark', 'light'];
  const mobileOptions = isDesktop ? [false] : [true, false];
  // only static for several pages, others go dynamic
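
Context for the removed guard: `generateStaticParams` previously bailed out with an empty list in dev and Vercel-preview runs, which appears to be the desktop chunk issue this release fixes (#8280). A minimal sketch of the cartesian product the function now always produces; the `variants` key and its encoding are illustrative, not necessarily the route's actual param shape:

```ts
// Hypothetical reconstruction: enumerate every theme/mobile combination so all
// variants are statically rendered, regardless of environment.
type ThemeAppearance = 'dark' | 'light';

const buildStaticParams = (isDesktop: boolean) => {
  const themes: ThemeAppearance[] = ['dark', 'light'];
  const mobileOptions = isDesktop ? [false] : [true, false];

  return themes.flatMap((theme) =>
    mobileOptions.map((isMobile) => ({ variants: `${theme}__${isMobile ? 1 : 0}` })),
  );
};

// buildStaticParams(true)  → [{ variants: 'dark__0' }, { variants: 'light__0' }]
// buildStaticParams(false) → four entries, one per theme/mobile pair
```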
@@ -560,6 +560,58 @@ describe('LobeGoogleAI', () => {
  },
  ]);
  });
+
+ it('should correctly convert function response message', async () => {
+ const messages: OpenAIChatMessage[] = [
+ {
+ content: '',
+ role: 'assistant',
+ tool_calls: [
+ {
+ id: 'call_1',
+ function: {
+ name: 'get_current_weather',
+ arguments: JSON.stringify({ location: 'London', unit: 'celsius' }),
+ },
+ type: 'function',
+ },
+ ],
+ },
+ {
+ content: '{"success":true,"data":{"temperature":"14°C"}}',
+ name: 'get_current_weather',
+ role: 'tool',
+ tool_call_id: 'call_1',
+ },
+ ];
+
+ const contents = await instance['buildGoogleMessages'](messages);
+ expect(contents).toHaveLength(2);
+ expect(contents).toEqual([
+ {
+ parts: [
+ {
+ functionCall: {
+ args: { location: 'London', unit: 'celsius' },
+ name: 'get_current_weather',
+ },
+ },
+ ],
+ role: 'model',
+ },
+ {
+ parts: [
+ {
+ functionResponse: {
+ name: 'get_current_weather',
+ response: { result: '{"success":true,"data":{"temperature":"14°C"}}' },
+ },
+ },
+ ],
+ role: 'user',
+ },
+ ]);
+ });
  });
 
  describe('buildGoogleTools', () => {
@@ -690,7 +742,7 @@ describe('LobeGoogleAI', () => {
 
  const converted = await instance['convertOAIMessagesToGoogleMessage'](message);
  expect(converted).toEqual({
- role: 'function',
+ role: 'model',
  parts: [
  {
  functionCall: {
@@ -353,6 +353,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  system: system_message?.content,
  };
  }
+
  private convertContentToGooglePart = async (
  content: UserMessageContentPart,
  ): Promise<Part | undefined> => {
@@ -399,6 +400,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
  private convertOAIMessagesToGoogleMessage = async (
  message: OpenAIChatMessage,
+ toolCallNameMap?: Map<string, string>,
  ): Promise<Content> => {
  const content = message.content as string | UserMessageContentPart[];
  if (!!message.tool_calls) {
@@ -409,10 +411,28 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  name: tool.function.name,
  },
  })),
- role: 'function',
+ role: 'model',
  };
  }
 
+ // Convert a tool_call result into a functionResponse part
+ if (message.role === 'tool' && toolCallNameMap && message.tool_call_id) {
+ const functionName = toolCallNameMap.get(message.tool_call_id);
+ if (functionName) {
+ return {
+ parts: [
+ {
+ functionResponse: {
+ name: functionName,
+ response: { result: message.content },
+ },
+ },
+ ],
+ role: 'user',
+ };
+ }
+ }
+
  const getParts = async () => {
  if (typeof content === 'string') return [{ text: content }];
 
@@ -430,9 +450,20 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
  // convert messages from the OpenAI format to Google GenAI SDK
  private buildGoogleMessages = async (messages: OpenAIChatMessage[]): Promise<Content[]> => {
+ const toolCallNameMap = new Map<string, string>();
+ messages.forEach((message) => {
+ if (message.role === 'assistant' && message.tool_calls) {
+ message.tool_calls.forEach((toolCall) => {
+ if (toolCall.type === 'function') {
+ toolCallNameMap.set(toolCall.id, toolCall.function.name);
+ }
+ });
+ }
+ });
+
  const pools = messages
  .filter((message) => message.role !== 'function')
- .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg));
+ .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg, toolCallNameMap));
 
  return Promise.all(pools);
  };
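
Taken together, the two hunks above implement the Gemini fix: `buildGoogleMessages` first records which `tool_call` id maps to which function name, then `convertOAIMessagesToGoogleMessage` uses that map to emit a `functionResponse` part for each tool result. A self-contained sketch of that two-pass conversion, with types trimmed to the fields the code actually reads:

```ts
// Sketch only: mirrors the toolCallNameMap logic from the diff above.
type OAIMessage =
  | { role: 'assistant'; content: string; tool_calls?: { id: string; type: 'function'; function: { name: string } }[] }
  | { role: 'tool'; content: string; tool_call_id: string };

const toolResultsToGeminiContents = (messages: OAIMessage[]) => {
  // Pass 1: assistant tool_calls tell us which id belongs to which function.
  const names = new Map<string, string>();
  for (const m of messages) {
    if (m.role === 'assistant') m.tool_calls?.forEach((t) => names.set(t.id, t.function.name));
  }

  // Pass 2: each tool result becomes a user-role Content with a functionResponse part.
  return messages.flatMap((m) =>
    m.role === 'tool' && names.has(m.tool_call_id)
      ? [
          {
            parts: [{ functionResponse: { name: names.get(m.tool_call_id)!, response: { result: m.content } } }],
            role: 'user' as const,
          },
        ]
      : [],
  );
};
```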
@@ -493,12 +524,18 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  ): GoogleFunctionCallTool[] | undefined {
  // Tools such as googleSearch currently cannot be combined with other FunctionCall tools
  if (payload?.messages?.some((m) => m.tool_calls?.length)) {
- return; // If the history already contains function calls, do not inject any Tools
+ return this.buildFunctionDeclarations(tools);
  }
  if (payload?.enabledSearch) {
  return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
  }
 
+ return this.buildFunctionDeclarations(tools);
+ }
+
+ private buildFunctionDeclarations(
+ tools: ChatCompletionTool[] | undefined,
+ ): GoogleFunctionCallTool[] | undefined {
  if (!tools || tools.length === 0) return;
 
  return [
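
The behavioral change in `buildGoogleTools` is easiest to see as decision order. Before, any history containing tool calls returned `undefined`, so Gemini lost its function declarations on follow-up turns (#8256); now declarations are built in every branch except the search-only one. A condensed sketch under those assumptions, with SDK types elided and `declarations` standing in for `buildFunctionDeclarations`:

```ts
// Condensed decision order after the fix.
const declarations = (tools?: object[]) => (tools && tools.length > 0 ? tools : undefined);

function pickGoogleTools(historyHasToolCalls: boolean, enabledSearch: boolean, tools?: object[]) {
  // Previously: `if (historyHasToolCalls) return;` — declarations vanished mid-conversation.
  if (historyHasToolCalls) return declarations(tools);
  // googleSearch still cannot be combined with function declarations.
  if (enabledSearch) return [{ googleSearch: {} }];
  return declarations(tools);
}
```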
@@ -7,7 +7,7 @@ import { GoogleGenerativeAIStream } from './google-ai';
 
  describe('GoogleGenerativeAIStream', () => {
  it('should transform Google Generative AI stream to protocol stream', async () => {
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
 
  const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
  ({
@@ -59,7 +59,7 @@ describe('GoogleGenerativeAIStream', () => {
  // tool call
  'id: chat_1\n',
  'event: tool_calls\n',
- `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0","index":0,"type":"function"}]\n\n`,
+ `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0_abcd1234","index":0,"type":"function"}]\n\n`,
 
  // text
  'id: chat_1\n',
@@ -30,7 +30,7 @@ describe('OllamaStream', () => {
  });
 
  const protocolStream = OllamaStream(mockOllamaStream);
-
+
  const decoder = new TextDecoder();
  const chunks = [];
 
@@ -62,7 +62,7 @@ describe('OllamaStream', () => {
  'id: chat_2',
  'event: stop',
  `data: "finished"\n`,
- ].map((line) => `${line}\n`)
+ ].map((line) => `${line}\n`),
  );
  });
 
@@ -116,7 +116,7 @@ describe('OllamaStream', () => {
  });
 
  it('tools use', async () => {
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
 
  const mockOllamaStream = new ReadableStream<ChatResponse>({
  start(controller) {
@@ -178,7 +178,7 @@ describe('OllamaStream', () => {
  [
  'id: chat_1',
  'event: tool_calls',
- `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0","index":0,"type":"function"}]\n`,
+ `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n`,
  'id: chat_1',
  'event: stop',
  `data: "finished"\n`,
@@ -1,5 +1,6 @@
  import { CitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message';
  import { safeParseJSON } from '@/utils/safeParseJSON';
+ import { nanoid } from '@/utils/uuid';
 
  import { AgentRuntimeErrorType } from '../../error';
  import { parseToolCalls } from '../../helpers';
@@ -98,7 +99,7 @@ export interface StreamProtocolToolCallChunk {
  }
 
  export const generateToolCallId = (index: number, functionName?: string) =>
- `${functionName || 'unknown_tool_call'}_${index}`;
+ `${functionName || 'unknown_tool_call'}_${index}_${nanoid()}`;
 
  const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
  for await (const response of stream) {
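
Why the nanoid suffix matters: the old ID was derived only from the function name and stream index, so a conversation that called the same tool twice produced colliding IDs — which is exactly what the updated test fixtures (`..._0_abcd1234`) now assert against. A runnable sketch of the difference; `nanoid` is imported here from the public package as a stand-in for `@/utils/uuid`:

```ts
import { nanoid } from 'nanoid'; // stand-in for '@/utils/uuid'

const oldId = (index: number, name?: string) => `${name || 'unknown_tool_call'}_${index}`;
const newId = (index: number, name?: string) => `${name || 'unknown_tool_call'}_${index}_${nanoid()}`;

// Two calls to the same tool at the same index: identical before, unique after.
console.log(oldId(0, 'get_current_weather') === oldId(0, 'get_current_weather')); // true
console.log(newId(0, 'get_current_weather') === newId(0, 'get_current_weather')); // false
```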
@@ -137,7 +137,7 @@ describe('VertexAIStream', () => {
  });
 
  it('tool_calls', async () => {
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
 
  const rawChunks = [
  {
@@ -227,7 +227,7 @@ describe('VertexAIStream', () => {
  // text
  'id: chat_1\n',
  'event: tool_calls\n',
- `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0","index":0,"type":"function"}]\n\n`,
+ `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n\n`,
  'id: chat_1\n',
  'event: stop\n',
  'data: "STOP"\n\n',
@@ -4,9 +4,13 @@ import urlJoin from 'url-join';
 
  import { serverDBEnv } from '@/config/db';
  import { JWTPayload, LOBE_CHAT_AUTH_HEADER } from '@/const/auth';
+ import { isDesktop } from '@/const/version';
  import { appEnv } from '@/envs/app';
+ import { createAsyncCallerFactory } from '@/libs/trpc/async';
+ import { createAsyncContextInner } from '@/libs/trpc/async/context';
  import { KeyVaultsGateKeeper } from '@/server/modules/KeyVaultsEncrypt';
 
+ import { asyncRouter } from './index';
  import type { AsyncRouter } from './index';
 
  export const createAsyncServerClient = async (userId: string, payload: JWTPayload) => {
@@ -30,3 +34,77 @@ export const createAsyncServerClient = async (userId: string, payload: JWTPayloa
  ],
  });
  };
+
+ /**
+ * Helper used only to infer the caller type; it never actually invokes
+ * createAsyncCallerFactory, because calling it here would throw: asyncRouter is not initialized yet.
+ */
+ const helperFunc = () => {
+ const dummyCreateCaller = createAsyncCallerFactory(asyncRouter);
+ return {} as unknown as ReturnType<typeof dummyCreateCaller>;
+ };
+
+ export type UnifiedAsyncCaller = ReturnType<typeof helperFunc>;
+
+ interface CreateCallerOptions {
+ jwtPayload: any;
+ userId: string;
+ }
+
+ /**
+ * Factory for creating a caller that works in both desktop server and remote server environments.
+ * Usage is unified into the caller.a.b() call style.
+ */
+ export const createAsyncCaller = async (
+ options: CreateCallerOptions,
+ ): Promise<UnifiedAsyncCaller> => {
+ const { userId, jwtPayload } = options;
+
+ if (isDesktop) {
+ // Desktop environment: invoke the procedure directly on the same thread via the caller
+ const asyncContext = await createAsyncContextInner({
+ jwtPayload,
+ // see src/libs/trpc/async/asyncAuth.ts
+ secret: serverDBEnv.KEY_VAULTS_SECRET,
+ userId,
+ });
+
+ const createCaller = createAsyncCallerFactory(asyncRouter);
+ const caller = createCaller(asyncContext);
+
+ return caller;
+ }
+ // Non-desktop environment: use the HTTP client.
+ // The http client is invoked as client.a.b.mutate(); we unify this into the caller.a.b() style.
+ else {
+ const httpClient = await createAsyncServerClient(userId, jwtPayload);
+ const createRecursiveProxy = (client: any, path: string[]): any => {
+ // The target is a dummy function, so that 'apply' can be triggered.
+ return new Proxy(() => {}, {
+ apply: (target, thisArg, args) => {
+ // 'apply' is triggered by the function call `(...)`.
+ // The `path` at this point is the full path to the procedure.
+
+ // Traverse the original httpClient to get the actual procedure object.
+ const procedure = path.reduce((obj, key) => (obj ? obj[key] : undefined), client);
+
+ if (procedure && typeof procedure.mutate === 'function') {
+ // If we found a valid procedure, call its mutate method.
+ return procedure.mutate(...args);
+ } else {
+ // This should not happen if the call path is correct.
+ const message = `Procedure not found or not valid at path: ${path.join('.')}`;
+ throw new Error(message);
+ }
+ },
+ get: (_, property: string) => {
+ // When a property is accessed, we just extend the path and return a new proxy.
+ // This handles `caller.file.parseFileToChunks`
+ if (property === 'then') return undefined; // Prevent async/await issues
+ return createRecursiveProxy(client, [...path, property as string]);
+ },
+ });
+ };
+
+ return createRecursiveProxy(httpClient, []);
+ }
+ };
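
A hedged usage sketch of the unified surface this factory exposes; the `file.embeddingChunks` path appears later in this diff, while the payload values are placeholders:

```ts
declare const payload: any; // JWT payload supplied by the caller's auth layer

const caller = await createAsyncCaller({ jwtPayload: payload, userId: 'user_1' });

// Desktop: a direct in-process tRPC call. Remote: the recursive proxy translates
// this into httpClient.file.embeddingChunks.mutate(...). The call site reads the same.
await caller.file.embeddingChunks({ fileId: 'file_1', taskId: 'task_1' });
```

Note the `then` guard in the proxy's `get` trap: returning `undefined` for that property is what keeps `await createAsyncCaller(...)` from mistaking the proxy itself for a thenable and recursing into it.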
@@ -11,4 +11,5 @@ export const asyncRouter = router({
 
  export type AsyncRouter = typeof asyncRouter;
 
- export * from './caller';
+ export type { UnifiedAsyncCaller } from './caller';
+ export { createAsyncCaller, createAsyncServerClient } from './caller';
@@ -15,7 +15,7 @@ import {
  } from '@/database/server/models/ragEval';
  import { authedProcedure, router } from '@/libs/trpc/lambda';
  import { keyVaults, serverDatabase } from '@/libs/trpc/lambda/middleware';
- import { createAsyncServerClient } from '@/server/routers/async';
+ import { createAsyncCaller } from '@/server/routers/async';
  import { FileService } from '@/server/services/file';
  import {
  EvalDatasetRecord,
@@ -201,15 +201,18 @@ export const ragEvalRouter = router({
  })),
  );
 
- const asyncCaller = await createAsyncServerClient(ctx.userId, ctx.jwtPayload);
+ const asyncCaller = await createAsyncCaller({
+ userId: ctx.userId,
+ jwtPayload: ctx.jwtPayload,
+ });
 
  await ctx.evaluationModel.update(input.id, { status: EvalEvaluationStatus.Processing });
  try {
  await pMap(
  evalRecords,
  async (record) => {
- asyncCaller.ragEval.runRecordEvaluation
- .mutate({ evalRecordId: record.id })
+ asyncCaller.ragEval
+ .runRecordEvaluation({ evalRecordId: record.id })
  .catch(async (e) => {
  await ctx.evaluationModel.update(input.id, { status: EvalEvaluationStatus.Error });
 
@@ -3,7 +3,7 @@ import { AsyncTaskModel } from '@/database/models/asyncTask';
  import { FileModel } from '@/database/models/file';
  import { serverDB } from '@/database/server';
  import { ChunkContentParams, ContentChunk } from '@/server/modules/ContentChunk';
- import { createAsyncServerClient } from '@/server/routers/async';
+ import { createAsyncCaller } from '@/server/routers/async';
  import {
  AsyncTaskError,
  AsyncTaskErrorType,
@@ -43,11 +43,11 @@ export class ChunkService {
 
  await this.fileModel.update(fileId, { embeddingTaskId: asyncTaskId });
 
- const asyncCaller = await createAsyncServerClient(this.userId, payload);
+ const asyncCaller = await createAsyncCaller({ jwtPayload: payload, userId: this.userId });
 
  // trigger embedding task asynchronously
  try {
- await asyncCaller.file.embeddingChunks.mutate({ fileId, taskId: asyncTaskId });
+ await asyncCaller.file.embeddingChunks({ fileId, taskId: asyncTaskId });
  } catch (e) {
  console.error('[embeddingFileChunks] error:', e);
 
@@ -82,22 +82,20 @@ export class ChunkService {
 
  await this.fileModel.update(fileId, { chunkTaskId: asyncTaskId });
 
- const asyncCaller = await createAsyncServerClient(this.userId, payload);
+ const asyncCaller = await createAsyncCaller({ jwtPayload: payload, userId: this.userId });
 
  // trigger parse file task asynchronously
- asyncCaller.file.parseFileToChunks
- .mutate({ fileId: fileId, taskId: asyncTaskId })
- .catch(async (e) => {
- console.error('[ParseFileToChunks] error:', e);
-
- await this.asyncTaskModel.update(asyncTaskId, {
- error: new AsyncTaskError(
- AsyncTaskErrorType.TaskTriggerError,
- 'trigger chunk embedding async task error. Please make sure the APP_URL is available from your server. You can check the proxy config or WAF blocking',
- ),
- status: AsyncTaskStatus.Error,
- });
+ asyncCaller.file.parseFileToChunks({ fileId: fileId, taskId: asyncTaskId }).catch(async (e) => {
+ console.error('[ParseFileToChunks] error:', e);
+
+ await this.asyncTaskModel.update(asyncTaskId, {
+ error: new AsyncTaskError(
+ AsyncTaskErrorType.TaskTriggerError,
+ 'trigger chunk embedding async task error. Please make sure the APP_URL is available from your server. You can check the proxy config or WAF blocking',
+ ),
+ status: AsyncTaskStatus.Error,
+ });
  });
+ });
 
  return asyncTaskId;
  }
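
One nuance in ChunkService worth noting: `embeddingChunks` is awaited inside try/catch, while `parseFileToChunks` is deliberately fire-and-forget, with a `.catch` that records the failure on the task. A minimal sketch of that pattern under assumed names (`triggerTask` and `markTaskFailed` are illustrative stand-ins for `asyncCaller.file.parseFileToChunks` and `this.asyncTaskModel.update`):

```ts
declare function triggerTask(args: { fileId: string; taskId: string }): Promise<void>;
declare function markTaskFailed(taskId: string, reason: string): Promise<void>;

function startParse(fileId: string, taskId: string): string {
  // Not awaited: the caller gets the task id back immediately, while a rejected
  // trigger still marks the async task as errored instead of failing the request.
  triggerTask({ fileId, taskId }).catch(async (e) => {
    console.error('[ParseFileToChunks] error:', e);
    await markTaskFailed(taskId, 'trigger chunk embedding async task error');
  });

  return taskId;
}
```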