@lobehub/lobehub 2.0.0-next.43 → 2.0.0-next.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/package.json +1 -1
  4. package/packages/conversation-flow/src/__tests__/parse.test.ts +23 -0
  5. package/packages/model-runtime/src/core/contextBuilders/openai.test.ts +29 -5
  6. package/packages/model-runtime/src/core/contextBuilders/openai.ts +17 -3
  7. package/packages/model-runtime/src/providers/azureai/index.ts +34 -2
  8. package/packages/model-runtime/src/types/chat.ts +4 -0
  9. package/packages/utils/src/compass.ts +11 -11
  10. package/packages/utils/src/fetch/headers.ts +3 -3
  11. package/packages/utils/src/fetch/request.ts +5 -5
  12. package/packages/utils/src/object.ts +3 -3
  13. package/packages/utils/src/server/__tests__/response.test.ts +79 -0
  14. package/packages/utils/src/server/index.ts +1 -0
  15. package/packages/utils/src/server/response.ts +110 -0
  16. package/src/app/(backend)/webapi/stt/openai/route.ts +0 -2
  17. package/src/app/(backend)/webapi/tts/edge/route.ts +8 -2
  18. package/src/app/(backend)/webapi/tts/microsoft/route.ts +8 -2
  19. package/src/app/(backend)/webapi/tts/openai/route.ts +15 -3
  20. package/src/server/routers/mobile/index.ts +14 -2
  21. package/src/server/utils/createSpeechResponse.ts +55 -0
  22. package/src/services/discover.ts +12 -12
  23. package/src/services/message/index.ts +4 -1
  24. package/src/store/chat/agents/createAgentExecutors.ts +2 -1
  25. package/src/store/chat/slices/portal/selectors.test.ts +7 -7
  26. package/src/store/chat/slices/portal/selectors.ts +2 -2
  27. package/src/utils/server/routeVariants.ts +10 -10
  28. package/src/app/(backend)/webapi/chat/azureai/route.test.ts +0 -25
  29. package/src/app/(backend)/webapi/chat/azureai/route.ts +0 -6
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 2.0.0-next.45](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.44...v2.0.0-next.45)
6
+
7
+ <sup>Released on **2025-11-10**</sup>
8
+
9
+ #### ♻ Code Refactoring
10
+
11
+ - **misc**: Edge to node runtime.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Code refactoring
19
+
20
+ - **misc**: Edge to node runtime, closes [#10149](https://github.com/lobehub/lobe-chat/issues/10149) ([2f4c25d](https://github.com/lobehub/lobe-chat/commit/2f4c25d))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ## [Version 2.0.0-next.44](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.43...v2.0.0-next.44)
31
+
32
+ <sup>Released on **2025-11-10**</sup>
33
+
34
+ #### 🐛 Bug Fixes
35
+
36
+ - **misc**: Fix reasoning issue with claude and Response API thinking.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's fixed
44
+
45
+ - **misc**: Fix reasoning issue with claude and Response API thinking, closes [#10147](https://github.com/lobehub/lobe-chat/issues/10147) ([cf6bd53](https://github.com/lobehub/lobe-chat/commit/cf6bd53))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ## [Version 2.0.0-next.43](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.42...v2.0.0-next.43)
6
56
 
7
57
  <sup>Released on **2025-11-09**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "improvements": [
5
+ "Edge to node runtime."
6
+ ]
7
+ },
8
+ "date": "2025-11-10",
9
+ "version": "2.0.0-next.45"
10
+ },
11
+ {
12
+ "children": {
13
+ "fixes": [
14
+ "Fix reasoning issue with claude and Response API thinking."
15
+ ]
16
+ },
17
+ "date": "2025-11-10",
18
+ "version": "2.0.0-next.44"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/lobehub",
3
- "version": "2.0.0-next.43",
3
+ "version": "2.0.0-next.45",
4
4
  "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -106,4 +106,27 @@ describe('parse', () => {
106
106
  expect(serializeParseResult(result)).toEqual(outputs.complexScenario);
107
107
  });
108
108
  });
109
+
110
+ describe('Performance', () => {
111
+ it('should parse 10000 items within 50ms', () => {
112
+ // Generate 10000 messages as flat siblings (no deep nesting to avoid stack overflow)
113
+ // This simulates a more realistic scenario where messages are not deeply nested
114
+ const largeInput = Array.from({ length: 10000 }, (_, i) => ({
115
+ id: `msg-${i}`,
116
+ role: i % 2 === 0 ? ('user' as const) : ('assistant' as const),
117
+ content: `Message ${i}`,
118
+ parentId: undefined, // All messages at the same level
119
+ createdAt: Date.now() + i,
120
+ }));
121
+
122
+ const startTime = performance.now();
123
+ const result = parse(largeInput as any[]);
124
+ const endTime = performance.now();
125
+
126
+ const executionTime = endTime - startTime;
127
+
128
+ expect(result.flatList.length).toBeGreaterThan(0);
129
+ expect(executionTime).toBeLessThan(50);
130
+ });
131
+ });
109
132
  });
@@ -1,6 +1,7 @@
1
1
  import OpenAI from 'openai';
2
2
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
3
 
4
+ import { OpenAIChatMessage } from '../../types';
4
5
  import { imageUrlToBase64 } from '../../utils/imageToBase64';
5
6
  import { parseDataUri } from '../../utils/uriParser';
6
7
  import {
@@ -151,7 +152,7 @@ describe('convertOpenAIMessages', () => {
151
152
 
152
153
  describe('convertOpenAIResponseInputs', () => {
153
154
  it('应该正确转换普通文本消息', async () => {
154
- const messages: OpenAI.ChatCompletionMessageParam[] = [
155
+ const messages: OpenAIChatMessage[] = [
155
156
  { role: 'user', content: 'Hello' },
156
157
  { role: 'assistant', content: 'Hi there!' },
157
158
  ];
@@ -165,7 +166,7 @@ describe('convertOpenAIResponseInputs', () => {
165
166
  });
166
167
 
167
168
  it('应该正确转换带有工具调用的消息', async () => {
168
- const messages: OpenAI.ChatCompletionMessageParam[] = [
169
+ const messages: OpenAIChatMessage[] = [
169
170
  {
170
171
  role: 'assistant',
171
172
  content: '',
@@ -195,7 +196,7 @@ describe('convertOpenAIResponseInputs', () => {
195
196
  });
196
197
 
197
198
  it('应该正确转换工具响应消息', async () => {
198
- const messages: OpenAI.ChatCompletionMessageParam[] = [
199
+ const messages: OpenAIChatMessage[] = [
199
200
  {
200
201
  role: 'tool',
201
202
  content: 'Function result',
@@ -215,7 +216,7 @@ describe('convertOpenAIResponseInputs', () => {
215
216
  });
216
217
 
217
218
  it('应该正确转换包含图片的消息', async () => {
218
- const messages: OpenAI.ChatCompletionMessageParam[] = [
219
+ const messages: OpenAIChatMessage[] = [
219
220
  {
220
221
  role: 'user',
221
222
  content: [
@@ -247,7 +248,7 @@ describe('convertOpenAIResponseInputs', () => {
247
248
  });
248
249
 
249
250
  it('应该正确处理混合类型的消息序列', async () => {
250
- const messages: OpenAI.ChatCompletionMessageParam[] = [
251
+ const messages: OpenAIChatMessage[] = [
251
252
  { role: 'user', content: 'I need help with a function' },
252
253
  {
253
254
  role: 'assistant',
@@ -287,6 +288,29 @@ describe('convertOpenAIResponseInputs', () => {
287
288
  },
288
289
  ]);
289
290
  });
291
+
292
+ it('should extract reasoning.content into a separate reasoning item', async () => {
293
+ const messages: OpenAIChatMessage[] = [
294
+ { content: 'system prompts', role: 'system' },
295
+ { content: '你好', role: 'user' },
296
+ {
297
+ content: 'hello',
298
+ role: 'assistant',
299
+ reasoning: { content: 'reasoning content', duration: 2706 },
300
+ },
301
+ { content: '杭州天气如何', role: 'user' },
302
+ ];
303
+
304
+ const result = await convertOpenAIResponseInputs(messages);
305
+
306
+ expect(result).toEqual([
307
+ { content: 'system prompts', role: 'developer' },
308
+ { content: '你好', role: 'user' },
309
+ { summary: [{ text: 'reasoning content', type: 'summary_text' }], type: 'reasoning' },
310
+ { content: 'hello', role: 'assistant' },
311
+ { content: '杭州天气如何', role: 'user' },
312
+ ]);
313
+ });
290
314
  });
291
315
 
292
316
  describe('convertImageUrlToFile', () => {
@@ -40,12 +40,18 @@ export const convertOpenAIMessages = async (messages: OpenAI.ChatCompletionMessa
40
40
  )) as OpenAI.ChatCompletionMessageParam[];
41
41
  };
42
42
 
43
- export const convertOpenAIResponseInputs = async (
44
- messages: OpenAI.ChatCompletionMessageParam[],
45
- ) => {
43
+ export const convertOpenAIResponseInputs = async (messages: OpenAIChatMessage[]) => {
46
44
  let input: OpenAI.Responses.ResponseInputItem[] = [];
47
45
  await Promise.all(
48
46
  messages.map(async (message) => {
47
+ // if message has reasoning, add it as a separate reasoning item
48
+ if (message.reasoning?.content) {
49
+ input.push({
50
+ summary: [{ text: message.reasoning.content, type: 'summary_text' }],
51
+ type: 'reasoning',
52
+ } as OpenAI.Responses.ResponseReasoningItem);
53
+ }
54
+
49
55
  // if message is assistant messages with tool calls , transform it to function type item
50
56
  if (message.role === 'assistant' && message.tool_calls && message.tool_calls?.length > 0) {
51
57
  message.tool_calls?.forEach((tool) => {
@@ -70,6 +76,11 @@ export const convertOpenAIResponseInputs = async (
70
76
  return;
71
77
  }
72
78
 
79
+ if (message.role === 'system') {
80
+ input.push({ ...message, role: 'developer' } as OpenAI.Responses.ResponseInputItem);
81
+ return;
82
+ }
83
+
73
84
  // default item
74
85
  // also need handle image
75
86
  const item = {
@@ -92,6 +103,9 @@ export const convertOpenAIResponseInputs = async (
92
103
  ),
93
104
  } as OpenAI.Responses.ResponseInputItem;
94
105
 
106
+ // remove reasoning field from the message item
107
+ delete (item as any).reasoning;
108
+
95
109
  input.push(item);
96
110
  }),
97
111
  );
@@ -1,6 +1,7 @@
1
1
  import createClient, { ModelClient } from '@azure-rest/ai-inference';
2
2
  import { AzureKeyCredential } from '@azure/core-auth';
3
3
  import { ModelProvider } from 'model-bank';
4
+ import type { Readable as NodeReadable } from 'node:stream';
4
5
  import OpenAI from 'openai';
5
6
 
6
7
  import { systemToUserModels } from '../../const/models';
@@ -64,9 +65,40 @@ export class LobeAzureAI implements LobeRuntimeAI {
64
65
  });
65
66
 
66
67
  if (enableStreaming) {
67
- const stream = await response.asBrowserStream();
68
+ const unifiedStream = await (async () => {
69
+ if (typeof window === 'undefined') {
70
+ /**
71
+ * In Node.js the SDK exposes a Node readable stream, so we convert it to a Web ReadableStream
72
+ * to reuse the same streaming pipeline used by Edge/browser runtimes.
73
+ */
74
+ const streamModule = await import('node:stream');
75
+ const Readable = streamModule.Readable ?? streamModule.default.Readable;
76
+
77
+ if (!Readable) throw new Error('node:stream module missing Readable export');
78
+ if (typeof Readable.toWeb !== 'function')
79
+ throw new Error('Readable.toWeb is not a function');
80
+
81
+ const nodeResponse = await response.asNodeStream();
82
+ const nodeStream = nodeResponse.body;
83
+
84
+ if (!nodeStream) {
85
+ throw new Error('Azure AI response body is empty');
86
+ }
87
+
88
+ return Readable.toWeb(nodeStream as unknown as NodeReadable) as ReadableStream;
89
+ }
90
+
91
+ const browserResponse = await response.asBrowserStream();
92
+ const browserStream = browserResponse.body;
93
+
94
+ if (!browserStream) {
95
+ throw new Error('Azure AI response body is empty');
96
+ }
97
+
98
+ return browserStream;
99
+ })();
68
100
 
69
- const [prod, debug] = stream.body!.tee();
101
+ const [prod, debug] = unifiedStream.tee();
70
102
 
71
103
  if (process.env.DEBUG_AZURE_AI_CHAT_COMPLETION === '1') {
72
104
  debugStream(debug).catch(console.error);
@@ -47,6 +47,10 @@ export type UserMessageContentPart =
47
47
  export interface OpenAIChatMessage {
48
48
  content: string | UserMessageContentPart[];
49
49
  name?: string;
50
+ reasoning?: {
51
+ content?: string;
52
+ duration?: number;
53
+ };
50
54
  role: LLMRoleType;
51
55
  tool_call_id?: string;
52
56
  tool_calls?: MessageToolCall[];
@@ -1,7 +1,7 @@
1
1
  import brotliPromise from 'brotli-wasm';
2
2
 
3
3
  /**
4
- * @title 字符串压缩器
4
+ * @title String Compressor
5
5
  */
6
6
  export class StrCompressor {
7
7
  /**
@@ -17,9 +17,9 @@ export class StrCompressor {
17
17
  }
18
18
 
19
19
  /**
20
- * @title 压缩字符串
21
- * @param str - 要压缩的字符串
22
- * @returns 压缩后的字符串
20
+ * @title Compress string
21
+ * @param str - String to compress
22
+ * @returns Compressed string
23
23
  */
24
24
  compress(str: string): string {
25
25
  const input = new TextEncoder().encode(str);
@@ -30,9 +30,9 @@ export class StrCompressor {
30
30
  }
31
31
 
32
32
  /**
33
- * @title 解压缩字符串
34
- * @param str - 要解压缩的字符串
35
- * @returns 解压缩后的字符串
33
+ * @title Decompress string
34
+ * @param str - String to decompress
35
+ * @returns Decompressed string
36
36
  */
37
37
  decompress(str: string): string {
38
38
  const compressedData = this.urlSafeBase64Decode(str);
@@ -43,8 +43,8 @@ export class StrCompressor {
43
43
  }
44
44
 
45
45
  /**
46
- * @title 异步压缩字符串
47
- * @param str - 要压缩的字符串
46
+ * @title Compress string asynchronously
47
+ * @param str - String to compress
48
48
  * @returns Promise
49
49
  */
50
50
  async compressAsync(str: string) {
@@ -58,8 +58,8 @@ export class StrCompressor {
58
58
  }
59
59
 
60
60
  /**
61
- * @title 异步解压缩字符串
62
- * @param str - 要解压缩的字符串
61
+ * @title Decompress string asynchronously
62
+ * @param str - String to decompress
63
63
  * @returns Promise
64
64
  */
65
65
  async decompressAsync(str: string) {
@@ -1,7 +1,7 @@
1
1
  /**
2
- * HeadersInit 转换为 Record<string, string>
3
- * @param headersInit - Headers 初始化对象
4
- * @returns 转换后的记录对象
2
+ * Convert HeadersInit to Record<string, string>
3
+ * @param headersInit - Headers initialization object
4
+ * @returns The converted record object
5
5
  */
6
6
  // eslint-disable-next-line no-undef
7
7
  export const headersToRecord = (headersInit?: HeadersInit): Record<string, string> => {
@@ -1,7 +1,7 @@
1
1
  /**
2
- * 从请求体中获取数据
3
- * @param body - 请求体
4
- * @returns 转换后的请求体数据
2
+ * Get data from request body
3
+ * @param body - Request body
4
+ * @returns Converted request body data
5
5
  */
6
6
  export const getRequestBody = async (
7
7
  // eslint-disable-next-line no-undef
@@ -23,6 +23,6 @@ export const getRequestBody = async (
23
23
  return await body.arrayBuffer();
24
24
  }
25
25
 
26
- console.warn('不支持的 IPC 代理请求体类型:', typeof body);
27
- throw new Error('不支持的 IPC 代理请求体类型');
26
+ console.warn('Unsupported IPC proxy request body type:', typeof body);
27
+ throw new Error('Unsupported IPC proxy request body type');
28
28
  };
@@ -1,9 +1,9 @@
1
1
  import { isNil, omitBy } from 'lodash-es';
2
2
 
3
3
  /**
4
- * 清理对象中的空值(undefinednull、空字符串)
5
- * @param obj 要清理的对象
6
- * @returns 清理后的对象
4
+ * Clean empty values (undefined, null, empty string) from an object
5
+ * @param obj The object to clean
6
+ * @returns The cleaned object
7
7
  */
8
8
  export const cleanObject = <T extends Record<string, any>>(obj: T): T => {
9
9
  return omitBy(obj, (value) => isNil(value) || value === '') as T;
@@ -0,0 +1,79 @@
1
+ import { describe, expect, it } from 'vitest';
2
+
3
+ import { createNodeResponse } from '../response';
4
+
5
+ describe('createNodeResponse', () => {
6
+ it('wraps successful Response with default headers', async () => {
7
+ const upstream = new Response('audio-chunk', {
8
+ headers: {
9
+ 'x-source': 'sdk',
10
+ },
11
+ status: 201,
12
+ statusText: 'Created',
13
+ });
14
+ upstream.headers.delete('content-type');
15
+
16
+ const result = await createNodeResponse(() => Promise.resolve(upstream), {
17
+ success: {
18
+ cacheControl: 'no-store',
19
+ defaultContentType: 'audio/mpeg',
20
+ },
21
+ });
22
+
23
+ expect(await result.text()).toBe('audio-chunk');
24
+ expect(result.status).toBe(201);
25
+ expect(result.headers.get('x-source')).toBe('sdk');
26
+ expect(result.headers.get('content-type')).toBe('audio/mpeg');
27
+ expect(result.headers.get('cache-control')).toBe('no-store');
28
+ });
29
+
30
+ it('delegates to onInvalidResponse when payload is not Response-like', async () => {
31
+ const fallback = new Response('invalid', { status: 500 });
32
+
33
+ const result = await createNodeResponse(() => Promise.resolve({} as any), {
34
+ onInvalidResponse: () => fallback,
35
+ });
36
+
37
+ expect(result).toBe(fallback);
38
+ });
39
+
40
+ it('normalizes thrown Response-like errors via error options', async () => {
41
+ const upstreamError = new Response(JSON.stringify({ error: 'boom' }), {
42
+ status: 429,
43
+ statusText: 'Too Many Requests',
44
+ });
45
+ upstreamError.headers.delete('content-type');
46
+
47
+ const result = await createNodeResponse(
48
+ async () => {
49
+ throw upstreamError;
50
+ },
51
+ {
52
+ error: {
53
+ cacheControl: 'no-store',
54
+ defaultContentType: 'application/json',
55
+ },
56
+ },
57
+ );
58
+
59
+ expect(result.status).toBe(429);
60
+ expect(result.headers.get('content-type')).toBe('application/json');
61
+ expect(result.headers.get('cache-control')).toBe('no-store');
62
+ expect(await result.json()).toEqual({ error: 'boom' });
63
+ });
64
+
65
+ it('delegates to onNonResponseError for unexpected exceptions', async () => {
66
+ const fallback = new Response('fallback', { status: 500 });
67
+
68
+ const result = await createNodeResponse(
69
+ async () => {
70
+ throw new Error('unexpected');
71
+ },
72
+ {
73
+ onNonResponseError: () => fallback,
74
+ },
75
+ );
76
+
77
+ expect(result).toBe(fallback);
78
+ });
79
+ });
@@ -1,5 +1,6 @@
1
1
  export * from './auth';
2
2
  export * from './correctOIDCUrl';
3
3
  export * from './geo';
4
+ export * from './response';
4
5
  export * from './responsive';
5
6
  export * from './xor';
@@ -0,0 +1,110 @@
1
+ /**
2
+ * Options for normalizing a Response so it can be consumed by the platform runtime.
3
+ */
4
+ export interface EnsureNodeResponseOptions {
5
+ /**
6
+ * Force update the cache-control header, usually to disable caching for APIs.
7
+ */
8
+ cacheControl?: string;
9
+ /**
10
+ * Sets a default content-type header when the original Response omitted it.
11
+ */
12
+ defaultContentType?: string;
13
+ /**
14
+ * Force buffering even if a readable body stream exists.
15
+ */
16
+ forceBuffering?: boolean;
17
+ }
18
+
19
+ /**
20
+ * Checks whether a value structurally matches the minimal Response interface.
21
+ */
22
+ export const isResponseLike = (value: unknown): value is Response => {
23
+ if (typeof value !== 'object' || value === null) return false;
24
+
25
+ const candidate = value as Partial<Response>;
26
+
27
+ return (
28
+ typeof candidate.arrayBuffer === 'function' &&
29
+ !!candidate.headers &&
30
+ typeof (candidate.headers as Headers).get === 'function' &&
31
+ typeof candidate.status === 'number' &&
32
+ typeof candidate.statusText === 'string'
33
+ );
34
+ };
35
+
36
+ /**
37
+ * Re-wraps an arbitrary Response-like object into the platform Response implementation.
38
+ *
39
+ * This is required because some SDKs (e.g., OpenAI) ship their own Response shim
40
+ * that is not recognized by Next.js when running in the Node.js runtime.
41
+ */
42
+ export const ensureNodeResponse = async (
43
+ source: Response,
44
+ options: EnsureNodeResponseOptions = {},
45
+ ) => {
46
+ const headers = new Headers(source.headers);
47
+
48
+ if (options.defaultContentType && !headers.has('content-type')) {
49
+ headers.set('content-type', options.defaultContentType);
50
+ }
51
+
52
+ if (options.cacheControl) {
53
+ headers.set('cache-control', options.cacheControl);
54
+ }
55
+
56
+ const body = !options.forceBuffering && source.body ? source.body : await source.arrayBuffer();
57
+
58
+ return new Response(body, {
59
+ headers,
60
+ status: source.status,
61
+ statusText: source.statusText,
62
+ });
63
+ };
64
+
65
+ export interface CreateNodeResponseOptions {
66
+ /**
67
+ * Options applied when a Response-like error is thrown.
68
+ */
69
+ error?: EnsureNodeResponseOptions;
70
+ /**
71
+ * Callback when the resolved value is not Response-like.
72
+ */
73
+ onInvalidResponse?: (payload: unknown) => Response;
74
+ /**
75
+ * Callback when a non-Response error is thrown.
76
+ */
77
+ onNonResponseError?: (error: unknown) => Response;
78
+ /**
79
+ * Options applied when the resolved Response is normalized.
80
+ */
81
+ success?: EnsureNodeResponseOptions;
82
+ }
83
+
84
+ /**
85
+ * Runs a response factory and ensures every exit path returns a platform Response.
86
+ */
87
+ export const createNodeResponse = async <T>(
88
+ responseCreator: () => Promise<T>,
89
+ options: CreateNodeResponseOptions = {},
90
+ ) => {
91
+ try {
92
+ const response = await responseCreator();
93
+
94
+ if (!isResponseLike(response)) {
95
+ if (options.onInvalidResponse) return options.onInvalidResponse(response);
96
+
97
+ throw new Error('Expected a Response-like object from responseCreator.');
98
+ }
99
+
100
+ return ensureNodeResponse(response, options.success);
101
+ } catch (error) {
102
+ if (isResponseLike(error)) {
103
+ return ensureNodeResponse(error, options.error);
104
+ }
105
+
106
+ if (options.onNonResponseError) return options.onNonResponseError(error);
107
+
108
+ throw error;
109
+ }
110
+ };
@@ -3,8 +3,6 @@ import { createOpenaiAudioTranscriptions } from '@lobehub/tts/server';
3
3
 
4
4
  import { createBizOpenAI } from '@/app/(backend)/_deprecated/createBizOpenAI';
5
5
 
6
- export const runtime = 'edge';
7
-
8
6
  export const preferredRegion = [
9
7
  'arn1',
10
8
  'bom1',
@@ -1,9 +1,15 @@
1
1
  import { EdgeSpeechPayload, EdgeSpeechTTS } from '@lobehub/tts';
2
2
 
3
- export const runtime = 'edge';
3
+ import { createSpeechResponse } from '@/server/utils/createSpeechResponse';
4
4
 
5
5
  export const POST = async (req: Request) => {
6
6
  const payload = (await req.json()) as EdgeSpeechPayload;
7
7
 
8
- return await EdgeSpeechTTS.createRequest({ payload });
8
+ return createSpeechResponse(() => EdgeSpeechTTS.createRequest({ payload }), {
9
+ logTag: 'webapi/tts/edge',
10
+ messages: {
11
+ failure: 'Failed to synthesize speech',
12
+ invalid: 'Unexpected payload from Edge speech API',
13
+ },
14
+ });
9
15
  };
@@ -1,9 +1,15 @@
1
1
  import { MicrosoftSpeechPayload, MicrosoftSpeechTTS } from '@lobehub/tts';
2
2
 
3
- export const runtime = 'edge';
3
+ import { createSpeechResponse } from '@/server/utils/createSpeechResponse';
4
4
 
5
5
  export const POST = async (req: Request) => {
6
6
  const payload = (await req.json()) as MicrosoftSpeechPayload;
7
7
 
8
- return await MicrosoftSpeechTTS.createRequest({ payload });
8
+ return createSpeechResponse(() => MicrosoftSpeechTTS.createRequest({ payload }), {
9
+ logTag: 'webapi/tts/microsoft',
10
+ messages: {
11
+ failure: 'Failed to synthesize speech',
12
+ invalid: 'Unexpected payload from Microsoft speech API',
13
+ },
14
+ });
9
15
  };
@@ -2,8 +2,7 @@ import { OpenAITTSPayload } from '@lobehub/tts';
2
2
  import { createOpenaiAudioSpeech } from '@lobehub/tts/server';
3
3
 
4
4
  import { createBizOpenAI } from '@/app/(backend)/_deprecated/createBizOpenAI';
5
-
6
- export const runtime = 'edge';
5
+ import { createSpeechResponse } from '@/server/utils/createSpeechResponse';
7
6
 
8
7
  export const preferredRegion = [
9
8
  'arn1',
@@ -34,5 +33,18 @@ export const POST = async (req: Request) => {
34
33
  // if resOrOpenAI is a Response, it means there is an error,just return it
35
34
  if (openaiOrErrResponse instanceof Response) return openaiOrErrResponse;
36
35
 
37
- return await createOpenaiAudioSpeech({ openai: openaiOrErrResponse as any, payload });
36
+ return createSpeechResponse(
37
+ () =>
38
+ createOpenaiAudioSpeech({
39
+ openai: openaiOrErrResponse as any,
40
+ payload,
41
+ }),
42
+ {
43
+ logTag: 'webapi/tts/openai',
44
+ messages: {
45
+ failure: 'Failed to synthesize speech',
46
+ invalid: 'Unexpected payload from OpenAI TTS',
47
+ },
48
+ },
49
+ );
38
50
  };
@@ -8,23 +8,35 @@ import { agentRouter } from '../lambda/agent';
8
8
  import { aiChatRouter } from '../lambda/aiChat';
9
9
  import { aiModelRouter } from '../lambda/aiModel';
10
10
  import { aiProviderRouter } from '../lambda/aiProvider';
11
+ import { chunkRouter } from '../lambda/chunk';
12
+ import { configRouter } from '../lambda/config';
13
+ import { documentRouter } from '../lambda/document';
14
+ import { fileRouter } from '../lambda/file';
15
+ import { knowledgeBaseRouter } from '../lambda/knowledgeBase';
11
16
  import { marketRouter } from '../lambda/market';
12
17
  import { messageRouter } from '../lambda/message';
13
18
  import { sessionRouter } from '../lambda/session';
14
19
  import { sessionGroupRouter } from '../lambda/sessionGroup';
15
20
  import { topicRouter } from '../lambda/topic';
21
+ import { uploadRouter } from '../lambda/upload';
22
+ import { userRouter } from '../lambda/user';
16
23
 
17
24
  export const mobileRouter = router({
18
25
  agent: agentRouter,
19
26
  aiChat: aiChatRouter,
20
27
  aiModel: aiModelRouter,
21
28
  aiProvider: aiProviderRouter,
29
+ chunk: chunkRouter,
30
+ config: configRouter,
31
+ document: documentRouter,
32
+ file: fileRouter,
22
33
  healthcheck: publicProcedure.query(() => "i'm live!"),
34
+ knowledgeBase: knowledgeBaseRouter,
23
35
  market: marketRouter,
24
36
  message: messageRouter,
25
37
  session: sessionRouter,
26
38
  sessionGroup: sessionGroupRouter,
27
39
  topic: topicRouter,
40
+ upload: uploadRouter,
41
+ user: userRouter,
28
42
  });
29
-
30
- export type MobileRouter = typeof mobileRouter;
@@ -0,0 +1,55 @@
1
+ import { ChatErrorType } from '@lobechat/types';
2
+
3
+ import { createErrorResponse } from '@/utils/errorResponse';
4
+ import { createNodeResponse } from '@/utils/server/response';
5
+
6
+ export interface CreateSpeechResponseOptions {
7
+ errorContentType?: string;
8
+ logTag: string;
9
+ messages?: {
10
+ failure?: string;
11
+ invalid?: string;
12
+ };
13
+ successContentType?: string;
14
+ }
15
+
16
+ /**
17
+ * Wraps a third-party speech SDK response so the Node.js runtime always receives
18
+ * a valid platform Response, while keeping logging and error handling consistent.
19
+ */
20
+ export const createSpeechResponse = async <T>(
21
+ responseCreator: () => Promise<T>,
22
+ {
23
+ logTag,
24
+ successContentType = 'audio/mpeg',
25
+ errorContentType = 'application/json',
26
+ messages,
27
+ }: CreateSpeechResponseOptions,
28
+ ) => {
29
+ const prefix = `[${logTag}]`;
30
+ const invalidMessage = messages?.invalid ?? 'Unexpected payload from speech provider';
31
+ const failureMessage = messages?.failure ?? 'Failed to synthesize speech';
32
+
33
+ return createNodeResponse(responseCreator, {
34
+ error: {
35
+ cacheControl: 'no-store',
36
+ defaultContentType: errorContentType,
37
+ },
38
+ onInvalidResponse: (response) => {
39
+ console.error(`${prefix} ${invalidMessage}`, response);
40
+
41
+ return createErrorResponse(ChatErrorType.InternalServerError);
42
+ },
43
+ onNonResponseError: (error) => {
44
+ console.error(`${prefix} ${failureMessage}`, error);
45
+
46
+ return createErrorResponse(ChatErrorType.InternalServerError, {
47
+ message: error instanceof Error ? error.message : String(error),
48
+ });
49
+ },
50
+ success: {
51
+ cacheControl: 'no-store',
52
+ defaultContentType: successContentType,
53
+ },
54
+ });
55
+ };
@@ -137,7 +137,7 @@ class DiscoverService {
137
137
  };
138
138
 
139
139
  /**
140
- * 上报 MCP 插件安装结果
140
+ * Report MCP plugin installation result
141
141
  */
142
142
  reportMcpInstallResult = async ({
143
143
  success,
@@ -168,7 +168,7 @@ class DiscoverService {
168
168
  };
169
169
 
170
170
  /**
171
- * 上报插件调用结果
171
+ * Report plugin call result
172
172
  */
173
173
  reportPluginCall = async (reportData: CallReportRequest) => {
174
174
  // if user don't allow tracing , just not report calling
@@ -283,27 +283,27 @@ class DiscoverService {
283
283
  private async injectMPToken() {
284
284
  if (typeof localStorage === 'undefined') return;
285
285
 
286
- // 检查服务端设置的状态标记 cookie
286
+ // Check server-set status flag cookie
287
287
  const tokenStatus = this.getTokenStatusFromCookie();
288
288
  if (tokenStatus === 'active') return;
289
289
 
290
290
  let clientId: string;
291
291
  let clientSecret: string;
292
292
 
293
- // 1. localStorage 获取客户端信息
293
+ // 1. Get client information from localStorage
294
294
  const item = localStorage.getItem('_mpc');
295
295
  if (!item) {
296
- // 2. 如果没有,则注册客户端
296
+ // 2. If not exists, register client
297
297
  const clientInfo = await this.registerClient();
298
298
  clientId = clientInfo.clientId;
299
299
  clientSecret = clientInfo.clientSecret;
300
300
 
301
- // 3. Base64 编码并保存到 localStorage
301
+ // 3. Base64 encode and save to localStorage
302
302
  const clientData = JSON.stringify({ clientId, clientSecret });
303
303
  const encodedData = btoa(clientData);
304
304
  localStorage.setItem('_mpc', encodedData);
305
305
  } else {
306
- // 4. 如果有,则解码获取客户端信息
306
+ // 4. If exists, decode to get client information
307
307
  try {
308
308
  const decodedData = atob(item);
309
309
  const clientData = JSON.parse(decodedData);
@@ -311,7 +311,7 @@ class DiscoverService {
311
311
  clientSecret = clientData.clientSecret;
312
312
  } catch (error) {
313
313
  console.error('Failed to decode client data:', error);
314
- // 如果解码失败,重新注册
314
+ // If decoding fails, re-register
315
315
  const clientInfo = await this.registerClient();
316
316
  clientId = clientInfo.clientId;
317
317
  clientSecret = clientInfo.clientSecret;
@@ -322,23 +322,23 @@ class DiscoverService {
322
322
  }
323
323
  }
324
324
 
325
- // 5. 获取访问令牌(服务端会自动设置 HTTP-Only cookie
325
+ // 5. Get access token (server will automatically set HTTP-Only cookie)
326
326
  try {
327
327
  const result = await lambdaClient.market.registerM2MToken.query({
328
328
  clientId,
329
329
  clientSecret,
330
330
  });
331
331
 
332
- // 检查服务端返回的结果
332
+ // Check server response result
333
333
  if (!result.success) {
334
334
  console.warn(
335
335
  'Token registration failed, client credentials may be invalid. Clearing and retrying...',
336
336
  );
337
337
 
338
- // 清空相关的本地存储数据
338
+ // Clear related local storage data
339
339
  localStorage.removeItem('_mpc');
340
340
 
341
- // 重新执行完整的注册流程(但只重试一次)
341
+ // Re-execute the complete registration process (but only retry once)
342
342
  if (!this._isRetrying) {
343
343
  this._isRetrying = true;
344
344
  try {
@@ -76,7 +76,10 @@ export class MessageService {
76
76
  return lambdaClient.message.getHeatmaps.query();
77
77
  };
78
78
 
79
- updateMessageError = async (id: string, error: ChatMessageError) => {
79
+ updateMessageError = async (id: string, value: ChatMessageError) => {
80
+ const error = value.type ? value : { body: value, message: value.message, type: 'ApplicationRuntimeError' };
81
+
82
+
80
83
  return lambdaClient.message.update.mutate({ id, value: { error } });
81
84
  };
82
85
 
@@ -106,6 +106,7 @@ export const createAgentExecutors = (context: {
106
106
  // - Loading state management
107
107
  // - Error handling
108
108
  // Use messages from state (already contains full conversation history)
109
+ const messages = llmPayload.messages.filter((message) => message.id !== assistantMessageId);
109
110
  const {
110
111
  isFunctionCall,
111
112
  content,
@@ -114,7 +115,7 @@ export const createAgentExecutors = (context: {
114
115
  tool_calls,
115
116
  } = await context.get().internal_fetchAIChatMessage({
116
117
  messageId: assistantMessageId,
117
- messages: llmPayload.messages,
118
+ messages: messages,
118
119
  model: llmPayload.model,
119
120
  params: context.params,
120
121
  provider: llmPayload.provider,
@@ -10,7 +10,7 @@ describe('chatDockSelectors', () => {
10
10
  const state = {
11
11
  showPortal: false,
12
12
  portalToolMessage: undefined,
13
- messagesMap: {},
13
+ dbMessagesMap: {},
14
14
  activeId: 'test-id',
15
15
  activeTopicId: undefined,
16
16
  ...overrides,
@@ -109,7 +109,7 @@ describe('chatDockSelectors', () => {
109
109
  it('should return message content when message exists', () => {
110
110
  const messageContent = 'Test message content';
111
111
  const state = createState({
112
- messagesMap: {
112
+ dbMessagesMap: {
113
113
  'test-id_null': [
114
114
  {
115
115
  id: 'test-id',
@@ -150,7 +150,7 @@ describe('chatDockSelectors', () => {
150
150
  it('should extract content from artifact tag', () => {
151
151
  const artifactContent = 'Test artifact content';
152
152
  const state = createState({
153
- messagesMap: {
153
+ dbMessagesMap: {
154
154
  'test-id_null': [
155
155
  {
156
156
  id: 'test-id',
@@ -178,7 +178,7 @@ describe('chatDockSelectors', () => {
178
178
  </body>
179
179
  </html>`;
180
180
  const state = createState({
181
- messagesMap: {
181
+ dbMessagesMap: {
182
182
  'test-id_null': [
183
183
  {
184
184
  id: 'test-id',
@@ -203,7 +203,7 @@ ${htmlContent}
203
203
  describe('isArtifactTagClosed', () => {
204
204
  it('should return false for unclosed artifact tag', () => {
205
205
  const state = createState({
206
- messagesMap: {
206
+ dbMessagesMap: {
207
207
  'test-id_null': [
208
208
  {
209
209
  id: 'test-id',
@@ -222,7 +222,7 @@ ${htmlContent}
222
222
 
223
223
  it('should return true for closed artifact tag', () => {
224
224
  const state = createState({
225
- messagesMap: {
225
+ dbMessagesMap: {
226
226
  'test-id_null': [
227
227
  {
228
228
  id: 'test-id',
@@ -241,7 +241,7 @@ ${htmlContent}
241
241
 
242
242
  it('should return false when no artifact tag exists', () => {
243
243
  const state = createState({
244
- messagesMap: {
244
+ dbMessagesMap: {
245
245
  'test-id_null': [
246
246
  {
247
247
  id: 'test-id',
@@ -1,7 +1,7 @@
1
1
  import { ARTIFACT_TAG_CLOSED_REGEX, ARTIFACT_TAG_REGEX } from '@/const/plugin';
2
2
  import type { ChatStoreState } from '@/store/chat';
3
3
 
4
- import { chatSelectors } from '../message/selectors';
4
+ import { dbMessageSelectors } from '../message/selectors';
5
5
 
6
6
  const showPortal = (s: ChatStoreState) => s.showPortal;
7
7
 
@@ -27,7 +27,7 @@ const artifactType = (s: ChatStoreState) => s.portalArtifact?.type;
27
27
  const artifactCodeLanguage = (s: ChatStoreState) => s.portalArtifact?.language;
28
28
 
29
29
  const artifactMessageContent = (id: string) => (s: ChatStoreState) => {
30
- const message = chatSelectors.getMessageById(id)(s);
30
+ const message = dbMessageSelectors.getDbMessageById(id)(s);
31
31
  return message?.content || '';
32
32
  };
33
33
 
@@ -1,11 +1,11 @@
1
- // 定义主题类型
1
+ // Define theme type
2
2
  import { ThemeAppearance } from 'antd-style/lib/types/appearance';
3
3
 
4
4
  import { DEFAULT_LANG } from '@/const/locale';
5
5
  import { Locales, locales } from '@/locales/resources';
6
6
  import { DynamicLayoutProps } from '@/types/next';
7
7
 
8
- // 定义变体接口
8
+ // Define variant interface
9
9
  export interface IRouteVariants {
10
10
  isMobile: boolean;
11
11
  locale: Locales;
@@ -14,10 +14,10 @@ export interface IRouteVariants {
14
14
  theme: ThemeAppearance;
15
15
  }
16
16
 
17
- // 支持的主题
17
+ // Supported themes
18
18
  const SUPPORTED_THEMES = ['dark', 'light'] as const;
19
19
 
20
- // 默认变体配置
20
+ // Default variant configuration
21
21
  export const DEFAULT_VARIANTS: IRouteVariants = {
22
22
  isMobile: false,
23
23
  locale: DEFAULT_LANG,
@@ -28,8 +28,8 @@ const SPLITTER = '__';
28
28
 
29
29
  export class RouteVariants {
30
30
  static serializeVariants = (variants: IRouteVariants): string => {
31
- // 使用紧凑的格式: locale_isMobile_theme
32
- // 例如: "en-US_0_dark" 表示 英文_非移动端_深色主题
31
+ // Use compact format: locale_isMobile_theme
32
+ // Example: "en-US_0_dark" represents English_Non-mobile_Dark theme
33
33
  return [variants.locale, Number(variants.isMobile), variants.theme].join(SPLITTER);
34
34
  };
35
35
 
@@ -37,14 +37,14 @@ export class RouteVariants {
37
37
  try {
38
38
  const [locale, isMobile, theme] = serialized.split(SPLITTER);
39
39
 
40
- // 验证并返回变体
40
+ // Validate and return variant
41
41
  return {
42
42
  isMobile: isMobile === '1',
43
43
  locale: this.isValidLocale(locale) ? (locale as Locales) : DEFAULT_VARIANTS.locale,
44
44
  theme: this.isValidTheme(theme) ? theme : DEFAULT_VARIANTS.theme,
45
45
  };
46
46
  } catch {
47
- // 解析失败时返回默认值
47
+ // Return default value on parse failure
48
48
  return { ...DEFAULT_VARIANTS };
49
49
  }
50
50
  };
@@ -66,14 +66,14 @@ export class RouteVariants {
66
66
  return locale;
67
67
  };
68
68
 
69
- // 工具函数:创建变体
69
+ // Utility function: create variant
70
70
 
71
71
  static createVariants = (options: Partial<IRouteVariants> = {}): IRouteVariants => ({
72
72
  ...DEFAULT_VARIANTS,
73
73
  ...options,
74
74
  });
75
75
 
76
- // 验证函数
76
+ // Validation functions
77
77
  private static isValidLocale = (locale: string): boolean => locales.includes(locale as any);
78
78
 
79
79
  private static isValidTheme = (theme: string): boolean => SUPPORTED_THEMES.includes(theme as any);
@@ -1,25 +0,0 @@
1
- // @vitest-environment edge-runtime
2
- import { describe, expect, it, vi } from 'vitest';
3
-
4
- import { POST as UniverseRoute } from '../[provider]/route';
5
- import { POST, runtime } from './route';
6
-
7
- vi.mock('../[provider]/route', () => ({
8
- POST: vi.fn().mockResolvedValue('mocked response'),
9
- }));
10
-
11
- describe('Configuration tests', () => {
12
- it('should have runtime set to "edge"', () => {
13
- expect(runtime).toBe('edge');
14
- });
15
- });
16
-
17
- describe('Groq POST function tests', () => {
18
- it('should call UniverseRoute with correct parameters', async () => {
19
- const mockRequest = new Request('https://example.com', { method: 'POST' });
20
- await POST(mockRequest);
21
- expect(UniverseRoute).toHaveBeenCalledWith(mockRequest, {
22
- params: Promise.resolve({ provider: 'azureai' }),
23
- });
24
- });
25
- });
@@ -1,6 +0,0 @@
1
- import { POST as UniverseRoute } from '../[provider]/route';
2
-
3
- export const runtime = 'edge';
4
-
5
- export const POST = async (req: Request) =>
6
- UniverseRoute(req, { params: Promise.resolve({ provider: 'azureai' }) });