@lobehub/lobehub 2.0.0-next.44 → 2.0.0-next.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@

  # Changelog

+ ## [Version 2.0.0-next.45](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.44...v2.0.0-next.45)
+
+ <sup>Released on **2025-11-10**</sup>
+
+ #### ♻ Code Refactoring
+
+ - **misc**: Edge to node runtime.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Code refactoring
+
+ - **misc**: Edge to node runtime, closes [#10149](https://github.com/lobehub/lobe-chat/issues/10149) ([2f4c25d](https://github.com/lobehub/lobe-chat/commit/2f4c25d))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 2.0.0-next.44](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.43...v2.0.0-next.44)

  <sup>Released on **2025-11-10**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+   {
+     "children": {
+       "improvements": [
+         "Edge to node runtime."
+       ]
+     },
+     "date": "2025-11-10",
+     "version": "2.0.0-next.45"
+   },
    {
      "children": {
        "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/lobehub",
-   "version": "2.0.0-next.44",
+   "version": "2.0.0-next.45",
    "description": "LobeHub - an open-source, comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -1,6 +1,7 @@
  import createClient, { ModelClient } from '@azure-rest/ai-inference';
  import { AzureKeyCredential } from '@azure/core-auth';
  import { ModelProvider } from 'model-bank';
+ import type { Readable as NodeReadable } from 'node:stream';
  import OpenAI from 'openai';

  import { systemToUserModels } from '../../const/models';
@@ -64,9 +65,40 @@ export class LobeAzureAI implements LobeRuntimeAI {
      });

      if (enableStreaming) {
-       const stream = await response.asBrowserStream();
+       const unifiedStream = await (async () => {
+         if (typeof window === 'undefined') {
+           /**
+            * In Node.js the SDK exposes a Node readable stream, so we convert it to a Web ReadableStream
+            * to reuse the same streaming pipeline used by Edge/browser runtimes.
+            */
+           const streamModule = await import('node:stream');
+           const Readable = streamModule.Readable ?? streamModule.default.Readable;
+
+           if (!Readable) throw new Error('node:stream module missing Readable export');
+           if (typeof Readable.toWeb !== 'function')
+             throw new Error('Readable.toWeb is not a function');
+
+           const nodeResponse = await response.asNodeStream();
+           const nodeStream = nodeResponse.body;
+
+           if (!nodeStream) {
+             throw new Error('Azure AI response body is empty');
+           }
+
+           return Readable.toWeb(nodeStream as unknown as NodeReadable) as ReadableStream;
+         }
+
+         const browserResponse = await response.asBrowserStream();
+         const browserStream = browserResponse.body;
+
+         if (!browserStream) {
+           throw new Error('Azure AI response body is empty');
+         }
+
+         return browserStream;
+       })();

-       const [prod, debug] = stream.body!.tee();
+       const [prod, debug] = unifiedStream.tee();

        if (process.env.DEBUG_AZURE_AI_CHAT_COMPLETION === '1') {
          debugStream(debug).catch(console.error);
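
For readers unfamiliar with the bridge above: `Readable.toWeb` is a built-in `node:stream` API (Node 17+) that converts a Node readable into a Web `ReadableStream`, which is what provides the `tee()` used for the prod/debug split. A minimal standalone sketch of the same pattern (the sample stream is illustrative; in the diff it comes from `response.asNodeStream().body`):

```ts
import { Readable } from 'node:stream';

// Illustrative Node readable; in the diff above this comes from
// response.asNodeStream().body instead.
const nodeStream = Readable.from(['chunk-1 ', 'chunk-2']);

// Readable.toWeb (Node >= 17) bridges a Node stream into a Web ReadableStream.
const webStream = Readable.toWeb(nodeStream);

// Web ReadableStream supports tee(), enabling the prod/debug split above.
const [prod, debug] = webStream.tee();

const reader = prod.getReader();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log('prod chunk:', value);
}
debug.cancel();
```

The `typeof Readable.toWeb !== 'function'` guard in the diff protects against older Node versions where the API is absent.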
@@ -0,0 +1,79 @@
+ import { describe, expect, it } from 'vitest';
+
+ import { createNodeResponse } from '../response';
+
+ describe('createNodeResponse', () => {
+   it('wraps successful Response with default headers', async () => {
+     const upstream = new Response('audio-chunk', {
+       headers: {
+         'x-source': 'sdk',
+       },
+       status: 201,
+       statusText: 'Created',
+     });
+     upstream.headers.delete('content-type');
+
+     const result = await createNodeResponse(() => Promise.resolve(upstream), {
+       success: {
+         cacheControl: 'no-store',
+         defaultContentType: 'audio/mpeg',
+       },
+     });
+
+     expect(await result.text()).toBe('audio-chunk');
+     expect(result.status).toBe(201);
+     expect(result.headers.get('x-source')).toBe('sdk');
+     expect(result.headers.get('content-type')).toBe('audio/mpeg');
+     expect(result.headers.get('cache-control')).toBe('no-store');
+   });
+
+   it('delegates to onInvalidResponse when payload is not Response-like', async () => {
+     const fallback = new Response('invalid', { status: 500 });
+
+     const result = await createNodeResponse(() => Promise.resolve({} as any), {
+       onInvalidResponse: () => fallback,
+     });
+
+     expect(result).toBe(fallback);
+   });
+
+   it('normalizes thrown Response-like errors via error options', async () => {
+     const upstreamError = new Response(JSON.stringify({ error: 'boom' }), {
+       status: 429,
+       statusText: 'Too Many Requests',
+     });
+     upstreamError.headers.delete('content-type');
+
+     const result = await createNodeResponse(
+       async () => {
+         throw upstreamError;
+       },
+       {
+         error: {
+           cacheControl: 'no-store',
+           defaultContentType: 'application/json',
+         },
+       },
+     );
+
+     expect(result.status).toBe(429);
+     expect(result.headers.get('content-type')).toBe('application/json');
+     expect(result.headers.get('cache-control')).toBe('no-store');
+     expect(await result.json()).toEqual({ error: 'boom' });
+   });
+
+   it('delegates to onNonResponseError for unexpected exceptions', async () => {
+     const fallback = new Response('fallback', { status: 500 });
+
+     const result = await createNodeResponse(
+       async () => {
+         throw new Error('unexpected');
+       },
+       {
+         onNonResponseError: () => fallback,
+       },
+     );
+
+     expect(result).toBe(fallback);
+   });
+ });
@@ -1,5 +1,6 @@
  export * from './auth';
  export * from './correctOIDCUrl';
  export * from './geo';
+ export * from './response';
  export * from './responsive';
  export * from './xor';
@@ -0,0 +1,110 @@
+ /**
+  * Options for normalizing a Response so it can be consumed by the platform runtime.
+  */
+ export interface EnsureNodeResponseOptions {
+   /**
+    * Force update the cache-control header, usually to disable caching for APIs.
+    */
+   cacheControl?: string;
+   /**
+    * Sets a default content-type header when the original Response omitted it.
+    */
+   defaultContentType?: string;
+   /**
+    * Force buffering even if a readable body stream exists.
+    */
+   forceBuffering?: boolean;
+ }
+
+ /**
+  * Checks whether a value structurally matches the minimal Response interface.
+  */
+ export const isResponseLike = (value: unknown): value is Response => {
+   if (typeof value !== 'object' || value === null) return false;
+
+   const candidate = value as Partial<Response>;
+
+   return (
+     typeof candidate.arrayBuffer === 'function' &&
+     !!candidate.headers &&
+     typeof (candidate.headers as Headers).get === 'function' &&
+     typeof candidate.status === 'number' &&
+     typeof candidate.statusText === 'string'
+   );
+ };
+
+ /**
+  * Re-wraps an arbitrary Response-like object into the platform Response implementation.
+  *
+  * This is required because some SDKs (e.g., OpenAI) ship their own Response shim
+  * that is not recognized by Next.js when running in the Node.js runtime.
+  */
+ export const ensureNodeResponse = async (
+   source: Response,
+   options: EnsureNodeResponseOptions = {},
+ ) => {
+   const headers = new Headers(source.headers);
+
+   if (options.defaultContentType && !headers.has('content-type')) {
+     headers.set('content-type', options.defaultContentType);
+   }
+
+   if (options.cacheControl) {
+     headers.set('cache-control', options.cacheControl);
+   }
+
+   const body = !options.forceBuffering && source.body ? source.body : await source.arrayBuffer();
+
+   return new Response(body, {
+     headers,
+     status: source.status,
+     statusText: source.statusText,
+   });
+ };
+
+ export interface CreateNodeResponseOptions {
+   /**
+    * Options applied when a Response-like error is thrown.
+    */
+   error?: EnsureNodeResponseOptions;
+   /**
+    * Callback when the resolved value is not Response-like.
+    */
+   onInvalidResponse?: (payload: unknown) => Response;
+   /**
+    * Callback when a non-Response error is thrown.
+    */
+   onNonResponseError?: (error: unknown) => Response;
+   /**
+    * Options applied when the resolved Response is normalized.
+    */
+   success?: EnsureNodeResponseOptions;
+ }
+
+ /**
+  * Runs a response factory and ensures every exit path returns a platform Response.
+  */
+ export const createNodeResponse = async <T>(
+   responseCreator: () => Promise<T>,
+   options: CreateNodeResponseOptions = {},
+ ) => {
+   try {
+     const response = await responseCreator();
+
+     if (!isResponseLike(response)) {
+       if (options.onInvalidResponse) return options.onInvalidResponse(response);
+
+       throw new Error('Expected a Response-like object from responseCreator.');
+     }
+
+     return ensureNodeResponse(response, options.success);
+   } catch (error) {
+     if (isResponseLike(error)) {
+       return ensureNodeResponse(error, options.error);
+     }
+
+     if (options.onNonResponseError) return options.onNonResponseError(error);
+
+     throw error;
+   }
+ };
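
A hedged usage sketch of the helper above (the route handler and upstream URL are hypothetical; the option names match the diff):

```ts
import { createNodeResponse } from '@/utils/server/response';

// Hypothetical Next.js route handler: normalize whatever the upstream call
// returns (or throws) into a platform Response the Node.js runtime accepts.
export const GET = async () =>
  createNodeResponse(() => fetch('https://example.com/audio.mp3'), {
    error: { cacheControl: 'no-store', defaultContentType: 'application/json' },
    onNonResponseError: (error) =>
      new Response(JSON.stringify({ message: String(error) }), { status: 500 }),
    success: { cacheControl: 'no-store', defaultContentType: 'audio/mpeg' },
  });
```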
@@ -3,8 +3,6 @@ import { createOpenaiAudioTranscriptions } from '@lobehub/tts/server';

  import { createBizOpenAI } from '@/app/(backend)/_deprecated/createBizOpenAI';

- export const runtime = 'edge';
-
  export const preferredRegion = [
    'arn1',
    'bom1',
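
For context on the `runtime` export removed here and in the TTS routes below: in the Next.js App Router this is the route segment config, and deleting the export falls back to the default Node.js runtime. A minimal sketch:

```ts
// Next.js route segment config. 'edge' opts a handler into the Edge runtime;
// removing the export (as this diff does) uses the default 'nodejs' runtime.
export const runtime = 'nodejs'; // or simply omit the export

export const GET = async () => new Response('ok');
```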
@@ -1,9 +1,15 @@
  import { EdgeSpeechPayload, EdgeSpeechTTS } from '@lobehub/tts';

- export const runtime = 'edge';
+ import { createSpeechResponse } from '@/server/utils/createSpeechResponse';

  export const POST = async (req: Request) => {
    const payload = (await req.json()) as EdgeSpeechPayload;

-   return await EdgeSpeechTTS.createRequest({ payload });
+   return createSpeechResponse(() => EdgeSpeechTTS.createRequest({ payload }), {
+     logTag: 'webapi/tts/edge',
+     messages: {
+       failure: 'Failed to synthesize speech',
+       invalid: 'Unexpected payload from Edge speech API',
+     },
+   });
  };
@@ -1,9 +1,15 @@
  import { MicrosoftSpeechPayload, MicrosoftSpeechTTS } from '@lobehub/tts';

- export const runtime = 'edge';
+ import { createSpeechResponse } from '@/server/utils/createSpeechResponse';

  export const POST = async (req: Request) => {
    const payload = (await req.json()) as MicrosoftSpeechPayload;

-   return await MicrosoftSpeechTTS.createRequest({ payload });
+   return createSpeechResponse(() => MicrosoftSpeechTTS.createRequest({ payload }), {
+     logTag: 'webapi/tts/microsoft',
+     messages: {
+       failure: 'Failed to synthesize speech',
+       invalid: 'Unexpected payload from Microsoft speech API',
+     },
+   });
  };
@@ -2,8 +2,7 @@ import { OpenAITTSPayload } from '@lobehub/tts';
  import { createOpenaiAudioSpeech } from '@lobehub/tts/server';

  import { createBizOpenAI } from '@/app/(backend)/_deprecated/createBizOpenAI';
-
- export const runtime = 'edge';
+ import { createSpeechResponse } from '@/server/utils/createSpeechResponse';

  export const preferredRegion = [
    'arn1',
@@ -34,5 +33,18 @@ export const POST = async (req: Request) => {
    // if openaiOrErrResponse is a Response, it means there is an error, just return it
    if (openaiOrErrResponse instanceof Response) return openaiOrErrResponse;

-   return await createOpenaiAudioSpeech({ openai: openaiOrErrResponse as any, payload });
+   return createSpeechResponse(
+     () =>
+       createOpenaiAudioSpeech({
+         openai: openaiOrErrResponse as any,
+         payload,
+       }),
+     {
+       logTag: 'webapi/tts/openai',
+       messages: {
+         failure: 'Failed to synthesize speech',
+         invalid: 'Unexpected payload from OpenAI TTS',
+       },
+     },
+   );
  };
@@ -0,0 +1,55 @@
+ import { ChatErrorType } from '@lobechat/types';
+
+ import { createErrorResponse } from '@/utils/errorResponse';
+ import { createNodeResponse } from '@/utils/server/response';
+
+ export interface CreateSpeechResponseOptions {
+   errorContentType?: string;
+   logTag: string;
+   messages?: {
+     failure?: string;
+     invalid?: string;
+   };
+   successContentType?: string;
+ }
+
+ /**
+  * Wraps a third-party speech SDK response so the Node.js runtime always receives
+  * a valid platform Response, while keeping logging and error handling consistent.
+  */
+ export const createSpeechResponse = async <T>(
+   responseCreator: () => Promise<T>,
+   {
+     logTag,
+     successContentType = 'audio/mpeg',
+     errorContentType = 'application/json',
+     messages,
+   }: CreateSpeechResponseOptions,
+ ) => {
+   const prefix = `[${logTag}]`;
+   const invalidMessage = messages?.invalid ?? 'Unexpected payload from speech provider';
+   const failureMessage = messages?.failure ?? 'Failed to synthesize speech';
+
+   return createNodeResponse(responseCreator, {
+     error: {
+       cacheControl: 'no-store',
+       defaultContentType: errorContentType,
+     },
+     onInvalidResponse: (response) => {
+       console.error(`${prefix} ${invalidMessage}`, response);
+
+       return createErrorResponse(ChatErrorType.InternalServerError);
+     },
+     onNonResponseError: (error) => {
+       console.error(`${prefix} ${failureMessage}`, error);
+
+       return createErrorResponse(ChatErrorType.InternalServerError, {
+         message: error instanceof Error ? error.message : String(error),
+       });
+     },
+     success: {
+       cacheControl: 'no-store',
+       defaultContentType: successContentType,
+     },
+   });
+ };
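
To make the error path concrete, a small hedged sketch (the `logTag` value is hypothetical): a provider call that throws a plain `Error` is logged with the tag prefix and mapped to a structured internal-server-error Response instead of escaping the route.

```ts
import { createSpeechResponse } from '@/server/utils/createSpeechResponse';

// Hypothetical failing provider call, exercising the onNonResponseError branch.
const response = await createSpeechResponse(
  async () => {
    throw new Error('provider unavailable');
  },
  { logTag: 'webapi/tts/example' },
);

// Logs "[webapi/tts/example] Failed to synthesize speech" and returns the
// Response produced by createErrorResponse(ChatErrorType.InternalServerError).
console.log(response.status);
```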
@@ -1,25 +0,0 @@
- // @vitest-environment edge-runtime
- import { describe, expect, it, vi } from 'vitest';
-
- import { POST as UniverseRoute } from '../[provider]/route';
- import { POST, runtime } from './route';
-
- vi.mock('../[provider]/route', () => ({
-   POST: vi.fn().mockResolvedValue('mocked response'),
- }));
-
- describe('Configuration tests', () => {
-   it('should have runtime set to "edge"', () => {
-     expect(runtime).toBe('edge');
-   });
- });
-
- describe('Groq POST function tests', () => {
-   it('should call UniverseRoute with correct parameters', async () => {
-     const mockRequest = new Request('https://example.com', { method: 'POST' });
-     await POST(mockRequest);
-     expect(UniverseRoute).toHaveBeenCalledWith(mockRequest, {
-       params: Promise.resolve({ provider: 'azureai' }),
-     });
-   });
- });
@@ -1,6 +0,0 @@
- import { POST as UniverseRoute } from '../[provider]/route';
-
- export const runtime = 'edge';
-
- export const POST = async (req: Request) =>
-   UniverseRoute(req, { params: Promise.resolve({ provider: 'azureai' }) });