@lobehub/lobehub 2.0.0-next.209 → 2.0.0-next.210

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/.vscode/settings.json +2 -17
  2. package/CHANGELOG.md +25 -0
  3. package/apps/desktop/src/main/controllers/SystemCtr.ts +10 -0
  4. package/apps/desktop/src/main/core/App.ts +10 -188
  5. package/apps/desktop/src/main/core/__tests__/App.test.ts +6 -42
  6. package/apps/desktop/src/main/core/browser/Browser.ts +17 -9
  7. package/apps/desktop/src/main/core/infrastructure/RendererUrlManager.ts +126 -0
  8. package/apps/desktop/src/main/core/infrastructure/__tests__/RendererUrlManager.test.ts +72 -0
  9. package/changelog/v1.json +5 -0
  10. package/package.json +1 -1
  11. package/packages/builtin-tool-web-browsing/src/client/Inspector/Search/index.tsx +1 -1
  12. package/packages/desktop-bridge/src/index.ts +0 -2
  13. package/packages/desktop-bridge/src/routeVariants.ts +0 -2
  14. package/packages/electron-client-ipc/src/types/system.ts +1 -0
  15. package/packages/model-bank/src/aiModels/lobehub.ts +0 -3
  16. package/packages/model-runtime/src/core/streams/openai/openai.test.ts +167 -0
  17. package/packages/model-runtime/src/core/streams/openai/openai.ts +30 -6
  18. package/packages/model-runtime/src/core/streams/protocol.ts +5 -0
  19. package/packages/model-runtime/src/core/streams/qwen.test.ts +131 -2
  20. package/packages/model-runtime/src/core/streams/qwen.ts +9 -1
  21. package/scripts/electronWorkflow/modifiers/index.mts +2 -0
  22. package/scripts/electronWorkflow/modifiers/nextConfig.mts +1 -1
  23. package/scripts/electronWorkflow/modifiers/staticExport.mts +174 -0
  24. package/src/layout/GlobalProvider/Locale.tsx +1 -1
  25. package/src/store/electron/actions/app.ts +6 -0
  26. package/src/utils/server/routeVariants.ts +2 -2
package/apps/desktop/src/main/core/infrastructure/__tests__/RendererUrlManager.test.ts ADDED
@@ -0,0 +1,72 @@
+ import { beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import { RendererUrlManager } from '../RendererUrlManager';
+
+ const mockPathExistsSync = vi.fn();
+
+ vi.mock('electron', () => ({
+   app: {
+     isReady: vi.fn(() => true),
+     whenReady: vi.fn(() => Promise.resolve()),
+   },
+   protocol: {
+     handle: vi.fn(),
+   },
+ }));
+
+ vi.mock('fs-extra', () => ({
+   pathExistsSync: (...args: any[]) => mockPathExistsSync(...args),
+ }));
+
+ vi.mock('@/const/dir', () => ({
+   nextExportDir: '/mock/export/out',
+ }));
+
+ vi.mock('@/const/env', () => ({
+   isDev: false,
+ }));
+
+ vi.mock('@/env', () => ({
+   getDesktopEnv: vi.fn(() => ({ DESKTOP_RENDERER_STATIC: false })),
+ }));
+
+ vi.mock('@/utils/logger', () => ({
+   createLogger: () => ({
+     debug: vi.fn(),
+     info: vi.fn(),
+     warn: vi.fn(),
+     error: vi.fn(),
+   }),
+ }));
+
+ describe('RendererUrlManager', () => {
+   let manager: RendererUrlManager;
+
+   beforeEach(() => {
+     vi.clearAllMocks();
+     mockPathExistsSync.mockReset();
+     manager = new RendererUrlManager();
+   });
+
+   describe('resolveRendererFilePath', () => {
+     it('should resolve asset requests directly', async () => {
+       mockPathExistsSync.mockImplementation(
+         (p: string) => p === '/mock/export/out/en-US__0__light.txt',
+       );
+
+       const resolved = await manager.resolveRendererFilePath(
+         new URL('app://next/en-US__0__light.txt'),
+       );
+
+       expect(resolved).toBe('/mock/export/out/en-US__0__light.txt');
+     });
+
+     it('should fall back to index.html for app routes', async () => {
+       mockPathExistsSync.mockImplementation((p: string) => p === '/mock/export/out/index.html');
+
+       const resolved = await manager.resolveRendererFilePath(new URL('app://next/settings'));
+
+       expect(resolved).toBe('/mock/export/out/index.html');
+     });
+   });
+ });
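The tests above pin down the desktop renderer's custom-protocol contract: an `app://next/...` request that names a real file in the static export directory resolves to that file, while an extensionless app route like `app://next/settings` falls back to `index.html`. The sketch below is not the shipped `RendererUrlManager` (its source is not shown in this diff); it is a minimal reconstruction of the behavior the two tests encode, with `node:fs` standing in for fs-extra's `pathExistsSync`:

```ts
import { existsSync } from 'node:fs';
import { join } from 'node:path';

// Assumption: in the real code the export directory comes from '@/const/dir'.
const nextExportDir = '/mock/export/out';

// Minimal sketch of the resolution contract the tests encode:
//   app://next/en-US__0__light.txt -> <out>/en-US__0__light.txt (when the file exists)
//   app://next/settings            -> <out>/index.html (SPA fallback for app routes)
export const resolveRendererFilePath = async (url: URL): Promise<string> => {
  // 'app://next/settings' parses to host 'next' and pathname '/settings'.
  const relativePath = decodeURIComponent(url.pathname).replace(/^\/+/, '');
  const candidate = join(nextExportDir, relativePath);

  // Serve the file directly when the static export contains it...
  if (relativePath && existsSync(candidate)) return candidate;

  // ...otherwise fall back to index.html so client-side routing takes over.
  return join(nextExportDir, 'index.html');
};
```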
package/changelog/v1.json CHANGED
@@ -1,4 +1,9 @@
  [
+   {
+     "children": {},
+     "date": "2026-01-04",
+     "version": "2.0.0-next.210"
+   },
    {
      "children": {
        "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/lobehub",
-   "version": "2.0.0-next.209",
+   "version": "2.0.0-next.210",
    "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
package/packages/builtin-tool-web-browsing/src/client/Inspector/Search/index.tsx CHANGED
@@ -47,7 +47,7 @@ export const SearchInspector = memo<BuiltinInspectorProps<SearchQuery, UniformSe
      )}
    >
      <span>{t('builtins.lobe-web-browsing.apiName.search')}: </span>
-     {query && <span className={highlightTextStyles.gold}>{query}</span>}
+     {query && <span className={highlightTextStyles.primary}>{query}</span>}
      {!isLoading &&
        !isArgumentsStreaming &&
        pluginState?.results &&
package/packages/desktop-bridge/src/index.ts CHANGED
@@ -3,8 +3,6 @@ export {
    DEFAULT_LANG,
    DEFAULT_VARIANTS,
    type IRouteVariants,
-   LOBE_LOCALE_COOKIE,
-   LOBE_THEME_APPEARANCE,
    type Locales,
    locales,
    RouteVariants,
package/packages/desktop-bridge/src/routeVariants.ts CHANGED
@@ -1,7 +1,5 @@
  // Shared route variants utilities for desktop and web builds

- export const LOBE_LOCALE_COOKIE = 'LOBE_LOCALE';
- export const LOBE_THEME_APPEARANCE = 'LOBE_THEME_APPEARANCE';
  export const DEFAULT_LANG = 'en-US';

  // Supported locales (keep aligned with web resources)
package/packages/electron-client-ipc/src/types/system.ts CHANGED
@@ -3,6 +3,7 @@ export interface ElectronAppState {
    isLinux?: boolean;
    isMac?: boolean;
    isWindows?: boolean;
+   locale?: string;
    platform?: 'darwin' | 'win32' | 'linux';
    systemAppearance?: string;
    userPath?: UserPathData;
package/packages/model-bank/src/aiModels/lobehub.ts CHANGED
@@ -28,7 +28,6 @@ const lobehubChatModels: AIChatModelCard[] = [
      releasedAt: '2025-12-11',
      settings: {
        extendParams: ['gpt5_2ReasoningEffort', 'textVerbosity'],
-       searchImpl: 'params',
      },
      type: 'chat',
    },
@@ -56,7 +55,6 @@ const lobehubChatModels: AIChatModelCard[] = [
      releasedAt: '2025-11-13',
      settings: {
        extendParams: ['gpt5_1ReasoningEffort', 'textVerbosity'],
-       searchImpl: 'params',
      },
      type: 'chat',
    },
@@ -84,7 +82,6 @@ const lobehubChatModels: AIChatModelCard[] = [
      releasedAt: '2025-08-07',
      settings: {
        extendParams: ['reasoningEffort'],
-       searchImpl: 'params',
      },
      type: 'chat',
    },
package/packages/model-runtime/src/core/streams/openai/openai.test.ts CHANGED
@@ -1203,6 +1203,173 @@ describe('OpenAIStream', () => {
        'thoughtSignature',
      );
    });
+
+   it('should handle GPT-5.2 parallel tool calls with correct id mapping', async () => {
+     // GPT-5.2 returns multiple tool calls in parallel with different indices
+     // Each tool call starts with id+name, followed by arguments-only chunks
+     // The key issue is that subsequent chunks without id should use the correct id
+     // based on their index, not the first tool's id
+     const streamData = [
+       // Tool 0: first chunk with id
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [
+                 {
+                   id: 'call_tool0',
+                   type: 'function',
+                   function: { name: 'search', arguments: '' },
+                   index: 0,
+                 },
+               ],
+             },
+           },
+         ],
+       },
+       // Tool 0: arguments chunk
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [{ function: { arguments: '{"query":' }, index: 0 }],
+             },
+           },
+         ],
+       },
+       // Tool 1: first chunk with id (parallel tool call starts)
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [
+                 {
+                   id: 'call_tool1',
+                   type: 'function',
+                   function: { name: 'search', arguments: '' },
+                   index: 1,
+                 },
+               ],
+             },
+           },
+         ],
+       },
+       // Tool 0: more arguments (continuing tool 0)
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [{ function: { arguments: ' "test0"}' }, index: 0 }],
+             },
+           },
+         ],
+       },
+       // Tool 1: arguments chunk
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [{ function: { arguments: '{"query": "test1"}' }, index: 1 }],
+             },
+           },
+         ],
+       },
+       // Tool 2: first chunk with id
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [
+                 {
+                   id: 'call_tool2',
+                   type: 'function',
+                   function: { name: 'search', arguments: '' },
+                   index: 2,
+                 },
+               ],
+             },
+           },
+         ],
+       },
+       // Tool 2: arguments chunk
+       {
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               tool_calls: [{ function: { arguments: '{"query": "test2"}' }, index: 2 }],
+             },
+           },
+         ],
+       },
+       // Finish
+       {
+         id: 'chatcmpl-test',
+         choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
+       },
+     ];
+
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         streamData.forEach((data) => {
+           controller.enqueue(data);
+         });
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream);
+     const decoder = new TextDecoder();
+     const chunks: string[] = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     // Verify the exact output - each tool call chunk should have the correct id based on index
+     expect(chunks).toEqual(
+       [
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":"","name":"search"},"id":"call_tool0","index":0,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":"{\\"query\\":","name":null},"id":"call_tool0","index":0,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":"","name":"search"},"id":"call_tool1","index":1,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":" \\"test0\\"}","name":null},"id":"call_tool0","index":0,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":"{\\"query\\": \\"test1\\"}","name":null},"id":"call_tool1","index":1,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":"","name":"search"},"id":"call_tool2","index":2,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: tool_calls',
+         `data: [{"function":{"arguments":"{\\"query\\": \\"test2\\"}","name":null},"id":"call_tool2","index":2,"type":"function"}]\n`,
+         'id: chatcmpl-test',
+         'event: stop',
+         `data: "tool_calls"\n`,
+       ].map((i) => `${i}\n`),
+     );
+   });
  });

  describe('Reasoning', () => {
package/packages/model-runtime/src/core/streams/openai/openai.ts CHANGED
@@ -157,13 +157,35 @@ const transformOpenAIStream = (
    );

    if (tool_calls.length > 0) {
+     // Validate tool calls - function must exist for valid tool calls
+     // This ensures proper error handling for malformed chunks
+     const hasInvalidToolCall = item.delta.tool_calls.some((tc) => tc.function === null);
+     if (hasInvalidToolCall) {
+       throw new Error('Invalid tool call: function is null');
+     }
+
      return {
-       data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
-         if (streamContext && !streamContext.tool) {
+       data: item.delta.tool_calls.map((value, mapIndex): StreamToolCallChunkData => {
+         // Determine the actual tool index
+         const toolIndex = typeof value.index !== 'undefined' ? value.index : mapIndex;
+
+         // Store tool info by index for parallel tool calls (e.g., GPT-5.2)
+         // When a chunk has id and name, it's the start of a new tool call
+         if (streamContext && value.id && value.function?.name) {
+           if (!streamContext.tools) streamContext.tools = {};
+           streamContext.tools[toolIndex] = {
+             id: value.id,
+             index: toolIndex,
+             name: value.function.name,
+           };
+         }
+
+         // Also maintain backward compatibility with single tool context
+         if (streamContext && !streamContext.tool && value.id) {
            streamContext.tool = {
              id: value.id!,
-             index: value.index,
-             name: value.function!.name!,
+             index: toolIndex,
+             name: value.function?.name ?? '',
            };
          }

@@ -172,10 +194,12 @@ const transformOpenAIStream = (
            arguments: value.function?.arguments ?? '',
            name: value.function?.name ?? null,
          },
+         // Priority: explicit id > tools map by index > single tool fallback > generated id
          id:
            value.id ||
+           streamContext?.tools?.[toolIndex]?.id ||
            streamContext?.tool?.id ||
-           generateToolCallId(index, value.function?.name),
+           generateToolCallId(mapIndex, value.function?.name),

          // mistral's tool calling don't have index and function field, it's data like:
          // [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
@@ -184,7 +208,7 @@ const transformOpenAIStream = (
          // [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]

          // so we need to add these default values
-         index: typeof value.index !== 'undefined' ? value.index : index,
+         index: toolIndex,
          type: value.type || 'function',
        };

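The core of this change: previously, an arguments-only chunk (no `id`) always fell back to `streamContext.tool.id`, i.e. the first tool's id, so with parallel tool calls (as GPT-5.2 emits) later tools' argument fragments were attributed to the wrong tool. The new `tools` map keys metadata by the chunk's `index`, and the id now resolves through a priority chain. A self-contained sketch of just that chain, with simplified delta shapes and a placeholder id generator (the real logic is `transformOpenAIStream` above):

```ts
// Simplified shapes; the real ones live in the model-runtime stream types.
interface ToolCallDelta {
  id?: string;
  index?: number;
  function?: { arguments?: string; name?: string };
}

interface Context {
  tool?: { id: string }; // legacy single-tool fallback
  tools?: Record<number, { id: string }>; // per-index map for parallel calls
}

// Placeholder, not the package's actual generateToolCallId.
const generateToolCallId = (index: number, name?: string) => `${name || 'tool_call'}_${index}`;

// Priority: explicit id > tools map by index > single-tool fallback > generated id
const resolveToolCallId = (value: ToolCallDelta, mapIndex: number, ctx: Context): string => {
  const toolIndex = value.index ?? mapIndex;

  // A chunk carrying both id and name starts a new tool call: record it by index.
  if (value.id && value.function?.name) {
    (ctx.tools ??= {})[toolIndex] = { id: value.id };
  }

  return (
    value.id ||
    ctx.tools?.[toolIndex]?.id ||
    ctx.tool?.id ||
    generateToolCallId(mapIndex, value.function?.name)
  );
};

// Mirrors the GPT-5.2 test above: after tools 0 and 1 both start, an
// arguments-only chunk for index 1 used to fall back to the first tool's id
// (call_tool0); the per-index map now yields the correct one.
const ctx: Context = {};
resolveToolCallId({ id: 'call_tool0', index: 0, function: { name: 'search' } }, 0, ctx);
resolveToolCallId({ id: 'call_tool1', index: 1, function: { name: 'search' } }, 0, ctx);
console.log(resolveToolCallId({ index: 1, function: { arguments: '{"q":' } }, 0, ctx)); // call_tool1
```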
package/packages/model-runtime/src/core/streams/protocol.ts CHANGED
@@ -59,6 +59,11 @@ export interface StreamContext {
      name: string;
    };
    toolIndex?: number;
+   /**
+    * Map of tool information by index for parallel tool calls
+    * Used when multiple tools are called in parallel (e.g., GPT-5.2 parallel search)
+    */
+   tools?: Record<number, { id: string; index: number; name: string }>;
    usage?: ModelUsage;
  }

package/packages/model-runtime/src/core/streams/qwen.test.ts CHANGED
@@ -131,7 +131,7 @@ describe('QwenAIStream', () => {
      expect(chunks).toEqual([
        'id: 2\n',
        'event: tool_calls\n',
-       `data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+       `data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
      ]);

      expect(onToolCallMock).toHaveBeenCalledTimes(1);
@@ -347,7 +347,136 @@ describe('QwenAIStream', () => {
      expect(chunks).toEqual([
        'id: 5\n',
        'event: tool_calls\n',
-       `data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+       `data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+     ]);
+   });
+
+   // Test case for Qwen models sending tool_calls in two separate chunks:
+   // 1. First chunk: {id, name} without arguments
+   // 2. Second chunk: {id, arguments} without name
+   // This behavior is observed in qwen3-vl-235b-a22b-thinking model
+   it('should handle tool calls with name in first chunk and arguments in second chunk (Qwen behavior)', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         // First chunk: has id and name, but no arguments
+         controller.enqueue({
+           choices: [
+             {
+               delta: {
+                 content: null,
+                 tool_calls: [
+                   {
+                     index: 0,
+                     id: 'call_4bde23783e314f219c6d65',
+                     type: 'function',
+                     function: { name: 'time____get_current_time____mcp' },
+                   },
+                 ],
+               },
+               finish_reason: null,
+               index: 0,
+             },
+           ],
+           id: 'chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5',
+         });
+
+         // Second chunk: same id, has arguments but no name
+         controller.enqueue({
+           choices: [
+             {
+               delta: {
+                 content: null,
+                 tool_calls: [
+                   {
+                     index: 0,
+                     id: 'call_4bde23783e314f219c6d65',
+                     type: 'function',
+                     function: { arguments: '{"timezone": "Asia/Shanghai"}' },
+                   },
+                 ],
+               },
+               finish_reason: null,
+               index: 0,
+             },
+           ],
+           id: 'chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5',
+         });
+
+         controller.close();
+       },
+     });
+
+     const onToolCallMock = vi.fn();
+
+     const protocolStream = QwenAIStream(mockOpenAIStream, {
+       callbacks: {
+         onToolsCalling: onToolCallMock,
+       },
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     // First chunk should have name with empty arguments
+     // Second chunk should have arguments with null name (same as OpenAI/vLLM behavior)
+     expect(chunks).toEqual([
+       'id: chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5\n',
+       'event: tool_calls\n',
+       `data: [{"function":{"arguments":"","name":"time____get_current_time____mcp"},"id":"call_4bde23783e314f219c6d65","index":0,"type":"function"}]\n\n`,
+       'id: chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5\n',
+       'event: tool_calls\n',
+       `data: [{"function":{"arguments":"{\\"timezone\\": \\"Asia/Shanghai\\"}","name":null},"id":"call_4bde23783e314f219c6d65","index":0,"type":"function"}]\n\n`,
+     ]);
+
+     expect(onToolCallMock).toHaveBeenCalledTimes(2);
+   });
+
+   it('should handle tool calls with only name (no arguments field)', async () => {
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           choices: [
+             {
+               delta: {
+                 tool_calls: [
+                   {
+                     index: 0,
+                     id: 'call_123',
+                     type: 'function',
+                     function: { name: 'get_weather' },
+                   },
+                 ],
+               },
+               index: 0,
+             },
+           ],
+           id: '6',
+         });
+
+         controller.close();
+       },
+     });
+
+     const protocolStream = QwenAIStream(mockOpenAIStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     // Should have empty string for arguments, not undefined
+     expect(chunks).toEqual([
+       'id: 6\n',
+       'event: tool_calls\n',
+       `data: [{"function":{"arguments":"","name":"get_weather"},"id":"call_123","index":0,"type":"function"}]\n\n`,
      ]);
    });
  });
package/packages/model-runtime/src/core/streams/qwen.ts CHANGED
@@ -72,7 +72,15 @@ export const transformQwenStream = (
    return {
      data: item.delta.tool_calls.map(
        (value, index): StreamToolCallChunkData => ({
-         function: value.function,
+         // Qwen models may send tool_calls in two separate chunks:
+         // 1. First chunk: {id, name} without arguments
+         // 2. Second chunk: {id, arguments} without name
+         // We need to provide default values to handle both cases
+         // Use null for missing name (same as OpenAI stream behavior)
+         function: {
+           arguments: value.function?.arguments ?? '',
+           name: value.function?.name ?? null,
+         },
          id: value.id || generateToolCallId(index, value.function?.name),
          index: typeof value.index !== 'undefined' ? value.index : index,
          type: value.type || 'function',
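With this defaulting, `transformQwenStream` emits the same normalized shape as the OpenAI transform: a missing `arguments` becomes `''` and a missing `name` becomes `null`, so consumers can accumulate argument fragments per tool id without `undefined` guards. A tiny illustration of the rule in isolation, using the two chunks from the qwen3-vl test above (shapes simplified):

```ts
interface QwenFunctionDelta {
  arguments?: string;
  name?: string;
}

// The defaulting rule added above, extracted on its own.
const normalizeFunction = (fn?: QwenFunctionDelta) => ({
  arguments: fn?.arguments ?? '',
  name: fn?.name ?? null,
});

// Chunk 1 carries only the name:
console.log(normalizeFunction({ name: 'time____get_current_time____mcp' }));
// -> { arguments: '', name: 'time____get_current_time____mcp' }

// Chunk 2 carries only the arguments:
console.log(normalizeFunction({ arguments: '{"timezone": "Asia/Shanghai"}' }));
// -> { arguments: '{"timezone": "Asia/Shanghai"}', name: null }
```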
package/scripts/electronWorkflow/modifiers/index.mts CHANGED
@@ -5,12 +5,14 @@ import { modifyAppCode } from './appCode.mjs';
  import { cleanUpCode } from './cleanUp.mjs';
  import { modifyNextConfig } from './nextConfig.mjs';
  import { modifyRoutes } from './routes.mjs';
+ import { modifyStaticExport } from './staticExport.mjs';
  import { isDirectRun, runStandalone } from './utils.mjs';

  export const modifySourceForElectron = async (TEMP_DIR: string) => {
    await modifyNextConfig(TEMP_DIR);
    await modifyAppCode(TEMP_DIR);
    await modifyRoutes(TEMP_DIR);
+   await modifyStaticExport(TEMP_DIR);
    await cleanUpCode(TEMP_DIR);
  };

package/scripts/electronWorkflow/modifiers/nextConfig.mts CHANGED
@@ -21,6 +21,7 @@ export const modifyNextConfig = async (TEMP_DIR: string) => {

    console.log(` Processing ${path.relative(TEMP_DIR, nextConfigPath)}...`);
    await updateFile({
+     assertAfter: (code) => /output\s*:\s*["']export["']/.test(code) && !/withPWA\s*\(/.test(code),
      filePath: nextConfigPath,
      name: 'modifyNextConfig',
      transformer: (code) => {
@@ -147,7 +148,6 @@ export const modifyNextConfig = async (TEMP_DIR: string) => {

        return newCode;
      },
-     assertAfter: (code) => /output\s*:\s*['"]export['"]/.test(code) && !/withPWA\s*\(/.test(code),
    });
  };
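This last hunk only moves `assertAfter` ahead of `filePath` in the options object (and flips the regex quote order); the guard itself is unchanged: after the transformer rewrites the Next config, the predicate must confirm the config sets `output: 'export'` and no longer wraps the export in `withPWA(...)`. A hedged sketch of what an `updateFile` helper with such a post-transform assertion might look like; the real helper lives in the scripts package and is not part of this diff, so the shape below is inferred from the call site:

```ts
import { readFile, writeFile } from 'node:fs/promises';

// Assumed option shape, inferred from the updateFile call above.
interface UpdateFileOptions {
  assertAfter?: (code: string) => boolean; // post-transform sanity check
  filePath: string;
  name: string;
  transformer: (code: string) => string;
}

// Hypothetical helper: read, transform, verify, then write. Failing the
// assertion aborts the build step instead of silently shipping a bad config.
const updateFile = async ({ assertAfter, filePath, name, transformer }: UpdateFileOptions) => {
  const code = await readFile(filePath, 'utf8');
  const newCode = transformer(code);

  if (assertAfter && !assertAfter(newCode)) {
    throw new Error(`${name}: post-transform assertion failed for ${filePath}`);
  }

  await writeFile(filePath, newCode);
};
```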
153