@lobehub/chat 1.122.1 → 1.122.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,64 @@
 
  # Changelog
 
+ ### [Version 1.122.3](https://github.com/lobehub/lobe-chat/compare/v1.122.2...v1.122.3)
+
+ <sup>Released on **2025-09-04**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Support base64 image from markdown image syntax.
+
+ #### 💄 Styles
+
+ - **misc**: Update the price of the o3 model in OpenRouter.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Support base64 image from markdown image syntax, closes [#9054](https://github.com/lobehub/lobe-chat/issues/9054) ([d013a16](https://github.com/lobehub/lobe-chat/commit/d013a16))
+
+ #### Styles
+
+ - **misc**: Update the price of the o3 model in OpenRouter, closes [#9075](https://github.com/lobehub/lobe-chat/issues/9075) ([43ef47c](https://github.com/lobehub/lobe-chat/commit/43ef47c))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.122.2](https://github.com/lobehub/lobe-chat/compare/v1.122.1...v1.122.2)
+
+ <sup>Released on **2025-09-04**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **modelProvider**: Add lmstudio to provider whitelist to enable fetchOnClient toggle.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **modelProvider**: Add lmstudio to provider whitelist to enable fetchOnClient toggle, closes [#9067](https://github.com/lobehub/lobe-chat/issues/9067) ([e58864f](https://github.com/lobehub/lobe-chat/commit/e58864f))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.122.1](https://github.com/lobehub/lobe-chat/compare/v1.122.0...v1.122.1)
 
  <sup>Released on **2025-09-04**</sup>
package/README.md CHANGED
@@ -150,7 +150,7 @@ From productivity tools to development environments, discover new ways to extend
 
  **Peak Performance, Zero Distractions**
 
- Get the full LobeChat experience without browser limitations—lightweight, focused, and always ready to go. Our desktop application provides a dedicated environment for your AI interactions, ensuring optimal performance and minimal distractions.
+ Get the full LobeChat experience without browser limitations—comprehensive, focused, and always ready to go. Our desktop application provides a dedicated environment for your AI interactions, ensuring optimal performance and minimal distractions.
 
  Experience faster response times, better resource management, and a more stable connection to your AI assistant. The desktop app is designed for users who demand the best performance from their AI tools.
 
@@ -481,7 +481,7 @@ We deeply understand the importance of providing a seamless experience for users
  Therefore, we have adopted Progressive Web Application ([PWA](https://support.google.com/chrome/answer/9658361)) technology,
  a modern web technology that elevates web applications to an experience close to that of native apps.
 
- Through PWA, LobeChat can offer a highly optimized user experience on both desktop and mobile devices while maintaining its lightweight and high-performance characteristics.
+ Through PWA, LobeChat can offer a highly optimized user experience on both desktop and mobile devices while maintaining high-performance characteristics.
  Visually and in terms of feel, we have also meticulously designed the interface to ensure it is indistinguishable from native apps,
  providing smooth animations, responsive layouts, and adapting to different device screen resolutions.
 
package/changelog/v1.json CHANGED
@@ -1,4 +1,21 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Support base64 image from markdown image syntax."
+       ],
+       "improvements": [
+         "Update the price of the o3 model in OpenRouter."
+       ]
+     },
+     "date": "2025-09-04",
+     "version": "1.122.3"
+   },
+   {
+     "children": {},
+     "date": "2025-09-04",
+     "version": "1.122.2"
+   },
    {
      "children": {},
      "date": "2025-09-04",
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.122.1",
+   "version": "1.122.3",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -396,9 +396,9 @@ const openrouterChatModels: AIChatModelCard[] = [
    maxOutput: 100_000,
    pricing: {
      units: [
-       { name: 'textInput_cacheRead', rate: 2.5, strategy: 'fixed', unit: 'millionTokens' },
-       { name: 'textInput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
-       { name: 'textOutput', rate: 40, strategy: 'fixed', unit: 'millionTokens' },
+       { name: 'textInput_cacheRead', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
+       { name: 'textInput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
+       { name: 'textOutput', rate: 8, strategy: 'fixed', unit: 'millionTokens' },
      ],
    },
    releasedAt: '2025-04-17',
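
Each `units` entry applies a fixed USD rate per million tokens, so this change is a uniform 5x price cut for o3 input, cached input, and output. A minimal sketch of how such a fixed-strategy unit translates into a request cost (the `costFor` helper is illustrative, not code from the package):

```ts
interface PricingUnit {
  name: string;
  rate: number; // USD per one million tokens
  strategy: 'fixed';
  unit: 'millionTokens';
}

// Hypothetical helper: cost in USD for a token count at a fixed per-million rate.
const costFor = (unit: PricingUnit, tokens: number): number => (tokens / 1_000_000) * unit.rate;

// With the updated rates, 10k input tokens cost $0.02 (previously $0.10 at rate 10),
// and 2k output tokens cost $0.016 (previously $0.08 at rate 40).
costFor({ name: 'textInput', rate: 2, strategy: 'fixed', unit: 'millionTokens' }, 10_000); // 0.02
costFor({ name: 'textOutput', rate: 8, strategy: 'fixed', unit: 'millionTokens' }, 2_000); // 0.016
```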
@@ -163,6 +163,61 @@ describe('OpenAIStream', () => {
      );
    });
 
+   it('should emit base64_image and strip markdown data:image from text', async () => {
+     const data = [
+       {
+         id: 'img-1',
+         choices: [
+           { index: 0, delta: { role: 'assistant', content: '这是一张图片: ' } },
+         ],
+       },
+       {
+         id: 'img-1',
+         choices: [
+           {
+             index: 0,
+             delta: {
+               content:
+                 '![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAAB3D1E1AA==)',
+             },
+           },
+         ],
+       },
+       { id: 'img-1', choices: [{ index: 0, delta: {}, finish_reason: 'stop' }] },
+     ];
+
+     const mockOpenAIStream = new ReadableStream({
+       start(controller) {
+         data.forEach((c) => controller.enqueue(c));
+         controller.close();
+       },
+     });
+
+     const protocolStream = OpenAIStream(mockOpenAIStream);
+
+     const decoder = new TextDecoder();
+     const chunks: string[] = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual(
+       [
+         'id: img-1',
+         'event: text',
+         `data: "这是一张图片: "\n`,
+         'id: img-1',
+         'event: base64_image',
+         `data: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAAB3D1E1AA=="\n`,
+         'id: img-1',
+         'event: stop',
+         `data: "stop"\n`,
+       ].map((i) => `${i}\n`),
+     );
+   });
+
    it('should handle content with tool_calls but is an empty object', async () => {
      // data: {"id":"chatcmpl-A7pokGUqSov0JuMkhiHhWU9GRtAgJ", "object":"chat.completion.chunk", "created":1726430846, "model":"gpt-4o-2024-05-13", "choices":[{"index":0, "delta":{"content":" today", "role":"", "tool_calls":[]}, "finish_reason":"", "logprobs":""}], "prompt_annotations":[{"prompt_index":0, "content_filter_results":null}]}
      const mockOpenAIStream = new ReadableStream({
@@ -2311,4 +2366,86 @@ describe('OpenAIStream', () => {
 
    expect(chunks).toEqual(['id: 6\n', 'event: base64_image\n', `data: "${base64}"\n\n`]);
  });
+
+ it('should handle finish_reason with markdown image in content', async () => {
+   const base64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQVR4nFy9a5okSY4jCFBU3SOr53HdvcZeYW/YVZnhZqpCYn+AVIuZ7PqqKyPczfQhQgIgSOH/+//9PxRVu7QzX5nvqveVP5mv+3rf+XPt985b2NIVgVgK1jr0da7zrAiegWPhPBABLi1GILhCEMkFnCuOFRFxHN/r/CbOym/om/h1X+d1H/v767rP9328r9g3VNblpoXsAwsnTtnWp0kQ40siih6NixuHlN9Rt7ehv1mbW2dkg1ef03J9zQQpQg5yc/XllveG4wa4arKtSr0NwSCdGEJVNeKlkDZMov695YaQ5NVK3fmjn4OrE9N/U04C0EqT/2HCBxrf9pJe1L2nPBjqhKEq1TEi1Q/OXiIq+IrqX2fUb+qF+2kF10k/4ScwIXidU6/T6vGkA/bSR/fZ7Ok8yOd0s+27CnP8PH3cijINdbAcAAAAASUVORK5CYII=';
+   const mockOpenAIStream = new ReadableStream({
+     start(controller) {
+       controller.enqueue({
+         id: 'chatcmpl-test',
+         choices: [
+           {
+             index: 0,
+             delta: { content: `这有一张图片: ![image](${base64})` },
+             finish_reason: 'stop',
+           },
+         ],
+       });
+
+       controller.close();
+     },
+   });
+
+   const protocolStream = OpenAIStream(mockOpenAIStream);
+
+   const decoder = new TextDecoder();
+   const chunks = [];
+
+   // @ts-ignore
+   for await (const chunk of protocolStream) {
+     chunks.push(decoder.decode(chunk, { stream: true }));
+   }
+
+   expect(chunks).toEqual([
+     'id: chatcmpl-test\n',
+     'event: text\n',
+     `data: "这有一张图片:"\n\n`,
+     'id: chatcmpl-test\n',
+     'event: base64_image\n',
+     `data: "${base64}"\n\n`,
+   ]);
+ });
+
+ it('should handle finish_reason with multiple markdown images in content', async () => {
+   const base64_1 = 'data:image/png;base64,first';
+   const base64_2 = 'data:image/jpeg;base64,second';
+   const mockOpenAIStream = new ReadableStream({
+     start(controller) {
+       controller.enqueue({
+         id: 'chatcmpl-multi',
+         choices: [
+           {
+             index: 0,
+             delta: { content: `![img1](${base64_1}) and ![img2](${base64_2})` },
+             finish_reason: 'stop',
+           },
+         ],
+       });
+
+       controller.close();
+     },
+   });
+
+   const protocolStream = OpenAIStream(mockOpenAIStream);
+
+   const decoder = new TextDecoder();
+   const chunks = [];
+
+   // @ts-ignore
+   for await (const chunk of protocolStream) {
+     chunks.push(decoder.decode(chunk, { stream: true }));
+   }
+
+   expect(chunks).toEqual([
+     'id: chatcmpl-multi\n',
+     'event: text\n',
+     `data: "and"\n\n`, // Remove all markdown base64 image segments
+     'id: chatcmpl-multi\n',
+     'event: base64_image\n',
+     `data: "${base64_1}"\n\n`,
+     'id: chatcmpl-multi\n',
+     'event: base64_image\n',
+     `data: "${base64_2}"\n\n`,
+   ]);
+ });
  });
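
The expected arrays in these tests encode LobeChat's stream protocol: each logical chunk is framed as an `id:` line, an `event:` line, and a JSON-encoded `data:` line followed by a blank line. A minimal sketch of that framing (the `encodeProtocolChunk` helper is illustrative, not the package's encoder):

```ts
// Illustrative encoder matching the framing asserted above; the real package
// produces these bytes inside its stream transformers.
const encodeProtocolChunk = (id: string, event: string, data: unknown): string =>
  `id: ${id}\n` + `event: ${event}\n` + `data: ${JSON.stringify(data)}\n\n`;

encodeProtocolChunk('chatcmpl-multi', 'base64_image', 'data:image/jpeg;base64,second');
// => 'id: chatcmpl-multi\nevent: base64_image\ndata: "data:image/jpeg;base64,second"\n\n'
```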
@@ -20,6 +20,28 @@ import {
    generateToolCallId,
  } from '../protocol';
 
+ // Process markdown base64 images: extract URLs and clean text in one pass
+ const processMarkdownBase64Images = (text: string): { cleanedText: string; urls: string[] } => {
+   if (!text) return { cleanedText: text, urls: [] };
+
+   const urls: string[] = [];
+   const mdRegex = /!\[[^\]]*]\(\s*(data:image\/[\d+.A-Za-z-]+;base64,[^\s)]+)\s*\)/g;
+   let cleanedText = text;
+   let m: RegExpExecArray | null;
+
+   // Reset regex lastIndex to ensure we start from the beginning
+   mdRegex.lastIndex = 0;
+
+   while ((m = mdRegex.exec(text)) !== null) {
+     if (m[1]) urls.push(m[1]);
+   }
+
+   // Remove all markdown base64 image segments
+   cleanedText = text.replaceAll(mdRegex, '').trim();
+
+   return { cleanedText, urls };
+ };
+
  const transformOpenAIStream = (
    chunk: OpenAI.ChatCompletionChunk,
    streamContext: StreamContext,
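
For a concrete sense of the helper's contract, here is a small usage sketch (the input string is invented for illustration):

```ts
// Invented input: prose interleaved with one markdown-embedded data-URI image.
const sample = 'Before ![tiny](data:image/png;base64,AAAA) after';

const { cleanedText, urls } = processMarkdownBase64Images(sample);
// urls        -> ['data:image/png;base64,AAAA']
// cleanedText -> 'Before  after' (the image segment is removed; only leading and
//                trailing whitespace is trimmed, so the inner double space remains)
```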
@@ -137,7 +159,19 @@ const transformOpenAIStream = (
      return { data: null, id: chunk.id, type: 'text' };
    }
 
-   return { data: item.delta.content, id: chunk.id, type: 'text' };
+
+   const text = item.delta.content as string;
+   const { urls: images, cleanedText: cleaned } = processMarkdownBase64Images(text);
+   if (images.length > 0) {
+     const arr: StreamProtocolChunk[] = [];
+     if (cleaned) arr.push({ data: cleaned, id: chunk.id, type: 'text' });
+     arr.push(
+       ...images.map((url: string) => ({ data: url, id: chunk.id, type: 'base64_image' as const })),
+     );
+     return arr;
+   }
+
+   return { data: text, id: chunk.id, type: 'text' };
  }
 
  // The OpenAI Search Preview model returns citation sources
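
With this branch in place, a single content delta that mixes prose and an inline data-URI image fans out into an ordered array of protocol chunks. A sketch of the resulting shape, with invented values (`StreamProtocolChunk` is the package's own type):

```ts
// For delta.content = 'Look: ![i](data:image/png;base64,AAAA)' and chunk.id = 'c1',
// the branch above would return, in order:
const fannedOut: StreamProtocolChunk[] = [
  { data: 'Look:', id: 'c1', type: 'text' },
  { data: 'data:image/png;base64,AAAA', id: 'c1', type: 'base64_image' },
];
```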
@@ -284,7 +318,7 @@ const transformOpenAIStream = (
      if (citations) {
        streamContext.returnedCitation = true;
 
-       return [
+       const baseChunks: StreamProtocolChunk[] = [
          {
            data: {
              citations: (citations as any[])
@@ -303,6 +337,20 @@ const transformOpenAIStream = (
            type: streamContext?.thinkingInContent ? 'reasoning' : 'text',
          },
        ];
+       return baseChunks;
+     }
+   }
+
+   // In non-thinking mode, additionally parse base64 images out of the markdown and emit text -> base64_image in order
+   if (!streamContext?.thinkingInContent) {
+     const { urls, cleanedText: cleaned } = processMarkdownBase64Images(thinkingContent);
+     if (urls.length > 0) {
+       const arr: StreamProtocolChunk[] = [];
+       if (cleaned) arr.push({ data: cleaned, id: chunk.id, type: 'text' });
+       arr.push(
+         ...urls.map((url: string) => ({ data: url, id: chunk.id, type: 'base64_image' as const })),
+       );
+       return arr;
      }
    }
 
@@ -25,7 +25,7 @@ const activeProviderConfig = (s: AIProviderStoreState) => s.aiProviderDetail;
  const isAiProviderConfigLoading = (id: string) => (s: AIProviderStoreState) =>
    s.activeAiProvider !== id;
 
- const providerWhitelist = new Set(['ollama']);
+ const providerWhitelist = new Set(['ollama', 'lmstudio']);
 
  const activeProviderKeyVaults = (s: AIProviderStoreState) => activeProviderConfig(s)?.keyVaults;
 
@@ -9,7 +9,7 @@ import { keyVaultsConfigSelectors } from './keyVaults';
  const isProviderEnabled = (provider: GlobalLLMProviderKey) => (s: UserStore) =>
    getProviderConfigById(provider)(s)?.enabled || false;
 
- const providerWhitelist = new Set(['ollama']);
+ const providerWhitelist = new Set(['ollama', 'lmstudio']);
  /**
   * @description The conditions to enable client fetch
   * 1. If no baseUrl and apikey input, force on Server.
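
The truncated doc comment above lists the first of the client-fetch conditions. As a hedged sketch of the pattern this whitelist supports: providers on the whitelist (ollama, and now lmstudio) typically run against a local endpoint, so the fetchOnClient toggle can be exposed for them even when no API key or base URL has been entered. The predicate below is illustrative, not the package's actual selector:

```ts
// Illustrative gating logic under the stated assumption; names are hypothetical.
const providerWhitelist = new Set(['ollama', 'lmstudio']);

const canToggleClientFetch = (provider: string, hasBaseUrlOrApiKey: boolean): boolean =>
  providerWhitelist.has(provider) || hasBaseUrlOrApiKey;

canToggleClientFetch('lmstudio', false); // true (newly enabled by this release)
canToggleClientFetch('openai', false); // false (forced onto the server)
```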