@lobehub/chat 1.90.2 → 1.90.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
  
  # Changelog
  
+ ### [Version 1.90.3](https://github.com/lobehub/lobe-chat/compare/v1.90.2...v1.90.3)
+
+ <sup>Released on **2025-06-01**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Enable deploymentName for Aliyun Bailian.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Enable deploymentName for Aliyun Bailian, closes [#7576](https://github.com/lobehub/lobe-chat/issues/7576) ([169e598](https://github.com/lobehub/lobe-chat/commit/169e598))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.90.2](https://github.com/lobehub/lobe-chat/compare/v1.90.1...v1.90.2)
  
  <sup>Released on **2025-06-01**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+ {
+ "children": {
+ "improvements": [
+ "Enable deploymentName for Aliyun Bailian."
+ ]
+ },
+ "date": "2025-06-01",
+ "version": "1.90.3"
+ },
  {
  "children": {
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.90.2",
+ "version": "1.90.3",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -196,12 +196,15 @@ const qwenChatModels: AIChatModelCard[] = [
  reasoning: true,
  search: true,
  },
+ config: {
+ deploymentName: 'qwq-plus-latest', // expired on 2025-09-02
+ },
  contextWindowTokens: 131_072,
  description:
  '基于 Qwen2.5 模型训练的 QwQ 推理模型,通过强化学习大幅度提升了模型推理能力。模型数学代码等核心指标(AIME 24/25、LiveCodeBench)以及部分通用指标(IFEval、LiveBench等)达到DeepSeek-R1 满血版水平。',
  displayName: 'QwQ Plus',
  enabled: true,
- id: 'qwq-plus-latest',
+ id: 'qwq-plus',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -221,11 +224,14 @@ const qwenChatModels: AIChatModelCard[] = [
  reasoning: true,
  search: true,
  },
+ config: {
+ deploymentName: 'qwen-turbo-2025-04-28', // expired on 2025-10-26
+ },
  contextWindowTokens: 1_000_000,
  description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入。',
  displayName: 'Qwen Turbo',
  enabled: true,
- id: 'qwen-turbo-latest',
+ id: 'qwen-turbo',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -246,11 +252,14 @@ const qwenChatModels: AIChatModelCard[] = [
  reasoning: true,
  search: true,
  },
+ config: {
+ deploymentName: 'qwen-plus-2025-04-28', // expired on 2025-10-26
+ },
  contextWindowTokens: 131_072,
  description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入。',
  displayName: 'Qwen Plus',
  enabled: true,
- id: 'qwen-plus-latest',
+ id: 'qwen-plus',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -270,12 +279,15 @@ const qwenChatModels: AIChatModelCard[] = [
  functionCall: true,
  search: true,
  },
+ config: {
+ deploymentName: 'qwen-max-2025-01-25',
+ },
  contextWindowTokens: 131_072,
  description:
  '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,当前通义千问2.5产品版本背后的API模型。',
  displayName: 'Qwen Max',
  enabled: true,
- id: 'qwen-max-latest',
+ id: 'qwen-max',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -292,6 +304,9 @@ const qwenChatModels: AIChatModelCard[] = [
  abilities: {
  functionCall: true,
  },
+ config: {
+ deploymentName: 'qwen-long-latest',
+ },
  contextWindowTokens: 10_000_000,
  description:
  '通义千问超大规模语言模型,支持长文本上下文,以及基于长文档、多文档等多个场景的对话功能。',
@@ -311,12 +326,15 @@ const qwenChatModels: AIChatModelCard[] = [
  abilities: {
  vision: true,
  },
+ config: {
+ deploymentName: 'qwen-omni-turbo-latest',
+ },
  contextWindowTokens: 32_768,
  description:
  'Qwen-Omni 系列模型支持输入多种模态的数据,包括视频、音频、图片、文本,并输出音频与文本。',
  displayName: 'Qwen Omni Turbo',
  enabled: true,
- id: 'qwen-omni-turbo-latest',
+ id: 'qwen-omni-turbo',
  maxOutput: 2048,
  organization: 'Qwen',
  pricing: {
@@ -348,11 +366,14 @@ const qwenChatModels: AIChatModelCard[] = [
  abilities: {
  vision: true,
  },
+ config: {
+ deploymentName: 'qwen-vl-plus-2025-01-25',
+ },
  contextWindowTokens: 131_072,
  description:
  '通义千问大规模视觉语言模型增强版。大幅提升细节识别能力和文字识别能力,支持超百万像素分辨率和任意长宽比规格的图像。',
  displayName: 'Qwen VL Plus',
- id: 'qwen-vl-plus-latest',
+ id: 'qwen-vl-plus',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -366,12 +387,15 @@ const qwenChatModels: AIChatModelCard[] = [
  abilities: {
  vision: true,
  },
+ config: {
+ deploymentName: 'qwen-vl-max-2025-04-08',
+ },
  contextWindowTokens: 131_072,
  description:
  '通义千问超大规模视觉语言模型。相比增强版,再次提升视觉推理能力和指令遵循能力,提供更高的视觉感知和认知水平。',
  displayName: 'Qwen VL Max',
  enabled: true,
- id: 'qwen-vl-max-latest',
+ id: 'qwen-vl-max',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -385,11 +409,14 @@ const qwenChatModels: AIChatModelCard[] = [
  abilities: {
  vision: true,
  },
+ config: {
+ deploymentName: 'qwen-vl-ocr-2025-04-13',
+ },
  contextWindowTokens: 34_096,
  description:
  '通义千问OCR是文字提取专有模型,专注于文档、表格、试题、手写体文字等类型图像的文字提取能力。它能够识别多种文字,目前支持的语言有:汉语、英语、法语、日语、韩语、德语、俄语、意大利语、越南语、阿拉伯语。',
  displayName: 'Qwen VL OCR',
- id: 'qwen-vl-ocr-latest',
+ id: 'qwen-vl-ocr',
  maxOutput: 4096,
  organization: 'Qwen',
  pricing: {
@@ -400,10 +427,13 @@ const qwenChatModels: AIChatModelCard[] = [
  type: 'chat',
  },
  {
+ config: {
+ deploymentName: 'qwen-math-turbo-latest',
+ },
  contextWindowTokens: 4096,
  description: '通义千问数学模型是专门用于数学解题的语言模型。',
  displayName: 'Qwen Math Turbo',
- id: 'qwen-math-turbo-latest',
+ id: 'qwen-math-turbo',
  maxOutput: 3072,
  organization: 'Qwen',
  pricing: {
@@ -414,10 +444,13 @@ const qwenChatModels: AIChatModelCard[] = [
  type: 'chat',
  },
  {
+ config: {
+ deploymentName: 'qwen-math-plus-latest',
+ },
  contextWindowTokens: 4096,
  description: '通义千问数学模型是专门用于数学解题的语言模型。',
  displayName: 'Qwen Math Plus',
- id: 'qwen-math-plus-latest',
+ id: 'qwen-math-plus',
  maxOutput: 3072,
  organization: 'Qwen',
  pricing: {
@@ -428,10 +461,13 @@ const qwenChatModels: AIChatModelCard[] = [
  type: 'chat',
  },
  {
+ config: {
+ deploymentName: 'qwen-coder-turbo-latest',
+ },
  contextWindowTokens: 131_072,
  description: '通义千问代码模型。',
  displayName: 'Qwen Coder Turbo',
- id: 'qwen-coder-turbo-latest',
+ id: 'qwen-coder-turbo',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -442,10 +478,13 @@ const qwenChatModels: AIChatModelCard[] = [
  type: 'chat',
  },
  {
+ config: {
+ deploymentName: 'qwen-coder-plus-latest',
+ },
  contextWindowTokens: 131_072,
  description: '通义千问代码模型。',
  displayName: 'Qwen Coder Plus',
- id: 'qwen-coder-plus-latest',
+ id: 'qwen-coder-plus',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -501,11 +540,14 @@ const qwenChatModels: AIChatModelCard[] = [
  reasoning: true,
  vision: true,
  },
+ config: {
+ deploymentName: 'qvq-max-latest',
+ },
  contextWindowTokens: 122_880,
  description:
  '通义千问QVQ视觉推理模型,支持视觉输入及思维链输出,在数学、编程、视觉分析、创作以及通用任务上都表现了更强的能力。',
  displayName: 'QVQ Max',
- id: 'qvq-max-latest',
+ id: 'qvq-max',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -667,8 +709,8 @@ const qwenChatModels: AIChatModelCard[] = [
  {
  contextWindowTokens: 131_072,
  description: '通义千问代码模型开源版。',
- displayName: 'Qwen2.5 Coder 32B',
- id: 'qwen2.5-coder-32b-instruct',
+ displayName: 'Qwen2.5 Coder 14B',
+ id: 'qwen2.5-coder-14b-instruct',
  maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
@@ -679,36 +721,16 @@ const qwenChatModels: AIChatModelCard[] = [
  type: 'chat',
  },
  {
- abilities: {
- vision: true,
- },
- contextWindowTokens: 8000,
- description: '以 Qwen-7B 语言模型初始化,添加图像模型,图像输入分辨率为448的预训练模型。',
- displayName: 'Qwen VL',
- id: 'qwen-vl-v1',
- maxOutput: 1500,
- organization: 'Qwen',
- pricing: {
- currency: 'CNY',
- input: 0,
- output: 0,
- },
- type: 'chat',
- },
- {
- abilities: {
- vision: true,
- },
- contextWindowTokens: 8000,
- description: '通义千问VL支持灵活的交互方式,包括多图、多轮问答、创作等能力的模型。',
- displayName: 'Qwen VL Chat',
- id: 'qwen-vl-chat-v1',
- maxOutput: 1500,
+ contextWindowTokens: 131_072,
+ description: '通义千问代码模型开源版。',
+ displayName: 'Qwen2.5 Coder 32B',
+ id: 'qwen2.5-coder-32b-instruct',
+ maxOutput: 8192,
  organization: 'Qwen',
  pricing: {
  currency: 'CNY',
- input: 0,
- output: 0,
+ input: 2,
+ output: 6,
  },
  type: 'chat',
  },
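
The recurring pattern in the model-card hunks above is that each card keeps a stable `id` (for example `qwen-plus`) while the new `config.deploymentName` pins the dated snapshot actually sent to the API. A minimal sketch of that split, using a trimmed-down card type as an assumption (the full `AIChatModelCard` interface is not shown in this diff):

```ts
// Sketch only: a simplified card type; the real AIChatModelCard has more fields.
interface ChatModelCardSketch {
  id: string;                           // stable identifier shown in the UI
  config?: { deploymentName?: string }; // dated snapshot sent to the provider
  displayName: string;
}

const qwenPlus: ChatModelCardSketch = {
  config: { deploymentName: 'qwen-plus-2025-04-28' }, // value taken from the diff above
  displayName: 'Qwen Plus',
  id: 'qwen-plus',
};

// Prefer the pinned deployment name when present, otherwise fall back to the id.
const resolveModel = (card: ChatModelCardSketch): string =>
  card.config?.deploymentName ?? card.id;

console.log(resolveModel(qwenPlus)); // 'qwen-plus-2025-04-28'
```

This keeps user-facing model ids stable across releases while the dated deployments (and their expiry comments) can be rotated in one place.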
@@ -419,7 +419,7 @@ const Qwen: ModelProviderCard = {
  id: 'qwen',
  modelList: { showModelFetcher: true },
  modelsUrl: 'https://help.aliyun.com/zh/dashscope/developer-reference/api-details',
- name: 'Qwen',
+ name: 'Aliyun Bailian',
  proxyUrl: {
  placeholder: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
  },
@@ -429,16 +429,13 @@ const Qwen: ModelProviderCard = {
  placeholder: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
  },
  sdkType: 'openai',
+ showDeployName: true,
  showModelFetcher: true,
  smoothing: {
  speed: 2,
  text: true,
  },
  },
- smoothing: {
- speed: 2,
- text: true,
- },
  url: 'https://www.aliyun.com/product/bailian',
  };
  
@@ -62,7 +62,7 @@ const XAI: ModelProviderCard = {
  id: 'xai',
  modelList: { showModelFetcher: true },
  modelsUrl: 'https://docs.x.ai/docs#models',
- name: 'xAI',
+ name: 'xAI (Grok)',
  proxyUrl: {
  placeholder: 'https://api.x.ai/v1',
  },
@@ -40,6 +40,9 @@ export const getServerGlobalConfig = async () => {
  enabled: isDesktop ? true : undefined,
  fetchOnClient: isDesktop ? false : !process.env.OLLAMA_PROXY_URL,
  },
+ qwen: {
+ withDeploymentName: true,
+ },
  tencentcloud: {
  enabledKey: 'ENABLED_TENCENT_CLOUD',
  modelListKey: 'TENCENT_CLOUD_MODEL_LIST',
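
The `withDeploymentName: true` entry appears to be the server-side switch that provider settings consume. As a hedged illustration of how such a flag could be read (the config shape here is simplified; only the `qwen.withDeploymentName` key comes from the diff, everything else is an assumption):

```ts
// Illustrative only: a simplified reader for the provider flag set above.
// The actual return type of getServerGlobalConfig in lobe-chat is richer.
type ProviderFlags = { withDeploymentName?: boolean };
type GlobalConfigSketch = Record<string, ProviderFlags>;

const serverConfig: GlobalConfigSketch = {
  qwen: { withDeploymentName: true }, // value introduced in this release
};

// A settings panel could use the flag to decide whether to expose a
// "deployment name" input for a given provider.
const shouldShowDeploymentField = (provider: string): boolean =>
  serverConfig[provider]?.withDeploymentName === true;

console.log(shouldShowDeploymentField('qwen')); // true
```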
@@ -300,6 +300,7 @@ class ChatService {
  ModelProvider.Azure,
  ModelProvider.Volcengine,
  ModelProvider.AzureAI,
+ ModelProvider.Qwen,
  ] as string[];
  
  if (providersWithDeploymentName.includes(provider)) {
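
Adding `ModelProvider.Qwen` to `providersWithDeploymentName` routes Qwen requests through the same branch already used by Azure, Volcengine, and AzureAI, where a configured deployment name stands in for the model id. A minimal sketch of that kind of branch, assuming a simplified payload shape and illustrative lowercase provider strings (the real enum values and ChatService internals are not fully shown in this diff):

```ts
// Hedged sketch: how a deployment-name-aware branch could rewrite the
// outgoing model field. Types and the provider string values are illustrative.
interface ChatPayloadSketch {
  model: string;
  provider: string;
}

const providersWithDeploymentName = ['azure', 'volcengine', 'azureai', 'qwen'];

const applyDeploymentName = (
  payload: ChatPayloadSketch,
  deploymentName?: string,
): ChatPayloadSketch =>
  providersWithDeploymentName.includes(payload.provider) && deploymentName
    ? { ...payload, model: deploymentName } // send the pinned deployment instead of the id
    : payload;

// Example: a Qwen request configured with a dated deployment.
console.log(
  applyDeploymentName({ model: 'qwen-plus', provider: 'qwen' }, 'qwen-plus-2025-04-28'),
); // -> { model: 'qwen-plus-2025-04-28', provider: 'qwen' }
```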
@@ -23,6 +23,7 @@ export const AiProviderSDKEnum = {
  Huggingface: 'huggingface',
  Ollama: 'ollama',
  Openai: 'openai',
+ Qwen: 'qwen',
  Volcengine: 'volcengine',
  } as const;