@lobehub/chat 1.90.0 → 1.90.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,65 @@
 
  # Changelog
 
+ ### [Version 1.90.2](https://github.com/lobehub/lobe-chat/compare/v1.90.1...v1.90.2)
+
+ <sup>Released on **2025-06-01**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Support `web_search` tool for MiniMax & Zhipu.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Support `web_search` tool for MiniMax & Zhipu, closes [#7980](https://github.com/lobehub/lobe-chat/issues/7980) ([28cdafb](https://github.com/lobehub/lobe-chat/commit/28cdafb))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.90.1](https://github.com/lobehub/lobe-chat/compare/v1.90.0...v1.90.1)
+
+ <sup>Released on **2025-06-01**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, fix DeepSeek new R1 Search error.
+
+ #### 💄 Styles
+
+ - **misc**: Use default deployment name when parseModelString doesn't contain deployment name.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, closes [#8034](https://github.com/lobehub/lobe-chat/issues/8034) ([5b42ee2](https://github.com/lobehub/lobe-chat/commit/5b42ee2))
+ - **misc**: Fix DeepSeek new R1 Search error, closes [#8035](https://github.com/lobehub/lobe-chat/issues/8035) ([cf58628](https://github.com/lobehub/lobe-chat/commit/cf58628))
+
+ #### Styles
+
+ - **misc**: Use default deployment name when parseModelString doesn't contain deployment name, closes [#7719](https://github.com/lobehub/lobe-chat/issues/7719) ([aef19f4](https://github.com/lobehub/lobe-chat/commit/aef19f4))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 1.90.0](https://github.com/lobehub/lobe-chat/compare/v1.89.0...v1.90.0)
 
  <sup>Released on **2025-06-01**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,25 @@
  [
+ {
+ "children": {
+ "improvements": [
+ "Support web_search tool for MiniMax & Zhipu."
+ ]
+ },
+ "date": "2025-06-01",
+ "version": "1.90.2"
+ },
+ {
+ "children": {
+ "fixes": [
+ "Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, fix DeepSeek new R1 Search error."
+ ],
+ "improvements": [
+ "Use default deployment name when parseModelString doesn't contain deployment name."
+ ]
+ },
+ "date": "2025-06-01",
+ "version": "1.90.1"
+ },
  {
  "children": {
  "features": [
@@ -8,10 +8,10 @@
  # ref: https://github.com/lobehub/lobe-chat/pull/5247
  if [[ "$OSTYPE" == "darwin"* ]]; then
  # macOS
- SED_COMMAND="sed -i ''"
+ SED_INPLACE_ARGS=('-i' '')
  else
  # not macOS
- SED_COMMAND="sed -i"
+ SED_INPLACE_ARGS=('-i')
  fi
 
  # ======================
@@ -519,12 +519,12 @@ section_configurate_host() {
  if [[ "$ask_result" == "y" ]]; then
  PROTOCOL="https"
  # Replace all http with https
- $SED_COMMAND "s#http://#https://#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#http://#https://#" .env
  fi
  fi
 
  # Check if sed is installed
- if ! command -v $SED_COMMAND &> /dev/null ; then
+ if ! command -v sed "${SED_INPLACE_ARGS[@]}" &> /dev/null ; then
  echo "sed" $(show_message "tips_no_executable")
  exit 1
  fi
@@ -553,7 +553,7 @@ section_configurate_host() {
  ask "(auth.example.com)"
  CASDOOR_HOST="$ask_result"
  # Setup callback url for Casdoor
- $SED_COMMAND "s/"example.com"/${LOBE_HOST}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"example.com"/${LOBE_HOST}/" init_data.json
  ;;
  1)
  DEPLOY_MODE="ip"
@@ -566,7 +566,7 @@ section_configurate_host() {
  MINIO_HOST="${HOST}:9000"
  CASDOOR_HOST="${HOST}:8000"
  # Setup callback url for Casdoor
- $SED_COMMAND "s/"localhost:3210"/${LOBE_HOST}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"localhost:3210"/${LOBE_HOST}/" init_data.json
  ;;
  *)
  echo "Invalid deploy mode: $ask_result"
@@ -575,14 +575,14 @@ section_configurate_host() {
  esac
 
  # lobe host
- $SED_COMMAND "s#^APP_URL=.*#APP_URL=$PROTOCOL://$LOBE_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^APP_URL=.*#APP_URL=$PROTOCOL://$LOBE_HOST#" .env
  # auth related
- $SED_COMMAND "s#^AUTH_URL=.*#AUTH_URL=$PROTOCOL://$LOBE_HOST/api/auth#" .env
- $SED_COMMAND "s#^AUTH_CASDOOR_ISSUER=.*#AUTH_CASDOOR_ISSUER=$PROTOCOL://$CASDOOR_HOST#" .env
- $SED_COMMAND "s#^origin=.*#origin=$PROTOCOL://$CASDOOR_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_URL=.*#AUTH_URL=$PROTOCOL://$LOBE_HOST/api/auth#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_CASDOOR_ISSUER=.*#AUTH_CASDOOR_ISSUER=$PROTOCOL://$CASDOOR_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^origin=.*#origin=$PROTOCOL://$CASDOOR_HOST#" .env
  # s3 related
- $SED_COMMAND "s#^S3_PUBLIC_DOMAIN=.*#S3_PUBLIC_DOMAIN=$PROTOCOL://$MINIO_HOST#" .env
- $SED_COMMAND "s#^S3_ENDPOINT=.*#S3_ENDPOINT=$PROTOCOL://$MINIO_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^S3_PUBLIC_DOMAIN=.*#S3_PUBLIC_DOMAIN=$PROTOCOL://$MINIO_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^S3_ENDPOINT=.*#S3_ENDPOINT=$PROTOCOL://$MINIO_HOST#" .env
 
 
  # Check if env modified success
@@ -641,12 +641,12 @@ section_regenerate_secrets() {
  echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_SECRET"
  else
  # Search and replace the value of CASDOOR_SECRET in .env
- $SED_COMMAND "s#^AUTH_CASDOOR_SECRET=.*#AUTH_CASDOOR_SECRET=${CASDOOR_SECRET}#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_CASDOOR_SECRET=.*#AUTH_CASDOOR_SECRET=${CASDOOR_SECRET}#" .env
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "AUTH_CASDOOR_SECRET in \`.env\`"
  fi
  # replace `clientSecrect` in init_data.json
- $SED_COMMAND "s#dbf205949d704de81b0b5b3603174e23fbecc354#${CASDOOR_SECRET}#" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s#dbf205949d704de81b0b5b3603174e23fbecc354#${CASDOOR_SECRET}#" init_data.json
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "AUTH_CASDOOR_SECRET in \`init_data.json\`"
  fi
@@ -660,7 +660,7 @@ section_regenerate_secrets() {
  CASDOOR_PASSWORD="123"
  else
  # replace `password` in init_data.json
- $SED_COMMAND "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_PASSWORD in \`init_data.json\`"
  fi
@@ -672,7 +672,7 @@ section_regenerate_secrets() {
  MINIO_ROOT_PASSWORD="YOUR_MINIO_PASSWORD"
  else
  # Search and replace the value of S3_SECRET_ACCESS_KEY in .env
- $SED_COMMAND "s#^MINIO_ROOT_PASSWORD=.*#MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^MINIO_ROOT_PASSWORD=.*#MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}#" .env
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "MINIO_ROOT_PASSWORD in \`.env\`"
  fi
@@ -19,6 +19,7 @@ You can use `+` to add a model, `-` to hide a model, and use `model name->deploy
  ```text
  id->deploymentName=displayName<maxToken:vision:reasoning:search:fc:file:imageOutput>,model2,model3
  ```
+ The `->deploymentName` part can be omitted, in which case it defaults to the latest model version. Currently, the model service providers that support `->deploymentName` are Azure and Volcengine.
 
  For example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
 
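For orientation only, here is a minimal sketch (not the package's own parser) of how one entry of the documented `id->deploymentName=displayName<...>` syntax breaks down; the helper name `parseEntry` and the sample entry are assumptions for illustration:

```ts
// Illustrative sketch of the documented model-string entry syntax.
// Not the package implementation; field names are assumed for this example.
const parseEntry = (entry: string) => {
  // the <...> block carries extension config fields such as maxToken, vision, search, fc
  const [, extensions = ''] = entry.match(/<(.*)>/) ?? [];
  const [idPart, displayName] = entry.replace(/<.*>/, '').split('=');
  const [id, deploymentName] = idPart.split('->'); // '->deploymentName' may be omitted
  return {
    deploymentName,
    displayName: displayName ?? id,
    extensions: extensions.split(':').filter(Boolean),
    id,
  };
};

// parseEntry('deepseek-v3->deepseek-v3-250324=DeepSeek V3<search:fc>')
//   → { deploymentName: 'deepseek-v3-250324', displayName: 'DeepSeek V3',
//       extensions: ['search', 'fc'], id: 'deepseek-v3' }
```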
@@ -18,6 +18,7 @@ LobeChat 支持在部署时自定义模型列表,详情请参考 [模型提供
  ```text
  id->deploymentName=displayName<maxToken:vision:reasoning:search:fc:file:imageOutput>,model2,model3
  ```
+ 部署名`->deploymentName`可以省略,默认为最新版本的模型。当前支持`->deploymentName`的模型服务商有:Azure和Volcengine。
 
  例如: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
 
@@ -599,9 +599,9 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
  ### `VOLCENGINE_MODEL_LIST`
 
  - Type: Optional
- - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. The `->deploymentName` part can be omitted, in which case it defaults to the latest model version. Definition syntax rules see [model-list][model-list]
  - Default: `-`
- - Example: `-all,+deepseek-r1->deepseek-r1-250120,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k->doubao-1-5-pro-256k-250115,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k->doubao-1-5-lite-32k-250115`
+ - Example: `-all,+deepseek-r1,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k`
 
  ### `VOLCENGINE_PROXY_URL`
 
@@ -622,7 +622,7 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
  ### `INFINIAI_MODEL_LIST`
 
  - Type: Optional
- - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
  - Default: `-`
  - Example: `-all,+qwq-32b,+deepseek-r1`
 
@@ -597,9 +597,9 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
  ### `VOLCENGINE_MODEL_LIST`
 
  - 类型:可选
- - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名->部署名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名->部署名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。部署名`->部署名`可以省略,默认为最新版本的模型。模型定义语法规则见 [模型列表][model-list]
  - 默认值:`-`
- - 示例:`-all,+deepseek-r1->deepseek-r1-250120,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k->doubao-1-5-pro-256k-250115,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k->doubao-1-5-lite-32k-250115`
+ - 示例:`-all,+deepseek-r1,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k`
 
  ### `VOLCENGINE_PROXY_URL`
 
@@ -620,7 +620,7 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
  ### `INFINIAI_MODEL_LIST`
 
  - 类型:可选
- - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名->部署名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
  - 默认值:`-`
  - 示例:`-all,+qwq-32b,+deepseek-r1`
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.90.0",
+ "version": "1.90.2",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -107,6 +107,7 @@ const SystemRole = memo(() => {
  <EditableMessage
  classNames={{ markdown: styles.prompt }}
  editing={editing}
+ markdownProps={{ enableLatex: false, enableMermaid: false }}
  model={{
  extra: (
  <AgentInfo
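The hunk above passes `markdownProps={{ enableLatex: false, enableMermaid: false }}` so the system-role preview skips the heavy LaTeX and Mermaid pipelines. As a side note (a general React pattern, not something this patch does), the props object can also be hoisted so its reference stays stable across renders; the `MarkdownProps` shape below is assumed for illustration:

```tsx
// Sketch only: hoist the disabled-renderer props so the object is not recreated
// on every render of the surrounding component. `MarkdownProps` is an assumed shape.
interface MarkdownProps {
  enableLatex?: boolean;
  enableMermaid?: boolean;
}

const SYSTEM_ROLE_MARKDOWN_PROPS: MarkdownProps = { enableLatex: false, enableMermaid: false };

// usage inside the component:
// <EditableMessage markdownProps={SYSTEM_ROLE_MARKDOWN_PROPS} ... />
```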
@@ -4,6 +4,7 @@ const minimaxChatModels: AIChatModelCard[] = [
  {
  abilities: {
  functionCall: true,
+ search: true,
  vision: true,
  },
  contextWindowTokens: 1_000_192,
@@ -19,11 +20,15 @@ const minimaxChatModels: AIChatModelCard[] = [
  output: 8,
  },
  releasedAt: '2025-01-15',
+ settings: {
+ searchImpl: 'params',
+ },
  type: 'chat',
  },
  {
  abilities: {
  functionCall: true,
+ search: true,
  vision: true,
  },
  contextWindowTokens: 245_760,
@@ -37,6 +42,9 @@ const minimaxChatModels: AIChatModelCard[] = [
  input: 1,
  output: 1,
  },
+ settings: {
+ searchImpl: 'params',
+ },
  type: 'chat',
  },
  {
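The MiniMax cards now advertise a `search` ability together with `settings.searchImpl: 'params'`, which appears to mean the search feature is toggled through request parameters rather than an explicit tool call (the OpenAI search-preview cards below use `'internal'` instead). A trimmed-down sketch of such a card, with a locally defined type since the real `AIChatModelCard` lives in the package:

```ts
// Sketch only: a reduced model-card shape mirroring the fields touched in the hunks above.
interface ChatModelCardSketch {
  abilities: { functionCall?: boolean; search?: boolean; vision?: boolean };
  contextWindowTokens: number;
  id: string;
  settings?: { searchImpl?: 'params' | 'internal' };
  type: 'chat';
}

// A MiniMax-style card that advertises built-in web search driven by request params.
// The id is taken from the stream example later in this diff; pairing it with this
// particular card is illustrative, not confirmed by the diff.
const miniMaxSearchCard: ChatModelCardSketch = {
  abilities: { functionCall: true, search: true, vision: true },
  contextWindowTokens: 245_760,
  id: 'abab6.5s-chat',
  settings: { searchImpl: 'params' },
  type: 'chat',
};
```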
@@ -236,6 +236,26 @@ export const openaiChatModels: AIChatModelCard[] = [
  releasedAt: '2024-07-18',
  type: 'chat',
  },
+ {
+ abilities: {
+ search: true,
+ },
+ contextWindowTokens: 128_000,
+ description:
+ 'GPT-4o mini 搜索预览版是一个专门训练用于理解和执行网页搜索查询的模型,使用的是 Chat Completions API。除了令牌费用之外,网页搜索查询还会按每次工具调用收取费用。',
+ displayName: 'GPT-4o mini Search Preview',
+ id: 'gpt-4o-mini-search-preview',
+ maxOutput: 16_384,
+ pricing: {
+ input: 0.15,
+ output: 0.6,
+ },
+ releasedAt: '2025-03-11',
+ settings: {
+ searchImpl: 'internal',
+ },
+ type: 'chat',
+ },
  {
  abilities: {
  functionCall: true,
@@ -244,14 +264,34 @@ export const openaiChatModels: AIChatModelCard[] = [
  contextWindowTokens: 128_000,
  description:
  'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
- displayName: 'GPT-4o 1120',
- id: 'gpt-4o-2024-11-20',
+ displayName: 'GPT-4o',
+ id: 'gpt-4o',
  pricing: {
  cachedInput: 1.25,
  input: 2.5,
  output: 10,
  },
- releasedAt: '2024-11-20',
+ releasedAt: '2024-05-13',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ search: true,
+ },
+ contextWindowTokens: 128_000,
+ description:
+ 'GPT-4o 搜索预览版是一个专门训练用于理解和执行网页搜索查询的模型,使用的是 Chat Completions API。除了令牌费用之外,网页搜索查询还会按每次工具调用收取费用。',
+ displayName: 'GPT-4o Search Preview',
+ id: 'gpt-4o-search-preview',
+ maxOutput: 16_384,
+ pricing: {
+ input: 2.5,
+ output: 10,
+ },
+ releasedAt: '2025-03-11',
+ settings: {
+ searchImpl: 'internal',
+ },
  type: 'chat',
  },
  {
@@ -262,14 +302,14 @@ export const openaiChatModels: AIChatModelCard[] = [
  contextWindowTokens: 128_000,
  description:
  'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
- displayName: 'GPT-4o',
- id: 'gpt-4o',
+ displayName: 'GPT-4o 1120',
+ id: 'gpt-4o-2024-11-20',
  pricing: {
  cachedInput: 1.25,
  input: 2.5,
  output: 10,
  },
- releasedAt: '2024-05-13',
+ releasedAt: '2024-11-20',
  type: 'chat',
  },
  {
@@ -1,12 +1,5 @@
  // @vitest-environment node
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
- import {
- ChatStreamPayload,
- LLMRoleType,
- LobeOpenAICompatibleRuntime,
- ModelProvider,
- } from '@/libs/model-runtime';
+ import { ModelProvider } from '@/libs/model-runtime';
  import { testProvider } from '@/libs/model-runtime/providerTestUtils';
 
  import { LobeDeepSeekAI } from './index';
@@ -24,151 +17,3 @@ testProvider({
  skipAPICall: true,
  },
  });
-
- let instance: LobeOpenAICompatibleRuntime;
-
- const createDeepSeekAIInstance = () => new LobeDeepSeekAI({ apiKey: 'test' });
-
- const mockSuccessfulChatCompletion = () => {
- vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue({
- id: 'cmpl-mock',
- object: 'chat.completion',
- created: Date.now(),
- choices: [
- { index: 0, message: { role: 'assistant', content: 'Mock response' }, finish_reason: 'stop' },
- ],
- } as any);
- };
-
- beforeEach(() => {
- instance = new LobeDeepSeekAI({ apiKey: 'test' });
-
- // 使用 vi.spyOn 来模拟 chat.completions.create 方法
- vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
- new ReadableStream() as any,
- );
- });
-
- afterEach(() => {
- vi.clearAllMocks();
- });
-
- describe('LobeDeepSeekAI', () => {
- describe('deepseek-reasoner', () => {
- beforeEach(() => {
- instance = createDeepSeekAIInstance();
- mockSuccessfulChatCompletion();
- });
-
- it('should insert a user message if the first message is from assistant', async () => {
- const payloadMessages = [{ content: 'Hello', role: 'assistant' as LLMRoleType }];
- const expectedMessages = [{ content: '', role: 'user' }, ...payloadMessages];
-
- const payload: ChatStreamPayload = {
- messages: payloadMessages,
- model: 'deepseek-reasoner',
- temperature: 0,
- };
-
- await instance.chat(payload);
-
- expect(instance['client'].chat.completions.create).toHaveBeenCalled();
- const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
- const actualMessages = actualArgs[0].messages;
- expect(actualMessages).toEqual(expectedMessages);
- });
-
- it('should insert a user message if the first message is from assistant (with system summary)', async () => {
- const payloadMessages = [
- { content: 'System summary', role: 'system' as LLMRoleType },
- { content: 'Hello', role: 'assistant' as LLMRoleType },
- ];
- const expectedMessages = [
- { content: 'System summary', role: 'system' },
- { content: '', role: 'user' },
- { content: 'Hello', role: 'assistant' },
- ];
-
- const payload: ChatStreamPayload = {
- messages: payloadMessages,
- model: 'deepseek-reasoner',
- temperature: 0,
- };
-
- await instance.chat(payload);
-
- expect(instance['client'].chat.completions.create).toHaveBeenCalled();
- const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
- const actualMessages = actualArgs[0].messages;
- expect(actualMessages).toEqual(expectedMessages);
- });
-
- it('should insert alternating roles if messages do not alternate', async () => {
- const payloadMessages = [
- { content: 'user1', role: 'user' as LLMRoleType },
- { content: 'user2', role: 'user' as LLMRoleType },
- { content: 'assistant1', role: 'assistant' as LLMRoleType },
- { content: 'assistant2', role: 'assistant' as LLMRoleType },
- ];
- const expectedMessages = [
- { content: 'user1', role: 'user' },
- { content: '', role: 'assistant' },
- { content: 'user2', role: 'user' },
- { content: 'assistant1', role: 'assistant' },
- { content: '', role: 'user' },
- { content: 'assistant2', role: 'assistant' },
- ];
-
- const payload: ChatStreamPayload = {
- messages: payloadMessages,
- model: 'deepseek-reasoner',
- temperature: 0,
- };
-
- await instance.chat(payload);
-
- expect(instance['client'].chat.completions.create).toHaveBeenCalled();
- const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
- const actualMessages = actualArgs[0].messages;
- expect(actualMessages).toEqual(expectedMessages);
- });
-
- it('complex condition', async () => {
- const payloadMessages = [
- { content: 'system', role: 'system' as LLMRoleType },
- { content: 'assistant', role: 'assistant' as LLMRoleType },
- { content: 'user1', role: 'user' as LLMRoleType },
- { content: 'user2', role: 'user' as LLMRoleType },
- { content: 'user3', role: 'user' as LLMRoleType },
- { content: 'assistant1', role: 'assistant' as LLMRoleType },
- { content: 'assistant2', role: 'assistant' as LLMRoleType },
- ];
- const expectedMessages = [
- { content: 'system', role: 'system' },
- { content: '', role: 'user' },
- { content: 'assistant', role: 'assistant' },
- { content: 'user1', role: 'user' },
- { content: '', role: 'assistant' },
- { content: 'user2', role: 'user' },
- { content: '', role: 'assistant' },
- { content: 'user3', role: 'user' },
- { content: 'assistant1', role: 'assistant' },
- { content: '', role: 'user' },
- { content: 'assistant2', role: 'assistant' },
- ];
-
- const payload: ChatStreamPayload = {
- messages: payloadMessages,
- model: 'deepseek-reasoner',
- temperature: 0,
- };
-
- await instance.chat(payload);
-
- expect(instance['client'].chat.completions.create).toHaveBeenCalled();
- const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
- const actualMessages = actualArgs[0].messages;
- expect(actualMessages).toEqual(expectedMessages);
- });
- });
- });
@@ -1,8 +1,6 @@
- import OpenAI from 'openai';
-
  import type { ChatModelCard } from '@/types/llm';
 
- import { ChatStreamPayload, ModelProvider } from '../types';
+ import { ModelProvider } from '../types';
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
  export interface DeepSeekModelCard {
@@ -11,59 +9,6 @@ export interface DeepSeekModelCard {
 
  export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://api.deepseek.com/v1',
- chatCompletion: {
- handlePayload: ({
- frequency_penalty,
- messages,
- model,
- presence_penalty,
- temperature,
- top_p,
- ...payload
- }: ChatStreamPayload) => {
- // github.com/lobehub/lobe-chat/pull/5548
- let filteredMessages = messages.filter((message) => message.role !== 'system');
-
- if (filteredMessages.length > 0 && filteredMessages[0].role === 'assistant') {
- filteredMessages.unshift({ content: '', role: 'user' });
- }
-
- let lastRole = '';
- for (let i = 0; i < filteredMessages.length; i++) {
- const message = filteredMessages[i];
- if (message.role === lastRole) {
- const newRole = lastRole === 'assistant' ? 'user' : 'assistant';
- filteredMessages.splice(i, 0, { content: '', role: newRole });
- i++;
- }
- lastRole = message.role;
- }
-
- if (messages.length > 0 && messages[0].role === 'system') {
- filteredMessages.unshift(messages[0]);
- }
-
- return {
- ...payload,
- model,
- ...(model === 'deepseek-reasoner'
- ? {
- frequency_penalty: undefined,
- messages: filteredMessages,
- presence_penalty: undefined,
- temperature: undefined,
- top_p: undefined,
- }
- : {
- frequency_penalty,
- messages,
- presence_penalty,
- temperature,
- top_p,
- }),
- } as OpenAI.ChatCompletionCreateParamsStreaming;
- },
- },
  debug: {
  chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
  },
@@ -12,7 +12,16 @@ export const LobeMinimaxAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://api.minimax.chat/v1',
  chatCompletion: {
  handlePayload: (payload) => {
- const { max_tokens, temperature, top_p, ...params } = payload;
+ const { enabledSearch, max_tokens, temperature, tools, top_p, ...params } = payload;
+
+ const minimaxTools = enabledSearch
+ ? [
+ ...(tools || []),
+ {
+ type: 'web_search',
+ },
+ ]
+ : tools;
 
  return {
  ...params,
@@ -20,6 +29,7 @@ export const LobeMinimaxAI = LobeOpenAICompatibleFactory({
  max_tokens: max_tokens !== undefined ? max_tokens : getMinimaxMaxOutputs(payload.model),
  presence_penalty: undefined,
  temperature: temperature === undefined || temperature <= 0 ? undefined : temperature / 2,
+ tools: minimaxTools,
  top_p: top_p !== undefined && top_p > 0 && top_p <= 1 ? top_p : undefined,
  } as any;
  },
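With `enabledSearch` set, the handler above appends a `{ type: 'web_search' }` entry to whatever tools the caller already provided. A minimal sketch of what the resulting MiniMax request body roughly looks like; the message text and token limit are illustrative values, not taken from the package:

```ts
// Sketch: shape of a MiniMax payload after handlePayload when enabledSearch is true.
const minimaxRequest = {
  max_tokens: 8192, // illustrative; the real default comes from getMinimaxMaxOutputs(model)
  messages: [{ content: 'What is the weather in Shanghai today?', role: 'user' }],
  model: 'MiniMax-Text-01',
  temperature: 0.5, // handlePayload halves a positive incoming temperature (1 -> 0.5)
  tools: [{ type: 'web_search' }], // appended because enabledSearch was true
};
```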
@@ -21,6 +21,8 @@ export const LobeOpenAI = LobeOpenAICompatibleFactory({
  }
 
  if (model.includes('-search-')) {
+ const oaiSearchContextSize = process.env.OPENAI_SEARCH_CONTEXT_SIZE; // low, medium, high
+
  return {
  ...payload,
  frequency_penalty: undefined,
@@ -28,7 +30,12 @@ export const LobeOpenAI = LobeOpenAICompatibleFactory({
  stream: payload.stream ?? true,
  temperature: undefined,
  top_p: undefined,
- };
+ ...(oaiSearchContextSize && {
+ web_search_options: {
+ search_context_size: oaiSearchContextSize,
+ },
+ }),
+ } as any;
  }
 
  return { ...payload, stream: payload.stream ?? true };
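`OPENAI_SEARCH_CONTEXT_SIZE` is read straight from the environment and, when set, forwarded as `web_search_options.search_context_size` for the `-search-` preview models. A small self-contained sketch of consuming it; setting the variable in-process is just for illustration, in a real deployment it would come from the runtime environment:

```ts
// Sketch: opting the search-preview models into a larger search context.
// Valid values, per the inline comment in the hunk above: 'low' | 'medium' | 'high'.
process.env.OPENAI_SEARCH_CONTEXT_SIZE = 'high'; // illustrative only

const oaiSearchContextSize = process.env.OPENAI_SEARCH_CONTEXT_SIZE;

const searchPayload = {
  model: 'gpt-4o-search-preview',
  // spread is a no-op when the env var is unset, so the default context size applies
  ...(oaiSearchContextSize && {
    web_search_options: { search_context_size: oaiSearchContextSize },
  }),
};
```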
@@ -99,16 +99,87 @@ export const transformOpenAIStream = (
  if (item.finish_reason) {
  // one-api 的流式接口,会出现既有 finish_reason ,也有 content 的情况
  // {"id":"demo","model":"deepl-en","choices":[{"index":0,"delta":{"role":"assistant","content":"Introduce yourself."},"finish_reason":"stop"}]}
-
  if (typeof item.delta?.content === 'string' && !!item.delta.content) {
+ // MiniMax 内建搜索功能会在第一个 tools 流中 content 返回引用源,需要忽略
+ // {"id":"0483748a25071c611e2f48d2982fbe96","choices":[{"finish_reason":"stop","index":0,"delta":{"content":"[{\"no\":1,\"url\":\"https://www.xiaohongshu.com/discovery/item/66d8de3c000000001f01e752\",\"title\":\"郑钦文为国而战,没有理由不坚持🏅\",\"content\":\"·2024年08月03日\\n中国队选手郑钦文夺得巴黎奥运会网球女单比赛金牌(巴黎奥运第16金)\\n#巴黎奥运会[话题]# #郑钦文[话题]# #人物素材积累[话题]# #作文素材积累[话题]# #申论素材[话题]#\",\"web_icon\":\"https://www.xiaohongshu.com/favicon.ico\"}]","role":"tool","tool_call_id":"call_function_6696730535"}}],"created":1748255114,"model":"abab6.5s-chat","object":"chat.completion.chunk","usage":{"total_tokens":0,"total_characters":0},"input_sensitive":false,"output_sensitive":false,"input_sensitive_type":0,"output_sensitive_type":0,"output_sensitive_int":0}
+ if (typeof item.delta?.role === 'string' && item.delta.role === 'tool') {
+ return { data: null, id: chunk.id, type: 'text' };
+ }
+
  return { data: item.delta.content, id: chunk.id, type: 'text' };
  }
 
+ // OpenAI Search Preview 模型返回引用源
+ // {"id":"chatcmpl-18037d13-243c-4941-8b05-9530b352cf17","object":"chat.completion.chunk","created":1748351805,"model":"gpt-4o-mini-search-preview-2025-03-11","choices":[{"index":0,"delta":{"annotations":[{"type":"url_citation","url_citation":{"url":"https://zh.wikipedia.org/wiki/%E4%B8%8A%E6%B5%B7%E4%B9%90%E9%AB%98%E4%B9%90%E5%9B%AD?utm_source=openai","title":"上海乐高乐园","start_index":75,"end_index":199}}]},"finish_reason":"stop"}],"service_tier":"default"}
+ if ((item as any).delta?.annotations && (item as any).delta.annotations.length > 0) {
+ const citations = (item as any).delta.annotations;
+
+ return [
+ {
+ data: {
+ citations: citations.map(
+ (item: any) =>
+ ({
+ title: item.url_citation.title,
+ url: item.url_citation.url,
+ }) as CitationItem,
+ ),
+ },
+ id: chunk.id,
+ type: 'grounding',
+ },
+ ];
+ }
+
+ // MiniMax 内建搜索功能会在最后一个流中的 message 数组中返回 4 个 Object,其中最后一个为 annotations
+ // {"id":"0483bf14ba55225a66de2342a21b4003","choices":[{"finish_reason":"tool_calls","index":0,"messages":[{"content":"","role":"user","reasoning_content":""},{"content":"","role":"assistant","tool_calls":[{"id":"call_function_0872338692","type":"web_search","function":{"name":"get_search_result","arguments":"{\"query_tag\":[\"天气\"],\"query_list\":[\"上海 2025年5月26日 天气\"]}"}}],"reasoning_content":""},{"content":"","role":"tool","tool_call_id":"call_function_0872338692","reasoning_content":""},{"content":"","role":"assistant","name":"海螺AI","annotations":[{"text":"【5†source】","url":"https://mtianqi.eastday.com/tianqi/shanghai/20250526.html","quote":"上海天气预报提供上海2025年05月26日天气"}],"audio_content":"","reasoning_content":""}]}],"created":1748274196,"model":"MiniMax-Text-01","object":"chat.completion","usage":{"total_tokens":13110,"total_characters":0,"prompt_tokens":12938,"completion_tokens":172},"base_resp":{"status_code":0,"status_msg":"Invalid parameters detected, json: unknown field \"user\""}}
+ if ((item as any).messages && (item as any).messages.length > 0) {
+ const citations = (item as any).messages.at(-1).annotations;
+
+ return [
+ {
+ data: {
+ citations: citations.map(
+ (item: any) =>
+ ({
+ title: item.url,
+ url: item.url,
+ }) as CitationItem,
+ ),
+ },
+ id: chunk.id,
+ type: 'grounding',
+ },
+ ];
+ }
+
  if (chunk.usage) {
  const usage = chunk.usage;
  return { data: convertUsage(usage), id: chunk.id, type: 'usage' };
  }
 
+ // xAI Live Search 功能返回引用源
+ // {"id":"8721eebb-6465-4c47-ba2e-8e2ec0f97055","object":"chat.completion.chunk","created":1747809109,"model":"grok-3","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":"stop"}],"system_fingerprint":"fp_1affcf9872","citations":["https://world.huanqiu.com/"]}
+ if ((chunk as any).citations) {
+ const citations = (chunk as any).citations;
+
+ return [
+ {
+ data: {
+ citations: citations.map(
+ (item: any) =>
+ ({
+ title: item,
+ url: item,
+ }) as CitationItem,
+ ),
+ },
+ id: chunk.id,
+ type: 'grounding',
+ },
+ ];
+ }
+
  return { data: item.finish_reason, id: chunk.id, type: 'stop' };
  }
 
@@ -146,7 +217,9 @@ export const transformOpenAIStream = (
  // in Hunyuan api, the citation is in every chunk
  ('search_info' in chunk && (chunk.search_info as any)?.search_results) ||
  // in Wenxin api, the citation is in the first and last chunk
- ('search_results' in chunk && chunk.search_results);
+ ('search_results' in chunk && chunk.search_results) ||
+ // in Zhipu api, the citation is in the first chunk
+ ('web_search' in chunk && chunk.web_search);
 
  if (citations) {
  streamContext.returnedCitation = true;
@@ -154,13 +227,10 @@ export const transformOpenAIStream = (
  return [
  {
  data: {
- citations: (citations as any[]).map(
- (item) =>
- ({
- title: typeof item === 'string' ? item : item.title,
- url: typeof item === 'string' ? item : item.url,
- }) as CitationItem,
- ),
+ citations: (citations as any[]).map((item) => ({
+ title: typeof item === 'string' ? item : item.title,
+ url: typeof item === 'string' ? item : item.url || item.link,
+ })).filter(c => c.title && c.url), // Zhipu 内建搜索工具有时会返回空 link 引发程序崩溃
  },
  id: chunk.id,
  type: 'grounding',
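The rewritten mapping accepts plain string citations as well as `{ title, url }` or `{ title, link }` objects, and the trailing `.filter` drops Zhipu entries whose link comes back empty. A self-contained sketch of that normalization, with `CitationItem` assumed to be a simple `{ title, url }` pair:

```ts
// Sketch of the citation normalization feeding the 'grounding' chunk.
interface CitationItem {
  title: string;
  url: string;
}

const normalizeCitations = (citations: any[]): CitationItem[] =>
  citations
    .map((item) => ({
      title: typeof item === 'string' ? item : item.title,
      url: typeof item === 'string' ? item : item.url || item.link,
    }))
    // Zhipu's built-in search occasionally returns an empty link; drop those entries
    // instead of letting them crash the renderer downstream.
    .filter((c) => c.title && c.url);

// normalizeCitations(['https://example.com', { title: 'Doc', link: 'https://example.org' }])
//   → [{ title: 'https://example.com', url: 'https://example.com' },
//      { title: 'Doc', url: 'https://example.org' }]
```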
@@ -22,6 +22,9 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
  type: 'web_search',
  web_search: {
  enable: true,
+ result_sequence: 'before', // 将搜索结果返回顺序更改为 before 适配最小化 OpenAIStream 改动
+ search_engine: process.env.ZHIPU_SEARCH_ENGINE || 'search_std', // search_std, search_pro
+ search_result: true,
  },
  },
  ]
@@ -306,16 +306,13 @@ describe('parseModelString', () => {
  });
 
  describe('deployment name', () => {
- it('should have same deployment name as id', () => {
+ it('should have no deployment name', () => {
  const result = parseModelString('model1=Model 1', true);
  expect(result.add[0]).toEqual({
  id: 'model1',
  displayName: 'Model 1',
  abilities: {},
  type: 'chat',
- config: {
- deploymentName: 'model1',
- },
  });
  });
 
@@ -455,6 +452,61 @@ describe('transformToChatModelCards', () => {
  expect(result).toMatchSnapshot();
  });
 
+ it('should use default deploymentName from known model when not specified in string (VolcEngine case)', () => {
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+ (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
+ );
+ const defaultChatModels: AiFullModelCard[] = [];
+ const result = transformToAiChatModelList({
+ modelString: '+deepseek-r1',
+ defaultChatModels,
+ providerId: 'volcengine',
+ withDeploymentName: true,
+ });
+ expect(result).toContainEqual({
+ ...knownModel,
+ enabled: true,
+ });
+ });
+
+ it('should use deploymentName from modelString when specified (VolcEngine case)', () => {
+ const defaultChatModels: AiFullModelCard[] = [];
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+ (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
+ );
+ const result = transformToAiChatModelList({
+ modelString: `+deepseek-r1->my-custom-deploy`,
+ defaultChatModels,
+ providerId: 'volcengine',
+ withDeploymentName: true,
+ });
+ expect(result).toContainEqual({
+ ...knownModel,
+ enabled: true,
+ config: { deploymentName: 'my-custom-deploy' },
+ });
+ });
+
+ it('should set both id and deploymentName to the full string when no -> is used and withDeploymentName is true', () => {
+ const defaultChatModels: AiFullModelCard[] = [];
+ const result = transformToAiChatModelList({
+ modelString: `+my_model`,
+ defaultChatModels,
+ providerId: 'volcengine',
+ withDeploymentName: true,
+ });
+ expect(result).toContainEqual({
+ id: `my_model`,
+ displayName: `my_model`,
+ type: 'chat',
+ abilities: {},
+ enabled: true,
+ config: {
+ deploymentName: `my_model`,
+ },
+ });
+ });
+
  it('should handle azure real case', () => {
  const defaultChatModels = [
  {
@@ -23,7 +23,7 @@ export const parseModelString = (modelString: string = '', withDeploymentName =
 
  if (withDeploymentName) {
  [id, deploymentName] = id.split('->');
- if (!deploymentName) deploymentName = id;
+ // if (!deploymentName) deploymentName = id;
  }
 
  if (disable) {
@@ -141,6 +141,12 @@ export const transformToAiChatModelList = ({
  knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === toAddModel.id);
  if (knownModel) knownModel.providerId = providerId;
  }
+ if (withDeploymentName) {
+ toAddModel.config = toAddModel.config || {};
+ if (!toAddModel.config.deploymentName) {
+ toAddModel.config.deploymentName = knownModel?.config?.deploymentName ?? toAddModel.id;
+ }
+ }
 
  // if the model is known, update it based on the known model
  if (knownModel) {
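Taken together with the now commented-out fallback in `parseModelString`, deployment-name resolution happens in `transformToAiChatModelList`: an explicit `->deploymentName` from the model string wins, otherwise the known model's default is used, and only then does the raw id stand in. A hedged sketch of that precedence, using a hypothetical helper name and reduced types:

```ts
// Sketch (hypothetical helper, reduced types): the precedence implied by the hunk above.
// 1. deploymentName parsed from the model string ('model->deploy')
// 2. deploymentName of the matching entry in LOBE_DEFAULT_MODEL_LIST
// 3. the model id itself
const resolveDeploymentName = (
  parsed: { config?: { deploymentName?: string }; id: string },
  knownModel?: { config?: { deploymentName?: string } },
): string =>
  parsed.config?.deploymentName ?? knownModel?.config?.deploymentName ?? parsed.id;

// resolveDeploymentName({ id: 'deepseek-r1' }, { config: { deploymentName: 'deepseek-r1-250120' } })
//   → 'deepseek-r1-250120'
// resolveDeploymentName({ id: 'my_model' })
//   → 'my_model'
```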