@lobehub/chat 1.90.0 → 1.90.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,40 @@
 
  # Changelog
 
+ ### [Version 1.90.1](https://github.com/lobehub/lobe-chat/compare/v1.90.0...v1.90.1)
+
+ <sup>Released on **2025-06-01**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, fix DeepSeek new R1 Search error.
+
+ #### 💄 Styles
+
+ - **misc**: Use default deployment name when parseModelString doesn't contain deployment name.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, closes [#8034](https://github.com/lobehub/lobe-chat/issues/8034) ([5b42ee2](https://github.com/lobehub/lobe-chat/commit/5b42ee2))
+ - **misc**: Fix DeepSeek new R1 Search error, closes [#8035](https://github.com/lobehub/lobe-chat/issues/8035) ([cf58628](https://github.com/lobehub/lobe-chat/commit/cf58628))
+
+ #### Styles
+
+ - **misc**: Use default deployment name when parseModelString doesn't contain deployment name, closes [#7719](https://github.com/lobehub/lobe-chat/issues/7719) ([aef19f4](https://github.com/lobehub/lobe-chat/commit/aef19f4))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 1.90.0](https://github.com/lobehub/lobe-chat/compare/v1.89.0...v1.90.0)
 
  <sup>Released on **2025-06-01**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,16 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, fix DeepSeek new R1 Search error."
+       ],
+       "improvements": [
+         "Use default deployment name when parseModelString doesn't contain deployment name."
+       ]
+     },
+     "date": "2025-06-01",
+     "version": "1.90.1"
+   },
    {
      "children": {
        "features": [
@@ -8,10 +8,10 @@
  # ref: https://github.com/lobehub/lobe-chat/pull/5247
  if [[ "$OSTYPE" == "darwin"* ]]; then
      # macOS
-     SED_COMMAND="sed -i ''"
+     SED_INPLACE_ARGS=('-i' '')
  else
      # not macOS
-     SED_COMMAND="sed -i"
+     SED_INPLACE_ARGS=('-i')
  fi
 
  # ======================
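Why the fix moves from a string to an array: `$SED_COMMAND` undergoes word splitting when expanded, so the macOS form `sed -i ''` loses the empty backup-suffix argument that BSD sed requires, while `"${SED_INPLACE_ARGS[@]}"` expands each element as exactly one argument. The same principle, sketched in TypeScript with Node's `child_process` (the substitution and file name are illustrative):

```ts
import { execFileSync } from 'node:child_process';

// Mirror of SED_INPLACE_ARGS: BSD sed (macOS) needs '-i' plus a separate,
// possibly empty, backup-suffix argument; GNU sed takes '-i' alone.
const sedInPlaceArgs = process.platform === 'darwin' ? ['-i', ''] : ['-i'];

// Each array element becomes exactly one argv entry, so the empty string
// survives intact -- the argv-array equivalent of "${SED_INPLACE_ARGS[@]}",
// and the opposite of interpolating a command into a single shell string.
execFileSync('sed', [...sedInPlaceArgs, 's#http://#https://#', '.env']);
```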
@@ -519,12 +519,12 @@ section_configurate_host() {
      if [[ "$ask_result" == "y" ]]; then
          PROTOCOL="https"
          # Replace all http with https
-         $SED_COMMAND "s#http://#https://#" .env
+         sed "${SED_INPLACE_ARGS[@]}" "s#http://#https://#" .env
      fi
  fi
 
  # Check if sed is installed
- if ! command -v $SED_COMMAND &> /dev/null ; then
+ if ! command -v sed "${SED_INPLACE_ARGS[@]}" &> /dev/null ; then
      echo "sed" $(show_message "tips_no_executable")
      exit 1
  fi
@@ -553,7 +553,7 @@ section_configurate_host() {
      ask "(auth.example.com)"
      CASDOOR_HOST="$ask_result"
      # Setup callback url for Casdoor
-     $SED_COMMAND "s/"example.com"/${LOBE_HOST}/" init_data.json
+     sed "${SED_INPLACE_ARGS[@]}" "s/"example.com"/${LOBE_HOST}/" init_data.json
      ;;
  1)
      DEPLOY_MODE="ip"
@@ -566,7 +566,7 @@ section_configurate_host() {
      MINIO_HOST="${HOST}:9000"
      CASDOOR_HOST="${HOST}:8000"
      # Setup callback url for Casdoor
-     $SED_COMMAND "s/"localhost:3210"/${LOBE_HOST}/" init_data.json
+     sed "${SED_INPLACE_ARGS[@]}" "s/"localhost:3210"/${LOBE_HOST}/" init_data.json
      ;;
  *)
      echo "Invalid deploy mode: $ask_result"
@@ -575,14 +575,14 @@ section_configurate_host() {
  esac
 
  # lobe host
- $SED_COMMAND "s#^APP_URL=.*#APP_URL=$PROTOCOL://$LOBE_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^APP_URL=.*#APP_URL=$PROTOCOL://$LOBE_HOST#" .env
  # auth related
- $SED_COMMAND "s#^AUTH_URL=.*#AUTH_URL=$PROTOCOL://$LOBE_HOST/api/auth#" .env
- $SED_COMMAND "s#^AUTH_CASDOOR_ISSUER=.*#AUTH_CASDOOR_ISSUER=$PROTOCOL://$CASDOOR_HOST#" .env
- $SED_COMMAND "s#^origin=.*#origin=$PROTOCOL://$CASDOOR_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_URL=.*#AUTH_URL=$PROTOCOL://$LOBE_HOST/api/auth#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_CASDOOR_ISSUER=.*#AUTH_CASDOOR_ISSUER=$PROTOCOL://$CASDOOR_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^origin=.*#origin=$PROTOCOL://$CASDOOR_HOST#" .env
  # s3 related
- $SED_COMMAND "s#^S3_PUBLIC_DOMAIN=.*#S3_PUBLIC_DOMAIN=$PROTOCOL://$MINIO_HOST#" .env
- $SED_COMMAND "s#^S3_ENDPOINT=.*#S3_ENDPOINT=$PROTOCOL://$MINIO_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^S3_PUBLIC_DOMAIN=.*#S3_PUBLIC_DOMAIN=$PROTOCOL://$MINIO_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^S3_ENDPOINT=.*#S3_ENDPOINT=$PROTOCOL://$MINIO_HOST#" .env
 
 
  # Check if env modified success
@@ -641,12 +641,12 @@ section_regenerate_secrets() {
      echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_SECRET"
  else
      # Search and replace the value of CASDOOR_SECRET in .env
-     $SED_COMMAND "s#^AUTH_CASDOOR_SECRET=.*#AUTH_CASDOOR_SECRET=${CASDOOR_SECRET}#" .env
+     sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_CASDOOR_SECRET=.*#AUTH_CASDOOR_SECRET=${CASDOOR_SECRET}#" .env
      if [ $? -ne 0 ]; then
          echo $(show_message "security_secrect_regenerate_failed") "AUTH_CASDOOR_SECRET in \`.env\`"
      fi
      # replace `clientSecrect` in init_data.json
-     $SED_COMMAND "s#dbf205949d704de81b0b5b3603174e23fbecc354#${CASDOOR_SECRET}#" init_data.json
+     sed "${SED_INPLACE_ARGS[@]}" "s#dbf205949d704de81b0b5b3603174e23fbecc354#${CASDOOR_SECRET}#" init_data.json
      if [ $? -ne 0 ]; then
          echo $(show_message "security_secrect_regenerate_failed") "AUTH_CASDOOR_SECRET in \`init_data.json\`"
      fi
@@ -660,7 +660,7 @@ section_regenerate_secrets() {
      CASDOOR_PASSWORD="123"
  else
      # replace `password` in init_data.json
-     $SED_COMMAND "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
+     sed "${SED_INPLACE_ARGS[@]}" "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
      if [ $? -ne 0 ]; then
          echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_PASSWORD in \`init_data.json\`"
      fi
@@ -672,7 +672,7 @@ section_regenerate_secrets() {
      MINIO_ROOT_PASSWORD="YOUR_MINIO_PASSWORD"
  else
      # Search and replace the value of S3_SECRET_ACCESS_KEY in .env
-     $SED_COMMAND "s#^MINIO_ROOT_PASSWORD=.*#MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}#" .env
+     sed "${SED_INPLACE_ARGS[@]}" "s#^MINIO_ROOT_PASSWORD=.*#MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}#" .env
      if [ $? -ne 0 ]; then
          echo $(show_message "security_secrect_regenerate_failed") "MINIO_ROOT_PASSWORD in \`.env\`"
      fi
@@ -19,6 +19,7 @@ You can use `+` to add a model, `-` to hide a model, and use `model name->deploy
  ```text
  id->deploymentName=displayName<maxToken:vision:reasoning:search:fc:file:imageOutput>,model2,model3
  ```
+ The `->deploymentName` part can be omitted; when omitted, the latest model version is used. The model service providers that currently support `->deploymentName` are Azure and Volcengine.
 
  For example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
 
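To make the documented grammar concrete, here is a minimal TypeScript sketch of how a single entry decomposes (a simplified illustration that ignores the `+`/`-` prefixes and the `<...>` ability flags; it is not the package's actual `parseModelString`):

```ts
interface ParsedModel {
  id: string;
  deploymentName?: string; // omitted -> the provider uses the latest model version
  displayName?: string;
}

// Split 'id->deploymentName=displayName' into its three optional parts.
function parseEntry(entry: string): ParsedModel {
  const [idPart, displayName] = entry.split('=');
  const [id, deploymentName] = idPart.split('->');
  return { id, deploymentName, displayName };
}

console.log(parseEntry('deepseek-v3->deepseek-v3-250324=DeepSeek V3'));
// { id: 'deepseek-v3', deploymentName: 'deepseek-v3-250324', displayName: 'DeepSeek V3' }
console.log(parseEntry('deepseek-r1'));
// { id: 'deepseek-r1', deploymentName: undefined, displayName: undefined }
```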
@@ -18,6 +18,7 @@ LobeChat supports customizing the model list at deployment time; for details, see [Model Prov
  ```text
  id->deploymentName=displayName<maxToken:vision:reasoning:search:fc:file:imageOutput>,model2,model3
  ```
+ The `->deploymentName` part can be omitted; when omitted, the latest model version is used. The model service providers that currently support `->deploymentName` are Azure and Volcengine.
 
  For example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
 
@@ -599,9 +599,9 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
  ### `VOLCENGINE_MODEL_LIST`
 
  - Type: Optional
- - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. The `->deploymentName` part can be omitted; when omitted, the latest model version is used. Definition syntax rules see [model-list][model-list]
  - Default: `-`
- - Example: `-all,+deepseek-r1->deepseek-r1-250120,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k->doubao-1-5-pro-256k-250115,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k->doubao-1-5-lite-32k-250115`
+ - Example: `-all,+deepseek-r1,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k`
 
  ### `VOLCENGINE_PROXY_URL`
 
@@ -622,7 +622,7 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
  ### `INFINIAI_MODEL_LIST`
 
  - Type: Optional
- - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
  - Default: `-`
  - Example: `-all,+qwq-32b,+deepseek-r1`
 
@@ -597,9 +597,9 @@ LobeChat provides a rich set of model service provider environment variables at deployment time,
  ### `VOLCENGINE_MODEL_LIST`
 
  - Type: Optional
- - Description: Used to control the model list: use `+` to add a model, `-` to hide a model, and `model_name->deploymentName=display_name<extended_config>` to customize a model's display name, separated by commas. For the model definition syntax, see [model-list][model-list]
+ - Description: Used to control the model list: use `+` to add a model, `-` to hide a model, and `model_name->deploymentName=display_name<extended_config>` to customize a model's display name, separated by commas. The `->deploymentName` part can be omitted; when omitted, the latest model version is used. For the model definition syntax, see [model-list][model-list]
  - Default: `-`
- - Example: `-all,+deepseek-r1->deepseek-r1-250120,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k->doubao-1-5-pro-256k-250115,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k->doubao-1-5-lite-32k-250115`
+ - Example: `-all,+deepseek-r1,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k`
 
  ### `VOLCENGINE_PROXY_URL`
 
@@ -620,7 +620,7 @@ LobeChat provides a rich set of model service provider environment variables at deployment time,
  ### `INFINIAI_MODEL_LIST`
 
  - Type: Optional
- - Description: Used to control the model list: use `+` to add a model, `-` to hide a model, and `model_name->deploymentName=display_name<extended_config>` to customize a model's display name, separated by commas. For the model definition syntax, see [model-list][model-list]
+ - Description: Used to control the model list: use `+` to add a model, `-` to hide a model, and `model_name=display_name<extended_config>` to customize a model's display name, separated by commas. For the model definition syntax, see [model-list][model-list]
  - Default: `-`
  - Example: `-all,+qwq-32b,+deepseek-r1`
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.90.0",
+   "version": "1.90.1",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -107,6 +107,7 @@ const SystemRole = memo(() => {
        <EditableMessage
          classNames={{ markdown: styles.prompt }}
          editing={editing}
+         markdownProps={{ enableLatex: false, enableMermaid: false }}
          model={{
            extra: (
              <AgentInfo
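For context on the one-line fix above: the system-role preview re-renders whenever the topic changes, and a prompt full of formulas or diagrams would otherwise queue up many KaTeX/Mermaid rendering tasks at once. A minimal sketch of the prop-gating pattern, assuming (as the diff suggests) that `EditableMessage` forwards `markdownProps` to a Markdown renderer with `enableLatex`/`enableMermaid` switches; everything else here is illustrative, not the actual @lobehub/ui internals:

```tsx
import { memo } from 'react';

interface MarkdownProps {
  children: string;
  enableLatex?: boolean;   // when false, skip KaTeX processing entirely
  enableMermaid?: boolean; // when false, render mermaid fences as plain code
}

const Markdown = memo(({ children, enableLatex = true, enableMermaid = true }: MarkdownProps) => {
  // Only wire up the expensive pipelines when enabled, so a read-only
  // preview can skip the heavy rendering work wholesale.
  const plugins = [
    ...(enableLatex ? ['latex'] : []),
    ...(enableMermaid ? ['mermaid'] : []),
  ];
  return <div data-plugins={plugins.join(',')}>{children}</div>;
});

// Usage mirroring the diff: the system-role preview opts out of both.
export const SystemRolePreview = ({ prompt }: { prompt: string }) => (
  <Markdown enableLatex={false} enableMermaid={false}>{prompt}</Markdown>
);
```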
@@ -1,12 +1,5 @@
  // @vitest-environment node
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
- import {
-   ChatStreamPayload,
-   LLMRoleType,
-   LobeOpenAICompatibleRuntime,
-   ModelProvider,
- } from '@/libs/model-runtime';
+ import { ModelProvider } from '@/libs/model-runtime';
  import { testProvider } from '@/libs/model-runtime/providerTestUtils';
 
  import { LobeDeepSeekAI } from './index';
@@ -24,151 +17,3 @@ testProvider({
    skipAPICall: true,
  },
});
-
- let instance: LobeOpenAICompatibleRuntime;
-
- const createDeepSeekAIInstance = () => new LobeDeepSeekAI({ apiKey: 'test' });
-
- const mockSuccessfulChatCompletion = () => {
-   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue({
-     id: 'cmpl-mock',
-     object: 'chat.completion',
-     created: Date.now(),
-     choices: [
-       { index: 0, message: { role: 'assistant', content: 'Mock response' }, finish_reason: 'stop' },
-     ],
-   } as any);
- };
-
- beforeEach(() => {
-   instance = new LobeDeepSeekAI({ apiKey: 'test' });
-
-   // Use vi.spyOn to mock the chat.completions.create method
-   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-     new ReadableStream() as any,
-   );
- });
-
- afterEach(() => {
-   vi.clearAllMocks();
- });
-
- describe('LobeDeepSeekAI', () => {
-   describe('deepseek-reasoner', () => {
-     beforeEach(() => {
-       instance = createDeepSeekAIInstance();
-       mockSuccessfulChatCompletion();
-     });
-
-     it('should insert a user message if the first message is from assistant', async () => {
-       const payloadMessages = [{ content: 'Hello', role: 'assistant' as LLMRoleType }];
-       const expectedMessages = [{ content: '', role: 'user' }, ...payloadMessages];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-
-     it('should insert a user message if the first message is from assistant (with system summary)', async () => {
-       const payloadMessages = [
-         { content: 'System summary', role: 'system' as LLMRoleType },
-         { content: 'Hello', role: 'assistant' as LLMRoleType },
-       ];
-       const expectedMessages = [
-         { content: 'System summary', role: 'system' },
-         { content: '', role: 'user' },
-         { content: 'Hello', role: 'assistant' },
-       ];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-
-     it('should insert alternating roles if messages do not alternate', async () => {
-       const payloadMessages = [
-         { content: 'user1', role: 'user' as LLMRoleType },
-         { content: 'user2', role: 'user' as LLMRoleType },
-         { content: 'assistant1', role: 'assistant' as LLMRoleType },
-         { content: 'assistant2', role: 'assistant' as LLMRoleType },
-       ];
-       const expectedMessages = [
-         { content: 'user1', role: 'user' },
-         { content: '', role: 'assistant' },
-         { content: 'user2', role: 'user' },
-         { content: 'assistant1', role: 'assistant' },
-         { content: '', role: 'user' },
-         { content: 'assistant2', role: 'assistant' },
-       ];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-
-     it('complex condition', async () => {
-       const payloadMessages = [
-         { content: 'system', role: 'system' as LLMRoleType },
-         { content: 'assistant', role: 'assistant' as LLMRoleType },
-         { content: 'user1', role: 'user' as LLMRoleType },
-         { content: 'user2', role: 'user' as LLMRoleType },
-         { content: 'user3', role: 'user' as LLMRoleType },
-         { content: 'assistant1', role: 'assistant' as LLMRoleType },
-         { content: 'assistant2', role: 'assistant' as LLMRoleType },
-       ];
-       const expectedMessages = [
-         { content: 'system', role: 'system' },
-         { content: '', role: 'user' },
-         { content: 'assistant', role: 'assistant' },
-         { content: 'user1', role: 'user' },
-         { content: '', role: 'assistant' },
-         { content: 'user2', role: 'user' },
-         { content: '', role: 'assistant' },
-         { content: 'user3', role: 'user' },
-         { content: 'assistant1', role: 'assistant' },
-         { content: '', role: 'user' },
-         { content: 'assistant2', role: 'assistant' },
-       ];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-   });
- });
@@ -1,8 +1,6 @@
- import OpenAI from 'openai';
-
  import type { ChatModelCard } from '@/types/llm';
 
- import { ChatStreamPayload, ModelProvider } from '../types';
+ import { ModelProvider } from '../types';
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
  export interface DeepSeekModelCard {
@@ -11,59 +9,6 @@ export interface DeepSeekModelCard {
 
  export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
    baseURL: 'https://api.deepseek.com/v1',
-   chatCompletion: {
-     handlePayload: ({
-       frequency_penalty,
-       messages,
-       model,
-       presence_penalty,
-       temperature,
-       top_p,
-       ...payload
-     }: ChatStreamPayload) => {
-       // github.com/lobehub/lobe-chat/pull/5548
-       let filteredMessages = messages.filter((message) => message.role !== 'system');
-
-       if (filteredMessages.length > 0 && filteredMessages[0].role === 'assistant') {
-         filteredMessages.unshift({ content: '', role: 'user' });
-       }
-
-       let lastRole = '';
-       for (let i = 0; i < filteredMessages.length; i++) {
-         const message = filteredMessages[i];
-         if (message.role === lastRole) {
-           const newRole = lastRole === 'assistant' ? 'user' : 'assistant';
-           filteredMessages.splice(i, 0, { content: '', role: newRole });
-           i++;
-         }
-         lastRole = message.role;
-       }
-
-       if (messages.length > 0 && messages[0].role === 'system') {
-         filteredMessages.unshift(messages[0]);
-       }
-
-       return {
-         ...payload,
-         model,
-         ...(model === 'deepseek-reasoner'
-           ? {
-               frequency_penalty: undefined,
-               messages: filteredMessages,
-               presence_penalty: undefined,
-               temperature: undefined,
-               top_p: undefined,
-             }
-           : {
-               frequency_penalty,
-               messages,
-               presence_penalty,
-               temperature,
-               top_p,
-             }),
-       } as OpenAI.ChatCompletionCreateParamsStreaming;
-     },
-   },
    debug: {
      chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
    },
@@ -306,16 +306,13 @@ describe('parseModelString', () => {
  });
 
  describe('deployment name', () => {
-   it('should have same deployment name as id', () => {
+   it('should have no deployment name', () => {
      const result = parseModelString('model1=Model 1', true);
      expect(result.add[0]).toEqual({
        id: 'model1',
        displayName: 'Model 1',
        abilities: {},
        type: 'chat',
-       config: {
-         deploymentName: 'model1',
-       },
      });
    });
 
@@ -455,6 +452,61 @@ describe('transformToChatModelCards', () => {
    expect(result).toMatchSnapshot();
  });
 
+   it('should use default deploymentName from known model when not specified in string (VolcEngine case)', () => {
+     const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+       (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
+     );
+     const defaultChatModels: AiFullModelCard[] = [];
+     const result = transformToAiChatModelList({
+       modelString: '+deepseek-r1',
+       defaultChatModels,
+       providerId: 'volcengine',
+       withDeploymentName: true,
+     });
+     expect(result).toContainEqual({
+       ...knownModel,
+       enabled: true,
+     });
+   });
+
+   it('should use deploymentName from modelString when specified (VolcEngine case)', () => {
+     const defaultChatModels: AiFullModelCard[] = [];
+     const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+       (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
+     );
+     const result = transformToAiChatModelList({
+       modelString: `+deepseek-r1->my-custom-deploy`,
+       defaultChatModels,
+       providerId: 'volcengine',
+       withDeploymentName: true,
+     });
+     expect(result).toContainEqual({
+       ...knownModel,
+       enabled: true,
+       config: { deploymentName: 'my-custom-deploy' },
+     });
+   });
+
+   it('should set both id and deploymentName to the full string when no -> is used and withDeploymentName is true', () => {
+     const defaultChatModels: AiFullModelCard[] = [];
+     const result = transformToAiChatModelList({
+       modelString: `+my_model`,
+       defaultChatModels,
+       providerId: 'volcengine',
+       withDeploymentName: true,
+     });
+     expect(result).toContainEqual({
+       id: `my_model`,
+       displayName: `my_model`,
+       type: 'chat',
+       abilities: {},
+       enabled: true,
+       config: {
+         deploymentName: `my_model`,
+       },
+     });
+   });
+
  it('should handle azure real case', () => {
    const defaultChatModels = [
      {
@@ -23,7 +23,7 @@ export const parseModelString = (modelString: string = '', withDeploymentName =
 
    if (withDeploymentName) {
      [id, deploymentName] = id.split('->');
-     if (!deploymentName) deploymentName = id;
+     // if (!deploymentName) deploymentName = id;
    }
 
    if (disable) {
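The commented-out fallback changes what `parseModelString` reports when `->` is absent: `deploymentName` now stays `undefined` instead of being forced to the id, deferring the decision to `transformToAiChatModelList`. A reduced TypeScript sketch of the before/after behavior (just the split, not the full parser):

```ts
// Before: a missing '->' silently duplicated the id as the deployment name.
function parseBefore(entry: string) {
  let [id, deploymentName] = entry.split('->');
  if (!deploymentName) deploymentName = id;
  return { id, deploymentName };
}

// After: the absence of '->' is preserved, so later stages can fall back
// to a known model's default deployment instead of the raw id.
function parseAfter(entry: string) {
  const [id, deploymentName] = entry.split('->');
  return { id, deploymentName }; // deploymentName may be undefined
}

console.log(parseBefore('deepseek-r1')); // { id: 'deepseek-r1', deploymentName: 'deepseek-r1' }
console.log(parseAfter('deepseek-r1')); // { id: 'deepseek-r1', deploymentName: undefined }
```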
@@ -141,6 +141,12 @@ export const transformToAiChatModelList = ({
      knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === toAddModel.id);
      if (knownModel) knownModel.providerId = providerId;
    }
+   if (withDeploymentName) {
+     toAddModel.config = toAddModel.config || {};
+     if (!toAddModel.config.deploymentName) {
+       toAddModel.config.deploymentName = knownModel?.config?.deploymentName ?? toAddModel.id;
+     }
+   }
 
    // if the model is known, update it based on the known model
    if (knownModel) {
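Combined with the parser change above, the deployment name now resolves with a clear precedence: an explicit `->name` in the model string wins, then the known model's packaged default, then the model id itself. A self-contained TypeScript sketch of that resolution order (`knownModels` is an illustrative stand-in for `LOBE_DEFAULT_MODEL_LIST`):

```ts
interface ModelConfig { deploymentName?: string }
interface ModelCard { id: string; config?: ModelConfig }

// Illustrative stand-in for LOBE_DEFAULT_MODEL_LIST entries with defaults.
const knownModels: ModelCard[] = [
  { id: 'deepseek-r1', config: { deploymentName: 'deepseek-r1-250120' } },
];

function resolveDeploymentName(parsed: ModelCard): string {
  const known = knownModels.find((m) => m.id === parsed.id);
  return (
    parsed.config?.deploymentName // 1. explicit `id->name` from the env string
    ?? known?.config?.deploymentName // 2. default shipped with the known model
    ?? parsed.id // 3. last resort: the id itself
  );
}

console.log(resolveDeploymentName({ id: 'deepseek-r1' }));
// 'deepseek-r1-250120' -- the packaged default now wins when '->' is omitted
console.log(resolveDeploymentName({ id: 'deepseek-r1', config: { deploymentName: 'my-deploy' } }));
// 'my-deploy'
console.log(resolveDeploymentName({ id: 'my_model' }));
// 'my_model'
```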