@lobehub/chat 1.89.0 → 1.90.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +59 -0
  2. package/README.md +3 -2
  3. package/README.zh-CN.md +3 -2
  4. package/changelog/v1.json +21 -0
  5. package/docker-compose/setup.sh +16 -16
  6. package/docs/self-hosting/advanced/model-list.mdx +1 -0
  7. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +1 -0
  8. package/docs/self-hosting/environment-variables/basic.mdx +7 -0
  9. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +9 -2
  10. package/docs/self-hosting/environment-variables/model-provider.mdx +3 -3
  11. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +3 -3
  12. package/package.json +1 -1
  13. package/src/app/(backend)/_deprecated/createBizOpenAI/auth.ts +1 -1
  14. package/src/app/(backend)/middleware/auth/utils.test.ts +2 -2
  15. package/src/app/(backend)/middleware/auth/utils.ts +1 -1
  16. package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -1
  17. package/src/app/(backend)/webapi/proxy/route.ts +1 -1
  18. package/src/app/[variants]/(main)/chat/(workspace)/@topic/features/SystemRole/SystemRoleContent.tsx +1 -0
  19. package/src/app/[variants]/metadata.ts +1 -1
  20. package/src/components/Error/sentryCaptureException.ts +1 -1
  21. package/src/config/__tests__/app.test.ts +1 -1
  22. package/src/{config → envs}/app.ts +2 -0
  23. package/src/layout/GlobalProvider/index.tsx +1 -1
  24. package/src/libs/model-runtime/deepseek/index.test.ts +1 -156
  25. package/src/libs/model-runtime/deepseek/index.ts +1 -56
  26. package/src/libs/oidc-provider/http-adapter.ts +1 -1
  27. package/src/libs/oidc-provider/provider.ts +1 -1
  28. package/src/middleware.ts +16 -3
  29. package/src/server/globalConfig/index.test.ts +2 -2
  30. package/src/server/globalConfig/index.ts +1 -1
  31. package/src/server/modules/AssistantStore/index.ts +1 -1
  32. package/src/server/modules/EdgeConfig/index.ts +1 -1
  33. package/src/server/modules/PluginStore/index.ts +1 -1
  34. package/src/server/routers/async/caller.ts +1 -1
  35. package/src/server/services/agent/index.test.ts +1 -1
  36. package/src/utils/basePath.ts +1 -1
  37. package/src/utils/parseModels.test.ts +56 -4
  38. package/src/utils/parseModels.ts +7 -1
  39. package/src/utils/server/jwt.test.ts +1 -1
package/CHANGELOG.md CHANGED
@@ -2,6 +2,65 @@

  # Changelog

+ ### [Version 1.90.1](https://github.com/lobehub/lobe-chat/compare/v1.90.0...v1.90.1)
+
+ <sup>Released on **2025-06-01**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, fix DeepSeek new R1 Search error.
+
+ #### 💄 Styles
+
+ - **misc**: Use default deployment name when parseModelString doesn't contain deployment name.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, closes [#8034](https://github.com/lobehub/lobe-chat/issues/8034) ([5b42ee2](https://github.com/lobehub/lobe-chat/commit/5b42ee2))
+ - **misc**: Fix DeepSeek new R1 Search error, closes [#8035](https://github.com/lobehub/lobe-chat/issues/8035) ([cf58628](https://github.com/lobehub/lobe-chat/commit/cf58628))
+
+ #### Styles
+
+ - **misc**: Use default deployment name when parseModelString doesn't contain deployment name, closes [#7719](https://github.com/lobehub/lobe-chat/issues/7719) ([aef19f4](https://github.com/lobehub/lobe-chat/commit/aef19f4))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ## [Version 1.90.0](https://github.com/lobehub/lobe-chat/compare/v1.89.0...v1.90.0)
+
+ <sup>Released on **2025-06-01**</sup>
+
+ #### ✨ Features
+
+ - **misc**: Support protect page.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's improved
+
+ - **misc**: Support protect page, closes [#8024](https://github.com/lobehub/lobe-chat/issues/8024) ([d61a9f5](https://github.com/lobehub/lobe-chat/commit/d61a9f5))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 1.89.0](https://github.com/lobehub/lobe-chat/compare/v1.88.23...v1.89.0)

  <sup>Released on **2025-06-01**</sup>
package/README.md CHANGED
@@ -196,7 +196,7 @@ We have implemented support for the following model service providers:
  - **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter is a service platform providing access to various cutting-edge large model interfaces, supporting OpenAI, Anthropic, LLaMA, and more, suitable for diverse development and application needs. Users can flexibly choose the optimal model and pricing based on their requirements, enhancing the AI experience.
  - **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: Run serverless GPU-powered machine learning models on Cloudflare's global network.

- <details><summary><kbd>See more providers (+31)</kbd></summary>
+ <details><summary><kbd>See more providers (+32)</kbd></summary>

  - **[GitHub](https://lobechat.com/discover/provider/github)**: With GitHub Models, developers can become AI engineers and leverage the industry's leading AI models.
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI is a platform providing a variety of large language models and AI image generation API services, flexible, reliable, and cost-effective. It supports the latest open-source models like Llama3 and Mistral, offering a comprehensive, user-friendly, and auto-scaling API solution for generative AI application development, suitable for the rapid growth of AI startups.
@@ -206,6 +206,7 @@ We have implemented support for the following model service providers:
  - **[Groq](https://lobechat.com/discover/provider/groq)**: Groq's LPU inference engine has excelled in the latest independent large language model (LLM) benchmarks, redefining the standards for AI solutions with its remarkable speed and efficiency. Groq represents instant inference speed, demonstrating strong performance in cloud-based deployments.
  - **[Perplexity](https://lobechat.com/discover/provider/perplexity)**: Perplexity is a leading provider of conversational generation models, offering various advanced Llama 3.1 models that support both online and offline applications, particularly suited for complex natural language processing tasks.
  - **[Mistral](https://lobechat.com/discover/provider/mistral)**: Mistral provides advanced general, specialized, and research models widely used in complex reasoning, multilingual tasks, and code generation. Through functional calling interfaces, users can integrate custom functionalities for specific applications.
+ - **[ModelScope](https://lobechat.com/discover/provider/modelscope)**:
  - **[Ai21Labs](https://lobechat.com/discover/provider/ai21)**: AI21 Labs builds foundational models and AI systems for enterprises, accelerating the application of generative AI in production.
  - **[Upstage](https://lobechat.com/discover/provider/upstage)**: Upstage focuses on developing AI models for various business needs, including Solar LLM and document AI, aiming to achieve artificial general intelligence (AGI) for work. It allows for the creation of simple conversational agents through Chat API and supports functional calling, translation, embedding, and domain-specific applications.
  - **[xAI](https://lobechat.com/discover/provider/xai)**: xAI is a company dedicated to building artificial intelligence to accelerate human scientific discovery. Our mission is to advance our collective understanding of the universe.
@@ -232,7 +233,7 @@ We have implemented support for the following model service providers:

  </details>

- > 📊 Total providers: [<kbd>**41**</kbd>](https://lobechat.com/discover/providers)
+ > 📊 Total providers: [<kbd>**42**</kbd>](https://lobechat.com/discover/providers)

  <!-- PROVIDER LIST -->

package/README.zh-CN.md CHANGED
@@ -196,7 +196,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
  - **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter 是一个提供多种前沿大模型接口的服务平台,支持 OpenAI、Anthropic、LLaMA 及更多,适合多样化的开发和应用需求。用户可根据自身需求灵活选择最优的模型和价格,助力 AI 体验的提升。
  - **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: 在 Cloudflare 的全球网络上运行由无服务器 GPU 驱动的机器学习模型。

- <details><summary><kbd>See more providers (+31)</kbd></summary>
+ <details><summary><kbd>See more providers (+32)</kbd></summary>

  - **[GitHub](https://lobechat.com/discover/provider/github)**: 通过 GitHub 模型,开发人员可以成为 AI 工程师,并使用行业领先的 AI 模型进行构建。
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI 是一个提供多种大语言模型与 AI 图像生成的 API 服务的平台,灵活、可靠且具有成本效益。它支持 Llama3、Mistral 等最新的开源模型,并为生成式 AI 应用开发提供了全面、用户友好且自动扩展的 API 解决方案,适合 AI 初创公司的快速发展。
@@ -206,6 +206,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
  - **[Groq](https://lobechat.com/discover/provider/groq)**: Groq 的 LPU 推理引擎在最新的独立大语言模型(LLM)基准测试中表现卓越,以其惊人的速度和效率重新定义了 AI 解决方案的标准。Groq 是一种即时推理速度的代表,在基于云的部署中展现了良好的性能。
  - **[Perplexity](https://lobechat.com/discover/provider/perplexity)**: Perplexity 是一家领先的对话生成模型提供商,提供多种先进的 Llama 3.1 模型,支持在线和离线应用,特别适用于复杂的自然语言处理任务。
  - **[Mistral](https://lobechat.com/discover/provider/mistral)**: Mistral 提供先进的通用、专业和研究型模型,广泛应用于复杂推理、多语言任务、代码生成等领域,通过功能调用接口,用户可以集成自定义功能,实现特定应用。
+ - **[ModelScope](https://lobechat.com/discover/provider/modelscope)**:
  - **[Ai21Labs](https://lobechat.com/discover/provider/ai21)**: AI21 Labs 为企业构建基础模型和人工智能系统,加速生成性人工智能在生产中的应用。
  - **[Upstage](https://lobechat.com/discover/provider/upstage)**: Upstage 专注于为各种商业需求开发 AI 模型,包括 Solar LLM 和文档 AI,旨在实现工作的人造通用智能(AGI)。通过 Chat API 创建简单的对话代理,并支持功能调用、翻译、嵌入以及特定领域应用。
  - **[xAI](https://lobechat.com/discover/provider/xai)**: xAI 是一家致力于构建人工智能以加速人类科学发现的公司。我们的使命是推动我们对宇宙的共同理解。
@@ -232,7 +233,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片

  </details>

- > 📊 Total providers: [<kbd>**41**</kbd>](https://lobechat.com/discover/providers)
+ > 📊 Total providers: [<kbd>**42**</kbd>](https://lobechat.com/discover/providers)

  <!-- PROVIDER LIST -->

package/changelog/v1.json CHANGED
@@ -1,4 +1,25 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Disable LaTeX and Mermaid rendering in SystemRoleContent to prevent lag caused by massive rendering tasks when switching topics, fix DeepSeek new R1 Search error."
+       ],
+       "improvements": [
+         "Use default deployment name when parseModelString doesn't contain deployment name."
+       ]
+     },
+     "date": "2025-06-01",
+     "version": "1.90.1"
+   },
+   {
+     "children": {
+       "features": [
+         "Support protect page."
+       ]
+     },
+     "date": "2025-06-01",
+     "version": "1.90.0"
+   },
    {
      "children": {
        "features": [
package/docker-compose/setup.sh CHANGED
@@ -8,10 +8,10 @@
  # ref: https://github.com/lobehub/lobe-chat/pull/5247
  if [[ "$OSTYPE" == "darwin"* ]]; then
      # macOS
-     SED_COMMAND="sed -i ''"
+     SED_INPLACE_ARGS=('-i' '')
  else
      # not macOS
-     SED_COMMAND="sed -i"
+     SED_INPLACE_ARGS=('-i')
  fi

  # ======================
@@ -519,12 +519,12 @@ section_configurate_host() {
  if [[ "$ask_result" == "y" ]]; then
  PROTOCOL="https"
  # Replace all http with https
- $SED_COMMAND "s#http://#https://#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#http://#https://#" .env
  fi
  fi

  # Check if sed is installed
- if ! command -v $SED_COMMAND &> /dev/null ; then
+ if ! command -v sed "${SED_INPLACE_ARGS[@]}" &> /dev/null ; then
  echo "sed" $(show_message "tips_no_executable")
  exit 1
  fi
@@ -553,7 +553,7 @@ section_configurate_host() {
  ask "(auth.example.com)"
  CASDOOR_HOST="$ask_result"
  # Setup callback url for Casdoor
- $SED_COMMAND "s/"example.com"/${LOBE_HOST}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"example.com"/${LOBE_HOST}/" init_data.json
  ;;
  1)
  DEPLOY_MODE="ip"
@@ -566,7 +566,7 @@ section_configurate_host() {
  MINIO_HOST="${HOST}:9000"
  CASDOOR_HOST="${HOST}:8000"
  # Setup callback url for Casdoor
- $SED_COMMAND "s/"localhost:3210"/${LOBE_HOST}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"localhost:3210"/${LOBE_HOST}/" init_data.json
  ;;
  *)
  echo "Invalid deploy mode: $ask_result"
@@ -575,14 +575,14 @@ section_configurate_host() {
  esac

  # lobe host
- $SED_COMMAND "s#^APP_URL=.*#APP_URL=$PROTOCOL://$LOBE_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^APP_URL=.*#APP_URL=$PROTOCOL://$LOBE_HOST#" .env
  # auth related
- $SED_COMMAND "s#^AUTH_URL=.*#AUTH_URL=$PROTOCOL://$LOBE_HOST/api/auth#" .env
- $SED_COMMAND "s#^AUTH_CASDOOR_ISSUER=.*#AUTH_CASDOOR_ISSUER=$PROTOCOL://$CASDOOR_HOST#" .env
- $SED_COMMAND "s#^origin=.*#origin=$PROTOCOL://$CASDOOR_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_URL=.*#AUTH_URL=$PROTOCOL://$LOBE_HOST/api/auth#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_CASDOOR_ISSUER=.*#AUTH_CASDOOR_ISSUER=$PROTOCOL://$CASDOOR_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^origin=.*#origin=$PROTOCOL://$CASDOOR_HOST#" .env
  # s3 related
- $SED_COMMAND "s#^S3_PUBLIC_DOMAIN=.*#S3_PUBLIC_DOMAIN=$PROTOCOL://$MINIO_HOST#" .env
- $SED_COMMAND "s#^S3_ENDPOINT=.*#S3_ENDPOINT=$PROTOCOL://$MINIO_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^S3_PUBLIC_DOMAIN=.*#S3_PUBLIC_DOMAIN=$PROTOCOL://$MINIO_HOST#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^S3_ENDPOINT=.*#S3_ENDPOINT=$PROTOCOL://$MINIO_HOST#" .env


  # Check if env modified success
@@ -641,12 +641,12 @@ section_regenerate_secrets() {
  echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_SECRET"
  else
  # Search and replace the value of CASDOOR_SECRET in .env
- $SED_COMMAND "s#^AUTH_CASDOOR_SECRET=.*#AUTH_CASDOOR_SECRET=${CASDOOR_SECRET}#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^AUTH_CASDOOR_SECRET=.*#AUTH_CASDOOR_SECRET=${CASDOOR_SECRET}#" .env
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "AUTH_CASDOOR_SECRET in \`.env\`"
  fi
  # replace `clientSecrect` in init_data.json
- $SED_COMMAND "s#dbf205949d704de81b0b5b3603174e23fbecc354#${CASDOOR_SECRET}#" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s#dbf205949d704de81b0b5b3603174e23fbecc354#${CASDOOR_SECRET}#" init_data.json
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "AUTH_CASDOOR_SECRET in \`init_data.json\`"
  fi
@@ -660,7 +660,7 @@ section_regenerate_secrets() {
  CASDOOR_PASSWORD="123"
  else
  # replace `password` in init_data.json
- $SED_COMMAND "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_PASSWORD in \`init_data.json\`"
  fi
@@ -672,7 +672,7 @@ section_regenerate_secrets() {
  MINIO_ROOT_PASSWORD="YOUR_MINIO_PASSWORD"
  else
  # Search and replace the value of S3_SECRET_ACCESS_KEY in .env
- $SED_COMMAND "s#^MINIO_ROOT_PASSWORD=.*#MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}#" .env
+ sed "${SED_INPLACE_ARGS[@]}" "s#^MINIO_ROOT_PASSWORD=.*#MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}#" .env
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "MINIO_ROOT_PASSWORD in \`.env\`"
  fi
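A note on the setup.sh change above: storing a command in a single string (`SED_COMMAND="sed -i ''"`) loses the empty `''` argument that BSD/macOS `sed -i` requires once the shell re-splits the string on whitespace, so the script now keeps only the flags in an array and expands it with `"${SED_INPLACE_ARGS[@]}"`. The same pitfall exists when shelling out from Node; a minimal TypeScript analogue (the `sedReplace` helper is hypothetical, not part of this package):

```ts
import { spawnSync } from 'node:child_process';

// Keep the in-place flag and its (possibly empty) suffix argument in an
// array: re-splitting a string like "sed -i ''" on whitespace would lose
// the empty '' argument that BSD/macOS sed requires after -i.
const sedInPlaceArgs = process.platform === 'darwin' ? ['-i', ''] : ['-i'];

// Hypothetical helper (not part of the package): run one in-place substitution.
const sedReplace = (expression: string, file: string) => {
  const result = spawnSync('sed', [...sedInPlaceArgs, expression, file], { stdio: 'inherit' });
  if (result.status !== 0) throw new Error(`sed failed on ${file}`);
};

sedReplace('s#^APP_URL=.*#APP_URL=https://chat.example.com#', '.env');
```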
package/docs/self-hosting/advanced/model-list.mdx CHANGED
@@ -19,6 +19,7 @@ You can use `+` to add a model, `-` to hide a model, and use `model name->deploy
  ```text
  id->deploymentName=displayName<maxToken:vision:reasoning:search:fc:file:imageOutput>,model2,model3
  ```
+ The deploymentName `->deploymentName` can be omitted, and it defaults to the latest model version. Currently, the model service providers that support `->deploymentName` are: Azure and Volcengine.

  For example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`

package/docs/self-hosting/advanced/model-list.zh-CN.mdx CHANGED
@@ -18,6 +18,7 @@ LobeChat 支持在部署时自定义模型列表,详情请参考 [模型提供
  ```text
  id->deploymentName=displayName<maxToken:vision:reasoning:search:fc:file:imageOutput>,model2,model3
  ```
+ 部署名`->deploymentName`可以省略,默认为最新版本的模型。当前支持`->deploymentName`的模型服务商有:Azure和Volcengine。

  例如: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`

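Both language versions of the doc describe the same entry syntax. As a rough TypeScript sketch of how one entry splits into its parts (illustrative only; the package's actual parser is `parseModelString` in `src/utils/parseModels.ts`, whose diff appears later in this listing):

```ts
// Minimal sketch of splitting a single model-list entry; names are
// illustrative, the real parser is parseModelString in src/utils/parseModels.ts.
interface ParsedEntry {
  deploymentName?: string; // stays undefined when '->' is omitted
  displayName?: string;
  id: string;
}

const parseEntry = (entry: string): ParsedEntry => {
  // drop a leading +/- and the <...> abilities suffix for brevity
  const [core] = entry.replace(/^[+-]/, '').split('<');
  const [idPart, displayName] = core.split('=');
  const [id, deploymentName] = idPart.split('->');
  return { deploymentName, displayName, id };
};

parseEntry('deepseek-v3->deepseek-v3-250324=DeepSeek V3');
// => { deploymentName: 'deepseek-v3-250324', displayName: 'DeepSeek V3', id: 'deepseek-v3' }
parseEntry('deepseek-r1');
// => { deploymentName: undefined, displayName: undefined, id: 'deepseek-r1' }
```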
package/docs/self-hosting/environment-variables/basic.mdx CHANGED
@@ -146,6 +146,13 @@ For specific content, please refer to the [Feature Flags](/docs/self-hosting/adv
  - Default: -
  - Example: `198.18.1.62,224.0.0.3`

+ ### `ENABLE_AUTH_PROTECTION`
+
+ - Type: Optional
+ - Description: Controls whether to enable route protection. When set to `1`, all routes except public routes (like `/api/auth`, `/next-auth/*`, `/login`, `/signup`) will require authentication. When set to `0` or not set, only specific protected routes (like `/settings`, `/files`) will require authentication.
+ - Default: `0`
+ - Example: `1` or `0`
+
  ## Plugin Service

  ### `PLUGINS_INDEX_URL`
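The behavior this variable toggles is implemented in `src/middleware.ts` (also in this diff). A minimal sketch of the decision, assuming a simplified `matches` helper in place of the middleware's `createRouteMatcher` and abridged route lists:

```ts
// Sketch of the route-protection decision from src/middleware.ts;
// `matches` is a simplified stand-in for the createRouteMatcher helper.
const matches = (patterns: RegExp[]) => (pathname: string) =>
  patterns.some((pattern) => pattern.test(pathname));

const isPublicRoute = matches([/^\/api\/auth/, /^\/next-auth\//, /^\/login$/, /^\/signup$/]);
const isProtectedRoute = matches([/^\/settings/, /^\/files/]);

const needsAuth = (pathname: string, enableAuthProtection: boolean) =>
  // flag on: everything except the public routes requires authentication;
  // flag off: only the explicitly protected routes do
  enableAuthProtection ? !isPublicRoute(pathname) : isProtectedRoute(pathname);

needsAuth('/discover', false); // => false, open by default
needsAuth('/discover', true); // => true, only public routes stay open
```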
package/docs/self-hosting/environment-variables/basic.zh-CN.mdx CHANGED
@@ -123,7 +123,7 @@ LobeChat 在部署时提供了一些额外的配置项,你可以使用环境
  ### `ENABLE_PROXY_DNS`

  - 类型:可选
- - 描述:用于控制是否将DNS发送到代理服务器,配置为 `0` 时所有 DNS 查询在本地完成,当你的网络环境无法访问 API 或访问超时,请尝试将该项配置为 `1`。
+ - 描述:用于控制是否将 DNS 发送到代理服务器,配置为 `0` 时所有 DNS 查询在本地完成,当你的网络环境无法访问 API 或访问超时,请尝试将该项配置为 `1`。
  - 默认值:`0`
  - 示例:`1` or `0`

@@ -137,10 +137,17 @@ LobeChat 在部署时提供了一些额外的配置项,你可以使用环境
  ### `SSRF_ALLOW_IP_ADDRESS_LIST`

  - 类型:可选
- - 描述:允许连接的私有 IP 地址列表,多个 IP 地址时使用逗号分隔。当 `SSRF_ALLOW_PRIVATE_IP_ADDRESS` 为 `0` 时才会生效。
+ - 说明:允许的私有 IP 地址列表,多个 IP 地址用逗号分隔。仅在 `SSRF_ALLOW_PRIVATE_IP_ADDRESS` 为 `0` 时生效。
  - 默认值:-
  - 示例:`198.18.1.62,224.0.0.3`

+ ### `ENABLE_AUTH_PROTECTION`
+
+ - 类型:可选
+ - 说明:控制是否启用路由保护。当设置为 `1` 时,除了公共路由(如 `/api/auth`、`/next-auth/*`、`/login`、`/signup`)外,所有路由都需要认证。当设置为 `0` 或未设置时,只有特定的受保护路由(如 `/settings`、`/files` 等)需要认证。
+ - 默认值:`0`
+ - 示例:`1` 或 `0`
+
  ## 插件服务

  ### `PLUGINS_INDEX_URL`
package/docs/self-hosting/environment-variables/model-provider.mdx CHANGED
@@ -599,9 +599,9 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
  ### `VOLCENGINE_MODEL_LIST`

  - Type: Optional
- - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. The deploymentName `->deploymentName` can be omitted, and it defaults to the latest model version. Definition syntax rules see [model-list][model-list]
  - Default: `-`
- - Example: `-all,+deepseek-r1->deepseek-r1-250120,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k->doubao-1-5-pro-256k-250115,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k->doubao-1-5-lite-32k-250115`
+ - Example: `-all,+deepseek-r1,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k`

  ### `VOLCENGINE_PROXY_URL`

@@ -622,7 +622,7 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
  ### `INFINIAI_MODEL_LIST`

  - Type: Optional
- - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name->deploymentName=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
  - Default: `-`
  - Example: `-all,+qwq-32b,+deepseek-r1`

package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx CHANGED
@@ -597,9 +597,9 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
  ### `VOLCENGINE_MODEL_LIST`

  - 类型:可选
- - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名->部署名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名->部署名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。部署名`->部署名`可以省略,默认为最新版本的模型。模型定义语法规则见 [模型列表][model-list]
  - 默认值:`-`
- - 示例:`-all,+deepseek-r1->deepseek-r1-250120,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k->doubao-1-5-pro-256k-250115,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k->doubao-1-5-lite-32k-250115`
+ - 示例:`-all,+deepseek-r1,+deepseek-v3->deepseek-v3-250324,+doubao-1.5-pro-256k,+doubao-1.5-pro-32k->doubao-1-5-pro-32k-250115,+doubao-1.5-lite-32k`

  ### `VOLCENGINE_PROXY_URL`

@@ -620,7 +620,7 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
  ### `INFINIAI_MODEL_LIST`

  - 类型:可选
- - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名->部署名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
  - 默认值:`-`
  - 示例:`-all,+qwq-32b,+deepseek-r1`

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.89.0",
+   "version": "1.90.1",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
package/src/app/(backend)/_deprecated/createBizOpenAI/auth.ts CHANGED
@@ -1,4 +1,4 @@
- import { getAppConfig } from '@/config/app';
+ import { getAppConfig } from '@/envs/app';
  import { ChatErrorType } from '@/types/fetch';

  interface AuthConfig {
package/src/app/(backend)/middleware/auth/utils.test.ts CHANGED
@@ -1,7 +1,7 @@
  import { type AuthObject } from '@clerk/backend';
  import { beforeEach, describe, expect, it, vi } from 'vitest';

- import { getAppConfig } from '@/config/app';
+ import { getAppConfig } from '@/envs/app';

  import { checkAuthMethod } from './utils';

@@ -22,7 +22,7 @@ vi.mock('@/const/auth', async (importOriginal) => {
  };
  });

- vi.mock('@/config/app', () => ({
+ vi.mock('@/envs/app', () => ({
    getAppConfig: vi.fn(),
  }));

package/src/app/(backend)/middleware/auth/utils.ts CHANGED
@@ -1,7 +1,7 @@
  import { type AuthObject } from '@clerk/backend';

- import { getAppConfig } from '@/config/app';
  import { enableClerk, enableNextAuth } from '@/const/auth';
+ import { getAppConfig } from '@/envs/app';
  import { AgentRuntimeError } from '@/libs/model-runtime';
  import { ChatErrorType } from '@/types/fetch';

package/src/app/(backend)/webapi/plugin/gateway/route.ts CHANGED
@@ -1,9 +1,9 @@
  import { PluginRequestPayload } from '@lobehub/chat-plugin-sdk';
  import { createGatewayOnEdgeRuntime } from '@lobehub/chat-plugins-gateway';

- import { getAppConfig } from '@/config/app';
  import { LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED, enableNextAuth } from '@/const/auth';
  import { LOBE_CHAT_TRACE_ID, TraceNameMap } from '@/const/trace';
+ import { getAppConfig } from '@/envs/app';
  import { AgentRuntimeError } from '@/libs/model-runtime';
  import { TraceClient } from '@/libs/traces';
  import { ChatErrorType, ErrorType } from '@/types/fetch';
package/src/app/(backend)/webapi/proxy/route.ts CHANGED
@@ -2,7 +2,7 @@ import { NextResponse } from 'next/server';
  import fetch from 'node-fetch';
  import { RequestFilteringAgentOptions, useAgent as ssrfAgent } from 'request-filtering-agent';

- import { appEnv } from '@/config/app';
+ import { appEnv } from '@/envs/app';

  /**
   * just for a proxy
package/src/app/[variants]/(main)/chat/(workspace)/@topic/features/SystemRole/SystemRoleContent.tsx CHANGED
@@ -107,6 +107,7 @@ const SystemRole = memo(() => {
  <EditableMessage
    classNames={{ markdown: styles.prompt }}
    editing={editing}
+   markdownProps={{ enableLatex: false, enableMermaid: false }}
    model={{
      extra: (
        <AgentInfo
package/src/app/[variants]/metadata.ts CHANGED
@@ -1,8 +1,8 @@
- import { appEnv } from '@/config/app';
  import { BRANDING_LOGO_URL, BRANDING_NAME, ORG_NAME } from '@/const/branding';
  import { DEFAULT_LANG } from '@/const/locale';
  import { OFFICIAL_URL, OG_URL } from '@/const/url';
  import { isCustomBranding, isCustomORG } from '@/const/version';
+ import { appEnv } from '@/envs/app';
  import { translation } from '@/server/translation';
  import { DynamicLayoutProps } from '@/types/next';
  import { RouteVariants } from '@/utils/server/routeVariants';
package/src/components/Error/sentryCaptureException.ts CHANGED
@@ -1,4 +1,4 @@
- import { appEnv } from '@/config/app';
+ import { appEnv } from '@/envs/app';

  export type ErrorType = Error & { digest?: string };

package/src/config/__tests__/app.test.ts CHANGED
@@ -1,7 +1,7 @@
  // @vitest-environment node
  import { beforeEach, describe, expect, it, vi } from 'vitest';

- import { getAppConfig } from '../app';
+ import { getAppConfig } from '../../envs/app';

  // Stub the global process object to safely mock environment variables
  vi.stubGlobal('process', {
package/src/{config → envs}/app.ts CHANGED
@@ -49,6 +49,7 @@ export const getAppConfig = () => {
    APP_URL: z.string().optional(),
    VERCEL_EDGE_CONFIG: z.string().optional(),
    MIDDLEWARE_REWRITE_THROUGH_LOCAL: z.boolean().optional(),
+   ENABLE_AUTH_PROTECTION: z.boolean().optional(),

    CDN_USE_GLOBAL: z.boolean().optional(),
    CUSTOM_FONT_FAMILY: z.string().optional(),
@@ -82,6 +83,7 @@ export const getAppConfig = () => {

    APP_URL,
    MIDDLEWARE_REWRITE_THROUGH_LOCAL: process.env.MIDDLEWARE_REWRITE_THROUGH_LOCAL === '1',
+   ENABLE_AUTH_PROTECTION: process.env.ENABLE_AUTH_PROTECTION === '1',

    CUSTOM_FONT_FAMILY: process.env.CUSTOM_FONT_FAMILY,
    CUSTOM_FONT_URL: process.env.CUSTOM_FONT_URL,
package/src/layout/GlobalProvider/index.tsx CHANGED
@@ -1,7 +1,7 @@
  import { ReactNode, Suspense } from 'react';

- import { appEnv } from '@/config/app';
  import { getServerFeatureFlagsValue } from '@/config/featureFlags';
+ import { appEnv } from '@/envs/app';
  import DevPanel from '@/features/DevPanel';
  import { getServerGlobalConfig } from '@/server/globalConfig';
  import { ServerConfigStoreProvider } from '@/store/serverConfig/Provider';
package/src/libs/model-runtime/deepseek/index.test.ts CHANGED
@@ -1,12 +1,5 @@
  // @vitest-environment node
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
- import {
-   ChatStreamPayload,
-   LLMRoleType,
-   LobeOpenAICompatibleRuntime,
-   ModelProvider,
- } from '@/libs/model-runtime';
+ import { ModelProvider } from '@/libs/model-runtime';
  import { testProvider } from '@/libs/model-runtime/providerTestUtils';

  import { LobeDeepSeekAI } from './index';
@@ -24,151 +17,3 @@ testProvider({
    skipAPICall: true,
  },
  });
-
- let instance: LobeOpenAICompatibleRuntime;
-
- const createDeepSeekAIInstance = () => new LobeDeepSeekAI({ apiKey: 'test' });
-
- const mockSuccessfulChatCompletion = () => {
-   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue({
-     id: 'cmpl-mock',
-     object: 'chat.completion',
-     created: Date.now(),
-     choices: [
-       { index: 0, message: { role: 'assistant', content: 'Mock response' }, finish_reason: 'stop' },
-     ],
-   } as any);
- };
-
- beforeEach(() => {
-   instance = new LobeDeepSeekAI({ apiKey: 'test' });
-
-   // 使用 vi.spyOn 来模拟 chat.completions.create 方法
-   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-     new ReadableStream() as any,
-   );
- });
-
- afterEach(() => {
-   vi.clearAllMocks();
- });
-
- describe('LobeDeepSeekAI', () => {
-   describe('deepseek-reasoner', () => {
-     beforeEach(() => {
-       instance = createDeepSeekAIInstance();
-       mockSuccessfulChatCompletion();
-     });
-
-     it('should insert a user message if the first message is from assistant', async () => {
-       const payloadMessages = [{ content: 'Hello', role: 'assistant' as LLMRoleType }];
-       const expectedMessages = [{ content: '', role: 'user' }, ...payloadMessages];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-
-     it('should insert a user message if the first message is from assistant (with system summary)', async () => {
-       const payloadMessages = [
-         { content: 'System summary', role: 'system' as LLMRoleType },
-         { content: 'Hello', role: 'assistant' as LLMRoleType },
-       ];
-       const expectedMessages = [
-         { content: 'System summary', role: 'system' },
-         { content: '', role: 'user' },
-         { content: 'Hello', role: 'assistant' },
-       ];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-
-     it('should insert alternating roles if messages do not alternate', async () => {
-       const payloadMessages = [
-         { content: 'user1', role: 'user' as LLMRoleType },
-         { content: 'user2', role: 'user' as LLMRoleType },
-         { content: 'assistant1', role: 'assistant' as LLMRoleType },
-         { content: 'assistant2', role: 'assistant' as LLMRoleType },
-       ];
-       const expectedMessages = [
-         { content: 'user1', role: 'user' },
-         { content: '', role: 'assistant' },
-         { content: 'user2', role: 'user' },
-         { content: 'assistant1', role: 'assistant' },
-         { content: '', role: 'user' },
-         { content: 'assistant2', role: 'assistant' },
-       ];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-
-     it('complex condition', async () => {
-       const payloadMessages = [
-         { content: 'system', role: 'system' as LLMRoleType },
-         { content: 'assistant', role: 'assistant' as LLMRoleType },
-         { content: 'user1', role: 'user' as LLMRoleType },
-         { content: 'user2', role: 'user' as LLMRoleType },
-         { content: 'user3', role: 'user' as LLMRoleType },
-         { content: 'assistant1', role: 'assistant' as LLMRoleType },
-         { content: 'assistant2', role: 'assistant' as LLMRoleType },
-       ];
-       const expectedMessages = [
-         { content: 'system', role: 'system' },
-         { content: '', role: 'user' },
-         { content: 'assistant', role: 'assistant' },
-         { content: 'user1', role: 'user' },
-         { content: '', role: 'assistant' },
-         { content: 'user2', role: 'user' },
-         { content: '', role: 'assistant' },
-         { content: 'user3', role: 'user' },
-         { content: 'assistant1', role: 'assistant' },
-         { content: '', role: 'user' },
-         { content: 'assistant2', role: 'assistant' },
-       ];
-
-       const payload: ChatStreamPayload = {
-         messages: payloadMessages,
-         model: 'deepseek-reasoner',
-         temperature: 0,
-       };
-
-       await instance.chat(payload);
-
-       expect(instance['client'].chat.completions.create).toHaveBeenCalled();
-       const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
-       const actualMessages = actualArgs[0].messages;
-       expect(actualMessages).toEqual(expectedMessages);
-     });
-   });
- });
package/src/libs/model-runtime/deepseek/index.ts CHANGED
@@ -1,8 +1,6 @@
- import OpenAI from 'openai';
-
  import type { ChatModelCard } from '@/types/llm';

- import { ChatStreamPayload, ModelProvider } from '../types';
+ import { ModelProvider } from '../types';
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

  export interface DeepSeekModelCard {
@@ -11,59 +9,6 @@ export interface DeepSeekModelCard {

  export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
    baseURL: 'https://api.deepseek.com/v1',
-   chatCompletion: {
-     handlePayload: ({
-       frequency_penalty,
-       messages,
-       model,
-       presence_penalty,
-       temperature,
-       top_p,
-       ...payload
-     }: ChatStreamPayload) => {
-       // github.com/lobehub/lobe-chat/pull/5548
-       let filteredMessages = messages.filter((message) => message.role !== 'system');
-
-       if (filteredMessages.length > 0 && filteredMessages[0].role === 'assistant') {
-         filteredMessages.unshift({ content: '', role: 'user' });
-       }
-
-       let lastRole = '';
-       for (let i = 0; i < filteredMessages.length; i++) {
-         const message = filteredMessages[i];
-         if (message.role === lastRole) {
-           const newRole = lastRole === 'assistant' ? 'user' : 'assistant';
-           filteredMessages.splice(i, 0, { content: '', role: newRole });
-           i++;
-         }
-         lastRole = message.role;
-       }
-
-       if (messages.length > 0 && messages[0].role === 'system') {
-         filteredMessages.unshift(messages[0]);
-       }
-
-       return {
-         ...payload,
-         model,
-         ...(model === 'deepseek-reasoner'
-           ? {
-               frequency_penalty: undefined,
-               messages: filteredMessages,
-               presence_penalty: undefined,
-               temperature: undefined,
-               top_p: undefined,
-             }
-           : {
-               frequency_penalty,
-               messages,
-               presence_penalty,
-               temperature,
-               top_p,
-             }),
-       } as OpenAI.ChatCompletionCreateParamsStreaming;
-     },
-   },
    debug: {
      chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
    },
package/src/libs/oidc-provider/http-adapter.ts CHANGED
@@ -4,7 +4,7 @@ import { NextRequest } from 'next/server';
  import { IncomingMessage, ServerResponse } from 'node:http';
  import urlJoin from 'url-join';

- import { appEnv } from '@/config/app';
+ import { appEnv } from '@/envs/app';

  const log = debug('lobe-oidc:http-adapter');

package/src/libs/oidc-provider/provider.ts CHANGED
@@ -2,10 +2,10 @@ import debug from 'debug';
  import Provider, { Configuration, KoaContextWithOIDC } from 'oidc-provider';
  import urlJoin from 'url-join';

- import { appEnv } from '@/config/app';
  import { serverDBEnv } from '@/config/db';
  import { UserModel } from '@/database/models/user';
  import { LobeChatDatabase } from '@/database/type';
+ import { appEnv } from '@/envs/app';
  import { oidcEnv } from '@/envs/oidc';

  import { DrizzleAdapter } from './adapter';
package/src/middleware.ts CHANGED
@@ -4,10 +4,10 @@ import { NextRequest, NextResponse } from 'next/server';
  import { UAParser } from 'ua-parser-js';
  import urlJoin from 'url-join';

- import { appEnv } from '@/config/app';
  import { authEnv } from '@/config/auth';
  import { LOBE_LOCALE_COOKIE } from '@/const/locale';
  import { LOBE_THEME_APPEARANCE } from '@/const/theme';
+ import { appEnv } from '@/envs/app';
  import NextAuthEdge from '@/libs/next-auth/edge';
  import { Locales } from '@/locales/resources';
  import { parseBrowserLanguage } from '@/utils/locale';
@@ -134,6 +134,16 @@ const defaultMiddleware = (request: NextRequest) => {
    return NextResponse.rewrite(url, { status: 200 });
  };

+ const isPublicRoute = createRouteMatcher([
+   '/api/auth(.*)',
+   '/trpc/edge(.*)',
+   // next auth
+   '/next-auth/(.*)',
+   // clerk
+   '/login',
+   '/signup',
+ ]);
+
  const isProtectedRoute = createRouteMatcher([
    '/settings(.*)',
    '/files(.*)',
@@ -148,7 +158,9 @@ const nextAuthMiddleware = NextAuthEdge.auth((req) => {

  const response = defaultMiddleware(req);

- const isProtected = isProtectedRoute(req);
+ // when enable auth protection, only public route is not protected, others are all protected
+ const isProtected = appEnv.ENABLE_AUTH_PROTECTION ? !isPublicRoute(req) : isProtectedRoute(req);
+
  logNextAuth('Route protection status: %s, %s', req.url, isProtected ? 'protected' : 'public');

  // Just check if session exists
@@ -181,7 +193,7 @@ const nextAuthMiddleware = NextAuthEdge.auth((req) => {
  if (isProtected) {
    logNextAuth('Request a protected route, redirecting to sign-in page');
    const nextLoginUrl = new URL('/next-auth/signin', req.nextUrl.origin);
-   nextLoginUrl.searchParams.set('callbackUrl', req.nextUrl.pathname);
+   nextLoginUrl.searchParams.set('callbackUrl', req.nextUrl.href);
    return Response.redirect(nextLoginUrl);
  }
  logNextAuth('Request a free route but not login, allow visit without auth header');
@@ -229,6 +241,7 @@ const clerkAuthMiddleware = clerkMiddleware(
  );

  logDefault('Middleware configuration: %O', {
+   enableAuthProtection: appEnv.ENABLE_AUTH_PROTECTION,
    enableClerk: authEnv.NEXT_PUBLIC_ENABLE_CLERK_AUTH,
    enableNextAuth: authEnv.NEXT_PUBLIC_ENABLE_NEXT_AUTH,
    enableOIDC: oidcEnv.ENABLE_OIDC,
package/src/server/globalConfig/index.test.ts CHANGED
@@ -1,7 +1,7 @@
  import { describe, expect, it, vi } from 'vitest';

- import { getAppConfig } from '@/config/app';
  import { knowledgeEnv } from '@/config/knowledge';
+ import { getAppConfig } from '@/envs/app';
  import { SystemEmbeddingConfig } from '@/types/knowledgeBase';
  import { FilesConfigItem } from '@/types/user/settings/filesConfig';

@@ -9,7 +9,7 @@ import { getServerDefaultAgentConfig, getServerDefaultFilesConfig } from './inde
  import { parseAgentConfig } from './parseDefaultAgent';
  import { parseFilesConfig } from './parseFilesConfig';

- vi.mock('@/config/app', () => ({
+ vi.mock('@/envs/app', () => ({
    getAppConfig: vi.fn(),
  }));

package/src/server/globalConfig/index.ts CHANGED
@@ -1,10 +1,10 @@
- import { appEnv, getAppConfig } from '@/config/app';
  import { authEnv } from '@/config/auth';
  import { fileEnv } from '@/config/file';
  import { knowledgeEnv } from '@/config/knowledge';
  import { langfuseEnv } from '@/config/langfuse';
  import { enableNextAuth } from '@/const/auth';
  import { isDesktop } from '@/const/version';
+ import { appEnv, getAppConfig } from '@/envs/app';
  import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
  import { GlobalServerConfig } from '@/types/serverConfig';

package/src/server/modules/AssistantStore/index.ts CHANGED
@@ -1,7 +1,7 @@
  import urlJoin from 'url-join';

- import { appEnv } from '@/config/app';
  import { DEFAULT_LANG, isLocaleNotSupport } from '@/const/locale';
+ import { appEnv } from '@/envs/app';
  import { Locales, normalizeLocale } from '@/locales/resources';
  import { EdgeConfig } from '@/server/modules/EdgeConfig';
  import { AgentStoreIndex } from '@/types/discover';
package/src/server/modules/EdgeConfig/index.ts CHANGED
@@ -1,6 +1,6 @@
  import { EdgeConfigClient, createClient } from '@vercel/edge-config';

- import { appEnv } from '@/config/app';
+ import { appEnv } from '@/envs/app';

  const EdgeConfigKeys = {
    /**
package/src/server/modules/PluginStore/index.ts CHANGED
@@ -1,7 +1,7 @@
  import urlJoin from 'url-join';

- import { appEnv } from '@/config/app';
  import { DEFAULT_LANG, isLocaleNotSupport } from '@/const/locale';
+ import { appEnv } from '@/envs/app';
  import { Locales, normalizeLocale } from '@/locales/resources';

  export class PluginStore {
package/src/server/routers/async/caller.ts CHANGED
@@ -2,9 +2,9 @@ import { createTRPCClient, httpBatchLink } from '@trpc/client';
  import superjson from 'superjson';
  import urlJoin from 'url-join';

- import { appEnv } from '@/config/app';
  import { serverDBEnv } from '@/config/db';
  import { JWTPayload, LOBE_CHAT_AUTH_HEADER } from '@/const/auth';
+ import { appEnv } from '@/envs/app';
  import { KeyVaultsGateKeeper } from '@/server/modules/KeyVaultsEncrypt';

  import type { AsyncRouter } from './index';
package/src/server/services/agent/index.test.ts CHANGED
@@ -6,7 +6,7 @@ import { parseAgentConfig } from '@/server/globalConfig/parseDefaultAgent';

  import { AgentService } from './index';

- vi.mock('@/config/app', () => ({
+ vi.mock('@/envs/app', () => ({
    appEnv: {
      DEFAULT_AGENT_CONFIG: 'model=gpt-4;temperature=0.7',
    },
package/src/utils/basePath.ts CHANGED
@@ -1,3 +1,3 @@
- import { appEnv } from '@/config/app';
+ import { appEnv } from '@/envs/app';

  export const withBasePath = (path: string) => appEnv.NEXT_PUBLIC_BASE_PATH + path;
package/src/utils/parseModels.test.ts CHANGED
@@ -306,16 +306,13 @@ describe('parseModelString', () => {
  });

  describe('deployment name', () => {
-   it('should have same deployment name as id', () => {
+   it('should have no deployment name', () => {
      const result = parseModelString('model1=Model 1', true);
      expect(result.add[0]).toEqual({
        id: 'model1',
        displayName: 'Model 1',
        abilities: {},
        type: 'chat',
-       config: {
-         deploymentName: 'model1',
-       },
      });
    });

@@ -455,6 +452,61 @@ describe('transformToChatModelCards', () => {
    expect(result).toMatchSnapshot();
  });

+ it('should use default deploymentName from known model when not specified in string (VolcEngine case)', () => {
+   const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+     (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
+   );
+   const defaultChatModels: AiFullModelCard[] = [];
+   const result = transformToAiChatModelList({
+     modelString: '+deepseek-r1',
+     defaultChatModels,
+     providerId: 'volcengine',
+     withDeploymentName: true,
+   });
+   expect(result).toContainEqual({
+     ...knownModel,
+     enabled: true,
+   });
+ });
+
+ it('should use deploymentName from modelString when specified (VolcEngine case)', () => {
+   const defaultChatModels: AiFullModelCard[] = [];
+   const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+     (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
+   );
+   const result = transformToAiChatModelList({
+     modelString: `+deepseek-r1->my-custom-deploy`,
+     defaultChatModels,
+     providerId: 'volcengine',
+     withDeploymentName: true,
+   });
+   expect(result).toContainEqual({
+     ...knownModel,
+     enabled: true,
+     config: { deploymentName: 'my-custom-deploy' },
+   });
+ });
+
+ it('should set both id and deploymentName to the full string when no -> is used and withDeploymentName is true', () => {
+   const defaultChatModels: AiFullModelCard[] = [];
+   const result = transformToAiChatModelList({
+     modelString: `+my_model`,
+     defaultChatModels,
+     providerId: 'volcengine',
+     withDeploymentName: true,
+   });
+   expect(result).toContainEqual({
+     id: `my_model`,
+     displayName: `my_model`,
+     type: 'chat',
+     abilities: {},
+     enabled: true,
+     config: {
+       deploymentName: `my_model`,
+     },
+   });
+ });
+
  it('should handle azure real case', () => {
    const defaultChatModels = [
      {
package/src/utils/parseModels.ts CHANGED
@@ -23,7 +23,7 @@ export const parseModelString = (modelString: string = '', withDeploymentName =

  if (withDeploymentName) {
    [id, deploymentName] = id.split('->');
-   if (!deploymentName) deploymentName = id;
+   // if (!deploymentName) deploymentName = id;
  }

  if (disable) {
@@ -141,6 +141,12 @@ export const transformToAiChatModelList = ({
    knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === toAddModel.id);
    if (knownModel) knownModel.providerId = providerId;
  }
+ if (withDeploymentName) {
+   toAddModel.config = toAddModel.config || {};
+   if (!toAddModel.config.deploymentName) {
+     toAddModel.config.deploymentName = knownModel?.config?.deploymentName ?? toAddModel.id;
+   }
+ }

  // if the model is known, update it based on the known model
  if (knownModel) {
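Taken together, the two hunks move the deployment-name default out of `parseModelString` and into `transformToAiChatModelList`, which now prefers an explicit `->deploymentName`, then a known model's built-in deployment name, then the model id itself. A hedged summary of that precedence (not the package's code verbatim; the known-model value below reuses the former docs example for Volcengine's deepseek-r1):

```ts
// Illustrative precedence for the final deploymentName when
// withDeploymentName is set; a summary sketch, not the package's code.
const resolveDeploymentName = (
  fromModelString: string | undefined, // explicit `id->deployment` in the env value
  fromKnownModel: string | undefined, // built-in default from LOBE_DEFAULT_MODEL_LIST
  modelId: string,
) => fromModelString ?? fromKnownModel ?? modelId;

resolveDeploymentName('my-custom-deploy', 'deepseek-r1-250120', 'deepseek-r1');
// => 'my-custom-deploy'
resolveDeploymentName(undefined, 'deepseek-r1-250120', 'deepseek-r1');
// => 'deepseek-r1-250120' (the documented "defaults to the latest model version")
resolveDeploymentName(undefined, undefined, 'my_model');
// => 'my_model'
```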
package/src/utils/server/jwt.test.ts CHANGED
@@ -21,7 +21,7 @@ vi.mock('@/const/auth', async (importOriginal) => {
  };
  });

- vi.mock('@/config/app', () => ({
+ vi.mock('@/envs/app', () => ({
    getAppConfig: vi.fn(),
  }));