@lobehub/chat 1.88.23 → 1.90.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. package/CHANGELOG.md +50 -0
  2. package/Dockerfile +2 -0
  3. package/Dockerfile.database +2 -0
  4. package/Dockerfile.pglite +2 -0
  5. package/README.md +3 -2
  6. package/README.zh-CN.md +3 -2
  7. package/changelog/v1.json +18 -0
  8. package/docs/self-hosting/environment-variables/basic.mdx +7 -0
  9. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +9 -2
  10. package/docs/usage/providers/modelscope.mdx +113 -0
  11. package/docs/usage/providers/modelscope.zh-CN.mdx +133 -0
  12. package/package.json +1 -1
  13. package/src/app/(backend)/_deprecated/createBizOpenAI/auth.ts +1 -1
  14. package/src/app/(backend)/middleware/auth/utils.test.ts +2 -2
  15. package/src/app/(backend)/middleware/auth/utils.ts +1 -1
  16. package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -1
  17. package/src/app/(backend)/webapi/proxy/route.ts +1 -1
  18. package/src/app/[variants]/metadata.ts +1 -1
  19. package/src/components/Error/sentryCaptureException.ts +1 -1
  20. package/src/config/__tests__/app.test.ts +1 -1
  21. package/src/config/aiModels/index.ts +3 -0
  22. package/src/config/aiModels/modelscope.ts +63 -0
  23. package/src/config/llm.ts +6 -0
  24. package/src/config/modelProviders/index.ts +4 -0
  25. package/src/config/modelProviders/modelscope.ts +62 -0
  26. package/src/{config → envs}/app.ts +2 -0
  27. package/src/layout/GlobalProvider/index.tsx +1 -1
  28. package/src/libs/model-runtime/modelscope/index.ts +69 -0
  29. package/src/libs/model-runtime/runtimeMap.ts +6 -4
  30. package/src/libs/model-runtime/types/type.ts +1 -0
  31. package/src/libs/oidc-provider/http-adapter.ts +1 -1
  32. package/src/libs/oidc-provider/provider.ts +1 -1
  33. package/src/middleware.ts +16 -3
  34. package/src/server/globalConfig/index.test.ts +2 -2
  35. package/src/server/globalConfig/index.ts +1 -1
  36. package/src/server/modules/AssistantStore/index.ts +1 -1
  37. package/src/server/modules/EdgeConfig/index.ts +1 -1
  38. package/src/server/modules/PluginStore/index.ts +1 -1
  39. package/src/server/routers/async/caller.ts +1 -1
  40. package/src/server/services/agent/index.test.ts +1 -1
  41. package/src/types/user/settings/keyVaults.ts +1 -0
  42. package/src/utils/basePath.ts +1 -1
  43. package/src/utils/server/jwt.test.ts +1 -1
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.90.0](https://github.com/lobehub/lobe-chat/compare/v1.89.0...v1.90.0)
6
+
7
+ <sup>Released on **2025-06-01**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Support protect page.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Support protect page, closes [#8024](https://github.com/lobehub/lobe-chat/issues/8024) ([d61a9f5](https://github.com/lobehub/lobe-chat/commit/d61a9f5))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ## [Version 1.89.0](https://github.com/lobehub/lobe-chat/compare/v1.88.23...v1.89.0)
31
+
32
+ <sup>Released on **2025-06-01**</sup>
33
+
34
+ #### ✨ Features
35
+
36
+ - **misc**: Support ModelScope Provider.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's improved
44
+
45
+ - **misc**: Support ModelScope Provider, closes [#8026](https://github.com/lobehub/lobe-chat/issues/8026) ([7b91dfd](https://github.com/lobehub/lobe-chat/commit/7b91dfd))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ### [Version 1.88.23](https://github.com/lobehub/lobe-chat/compare/v1.88.22...v1.88.23)
6
56
 
7
57
  <sup>Released on **2025-05-31**</sup>
package/Dockerfile CHANGED
@@ -186,6 +186,8 @@ ENV \
186
186
  MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
187
187
  # Mistral
188
188
  MISTRAL_API_KEY="" MISTRAL_MODEL_LIST="" \
189
+ # ModelScope
190
+ MODELSCOPE_API_KEY="" MODELSCOPE_MODEL_LIST="" MODELSCOPE_PROXY_URL="" \
189
191
  # Moonshot
190
192
  MOONSHOT_API_KEY="" MOONSHOT_MODEL_LIST="" MOONSHOT_PROXY_URL="" \
191
193
  # Novita
@@ -230,6 +230,8 @@ ENV \
230
230
  MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
231
231
  # Mistral
232
232
  MISTRAL_API_KEY="" MISTRAL_MODEL_LIST="" \
233
+ # ModelScope
234
+ MODELSCOPE_API_KEY="" MODELSCOPE_MODEL_LIST="" MODELSCOPE_PROXY_URL="" \
233
235
  # Moonshot
234
236
  MOONSHOT_API_KEY="" MOONSHOT_MODEL_LIST="" MOONSHOT_PROXY_URL="" \
235
237
  # Novita
package/Dockerfile.pglite CHANGED
@@ -188,6 +188,8 @@ ENV \
188
188
  MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
189
189
  # Mistral
190
190
  MISTRAL_API_KEY="" MISTRAL_MODEL_LIST="" \
191
+ # ModelScope
192
+ MODELSCOPE_API_KEY="" MODELSCOPE_MODEL_LIST="" MODELSCOPE_PROXY_URL="" \
191
193
  # Moonshot
192
194
  MOONSHOT_API_KEY="" MOONSHOT_MODEL_LIST="" MOONSHOT_PROXY_URL="" \
193
195
  # Novita
package/README.md CHANGED
@@ -196,7 +196,7 @@ We have implemented support for the following model service providers:
196
196
  - **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter is a service platform providing access to various cutting-edge large model interfaces, supporting OpenAI, Anthropic, LLaMA, and more, suitable for diverse development and application needs. Users can flexibly choose the optimal model and pricing based on their requirements, enhancing the AI experience.
197
197
  - **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: Run serverless GPU-powered machine learning models on Cloudflare's global network.
198
198
 
199
- <details><summary><kbd>See more providers (+31)</kbd></summary>
199
+ <details><summary><kbd>See more providers (+32)</kbd></summary>
200
200
 
201
201
  - **[GitHub](https://lobechat.com/discover/provider/github)**: With GitHub Models, developers can become AI engineers and leverage the industry's leading AI models.
202
202
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI is a platform providing a variety of large language models and AI image generation API services, flexible, reliable, and cost-effective. It supports the latest open-source models like Llama3 and Mistral, offering a comprehensive, user-friendly, and auto-scaling API solution for generative AI application development, suitable for the rapid growth of AI startups.
@@ -206,6 +206,7 @@ We have implemented support for the following model service providers:
206
206
  - **[Groq](https://lobechat.com/discover/provider/groq)**: Groq's LPU inference engine has excelled in the latest independent large language model (LLM) benchmarks, redefining the standards for AI solutions with its remarkable speed and efficiency. Groq represents instant inference speed, demonstrating strong performance in cloud-based deployments.
207
207
  - **[Perplexity](https://lobechat.com/discover/provider/perplexity)**: Perplexity is a leading provider of conversational generation models, offering various advanced Llama 3.1 models that support both online and offline applications, particularly suited for complex natural language processing tasks.
208
208
  - **[Mistral](https://lobechat.com/discover/provider/mistral)**: Mistral provides advanced general, specialized, and research models widely used in complex reasoning, multilingual tasks, and code generation. Through functional calling interfaces, users can integrate custom functionalities for specific applications.
209
+ - **[ModelScope](https://lobechat.com/discover/provider/modelscope)**: ModelScope is Alibaba Cloud's Model-as-a-Service platform, providing a rich collection of AI models and inference services.
209
210
  - **[Ai21Labs](https://lobechat.com/discover/provider/ai21)**: AI21 Labs builds foundational models and AI systems for enterprises, accelerating the application of generative AI in production.
210
211
  - **[Upstage](https://lobechat.com/discover/provider/upstage)**: Upstage focuses on developing AI models for various business needs, including Solar LLM and document AI, aiming to achieve artificial general intelligence (AGI) for work. It allows for the creation of simple conversational agents through Chat API and supports functional calling, translation, embedding, and domain-specific applications.
211
212
  - **[xAI](https://lobechat.com/discover/provider/xai)**: xAI is a company dedicated to building artificial intelligence to accelerate human scientific discovery. Our mission is to advance our collective understanding of the universe.
@@ -232,7 +233,7 @@ We have implemented support for the following model service providers:
232
233
 
233
234
  </details>
234
235
 
235
- > 📊 Total providers: [<kbd>**41**</kbd>](https://lobechat.com/discover/providers)
236
+ > 📊 Total providers: [<kbd>**42**</kbd>](https://lobechat.com/discover/providers)
236
237
 
237
238
  <!-- PROVIDER LIST -->
238
239
 
package/README.zh-CN.md CHANGED
@@ -196,7 +196,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
196
196
  - **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter 是一个提供多种前沿大模型接口的服务平台,支持 OpenAI、Anthropic、LLaMA 及更多,适合多样化的开发和应用需求。用户可根据自身需求灵活选择最优的模型和价格,助力 AI 体验的提升。
197
197
  - **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: 在 Cloudflare 的全球网络上运行由无服务器 GPU 驱动的机器学习模型。
198
198
 
199
- <details><summary><kbd>See more providers (+31)</kbd></summary>
199
+ <details><summary><kbd>See more providers (+32)</kbd></summary>
200
200
 
201
201
  - **[GitHub](https://lobechat.com/discover/provider/github)**: 通过 GitHub 模型,开发人员可以成为 AI 工程师,并使用行业领先的 AI 模型进行构建。
202
202
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI 是一个提供多种大语言模型与 AI 图像生成的 API 服务的平台,灵活、可靠且具有成本效益。它支持 Llama3、Mistral 等最新的开源模型,并为生成式 AI 应用开发提供了全面、用户友好且自动扩展的 API 解决方案,适合 AI 初创公司的快速发展。
@@ -206,6 +206,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
206
206
  - **[Groq](https://lobechat.com/discover/provider/groq)**: Groq 的 LPU 推理引擎在最新的独立大语言模型(LLM)基准测试中表现卓越,以其惊人的速度和效率重新定义了 AI 解决方案的标准。Groq 是一种即时推理速度的代表,在基于云的部署中展现了良好的性能。
207
207
  - **[Perplexity](https://lobechat.com/discover/provider/perplexity)**: Perplexity 是一家领先的对话生成模型提供商,提供多种先进的 Llama 3.1 模型,支持在线和离线应用,特别适用于复杂的自然语言处理任务。
208
208
  - **[Mistral](https://lobechat.com/discover/provider/mistral)**: Mistral 提供先进的通用、专业和研究型模型,广泛应用于复杂推理、多语言任务、代码生成等领域,通过功能调用接口,用户可以集成自定义功能,实现特定应用。
209
+ - **[ModelScope](https://lobechat.com/discover/provider/modelscope)**: ModelScope(魔搭)是阿里云推出的模型即服务平台,提供丰富的 AI 模型和推理服务。
209
210
  - **[Ai21Labs](https://lobechat.com/discover/provider/ai21)**: AI21 Labs 为企业构建基础模型和人工智能系统,加速生成性人工智能在生产中的应用。
210
211
  - **[Upstage](https://lobechat.com/discover/provider/upstage)**: Upstage 专注于为各种商业需求开发 AI 模型,包括 Solar LLM 和文档 AI,旨在实现工作的人造通用智能(AGI)。通过 Chat API 创建简单的对话代理,并支持功能调用、翻译、嵌入以及特定领域应用。
211
212
  - **[xAI](https://lobechat.com/discover/provider/xai)**: xAI 是一家致力于构建人工智能以加速人类科学发现的公司。我们的使命是推动我们对宇宙的共同理解。
@@ -232,7 +233,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
232
233
 
233
234
  </details>
234
235
 
235
- > 📊 Total providers: [<kbd>**41**</kbd>](https://lobechat.com/discover/providers)
236
+ > 📊 Total providers: [<kbd>**42**</kbd>](https://lobechat.com/discover/providers)
236
237
 
237
238
  <!-- PROVIDER LIST -->
238
239
 
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Support protect page."
6
+ ]
7
+ },
8
+ "date": "2025-06-01",
9
+ "version": "1.90.0"
10
+ },
11
+ {
12
+ "children": {
13
+ "features": [
14
+ "Support ModelScope Provider."
15
+ ]
16
+ },
17
+ "date": "2025-06-01",
18
+ "version": "1.89.0"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "improvements": [
@@ -146,6 +146,13 @@ For specific content, please refer to the [Feature Flags](/docs/self-hosting/adv
146
146
  - Default: -
147
147
  - Example: `198.18.1.62,224.0.0.3`
148
148
 
149
+ ### `ENABLE_AUTH_PROTECTION`
150
+
151
+ - Type: Optional
152
+ - Description: Controls whether to enable route protection. When set to `1`, all routes except public routes (like `/api/auth`, `/next-auth/*`, `/login`, `/signup`) will require authentication. When set to `0` or not set, only specific protected routes (like `/settings`, `/files`) will require authentication.
153
+ - Default: `0`
154
+ - Example: `1` or `0`
155
+
149
156
  ## Plugin Service
150
157
 
151
158
  ### `PLUGINS_INDEX_URL`
@@ -123,7 +123,7 @@ LobeChat 在部署时提供了一些额外的配置项,你可以使用环境
123
123
  ### `ENABLE_PROXY_DNS`
124
124
 
125
125
  - 类型:可选
126
- - 描述:用于控制是否将DNS发送到代理服务器,配置为 `0` 时所有 DNS 查询在本地完成,当你的网络环境无法访问 API 或访问超时,请尝试将该项配置为 `1`。
126
+ - 描述:用于控制是否将 DNS 发送到代理服务器,配置为 `0` 时所有 DNS 查询在本地完成,当你的网络环境无法访问 API 或访问超时,请尝试将该项配置为 `1`。
127
127
  - 默认值:`0`
128
128
  - 示例:`1` or `0`
129
129
 
@@ -137,10 +137,17 @@ LobeChat 在部署时提供了一些额外的配置项,你可以使用环境
137
137
  ### `SSRF_ALLOW_IP_ADDRESS_LIST`
138
138
 
139
139
  - 类型:可选
140
- - 描述:允许连接的私有 IP 地址列表,多个 IP 地址时使用逗号分隔。当 `SSRF_ALLOW_PRIVATE_IP_ADDRESS` 为 `0` 时才会生效。
140
+ - 说明:允许的私有 IP 地址列表,多个 IP 地址用逗号分隔。仅在 `SSRF_ALLOW_PRIVATE_IP_ADDRESS` 为 `0` 时生效。
141
141
  - 默认值:-
142
142
  - 示例:`198.18.1.62,224.0.0.3`
143
143
 
144
+ ### `ENABLE_AUTH_PROTECTION`
145
+
146
+ - 类型:可选
147
+ - 说明:控制是否启用路由保护。当设置为 `1` 时,除了公共路由(如 `/api/auth`、`/next-auth/*`、`/login`、`/signup`)外,所有路由都需要认证。当设置为 `0` 或未设置时,只有特定的受保护路由(如 `/settings`、`/files` 等)需要认证。
148
+ - 默认值:`0`
149
+ - 示例:`1` 或 `0`
150
+
144
151
  ## 插件服务
145
152
 
146
153
  ### `PLUGINS_INDEX_URL`
@@ -0,0 +1,113 @@
1
+ ---
2
+ title: ModelScope Provider Setup
3
+ description: Learn how to configure and use ModelScope provider in LobeChat
4
+ tags:
5
+ - ModelScope
6
+ ---
7
+
8
+ # ModelScope Provider Setup
9
+
10
+ ModelScope (魔搭社区) is Alibaba's open-source model community that provides access to various AI models. This guide will help you set up the ModelScope provider in LobeChat.
11
+
12
+ ## Prerequisites
13
+
14
+ Before using ModelScope API, you need to:
15
+
16
+ 1. **Create a ModelScope Account**
17
+ - Visit [ModelScope](https://www.modelscope.cn/)
18
+ - Register for an account
19
+
20
+ 2. **Bind Alibaba Cloud Account**
21
+ - **Important**: ModelScope API requires binding with an Alibaba Cloud account
22
+ - Visit your [ModelScope Access Token page](https://www.modelscope.cn/my/myaccesstoken)
23
+ - Follow the instructions to bind your Alibaba Cloud account
24
+ - This step is mandatory for API access
25
+
26
+ 3. **Get API Token**
27
+ - After binding your Alibaba Cloud account, generate an API token
28
+ - Copy the token for use in LobeChat
29
+
30
+ ## Configuration
31
+
32
+ ### Environment Variables
33
+
34
+ Add the following environment variables to your `.env` file:
35
+
36
+ ```bash
37
+ # Enable ModelScope provider
38
+ ENABLED_MODELSCOPE=1
39
+
40
+ # ModelScope API key (required)
41
+ MODELSCOPE_API_KEY=your_modelscope_api_token
42
+
43
+ # Optional: Custom model list (comma-separated)
44
+ MODELSCOPE_MODEL_LIST=deepseek-ai/DeepSeek-V3-0324,Qwen/Qwen3-235B-A22B
45
+
46
+ # Optional: Proxy URL if needed
47
+ MODELSCOPE_PROXY_URL=https://your-proxy-url
48
+ ```
49
+
50
+ ### Docker Configuration
51
+
52
+ If using Docker, add the ModelScope environment variables to your `docker-compose.yml`:
53
+
54
+ ```yaml
55
+ environment:
56
+ - ENABLED_MODELSCOPE=1
57
+ - MODELSCOPE_API_KEY=your_modelscope_api_token
58
+ - MODELSCOPE_MODEL_LIST=deepseek-ai/DeepSeek-V3-0324,Qwen/Qwen3-235B-A22B
59
+ ```
60
+
61
+ ## Available Models
62
+
63
+ ModelScope provides access to various models including:
64
+
65
+ - **DeepSeek Models**: DeepSeek-V3, DeepSeek-R1 series
66
+ - **Qwen Models**: Qwen3 series, Qwen2.5 series
67
+ - **Llama Models**: Meta-Llama-3 series
68
+ - **Other Models**: Various open-source models
69
+
70
+ ## Troubleshooting
71
+
72
+ ### Common Issues
73
+
74
+ 1. **"Please bind your Alibaba Cloud account before use" Error**
75
+ - This means you haven't bound your Alibaba Cloud account to ModelScope
76
+ - Visit [ModelScope Access Token page](https://www.modelscope.cn/my/myaccesstoken)
77
+ - Complete the Alibaba Cloud account binding process
78
+
79
+ 2. **401 Authentication Error**
80
+ - Check if your API token is correct
81
+ - Ensure the token hasn't expired
82
+ - Verify that your Alibaba Cloud account is properly bound
83
+
84
+ 3. **Model Not Available**
85
+ - Some models may require additional permissions
86
+ - Check the model's page on ModelScope for access requirements
87
+
88
+ ### Debug Mode
89
+
90
+ Enable debug mode to see detailed logs:
91
+
92
+ ```bash
93
+ DEBUG_MODELSCOPE_CHAT_COMPLETION=1
94
+ ```
95
+
96
+ ## Notes
97
+
98
+ - ModelScope API is compatible with OpenAI API format
99
+ - The service is primarily designed for users in China
100
+ - Some models may have usage limitations or require additional verification
101
+ - API responses are in Chinese by default for some models
102
+
103
+ ## Support
104
+
105
+ For ModelScope-specific issues:
106
+
107
+ - Visit [ModelScope Documentation](https://www.modelscope.cn/docs)
108
+ - Check [ModelScope Community](https://www.modelscope.cn/community)
109
+
110
+ For LobeChat integration issues:
111
+
112
+ - Check our [GitHub Issues](https://github.com/lobehub/lobe-chat/issues)
113
+ - Join our community discussions
@@ -0,0 +1,133 @@
1
+ ---
2
+ title: ModelScope 提供商配置
3
+ description: 学习如何在 LobeChat 中配置和使用 ModelScope 提供商
4
+ tags:
5
+ - ModelScope
6
+ ---
7
+
8
+ # ModelScope 提供商配置
9
+
10
+ ModelScope(魔搭社区)是阿里巴巴的开源模型社区,提供各种 AI 模型的访问服务。本指南将帮助您在 LobeChat 中设置 ModelScope 提供商。
11
+
12
+ ## 前置条件
13
+
14
+ 在使用 ModelScope API 之前,您需要:
15
+
16
+ 1. **创建 ModelScope 账户**
17
+ - 访问 [ModelScope](https://www.modelscope.cn/)
18
+ - 注册账户
19
+
20
+ 2. **绑定阿里云账户**
21
+ - **重要**:ModelScope API 需要绑定阿里云账户
22
+ - 访问您的 [ModelScope 访问令牌页面](https://www.modelscope.cn/my/myaccesstoken)
23
+ - 按照说明绑定您的阿里云账户
24
+ - 此步骤是 API 访问的必要条件
25
+
26
+ 3. **获取 API 令牌**
27
+ - 绑定阿里云账户后,生成 API 令牌
28
+ - 复制令牌以在 LobeChat 中使用
29
+
30
+ ## 配置
31
+
32
+ ### 环境变量
33
+
34
+ 在您的 `.env` 文件中添加以下环境变量:
35
+
36
+ ```bash
37
+ # 启用 ModelScope 提供商
38
+ ENABLED_MODELSCOPE=1
39
+
40
+ # ModelScope API 密钥(必需)
41
+ MODELSCOPE_API_KEY=your_modelscope_api_token
42
+
43
+ # 可选:自定义模型列表(逗号分隔)
44
+ MODELSCOPE_MODEL_LIST=deepseek-ai/DeepSeek-V3-0324,Qwen/Qwen3-235B-A22B
45
+
46
+ # 可选:代理 URL(如需要)
47
+ MODELSCOPE_PROXY_URL=https://your-proxy-url
48
+ ```
49
+
50
+ ### Docker 配置
51
+
52
+ 如果使用 Docker,请在您的 `docker-compose.yml` 中添加 ModelScope 环境变量:
53
+
54
+ ```yaml
55
+ environment:
56
+ - ENABLED_MODELSCOPE=1
57
+ - MODELSCOPE_API_KEY=your_modelscope_api_token
58
+ - MODELSCOPE_MODEL_LIST=deepseek-ai/DeepSeek-V3-0324,Qwen/Qwen3-235B-A22B
59
+ ```
60
+
61
+ ## 可用模型
62
+
63
+ ModelScope 提供各种模型的访问,包括:
64
+
65
+ - **DeepSeek 模型**:DeepSeek-V3、DeepSeek-R1 系列
66
+ - **Qwen 模型**:Qwen3 系列、Qwen2.5 系列
67
+ - **Llama 模型**:Meta-Llama-3 系列
68
+ - **其他模型**:各种开源模型
69
+
70
+ ## 故障排除
71
+
72
+ ### 常见问题
73
+
74
+ 1. **"请先绑定阿里云账户后使用" 错误**
75
+ - 这意味着您还没有将阿里云账户绑定到 ModelScope
76
+ - 访问 [ModelScope 访问令牌页面](https://www.modelscope.cn/my/myaccesstoken)
77
+ - 完成阿里云账户绑定流程
78
+
79
+ 2. **401 认证错误**
80
+ - 检查您的 API 令牌是否正确
81
+ - 确保令牌没有过期
82
+ - 验证您的阿里云账户是否正确绑定
83
+
84
+ 3. **模型不可用**
85
+ - 某些模型可能需要额外权限
86
+ - 查看 ModelScope 上模型页面的访问要求
87
+
88
+ ### 调试模式
89
+
90
+ 启用调试模式以查看详细日志:
91
+
92
+ ```bash
93
+ DEBUG_MODELSCOPE_CHAT_COMPLETION=1
94
+ ```
95
+
96
+ ## 注意事项
97
+
98
+ - ModelScope API 与 OpenAI API 格式兼容
99
+ - 该服务主要面向中国用户设计
100
+ - 某些模型可能有使用限制或需要额外验证
101
+ - 某些模型的 API 响应默认为中文
102
+
103
+ ## 支持
104
+
105
+ 对于 ModelScope 特定问题:
106
+
107
+ - 访问 [ModelScope 文档](https://www.modelscope.cn/docs)
108
+ - 查看 [ModelScope 社区](https://www.modelscope.cn/community)
109
+
110
+ 对于 LobeChat 集成问题:
111
+
112
+ - 查看我们的 [GitHub Issues](https://github.com/lobehub/lobe-chat/issues)
113
+ - 加入我们的社区讨论
114
+
115
+ ## 模型 ID 格式
116
+
117
+ ModelScope 使用命名空间前缀格式的模型 ID,例如:
118
+
119
+ ```
120
+ deepseek-ai/DeepSeek-V3-0324
121
+ deepseek-ai/DeepSeek-R1-0528
122
+ Qwen/Qwen3-235B-A22B
123
+ Qwen/Qwen3-32B
124
+ ```
125
+
126
+ 在配置模型列表时,请使用完整的模型 ID 格式。
127
+
128
+ ## API 限制
129
+
130
+ - ModelScope API 有速率限制
131
+ - 某些模型可能需要特殊权限
132
+ - 建议在生产环境中监控 API 使用情况
133
+ - 部分高级模型可能需要付费使用
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.88.23",
3
+ "version": "1.90.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -1,4 +1,4 @@
1
- import { getAppConfig } from '@/config/app';
1
+ import { getAppConfig } from '@/envs/app';
2
2
  import { ChatErrorType } from '@/types/fetch';
3
3
 
4
4
  interface AuthConfig {
@@ -1,7 +1,7 @@
1
1
  import { type AuthObject } from '@clerk/backend';
2
2
  import { beforeEach, describe, expect, it, vi } from 'vitest';
3
3
 
4
- import { getAppConfig } from '@/config/app';
4
+ import { getAppConfig } from '@/envs/app';
5
5
 
6
6
  import { checkAuthMethod } from './utils';
7
7
 
@@ -22,7 +22,7 @@ vi.mock('@/const/auth', async (importOriginal) => {
22
22
  };
23
23
  });
24
24
 
25
- vi.mock('@/config/app', () => ({
25
+ vi.mock('@/envs/app', () => ({
26
26
  getAppConfig: vi.fn(),
27
27
  }));
28
28
 
@@ -1,7 +1,7 @@
1
1
  import { type AuthObject } from '@clerk/backend';
2
2
 
3
- import { getAppConfig } from '@/config/app';
4
3
  import { enableClerk, enableNextAuth } from '@/const/auth';
4
+ import { getAppConfig } from '@/envs/app';
5
5
  import { AgentRuntimeError } from '@/libs/model-runtime';
6
6
  import { ChatErrorType } from '@/types/fetch';
7
7
 
@@ -1,9 +1,9 @@
1
1
  import { PluginRequestPayload } from '@lobehub/chat-plugin-sdk';
2
2
  import { createGatewayOnEdgeRuntime } from '@lobehub/chat-plugins-gateway';
3
3
 
4
- import { getAppConfig } from '@/config/app';
5
4
  import { LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED, enableNextAuth } from '@/const/auth';
6
5
  import { LOBE_CHAT_TRACE_ID, TraceNameMap } from '@/const/trace';
6
+ import { getAppConfig } from '@/envs/app';
7
7
  import { AgentRuntimeError } from '@/libs/model-runtime';
8
8
  import { TraceClient } from '@/libs/traces';
9
9
  import { ChatErrorType, ErrorType } from '@/types/fetch';
@@ -2,7 +2,7 @@ import { NextResponse } from 'next/server';
2
2
  import fetch from 'node-fetch';
3
3
  import { RequestFilteringAgentOptions, useAgent as ssrfAgent } from 'request-filtering-agent';
4
4
 
5
- import { appEnv } from '@/config/app';
5
+ import { appEnv } from '@/envs/app';
6
6
 
7
7
  /**
8
8
  * just for a proxy
@@ -1,8 +1,8 @@
1
- import { appEnv } from '@/config/app';
2
1
  import { BRANDING_LOGO_URL, BRANDING_NAME, ORG_NAME } from '@/const/branding';
3
2
  import { DEFAULT_LANG } from '@/const/locale';
4
3
  import { OFFICIAL_URL, OG_URL } from '@/const/url';
5
4
  import { isCustomBranding, isCustomORG } from '@/const/version';
5
+ import { appEnv } from '@/envs/app';
6
6
  import { translation } from '@/server/translation';
7
7
  import { DynamicLayoutProps } from '@/types/next';
8
8
  import { RouteVariants } from '@/utils/server/routeVariants';
@@ -1,4 +1,4 @@
1
- import { appEnv } from '@/config/app';
1
+ import { appEnv } from '@/envs/app';
2
2
 
3
3
  export type ErrorType = Error & { digest?: string };
4
4
 
@@ -1,7 +1,7 @@
1
1
  // @vitest-environment node
2
2
  import { beforeEach, describe, expect, it, vi } from 'vitest';
3
3
 
4
- import { getAppConfig } from '../app';
4
+ import { getAppConfig } from '../../envs/app';
5
5
 
6
6
  // Stub the global process object to safely mock environment variables
7
7
  vi.stubGlobal('process', {
@@ -24,6 +24,7 @@ import { default as jina } from './jina';
24
24
  import { default as lmstudio } from './lmstudio';
25
25
  import { default as minimax } from './minimax';
26
26
  import { default as mistral } from './mistral';
27
+ import { default as modelscope } from './modelscope';
27
28
  import { default as moonshot } from './moonshot';
28
29
  import { default as novita } from './novita';
29
30
  import { default as nvidia } from './nvidia';
@@ -97,6 +98,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
97
98
  lmstudio,
98
99
  minimax,
99
100
  mistral,
101
+ modelscope,
100
102
  moonshot,
101
103
  novita,
102
104
  nvidia,
@@ -151,6 +153,7 @@ export { default as jina } from './jina';
151
153
  export { default as lmstudio } from './lmstudio';
152
154
  export { default as minimax } from './minimax';
153
155
  export { default as mistral } from './mistral';
156
+ export { default as modelscope } from './modelscope';
154
157
  export { default as moonshot } from './moonshot';
155
158
  export { default as novita } from './novita';
156
159
  export { default as nvidia } from './nvidia';
@@ -0,0 +1,63 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const modelscopeChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ functionCall: true,
7
+ },
8
+ contextWindowTokens: 131_072,
9
+ description: 'DeepSeek-V3是DeepSeek第三代模型,在多项基准测试中表现优异。',
10
+ displayName: 'DeepSeek-V3-0324',
11
+ enabled: true,
12
+ id: 'deepseek-ai/DeepSeek-V3-0324',
13
+ type: 'chat',
14
+ },
15
+ {
16
+ abilities: {
17
+ functionCall: true,
18
+ },
19
+ contextWindowTokens: 131_072,
20
+ description: 'DeepSeek-V3是DeepSeek第三代模型的最新版本,具有强大的推理和对话能力。',
21
+ displayName: 'DeepSeek-V3',
22
+ enabled: true,
23
+ id: 'deepseek-ai/DeepSeek-V3',
24
+ type: 'chat',
25
+ },
26
+ {
27
+ abilities: {
28
+ functionCall: true,
29
+ },
30
+ contextWindowTokens: 131_072,
31
+ description: 'DeepSeek-R1是DeepSeek最新的推理模型,专注于复杂推理任务。',
32
+ displayName: 'DeepSeek-R1',
33
+ enabled: true,
34
+ id: 'deepseek-ai/DeepSeek-R1',
35
+ type: 'chat',
36
+ },
37
+ {
38
+ abilities: {
39
+ functionCall: true,
40
+ },
41
+ contextWindowTokens: 131_072,
42
+ description: 'Qwen3-235B-A22B是通义千问3代超大规模模型,提供顶级的AI能力。',
43
+ displayName: 'Qwen3-235B-A22B',
44
+ enabled: true,
45
+ id: 'Qwen/Qwen3-235B-A22B',
46
+ type: 'chat',
47
+ },
48
+ {
49
+ abilities: {
50
+ functionCall: true,
51
+ },
52
+ contextWindowTokens: 131_072,
53
+ description: 'Qwen3-32B是通义千问3代模型,具有强大的推理和对话能力。',
54
+ displayName: 'Qwen3-32B',
55
+ enabled: true,
56
+ id: 'Qwen/Qwen3-32B',
57
+ type: 'chat',
58
+ },
59
+ ];
60
+
61
+ export const allModels = [...modelscopeChatModels];
62
+
63
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -162,6 +162,9 @@ export const getLLMConfig = () => {
162
162
 
163
163
  ENABLED_INFINIAI: z.boolean(),
164
164
  INFINIAI_API_KEY: z.string().optional(),
165
+
166
+ ENABLED_MODELSCOPE: z.boolean(),
167
+ MODELSCOPE_API_KEY: z.string().optional(),
165
168
  },
166
169
  runtimeEnv: {
167
170
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -322,6 +325,9 @@ export const getLLMConfig = () => {
322
325
 
323
326
  ENABLED_INFINIAI: !!process.env.INFINIAI_API_KEY,
324
327
  INFINIAI_API_KEY: process.env.INFINIAI_API_KEY,
328
+
329
+ ENABLED_MODELSCOPE: !!process.env.MODELSCOPE_API_KEY,
330
+ MODELSCOPE_API_KEY: process.env.MODELSCOPE_API_KEY,
325
331
  },
326
332
  });
327
333
  };
@@ -24,6 +24,7 @@ import JinaProvider from './jina';
24
24
  import LMStudioProvider from './lmstudio';
25
25
  import MinimaxProvider from './minimax';
26
26
  import MistralProvider from './mistral';
27
+ import ModelScopeProvider from './modelscope';
27
28
  import MoonshotProvider from './moonshot';
28
29
  import NovitaProvider from './novita';
29
30
  import NvidiaProvider from './nvidia';
@@ -67,6 +68,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
67
68
  GithubProvider.chatModels,
68
69
  MinimaxProvider.chatModels,
69
70
  MistralProvider.chatModels,
71
+ ModelScopeProvider.chatModels,
70
72
  MoonshotProvider.chatModels,
71
73
  OllamaProvider.chatModels,
72
74
  VLLMProvider.chatModels,
@@ -130,6 +132,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
130
132
  GroqProvider,
131
133
  PerplexityProvider,
132
134
  MistralProvider,
135
+ ModelScopeProvider,
133
136
  Ai21Provider,
134
137
  UpstageProvider,
135
138
  XAIProvider,
@@ -194,6 +197,7 @@ export { default as JinaProviderCard } from './jina';
194
197
  export { default as LMStudioProviderCard } from './lmstudio';
195
198
  export { default as MinimaxProviderCard } from './minimax';
196
199
  export { default as MistralProviderCard } from './mistral';
200
+ export { default as ModelScopeProviderCard } from './modelscope';
197
201
  export { default as MoonshotProviderCard } from './moonshot';
198
202
  export { default as NovitaProviderCard } from './novita';
199
203
  export { default as NvidiaProviderCard } from './nvidia';
@@ -0,0 +1,62 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref: https://modelscope.cn/docs/model-service/API-Inference/intro
4
+ const ModelScope: ModelProviderCard = {
5
+ chatModels: [
6
+ {
7
+ contextWindowTokens: 131_072,
8
+ description: 'DeepSeek-V3是DeepSeek第三代模型,在多项基准测试中表现优异。',
9
+ displayName: 'DeepSeek-V3-0324',
10
+ enabled: true,
11
+ functionCall: true,
12
+ id: 'deepseek-ai/DeepSeek-V3-0324',
13
+ },
14
+ {
15
+ contextWindowTokens: 131_072,
16
+ description: 'DeepSeek-V3是DeepSeek第三代模型的最新版本,具有强大的推理和对话能力。',
17
+ displayName: 'DeepSeek-V3',
18
+ enabled: true,
19
+ functionCall: true,
20
+ id: 'deepseek-ai/DeepSeek-V3',
21
+ },
22
+ {
23
+ contextWindowTokens: 131_072,
24
+ description: 'DeepSeek-R1是DeepSeek最新的推理模型,专注于复杂推理任务。',
25
+ displayName: 'DeepSeek-R1',
26
+ enabled: true,
27
+ functionCall: true,
28
+ id: 'deepseek-ai/DeepSeek-R1',
29
+ },
30
+ {
31
+ contextWindowTokens: 131_072,
32
+ description: 'Qwen3-235B-A22B是通义千问3代超大规模模型,提供顶级的AI能力。',
33
+ displayName: 'Qwen3-235B-A22B',
34
+ enabled: true,
35
+ functionCall: true,
36
+ id: 'Qwen/Qwen3-235B-A22B',
37
+ },
38
+ {
39
+ contextWindowTokens: 131_072,
40
+ description: 'Qwen3-32B是通义千问3代模型,具有强大的推理和对话能力。',
41
+ displayName: 'Qwen3-32B',
42
+ enabled: true,
43
+ functionCall: true,
44
+ id: 'Qwen/Qwen3-32B',
45
+ },
46
+ ],
47
+ checkModel: 'Qwen/Qwen3-32B',
48
+ description: 'ModelScope是阿里云推出的模型即服务平台,提供丰富的AI模型和推理服务。',
49
+ id: 'modelscope',
50
+ modelList: { showModelFetcher: true },
51
+ name: 'ModelScope',
52
+ settings: {
53
+ proxyUrl: {
54
+ placeholder: 'https://api-inference.modelscope.cn/v1',
55
+ },
56
+ sdkType: 'openai',
57
+ showModelFetcher: true,
58
+ },
59
+ url: 'https://modelscope.cn',
60
+ };
61
+
62
+ export default ModelScope;
@@ -49,6 +49,7 @@ export const getAppConfig = () => {
49
49
  APP_URL: z.string().optional(),
50
50
  VERCEL_EDGE_CONFIG: z.string().optional(),
51
51
  MIDDLEWARE_REWRITE_THROUGH_LOCAL: z.boolean().optional(),
52
+ ENABLE_AUTH_PROTECTION: z.boolean().optional(),
52
53
 
53
54
  CDN_USE_GLOBAL: z.boolean().optional(),
54
55
  CUSTOM_FONT_FAMILY: z.string().optional(),
@@ -82,6 +83,7 @@ export const getAppConfig = () => {
82
83
 
83
84
  APP_URL,
84
85
  MIDDLEWARE_REWRITE_THROUGH_LOCAL: process.env.MIDDLEWARE_REWRITE_THROUGH_LOCAL === '1',
86
+ ENABLE_AUTH_PROTECTION: process.env.ENABLE_AUTH_PROTECTION === '1',
85
87
 
86
88
  CUSTOM_FONT_FAMILY: process.env.CUSTOM_FONT_FAMILY,
87
89
  CUSTOM_FONT_URL: process.env.CUSTOM_FONT_URL,
@@ -1,7 +1,7 @@
1
1
  import { ReactNode, Suspense } from 'react';
2
2
 
3
- import { appEnv } from '@/config/app';
4
3
  import { getServerFeatureFlagsValue } from '@/config/featureFlags';
4
+ import { appEnv } from '@/envs/app';
5
5
  import DevPanel from '@/features/DevPanel';
6
6
  import { getServerGlobalConfig } from '@/server/globalConfig';
7
7
  import { ServerConfigStoreProvider } from '@/store/serverConfig/Provider';
@@ -0,0 +1,69 @@
1
+
2
+ import type { ChatModelCard } from '@/types/llm';
3
+
4
+ import { ModelProvider } from '../types';
5
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
6
+
7
+ export interface ModelScopeModelCard {
8
+ created: number;
9
+ id: string;
10
+ object: string;
11
+ owned_by: string;
12
+ }
13
+
14
+ export const LobeModelScopeAI = LobeOpenAICompatibleFactory({
15
+ baseURL: 'https://api-inference.modelscope.cn/v1',
16
+ debug: {
17
+ chatCompletion: () => process.env.DEBUG_MODELSCOPE_CHAT_COMPLETION === '1',
18
+ },
19
+ models: async ({ client }) => {
20
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
21
+
22
+ const functionCallKeywords = ['qwen', 'deepseek', 'llama'];
23
+
24
+ const visionKeywords = ['qwen-vl', 'qwen2-vl', 'llava'];
25
+
26
+ const reasoningKeywords = ['qwq', 'deepseek-r1'];
27
+
28
+ try {
29
+ const modelsPage = (await client.models.list()) as any;
30
+ const modelList: ModelScopeModelCard[] = modelsPage.data || [];
31
+
32
+ return modelList
33
+ .map((model) => {
34
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
35
+ (m) => model.id.toLowerCase() === m.id.toLowerCase(),
36
+ );
37
+
38
+ const modelId = model.id.toLowerCase();
39
+
40
+ return {
41
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
42
+ displayName: knownModel?.displayName ?? model.id,
43
+ enabled: knownModel?.enabled || false,
44
+ functionCall:
45
+ functionCallKeywords.some((keyword) => modelId.includes(keyword)) ||
46
+ knownModel?.abilities?.functionCall ||
47
+ false,
48
+ id: model.id,
49
+ reasoning:
50
+ reasoningKeywords.some((keyword) => modelId.includes(keyword)) ||
51
+ knownModel?.abilities?.reasoning ||
52
+ false,
53
+ vision:
54
+ visionKeywords.some((keyword) => modelId.includes(keyword)) ||
55
+ knownModel?.abilities?.vision ||
56
+ false,
57
+ };
58
+ })
59
+ .filter(Boolean) as ChatModelCard[];
60
+ } catch (error) {
61
+ console.warn(
62
+ 'Failed to fetch ModelScope models. Please ensure your ModelScope API key is valid and your Alibaba Cloud account is properly bound:',
63
+ error,
64
+ );
65
+ return [];
66
+ }
67
+ },
68
+ provider: ModelProvider.ModelScope,
69
+ });
@@ -1,17 +1,17 @@
1
1
  import { LobeAi21AI } from './ai21';
2
2
  import { LobeAi360AI } from './ai360';
3
- import LobeAnthropicAI from './anthropic';
3
+ import { LobeAnthropicAI } from './anthropic';
4
4
  import { LobeAzureOpenAI } from './azureOpenai';
5
5
  import { LobeAzureAI } from './azureai';
6
6
  import { LobeBaichuanAI } from './baichuan';
7
- import LobeBedrockAI from './bedrock';
7
+ import { LobeBedrockAI } from './bedrock';
8
8
  import { LobeCloudflareAI } from './cloudflare';
9
9
  import { LobeCohereAI } from './cohere';
10
10
  import { LobeDeepSeekAI } from './deepseek';
11
11
  import { LobeFireworksAI } from './fireworksai';
12
12
  import { LobeGiteeAI } from './giteeai';
13
13
  import { LobeGithubAI } from './github';
14
- import LobeGoogleAI from './google';
14
+ import { LobeGoogleAI } from './google';
15
15
  import { LobeGroq } from './groq';
16
16
  import { LobeHigressAI } from './higress';
17
17
  import { LobeHuggingFaceAI } from './huggingface';
@@ -22,10 +22,11 @@ import { LobeJinaAI } from './jina';
22
22
  import { LobeLMStudioAI } from './lmstudio';
23
23
  import { LobeMinimaxAI } from './minimax';
24
24
  import { LobeMistralAI } from './mistral';
25
+ import { LobeModelScopeAI } from './modelscope';
25
26
  import { LobeMoonshotAI } from './moonshot';
26
27
  import { LobeNovitaAI } from './novita';
27
28
  import { LobeNvidiaAI } from './nvidia';
28
- import LobeOllamaAI from './ollama';
29
+ import { LobeOllamaAI } from './ollama';
29
30
  import { LobeOpenAI } from './openai';
30
31
  import { LobeOpenRouterAI } from './openrouter';
31
32
  import { LobePerplexityAI } from './perplexity';
@@ -75,6 +76,7 @@ export const providerRuntimeMap = {
75
76
  lmstudio: LobeLMStudioAI,
76
77
  minimax: LobeMinimaxAI,
77
78
  mistral: LobeMistralAI,
79
+ modelscope: LobeModelScopeAI,
78
80
  moonshot: LobeMoonshotAI,
79
81
  novita: LobeNovitaAI,
80
82
  nvidia: LobeNvidiaAI,
@@ -46,6 +46,7 @@ export enum ModelProvider {
46
46
  LMStudio = 'lmstudio',
47
47
  Minimax = 'minimax',
48
48
  Mistral = 'mistral',
49
+ ModelScope = 'modelscope',
49
50
  Moonshot = 'moonshot',
50
51
  Novita = 'novita',
51
52
  Nvidia = 'nvidia',
@@ -4,7 +4,7 @@ import { NextRequest } from 'next/server';
4
4
  import { IncomingMessage, ServerResponse } from 'node:http';
5
5
  import urlJoin from 'url-join';
6
6
 
7
- import { appEnv } from '@/config/app';
7
+ import { appEnv } from '@/envs/app';
8
8
 
9
9
  const log = debug('lobe-oidc:http-adapter');
10
10
 
@@ -2,10 +2,10 @@ import debug from 'debug';
2
2
  import Provider, { Configuration, KoaContextWithOIDC } from 'oidc-provider';
3
3
  import urlJoin from 'url-join';
4
4
 
5
- import { appEnv } from '@/config/app';
6
5
  import { serverDBEnv } from '@/config/db';
7
6
  import { UserModel } from '@/database/models/user';
8
7
  import { LobeChatDatabase } from '@/database/type';
8
+ import { appEnv } from '@/envs/app';
9
9
  import { oidcEnv } from '@/envs/oidc';
10
10
 
11
11
  import { DrizzleAdapter } from './adapter';
package/src/middleware.ts CHANGED
@@ -4,10 +4,10 @@ import { NextRequest, NextResponse } from 'next/server';
4
4
  import { UAParser } from 'ua-parser-js';
5
5
  import urlJoin from 'url-join';
6
6
 
7
- import { appEnv } from '@/config/app';
8
7
  import { authEnv } from '@/config/auth';
9
8
  import { LOBE_LOCALE_COOKIE } from '@/const/locale';
10
9
  import { LOBE_THEME_APPEARANCE } from '@/const/theme';
10
+ import { appEnv } from '@/envs/app';
11
11
  import NextAuthEdge from '@/libs/next-auth/edge';
12
12
  import { Locales } from '@/locales/resources';
13
13
  import { parseBrowserLanguage } from '@/utils/locale';
@@ -134,6 +134,16 @@ const defaultMiddleware = (request: NextRequest) => {
134
134
  return NextResponse.rewrite(url, { status: 200 });
135
135
  };
136
136
 
137
+ const isPublicRoute = createRouteMatcher([
138
+ '/api/auth(.*)',
139
+ '/trpc/edge(.*)',
140
+ // next auth
141
+ '/next-auth/(.*)',
142
+ // clerk
143
+ '/login',
144
+ '/signup',
145
+ ]);
146
+
137
147
  const isProtectedRoute = createRouteMatcher([
138
148
  '/settings(.*)',
139
149
  '/files(.*)',
@@ -148,7 +158,9 @@ const nextAuthMiddleware = NextAuthEdge.auth((req) => {
148
158
 
149
159
  const response = defaultMiddleware(req);
150
160
 
151
- const isProtected = isProtectedRoute(req);
161
+ // When auth protection is enabled, only public routes are exempt; all other routes are protected
162
+ const isProtected = appEnv.ENABLE_AUTH_PROTECTION ? !isPublicRoute(req) : isProtectedRoute(req);
163
+
152
164
  logNextAuth('Route protection status: %s, %s', req.url, isProtected ? 'protected' : 'public');
153
165
 
154
166
  // Just check if session exists
@@ -181,7 +193,7 @@ const nextAuthMiddleware = NextAuthEdge.auth((req) => {
181
193
  if (isProtected) {
182
194
  logNextAuth('Request a protected route, redirecting to sign-in page');
183
195
  const nextLoginUrl = new URL('/next-auth/signin', req.nextUrl.origin);
184
- nextLoginUrl.searchParams.set('callbackUrl', req.nextUrl.pathname);
196
+ nextLoginUrl.searchParams.set('callbackUrl', req.nextUrl.href);
185
197
  return Response.redirect(nextLoginUrl);
186
198
  }
187
199
  logNextAuth('Request a free route but not login, allow visit without auth header');
@@ -229,6 +241,7 @@ const clerkAuthMiddleware = clerkMiddleware(
229
241
  );
230
242
 
231
243
  logDefault('Middleware configuration: %O', {
244
+ enableAuthProtection: appEnv.ENABLE_AUTH_PROTECTION,
232
245
  enableClerk: authEnv.NEXT_PUBLIC_ENABLE_CLERK_AUTH,
233
246
  enableNextAuth: authEnv.NEXT_PUBLIC_ENABLE_NEXT_AUTH,
234
247
  enableOIDC: oidcEnv.ENABLE_OIDC,
@@ -1,7 +1,7 @@
1
1
  import { describe, expect, it, vi } from 'vitest';
2
2
 
3
- import { getAppConfig } from '@/config/app';
4
3
  import { knowledgeEnv } from '@/config/knowledge';
4
+ import { getAppConfig } from '@/envs/app';
5
5
  import { SystemEmbeddingConfig } from '@/types/knowledgeBase';
6
6
  import { FilesConfigItem } from '@/types/user/settings/filesConfig';
7
7
 
@@ -9,7 +9,7 @@ import { getServerDefaultAgentConfig, getServerDefaultFilesConfig } from './inde
9
9
  import { parseAgentConfig } from './parseDefaultAgent';
10
10
  import { parseFilesConfig } from './parseFilesConfig';
11
11
 
12
- vi.mock('@/config/app', () => ({
12
+ vi.mock('@/envs/app', () => ({
13
13
  getAppConfig: vi.fn(),
14
14
  }));
15
15
 
@@ -1,10 +1,10 @@
1
- import { appEnv, getAppConfig } from '@/config/app';
2
1
  import { authEnv } from '@/config/auth';
3
2
  import { fileEnv } from '@/config/file';
4
3
  import { knowledgeEnv } from '@/config/knowledge';
5
4
  import { langfuseEnv } from '@/config/langfuse';
6
5
  import { enableNextAuth } from '@/const/auth';
7
6
  import { isDesktop } from '@/const/version';
7
+ import { appEnv, getAppConfig } from '@/envs/app';
8
8
  import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
9
9
  import { GlobalServerConfig } from '@/types/serverConfig';
10
10
 
@@ -1,7 +1,7 @@
1
1
  import urlJoin from 'url-join';
2
2
 
3
- import { appEnv } from '@/config/app';
4
3
  import { DEFAULT_LANG, isLocaleNotSupport } from '@/const/locale';
4
+ import { appEnv } from '@/envs/app';
5
5
  import { Locales, normalizeLocale } from '@/locales/resources';
6
6
  import { EdgeConfig } from '@/server/modules/EdgeConfig';
7
7
  import { AgentStoreIndex } from '@/types/discover';
@@ -1,6 +1,6 @@
1
1
  import { EdgeConfigClient, createClient } from '@vercel/edge-config';
2
2
 
3
- import { appEnv } from '@/config/app';
3
+ import { appEnv } from '@/envs/app';
4
4
 
5
5
  const EdgeConfigKeys = {
6
6
  /**
@@ -1,7 +1,7 @@
1
1
  import urlJoin from 'url-join';
2
2
 
3
- import { appEnv } from '@/config/app';
4
3
  import { DEFAULT_LANG, isLocaleNotSupport } from '@/const/locale';
4
+ import { appEnv } from '@/envs/app';
5
5
  import { Locales, normalizeLocale } from '@/locales/resources';
6
6
 
7
7
  export class PluginStore {
@@ -2,9 +2,9 @@ import { createTRPCClient, httpBatchLink } from '@trpc/client';
2
2
  import superjson from 'superjson';
3
3
  import urlJoin from 'url-join';
4
4
 
5
- import { appEnv } from '@/config/app';
6
5
  import { serverDBEnv } from '@/config/db';
7
6
  import { JWTPayload, LOBE_CHAT_AUTH_HEADER } from '@/const/auth';
7
+ import { appEnv } from '@/envs/app';
8
8
  import { KeyVaultsGateKeeper } from '@/server/modules/KeyVaultsEncrypt';
9
9
 
10
10
  import type { AsyncRouter } from './index';
@@ -6,7 +6,7 @@ import { parseAgentConfig } from '@/server/globalConfig/parseDefaultAgent';
6
6
 
7
7
  import { AgentService } from './index';
8
8
 
9
- vi.mock('@/config/app', () => ({
9
+ vi.mock('@/envs/app', () => ({
10
10
  appEnv: {
11
11
  DEFAULT_AGENT_CONFIG: 'model=gpt-4;temperature=0.7',
12
12
  },
@@ -58,6 +58,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
58
58
  lobehub?: any;
59
59
  minimax?: OpenAICompatibleKeyVault;
60
60
  mistral?: OpenAICompatibleKeyVault;
61
+ modelscope?: OpenAICompatibleKeyVault;
61
62
  moonshot?: OpenAICompatibleKeyVault;
62
63
  novita?: OpenAICompatibleKeyVault;
63
64
  nvidia?: OpenAICompatibleKeyVault;
@@ -1,3 +1,3 @@
1
- import { appEnv } from '@/config/app';
1
+ import { appEnv } from '@/envs/app';
2
2
 
3
3
  export const withBasePath = (path: string) => appEnv.NEXT_PUBLIC_BASE_PATH + path;
@@ -21,7 +21,7 @@ vi.mock('@/const/auth', async (importOriginal) => {
21
21
  };
22
22
  });
23
23
 
24
- vi.mock('@/config/app', () => ({
24
+ vi.mock('@/envs/app', () => ({
25
25
  getAppConfig: vi.fn(),
26
26
  }));
27
27