@lobehub/chat 1.15.2 → 1.15.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,58 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.15.4](https://github.com/lobehub/lobe-chat/compare/v1.15.3...v1.15.4)
6
+
7
+ <sup>Released on **2024-09-01**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Update Novita AI model info & add `NOVITA_MODEL_LIST` support.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Update Novita AI model info & add `NOVITA_MODEL_LIST` support, closes [#3715](https://github.com/lobehub/lobe-chat/issues/3715) ([4ab33f6](https://github.com/lobehub/lobe-chat/commit/4ab33f6))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.15.3](https://github.com/lobehub/lobe-chat/compare/v1.15.2...v1.15.3)
31
+
32
+ <sup>Released on **2024-09-01**</sup>
33
+
34
+ #### 💄 Styles
35
+
36
+ - **misc**: Add `*_MODEL_LIST` for Qwen and ZeroOne, fix model info, update Claude 3.5 Sonnet maxOutput value.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### Styles
44
+
45
+ - **misc**: Add `*_MODEL_LIST` for Qwen and ZeroOne, closes [#3704](https://github.com/lobehub/lobe-chat/issues/3704) ([05419dc](https://github.com/lobehub/lobe-chat/commit/05419dc))
46
+ - **misc**: Fix model info, closes [#3696](https://github.com/lobehub/lobe-chat/issues/3696) ([4d98037](https://github.com/lobehub/lobe-chat/commit/4d98037))
47
+ - **misc**: Update Claude 3.5 Sonnet maxOutput value, closes [#3705](https://github.com/lobehub/lobe-chat/issues/3705) ([685bd74](https://github.com/lobehub/lobe-chat/commit/685bd74))
48
+
49
+ </details>
50
+
51
+ <div align="right">
52
+
53
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
54
+
55
+ </div>
56
+
5
57
  ### [Version 1.15.2](https://github.com/lobehub/lobe-chat/compare/v1.15.1...v1.15.2)
6
58
 
7
59
  <sup>Released on **2024-08-30**</sup>
package/Dockerfile CHANGED
@@ -127,7 +127,7 @@ ENV \
127
127
  # Moonshot
128
128
  MOONSHOT_API_KEY="" MOONSHOT_PROXY_URL="" \
129
129
  # Novita
130
- NOVITA_API_KEY="" \
130
+ NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
131
131
  # Ollama
132
132
  OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
133
133
  # OpenAI
@@ -137,7 +137,7 @@ ENV \
137
137
  # Perplexity
138
138
  PERPLEXITY_API_KEY="" PERPLEXITY_PROXY_URL="" \
139
139
  # Qwen
140
- QWEN_API_KEY="" \
140
+ QWEN_API_KEY="" QWEN_MODEL_LIST="" \
141
141
  # SiliconCloud
142
142
  SILICONCLOUD_API_KEY="" SILICONCLOUD_MODEL_LIST="" SILICONCLOUD_PROXY_URL="" \
143
143
  # Stepfun
@@ -149,7 +149,7 @@ ENV \
149
149
  # Upstage
150
150
  UPSTAGE_API_KEY="" \
151
151
  # 01.AI
152
- ZEROONE_API_KEY="" \
152
+ ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
153
153
  # Zhipu
154
154
  ZHIPU_API_KEY="" ZHIPU_MODEL_LIST=""
155
155
 
@@ -159,7 +159,7 @@ ENV \
159
159
  # Moonshot
160
160
  MOONSHOT_API_KEY="" MOONSHOT_PROXY_URL="" \
161
161
  # Novita
162
- NOVITA_API_KEY="" \
162
+ NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
163
163
  # Ollama
164
164
  OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
165
165
  # OpenAI
@@ -169,7 +169,7 @@ ENV \
169
169
  # Perplexity
170
170
  PERPLEXITY_API_KEY="" PERPLEXITY_PROXY_URL="" \
171
171
  # Qwen
172
- QWEN_API_KEY="" \
172
+ QWEN_API_KEY="" QWEN_MODEL_LIST="" \
173
173
  # SiliconCloud
174
174
  SILICONCLOUD_API_KEY="" SILICONCLOUD_MODEL_LIST="" SILICONCLOUD_PROXY_URL="" \
175
175
  # Stepfun
@@ -181,7 +181,7 @@ ENV \
181
181
  # Upstage
182
182
  UPSTAGE_API_KEY="" \
183
183
  # 01.AI
184
- ZEROONE_API_KEY="" \
184
+ ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
185
185
  # Zhipu
186
186
  ZHIPU_API_KEY=""
187
187
 
package/README.md CHANGED
@@ -52,18 +52,19 @@ One-click **FREE** deployment of your private OpenAI ChatGPT/Claude/Gemini/Groq/
52
52
 
53
53
  - [👋🏻 Getting Started & Join Our Community](#-getting-started--join-our-community)
54
54
  - [✨ Features](#-features)
55
- - [`1` Multi-Model Service Provider Support](#1-multi-model-service-provider-support)
56
- - [`2` Local Large Language Model (LLM) Support](#2-local-large-language-model-llm-support)
57
- - [`3` Model Visual Recognition](#3-model-visual-recognition)
58
- - [`4` TTS & STT Voice Conversation](#4-tts--stt-voice-conversation)
59
- - [`5` Text to Image Generation](#5-text-to-image-generation)
60
- - [`6` Plugin System (Function Calling)](#6-plugin-system-function-calling)
61
- - [`7` Agent Market (GPTs)](#7-agent-market-gpts)
62
- - [`8` Support Local / Remote Database](#8-support-local--remote-database)
63
- - [`9` Support Multi-User Management](#9-support-multi-user-management)
64
- - [`10` Progressive Web App (PWA)](#10-progressive-web-app-pwa)
65
- - [`11` Mobile Device Adaptation](#11-mobile-device-adaptation)
66
- - [`12` Custom Themes](#12-custom-themes)
55
+ - [`1` File Upload/Knowledge Base](#1-file-uploadknowledge-base)
56
+ - [`2` Multi-Model Service Provider Support](#2-multi-model-service-provider-support)
57
+ - [`3` Local Large Language Model (LLM) Support](#3-local-large-language-model-llm-support)
58
+ - [`4` Model Visual Recognition](#4-model-visual-recognition)
59
+ - [`5` TTS & STT Voice Conversation](#5-tts--stt-voice-conversation)
60
+ - [`6` Text to Image Generation](#6-text-to-image-generation)
61
+ - [`7` Plugin System (Function Calling)](#7-plugin-system-function-calling)
62
+ - [`8` Agent Market (GPTs)](#8-agent-market-gpts)
63
+ - [`9` Support Local / Remote Database](#9-support-local--remote-database)
64
+ - [`10` Support Multi-User Management](#10-support-multi-user-management)
65
+ - [`11` Progressive Web App (PWA)](#11-progressive-web-app-pwa)
66
+ - [`12` Mobile Device Adaptation](#12-mobile-device-adaptation)
67
+ - [`13` Custom Themes](#13-custom-themes)
67
68
  - [`*` What's more](#-whats-more)
68
69
  - [⚡️ Performance](#️-performance)
69
70
  - [🛳 Self Hosting](#-self-hosting)
@@ -110,9 +111,27 @@ Whether for users or professional developers, LobeHub will be your AI Agent play
110
111
 
111
112
  ## ✨ Features
112
113
 
114
+ [![][image-feat-knowledgebase]][docs-feat-knowledgebase]
115
+
116
+ ### `1` [File Upload/Knowledge Base][docs-feat-knowledgebase]
117
+
118
+ LobeChat supports file upload and knowledge base functionality. You can upload various types of files including documents, images, audio, and video, as well as create knowledge bases, making it convenient for users to manage and search for files. Additionally, you can utilize files and knowledge base features during conversations, enabling a richer dialogue experience.
119
+
120
+ <https://github.com/user-attachments/assets/faa8cf67-e743-4590-8bf6-ebf6ccc34175>
121
+
122
+ > \[!TIP]
123
+ >
124
+ > Learn more on [📘 LobeChat Knowledge Base Launch — From Now On, Every Step Counts](https://lobehub.com/blog/knowledge-base)
125
+
126
+ <div align="right">
127
+
128
+ [![][back-to-top]](#readme-top)
129
+
130
+ </div>
131
+
113
132
  [![][image-feat-privoder]][docs-feat-provider]
114
133
 
115
- ### `1` [Multi-Model Service Provider Support][docs-feat-provider]
134
+ ### `2` [Multi-Model Service Provider Support][docs-feat-provider]
116
135
 
117
136
  In the continuous development of LobeChat, we deeply understand the importance of diversity in model service providers for meeting the needs of the community when providing AI conversation services. Therefore, we have expanded our support to multiple model service providers, rather than being limited to a single one, in order to offer users a more diverse and rich selection of conversations.
118
137
 
@@ -146,7 +165,7 @@ At the same time, we are also planning to support more model service providers,
146
165
 
147
166
  [![][image-feat-local]][docs-feat-local]
148
167
 
149
- ### `2` [Local Large Language Model (LLM) Support][docs-feat-local]
168
+ ### `3` [Local Large Language Model (LLM) Support][docs-feat-local]
150
169
 
151
170
  To meet the specific needs of users, LobeChat also supports the use of local models based on [Ollama](https://ollama.ai), allowing users to flexibly use their own or third-party models.
152
171
 
@@ -162,7 +181,7 @@ To meet the specific needs of users, LobeChat also supports the use of local mod
162
181
 
163
182
  [![][image-feat-vision]][docs-feat-vision]
164
183
 
165
- ### `3` [Model Visual Recognition][docs-feat-vision]
184
+ ### `4` [Model Visual Recognition][docs-feat-vision]
166
185
 
167
186
  LobeChat now supports OpenAI's latest [`gpt-4-vision`](https://platform.openai.com/docs/guides/vision) model with visual recognition capabilities,
168
187
  a multimodal intelligence that can perceive visuals. Users can easily upload or drag and drop images into the dialogue box,
@@ -180,7 +199,7 @@ Whether it's sharing images in daily use or interpreting images within specific
180
199
 
181
200
  [![][image-feat-tts]][docs-feat-tts]
182
201
 
183
- ### `4` [TTS & STT Voice Conversation][docs-feat-tts]
202
+ ### `5` [TTS & STT Voice Conversation][docs-feat-tts]
184
203
 
185
204
  LobeChat supports Text-to-Speech (TTS) and Speech-to-Text (STT) technologies, enabling our application to convert text messages into clear voice outputs,
186
205
  allowing users to interact with our conversational agent as if they were talking to a real person. Users can choose from a variety of voices to pair with the agent.
@@ -197,7 +216,7 @@ Users can choose the voice that suits their personal preferences or specific sce
197
216
 
198
217
  [![][image-feat-t2i]][docs-feat-t2i]
199
218
 
200
- ### `5` [Text to Image Generation][docs-feat-t2i]
219
+ ### `6` [Text to Image Generation][docs-feat-t2i]
201
220
 
202
221
  With support for the latest text-to-image generation technology, LobeChat now allows users to invoke image creation tools directly within conversations with the agent. By leveraging the capabilities of AI tools such as [`DALL-E 3`](https://openai.com/dall-e-3), [`MidJourney`](https://www.midjourney.com/), and [`Pollinations`](https://pollinations.ai/), the agents are now equipped to transform your ideas into images.
203
222
 
@@ -211,7 +230,7 @@ This enables a more private and immersive creative process, allowing for the sea
211
230
 
212
231
  [![][image-feat-plugin]][docs-feat-plugin]
213
232
 
214
- ### `6` [Plugin System (Function Calling)][docs-feat-plugin]
233
+ ### `7` [Plugin System (Function Calling)][docs-feat-plugin]
215
234
 
216
235
  The plugin ecosystem of LobeChat is an important extension of its core functionality, greatly enhancing the practicality and flexibility of the LobeChat assistant.
217
236
 
@@ -246,7 +265,7 @@ In addition, these plugins are not limited to news aggregation, but can also ext
246
265
 
247
266
  [![][image-feat-agent]][docs-feat-agent]
248
267
 
249
- ### `7` [Agent Market (GPTs)][docs-feat-agent]
268
+ ### `8` [Agent Market (GPTs)][docs-feat-agent]
250
269
 
251
270
  In LobeChat Agent Marketplace, creators can discover a vibrant and innovative community that brings together a multitude of well-designed agents,
252
271
  which not only play an important role in work scenarios but also offer great convenience in learning processes.
@@ -291,7 +310,7 @@ Please tell me what issue you would like to explore?<br/>`backtracking-questions
291
310
 
292
311
  [![][image-feat-database]][docs-feat-database]
293
312
 
294
- ### `8` [Support Local / Remote Database][docs-feat-database]
313
+ ### `9` [Support Local / Remote Database][docs-feat-database]
295
314
 
296
315
  LobeChat supports the use of both server-side and local databases. Depending on your needs, you can choose the appropriate deployment solution:
297
316
 
@@ -308,7 +327,7 @@ Regardless of which database you choose, LobeChat can provide you with an excell
308
327
 
309
328
  [![][image-feat-auth]][docs-feat-auth]
310
329
 
311
- ### `9` [Support Multi-User Management][docs-feat-auth]
330
+ ### `10` [Support Multi-User Management][docs-feat-auth]
312
331
 
313
332
  LobeChat supports multi-user management and provides two main user authentication and management solutions to meet different needs:
314
333
 
@@ -326,7 +345,7 @@ Regardless of which user management solution you choose, LobeChat can provide yo
326
345
 
327
346
  [![][image-feat-pwa]][docs-feat-pwa]
328
347
 
329
- ### `10` [Progressive Web App (PWA)][docs-feat-pwa]
348
+ ### `11` [Progressive Web App (PWA)][docs-feat-pwa]
330
349
 
331
350
  We deeply understand the importance of providing a seamless experience for users in today's multi-device environment.
332
351
  Therefore, we have adopted Progressive Web Application ([PWA](https://support.google.com/chrome/answer/9658361)) technology,
@@ -353,7 +372,7 @@ providing smooth animations, responsive layouts, and adapting to different devic
353
372
 
354
373
  [![][image-feat-mobile]][docs-feat-mobile]
355
374
 
356
- ### `11` [Mobile Device Adaptation][docs-feat-mobile]
375
+ ### `12` [Mobile Device Adaptation][docs-feat-mobile]
357
376
 
358
377
  We have carried out a series of optimization designs for mobile devices to enhance the user's mobile experience. Currently, we are iterating on the mobile user experience to achieve smoother and more intuitive interactions. If you have any suggestions or ideas, we welcome you to provide feedback through GitHub Issues or Pull Requests.
359
378
 
@@ -365,7 +384,7 @@ We have carried out a series of optimization designs for mobile devices to enhan
365
384
 
366
385
  [![][image-feat-theme]][docs-feat-theme]
367
386
 
368
- ### `12` [Custom Themes][docs-feat-theme]
387
+ ### `13` [Custom Themes][docs-feat-theme]
369
388
 
370
389
  As a design-engineering-oriented application, LobeChat places great emphasis on users' personalized experiences,
371
390
  hence introducing flexible and diverse theme modes, including a light mode for daytime and a dark mode for nighttime.
@@ -712,6 +731,7 @@ This project is [Apache 2.0](./LICENSE) licensed.
712
731
  [docs-feat-agent]: https://lobehub.com/docs/usage/features/agent-market
713
732
  [docs-feat-auth]: https://lobehub.com/docs/usage/features/auth
714
733
  [docs-feat-database]: https://lobehub.com/docs/usage/features/database
734
+ [docs-feat-knowledgebase]: https://lobehub.com/blog/knowledge-base
715
735
  [docs-feat-local]: https://lobehub.com/docs/usage/features/local-llm
716
736
  [docs-feat-mobile]: https://lobehub.com/docs/usage/features/mobile
717
737
  [docs-feat-plugin]: https://lobehub.com/docs/usage/features/plugin-system
@@ -755,6 +775,7 @@ This project is [Apache 2.0](./LICENSE) licensed.
755
775
  [image-feat-agent]: https://github-production-user-asset-6210df.s3.amazonaws.com/17870709/268670869-f1ffbf66-42b6-42cf-a937-9ce1f8328514.png
756
776
  [image-feat-auth]: https://github.com/lobehub/lobe-chat/assets/17870709/8ce70e15-40df-451e-b700-66090fe5b8c2
757
777
  [image-feat-database]: https://github.com/lobehub/lobe-chat/assets/17870709/c27a0234-a4e9-40e5-8bcb-42d5ce7e40f9
778
+ [image-feat-knowledgebase]: https://github.com/user-attachments/assets/77e58e1c-c82f-4341-b159-f4eeede9967f
758
779
  [image-feat-local]: https://github.com/lobehub/lobe-chat/assets/28616219/ca9a21bc-ea6c-4c90-bf4a-fa53b4fb2b5c
759
780
  [image-feat-mobile]: https://gw.alipayobjects.com/zos/kitchen/R441AuFS4W/mobile.webp
760
781
  [image-feat-plugin]: https://github-production-user-asset-6210df.s3.amazonaws.com/17870709/268670883-33c43a5c-a512-467e-855c-fa299548cce5.png
package/README.zh-CN.md CHANGED
@@ -52,18 +52,19 @@
52
52
 
53
53
  - [👋🏻 开始使用 & 交流](#-开始使用--交流)
54
54
  - [✨ 特性一览](#-特性一览)
55
- - [`1` 多模型服务商支持](#1-多模型服务商支持)
56
- - [`2` 支持本地大语言模型 (LLM)](#2-支持本地大语言模型-llm)
57
- - [`3` 模型视觉识别 (Model Visual)](#3-模型视觉识别-model-visual)
58
- - [`4` TTS & STT 语音会话](#4-tts--stt-语音会话)
59
- - [`5` Text to Image 文生图](#5-text-to-image-文生图)
60
- - [`6` 插件系统 (Function Calling)](#6-插件系统-function-calling)
61
- - [`7` 助手市场 (GPTs)](#7-助手市场-gpts)
62
- - [`8` 支持本地 / 远程数据库](#8-支持本地--远程数据库)
63
- - [`9` 支持多用户管理](#9-支持多用户管理)
64
- - [`10` 渐进式 Web 应用 (PWA)](#10-渐进式-web-应用-pwa)
65
- - [`11` 移动设备适配](#11-移动设备适配)
66
- - [`12` 自定义主题](#12-自定义主题)
55
+ - [`1` 文件上传 / 知识库](#1-文件上传--知识库)
56
+ - [`2` 多模型服务商支持](#2-多模型服务商支持)
57
+ - [`3` 支持本地大语言模型 (LLM)](#3-支持本地大语言模型-llm)
58
+ - [`4` 模型视觉识别 (Model Visual)](#4-模型视觉识别-model-visual)
59
+ - [`5` TTS & STT 语音会话](#5-tts--stt-语音会话)
60
+ - [`6` Text to Image 文生图](#6-text-to-image-文生图)
61
+ - [`7` 插件系统 (Tools Calling)](#7-插件系统-tools-calling)
62
+ - [`8` 助手市场 (GPTs)](#8-助手市场-gpts)
63
+ - [`9` 支持本地 / 远程数据库](#9-支持本地--远程数据库)
64
+ - [`10` 支持多用户管理](#10-支持多用户管理)
65
+ - [`11` 渐进式 Web 应用 (PWA)](#11-渐进式-web-应用-pwa)
66
+ - [`12` 移动设备适配](#12-移动设备适配)
67
+ - [`13` 自定义主题](#13-自定义主题)
67
68
  - [更多特性](#更多特性)
68
69
  - [⚡️ 性能测试](#️-性能测试)
69
70
  - [🛳 开箱即用](#-开箱即用)
@@ -110,9 +111,27 @@
110
111
 
111
112
  ## ✨ 特性一览
112
113
 
114
+ [![][image-feat-knowledgebase]][docs-feat-knowledgebase]
115
+
116
+ ### `1` [文件上传 / 知识库][docs-feat-knowledgebase]
117
+
118
+ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片、音频、视频等多种类型的文件,以及创建知识库,方便用户管理和查找文件。同时在对话中使用文件和知识库功能,实现更加丰富的对话体验。
119
+
120
+ <https://github.com/user-attachments/assets/faa8cf67-e743-4590-8bf6-ebf6ccc34175>
121
+
122
+ > \[!TIP]
123
+ >
124
+ > 查阅 [📘 LobeChat 知识库上线 —— 此刻起,跬步千里](https://lobehub.com/zh/blog/knowledge-base) 了解详情。
125
+
126
+ <div align="right">
127
+
128
+ [![][back-to-top]](#readme-top)
129
+
130
+ </div>
131
+
113
132
  [![][image-feat-privoder]][docs-feat-provider]
114
133
 
115
- ### `1` [多模型服务商支持][docs-feat-provider]
134
+ ### `2` [多模型服务商支持][docs-feat-provider]
116
135
 
117
136
  在 LobeChat 的不断发展过程中,我们深刻理解到在提供 AI 会话服务时模型服务商的多样性对于满足社区需求的重要性。因此,我们不再局限于单一的模型服务商,而是拓展了对多种模型服务商的支持,以便为用户提供更为丰富和多样化的会话选择。
118
137
 
@@ -145,7 +164,7 @@
145
164
 
146
165
  [![][image-feat-local]][docs-feat-local]
147
166
 
148
- ### `2` [支持本地大语言模型 (LLM)][docs-feat-local]
167
+ ### `3` [支持本地大语言模型 (LLM)][docs-feat-local]
149
168
 
150
169
  为了满足特定用户的需求,LobeChat 还基于 [Ollama](https://ollama.ai) 支持了本地模型的使用,让用户能够更灵活地使用自己的或第三方的模型。
151
170
 
@@ -161,7 +180,7 @@
161
180
 
162
181
  [![][image-feat-vision]][docs-feat-vision]
163
182
 
164
- ### `3` [模型视觉识别 (Model Visual)][docs-feat-vision]
183
+ ### `4` [模型视觉识别 (Model Visual)][docs-feat-vision]
165
184
 
166
185
  LobeChat 已经支持 OpenAI 最新的 [`gpt-4-vision`](https://platform.openai.com/docs/guides/vision) 支持视觉识别的模型,这是一个具备视觉识别能力的多模态应用。
167
186
  用户可以轻松上传图片或者拖拽图片到对话框中,助手将能够识别图片内容,并在此基础上进行智能对话,构建更智能、更多元化的聊天场景。
@@ -176,7 +195,7 @@ LobeChat 已经支持 OpenAI 最新的 [`gpt-4-vision`](https://platform.openai.
176
195
 
177
196
  [![][image-feat-tts]][docs-feat-tts]
178
197
 
179
- ### `4` [TTS & STT 语音会话][docs-feat-tts]
198
+ ### `5` [TTS & STT 语音会话][docs-feat-tts]
180
199
 
181
200
  LobeChat 支持文字转语音(Text-to-Speech,TTS)和语音转文字(Speech-to-Text,STT)技术,这使得我们的应用能够将文本信息转化为清晰的语音输出,用户可以像与真人交谈一样与我们的对话助手进行交流。
182
201
  用户可以从多种声音中选择,给助手搭配合适的音源。 同时,对于那些倾向于听觉学习或者想要在忙碌中获取信息的用户来说,TTS 提供了一个极佳的解决方案。
@@ -191,7 +210,7 @@ LobeChat 支持文字转语音(Text-to-Speech,TTS)和语音转文字(Spe
191
210
 
192
211
  [![][image-feat-t2i]][docs-feat-t2i]
193
212
 
194
- ### `5` [Text to Image 文生图][docs-feat-t2i]
213
+ ### `6` [Text to Image 文生图][docs-feat-t2i]
195
214
 
196
215
  支持最新的文本到图片生成技术,LobeChat 现在能够让用户在与助手对话中直接调用文生图工具进行创作。
197
216
  通过利用 [`DALL-E 3`](https://openai.com/dall-e-3)、[`MidJourney`](https://www.midjourney.com/) 和 [`Pollinations`](https://pollinations.ai/) 等 AI 工具的能力, 助手们现在可以将你的想法转化为图像。
@@ -205,7 +224,7 @@ LobeChat 支持文字转语音(Text-to-Speech,TTS)和语音转文字(Spe
205
224
 
206
225
  [![][image-feat-plugin]][docs-feat-plugin]
207
226
 
208
- ### `6` [插件系统 (Function Calling)][docs-feat-plugin]
227
+ ### `7` [插件系统 (Tools Calling)][docs-feat-plugin]
209
228
 
210
229
  LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地增强了 ChatGPT 的实用性和灵活性。
211
230
 
@@ -238,7 +257,7 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地
238
257
 
239
258
  [![][image-feat-agent]][docs-feat-agent]
240
259
 
241
- ### `7` [助手市场 (GPTs)][docs-feat-agent]
260
+ ### `8` [助手市场 (GPTs)][docs-feat-agent]
242
261
 
243
262
  在 LobeChat 的助手市场中,创作者们可以发现一个充满活力和创新的社区,它汇聚了众多精心设计的助手,这些助手不仅在工作场景中发挥着重要作用,也在学习过程中提供了极大的便利。
244
263
  我们的市场不仅是一个展示平台,更是一个协作的空间。在这里,每个人都可以贡献自己的智慧,分享个人开发的助手。
@@ -279,7 +298,7 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地
279
298
 
280
299
  [![][image-feat-database]][docs-feat-database]
281
300
 
282
- ### `8` [支持本地 / 远程数据库][docs-feat-database]
301
+ ### `9` [支持本地 / 远程数据库][docs-feat-database]
283
302
 
284
303
  LobeChat 支持同时使用服务端数据库和本地数据库。根据您的需求,您可以选择合适的部署方案:
285
304
 
@@ -296,7 +315,7 @@ LobeChat 支持同时使用服务端数据库和本地数据库。根据您的
296
315
 
297
316
  [![][image-feat-auth]][docs-feat-auth]
298
317
 
299
- ### `9` [支持多用户管理][docs-feat-auth]
318
+ ### `10` [支持多用户管理][docs-feat-auth]
300
319
 
301
320
  LobeChat 支持多用户管理,提供了两种主要的用户认证和管理方案,以满足不同需求:
302
321
 
@@ -314,7 +333,7 @@ LobeChat 支持多用户管理,提供了两种主要的用户认证和管理
314
333
 
315
334
  [![][image-feat-pwa]][docs-feat-pwa]
316
335
 
317
- ### `10` [渐进式 Web 应用 (PWA)][docs-feat-pwa]
336
+ ### `11` [渐进式 Web 应用 (PWA)][docs-feat-pwa]
318
337
 
319
338
  我们深知在当今多设备环境下为用户提供无缝体验的重要性。为此,我们采用了渐进式 Web 应用 [PWA](https://support.google.com/chrome/answer/9658361) 技术,
320
339
  这是一种能够将网页应用提升至接近原生应用体验的现代 Web 技术。通过 PWA,LobeChat 能够在桌面和移动设备上提供高度优化的用户体验,同时保持轻量级和高性能的特点。
@@ -337,7 +356,7 @@ LobeChat 支持多用户管理,提供了两种主要的用户认证和管理
337
356
 
338
357
  [![][image-feat-mobile]][docs-feat-mobile]
339
358
 
340
- ### `11` [移动设备适配][docs-feat-mobile]
359
+ ### `12` [移动设备适配][docs-feat-mobile]
341
360
 
342
361
  针对移动设备进行了一系列的优化设计,以提升用户的移动体验。目前,我们正在对移动端的用户体验进行版本迭代,以实现更加流畅和直观的交互。如果您有任何建议或想法,我们非常欢迎您通过 GitHub Issues 或者 Pull Requests 提供反馈。
343
362
 
@@ -349,7 +368,7 @@ LobeChat 支持多用户管理,提供了两种主要的用户认证和管理
349
368
 
350
369
  [![][image-feat-theme]][docs-feat-theme]
351
370
 
352
- ### `12` [自定义主题][docs-feat-theme]
371
+ ### `13` [自定义主题][docs-feat-theme]
353
372
 
354
373
  作为设计工程师出身,LobeChat 在界面设计上充分考虑用户的个性化体验,因此引入了灵活多变的主题模式,其中包括日间的亮色模式和夜间的深色模式。
355
374
  除了主题模式的切换,还提供了一系列的颜色定制选项,允许用户根据自己的喜好来调整应用的主题色彩。无论是想要沉稳的深蓝,还是希望活泼的桃粉,或者是专业的灰白,用户都能够在 LobeChat 中找到匹配自己风格的颜色选择。
@@ -733,6 +752,7 @@ This project is [Apache 2.0](./LICENSE) licensed.
733
752
  [docs-feat-agent]: https://lobehub.com/docs/usage/features/agent-market
734
753
  [docs-feat-auth]: https://lobehub.com/docs/usage/features/auth
735
754
  [docs-feat-database]: https://lobehub.com/docs/usage/features/database
755
+ [docs-feat-knowledgebase]: https://lobehub.com/blog/knowledge-base
736
756
  [docs-feat-local]: https://lobehub.com/docs/usage/features/local-llm
737
757
  [docs-feat-mobile]: https://lobehub.com/docs/usage/features/mobile
738
758
  [docs-feat-plugin]: https://lobehub.com/docs/usage/features/plugin-system
@@ -778,6 +798,7 @@ This project is [Apache 2.0](./LICENSE) licensed.
778
798
  [image-feat-agent]: https://github-production-user-asset-6210df.s3.amazonaws.com/17870709/268670869-f1ffbf66-42b6-42cf-a937-9ce1f8328514.png
779
799
  [image-feat-auth]: https://github.com/lobehub/lobe-chat/assets/17870709/8ce70e15-40df-451e-b700-66090fe5b8c2
780
800
  [image-feat-database]: https://github.com/lobehub/lobe-chat/assets/17870709/c27a0234-a4e9-40e5-8bcb-42d5ce7e40f9
801
+ [image-feat-knowledgebase]: https://github.com/user-attachments/assets/77e58e1c-c82f-4341-b159-f4eeede9967f
781
802
  [image-feat-local]: https://github.com/lobehub/lobe-chat/assets/28616219/ca9a21bc-ea6c-4c90-bf4a-fa53b4fb2b5c
782
803
  [image-feat-mobile]: https://gw.alipayobjects.com/zos/kitchen/R441AuFS4W/mobile.webp
783
804
  [image-feat-plugin]: https://github-production-user-asset-6210df.s3.amazonaws.com/17870709/268670883-33c43a5c-a512-467e-855c-fa299548cce5.png
@@ -299,7 +299,7 @@ If you configure using the second method (which is also the default method), you
299
299
 
300
300
  You need to first access the WebUI for configuration:
301
301
 
302
- - If you configured the reverse proxy as mentioned earlier, open `https://lobe-s3-api.example.com`
302
+ - If you configured the reverse proxy as mentioned earlier, open `https://lobe-s3-ui.example.com`
303
303
  - Otherwise, after port mapping, open `http://localhost:9001`
304
304
 
305
305
  1. Enter your `MINIO_ROOT_USER` and `MINIO_ROOT_PASSWORD` on the login screen, then click login.
@@ -296,7 +296,7 @@ docker compose up -d # 重新启动
296
296
 
297
297
  你需要首先访问 WebUI 来进行配置:
298
298
 
299
- - 如果你按照前文配置了反向代理,打开 `https://lobe-s3-api.example.com`
299
+ - 如果你按照前文配置了反向代理,打开 `https://lobe-s3-ui.example.com`
300
300
  - 否则,请在进行端口映射后,打开 `http://localhost:9001`
301
301
 
302
302
  1. 在登录界面输入你设置的 `MINIO_ROOT_USER` 和 `MINIO_ROOT_PASSWORD`,然后点击登录
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.15.2",
3
+ "version": "1.15.4",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -292,7 +292,7 @@
292
292
  "vitest": "~1.2.2",
293
293
  "vitest-canvas-mock": "^0.3.3"
294
294
  },
295
- "packageManager": "pnpm@9.8.0",
295
+ "packageManager": "pnpm@9.9.0",
296
296
  "publishConfig": {
297
297
  "access": "public",
298
298
  "registry": "https://registry.npmjs.org"
package/src/config/llm.ts CHANGED
@@ -57,6 +57,7 @@ export const getLLMConfig = () => {
57
57
 
58
58
  ENABLED_ZEROONE: z.boolean(),
59
59
  ZEROONE_API_KEY: z.string().optional(),
60
+ ZEROONE_MODEL_LIST: z.string().optional(),
60
61
 
61
62
  ENABLED_TOGETHERAI: z.boolean(),
62
63
  TOGETHERAI_API_KEY: z.string().optional(),
@@ -73,12 +74,14 @@ export const getLLMConfig = () => {
73
74
 
74
75
  ENABLED_QWEN: z.boolean(),
75
76
  QWEN_API_KEY: z.string().optional(),
77
+ QWEN_MODEL_LIST: z.string().optional(),
76
78
 
77
79
  ENABLED_STEPFUN: z.boolean(),
78
80
  STEPFUN_API_KEY: z.string().optional(),
79
81
 
80
82
  ENABLED_NOVITA: z.boolean(),
81
83
  NOVITA_API_KEY: z.string().optional(),
84
+ NOVITA_MODEL_LIST: z.string().optional(),
82
85
 
83
86
  ENABLED_BAICHUAN: z.boolean(),
84
87
  BAICHUAN_API_KEY: z.string().optional(),
@@ -154,6 +157,7 @@ export const getLLMConfig = () => {
154
157
 
155
158
  ENABLED_ZEROONE: !!process.env.ZEROONE_API_KEY,
156
159
  ZEROONE_API_KEY: process.env.ZEROONE_API_KEY,
160
+ ZEROONE_MODEL_LIST: process.env.ZEROONE_MODEL_LIST,
157
161
 
158
162
  ENABLED_AWS_BEDROCK: process.env.ENABLED_AWS_BEDROCK === '1',
159
163
  AWS_REGION: process.env.AWS_REGION,
@@ -166,12 +170,14 @@ export const getLLMConfig = () => {
166
170
 
167
171
  ENABLED_QWEN: !!process.env.QWEN_API_KEY,
168
172
  QWEN_API_KEY: process.env.QWEN_API_KEY,
173
+ QWEN_MODEL_LIST: process.env.QWEN_MODEL_LIST,
169
174
 
170
175
  ENABLED_STEPFUN: !!process.env.STEPFUN_API_KEY,
171
176
  STEPFUN_API_KEY: process.env.STEPFUN_API_KEY,
172
177
 
173
178
  ENABLED_NOVITA: !!process.env.NOVITA_API_KEY,
174
179
  NOVITA_API_KEY: process.env.NOVITA_API_KEY,
180
+ NOVITA_MODEL_LIST: process.env.NOVITA_MODEL_LIST,
175
181
 
176
182
  ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
177
183
  BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
@@ -10,7 +10,7 @@ const Anthropic: ModelProviderCard = {
10
10
  enabled: true,
11
11
  functionCall: true,
12
12
  id: 'claude-3-5-sonnet-20240620',
13
- maxOutput: 4096,
13
+ maxOutput: 8192,
14
14
  tokens: 200_000,
15
15
  vision: true,
16
16
  },
@@ -1,30 +1,47 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref: https://novita.ai/model-api/product/llm-api
3
4
  const Novita: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
6
- displayName: 'Llama3 8B Instruct',
7
+ displayName: 'Llama3.1 8B Instruct',
8
+ enabled: true,
9
+ id: 'meta-llama/llama-3.1-8b-instruct',
10
+ tokens: 8192,
11
+ },
12
+ {
13
+ displayName: 'Llama3.1 70B Instruct',
7
14
  enabled: true,
15
+ id: 'meta-llama/llama-3.1-70b-instruct',
16
+ tokens: 131_072,
17
+ },
18
+ {
19
+ displayName: 'Llama3.1 405B Instruct',
20
+ enabled: true,
21
+ id: 'meta-llama/llama-3.1-405b-instruct',
22
+ tokens: 32_768,
23
+ },
24
+ {
25
+ displayName: 'Llama3 8B Instruct',
8
26
  id: 'meta-llama/llama-3-8b-instruct',
9
27
  tokens: 8192,
10
28
  },
11
29
  {
12
30
  displayName: 'Llama3 70B Instruct',
13
- enabled: true,
14
31
  id: 'meta-llama/llama-3-70b-instruct',
15
32
  tokens: 8192,
16
33
  },
17
34
  {
18
- displayName: 'Nous Hermes 2 Pro - Llama3 8B',
35
+ displayName: 'Gemma 2 9B',
19
36
  enabled: true,
20
- id: 'nousresearch/hermes-2-pro-llama-3-8b',
37
+ id: 'google/gemma-2-9b-it',
21
38
  tokens: 8192,
22
39
  },
23
40
  {
24
- displayName: 'Nous Hermes - Llama2 8B',
41
+ displayName: 'Mistral Nemo',
25
42
  enabled: true,
26
- id: 'nousresearch/nous-hermes-llama2-13b',
27
- tokens: 4096,
43
+ id: 'mistralai/mistral-nemo',
44
+ tokens: 32_768,
28
45
  },
29
46
  {
30
47
  displayName: 'Mistral 7B Instruct',
@@ -33,55 +50,44 @@ const Novita: ModelProviderCard = {
33
50
  tokens: 32_768,
34
51
  },
35
52
  {
36
- displayName: 'Dolphin Mixtral 8x22B',
53
+ displayName: 'WizardLM 2 7B',
37
54
  enabled: true,
38
- id: 'cognitivecomputations/dolphin-mixtral-8x22b',
39
- tokens: 16_000,
55
+ id: 'microsoft/wizardlm-2-7b',
56
+ tokens: 32_768,
40
57
  },
41
58
  {
42
- displayName: 'L3-70b-Euryale-v2.1',
59
+ displayName: 'WizardLM-2 8x22B',
43
60
  enabled: true,
44
- id: 'sao10k/l3-70b-euryale-v2.1',
45
- tokens: 16_000,
61
+ id: 'microsoft/wizardlm-2-8x22b',
62
+ tokens: 65_535,
46
63
  },
47
64
  {
48
- displayName: 'Midnight Rose 70B',
49
- enabled: true,
50
- id: 'sophosympatheia/midnight-rose-70b',
51
- tokens: 4096,
65
+ displayName: 'Dolphin Mixtral 8x22B',
66
+ id: 'cognitivecomputations/dolphin-mixtral-8x22b',
67
+ tokens: 16_000,
52
68
  },
53
69
  {
54
- displayName: 'Mythomax L2 13b',
55
- enabled: true,
56
- id: 'gryphe/mythomax-l2-13b',
57
- tokens: 4096,
70
+ displayName: 'Hermes 2 Pro Llama 3 8B',
71
+ id: 'nousresearch/hermes-2-pro-llama-3-8b',
72
+ tokens: 8192,
58
73
  },
59
74
  {
60
- displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO',
61
- enabled: true,
75
+ displayName: 'Hermes 2 Mixtral 8x7B DPO',
62
76
  id: 'Nous-Hermes-2-Mixtral-8x7B-DPO',
63
77
  tokens: 32_768,
64
78
  },
65
79
  {
66
- displayName: 'Lzlv 70b',
67
- enabled: true,
68
- id: 'lzlv_70b',
80
+ displayName: 'MythoMax l2 13B',
81
+ id: 'gryphe/mythomax-l2-13b',
69
82
  tokens: 4096,
70
83
  },
71
84
  {
72
- displayName: 'Open Hermes 2.5 Mistral 7B',
73
- enabled: true,
74
- id: 'teknium/openhermes-2.5-mistral-7b',
85
+ displayName: 'OpenChat 7B',
86
+ id: 'openchat/openchat-7b',
75
87
  tokens: 4096,
76
88
  },
77
- {
78
- displayName: 'Wizardlm2 8x22B',
79
- enabled: true,
80
- id: 'microsoft/wizardlm-2-8x22b',
81
- tokens: 65_535,
82
- },
83
89
  ],
84
- checkModel: 'meta-llama/llama-3-70b-instruct',
90
+ checkModel: 'meta-llama/llama-3.1-8b-instruct',
85
91
  disableBrowserRequest: true,
86
92
  id: 'novita',
87
93
  modelList: { showModelFetcher: true },
@@ -16,7 +16,7 @@ const Qwen: ModelProviderCard = {
16
16
  enabled: true,
17
17
  functionCall: true,
18
18
  id: 'qwen-turbo',
19
- tokens: 8192,
19
+ tokens: 8000, // https://www.alibabacloud.com/help/zh/model-studio/developer-reference/use-qwen-by-calling-api
20
20
  },
21
21
  {
22
22
  description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入',
@@ -24,7 +24,7 @@ const Qwen: ModelProviderCard = {
24
24
  enabled: true,
25
25
  functionCall: true,
26
26
  id: 'qwen-plus',
27
- tokens: 130_000,
27
+ tokens: 131_072, // https://help.aliyun.com/zh/dashscope/developer-reference/model-introduction
28
28
  },
29
29
  {
30
30
  description:
@@ -33,7 +33,7 @@ const Qwen: ModelProviderCard = {
33
33
  enabled: true,
34
34
  functionCall: true,
35
35
  id: 'qwen-max',
36
- tokens: 8192,
36
+ tokens: 8000,
37
37
  },
38
38
  {
39
39
  description:
@@ -41,7 +41,7 @@ const Qwen: ModelProviderCard = {
41
41
  displayName: 'Qwen Max LongContext',
42
42
  functionCall: true,
43
43
  id: 'qwen-max-longcontext',
44
- tokens: 30_720,
44
+ tokens: 30_000,
45
45
  },
46
46
  {
47
47
  description:
@@ -70,36 +70,37 @@ const Qwen: ModelProviderCard = {
70
70
  tokens: 32_768,
71
71
  vision: true,
72
72
  },
73
+ // ref https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-7b-14b-72b-api-detailes
73
74
  {
74
75
  description: '通义千问2对外开源的7B规模的模型',
75
76
  displayName: 'Qwen2 7B',
76
77
  id: 'qwen2-7b-instruct',
77
- tokens: 128_000,
78
+ tokens: 131_072, // https://huggingface.co/Qwen/Qwen2-7B-Instruct
78
79
  },
79
80
  {
80
81
  description: '通义千问2对外开源的57B规模14B激活参数的MOE模型',
81
82
  displayName: 'Qwen2 57B-A14B MoE',
82
83
  id: 'qwen2-57b-a14b-instruct',
83
- tokens: 32_768,
84
+ tokens: 65_536, // https://huggingface.co/Qwen/Qwen2-57B-A14B-Instruct
84
85
  },
85
86
  {
86
87
  description: '通义千问2对外开源的72B规模的模型',
87
88
  displayName: 'Qwen2 72B',
88
89
  id: 'qwen2-72b-instruct',
89
- tokens: 128_000,
90
+ tokens: 131_072, // https://huggingface.co/Qwen/Qwen2-72B-Instruct
90
91
  },
91
92
  {
92
93
  description: 'Qwen2-Math 模型具有强大的数学解题能力',
93
94
  displayName: 'Qwen2 Math 72B',
94
95
  id: 'qwen2-math-72b-instruct',
95
- tokens: 128_000,
96
+ tokens: 4096, // https://help.aliyun.com/zh/dashscope/developer-reference/use-qwen2-math-by-calling-api
96
97
  },
97
98
  {
98
99
  description:
99
100
  '以 Qwen-7B 语言模型初始化,添加图像模型,图像输入分辨率为448的预训练模型。',
100
101
  displayName: 'Qwen VL',
101
102
  id: 'qwen-vl-v1',
102
- tokens: 8192,
103
+ tokens: 8192, // https://huggingface.co/Qwen/Qwen-VL/blob/main/config.json
103
104
  vision: true,
104
105
  },
105
106
  {
@@ -107,12 +108,12 @@ const Qwen: ModelProviderCard = {
107
108
  '通义千问VL支持灵活的交互方式,包括多图、多轮问答、创作等能力的模型。',
108
109
  displayName: 'Qwen VL Chat',
109
110
  id: 'qwen-vl-chat-v1',
110
- tokens: 8192,
111
+ tokens: 8192, // https://huggingface.co/Qwen/Qwen-VL-Chat/blob/main/config.json
111
112
  vision: true,
112
113
  },
113
114
  ],
114
115
  checkModel: 'qwen-turbo',
115
- disableBrowserRequest: true,
116
+ disableBrowserRequest: true, // CORS issue
116
117
  id: 'qwen',
117
118
  modelList: { showModelFetcher: true },
118
119
  name: 'Qwen',
@@ -1,11 +1,12 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
3
  // ref https://platform.stepfun.com/docs/llm/text
4
+ // 根据文档,阶级星辰大模型的上下文长度,其 k 的含义为 1000
4
5
  const Stepfun: ModelProviderCard = {
5
6
  chatModels: [
6
7
  {
7
8
  id: 'step-2-16k-nightly',
8
- tokens: 16_384,
9
+ tokens: 16_000,
9
10
  },
10
11
  {
11
12
  id: 'step-1-256k',
@@ -19,27 +20,31 @@ const Stepfun: ModelProviderCard = {
19
20
  {
20
21
  enabled: true,
21
22
  id: 'step-1-32k',
22
- tokens: 32_768,
23
+ tokens: 32_000,
23
24
  },
24
25
  {
25
- enabled: true,
26
26
  id: 'step-1-8k',
27
- tokens: 8192,
27
+ tokens: 8000,
28
+ },
29
+ {
30
+ enabled: true,
31
+ id: 'step-1-flash',
32
+ tokens: 8000,
28
33
  },
29
34
  {
30
35
  enabled: true,
31
36
  id: 'step-1v-32k',
32
- tokens: 32_768,
37
+ tokens: 32_000,
33
38
  vision: true,
34
39
  },
35
40
  {
36
41
  enabled: true,
37
42
  id: 'step-1v-8k',
38
- tokens: 8192,
43
+ tokens: 8000,
39
44
  vision: true,
40
45
  },
41
46
  ],
42
- checkModel: 'step-1-8k',
47
+ checkModel: 'step-1-flash',
43
48
  // after test, currently https://api.stepfun.com/v1/chat/completions has the CORS issue
44
49
  // So we should close the browser request mode
45
50
  disableBrowserRequest: true,
@@ -4,11 +4,14 @@ import { fileEnv } from '@/config/file';
4
4
  import { langfuseEnv } from '@/config/langfuse';
5
5
  import { getLLMConfig } from '@/config/llm';
6
6
  import {
7
+ NovitaProviderCard,
7
8
  OllamaProviderCard,
8
9
  OpenAIProviderCard,
9
10
  OpenRouterProviderCard,
11
+ QwenProviderCard,
10
12
  SiliconCloudProviderCard,
11
13
  TogetherAIProviderCard,
14
+ ZeroOneProviderCard,
12
15
  ZhiPuProviderCard,
13
16
  } from '@/config/modelProviders';
14
17
  import { enableNextAuth } from '@/const/auth';
@@ -37,8 +40,13 @@ export const getServerGlobalConfig = () => {
37
40
  ENABLED_ANTHROPIC,
38
41
  ENABLED_MINIMAX,
39
42
  ENABLED_MISTRAL,
43
+
40
44
  ENABLED_NOVITA,
45
+ NOVITA_MODEL_LIST,
46
+
41
47
  ENABLED_QWEN,
48
+ QWEN_MODEL_LIST,
49
+
42
50
  ENABLED_STEPFUN,
43
51
  ENABLED_BAICHUAN,
44
52
  ENABLED_TAICHU,
@@ -60,6 +68,8 @@ export const getServerGlobalConfig = () => {
60
68
  OPENROUTER_MODEL_LIST,
61
69
 
62
70
  ENABLED_ZEROONE,
71
+ ZEROONE_MODEL_LIST,
72
+
63
73
  ENABLED_TOGETHERAI,
64
74
  TOGETHERAI_MODEL_LIST,
65
75
  } = getLLMConfig();
@@ -93,7 +103,14 @@ export const getServerGlobalConfig = () => {
93
103
  minimax: { enabled: ENABLED_MINIMAX },
94
104
  mistral: { enabled: ENABLED_MISTRAL },
95
105
  moonshot: { enabled: ENABLED_MOONSHOT },
96
- novita: { enabled: ENABLED_NOVITA },
106
+ novita: {
107
+ enabled: ENABLED_NOVITA,
108
+ enabledModels: extractEnabledModels(NOVITA_MODEL_LIST),
109
+ serverModelCards: transformToChatModelCards({
110
+ defaultChatModels: NovitaProviderCard.chatModels,
111
+ modelString: NOVITA_MODEL_LIST,
112
+ }),
113
+ },
97
114
  ollama: {
98
115
  enabled: ENABLED_OLLAMA,
99
116
  fetchOnClient: !OLLAMA_PROXY_URL,
@@ -120,7 +137,14 @@ export const getServerGlobalConfig = () => {
120
137
  }),
121
138
  },
122
139
  perplexity: { enabled: ENABLED_PERPLEXITY },
123
- qwen: { enabled: ENABLED_QWEN },
140
+ qwen: {
141
+ enabled: ENABLED_QWEN,
142
+ enabledModels: extractEnabledModels(QWEN_MODEL_LIST),
143
+ serverModelCards: transformToChatModelCards({
144
+ defaultChatModels: QwenProviderCard.chatModels,
145
+ modelString: QWEN_MODEL_LIST,
146
+ }),
147
+ },
124
148
  siliconcloud: {
125
149
  enabled: ENABLED_SILICONCLOUD,
126
150
  enabledModels: extractEnabledModels(SILICONCLOUD_MODEL_LIST),
@@ -141,7 +165,14 @@ export const getServerGlobalConfig = () => {
141
165
  }),
142
166
  },
143
167
  upstage: { enabled: ENABLED_UPSTAGE },
144
- zeroone: { enabled: ENABLED_ZEROONE },
168
+ zeroone: {
169
+ enabled: ENABLED_ZEROONE,
170
+ enabledModels: extractEnabledModels(ZEROONE_MODEL_LIST),
171
+ serverModelCards: transformToChatModelCards({
172
+ defaultChatModels: ZeroOneProviderCard.chatModels,
173
+ modelString: ZEROONE_MODEL_LIST,
174
+ }),
175
+ },
145
176
  zhipu: {
146
177
  enabled: ENABLED_ZHIPU,
147
178
  enabledModels: extractEnabledModels(ZHIPU_MODEL_LIST),