@lobehub/chat 1.31.6 → 1.31.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/Dockerfile +3 -1
- package/Dockerfile.database +3 -1
- package/README.md +2 -2
- package/README.zh-CN.md +2 -2
- package/docs/usage/providers/cloudflare.mdx +82 -0
- package/docs/usage/providers/cloudflare.zh-CN.mdx +79 -0
- package/package.json +1 -1
- package/src/app/(main)/chat/(workspace)/@conversation/default.tsx +1 -3
- package/src/app/(main)/chat/(workspace)/@conversation/features/ChatInput/index.tsx +12 -0
- package/src/config/llm.ts +0 -66
- package/src/server/globalConfig/genServerLLMConfig.test.ts +5 -1
- package/src/server/globalConfig/genServerLLMConfig.ts +3 -2
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/FilePreview/FileItem/Content.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/FilePreview/FileItem/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/FilePreview/FileItem/style.ts +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/FilePreview/FileItem/utils.ts +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/FilePreview/FileList.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/FilePreview/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/Footer/SendMore.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/Footer/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/Header/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/TextArea.test.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/TextArea.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/__tests__/useAutoFocus.test.ts +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Desktop/useAutoFocus.ts +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/Files/FileItem/File.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/Files/FileItem/Image.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/Files/FileItem/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/Files/FileItem/style.ts +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/Files/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/InputArea/Container.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/InputArea/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/Send.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/Mobile/index.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/components/UploadDetail/UploadStatus.tsx +0 -0
- /package/src/{app/(main)/chat/(workspace)/@conversation/features → features}/ChatInput/components/UploadDetail/index.tsx +0 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@

 # Changelog

+### [Version 1.31.8](https://github.com/lobehub/lobe-chat/compare/v1.31.7...v1.31.8)
+
+<sup>Released on **2024-11-15**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Move ChatInput to features/ChatInput.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Move ChatInput to features/ChatInput, closes [#4699](https://github.com/lobehub/lobe-chat/issues/4699) ([5737d3f](https://github.com/lobehub/lobe-chat/commit/5737d3f))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.31.7](https://github.com/lobehub/lobe-chat/compare/v1.31.6...v1.31.7)
+
+<sup>Released on **2024-11-15**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: `genServerLLMConfig` function, get \*\_MODEL_LIST from env.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: `genServerLLMConfig` function, get \*\_MODEL_LIST from env, closes [#4694](https://github.com/lobehub/lobe-chat/issues/4694) ([d84f369](https://github.com/lobehub/lobe-chat/commit/d84f369))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.31.6](https://github.com/lobehub/lobe-chat/compare/v1.31.5...v1.31.6)

 <sup>Released on **2024-11-13**</sup>
package/Dockerfile
CHANGED
@@ -152,6 +152,8 @@ ENV \
 AZURE_API_KEY="" AZURE_API_VERSION="" AZURE_ENDPOINT="" AZURE_MODEL_LIST="" \
 # Baichuan
 BAICHUAN_API_KEY="" BAICHUAN_MODEL_LIST="" \
+# Cloudflare
+CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
 # DeepSeek
 DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
 # Fireworks AI
@@ -175,7 +177,7 @@ ENV \
 # Novita
 NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
 # Ollama
-OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
+ENABLED_OLLAMA="" OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
 # OpenAI
 OPENAI_API_KEY="" OPENAI_MODEL_LIST="" OPENAI_PROXY_URL="" \
 # OpenRouter
package/Dockerfile.database
CHANGED
@@ -187,6 +187,8 @@ ENV \
 AZURE_API_KEY="" AZURE_API_VERSION="" AZURE_ENDPOINT="" AZURE_MODEL_LIST="" \
 # Baichuan
 BAICHUAN_API_KEY="" BAICHUAN_MODEL_LIST="" \
+# Cloudflare
+CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
 # DeepSeek
 DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
 # Fireworks AI
@@ -210,7 +212,7 @@ ENV \
 # Novita
 NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
 # Ollama
-OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
+ENABLED_OLLAMA="" OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
 # OpenAI
 OPENAI_API_KEY="" OPENAI_MODEL_LIST="" OPENAI_PROXY_URL="" \
 # OpenRouter
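Both Dockerfiles gain the same two adjustments: the Cloudflare provider variables are now pre-declared, and Ollama gets an explicit `ENABLED_OLLAMA` switch. For orientation, a minimal TypeScript sketch of how these variables are evaluated on the server, condensed from the `runtimeEnv` mapping in `src/config/llm.ts` later in this diff (a simplification, not the full config):

```ts
// Condensed sketch only; the real mapping lives in src/config/llm.ts (see below).
const env = process.env;

// Cloudflare is treated as enabled only when both credentials are present.
const enabledCloudflare =
  !!env.CLOUDFLARE_API_KEY && !!env.CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID;

// Ollama stays enabled unless it is explicitly switched off with ENABLED_OLLAMA=0.
const enabledOllama = env.ENABLED_OLLAMA !== '0';

console.log({ enabledCloudflare, enabledOllama });
```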
package/README.md
CHANGED
@@ -287,12 +287,12 @@ Our marketplace is not just a showcase platform but also a collaborative space.

 | Recent Submits | Description |
 | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Thinking Claude](https://chat-preview.lobehub.com/market?agent=thinking-claude)<br/><sup>By **[AnoyiX](https://github.com/AnoyiX)** on **2024-11-14**</sup> | Let Claude think comprehensively before responding!<br/>`common` |
 | [Machine Vision LaTeX](https://chat-preview.lobehub.com/market?agent=cv-latex)<br/><sup>By **[5xiao0qing5](https://github.com/5xiao0qing5)** on **2024-10-29**</sup> | Specializes in explaining concepts of machine learning and deep learning<br/>`machine-learning` `deep-learning` `image-processing` `computer-vision` `la-te-x` |
 | [Domain Analysis Master](https://chat-preview.lobehub.com/market?agent=domain)<br/><sup>By **[ccbikai](https://github.com/ccbikai)** on **2024-10-29**</sup> | Expert in domain analysis and humorous suggestions<br/>`domain-analysis` `humor` `culture` `website-suggestions` `purchase-advice` |
 | [Ingo Hausmann](https://chat-preview.lobehub.com/market?agent=pc-beschaffung-ingo-hausmann)<br/><sup>By **[bionicprompter](https://github.com/bionicprompter)** on **2024-10-29**</sup> | Ingo Hausmann wants to be advised on a new purchase of PCs<br/>`company` `hardware` `needs-assessment` `it` `applications` |
-| [Print to Table](https://chat-preview.lobehub.com/market?agent=print-to-table)<br/><sup>By **[printtotable](https://github.com/printtotable)** on **2024-10-29**</sup> | Transform data from images into organized tables in Excel.<br/>`data-extraction` `tables` `advertising` `influencer` `excel` |

-> 📊 Total agents: [<kbd>**
+> 📊 Total agents: [<kbd>**431**</kbd> ](https://github.com/lobehub/lobe-chat-agents)

 <!-- AGENT LIST -->

package/README.zh-CN.md
CHANGED
@@ -276,12 +276,12 @@ LobeChat 的插件生态系统是其核心功能的重要扩展，它极大地

 | 最近新增 | 助手说明 |
 | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
+| [思考克劳德](https://chat-preview.lobehub.com/market?agent=thinking-claude)<br/><sup>By **[AnoyiX](https://github.com/AnoyiX)** on **2024-11-14**</sup> | 让克劳德在回应之前全面思考！<br/>`常见` |
 | [机器视觉 latex](https://chat-preview.lobehub.com/market?agent=cv-latex)<br/><sup>By **[5xiao0qing5](https://github.com/5xiao0qing5)** on **2024-10-29**</sup> | 擅长机器学习与深度学习概念解析<br/>`机器学习` `深度学习` `图像处理` `计算机视觉` `la-te-x` |
 | [域名分析大师](https://chat-preview.lobehub.com/market?agent=domain)<br/><sup>By **[ccbikai](https://github.com/ccbikai)** on **2024-10-29**</sup> | 擅长域名分析与幽默建议<br/>`域名分析` `幽默` `文化` `建站建议` `购买建议` |
 | [英戈・豪斯曼](https://chat-preview.lobehub.com/market?agent=pc-beschaffung-ingo-hausmann)<br/><sup>By **[bionicprompter](https://github.com/bionicprompter)** on **2024-10-29**</sup> | 英戈・豪斯曼希望就新购置的电脑进行咨询<br/>`公司` `硬件` `需求分析` `it` `应用` |
-| [打印到表格](https://chat-preview.lobehub.com/market?agent=print-to-table)<br/><sup>By **[printtotable](https://github.com/printtotable)** on **2024-10-29**</sup> | 将图像数据转换为 Excel 中的组织表格。<br/>`数据提取` `表格` `广告` `影响者` `excel` |

-> 📊 Total agents: [<kbd>**
+> 📊 Total agents: [<kbd>**431**</kbd> ](https://github.com/lobehub/lobe-chat-agents)

 <!-- AGENT LIST -->

package/docs/usage/providers/cloudflare.mdx
ADDED
@@ -0,0 +1,82 @@
+---
+title: Using Cloudflare Workers AI in LobeChat
+description: Learn how to integrate and utilize Cloudflare Workers AI Models in LobeChat.
+tags:
+  - LobeChat
+  - Cloudflare
+  - Workers AI
+  - Provider
+  - API Key
+  - Web UI
+---
+
+# Using Cloudflare Workers AI in LobeChat
+
+<Image
+  cover
+  src={'https://github.com/user-attachments/assets/91fe32a8-e5f0-47ff-b8ae-d036c8a7bff1'}
+/>
+
+[Cloudflare Workers AI](https://www.cloudflare.com/developer-platform/products/workers-ai/) is a service that integrates AI capabilities into the Cloudflare Workers serverless computing platform. Its core functionality lies in delivering fast, scalable computing power through Cloudflare's global network, thereby reducing operational overhead.
+
+This document will guide you on how to use Cloudflare Workers AI in LobeChat:
+
+<Steps>
+
+### Step 1: Obtain Your Cloudflare Workers AI API Key
+
+- Visit the [Cloudflare website](https://www.cloudflare.com/) and sign up for an account.
+- Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/).
+- In the left-hand menu, locate the `AI` > `Workers AI` option.
+
+<Image
+  alt={'Cloudflare Workers AI'}
+  inStep
+  src={'https://github.com/user-attachments/assets/4257e123-9018-4562-ac66-0f39278906f5'}
+/>
+
+- In the `Using REST API` section, click the `Create Workers AI API Token` button.
+- In the drawer dialog, copy and save your `API token`.
+- Also, copy and save your `Account ID`.
+
+<Image
+  alt={'Cloudflare Workers AI API Token'}
+  inStep
+  src={'https://github.com/user-attachments/assets/f54c912d-3ee9-4f85-b8bf-619790e51b49'}
+/>
+
+<Callout type={"warning"}>
+
+- Please store your API token securely, as it will only be displayed once. If you accidentally lose it, you will need to create a new token.
+
+</Callout>
+
+### Step 2: Configure Cloudflare Workers AI in LobeChat
+
+- Go to the `Settings` interface in LobeChat.
+- Under `Language Model`, find the `Cloudflare` settings.
+
+<Image
+  alt={'Input API Token'}
+  inStep
+  src={'https://github.com/user-attachments/assets/82a7ebe0-69ad-43b6-8767-1316b443fa03'}
+/>
+
+- Enter the `API Token` you obtained.
+- Input your `Account ID`.
+- Choose a Cloudflare Workers AI model for your AI assistant to start the conversation.
+
+<Image
+  alt={'Choose Cloudflare Workers AI Model and Start Conversation'}
+  inStep
+  src={'https://github.com/user-attachments/assets/09be499c-3b04-4dd6-a161-6e8ebe788354'}
+/>
+
+<Callout type={'warning'}>
+  You may incur charges while using the API service, please refer to Cloudflare's pricing policy for
+  details.
+</Callout>
+
+</Steps>
+
+At this point, you can start conversing with the model provided by Cloudflare Workers AI in LobeChat.
package/docs/usage/providers/cloudflare.zh-CN.mdx
ADDED
@@ -0,0 +1,79 @@
+---
+title: 在 LobeChat 中使用 Cloudflare Workers AI
+description: 学习如何在 LobeChat 中配置和使用 Cloudflare Workers AI 的 API Key，以便开始对话和交互。
+tags:
+  - LobeChat
+  - Cloudflare
+  - Workers AI
+  - 供应商
+  - API密钥
+  - Web UI
+---
+
+# 在 LobeChat 中使用 Cloudflare Workers AI
+
+<Image
+  cover
+  src={'https://github.com/user-attachments/assets/91fe32a8-e5f0-47ff-b8ae-d036c8a7bff1'}
+/>
+
+[Cloudflare Workers AI](https://www.cloudflare.com/developer-platform/products/workers-ai/) 是一种将人工智能能力集成到 Cloudflare Workers 无服务器计算平台的服务。其核心功能在于通过 Cloudflare 的全球网络提供快速、可扩展的计算能力，降低运维开销。
+
+本文档将指导你如何在 LobeChat 中使用 Cloudflare Workers AI:
+
+<Steps>
+
+### 步骤一: 获取 Cloudflare Workers AI 的 API Key
+
+- 访问 [Cloudflare 官网](https://www.cloudflare.com/) 并注册一个账号。
+- 登录 [Cloudflare 控制台](https://dash.cloudflare.com/).
+- 在左侧的菜单中找到 `AI` > `Workers AI` 选项。
+
+<Image
+  alt={'Cloudflare Workers AI'}
+  inStep
+  src={'https://github.com/user-attachments/assets/4257e123-9018-4562-ac66-0f39278906f5'}
+/>
+
+- 在 `使用 REST API` 中点击 `创建 Workers AI API 令牌` 按钮
+- 在弹出的侧边栏中复制并保存你的 `API 令牌`
+- 同时也复制并保存你的 `账户ID`
+
+<Image
+  alt={'Cloudflare Workers AI API Token'}
+  inStep
+  src={'https://github.com/user-attachments/assets/f54c912d-3ee9-4f85-b8bf-619790e51b49'}
+/>
+
+<Callout type={'warning'}>
+  - 请安全地存储 API 令牌，因为它只会出现一次。如果您意外丢失它，您将需要创建一个新令牌。
+</Callout>
+
+### 步骤二: 在 LobeChat 中配置 Cloudflare Workers AI
+
+- 访问 LobeChat 的`设置`界面
+- 在`语言模型`下找到 `Cloudflare` 的设置项
+
+<Image
+  alt={'填入访问令牌'}
+  inStep
+  src={'https://github.com/user-attachments/assets/82a7ebe0-69ad-43b6-8767-1316b443fa03'}
+/>
+
+- 填入获得的 `API 令牌`
+- 填入你的`账户ID`
+- 为你的 AI 助手选择一个 Cloudflare Workers AI 的模型即可开始对话
+
+<Image
+  alt={'选择 Cloudflare Workers AI 模型并开始对话'}
+  inStep
+  src={'https://github.com/user-attachments/assets/09be499c-3b04-4dd6-a161-6e8ebe788354'}
+/>
+
+<Callout type={'warning'}>
+  在使用过程中你可能需要向 API 服务提供商付费，请参考 Cloudflare 的相关费用政策。
+</Callout>
+
+</Steps>
+
+至此你已经可以在 LobeChat 中使用 Cloudflare Workers AI 提供的模型进行对话了。
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.31.
+  "version": "1.31.8",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/(main)/chat/(workspace)/@conversation/default.tsx
CHANGED
@@ -1,14 +1,12 @@
 import { isMobileDevice } from '@/utils/server/responsive';

 import ChatHydration from './features/ChatHydration';
-import
-import MobileChatInput from './features/ChatInput/Mobile';
+import ChatInput from './features/ChatInput';
 import ChatList from './features/ChatList';
 import ZenModeToast from './features/ZenModeToast';

 const ChatConversation = () => {
   const mobile = isMobileDevice();
-  const ChatInput = mobile ? MobileChatInput : DesktopChatInput;

   return (
     <>
package/src/app/(main)/chat/(workspace)/@conversation/features/ChatInput/index.tsx
ADDED
@@ -0,0 +1,12 @@
+import DesktopChatInput from '@/features/ChatInput/Desktop';
+import MobileChatInput from '@/features/ChatInput/Mobile';
+import { isMobileDevice } from '@/utils/server/responsive';
+
+const ChatInput = () => {
+  const mobile = isMobileDevice();
+  const Input = mobile ? MobileChatInput : DesktopChatInput;
+
+  return <Input />;
+};
+
+export default ChatInput;
package/src/config/llm.ts
CHANGED
@@ -10,77 +10,60 @@ export const getLLMConfig = () => {
 ENABLED_OPENAI: z.boolean(),
 OPENAI_API_KEY: z.string().optional(),
 OPENAI_PROXY_URL: z.string().optional(),
-OPENAI_MODEL_LIST: z.string().optional(),

 ENABLED_AZURE_OPENAI: z.boolean(),
 AZURE_API_KEY: z.string().optional(),
 AZURE_API_VERSION: z.string().optional(),
 AZURE_ENDPOINT: z.string().optional(),
-AZURE_MODEL_LIST: z.string().optional(),

 ENABLED_ZHIPU: z.boolean(),
 ZHIPU_API_KEY: z.string().optional(),
-ZHIPU_MODEL_LIST: z.string().optional(),

 ENABLED_DEEPSEEK: z.boolean(),
 DEEPSEEK_API_KEY: z.string().optional(),
-DEEPSEEK_MODEL_LIST: z.string().optional(),

 ENABLED_GOOGLE: z.boolean(),
 GOOGLE_API_KEY: z.string().optional(),
 GOOGLE_PROXY_URL: z.string().optional(),
-GOOGLE_MODEL_LIST: z.string().optional(),

 ENABLED_MOONSHOT: z.boolean(),
 MOONSHOT_API_KEY: z.string().optional(),
-MOONSHOT_MODEL_LIST: z.string().optional(),
 MOONSHOT_PROXY_URL: z.string().optional(),

 ENABLED_PERPLEXITY: z.boolean(),
 PERPLEXITY_API_KEY: z.string().optional(),
-PERPLEXITY_MODEL_LIST: z.string().optional(),
 PERPLEXITY_PROXY_URL: z.string().optional(),

 ENABLED_ANTHROPIC: z.boolean(),
 ANTHROPIC_API_KEY: z.string().optional(),
-ANTHROPIC_MODEL_LIST: z.string().optional(),
 ANTHROPIC_PROXY_URL: z.string().optional(),

 ENABLED_MINIMAX: z.boolean(),
-MINIMAX_MODEL_LIST: z.string().optional(),
 MINIMAX_API_KEY: z.string().optional(),

 ENABLED_MISTRAL: z.boolean(),
 MISTRAL_API_KEY: z.string().optional(),
-MISTRAL_MODEL_LIST: z.string().optional(),

 ENABLED_GROQ: z.boolean(),
 GROQ_API_KEY: z.string().optional(),
-GROQ_MODEL_LIST: z.string().optional(),
 GROQ_PROXY_URL: z.string().optional(),

 ENABLED_GITHUB: z.boolean(),
 GITHUB_TOKEN: z.string().optional(),
-GITHUB_MODEL_LIST: z.string().optional(),

 ENABLED_OPENROUTER: z.boolean(),
 OPENROUTER_API_KEY: z.string().optional(),
-OPENROUTER_MODEL_LIST: z.string().optional(),

 ENABLED_ZEROONE: z.boolean(),
 ZEROONE_API_KEY: z.string().optional(),
-ZEROONE_MODEL_LIST: z.string().optional(),

 ENABLED_TOGETHERAI: z.boolean(),
 TOGETHERAI_API_KEY: z.string().optional(),
-TOGETHERAI_MODEL_LIST: z.string().optional(),

 ENABLED_FIREWORKSAI: z.boolean(),
 FIREWORKSAI_API_KEY: z.string().optional(),
-FIREWORKSAI_MODEL_LIST: z.string().optional(),

 ENABLED_AWS_BEDROCK: z.boolean(),
-AWS_BEDROCK_MODEL_LIST: z.string().optional(),
 AWS_REGION: z.string().optional(),
 AWS_ACCESS_KEY_ID: z.string().optional(),
 AWS_SECRET_ACCESS_KEY: z.string().optional(),
@@ -89,31 +72,24 @@ export const getLLMConfig = () => {
 ENABLED_WENXIN: z.boolean(),
 WENXIN_ACCESS_KEY: z.string().optional(),
 WENXIN_SECRET_KEY: z.string().optional(),
-WENXIN_MODEL_LIST: z.string().optional(),

 ENABLED_OLLAMA: z.boolean(),
 OLLAMA_PROXY_URL: z.string().optional(),
-OLLAMA_MODEL_LIST: z.string().optional(),

 ENABLED_QWEN: z.boolean(),
 QWEN_API_KEY: z.string().optional(),
-QWEN_MODEL_LIST: z.string().optional(),

 ENABLED_STEPFUN: z.boolean(),
 STEPFUN_API_KEY: z.string().optional(),
-STEPFUN_MODEL_LIST: z.string().optional(),

 ENABLED_NOVITA: z.boolean(),
 NOVITA_API_KEY: z.string().optional(),
-NOVITA_MODEL_LIST: z.string().optional(),

 ENABLED_BAICHUAN: z.boolean(),
 BAICHUAN_API_KEY: z.string().optional(),
-BAICHUAN_MODEL_LIST: z.string().optional(),

 ENABLED_TAICHU: z.boolean(),
 TAICHU_API_KEY: z.string().optional(),
-TAICHU_MODEL_LIST: z.string().optional(),

 ENABLED_CLOUDFLARE: z.boolean(),
 CLOUDFLARE_API_KEY: z.string().optional(),
@@ -121,42 +97,33 @@ export const getLLMConfig = () => {

 ENABLED_AI360: z.boolean(),
 AI360_API_KEY: z.string().optional(),
-AI360_MODEL_LIST: z.string().optional(),

 ENABLED_SILICONCLOUD: z.boolean(),
 SILICONCLOUD_API_KEY: z.string().optional(),
-SILICONCLOUD_MODEL_LIST: z.string().optional(),
 SILICONCLOUD_PROXY_URL: z.string().optional(),

 ENABLED_UPSTAGE: z.boolean(),
 UPSTAGE_API_KEY: z.string().optional(),
-UPSTAGE_MODEL_LIST: z.string().optional(),

 ENABLED_SPARK: z.boolean(),
 SPARK_API_KEY: z.string().optional(),
-SPARK_MODEL_LIST: z.string().optional(),

 ENABLED_AI21: z.boolean(),
 AI21_API_KEY: z.string().optional(),
-AI21_MODEL_LIST: z.string().optional(),

 ENABLED_HUNYUAN: z.boolean(),
 HUNYUAN_API_KEY: z.string().optional(),
-HUNYUAN_MODEL_LIST: z.string().optional(),

 ENABLED_HUGGINGFACE: z.boolean(),
 HUGGINGFACE_API_KEY: z.string().optional(),
 HUGGINGFACE_PROXY_URL: z.string().optional(),
-HUGGINGFACE_MODEL_LIST: z.string().optional(),

 ENABLED_SENSENOVA: z.boolean(),
 SENSENOVA_ACCESS_KEY_ID: z.string().optional(),
 SENSENOVA_ACCESS_KEY_SECRET: z.string().optional(),
-SENSENOVA_MODEL_LIST: z.string().optional(),

 ENABLED_XAI: z.boolean(),
 XAI_API_KEY: z.string().optional(),
-XAI_MODEL_LIST: z.string().optional(),
 },
 runtimeEnv: {
 API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -164,77 +131,60 @@ export const getLLMConfig = () => {
 ENABLED_OPENAI: process.env.ENABLED_OPENAI !== '0',
 OPENAI_API_KEY: process.env.OPENAI_API_KEY,
 OPENAI_PROXY_URL: process.env.OPENAI_PROXY_URL,
-OPENAI_MODEL_LIST: process.env.OPENAI_MODEL_LIST,

 ENABLED_AZURE_OPENAI: !!process.env.AZURE_API_KEY,
 AZURE_API_KEY: process.env.AZURE_API_KEY,
 AZURE_API_VERSION: process.env.AZURE_API_VERSION,
 AZURE_ENDPOINT: process.env.AZURE_ENDPOINT,
-AZURE_MODEL_LIST: process.env.AZURE_MODEL_LIST,

 ENABLED_ZHIPU: !!process.env.ZHIPU_API_KEY,
 ZHIPU_API_KEY: process.env.ZHIPU_API_KEY,
-ZHIPU_MODEL_LIST: process.env.ZHIPU_MODEL_LIST,

 ENABLED_DEEPSEEK: !!process.env.DEEPSEEK_API_KEY,
 DEEPSEEK_API_KEY: process.env.DEEPSEEK_API_KEY,
-DEEPSEEK_MODEL_LIST: process.env.DEEPSEEK_MODEL_LIST,

 ENABLED_GOOGLE: !!process.env.GOOGLE_API_KEY,
 GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
 GOOGLE_PROXY_URL: process.env.GOOGLE_PROXY_URL,
-GOOGLE_MODEL_LIST: process.env.GOOGLE_MODEL_LIST,

 ENABLED_PERPLEXITY: !!process.env.PERPLEXITY_API_KEY,
 PERPLEXITY_API_KEY: process.env.PERPLEXITY_API_KEY,
-PERPLEXITY_MODEL_LIST: process.env.PERPLEXITY_MODEL_LIST,
 PERPLEXITY_PROXY_URL: process.env.PERPLEXITY_PROXY_URL,

 ENABLED_ANTHROPIC: !!process.env.ANTHROPIC_API_KEY,
 ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
-ANTHROPIC_MODEL_LIST: process.env.ANTHROPIC_MODEL_LIST,
 ANTHROPIC_PROXY_URL: process.env.ANTHROPIC_PROXY_URL,

 ENABLED_MINIMAX: !!process.env.MINIMAX_API_KEY,
 MINIMAX_API_KEY: process.env.MINIMAX_API_KEY,
-MINIMAX_MODEL_LIST: process.env.MINIMAX_MODEL_LIST,

 ENABLED_MISTRAL: !!process.env.MISTRAL_API_KEY,
 MISTRAL_API_KEY: process.env.MISTRAL_API_KEY,
-MISTRAL_MODEL_LIST: process.env.MISTRAL_MODEL_LIST,

 ENABLED_OPENROUTER: !!process.env.OPENROUTER_API_KEY,
 OPENROUTER_API_KEY: process.env.OPENROUTER_API_KEY,
-OPENROUTER_MODEL_LIST: process.env.OPENROUTER_MODEL_LIST,

 ENABLED_TOGETHERAI: !!process.env.TOGETHERAI_API_KEY,
 TOGETHERAI_API_KEY: process.env.TOGETHERAI_API_KEY,
-TOGETHERAI_MODEL_LIST: process.env.TOGETHERAI_MODEL_LIST,

 ENABLED_FIREWORKSAI: !!process.env.FIREWORKSAI_API_KEY,
 FIREWORKSAI_API_KEY: process.env.FIREWORKSAI_API_KEY,
-FIREWORKSAI_MODEL_LIST: process.env.FIREWORKSAI_MODEL_LIST,

 ENABLED_MOONSHOT: !!process.env.MOONSHOT_API_KEY,
 MOONSHOT_API_KEY: process.env.MOONSHOT_API_KEY,
-MOONSHOT_MODEL_LIST: process.env.MOONSHOT_MODEL_LIST,
 MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,

 ENABLED_GROQ: !!process.env.GROQ_API_KEY,
 GROQ_API_KEY: process.env.GROQ_API_KEY,
-GROQ_MODEL_LIST: process.env.GROQ_MODEL_LIST,
 GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,

 ENABLED_GITHUB: !!process.env.GITHUB_TOKEN,
 GITHUB_TOKEN: process.env.GITHUB_TOKEN,
-GITHUB_MODEL_LIST: process.env.GITHUB_MODEL_LIST,

 ENABLED_ZEROONE: !!process.env.ZEROONE_API_KEY,
 ZEROONE_API_KEY: process.env.ZEROONE_API_KEY,
-ZEROONE_MODEL_LIST: process.env.ZEROONE_MODEL_LIST,

 ENABLED_AWS_BEDROCK: process.env.ENABLED_AWS_BEDROCK === '1',
-AWS_BEDROCK_MODEL_LIST: process.env.AWS_BEDROCK_MODEL_LIST,
 AWS_REGION: process.env.AWS_REGION,
 AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
 AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,
@@ -243,31 +193,24 @@ export const getLLMConfig = () => {
 ENABLED_WENXIN: !!process.env.WENXIN_ACCESS_KEY && !!process.env.WENXIN_SECRET_KEY,
 WENXIN_ACCESS_KEY: process.env.WENXIN_ACCESS_KEY,
 WENXIN_SECRET_KEY: process.env.WENXIN_SECRET_KEY,
-WENXIN_MODEL_LIST: process.env.WENXIN_MODEL_LIST,

 ENABLED_OLLAMA: process.env.ENABLED_OLLAMA !== '0',
 OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
-OLLAMA_MODEL_LIST: process.env.OLLAMA_MODEL_LIST,

 ENABLED_QWEN: !!process.env.QWEN_API_KEY,
 QWEN_API_KEY: process.env.QWEN_API_KEY,
-QWEN_MODEL_LIST: process.env.QWEN_MODEL_LIST,

 ENABLED_STEPFUN: !!process.env.STEPFUN_API_KEY,
 STEPFUN_API_KEY: process.env.STEPFUN_API_KEY,
-STEPFUN_MODEL_LIST: process.env.STEPFUN_MODEL_LIST,

 ENABLED_NOVITA: !!process.env.NOVITA_API_KEY,
 NOVITA_API_KEY: process.env.NOVITA_API_KEY,
-NOVITA_MODEL_LIST: process.env.NOVITA_MODEL_LIST,

 ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
 BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
-BAICHUAN_MODEL_LIST: process.env.BAICHUAN_MODEL_LIST,

 ENABLED_TAICHU: !!process.env.TAICHU_API_KEY,
 TAICHU_API_KEY: process.env.TAICHU_API_KEY,
-TAICHU_MODEL_LIST: process.env.TAICHU_MODEL_LIST,

 ENABLED_CLOUDFLARE:
 !!process.env.CLOUDFLARE_API_KEY && !!process.env.CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID,
@@ -276,42 +219,33 @@ export const getLLMConfig = () => {

 ENABLED_AI360: !!process.env.AI360_API_KEY,
 AI360_API_KEY: process.env.AI360_API_KEY,
-AI360_MODEL_LIST: process.env.AI360_MODEL_LIST,

 ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
 SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
-SILICONCLOUD_MODEL_LIST: process.env.SILICONCLOUD_MODEL_LIST,
 SILICONCLOUD_PROXY_URL: process.env.SILICONCLOUD_PROXY_URL,

 ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
 UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
-UPSTAGE_MODEL_LIST: process.env.UPSTAGE_MODEL_LIST,

 ENABLED_SPARK: !!process.env.SPARK_API_KEY,
 SPARK_API_KEY: process.env.SPARK_API_KEY,
-SPARK_MODEL_LIST: process.env.SPARK_MODEL_LIST,

 ENABLED_AI21: !!process.env.AI21_API_KEY,
 AI21_API_KEY: process.env.AI21_API_KEY,
-AI21_MODEL_LIST: process.env.AI21_MODEL_LIST,

 ENABLED_HUNYUAN: !!process.env.HUNYUAN_API_KEY,
 HUNYUAN_API_KEY: process.env.HUNYUAN_API_KEY,
-HUNYUAN_MODEL_LIST: process.env.HUNYUAN_MODEL_LIST,

 ENABLED_HUGGINGFACE: !!process.env.HUGGINGFACE_API_KEY,
 HUGGINGFACE_API_KEY: process.env.HUGGINGFACE_API_KEY,
 HUGGINGFACE_PROXY_URL: process.env.HUGGINGFACE_PROXY_URL,
-HUGGINGFACE_MODEL_LIST: process.env.HUGGINGFACE_MODEL_LIST,

 ENABLED_SENSENOVA: !!process.env.SENSENOVA_ACCESS_KEY_ID && !!process.env.SENSENOVA_ACCESS_KEY_SECRET,
 SENSENOVA_ACCESS_KEY_ID: process.env.SENSENOVA_ACCESS_KEY_ID,
 SENSENOVA_ACCESS_KEY_SECRET: process.env.SENSENOVA_ACCESS_KEY_SECRET,
-SENSENOVA_MODEL_LIST: process.env.SENSENOVA_MODEL_LIST,

 ENABLED_XAI: !!process.env.XAI_API_KEY,
 XAI_API_KEY: process.env.XAI_API_KEY,
-XAI_MODEL_LIST: process.env.XAI_MODEL_LIST,
 },
 });
 };
package/src/server/globalConfig/genServerLLMConfig.test.ts
CHANGED
@@ -53,6 +53,10 @@ vi.mock('@/utils/parseModels', () => ({

 describe('genServerLLMConfig', () => {
   it('should generate correct LLM config for Azure, Bedrock, and Ollama', () => {
+    vi.stubEnv('AZURE_MODEL_LIST', 'azureModels');
+    vi.stubEnv('AWS_BEDROCK_MODEL_LIST', 'bedrockModels');
+    vi.stubEnv('OLLAMA_MODEL_LIST', 'ollamaModels');
+
     const specificConfig = {
       azure: {
         enabledKey: 'ENABLED_AZURE_OPENAI',
@@ -67,7 +71,7 @@ describe('genServerLLMConfig', () => {
       },
     };
     const config = genServerLLMConfig(specificConfig);
-
+
     expect(config.azure).toEqual({
       enabled: true,
       enabledModels: ['azureModels_withDeployment'],
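Because the config generator now reads `*_MODEL_LIST` values straight from `process.env` (see the next hunk), the test has to stub those variables before calling it, which is what the added `vi.stubEnv` calls do. A minimal, self-contained sketch of the pattern (the variable and value here are hypothetical, not part of this diff):

```ts
import { afterEach, expect, it, vi } from 'vitest';

afterEach(() => {
  // Restore the real environment between tests.
  vi.unstubAllEnvs();
});

it('reads a provider model list from the environment', () => {
  vi.stubEnv('OPENAI_MODEL_LIST', 'example-model'); // hypothetical value
  expect(process.env.OPENAI_MODEL_LIST).toBe('example-model');
});
```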
package/src/server/globalConfig/genServerLLMConfig.ts
CHANGED
@@ -14,16 +14,17 @@ export const genServerLLMConfig = (specificConfig: Record<any, any>) => {
     const providerUpperCase = provider.toUpperCase();
     const providerCard = ProviderCards[`${provider}ProviderCard` as keyof typeof ProviderCards] as ModelProviderCard;
     const providerConfig = specificConfig[provider as keyof typeof specificConfig] || {};
+    const providerModelList = process.env[providerConfig.modelListKey ?? `${providerUpperCase}_MODEL_LIST`];

     config[provider] = {
       enabled: llmConfig[providerConfig.enabledKey || `ENABLED_${providerUpperCase}`],
       enabledModels: extractEnabledModels(
-
+        providerModelList,
         providerConfig.withDeploymentName || false,
       ),
       serverModelCards: transformToChatModelCards({
         defaultChatModels: (providerCard as ModelProviderCard)?.chatModels || [],
-        modelString:
+        modelString: providerModelList,
         withDeploymentName: providerConfig.withDeploymentName || false,
       }),
       ...(providerConfig.fetchOnClient !== undefined && { fetchOnClient: providerConfig.fetchOnClient }),
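The net effect of the refactor: each provider's model list is resolved directly from the environment, using an optional `modelListKey` override from `specificConfig` and otherwise the `${PROVIDER}_MODEL_LIST` naming convention, and the same value feeds both `extractEnabledModels` and `transformToChatModelCards`. A condensed sketch of just the lookup (the helper name is ours, not part of the codebase):

```ts
// Sketch of the per-provider lookup genServerLLMConfig now performs.
const resolveModelList = (provider: string, modelListKey?: string): string | undefined =>
  process.env[modelListKey ?? `${provider.toUpperCase()}_MODEL_LIST`];

// resolveModelList('azure')  -> process.env.AZURE_MODEL_LIST
// resolveModelList('ollama') -> process.env.OLLAMA_MODEL_LIST
// A provider config may point at a differently named variable via modelListKey (illustrative).
```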
The relocated ChatInput files listed above were moved without content changes.