@lobehub/chat 1.9.8 → 1.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -112,6 +112,10 @@ OPENAI_API_KEY=sk-xxxxxxxxx
 
  # QWEN_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 
+ ### SiliconCloud AI ####
+
+ # SILICONCLOUD_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+
  ########################################
  ############ Market Service ############
  ########################################
package/CHANGELOG.md CHANGED
@@ -2,6 +2,48 @@
 
  # Changelog
 
+ ### [Version 1.10.1](https://github.com/lobehub/lobe-chat/compare/v1.10.0...v1.10.1)
+
+ <sup>Released on **2024-08-14**</sup>
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ## [Version 1.10.0](https://github.com/lobehub/lobe-chat/compare/v1.9.8...v1.10.0)
+
+ <sup>Released on **2024-08-14**</sup>
+
+ #### ✨ Features
+
+ - **misc**: Add SiliconCloud model provider.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's improved
+
+ - **misc**: Add SiliconCloud model provider, closes [#3092](https://github.com/lobehub/lobe-chat/issues/3092) ([0781dc5](https://github.com/lobehub/lobe-chat/commit/0781dc5))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.9.8](https://github.com/lobehub/lobe-chat/compare/v1.9.7...v1.9.8)
 
  <sup>Released on **2024-08-13**</sup>
package/Dockerfile CHANGED
@@ -8,9 +8,9 @@ RUN \
  if [ "${USE_CN_MIRROR:-false}" = "true" ]; then \
  sed -i "s/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g" "/etc/apk/repositories"; \
  fi \
- # Add proxychains-ng package & update base package
+ # Add required package & update base package
  && apk update \
- && apk add --no-cache proxychains-ng \
+ && apk add --no-cache bind-tools proxychains-ng \
  && apk upgrade --no-cache \
  # Add user nextjs to run the app
  && addgroup --system --gid 1001 nodejs \
@@ -0,0 +1,46 @@
+ ---
+ title: Using SiliconCloud API Key in LobeChat
+ description: Learn how to configure and use SiliconCloud's large language models in LobeChat, get your API key, and start chatting.
+ tags:
+ - LobeChat
+ - SiliconCloud
+ - API Key
+ - Web UI
+ ---
+
+ # Using SiliconCloud in LobeChat
+
+ [SiliconCloud](https://siliconflow.cn/zh-cn/siliconcloud) is a cost-effective large model service provider, offering various services such as text generation and image generation.
+
+ This document will guide you on how to use SiliconCloud in LobeChat:
+
+ <Steps>
+
+ ### Step 1: Get your SiliconCloud API Key
+
+ - First, you need to register and log in to [SiliconCloud](https://cloud.siliconflow.cn/auth/login)
+
+ <Callout type={'info'}>Currently, new users can get 14 yuan free credit upon registration</Callout>
+
+ - Go to the `API Key` menu and click `Create New API Key`
+
+ - Click copy API key and keep it safe
+
+ ### Step 2: Configure SiliconCloud in LobeChat
+
+ - Visit the `App Settings` interface of LobeChat
+
+ - Under `Language Model`, find the `SiliconCloud` settings
+
+ - Enable SiliconCloud and enter the obtained API key
+
+ - Choose a SiliconCloud model for your assistant and start chatting
+
+ <Callout type={'warning'}>
+ You may need to pay the API service provider during use. Please refer to SiliconCloud's relevant fee policy.
+ </Callout>
+
+ </Steps>
+
+ Now you can use the models provided by SiliconCloud for conversation in LobeChat.
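The provider added here is routed through LobeChat's OpenAI-compatible runtime (see the `LobeSiliconCloudAI` factory later in this diff), so the endpoint can also be exercised directly as a quick sanity check before wiring it into the UI. A minimal sketch, assuming the `openai` npm client; the base URL and model ID are taken from this diff, while the key is a placeholder:

```ts
import OpenAI from 'openai';

// SiliconCloud speaks the OpenAI chat-completions protocol; the baseURL below
// is the one registered for the provider in this diff.
const client = new OpenAI({
  apiKey: process.env.SILICONCLOUD_API_KEY, // placeholder: your SiliconCloud key
  baseURL: 'https://api.siliconflow.cn/v1',
});

const response = await client.chat.completions.create({
  messages: [{ content: 'Hello from LobeChat!', role: 'user' }],
  // 'Qwen/Qwen2-7B-Instruct' is one of the model IDs added by this diff.
  model: 'Qwen/Qwen2-7B-Instruct',
});

console.log(response.choices[0]?.message.content);
```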
@@ -0,0 +1,46 @@
+ ---
+ title: 在 LobeChat 中使用 SiliconCloud API Key
+ description: 学习如何在 LobeChat 中配置和使用 SiliconCloud 提供的大语言模型,获取 API 密钥并开始对话。
+ tags:
+ - LobeChat
+ - SiliconCloud
+ - API密钥
+ - Web UI
+ ---
+
+ # 在 LobeChat 中使用 SiliconCloud
+
+ [SiliconCloud](https://siliconflow.cn/zh-cn/siliconcloud) 是高性价比的大模型服务提供商,提供文本生成与图片生成等多种服务。
+
+ 本文档将指导你如何在 LobeChat 中使用 SiliconCloud:
+
+ <Steps>
+
+ ### 步骤一:获取 SiliconCloud API 密钥
+
+ - 首先,你需要注册并登录 [SiliconCloud](https://cloud.siliconflow.cn/auth/login)
+
+ <Callout type={'info'}>当前新用户注册可获赠 14 元免费额度</Callout>
+
+ - 进入 `API密钥` 菜单,并点击 `创建新API密钥`
+
+ - 点击复制 API 密钥并妥善保存
+
+ ### 步骤二:在 LobeChat 中配置 SiliconCloud
+
+ - 访问 LobeChat 的 `应用设置` 界面
+
+ - 在 `语言模型` 下找到 `SiliconCloud` 的设置项
+
+ - 打开 SiliconCloud 并填入获取的 API 密钥
+
+ - 为你的助手选择一个 SiliconCloud 模型即可开始对话
+
+ <Callout type={'warning'}>
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考 SiliconCloud 的相关费用政策。
+ </Callout>
+
+ </Steps>
+
+ 至此你已经可以在 LobeChat 中使用 SiliconCloud 提供的模型进行对话了。
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.9.8",
+ "version": "1.10.1",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -14,6 +14,7 @@ import {
  Novita,
  OpenRouter,
  Perplexity,
+ SiliconCloud,
  Stepfun,
  Together,
  Tongyi,
@@ -40,6 +41,7 @@ import {
  OpenRouterProviderCard,
  PerplexityProviderCard,
  QwenProviderCard,
+ SiliconCloudProviderCard,
  StepfunProviderCard,
  TaichuProviderCard,
  TogetherAIProviderCard,
@@ -198,6 +200,11 @@ export const useProviderList = (): ProviderItem[] => {
  docUrl: urlJoin(BASE_DOC_URL, 'ai360'),
  title: <Ai360.Combine size={ 20 } type={ 'color' } />,
  },
+ {
+ ...SiliconCloudProviderCard,
+ docUrl: urlJoin(BASE_DOC_URL, 'siliconcloud'),
+ title: <SiliconCloud.Combine size={20} type={'color'} />,
+ },
  ],
  [azureProvider, ollamaProvider, ollamaProvider, bedrockProvider],
  );
@@ -198,6 +198,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
  const apiKey = apiKeyManager.pick(payload?.apiKey || AI360_API_KEY);
 
+ return { apiKey };
+ }
+ case ModelProvider.SiliconCloud: {
+ const { SILICONCLOUD_API_KEY } = getLLMConfig();
+
+ const apiKey = apiKeyManager.pick(payload?.apiKey || SILICONCLOUD_API_KEY);
+
  return { apiKey };
  }
  }
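`apiKeyManager` is not part of this diff; the hunk only establishes precedence: a key supplied in the request payload wins, otherwise the server-side `SILICONCLOUD_API_KEY` is used. A rough sketch of that precedence with a hypothetical `pick` helper that draws one key from a comma-separated pool (the `API_KEY_SELECT_MODE` setting visible in the llm config hunk further down hints that such a pool exists):

```ts
// Hypothetical helper for illustration only; the real apiKeyManager lives elsewhere in the codebase.
const pick = (keys?: string): string | undefined => {
  if (!keys) return undefined;

  // Assume the value may be a comma-separated pool, e.g. "sk-a,sk-b,sk-c".
  const pool = keys.split(',').filter(Boolean);

  return pool[Math.floor(Math.random() * pool.length)];
};

// Mirrors the precedence in the hunk above: payload key first, then the
// server-side SILICONCLOUD_API_KEY.
const resolveSiliconCloudKey = (payloadApiKey?: string) =>
  pick(payloadApiKey || process.env.SILICONCLOUD_API_KEY);
```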
@@ -51,7 +51,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model: originModel, size = 12
  // currently supported models, maybe not in its own provider
  if (model.includes('gpt-3')) return <OpenAI.Avatar size={size} type={'gpt3'} />;
  if (model.includes('gpt-4')) return <OpenAI.Avatar size={size} type={'gpt4'} />;
- if (model.startsWith('glm') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
+ if (model.includes('glm-') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
  if (model.startsWith('codegeex')) return <CodeGeeX.Avatar size={size} />;
  if (model.includes('deepseek')) return <DeepSeek.Avatar size={size} />;
  if (model.includes('claude')) return <Claude.Avatar size={size} />;
@@ -17,6 +17,7 @@ import {
  OpenAI,
  OpenRouter,
  Perplexity,
+ SiliconCloud,
  Stepfun,
  Together,
  Tongyi,
@@ -134,6 +135,10 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
  return <Ai360 size={20} />;
  }
 
+ case ModelProvider.SiliconCloud: {
+ return <SiliconCloud size={20} />;
+ }
+
  default: {
  return null;
  }
package/src/config/llm.ts CHANGED
@@ -87,6 +87,9 @@ export const getLLMConfig = () => {
 
  ENABLED_AI360: z.boolean(),
  AI360_API_KEY: z.string().optional(),
+
+ ENABLED_SILICONCLOUD: z.boolean(),
+ SILICONCLOUD_API_KEY: z.string().optional(),
  },
  runtimeEnv: {
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -171,6 +174,9 @@ export const getLLMConfig = () => {
 
  ENABLED_AI360: !!process.env.AI360_API_KEY,
  AI360_API_KEY: process.env.AI360_API_KEY,
+
+ ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
+ SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
  },
  });
  };
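Note the pattern in these two hunks: `ENABLED_SILICONCLOUD` is not read from its own environment variable but is derived from the presence of `SILICONCLOUD_API_KEY`, and both values must satisfy the zod schema declared in the first hunk. A stripped-down sketch of that relationship (the function name is illustrative, not part of the codebase):

```ts
import { z } from 'zod';

// Mirrors the two entries added to the schema in src/config/llm.ts.
const siliconCloudEnv = z.object({
  ENABLED_SILICONCLOUD: z.boolean(),
  SILICONCLOUD_API_KEY: z.string().optional(),
});

// The enabled flag is simply "is the key set?", exactly as in the second hunk.
export const getSiliconCloudEnv = () =>
  siliconCloudEnv.parse({
    ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
    SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
  });
```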
@@ -17,6 +17,7 @@ import OpenAIProvider from './openai';
  import OpenRouterProvider from './openrouter';
  import PerplexityProvider from './perplexity';
  import QwenProvider from './qwen';
+ import SiliconCloudProvider from './siliconcloud';
  import StepfunProvider from './stepfun';
  import TaichuProvider from './taichu';
  import TogetherAIProvider from './togetherai';
@@ -45,6 +46,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
  BaichuanProvider.chatModels,
  TaichuProvider.chatModels,
  Ai360Provider.chatModels,
+ SiliconCloudProvider.chatModels,
  ].flat();
 
  export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -70,6 +72,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
  BaichuanProvider,
  TaichuProvider,
  Ai360Provider,
+ SiliconCloudProvider,
  ];
 
  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -98,6 +101,7 @@ export { default as OpenAIProviderCard } from './openai';
  export { default as OpenRouterProviderCard } from './openrouter';
  export { default as PerplexityProviderCard } from './perplexity';
  export { default as QwenProviderCard } from './qwen';
+ export { default as SiliconCloudProviderCard } from './siliconcloud';
  export { default as StepfunProviderCard } from './stepfun';
  export { default as TaichuProviderCard } from './taichu';
  export { default as TogetherAIProviderCard } from './togetherai';
@@ -0,0 +1,127 @@
+ import { ModelProviderCard } from '@/types/llm';
+
+ // ref https://siliconflow.cn/zh-cn/models
+ const SiliconCloud: ModelProviderCard = {
+ chatModels: [
+ {
+ enabled: true,
+ id: 'Qwen/Qwen2-72B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ enabled: true,
+ id: 'Qwen/Qwen2-Math-72B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ enabled: true,
+ id: 'Qwen/Qwen2-57B-A14B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ id: 'Qwen/Qwen2-7B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ id: 'Qwen/Qwen2-1.5B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ id: 'Qwen/Qwen1.5-110B-Chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'Qwen/Qwen1.5-32B-Chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'Qwen/Qwen1.5-14B-Chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'Qwen/Qwen1.5-7B-Chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
+ tokens: 32_768,
+ },
+ {
+ enabled: true,
+ id: 'deepseek-ai/DeepSeek-V2-Chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'deepseek-ai/deepseek-llm-67b-chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'THUDM/glm-4-9b-chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'THUDM/chatglm3-6b',
+ tokens: 32_768,
+ },
+ {
+ enabled: true,
+ id: '01-ai/Yi-1.5-34B-Chat-16K',
+ tokens: 16_384,
+ },
+ {
+ id: '01-ai/Yi-1.5-9B-Chat-16K',
+ tokens: 16_384,
+ },
+ {
+ id: '01-ai/Yi-1.5-6B-Chat',
+ tokens: 4096,
+ },
+ {
+ id: 'internlm/internlm2_5-7b-chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'google/gemma-2-9b-it',
+ tokens: 8192,
+ },
+ {
+ id: 'google/gemma-2-27b-it',
+ tokens: 8192,
+ },
+ {
+ id: 'internlm/internlm2_5-20b-chat',
+ tokens: 32_768,
+ },
+ {
+ id: 'meta-llama/Meta-Llama-3.1-8B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ enabled: true,
+ id: 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ id: 'meta-llama/Meta-Llama-3.1-405B-Instruct',
+ tokens: 32_768,
+ },
+ {
+ id: 'meta-llama/Meta-Llama-3-70B-Instruct',
+ tokens: 8192,
+ },
+ {
+ id: 'mistralai/Mistral-7B-Instruct-v0.2',
+ tokens: 32_768,
+ },
+ {
+ id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ tokens: 32_768,
+ },
+ ],
+ checkModel: 'Qwen/Qwen2-1.5B-Instruct',
+ id: 'siliconcloud',
+ modelList: { showModelFetcher: true },
+ name: 'SiliconCloud',
+ };
+
+ export default SiliconCloud;
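The card above only uses a handful of fields from `ModelProviderCard` (imported from `@/types/llm`, which is not included in this diff). A rough sketch of the shape those fields imply, stated as an assumption about the type rather than its actual definition:

```ts
// Assumed shapes, inferred from the fields the SiliconCloud card uses above.
interface ChatModelCard {
  enabled?: boolean; // models marked enabled are selectable out of the box
  id: string; // provider-side model identifier, e.g. 'Qwen/Qwen2-72B-Instruct'
  tokens?: number; // context window size
}

interface ModelProviderCard {
  chatModels: ChatModelCard[];
  checkModel?: string; // a cheap model used for connectivity checks
  id: string; // provider key, e.g. 'siliconcloud'
  modelList?: { showModelFetcher?: boolean }; // whether the UI offers remote model fetching
  name: string; // display name shown in settings
}
```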
@@ -15,6 +15,7 @@ import {
  OpenRouterProviderCard,
  PerplexityProviderCard,
  QwenProviderCard,
+ SiliconCloudProviderCard,
  StepfunProviderCard,
  TaichuProviderCard,
  TogetherAIProviderCard,
@@ -94,6 +95,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
  enabled: false,
  enabledModels: filterEnabledModels(QwenProviderCard),
  },
+ siliconcloud: {
+ enabled: false,
+ enabledModels: filterEnabledModels(SiliconCloudProviderCard),
+ },
  stepfun: {
  enabled: false,
  enabledModels: filterEnabledModels(StepfunProviderCard),
@@ -20,6 +20,7 @@ import { LobeOpenAI } from './openai';
  import { LobeOpenRouterAI } from './openrouter';
  import { LobePerplexityAI } from './perplexity';
  import { LobeQwenAI } from './qwen';
+ import { LobeSiliconCloudAI } from './siliconcloud';
  import { LobeStepfunAI } from './stepfun';
  import { LobeTaichuAI } from './taichu';
  import { LobeTogetherAI } from './togetherai';
@@ -122,6 +123,7 @@ class AgentRuntime {
  openrouter: Partial<ClientOptions>;
  perplexity: Partial<ClientOptions>;
  qwen: Partial<ClientOptions>;
+ siliconcloud: Partial<ClientOptions>;
  stepfun: Partial<ClientOptions>;
  taichu: Partial<ClientOptions>;
  togetherai: Partial<ClientOptions>;
@@ -247,6 +249,11 @@ class AgentRuntime {
  runtimeModel = new LobeAi360AI(params.ai360 ?? {});
  break
  }
+
+ case ModelProvider.SiliconCloud: {
+ runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
+ break
+ }
  }
 
  return new AgentRuntime(runtimeModel);
@@ -0,0 +1,10 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
+ baseURL: 'https://api.siliconflow.cn/v1',
+ debug: {
+ chatCompletion: () => process.env.DEBUG_SILICONCLOUD_CHAT_COMPLETION === '1',
+ },
+ provider: ModelProvider.SiliconCloud,
+ });
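`LobeOpenAICompatibleFactory` itself is not shown in this diff. Conceptually it binds a provider ID, a base URL, and a debug switch to a client that speaks the OpenAI chat-completions protocol. A very rough sketch of that idea, assuming the `openai` SDK; the option and return shapes here are invented for illustration and are not the factory's real API:

```ts
import OpenAI from 'openai';

// Illustrative only; the real factory in ../utils/openaiCompatibleFactory is richer.
interface CompatibleFactoryOptions {
  baseURL: string;
  debug?: { chatCompletion?: () => boolean };
  provider: string;
}

const createOpenAICompatibleRuntime = ({ baseURL, debug, provider }: CompatibleFactoryOptions) =>
  class {
    private client: OpenAI;

    constructor(options: { apiKey?: string } = {}) {
      this.client = new OpenAI({ apiKey: options.apiKey, baseURL });
    }

    async chat(params: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming) {
      // The debug flag in the diff gates verbose logging of chat completions.
      if (debug?.chatCompletion?.()) console.log(`[${provider}] chat`, params);

      return this.client.chat.completions.create(params);
    }
  };
```

Usage would then mirror the AgentRuntime switch above, which instantiates the class with `params.siliconcloud ?? {}` once `ModelProvider.SiliconCloud` is selected.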
@@ -39,6 +39,7 @@ export enum ModelProvider {
  OpenRouter = 'openrouter',
  Perplexity = 'perplexity',
  Qwen = 'qwen',
+ SiliconCloud = 'siliconcloud',
  Stepfun = 'stepfun',
  Taichu = 'taichu',
  TogetherAI = 'togetherai',
@@ -39,6 +39,7 @@ export const getServerGlobalConfig = () => {
  ENABLED_BAICHUAN,
  ENABLED_TAICHU,
  ENABLED_AI360,
+ ENABLED_SILICONCLOUD,
 
  ENABLED_AZURE_OPENAI,
  AZURE_MODEL_LIST,
@@ -112,7 +113,7 @@ export const getServerGlobalConfig = () => {
  },
  perplexity: { enabled: ENABLED_PERPLEXITY },
  qwen: { enabled: ENABLED_QWEN },
-
+ siliconcloud: { enabled: ENABLED_SILICONCLOUD },
  stepfun: { enabled: ENABLED_STEPFUN },
 
  taichu: { enabled: ENABLED_TAICHU },
@@ -35,6 +35,7 @@ export interface UserKeyVaults {
  password?: string;
  perplexity?: OpenAICompatibleKeyVault;
  qwen?: OpenAICompatibleKeyVault;
+ siliconcloud?: OpenAICompatibleKeyVault;
  stepfun?: OpenAICompatibleKeyVault;
  taichu?: OpenAICompatibleKeyVault;
  togetherai?: OpenAICompatibleKeyVault;
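`OpenAICompatibleKeyVault`, like the rest of this interface, is defined outside the diff; for the new `siliconcloud` entry it presumably carries the user-scoped credential for the OpenAI-compatible endpoint. A guess at its minimal shape, for orientation only:

```ts
// Assumed shape; the real type may carry additional fields.
interface OpenAICompatibleKeyVault {
  apiKey?: string;
  baseURL?: string;
}
```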