@lobehub/chat 1.15.5 → 1.15.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.15.7](https://github.com/lobehub/lobe-chat/compare/v1.15.6...v1.15.7)
6
+
7
+ <sup>Released on **2024-09-03**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Fix provider disabled title style.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Fix provider disabled title style, closes [#3743](https://github.com/lobehub/lobe-chat/issues/3743) ([2c72452](https://github.com/lobehub/lobe-chat/commit/2c72452))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.15.6](https://github.com/lobehub/lobe-chat/compare/v1.15.5...v1.15.6)
31
+
32
+ <sup>Released on **2024-09-01**</sup>
33
+
34
+ #### 💄 Styles
35
+
36
+ - **misc**: Stepfun default enabled model, update Groq model list & add `GROQ_MODEL_LIST` support.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### Styles
44
+
45
+ - **misc**: Stepfun default enabled model, closes [#3712](https://github.com/lobehub/lobe-chat/issues/3712) ([7e41d54](https://github.com/lobehub/lobe-chat/commit/7e41d54))
46
+ - **misc**: Update Groq model list & add `GROQ_MODEL_LIST` support, closes [#3716](https://github.com/lobehub/lobe-chat/issues/3716) ([75c9247](https://github.com/lobehub/lobe-chat/commit/75c9247))
47
+
48
+ </details>
49
+
50
+ <div align="right">
51
+
52
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
53
+
54
+ </div>
55
+
5
56
  ### [Version 1.15.5](https://github.com/lobehub/lobe-chat/compare/v1.15.4...v1.15.5)
6
57
 
7
58
  <sup>Released on **2024-09-01**</sup>
package/Dockerfile CHANGED
@@ -119,7 +119,7 @@ ENV \
119
119
  # Google
120
120
  GOOGLE_API_KEY="" GOOGLE_PROXY_URL="" \
121
121
  # Groq
122
- GROQ_API_KEY="" GROQ_PROXY_URL="" \
122
+ GROQ_API_KEY="" GROQ_MODEL_LIST="" GROQ_PROXY_URL="" \
123
123
  # Minimax
124
124
  MINIMAX_API_KEY="" \
125
125
  # Mistral
@@ -151,7 +151,7 @@ ENV \
151
151
  # Google
152
152
  GOOGLE_API_KEY="" GOOGLE_PROXY_URL="" \
153
153
  # Groq
154
- GROQ_API_KEY="" GROQ_PROXY_URL="" \
154
+ GROQ_API_KEY="" GROQ_MODEL_LIST="" GROQ_PROXY_URL="" \
155
155
  # Minimax
156
156
  MINIMAX_API_KEY="" \
157
157
  # Mistral
@@ -88,7 +88,7 @@ docker compose up -d
88
88
  - `Redirect URI` should be `http://localhost:3210/api/auth/callback/logto`
89
89
  - `Post sign-out redirect URI` should be `http://localhost:3210/`
90
90
 
91
- 3. Obtain the `Client ID` and `Client Secret`, and fill them into your `.env` file corresponding to `LOGTO_CLIENT_ID` and `LOGTO_CLIENT_SECRET`.
91
+ 3. Obtain the `App ID` and `App secrets`, and fill them into your `.env` file corresponding to `LOGTO_CLIENT_ID` and `LOGTO_CLIENT_SECRET`.
92
92
 
93
93
  ### Configure MinIO S3
94
94
 
@@ -257,7 +257,7 @@ You need to first access the WebUI for configuration:
257
257
  src="https://github.com/user-attachments/assets/5b816379-c07b-40ea-bde4-df16e2e4e523"
258
258
  />
259
259
 
260
- 5. Obtain `Client ID` and `Client Secret`, and fill them into your `.env` file under `LOGTO_CLIENT_ID` and `LOGTO_CLIENT_SECRET`.
260
+ 5. Obtain `App ID` and `App secrets`, and fill them into your `.env` file under `LOGTO_CLIENT_ID` and `LOGTO_CLIENT_SECRET`.
261
261
 
262
262
  6. Set `LOGTO_ISSUER` in your `.env` file to `https://lobe-auth-api.example.com/oidc`.
263
263
 
@@ -86,7 +86,7 @@ docker compose up -d
86
86
  - `Redirect URI` 为 `http://localhost:3210/api/auth/callback/logto`
87
87
  - `Post sign-out redirect URI` 为 `http://localhost:3210/`
88
88
 
89
- 3. 获取 `Client ID` 和 `Client Secret`,填入 `.env` 文件中对应的 `LOGTO_CLIENT_ID` 、 `LOGTO_CLIENT_SECRETT`
89
+ 3. 获取 `App ID` 和 `App secrets`,填入 `.env` 文件中对应的 `LOGTO_CLIENT_ID` 、 `LOGTO_CLIENT_SECRET`
90
90
 
91
91
  ### 配置 MinIO S3
92
92
 
@@ -255,7 +255,7 @@ docker compose up -d # 重新启动
255
255
  src="https://github.com/user-attachments/assets/5b816379-c07b-40ea-bde4-df16e2e4e523"
256
256
  />
257
257
 
258
- 5. 获取 `Client ID` 和 `Client Secret`,填入你的 `.env` 文件中的 `LOGTO_CLIENT_ID` 和 `LOGTO_CLIENT_SECRETT` 中
258
+ 5. 获取 `App ID` 和 `App secrets`,填入你的 `.env` 文件中的 `LOGTO_CLIENT_ID` 和 `LOGTO_CLIENT_SECRET` 中
259
259
 
260
260
  6. 配置你的 `.env` 文件中 `LOGTO_ISSUER` 为 `https://lobe-auth-api.example.com/oidc`
261
261
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.15.5",
3
+ "version": "1.15.7",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -266,7 +266,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
266
266
  ) : undefined}
267
267
  </Flexbox>
268
268
  ),
269
- title: title ?? (
269
+ title: (
270
270
  <Flexbox
271
271
  align={'center'}
272
272
  className={styles.safariIconWidthFix}
@@ -277,7 +277,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
277
277
  ...(enabled ? {} : { filter: 'grayscale(100%)', maxHeight: 24, opacity: 0.66 }),
278
278
  }}
279
279
  >
280
- <ProviderCombine provider={id} size={24} />
280
+ {title ?? <ProviderCombine provider={id} size={24} />}
281
281
  </Flexbox>
282
282
  ),
283
283
  };
package/src/config/llm.ts CHANGED
@@ -49,6 +49,7 @@ export const getLLMConfig = () => {
49
49
 
50
50
  ENABLED_GROQ: z.boolean(),
51
51
  GROQ_API_KEY: z.string().optional(),
52
+ GROQ_MODEL_LIST: z.string().optional(),
52
53
  GROQ_PROXY_URL: z.string().optional(),
53
54
 
54
55
  ENABLED_OPENROUTER: z.boolean(),
@@ -153,6 +154,7 @@ export const getLLMConfig = () => {
153
154
 
154
155
  ENABLED_GROQ: !!process.env.GROQ_API_KEY,
155
156
  GROQ_API_KEY: process.env.GROQ_API_KEY,
157
+ GROQ_MODEL_LIST: process.env.GROQ_MODEL_LIST,
156
158
  GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,
157
159
 
158
160
  ENABLED_ZEROONE: !!process.env.ZEROONE_API_KEY,
@@ -1,59 +1,59 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
3
  // ref https://console.groq.com/docs/models
4
+ // ref https://console.groq.com/docs/tool-use
4
5
  const Groq: ModelProviderCard = {
5
6
  chatModels: [
7
+ // TODO: During preview launch, Groq is limiting 3.1 models to max_tokens of 8k.
6
8
  {
7
- displayName: 'LLaMA3.1 405B (Preview)',
9
+ displayName: 'Llama 3.1 8B (Preview)',
10
+ enabled: true,
8
11
  functionCall: true,
9
- id: 'llama-3.1-405b-reasoning',
10
- tokens: 16_000,
12
+ id: 'llama-3.1-8b-instant',
13
+ tokens: 8000,
11
14
  },
12
15
  {
13
- displayName: 'LLaMA 3.1 70B (Preview)',
16
+ displayName: 'Llama 3.1 70B (Preview)',
14
17
  enabled: true,
15
18
  functionCall: true,
16
19
  id: 'llama-3.1-70b-versatile',
17
20
  tokens: 8000,
18
21
  },
22
+ /*
23
+ // Offline due to overwhelming demand! Stay tuned for updates.
19
24
  {
20
- displayName: 'LLaMA 3.1 8B (Preview)',
21
- enabled: true,
25
+ displayName: 'Llama 3.1 405B (Preview)',
22
26
  functionCall: true,
23
- id: 'llama-3.1-8b-instant',
27
+ id: 'llama-3.1-405b-reasoning',
24
28
  tokens: 8000,
25
29
  },
30
+ */
26
31
  {
27
- displayName: 'LLaMA 3 Groq 70b Tool Use (preview)',
32
+ displayName: 'Llama 3 Groq 8B Tool Use (Preview)',
28
33
  enabled: true,
29
34
  functionCall: true,
30
- id: 'llama3-groq-70b-8192-tool-use-preview',
35
+ id: 'llama3-groq-8b-8192-tool-use-preview',
31
36
  tokens: 8192,
32
37
  },
33
38
  {
34
- displayName: 'LLaMA 3 Groq 8b Tool Use (preview)',
39
+ displayName: 'Llama 3 Groq 70B Tool Use (Preview)',
35
40
  enabled: true,
36
41
  functionCall: true,
37
- id: 'llama3-groq-8b-8192-tool-use-preview',
42
+ id: 'llama3-groq-70b-8192-tool-use-preview',
43
+ tokens: 8192,
38
44
  },
39
45
  {
40
- displayName: 'LLaMA3 70B',
46
+ displayName: 'Meta Llama 3 8B',
41
47
  enabled: true,
42
48
  functionCall: true,
43
- id: 'llama3-70b-8192',
49
+ id: 'llama3-8b-8192',
44
50
  tokens: 8192,
45
51
  },
46
52
  {
47
- displayName: 'Mixtral-8x7b',
53
+ displayName: 'Meta Llama 3 70B',
48
54
  enabled: true,
49
55
  functionCall: true,
50
- id: 'mixtral-8x7b-32768',
51
- tokens: 32_768,
52
- },
53
- {
54
- displayName: 'Gemma 7B',
55
- functionCall: true,
56
- id: 'gemma-7b-it',
56
+ id: 'llama3-70b-8192',
57
57
  tokens: 8192,
58
58
  },
59
59
  {
@@ -64,16 +64,17 @@ const Groq: ModelProviderCard = {
64
64
  tokens: 8192,
65
65
  },
66
66
  {
67
- displayName: 'LLaMA3 8B',
68
- enabled: true,
67
+ displayName: 'Gemma 7B',
69
68
  functionCall: true,
70
- id: 'llama3-8b-8192',
69
+ id: 'gemma-7b-it',
71
70
  tokens: 8192,
72
71
  },
73
72
  {
74
- displayName: 'LLaMA2-70b-chat',
75
- id: 'llama2-70b-4096',
76
- tokens: 4096,
73
+ displayName: 'Mixtral 8x7B',
74
+ enabled: true,
75
+ functionCall: true,
76
+ id: 'mixtral-8x7b-32768',
77
+ tokens: 32_768,
77
78
  },
78
79
  ],
79
80
  checkModel: 'gemma2-9b-it',
@@ -23,6 +23,7 @@ const Stepfun: ModelProviderCard = {
23
23
  tokens: 32_000,
24
24
  },
25
25
  {
26
+ enabled: true,
26
27
  id: 'step-1-8k',
27
28
  tokens: 8000,
28
29
  },
@@ -4,6 +4,7 @@ import { fileEnv } from '@/config/file';
4
4
  import { langfuseEnv } from '@/config/langfuse';
5
5
  import { getLLMConfig } from '@/config/llm';
6
6
  import {
7
+ GroqProviderCard,
7
8
  NovitaProviderCard,
8
9
  OllamaProviderCard,
9
10
  OpenAIProviderCard,
@@ -34,7 +35,10 @@ export const getServerGlobalConfig = () => {
34
35
 
35
36
  ENABLED_AWS_BEDROCK,
36
37
  ENABLED_GOOGLE,
38
+
37
39
  ENABLED_GROQ,
40
+ GROQ_MODEL_LIST,
41
+
38
42
  ENABLED_DEEPSEEK,
39
43
  ENABLED_PERPLEXITY,
40
44
  ENABLED_ANTHROPIC,
@@ -99,7 +103,14 @@ export const getServerGlobalConfig = () => {
99
103
  bedrock: { enabled: ENABLED_AWS_BEDROCK },
100
104
  deepseek: { enabled: ENABLED_DEEPSEEK },
101
105
  google: { enabled: ENABLED_GOOGLE },
102
- groq: { enabled: ENABLED_GROQ },
106
+ groq: {
107
+ enabled: ENABLED_GROQ,
108
+ enabledModels: extractEnabledModels(GROQ_MODEL_LIST),
109
+ serverModelCards: transformToChatModelCards({
110
+ defaultChatModels: GroqProviderCard.chatModels,
111
+ modelString: GROQ_MODEL_LIST,
112
+ }),
113
+ },
103
114
  minimax: { enabled: ENABLED_MINIMAX },
104
115
  mistral: { enabled: ENABLED_MISTRAL },
105
116
  moonshot: { enabled: ENABLED_MOONSHOT },