@lobehub/chat 1.15.3 → 1.15.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.15.4](https://github.com/lobehub/lobe-chat/compare/v1.15.3...v1.15.4)
6
+
7
+ <sup>Released on **2024-09-01**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Update Novita AI model info & add `NOVITA_MODEL_LIST` support.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Update Novita AI model info & add `NOVITA_MODEL_LIST` support, closes [#3715](https://github.com/lobehub/lobe-chat/issues/3715) ([4ab33f6](https://github.com/lobehub/lobe-chat/commit/4ab33f6))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.15.3](https://github.com/lobehub/lobe-chat/compare/v1.15.2...v1.15.3)
6
31
 
7
32
  <sup>Released on **2024-09-01**</sup>
package/Dockerfile CHANGED
@@ -127,7 +127,7 @@ ENV \
127
127
  # Moonshot
128
128
  MOONSHOT_API_KEY="" MOONSHOT_PROXY_URL="" \
129
129
  # Novita
130
- NOVITA_API_KEY="" \
130
+ NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
131
131
  # Ollama
132
132
  OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
133
133
  # OpenAI
@@ -159,7 +159,7 @@ ENV \
159
159
  # Moonshot
160
160
  MOONSHOT_API_KEY="" MOONSHOT_PROXY_URL="" \
161
161
  # Novita
162
- NOVITA_API_KEY="" \
162
+ NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
163
163
  # Ollama
164
164
  OLLAMA_MODEL_LIST="" OLLAMA_PROXY_URL="" \
165
165
  # OpenAI
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.15.3",
3
+ "version": "1.15.4",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
package/src/config/llm.ts CHANGED
@@ -81,6 +81,7 @@ export const getLLMConfig = () => {
81
81
 
82
82
  ENABLED_NOVITA: z.boolean(),
83
83
  NOVITA_API_KEY: z.string().optional(),
84
+ NOVITA_MODEL_LIST: z.string().optional(),
84
85
 
85
86
  ENABLED_BAICHUAN: z.boolean(),
86
87
  BAICHUAN_API_KEY: z.string().optional(),
@@ -176,6 +177,7 @@ export const getLLMConfig = () => {
176
177
 
177
178
  ENABLED_NOVITA: !!process.env.NOVITA_API_KEY,
178
179
  NOVITA_API_KEY: process.env.NOVITA_API_KEY,
180
+ NOVITA_MODEL_LIST: process.env.NOVITA_MODEL_LIST,
179
181
 
180
182
  ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
181
183
  BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
@@ -1,30 +1,47 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref: https://novita.ai/model-api/product/llm-api
3
4
  const Novita: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
6
- displayName: 'Llama3 8B Instruct',
7
+ displayName: 'Llama3.1 8B Instruct',
8
+ enabled: true,
9
+ id: 'meta-llama/llama-3.1-8b-instruct',
10
+ tokens: 8192,
11
+ },
12
+ {
13
+ displayName: 'Llama3.1 70B Instruct',
7
14
  enabled: true,
15
+ id: 'meta-llama/llama-3.1-70b-instruct',
16
+ tokens: 131_072,
17
+ },
18
+ {
19
+ displayName: 'Llama3.1 405B Instruct',
20
+ enabled: true,
21
+ id: 'meta-llama/llama-3.1-405b-instruct',
22
+ tokens: 32_768,
23
+ },
24
+ {
25
+ displayName: 'Llama3 8B Instruct',
8
26
  id: 'meta-llama/llama-3-8b-instruct',
9
27
  tokens: 8192,
10
28
  },
11
29
  {
12
30
  displayName: 'Llama3 70B Instruct',
13
- enabled: true,
14
31
  id: 'meta-llama/llama-3-70b-instruct',
15
32
  tokens: 8192,
16
33
  },
17
34
  {
18
- displayName: 'Nous Hermes 2 Pro - Llama3 8B',
35
+ displayName: 'Gemma 2 9B',
19
36
  enabled: true,
20
- id: 'nousresearch/hermes-2-pro-llama-3-8b',
37
+ id: 'google/gemma-2-9b-it',
21
38
  tokens: 8192,
22
39
  },
23
40
  {
24
- displayName: 'Nous Hermes - Llama2 8B',
41
+ displayName: 'Mistral Nemo',
25
42
  enabled: true,
26
- id: 'nousresearch/nous-hermes-llama2-13b',
27
- tokens: 4096,
43
+ id: 'mistralai/mistral-nemo',
44
+ tokens: 32_768,
28
45
  },
29
46
  {
30
47
  displayName: 'Mistral 7B Instruct',
@@ -33,55 +50,44 @@ const Novita: ModelProviderCard = {
33
50
  tokens: 32_768,
34
51
  },
35
52
  {
36
- displayName: 'Dolphin Mixtral 8x22B',
53
+ displayName: 'WizardLM 2 7B',
37
54
  enabled: true,
38
- id: 'cognitivecomputations/dolphin-mixtral-8x22b',
39
- tokens: 16_000,
55
+ id: 'microsoft/wizardlm-2-7b',
56
+ tokens: 32_768,
40
57
  },
41
58
  {
42
- displayName: 'L3-70b-Euryale-v2.1',
59
+ displayName: 'WizardLM-2 8x22B',
43
60
  enabled: true,
44
- id: 'sao10k/l3-70b-euryale-v2.1',
45
- tokens: 16_000,
61
+ id: 'microsoft/wizardlm-2-8x22b',
62
+ tokens: 65_535,
46
63
  },
47
64
  {
48
- displayName: 'Midnight Rose 70B',
49
- enabled: true,
50
- id: 'sophosympatheia/midnight-rose-70b',
51
- tokens: 4096,
65
+ displayName: 'Dolphin Mixtral 8x22B',
66
+ id: 'cognitivecomputations/dolphin-mixtral-8x22b',
67
+ tokens: 16_000,
52
68
  },
53
69
  {
54
- displayName: 'Mythomax L2 13b',
55
- enabled: true,
56
- id: 'gryphe/mythomax-l2-13b',
57
- tokens: 4096,
70
+ displayName: 'Hermes 2 Pro Llama 3 8B',
71
+ id: 'nousresearch/hermes-2-pro-llama-3-8b',
72
+ tokens: 8192,
58
73
  },
59
74
  {
60
- displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO',
61
- enabled: true,
75
+ displayName: 'Hermes 2 Mixtral 8x7B DPO',
62
76
  id: 'Nous-Hermes-2-Mixtral-8x7B-DPO',
63
77
  tokens: 32_768,
64
78
  },
65
79
  {
66
- displayName: 'Lzlv 70b',
67
- enabled: true,
68
- id: 'lzlv_70b',
80
+ displayName: 'MythoMax L2 13B',
81
+ id: 'gryphe/mythomax-l2-13b',
69
82
  tokens: 4096,
70
83
  },
71
84
  {
72
- displayName: 'Open Hermes 2.5 Mistral 7B',
73
- enabled: true,
74
- id: 'teknium/openhermes-2.5-mistral-7b',
85
+ displayName: 'OpenChat 7B',
86
+ id: 'openchat/openchat-7b',
75
87
  tokens: 4096,
76
88
  },
77
- {
78
- displayName: 'Wizardlm2 8x22B',
79
- enabled: true,
80
- id: 'microsoft/wizardlm-2-8x22b',
81
- tokens: 65_535,
82
- },
83
89
  ],
84
- checkModel: 'meta-llama/llama-3-70b-instruct',
90
+ checkModel: 'meta-llama/llama-3.1-8b-instruct',
85
91
  disableBrowserRequest: true,
86
92
  id: 'novita',
87
93
  modelList: { showModelFetcher: true },
@@ -4,6 +4,7 @@ import { fileEnv } from '@/config/file';
4
4
  import { langfuseEnv } from '@/config/langfuse';
5
5
  import { getLLMConfig } from '@/config/llm';
6
6
  import {
7
+ NovitaProviderCard,
7
8
  OllamaProviderCard,
8
9
  OpenAIProviderCard,
9
10
  OpenRouterProviderCard,
@@ -39,7 +40,9 @@ export const getServerGlobalConfig = () => {
39
40
  ENABLED_ANTHROPIC,
40
41
  ENABLED_MINIMAX,
41
42
  ENABLED_MISTRAL,
43
+
42
44
  ENABLED_NOVITA,
45
+ NOVITA_MODEL_LIST,
43
46
 
44
47
  ENABLED_QWEN,
45
48
  QWEN_MODEL_LIST,
@@ -100,7 +103,14 @@ export const getServerGlobalConfig = () => {
100
103
  minimax: { enabled: ENABLED_MINIMAX },
101
104
  mistral: { enabled: ENABLED_MISTRAL },
102
105
  moonshot: { enabled: ENABLED_MOONSHOT },
103
- novita: { enabled: ENABLED_NOVITA },
106
+ novita: {
107
+ enabled: ENABLED_NOVITA,
108
+ enabledModels: extractEnabledModels(NOVITA_MODEL_LIST),
109
+ serverModelCards: transformToChatModelCards({
110
+ defaultChatModels: NovitaProviderCard.chatModels,
111
+ modelString: NOVITA_MODEL_LIST,
112
+ }),
113
+ },
104
114
  ollama: {
105
115
  enabled: ENABLED_OLLAMA,
106
116
  fetchOnClient: !OLLAMA_PROXY_URL,