@lobehub/chat 1.16.7 → 1.16.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic. Click here for more details.

Files changed (40) hide show
  1. package/CHANGELOG.md +50 -0
  2. package/README.md +8 -8
  3. package/README.zh-CN.md +8 -8
  4. package/package.json +1 -1
  5. package/src/config/modelProviders/ai360.ts +34 -68
  6. package/src/config/modelProviders/anthropic.ts +57 -11
  7. package/src/config/modelProviders/azure.ts +12 -3
  8. package/src/config/modelProviders/baichuan.ts +33 -12
  9. package/src/config/modelProviders/bedrock.ts +88 -25
  10. package/src/config/modelProviders/deepseek.ts +14 -3
  11. package/src/config/modelProviders/fireworksai.ts +37 -5
  12. package/src/config/modelProviders/google.ts +69 -15
  13. package/src/config/modelProviders/groq.ts +55 -5
  14. package/src/config/modelProviders/minimax.ts +10 -6
  15. package/src/config/modelProviders/mistral.ts +19 -3
  16. package/src/config/modelProviders/moonshot.ts +11 -1
  17. package/src/config/modelProviders/novita.ts +24 -0
  18. package/src/config/modelProviders/ollama.ts +58 -1
  19. package/src/config/modelProviders/openai.ts +153 -18
  20. package/src/config/modelProviders/openrouter.ts +21 -1
  21. package/src/config/modelProviders/perplexity.ts +19 -3
  22. package/src/config/modelProviders/qwen.ts +11 -8
  23. package/src/config/modelProviders/siliconcloud.ts +34 -1
  24. package/src/config/modelProviders/spark.ts +16 -7
  25. package/src/config/modelProviders/stepfun.ts +13 -1
  26. package/src/config/modelProviders/taichu.ts +7 -2
  27. package/src/config/modelProviders/togetherai.ts +38 -2
  28. package/src/config/modelProviders/upstage.ts +11 -4
  29. package/src/config/modelProviders/zeroone.ts +5 -1
  30. package/src/config/modelProviders/zhipu.ts +20 -18
  31. package/src/const/discover.ts +1 -0
  32. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +69 -6
  33. package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +1 -0
  34. package/src/server/routers/edge/config/__snapshots__/index.test.ts.snap +56 -4
  35. package/src/server/routers/edge/config/index.test.ts +3 -7
  36. package/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap +12 -0
  37. package/src/store/user/slices/modelList/action.test.ts +3 -7
  38. package/src/types/llm.ts +30 -1
  39. package/src/utils/__snapshots__/parseModels.test.ts.snap +32 -0
  40. package/src/utils/parseModels.test.ts +1 -20
@@ -20,10 +20,15 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
20
20
  "id": "claude-2",
21
21
  },
22
22
  {
23
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
23
24
  "displayName": "gpt-4-32k",
24
25
  "enabled": true,
25
26
  "functionCall": true,
26
27
  "id": "gpt-4-0125-preview",
28
+ "pricing": {
29
+ "input": 10,
30
+ "output": 30,
31
+ },
27
32
  "tokens": 128000,
28
33
  },
29
34
  ],
@@ -33,10 +38,15 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
33
38
  exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = `
34
39
  [
35
40
  {
41
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
36
42
  "displayName": "GPT-3.5 Turbo (1106)",
37
43
  "enabled": true,
38
44
  "functionCall": true,
39
45
  "id": "gpt-3.5-turbo-1106",
46
+ "pricing": {
47
+ "input": 1,
48
+ "output": 2,
49
+ },
40
50
  "tokens": 16385,
41
51
  },
42
52
  {
@@ -45,50 +55,90 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
45
55
  "enabled": true,
46
56
  "functionCall": true,
47
57
  "id": "gpt-3.5-turbo",
58
+ "pricing": {
59
+ "input": 0.5,
60
+ "output": 1.5,
61
+ },
48
62
  "tokens": 16385,
49
63
  },
50
64
  {
51
- "description": "Currently points to gpt-3.5-turbo-16k-0613",
65
+ "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
52
66
  "displayName": "GPT-3.5 Turbo 16K",
53
67
  "enabled": true,
54
68
  "id": "gpt-3.5-turbo-16k",
55
69
  "legacy": true,
70
+ "pricing": {
71
+ "input": 3,
72
+ "output": 4,
73
+ },
56
74
  "tokens": 16385,
57
75
  },
58
76
  {
59
- "description": "Currently points to gpt-4-0613",
77
+ "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
60
78
  "displayName": "GPT-4",
61
79
  "enabled": true,
62
80
  "functionCall": true,
63
81
  "id": "gpt-4",
82
+ "pricing": {
83
+ "input": 30,
84
+ "output": 60,
85
+ },
64
86
  "tokens": 8192,
65
87
  },
66
88
  {
67
- "description": "Currently points to gpt-4-32k-0613",
89
+ "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
68
90
  "displayName": "GPT-4 32K",
69
91
  "enabled": true,
70
92
  "functionCall": true,
71
93
  "id": "gpt-4-32k",
94
+ "pricing": {
95
+ "input": 60,
96
+ "output": 120,
97
+ },
72
98
  "tokens": 32768,
73
99
  },
74
100
  {
101
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
75
102
  "displayName": "GPT-4 Turbo Preview (1106)",
76
103
  "enabled": true,
77
104
  "functionCall": true,
78
105
  "id": "gpt-4-1106-preview",
106
+ "pricing": {
107
+ "input": 10,
108
+ "output": 30,
109
+ },
79
110
  "tokens": 128000,
80
111
  },
81
112
  {
82
- "description": "Currently points to gpt-4-1106-vision-preview",
113
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
83
114
  "displayName": "GPT-4 Turbo Vision Preview",
84
115
  "enabled": true,
85
116
  "id": "gpt-4-vision-preview",
117
+ "pricing": {
118
+ "input": 10,
119
+ "output": 30,
120
+ },
86
121
  "tokens": 128000,
87
122
  "vision": true,
88
123
  },
89
124
  ]
90
125
  `;
91
126
 
127
+ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > show the hidden model 1`] = `
128
+ {
129
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
130
+ "displayName": "GPT-4 Turbo Preview (1106)",
131
+ "enabled": true,
132
+ "functionCall": true,
133
+ "id": "gpt-4-1106-preview",
134
+ "pricing": {
135
+ "input": 10,
136
+ "output": 30,
137
+ },
138
+ "tokens": 128000,
139
+ }
140
+ `;
141
+
92
142
  exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = `
93
143
  {
94
144
  "enabled": false,
@@ -98,6 +148,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_
98
148
  ],
99
149
  "serverModelCards": [
100
150
  {
151
+ "description": "Google 的 Gemma 7B 具有出色的计算效率,适用于多种硬件架构,如GPU和TPU。",
101
152
  "displayName": "Google: Gemma 7B (free)",
102
153
  "enabled": true,
103
154
  "functionCall": false,
@@ -106,6 +157,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_
106
157
  "vision": false,
107
158
  },
108
159
  {
160
+ "description": "Mistral 7B Instruct 是一款高效的多语言模型,优化用于对话和问答,能在资源受限的环境中表现出色。",
109
161
  "displayName": "Mistral 7B Instruct (free)",
110
162
  "enabled": true,
111
163
  "functionCall": false,
@@ -90,13 +90,9 @@ describe('configRouter', () => {
90
90
 
91
91
  const result = response.languageModel?.openai?.serverModelCards;
92
92
 
93
- expect(result?.find((o) => o.id === 'gpt-4-1106-preview')).toEqual({
94
- displayName: 'GPT-4 Turbo Preview (1106)',
95
- functionCall: true,
96
- enabled: true,
97
- id: 'gpt-4-1106-preview',
98
- tokens: 128000,
99
- });
93
+ const model = result?.find((o) => o.id === 'gpt-4-1106-preview');
94
+
95
+ expect(model).toMatchSnapshot();
100
96
 
101
97
  process.env.OPENAI_MODEL_LIST = '';
102
98
  });
@@ -0,0 +1,12 @@
1
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
2
+
3
+ exports[`LLMSettingsSliceAction > refreshModelProviderList > visible 1`] = `
4
+ {
5
+ "description": "LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。",
6
+ "displayName": "LLaVA 7B",
7
+ "enabled": true,
8
+ "id": "llava",
9
+ "tokens": 4096,
10
+ "vision": true,
11
+ }
12
+ `;
@@ -154,13 +154,9 @@ describe('LLMSettingsSliceAction', () => {
154
154
 
155
155
  const ollamaList = result.current.modelProviderList.find((r) => r.id === 'ollama');
156
156
  // Assert that setModelProviderConfig was not called
157
- expect(ollamaList?.chatModels.find((c) => c.id === 'llava')).toEqual({
158
- displayName: 'LLaVA 7B',
159
- enabled: true,
160
- id: 'llava',
161
- tokens: 4096,
162
- vision: true,
163
- });
157
+ const model = ollamaList?.chatModels.find((c) => c.id === 'llava');
158
+
159
+ expect(model).toMatchSnapshot();
164
160
  });
165
161
 
166
162
  it('modelProviderListForModelSelect should return only enabled providers', () => {
package/src/types/llm.ts CHANGED
@@ -33,10 +33,29 @@ export interface ChatModelCard {
33
33
  */
34
34
  legacy?: boolean;
35
35
  maxOutput?: number;
36
+ pricing?: {
37
+ cachedInput?: number;
38
+ /**
39
+ * the currency of the pricing
40
+ * @default USD
41
+ */
42
+ currency?: 'CNY' | 'USD';
43
+ /**
44
+ * the input pricing, e.g. $1 / 1M tokens
45
+ */
46
+ input?: number;
47
+ /**
48
+ * the output pricing, e.g. $2 / 1M tokens
49
+ */
50
+ output?: number;
51
+ writeCacheInput?: number;
52
+ };
53
+ releasedAt?: string;
36
54
  /**
37
55
  * the context window (or input + output tokens limit)
38
56
  */
39
57
  tokens?: number;
58
+
40
59
  /**
41
60
  * whether model supports vision
42
61
  */
@@ -61,6 +80,7 @@ export interface ModelProviderCard {
61
80
  * @default false
62
81
  */
63
82
  defaultShowBrowserRequest?: boolean;
83
+ description?: string;
64
84
  /**
65
85
  * some provider server like stepfun and aliyun don't support browser request,
66
86
  * So we should disable it
@@ -79,6 +99,10 @@ export interface ModelProviderCard {
79
99
  placeholder?: string;
80
100
  showModelFetcher?: boolean;
81
101
  };
102
+ /**
103
+ * the url show the all models in the provider
104
+ */
105
+ modelsUrl?: string;
82
106
  /**
83
107
  * the name show for end user
84
108
  */
@@ -90,16 +114,21 @@ export interface ModelProviderCard {
90
114
  title?: string;
91
115
  }
92
116
  | false;
117
+
93
118
  /**
94
119
  * whether show api key in the provider config
95
120
  * so provider like ollama don't need api key field
96
121
  */
97
122
  showApiKey?: boolean;
98
-
99
123
  /**
100
124
  * whether to smoothing the output
101
125
  */
102
126
  smoothing?: SmoothingParams;
127
+
128
+ /**
129
+ * provider's website url
130
+ */
131
+ url?: string;
103
132
  }
104
133
 
105
134
  // 语言模型的设置参数
@@ -61,3 +61,35 @@ exports[`parseModelString > only add the model 1`] = `
61
61
  "removed": [],
62
62
  }
63
63
  `;
64
+
65
+ exports[`transformToChatModelCards > should have file with builtin models like gpt-4-0125-preview 1`] = `
66
+ [
67
+ {
68
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
69
+ "displayName": "ChatGPT-4",
70
+ "enabled": true,
71
+ "files": true,
72
+ "functionCall": true,
73
+ "id": "gpt-4-0125-preview",
74
+ "pricing": {
75
+ "input": 10,
76
+ "output": 30,
77
+ },
78
+ "tokens": 128000,
79
+ },
80
+ {
81
+ "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
82
+ "displayName": "ChatGPT-4 Vision",
83
+ "enabled": true,
84
+ "files": true,
85
+ "functionCall": true,
86
+ "id": "gpt-4-turbo-2024-04-09",
87
+ "pricing": {
88
+ "input": 10,
89
+ "output": 30,
90
+ },
91
+ "tokens": 128000,
92
+ "vision": true,
93
+ },
94
+ ]
95
+ `;
@@ -266,25 +266,6 @@ describe('transformToChatModelCards', () => {
266
266
  defaultChatModels: OpenAIProviderCard.chatModels,
267
267
  });
268
268
 
269
- expect(result).toEqual([
270
- {
271
- displayName: 'ChatGPT-4',
272
- files: true,
273
- functionCall: true,
274
- enabled: true,
275
- id: 'gpt-4-0125-preview',
276
- tokens: 128000,
277
- },
278
- {
279
- description: 'GPT-4 Turbo 视觉版 (240409)',
280
- displayName: 'ChatGPT-4 Vision',
281
- files: true,
282
- functionCall: true,
283
- enabled: true,
284
- id: 'gpt-4-turbo-2024-04-09',
285
- tokens: 128000,
286
- vision: true,
287
- },
288
- ]);
269
+ expect(result).toMatchSnapshot();
289
270
  });
290
271
  });