@lobehub/chat 1.16.14 → 1.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic. Click here for more details.

package/CHANGELOG.md CHANGED
@@ -2,6 +2,39 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.17.0](https://github.com/lobehub/lobe-chat/compare/v1.16.14...v1.17.0)
6
+
7
+ <sup>Released on **2024-09-13**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Support new OpenAI o1-preview/o1-mini models.
12
+
13
+ #### 💄 Styles
14
+
15
+ - **misc**: Support Google Model List.
16
+
17
+ <br/>
18
+
19
+ <details>
20
+ <summary><kbd>Improvements and Fixes</kbd></summary>
21
+
22
+ #### What's improved
23
+
24
+ - **misc**: Support new OpenAI o1-preview/o1-mini models, closes [#3943](https://github.com/lobehub/lobe-chat/issues/3943) ([61bfeb2](https://github.com/lobehub/lobe-chat/commit/61bfeb2))
25
+
26
+ #### Styles
27
+
28
+ - **misc**: Support Google Model List, closes [#3938](https://github.com/lobehub/lobe-chat/issues/3938) ([be4efc7](https://github.com/lobehub/lobe-chat/commit/be4efc7))
29
+
30
+ </details>
31
+
32
+ <div align="right">
33
+
34
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
35
+
36
+ </div>
37
+
5
38
  ### [Version 1.16.14](https://github.com/lobehub/lobe-chat/compare/v1.16.13...v1.16.14)
6
39
 
7
40
  <sup>Released on **2024-09-13**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.16.14",
3
+ "version": "1.17.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
package/src/config/llm.ts CHANGED
@@ -28,6 +28,7 @@ export const getLLMConfig = () => {
28
28
  ENABLED_GOOGLE: z.boolean(),
29
29
  GOOGLE_API_KEY: z.string().optional(),
30
30
  GOOGLE_PROXY_URL: z.string().optional(),
31
+ GOOGLE_MODEL_LIST: z.string().optional(),
31
32
 
32
33
  ENABLED_MOONSHOT: z.boolean(),
33
34
  MOONSHOT_API_KEY: z.string().optional(),
@@ -134,6 +135,7 @@ export const getLLMConfig = () => {
134
135
  ENABLED_GOOGLE: !!process.env.GOOGLE_API_KEY,
135
136
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
136
137
  GOOGLE_PROXY_URL: process.env.GOOGLE_PROXY_URL,
138
+ GOOGLE_MODEL_LIST: process.env.GOOGLE_MODEL_LIST,
137
139
 
138
140
  ENABLED_PERPLEXITY: !!process.env.PERPLEXITY_API_KEY,
139
141
  PERPLEXITY_API_KEY: process.env.PERPLEXITY_API_KEY,
@@ -3,6 +3,34 @@ import { ModelProviderCard } from '@/types/llm';
3
3
  // ref: https://platform.openai.com/docs/deprecations
4
4
  const OpenAI: ModelProviderCard = {
5
5
  chatModels: [
6
+ {
7
+ description:
8
+ 'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
9
+ displayName: 'OpenAI o1-mini',
10
+ enabled: true,
11
+ id: 'o1-mini',
12
+ maxOutput: 65_536,
13
+ pricing: {
14
+ input: 3,
15
+ output: 12,
16
+ },
17
+ releasedAt: '2024-09-12',
18
+ tokens: 128_000,
19
+ },
20
+ {
21
+ description:
22
+ 'o1是OpenAI新的推理模型,适用于需要广泛通用知识的复杂任务。该模型具有128K上下文和2023年10月的知识截止日期。',
23
+ displayName: 'OpenAI o1-preview',
24
+ enabled: true,
25
+ id: 'o1-preview',
26
+ maxOutput: 32_768,
27
+ pricing: {
28
+ input: 15,
29
+ output: 60,
30
+ },
31
+ releasedAt: '2024-09-12',
32
+ tokens: 128_000,
33
+ },
6
34
  {
7
35
  description:
8
36
  'GPT-4o mini是OpenAI在GPT-4 Omni之后推出的最新模型,支持图文输入并输出文本。作为他们最先进的小型模型,它比其他近期的前沿模型便宜很多,并且比GPT-3.5 Turbo便宜超过60%。它保持了最先进的智能,同时具有显著的性价比。GPT-4o mini在MMLU测试中获得了 82% 的得分,目前在聊天偏好上排名高于 GPT-4。',
@@ -1,8 +1,34 @@
1
- import { ModelProvider } from '../types';
1
+ import { ChatStreamPayload, ModelProvider, OpenAIChatMessage } from '../types';
2
2
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
+ // TODO: 临时写法,后续要重构成 model card 展示配置
5
+ const o1Models = new Set(['o1-preview', 'o1-mini']);
6
+ const truneO1Payload = (payload: ChatStreamPayload) => ({
7
+ ...payload,
8
+ frequency_penalty: 0,
9
+ messages: payload.messages.map((message: OpenAIChatMessage) => ({
10
+ ...message,
11
+ role: message.role === 'system' ? 'user' : message.role,
12
+ })),
13
+ presence_penalty: 0,
14
+ stream: false,
15
+ temperature: 1,
16
+ top_p: 1,
17
+ });
18
+
4
19
  export const LobeOpenAI = LobeOpenAICompatibleFactory({
5
20
  baseURL: 'https://api.openai.com/v1',
21
+ chatCompletion: {
22
+ handlePayload: (payload) => {
23
+ const { model } = payload;
24
+
25
+ if (o1Models.has(model)) {
26
+ return truneO1Payload(payload) as any;
27
+ }
28
+
29
+ return { ...payload, stream: payload.stream ?? true };
30
+ },
31
+ },
6
32
  debug: {
7
33
  chatCompletion: () => process.env.DEBUG_OPENAI_CHAT_COMPLETION === '1',
8
34
  },
@@ -6,6 +6,7 @@ import { getLLMConfig } from '@/config/llm';
6
6
  import {
7
7
  BedrockProviderCard,
8
8
  FireworksAIProviderCard,
9
+ GoogleProviderCard,
9
10
  GroqProviderCard,
10
11
  NovitaProviderCard,
11
12
  OllamaProviderCard,
@@ -39,6 +40,7 @@ export const getServerGlobalConfig = () => {
39
40
  AWS_BEDROCK_MODEL_LIST,
40
41
 
41
42
  ENABLED_GOOGLE,
43
+ GOOGLE_MODEL_LIST,
42
44
 
43
45
  ENABLED_GROQ,
44
46
  GROQ_MODEL_LIST,
@@ -128,7 +130,14 @@ export const getServerGlobalConfig = () => {
128
130
  }),
129
131
  },
130
132
 
131
- google: { enabled: ENABLED_GOOGLE },
133
+ google: {
134
+ enabled: ENABLED_GOOGLE,
135
+ enabledModels: extractEnabledModels(GOOGLE_MODEL_LIST),
136
+ serverModelCards: transformToChatModelCards({
137
+ defaultChatModels: GoogleProviderCard.chatModels,
138
+ modelString: GOOGLE_MODEL_LIST,
139
+ }),
140
+ },
132
141
  groq: {
133
142
  enabled: ENABLED_GROQ,
134
143
  enabledModels: extractEnabledModels(GROQ_MODEL_LIST),
@@ -49,7 +49,14 @@ describe('modelProviderSelectors', () => {
49
49
  const s = merge(initialState, {}) as unknown as UserStore;
50
50
 
51
51
  const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
52
- expect(result).toEqual(['gpt-4o-mini', 'gpt-4o', 'gpt-4o-2024-08-06', 'chatgpt-4o-latest']);
52
+ expect(result).toEqual([
53
+ 'o1-mini',
54
+ 'o1-preview',
55
+ 'gpt-4o-mini',
56
+ 'gpt-4o',
57
+ 'gpt-4o-2024-08-06',
58
+ 'chatgpt-4o-latest',
59
+ ]);
53
60
  });
54
61
 
55
62
  it('should return undefined for a non-existing provider', () => {