@lobehub/chat 1.42.0 → 1.42.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +58 -0
  2. package/changelog/v1.json +18 -0
  3. package/next.config.ts +0 -1
  4. package/package.json +2 -2
  5. package/src/app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal/Form.tsx +5 -3
  6. package/src/{app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal → components}/MaxTokenSlider.tsx +4 -5
  7. package/src/components/ModelSelect/index.tsx +6 -3
  8. package/src/components/NProgress/index.tsx +9 -1
  9. package/src/config/modelProviders/openai.ts +15 -0
  10. package/src/config/modelProviders/openrouter.ts +15 -0
  11. package/src/const/auth.ts +1 -1
  12. package/src/database/server/models/__tests__/user.test.ts +11 -0
  13. package/src/database/server/models/user.ts +4 -0
  14. package/src/libs/agent-runtime/AgentRuntime.test.ts +10 -10
  15. package/src/libs/agent-runtime/AgentRuntime.ts +3 -3
  16. package/src/libs/agent-runtime/ollama/index.test.ts +4 -1
  17. package/src/libs/agent-runtime/ollama/index.ts +2 -2
  18. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +10 -0
  19. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +14 -3
  20. package/src/locales/default/components.ts +3 -0
  21. package/src/locales/default/setting.ts +0 -1
  22. package/src/server/modules/AgentRuntime/index.test.ts +8 -8
  23. package/src/server/modules/AgentRuntime/index.ts +5 -5
  24. package/src/services/__tests__/_auth.test.ts +5 -6
  25. package/src/services/__tests__/chat.test.ts +1 -0
  26. package/src/services/_auth.ts +3 -3
  27. package/src/services/chat.ts +7 -8
  28. package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +1 -0
  29. package/src/types/aiModel.ts +275 -0
  30. package/src/types/aiProvider.ts +148 -0
  31. package/src/types/llm.ts +3 -17
  32. package/src/utils/merge.test.ts +48 -0
  33. package/src/utils/merge.ts +39 -0
package/CHANGELOG.md CHANGED
@@ -2,6 +2,64 @@
 
 # Changelog
 
+### [Version 1.42.2](https://github.com/lobehub/lobe-chat/compare/v1.42.1...v1.42.2)
+
+<sup>Released on **2024-12-31**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Refactor the agent runtime payload.
+
+#### 💄 Styles
+
+- **misc**: Add o1 model in openai and openrouter models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Refactor the agent runtime payload, closes [#5250](https://github.com/lobehub/lobe-chat/issues/5250) ([e420ab3](https://github.com/lobehub/lobe-chat/commit/e420ab3))
+
+#### Styles
+
+- **misc**: Add o1 model in openai and openrouter models, closes [#5236](https://github.com/lobehub/lobe-chat/issues/5236) ([f733852](https://github.com/lobehub/lobe-chat/commit/f733852))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
+### [Version 1.42.1](https://github.com/lobehub/lobe-chat/compare/v1.42.0...v1.42.1)
+
+<sup>Released on **2024-12-29**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix custom max_token not saved from customModelCards.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix custom max_token not saved from customModelCards, closes [#5226](https://github.com/lobehub/lobe-chat/issues/5226) ([ab6d17c](https://github.com/lobehub/lobe-chat/commit/ab6d17c))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ## [Version 1.42.0](https://github.com/lobehub/lobe-chat/compare/v1.41.0...v1.42.0)
 
 <sup>Released on **2024-12-29**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Add o1 model in openai and openrouter models."
+      ]
+    },
+    "date": "2024-12-31",
+    "version": "1.42.2"
+  },
+  {
+    "children": {
+      "fixes": [
+        "Fix custom max_token not saved from customModelCards."
+      ]
+    },
+    "date": "2024-12-29",
+    "version": "1.42.1"
+  },
   {
     "children": {
       "features": [
package/next.config.ts CHANGED
@@ -25,7 +25,6 @@ const nextConfig: NextConfig = {
       '@icons-pack/react-simple-icons',
       '@lobehub/ui',
       'gpt-tokenizer',
-      'chroma-js',
     ],
     webVitalsAttribution: ['CLS', 'LCP'],
   },
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.42.0",
+  "version": "1.42.2",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -265,7 +265,7 @@
     "@types/numeral": "^2.0.5",
     "@types/pg": "^8.11.10",
     "@types/react": "18.3.13",
-    "@types/react-dom": "^18.3.1",
+    "@types/react-dom": "^19.0.0",
     "@types/rtl-detect": "^1.0.3",
     "@types/semver": "^7.5.8",
     "@types/systemjs": "^6.15.1",
package/src/app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal/Form.tsx CHANGED
@@ -2,11 +2,10 @@ import { Checkbox, Form, FormInstance, Input } from 'antd';
 import { memo, useEffect } from 'react';
 import { useTranslation } from 'react-i18next';
 
+import MaxTokenSlider from '@/components/MaxTokenSlider';
 import { useIsMobile } from '@/hooks/useIsMobile';
 import { ChatModelCard } from '@/types/llm';
 
-import MaxTokenSlider from './MaxTokenSlider';
-
 interface ModelConfigFormProps {
   initialValues?: ChatModelCard;
   onFormInstanceReady: (instance: FormInstance) => void;
@@ -66,7 +65,10 @@ const ModelConfigForm = memo<ModelConfigFormProps>(
         >
           <Input placeholder={t('llm.customModelCards.modelConfig.displayName.placeholder')} />
         </Form.Item>
-        <Form.Item label={t('llm.customModelCards.modelConfig.tokens.title')} name={'tokens'}>
+        <Form.Item
+          label={t('llm.customModelCards.modelConfig.tokens.title')}
+          name={'contextWindowTokens'}
+        >
           <MaxTokenSlider />
         </Form.Item>
         <Form.Item
package/src/{app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal → components}/MaxTokenSlider.tsx CHANGED
@@ -4,8 +4,7 @@ import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 import useMergeState from 'use-merge-value';
 
-import { useServerConfigStore } from '@/store/serverConfig';
-import { serverConfigSelectors } from '@/store/serverConfig/selectors';
+import { useIsMobile } from '@/hooks/useIsMobile';
 
 const Kibi = 1024;
 
@@ -20,7 +19,7 @@ interface MaxTokenSliderProps {
 }
 
 const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValue }) => {
-  const { t } = useTranslation('setting');
+  const { t } = useTranslation('components');
 
   const [token, setTokens] = useMergeState(0, {
     defaultValue,
@@ -45,7 +44,7 @@ const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValu
     setPowValue(exponent(value / Kibi));
   };
 
-  const isMobile = useServerConfigStore(serverConfigSelectors.isMobile);
+  const isMobile = useIsMobile();
 
   const marks = useMemo(() => {
     return {
@@ -74,7 +73,7 @@ const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValu
       tooltip={{
        formatter: (x) => {
          if (typeof x === 'undefined') return;
-          if (x === 0) return t('llm.customModelCards.modelConfig.tokens.unlimited');
+          if (x === 0) return t('MaxTokenSlider.unlimited');
 
          let value = getRealValue(x);
          if (value < 125) return value.toFixed(0) + 'K';
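
The relocated slider keeps its logarithmic scale: positions map to powers of two in Kibi (1024-token) units, which is why the change handler above calls `exponent(value / Kibi)` and the tooltip formats `getRealValue(x)` with a `K` suffix. A minimal sketch of that mapping; `exponent` and `getRealValue` here are reconstructions inferred from the calls visible in the diff, not the component's actual code:

```ts
const Kibi = 1024;

// token count in Kibi units -> slider position (a power-of-two exponent)
const exponent = (kibiTokens: number) => Math.log2(kibiTokens);

// slider position -> token count in Kibi units (what the tooltip formats)
const getRealValue = (pow: number) => 2 ** pow;

// A 32_768-token context window sits at slider position 5;
// the tooltip renders getRealValue(5) = 32 as "32K".
console.log(exponent(32_768 / Kibi)); // 5
console.log(getRealValue(5)); // 32
```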
package/src/components/ModelSelect/index.tsx CHANGED
@@ -9,6 +9,7 @@ import { FC, memo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Center, Flexbox } from 'react-layout-kit';
 
+import { ModelAbilities } from '@/types/aiModel';
 import { ChatModelCard } from '@/types/llm';
 import { formatTokenNumber } from '@/utils/format';
 
@@ -57,8 +58,10 @@ const useStyles = createStyles(({ css, token }) => ({
   `,
 }));
 
-interface ModelInfoTagsProps extends ChatModelCard {
+interface ModelInfoTagsProps extends ModelAbilities {
+  contextWindowTokens?: number | null;
   directionReverse?: boolean;
+  isCustom?: boolean;
   placement?: 'top' | 'right';
 }
 
@@ -102,7 +105,7 @@ export const ModelInfoTags = memo<ModelInfoTagsProps>(
           </div>
         </Tooltip>
       )}
-      {model.contextWindowTokens !== undefined && (
+      {typeof model.contextWindowTokens === 'number' && (
         <Tooltip
           overlayStyle={{ maxWidth: 'unset', pointerEvents: 'none' }}
           placement={placement}
@@ -117,7 +120,7 @@ export const ModelInfoTags = memo<ModelInfoTagsProps>(
             {model.contextWindowTokens === 0 ? (
               <Infinity size={17} strokeWidth={1.6} />
             ) : (
-              formatTokenNumber(model.contextWindowTokens)
+              formatTokenNumber(model.contextWindowTokens as number)
            )}
          </Center>
        </Tooltip>
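
Because `contextWindowTokens` may now be `null` as well as absent, the guard switches from `!== undefined` to a `typeof` check; `0` still passes and renders the infinity icon. A quick standalone illustration of the difference:

```ts
// Mirrors the new guard in ModelInfoTags; the input values are examples.
const shouldRenderTag = (contextWindowTokens?: number | null) =>
  typeof contextWindowTokens === 'number';

console.log(shouldRenderTag(200_000)); // true  -> token tag rendered
console.log(shouldRenderTag(0)); // true  -> rendered as the Infinity icon
console.log(shouldRenderTag(null)); // false -> the old `!== undefined` check let this through
console.log(shouldRenderTag(undefined)); // false
```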
package/src/components/NProgress/index.tsx CHANGED
@@ -6,7 +6,15 @@ import { memo } from 'react';
 
 const NProgress = memo(() => {
   const theme = useTheme();
-  return <NextTopLoader color={theme.colorText} height={2} shadow={false} showSpinner={false} />;
+  return (
+    <NextTopLoader
+      color={theme.colorText}
+      height={2}
+      shadow={false}
+      showSpinner={false}
+      zIndex={1000}
+    />
+  );
 });
 
 export default NProgress;
package/src/config/modelProviders/openai.ts CHANGED
@@ -17,6 +17,21 @@ const OpenAI: ModelProviderCard = {
       },
       releasedAt: '2024-09-12',
     },
+    {
+      contextWindowTokens: 200_000,
+      description:
+        'o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。',
+      displayName: 'OpenAI o1',
+      enabled: true,
+      id: 'o1-2024-12-17',
+      maxOutput: 100_000,
+      pricing: {
+        input: 15,
+        output: 60,
+      },
+      releasedAt: '2024-12-17',
+      vision: true,
+    },
     {
       contextWindowTokens: 128_000,
       description:
package/src/config/modelProviders/openrouter.ts CHANGED
@@ -27,6 +27,21 @@ const OpenRouter: ModelProviderCard = {
       },
       releasedAt: '2024-09-12',
     },
+    {
+      contextWindowTokens: 200_000,
+      description:
+        'o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。',
+      displayName: 'OpenAI o1',
+      enabled: true,
+      id: 'openai/o1',
+      maxOutput: 100_000,
+      pricing: {
+        input: 15,
+        output: 60,
+      },
+      releasedAt: '2024-12-17',
+      vision: true,
+    },
     {
       contextWindowTokens: 128_000,
       description:
package/src/const/auth.ts CHANGED
@@ -28,7 +28,7 @@ export interface JWTPayload {
   /**
    * Represents the endpoint of provider
    */
-  endpoint?: string;
+  baseURL?: string;
 
   azureApiVersion?: string;
 
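This rename is the heart of the agent runtime payload refactor (#5250): the JWT payload now carries `baseURL`, matching the field name the runtimes consume, instead of `endpoint`. A hypothetical payload under the new shape; the values are placeholders and only the field names come from this diff:

```ts
import { JWTPayload } from '@/const/auth';

const payload: JWTPayload = {
  apiKey: 'sk-placeholder', // user-scoped key
  azureApiVersion: '2024-06-01', // only meaningful for Azure
  baseURL: 'https://proxy.example.com/v1', // was `endpoint` before 1.42.2
};
```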
package/src/database/server/models/__tests__/user.test.ts CHANGED
@@ -130,6 +130,17 @@
     });
   });
 
+  describe('getUserSettings', () => {
+    it('should get user settings', async () => {
+      await serverDB.insert(users).values({ id: userId });
+      await serverDB.insert(userSettings).values({ id: userId, general: { language: 'en-US' } });
+
+      const data = await userModel.getUserSettings();
+
+      expect(data).toMatchObject({ id: userId, general: { language: 'en-US' } });
+    });
+  });
+
   describe('deleteSetting', () => {
     it('should delete user settings', async () => {
       await serverDB.insert(users).values({ id: userId });
package/src/database/server/models/user.ts CHANGED
@@ -75,6 +75,10 @@ export class UserModel {
     };
   };
 
+  getUserSettings = async () => {
+    return this.db.query.userSettings.findFirst({ where: eq(userSettings.id, this.userId) });
+  };
+
   updateUser = async (value: Partial<UserItem>) => {
     return this.db
       .update(users)
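
`getUserSettings` returns the user's `userSettings` row via Drizzle's `findFirst`, so callers get the row or `undefined`. A hedged usage sketch; the import path and the way `userModel` is constructed elsewhere are assumptions based on the test above:

```ts
import { UserModel } from '@/database/server/models/user'; // assumed path

// Assumed helper: read one field with a fallback when no settings row exists.
const readLanguage = async (userModel: UserModel) => {
  const settings = await userModel.getUserSettings();
  // findFirst resolves to the row, or undefined when nothing matches
  return settings?.general?.language ?? 'en-US';
};
```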
package/src/libs/agent-runtime/AgentRuntime.test.ts CHANGED
@@ -75,8 +75,8 @@ describe('AgentRuntime', () => {
   describe('Azure OpenAI provider', () => {
     it('should initialize correctly', async () => {
       const jwtPayload = {
-        apikey: 'user-azure-key',
-        endpoint: 'user-azure-endpoint',
+        apiKey: 'user-azure-key',
+        baseURL: 'user-azure-endpoint',
         apiVersion: '2024-06-01',
       };
 
@@ -90,8 +90,8 @@ describe('AgentRuntime', () => {
     });
     it('should initialize with azureOpenAIParams correctly', async () => {
       const jwtPayload = {
-        apikey: 'user-openai-key',
-        endpoint: 'user-endpoint',
+        apiKey: 'user-openai-key',
+        baseURL: 'user-endpoint',
         apiVersion: 'custom-version',
       };
 
@@ -106,8 +106,8 @@ describe('AgentRuntime', () => {
 
     it('should initialize with AzureAI correctly', async () => {
       const jwtPayload = {
-        apikey: 'user-azure-key',
-        endpoint: 'user-azure-endpoint',
+        apiKey: 'user-azure-key',
+        baseURL: 'user-azure-endpoint',
       };
       const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Azure, {
         azure: jwtPayload,
@@ -171,7 +171,7 @@ describe('AgentRuntime', () => {
 
   describe('Ollama provider', () => {
     it('should initialize correctly', async () => {
-      const jwtPayload: JWTPayload = { endpoint: 'user-ollama-url' };
+      const jwtPayload: JWTPayload = { baseURL: 'https://user-ollama-url' };
       const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Ollama, {
         ollama: jwtPayload,
       });
@@ -255,7 +255,7 @@ describe('AgentRuntime', () => {
 
   describe('AgentRuntime chat method', () => {
     it('should run correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
+      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
       const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
         openai: jwtPayload,
       });
@@ -271,7 +271,7 @@ describe('AgentRuntime', () => {
       await runtime.chat(payload);
     });
     it('should handle options correctly', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
+      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
       const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
         openai: jwtPayload,
       });
@@ -300,7 +300,7 @@ describe('AgentRuntime', () => {
   });
 
   describe('callback', async () => {
-    const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
+    const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
     const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.OpenAI, {
       openai: jwtPayload,
     });
package/src/libs/agent-runtime/AgentRuntime.ts CHANGED
@@ -133,7 +133,7 @@ class AgentRuntime {
       ai21: Partial<ClientOptions>;
       ai360: Partial<ClientOptions>;
       anthropic: Partial<ClientOptions>;
-      azure: { apiVersion?: string; apikey?: string; endpoint?: string };
+      azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
       baichuan: Partial<ClientOptions>;
       bedrock: Partial<LobeBedrockAIParams>;
       cloudflare: Partial<LobeCloudflareParams>;
@@ -180,8 +180,8 @@ class AgentRuntime {
 
       case ModelProvider.Azure: {
        runtimeModel = new LobeAzureOpenAI(
-          params.azure?.endpoint,
-          params.azure?.apikey,
+          params.azure?.baseURL,
+          params.azure?.apiKey,
          params.azure?.apiVersion,
        );
        break;
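
With the payload refactor, the Azure branch accepts the same camel-cased keys as every other provider (`apiKey`, `baseURL`) instead of the old `apikey`/`endpoint`. A hedged initialization sketch; the import path is assumed and the values are placeholders:

```ts
import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime'; // assumed path

const initAzure = () =>
  AgentRuntime.initializeWithProviderOptions(ModelProvider.Azure, {
    azure: {
      apiKey: 'user-azure-key',
      apiVersion: '2024-06-01',
      baseURL: 'https://my-resource.openai.azure.com', // placeholder endpoint
    },
  });
```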
package/src/libs/agent-runtime/ollama/index.test.ts CHANGED
@@ -29,7 +29,10 @@ describe('LobeOllamaAI', () => {
     try {
       new LobeOllamaAI({ baseURL: 'invalid-url' });
     } catch (e) {
-      expect(e).toEqual(AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs));
+      expect(e).toEqual({
+        error: new TypeError('Invalid URL'),
+        errorType: 'InvalidOllamaArgs',
+      });
     }
   });
 });
package/src/libs/agent-runtime/ollama/index.ts CHANGED
@@ -22,8 +22,8 @@ export class LobeOllamaAI implements LobeRuntimeAI {
   constructor({ baseURL }: ClientOptions = {}) {
     try {
       if (baseURL) new URL(baseURL);
-    } catch {
-      throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs);
+    } catch (e) {
+      throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs, e);
     }
 
     this.client = new Ollama(!baseURL ? undefined : { host: baseURL });
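
Forwarding the caught exception means the thrown object now carries its cause, which is the shape the updated test asserts. A small reproduction of the underlying `TypeError` and the resulting wrapper; the wrapper literal below is inferred from the test expectation, not from `createError`'s source:

```ts
try {
  new URL('invalid-url'); // throws TypeError: Invalid URL
} catch (e) {
  // createError(errorType, error) appears to produce this plain object:
  const wrapped = { error: e, errorType: 'InvalidOllamaArgs' };
  console.log(wrapped.error instanceof TypeError); // true
}
```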
package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap CHANGED
@@ -12,6 +12,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 0.5,
       "output": 1.5,
     },
+    "releasedAt": "2023-02-28",
   },
   {
     "id": "gpt-3.5-turbo-16k",
@@ -35,6 +36,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 10,
       "output": 30,
     },
+    "releasedAt": "2024-01-23",
   },
   {
     "contextWindowTokens": 128000,
@@ -46,6 +48,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 10,
       "output": 30,
     },
+    "releasedAt": "2024-01-23",
   },
   {
     "contextWindowTokens": 4096,
@@ -56,6 +59,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 1.5,
       "output": 2,
     },
+    "releasedAt": "2023-08-24",
   },
   {
     "id": "gpt-3.5-turbo-0301",
@@ -73,6 +77,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 1,
       "output": 2,
     },
+    "releasedAt": "2023-11-02",
   },
   {
     "contextWindowTokens": 128000,
@@ -84,6 +89,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 10,
       "output": 30,
     },
+    "releasedAt": "2023-11-02",
   },
   {
     "contextWindowTokens": 128000,
@@ -91,6 +97,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "description": "GPT-4 视觉预览版,专为图像分析和处理任务设计。",
     "displayName": "GPT 4 Turbo with Vision Preview",
     "id": "gpt-4-vision-preview",
+    "releasedAt": "2023-11-02",
     "vision": true,
   },
   {
@@ -103,6 +110,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 30,
       "output": 60,
     },
+    "releasedAt": "2023-06-27",
   },
   {
     "contextWindowTokens": 16385,
@@ -114,6 +122,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 0.5,
       "output": 1.5,
     },
+    "releasedAt": "2024-01-23",
   },
   {
     "contextWindowTokens": 8192,
@@ -125,6 +134,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
       "input": 30,
       "output": 60,
     },
+    "releasedAt": "2023-06-12",
   },
 ]
 `;
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts CHANGED
@@ -1,3 +1,5 @@
+import dayjs from 'dayjs';
+import utc from 'dayjs/plugin/utc';
 import OpenAI, { ClientOptions } from 'openai';
 import { Stream } from 'openai/streaming';
 
@@ -18,6 +20,7 @@ import type {
   TextToSpeechOptions,
   TextToSpeechPayload,
 } from '../../types';
+import { ChatStreamCallbacks } from '../../types';
 import { AgentRuntimeError } from '../createError';
 import { debugResponse, debugStream } from '../debugStream';
 import { desensitizeUrl } from '../desensitizeUrl';
@@ -25,7 +28,6 @@ import { handleOpenAIError } from '../handleOpenAIError';
 import { convertOpenAIMessages } from '../openaiHelpers';
 import { StreamingResponse } from '../response';
 import { OpenAIStream, OpenAIStreamOptions } from '../streams';
-import { ChatStreamCallbacks } from '../../types';
 
 // the model contains the following keywords is not a chat model, so we should filter them out
 export const CHAT_MODELS_BLOCK_LIST = [
@@ -248,7 +250,8 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
 
       if (responseMode === 'json') return Response.json(response);
 
-      const transformHandler = chatCompletion?.handleTransformResponseToStream || transformResponseToStream;
+      const transformHandler =
+        chatCompletion?.handleTransformResponseToStream || transformResponseToStream;
       const stream = transformHandler(response as unknown as OpenAI.ChatCompletion);
 
       const streamHandler = chatCompletion?.handleStream || OpenAIStream;
@@ -278,7 +281,15 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
 
           const knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === item.id);
 
-          if (knownModel) return knownModel;
+          if (knownModel) {
+            dayjs.extend(utc);
+
+            return {
+              ...knownModel,
+              releasedAt:
+                knownModel.releasedAt ?? dayjs.utc(item.created * 1000).format('YYYY-MM-DD'),
+            };
+          }
 
           return { id: item.id };
         })
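
The `releasedAt` lines added to the snapshot above come from this fallback: when a known model card lacks `releasedAt`, the factory derives it from the Unix `created` timestamp (in seconds) that the OpenAI `/models` endpoint returns. A standalone check of the date math, using a made-up timestamp:

```ts
import dayjs from 'dayjs';
import utc from 'dayjs/plugin/utc';

dayjs.extend(utc);

// 1677649963 is a hypothetical `created` value (seconds since the epoch)
console.log(dayjs.utc(1677649963 * 1000).format('YYYY-MM-DD')); // 2023-03-01
```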
package/src/locales/default/components.ts CHANGED
@@ -70,6 +70,9 @@ export default {
   GoBack: {
     back: '返回',
   },
+  MaxTokenSlider: {
+    unlimited: '无限制',
+  },
   ModelSelect: {
     featureTag: {
       custom: '自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性',
package/src/locales/default/setting.ts CHANGED
@@ -86,7 +86,6 @@ export default {
       modalTitle: '自定义模型配置',
       tokens: {
         title: '最大 token 数',
-        unlimited: '无限制',
       },
       vision: {
         extra:
package/src/server/modules/AgentRuntime/index.test.ts CHANGED
@@ -70,23 +70,23 @@ vi.mock('@/config/llm', () => ({
 describe('initAgentRuntimeWithUserPayload method', () => {
   describe('should initialize with options correctly', () => {
     it('OpenAI provider: with apikey and endpoint', async () => {
-      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', endpoint: 'user-endpoint' };
+      const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
       const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.OpenAI, jwtPayload);
       expect(runtime).toBeInstanceOf(AgentRuntime);
       expect(runtime['_runtime']).toBeInstanceOf(LobeOpenAI);
-      expect(runtime['_runtime'].baseURL).toBe(jwtPayload.endpoint);
+      expect(runtime['_runtime'].baseURL).toBe(jwtPayload.baseURL);
     });
 
     it('Azure AI provider: with apikey, endpoint and apiversion', async () => {
       const jwtPayload: JWTPayload = {
         apiKey: 'user-azure-key',
-        endpoint: 'user-azure-endpoint',
+        baseURL: 'user-azure-endpoint',
         azureApiVersion: '2024-06-01',
       };
       const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Azure, jwtPayload);
       expect(runtime).toBeInstanceOf(AgentRuntime);
       expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);
-      expect(runtime['_runtime'].baseURL).toBe(jwtPayload.endpoint);
+      expect(runtime['_runtime'].baseURL).toBe(jwtPayload.baseURL);
     });
 
     it('ZhiPu AI provider: with apikey', async () => {
@@ -130,11 +130,11 @@ describe('initAgentRuntimeWithUserPayload method', () => {
     });
 
     it('Ollama provider: with endpoint', async () => {
-      const jwtPayload: JWTPayload = { endpoint: 'http://user-ollama-url' };
+      const jwtPayload: JWTPayload = { baseURL: 'http://user-ollama-url' };
       const runtime = await initAgentRuntimeWithUserPayload(ModelProvider.Ollama, jwtPayload);
       expect(runtime).toBeInstanceOf(AgentRuntime);
       expect(runtime['_runtime']).toBeInstanceOf(LobeOllamaAI);
-      expect(runtime['_runtime']['baseURL']).toEqual(jwtPayload.endpoint);
+      expect(runtime['_runtime']['baseURL']).toEqual(jwtPayload.baseURL);
     });
 
     it('Perplexity AI provider: with apikey', async () => {
@@ -220,12 +220,12 @@ describe('initAgentRuntimeWithUserPayload method', () => {
     it('Unknown Provider: with apikey and endpoint, should initialize to OpenAi', async () => {
       const jwtPayload: JWTPayload = {
         apiKey: 'user-unknown-key',
-        endpoint: 'user-unknown-endpoint',
+        baseURL: 'user-unknown-endpoint',
       };
       const runtime = await initAgentRuntimeWithUserPayload('unknown', jwtPayload);
       expect(runtime).toBeInstanceOf(AgentRuntime);
       expect(runtime['_runtime']).toBeInstanceOf(LobeOpenAI);
-      expect(runtime['_runtime'].baseURL).toBe(jwtPayload.endpoint);
+      expect(runtime['_runtime'].baseURL).toBe(jwtPayload.baseURL);
     });
   });
 
package/src/server/modules/AgentRuntime/index.ts CHANGED
@@ -38,23 +38,23 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
       }
 
       const apiKey = apiKeyManager.pick(payload?.apiKey || llmConfig[`${upperProvider}_API_KEY`]);
-      const baseURL = payload?.endpoint || process.env[`${upperProvider}_PROXY_URL`];
+      const baseURL = payload?.baseURL || process.env[`${upperProvider}_PROXY_URL`];
 
       return baseURL ? { apiKey, baseURL } : { apiKey };
     }
 
     case ModelProvider.Ollama: {
-      const baseURL = payload?.endpoint || process.env.OLLAMA_PROXY_URL;
+      const baseURL = payload?.baseURL || process.env.OLLAMA_PROXY_URL;
 
       return { baseURL };
     }
 
     case ModelProvider.Azure: {
       const { AZURE_API_KEY, AZURE_API_VERSION, AZURE_ENDPOINT } = llmConfig;
-      const apikey = apiKeyManager.pick(payload?.apiKey || AZURE_API_KEY);
-      const endpoint = payload?.endpoint || AZURE_ENDPOINT;
+      const apiKey = apiKeyManager.pick(payload?.apiKey || AZURE_API_KEY);
+      const baseURL = payload?.baseURL || AZURE_ENDPOINT;
       const apiVersion = payload?.azureApiVersion || AZURE_API_VERSION;
-      return { apiVersion, apikey, endpoint };
+      return { apiKey, apiVersion, baseURL };
     }
 
     case ModelProvider.Bedrock: {
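
The resolution order is unchanged by the rename: a per-user value in the JWT payload wins, then the server-side environment variable. A reduced sketch of that precedence; the standalone function is illustrative, though the env variable name is the one used in the diff:

```ts
// Mirrors the payload-over-env precedence after the endpoint -> baseURL rename.
interface PayloadLike {
  baseURL?: string;
}

const resolveOllamaBaseURL = (payload: PayloadLike) =>
  payload.baseURL || process.env.OLLAMA_PROXY_URL;

console.log(resolveOllamaBaseURL({ baseURL: 'http://user-ollama-url' })); // user value wins
console.log(resolveOllamaBaseURL({})); // falls back to OLLAMA_PROXY_URL
```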