@lobehub/chat 1.42.0 → 1.42.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +58 -0
- package/changelog/v1.json +18 -0
- package/next.config.ts +0 -1
- package/package.json +2 -2
- package/src/app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal/Form.tsx +5 -3
- package/src/{app/(main)/settings/llm/components/ProviderModelList/ModelConfigModal → components}/MaxTokenSlider.tsx +4 -5
- package/src/components/ModelSelect/index.tsx +6 -3
- package/src/components/NProgress/index.tsx +9 -1
- package/src/config/modelProviders/openai.ts +15 -0
- package/src/config/modelProviders/openrouter.ts +15 -0
- package/src/const/auth.ts +1 -1
- package/src/database/server/models/__tests__/user.test.ts +11 -0
- package/src/database/server/models/user.ts +4 -0
- package/src/libs/agent-runtime/AgentRuntime.test.ts +10 -10
- package/src/libs/agent-runtime/AgentRuntime.ts +3 -3
- package/src/libs/agent-runtime/ollama/index.test.ts +4 -1
- package/src/libs/agent-runtime/ollama/index.ts +2 -2
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +10 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +14 -3
- package/src/locales/default/components.ts +3 -0
- package/src/locales/default/setting.ts +0 -1
- package/src/server/modules/AgentRuntime/index.test.ts +8 -8
- package/src/server/modules/AgentRuntime/index.ts +5 -5
- package/src/services/__tests__/_auth.test.ts +5 -6
- package/src/services/__tests__/chat.test.ts +1 -0
- package/src/services/_auth.ts +3 -3
- package/src/services/chat.ts +7 -8
- package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +1 -0
- package/src/types/aiModel.ts +275 -0
- package/src/types/aiProvider.ts +148 -0
- package/src/types/llm.ts +3 -17
- package/src/utils/merge.test.ts +48 -0
- package/src/utils/merge.ts +39 -0
```diff
@@ -131,7 +131,7 @@ describe('getProviderAuthPayload', () => {
     expect(payload).toEqual({
       apiKey: mockAzureConfig.apiKey,
       azureApiVersion: mockAzureConfig.apiVersion,
-
+      baseURL: mockAzureConfig.endpoint,
     });
   });
 
@@ -144,7 +144,7 @@ describe('getProviderAuthPayload', () => {
 
     const payload = getProviderAuthPayload(ModelProvider.Ollama);
     expect(payload).toEqual({
-
+      baseURL: mockOllamaProxyUrl,
     });
   });
 
@@ -152,8 +152,7 @@ describe('getProviderAuthPayload', () => {
     // 假设的 OpenAI 配置
     const mockOpenAIConfig = {
       apiKey: 'openai-api-key',
-      baseURL: 'openai-
-      endpoint: 'openai-endpoint',
+      baseURL: 'openai-endpoint',
       useAzure: true,
       azureApiVersion: 'openai-azure-api-version',
     };
@@ -164,7 +163,7 @@ describe('getProviderAuthPayload', () => {
     const payload = getProviderAuthPayload(ModelProvider.OpenAI);
     expect(payload).toEqual({
       apiKey: mockOpenAIConfig.apiKey,
-
+      baseURL: mockOpenAIConfig.baseURL,
     });
   });
 
@@ -181,7 +180,7 @@ describe('getProviderAuthPayload', () => {
     const payload = getProviderAuthPayload(ModelProvider.Stepfun);
     expect(payload).toEqual({
       apiKey: mockOpenAIConfig.apiKey,
-
+      baseURL: mockOpenAIConfig.baseURL,
     });
   });
 
```
package/src/services/__tests__/chat.test.ts
CHANGED
```diff
@@ -939,6 +939,7 @@ describe('AgentRuntimeOnClient', () => {
         },
       },
     } as UserSettingsState) as unknown as UserStore;
+
     const runtime = await initializeWithClientStore(ModelProvider.Azure, {});
     expect(runtime).toBeInstanceOf(AgentRuntime);
     expect(runtime['_runtime']).toBeInstanceOf(LobeAzureOpenAI);
```
package/src/services/_auth.ts
CHANGED
```diff
@@ -45,14 +45,14 @@ export const getProviderAuthPayload = (provider: string) => {
       return {
         apiKey: azure.apiKey,
         azureApiVersion: azure.apiVersion,
-
+        baseURL: azure.endpoint,
       };
     }
 
     case ModelProvider.Ollama: {
       const config = keyVaultsConfigSelectors.ollamaConfig(useUserStore.getState());
 
-      return {
+      return { baseURL: config?.baseURL };
     }
 
     case ModelProvider.Cloudflare: {
@@ -69,7 +69,7 @@ export const getProviderAuthPayload = (provider: string) => {
         useUserStore.getState(),
       );
 
-      return { apiKey: config?.apiKey,
+      return { apiKey: config?.apiKey, baseURL: config?.baseURL };
     }
   }
 };
```
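After this change, every client-side branch of `getProviderAuthPayload` reports the user-configured address under the single `baseURL` key rather than a provider-specific `endpoint`. A minimal sketch of a caller relying on that normalized shape; the import paths follow the repository's `@/` alias and are an assumption, and the logged value is purely illustrative:

```ts
import { ModelProvider } from '@/libs/agent-runtime';
import { getProviderAuthPayload } from '@/services/_auth';

// Each provider branch now exposes the configured address as `baseURL`
// (plus provider extras such as `apiKey` or `azureApiVersion`), so callers
// no longer have to special-case an `endpoint` field.
const payload = getProviderAuthPayload(ModelProvider.Ollama);
console.log(payload); // → { baseURL: '<the configured Ollama proxy URL>' }
```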
package/src/services/chat.ts
CHANGED
```diff
@@ -94,21 +94,20 @@ export function initializeWithClientStore(provider: string, payload: any) {
     default:
     case ModelProvider.OpenAI: {
       providerOptions = {
-        baseURL: providerAuthPayload?.
+        baseURL: providerAuthPayload?.baseURL,
       };
       break;
     }
     case ModelProvider.Azure: {
       providerOptions = {
+        apiKey: providerAuthPayload?.apiKey,
         apiVersion: providerAuthPayload?.azureApiVersion,
-        // That's a wired properity, but just remapped it
-        apikey: providerAuthPayload?.apiKey,
       };
       break;
     }
     case ModelProvider.Google: {
       providerOptions = {
-        baseURL: providerAuthPayload?.
+        baseURL: providerAuthPayload?.baseURL,
       };
       break;
     }
@@ -125,27 +124,27 @@ export function initializeWithClientStore(provider: string, payload: any) {
     }
     case ModelProvider.Ollama: {
       providerOptions = {
-        baseURL: providerAuthPayload?.
+        baseURL: providerAuthPayload?.baseURL,
       };
       break;
     }
     case ModelProvider.Perplexity: {
       providerOptions = {
         apikey: providerAuthPayload?.apiKey,
-        baseURL: providerAuthPayload?.
+        baseURL: providerAuthPayload?.baseURL,
       };
       break;
     }
     case ModelProvider.Anthropic: {
       providerOptions = {
-        baseURL: providerAuthPayload?.
+        baseURL: providerAuthPayload?.baseURL,
       };
       break;
     }
     case ModelProvider.Groq: {
       providerOptions = {
         apikey: providerAuthPayload?.apiKey,
-        baseURL: providerAuthPayload?.
+        baseURL: providerAuthPayload?.baseURL,
       };
       break;
     }
```
package/src/types/aiModel.ts
ADDED
```ts
import { z } from 'zod';

export type ModelPriceCurrency = 'CNY' | 'USD';

export const AiModelSourceEnum = {
  Builtin: 'builtin',
  Custom: 'custom',
  Remote: 'remote',
} as const;
export type AiModelSourceType = (typeof AiModelSourceEnum)[keyof typeof AiModelSourceEnum];

export type AiModelType =
  | 'chat'
  | 'embedding'
  | 'tts'
  | 'stt'
  | 'image'
  | 'text2video'
  | 'text2music';

export interface ModelAbilities {
  /**
   * whether model supports file upload
   */
  files?: boolean;
  /**
   * whether model supports function call
   */
  functionCall?: boolean;
  /**
   * whether model supports vision
   */
  vision?: boolean;
}

const AiModelAbilitiesSchema = z.object({
  // files: z.boolean().optional(),
  functionCall: z.boolean().optional(),
  vision: z.boolean().optional(),
});

// 语言模型的设置参数
export interface LLMParams {
  /**
   * 控制生成文本中的惩罚系数,用于减少重复性
   * @default 0
   */
  frequency_penalty?: number;
  /**
   * 生成文本的最大长度
   */
  max_tokens?: number;
  /**
   * 控制生成文本中的惩罚系数,用于减少主题的变化
   * @default 0
   */
  presence_penalty?: number;
  /**
   * 生成文本的随机度量,用于控制文本的创造性和多样性
   * @default 1
   */
  temperature?: number;
  /**
   * 控制生成文本中最高概率的单个 token
   * @default 1
   */
  top_p?: number;
}

export interface BasicModelPricing {
  /**
   * the currency of the pricing
   * @default USD
   */
  currency?: ModelPriceCurrency;
  /**
   * the input pricing, e.g. $1 / 1M tokens
   */
  input?: number;
}

export interface ChatModelPricing extends BasicModelPricing {
  audioInput?: number;
  audioOutput?: number;
  cachedAudioInput?: number;
  cachedInput?: number;
  /**
   * the output pricing, e.g. $2 / 1M tokens
   */
  output?: number;
  writeCacheInput?: number;
}

interface AIBaseModelCard {
  /**
   * the context window (or input + output tokens limit)
   */
  contextWindowTokens?: number;
  description?: string;
  /**
   * the name show for end user
   */
  displayName?: string;
  enabled?: boolean;
  id: string;
  /**
   * whether model is legacy (deprecated but not removed yet)
   */
  legacy?: boolean;
  /**
   * who create this model
   */
  organization?: string;

  releasedAt?: string;
}

export interface AIChatModelCard extends AIBaseModelCard {
  abilities?: {
    /**
     * whether model supports file upload
     */
    files?: boolean;
    /**
     * whether model supports function call
     */
    functionCall?: boolean;
    /**
     * whether model supports vision
     */
    vision?: boolean;
  };
  /**
   * used in azure and doubao
   */
  deploymentName?: string;
  maxOutput?: number;
  pricing?: ChatModelPricing;
  type: 'chat';
}

export interface AIEmbeddingModelCard extends AIBaseModelCard {
  maxDimension: number;
  pricing?: {
    /**
     * the currency of the pricing
     * @default USD
     */
    currency?: ModelPriceCurrency;
    /**
     * the input pricing, e.g. $1 / 1M tokens
     */
    input?: number;
  };
  type: 'embedding';
}

export interface AIText2ImageModelCard extends AIBaseModelCard {
  pricing?: {
    /**
     * the currency of the pricing
     * @default USD
     */
    currency?: ModelPriceCurrency;
  } & Record<string, number>; // [resolution: string]: number;
  resolutions: string[];
  type: 'image';
}

export interface AITTSModelCard extends AIBaseModelCard {
  pricing?: {
    /**
     * the currency of the pricing
     * @default USD
     */
    currency?: ModelPriceCurrency;
    /**
     * the input pricing, e.g. $1 / 1M tokens
     */
    input?: number;
  };
  type: 'tts';
}

export interface AISTTModelCard extends AIBaseModelCard {
  pricing?: {
    /**
     * the currency of the pricing
     * @default USD
     */
    currency?: ModelPriceCurrency;
    /**
     * the input pricing, e.g. $1 / 1M tokens
     */
    input?: number;
  };
  type: 'stt';
}

export interface AIRealtimeModelCard extends AIBaseModelCard {
  abilities?: {
    /**
     * whether model supports file upload
     */
    files?: boolean;
    /**
     * whether model supports function call
     */
    functionCall?: boolean;
    /**
     * whether model supports vision
     */
    vision?: boolean;
  };
  /**
   * used in azure and doubao
   */
  deploymentName?: string;
  maxOutput?: number;
  pricing?: ChatModelPricing;
  type: 'realtime';
}

// create
export const CreateAiModelSchema = z.object({
  abilities: AiModelAbilitiesSchema.optional(),
  contextWindowTokens: z.number().optional(),
  displayName: z.string().optional(),
  id: z.string(),
  providerId: z.string(),
  releasedAt: z.string().optional(),

  // checkModel: z.string().optional(),
  // homeUrl: z.string().optional(),
  // modelsUrl: z.string().optional(),
});

export type CreateAiModelParams = z.infer<typeof CreateAiModelSchema>;

// List Query

export interface AiProviderModelListItem {
  abilities?: ModelAbilities;
  contextWindowTokens?: number;
  displayName?: string;
  enabled: boolean;
  id: string;
  pricing?: ChatModelPricing;
  releasedAt?: string;
  source?: AiModelSourceType;
  type: AiModelType;
}

// Update
export const UpdateAiModelSchema = z.object({
  abilities: AiModelAbilitiesSchema.optional(),
  contextWindowTokens: z.number().optional(),
  displayName: z.string().optional(),
});

export type UpdateAiModelParams = z.infer<typeof UpdateAiModelSchema>;

export interface AiModelSortMap {
  id: string;
  sort: number;
}

export const ToggleAiModelEnableSchema = z.object({
  enabled: z.boolean(),
  id: z.string(),
  providerId: z.string(),
  source: z.enum(['builtin', 'custom', 'remote']).optional(),
});

export type ToggleAiModelEnableParams = z.infer<typeof ToggleAiModelEnableSchema>;
```
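`aiModel.ts` is entirely new: it collects the model-card typings plus the zod schemas used to create, update, and toggle AI models. A small sketch of how the create path could validate its input; the schema and types come from the file above, while the sample values and standalone usage are illustrative only:

```ts
import { AIChatModelCard, CreateAiModelSchema } from '@/types/aiModel';

// Validate a hypothetical "add custom model" payload; parse() throws if the
// required `id` or `providerId` fields are missing.
const params = CreateAiModelSchema.parse({
  abilities: { functionCall: true, vision: false },
  contextWindowTokens: 128_000,
  displayName: 'My Custom Model',
  id: 'my-custom-model',
  providerId: 'openai',
});

// The validated fields map directly onto the new chat model card type.
const card: AIChatModelCard = {
  contextWindowTokens: params.contextWindowTokens,
  displayName: params.displayName,
  id: params.id,
  type: 'chat',
};
```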
package/src/types/aiProvider.ts
ADDED
```ts
import { z } from 'zod';

import { SmoothingParams } from '@/types/llm';

// create
export const CreateAiProviderSchema = z.object({
  config: z.object({}).passthrough().optional(),
  description: z.string().optional(),
  id: z.string(),
  keyVaults: z.any().optional(),
  logo: z.string().optional(),
  name: z.string(),
  sdkType: z.enum(['openai', 'anthropic']).optional(),
  // checkModel: z.string().optional(),
  // homeUrl: z.string().optional(),
  // modelsUrl: z.string().optional(),
});

export type CreateAiProviderParams = z.infer<typeof CreateAiProviderSchema>;

// List Query

export interface AiProviderListItem {
  description?: string;
  enabled: boolean;
  id: string;
  logo?: string;
  name?: string;
  sort?: number;
  source: 'builtin' | 'custom';
}

// Detail Query

interface AiProviderConfig {
  /**
   * whether provider show browser request option by default
   *
   * @default false
   */
  defaultShowBrowserRequest?: boolean;
  /**
   * some provider server like stepfun and aliyun don't support browser request,
   * So we should disable it
   *
   * @default false
   */
  disableBrowserRequest?: boolean;
  proxyUrl?:
    | {
        desc?: string;
        placeholder: string;
        title?: string;
      }
    | false;

  /**
   * whether show api key in the provider config
   * so provider like ollama don't need api key field
   */
  showApiKey?: boolean;

  /**
   * whether show checker in the provider config
   */
  showChecker?: boolean;
  showDeployName?: boolean;
  showModelFetcher?: boolean;
  /**
   * whether to smoothing the output
   */
  smoothing?: SmoothingParams;
}

export interface AiProviderItem {
  /**
   * the default model that used for connection check
   */
  checkModel?: string;
  config: AiProviderConfig;
  description?: string;
  enabled: boolean;
  enabledChatModels: string[];
  /**
   * provider's website url
   */
  homeUrl?: string;
  id: string;
  logo?: string;
  /**
   * the url show the all models in the provider
   */
  modelsUrl?: string;
  /**
   * the name show for end user
   */
  name: string;
  /**
   * default openai
   */
  sdkType?: 'openai' | 'anthropic';
  source: 'builtin' | 'custom';
}

export interface AiProviderDetailItem {
  /**
   * the default model that used for connection check
   */
  checkModel?: string;
  config: AiProviderConfig;
  description?: string;
  enabled: boolean;
  fetchOnClient?: boolean;
  /**
   * provider's website url
   */
  homeUrl?: string;
  id: string;
  keyVaults?: Record<string, any>;
  logo?: string;
  /**
   * the url show the all models in the provider
   */
  modelsUrl?: string;
  /**
   * the name show for end user
   */
  name: string;
  /**
   * default openai
   */
  sdkType?: 'openai' | 'anthropic';
  source: 'builtin' | 'custom';
}

// Update
export const UpdateAiProviderConfigSchema = z.object({
  checkModel: z.string().optional(),
  fetchOnClient: z.boolean().optional(),
  keyVaults: z.object({}).passthrough().optional(),
});

export type UpdateAiProviderConfigParams = z.infer<typeof UpdateAiProviderConfigSchema>;

export interface AiProviderSortMap {
  id: string;
  sort: number;
}
```
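`aiProvider.ts` plays the same role for providers: creation and config-update payloads get zod schemas, and the list/detail item shapes are typed. A rough usage sketch with made-up values (only the schemas themselves come from the file above):

```ts
import { CreateAiProviderSchema, UpdateAiProviderConfigSchema } from '@/types/aiProvider';

// A custom provider definition; `id` and `name` are the only required fields.
const provider = CreateAiProviderSchema.parse({
  id: 'my-openai-proxy',
  name: 'My OpenAI Proxy',
  sdkType: 'openai',
});

// Config updates can be checked without throwing via safeParse; `keyVaults`
// is a passthrough object, so arbitrary vault keys are preserved.
const update = UpdateAiProviderConfigSchema.safeParse({
  checkModel: 'gpt-4o-mini',
  fetchOnClient: true,
  keyVaults: { apiKey: 'sk-…', baseURL: 'https://example.com/v1' },
});

console.log(provider.id, update.success);
```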
package/src/types/llm.ts
CHANGED
```diff
@@ -1,5 +1,7 @@
 import { ReactNode } from 'react';
 
+import { ChatModelPricing } from '@/types/aiModel';
+
 export type ModelPriceCurrency = 'CNY' | 'USD';
 
 export interface ChatModelCard {
@@ -38,23 +40,7 @@ export interface ChatModelCard {
    */
   legacy?: boolean;
   maxOutput?: number;
-  pricing?: {
-    cachedInput?: number;
-    /**
-     * the currency of the pricing
-     * @default USD
-     */
-    currency?: ModelPriceCurrency;
-    /**
-     * the input pricing, e.g. $1 / 1M tokens
-     */
-    input?: number;
-    /**
-     * the output pricing, e.g. $2 / 1M tokens
-     */
-    output?: number;
-    writeCacheInput?: number;
-  };
+  pricing?: ChatModelPricing;
   releasedAt?: string;
 
   /**
```
package/src/utils/merge.test.ts
ADDED
```ts
import { expect } from 'vitest';

import { AIChatModelCard } from '@/types/aiModel';

import { mergeArrayById } from './merge';

describe('mergeArrayById', () => {
  it('should merge data', () => {
    const data = mergeArrayById(
      [
        {
          contextWindowTokens: 128_000,
          description:
            'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
          displayName: 'OpenAI o1-mini',
          enabled: true,
          id: 'o1-mini',
          maxOutput: 65_536,
          pricing: {
            input: 3,
            output: 12,
          },
          releasedAt: '2024-09-12',
          type: 'chat',
        },
      ],
      [{ id: 'o1-mini', displayName: 'OpenAI o1-mini ABC', type: 'chat' }],
    );

    expect(data).toEqual([
      {
        contextWindowTokens: 128_000,
        description:
          'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
        displayName: 'OpenAI o1-mini ABC',
        enabled: true,
        id: 'o1-mini',
        maxOutput: 65_536,
        pricing: {
          input: 3,
          output: 12,
        },
        releasedAt: '2024-09-12',
        type: 'chat',
      },
    ]);
  });
});
```
package/src/utils/merge.ts
CHANGED
```diff
@@ -9,3 +9,42 @@ export const merge: typeof _merge = <T = object>(target: T, source: T) =>
   mergeWith({}, target, source, (obj, src) => {
     if (Array.isArray(obj)) return src;
   });
+
+type MergeableItem = {
+  [key: string]: any;
+  id: string;
+};
+
+/**
+ * Merge two arrays based on id, preserving metadata from default items
+ * @param defaultItems Items with default configuration and metadata
+ * @param userItems User-defined items with higher priority
+ */
+export const mergeArrayById = <T extends MergeableItem>(defaultItems: T[], userItems: T[]): T[] => {
+  // Create a map of default items for faster lookup
+  const defaultItemsMap = new Map(defaultItems.map((item) => [item.id, item]));
+
+  // Process user items with default metadata
+  const mergedItems = userItems.map((userItem) => {
+    const defaultItem = defaultItemsMap.get(userItem.id);
+    if (!defaultItem) return userItem;
+
+    // Merge strategy: use default value when user value is null or undefined
+    const mergedItem: T = { ...defaultItem };
+    Object.entries(userItem).forEach(([key, value]) => {
+      // Only use user value if it's not null and not undefined
+      if (value !== null && value !== undefined) {
+        // @ts-expect-error
+        mergedItem[key] = value;
+      }
+    });
+
+    return mergedItem;
+  });
+
+  // Add items that only exist in default configuration
+  const userItemIds = new Set(userItems.map((item) => item.id));
+  const onlyInDefaultItems = defaultItems.filter((item) => !userItemIds.has(item.id));
+
+  return [...mergedItems, ...onlyInDefaultItems];
+};
```
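The new `mergeArrayById` helper merges a default item list with user-defined overrides by `id`: non-null override fields win, missing fields fall back to the defaults, and default-only items are kept. A condensed version of the test above; the `@/utils/merge` import path is an assumption (the test itself uses a relative import), and the `ModelItem` type is invented for the example:

```ts
import { mergeArrayById } from '@/utils/merge';

type ModelItem = {
  displayName?: string;
  enabled?: boolean;
  id: string;
  maxOutput?: number;
};

const defaults: ModelItem[] = [
  { displayName: 'OpenAI o1-mini', enabled: true, id: 'o1-mini', maxOutput: 65_536 },
];
// Overrides only carry the fields the user actually changed.
const overrides: ModelItem[] = [{ displayName: 'OpenAI o1-mini ABC', id: 'o1-mini' }];

// Default metadata is preserved, non-null override fields win, and items that
// exist only in the defaults are appended untouched.
console.log(mergeArrayById(defaults, overrides));
// → [{ displayName: 'OpenAI o1-mini ABC', enabled: true, id: 'o1-mini', maxOutput: 65_536 }]
```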