@lobehub/lobehub 2.0.0-next.15 → 2.0.0-next.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/docs/self-hosting/advanced/feature-flags.mdx +0 -1
- package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +0 -1
- package/package.json +1 -1
- package/packages/types/src/serverConfig.ts +2 -6
- package/src/app/[variants]/(auth)/signup/[[...signup]]/page.tsx +1 -8
- package/src/app/[variants]/(main)/(mobile)/me/(home)/features/UserBanner.tsx +3 -6
- package/src/app/[variants]/(main)/labs/components/LabCard.tsx +3 -1
- package/src/app/[variants]/(main)/settings/provider/detail/azure/index.tsx +5 -7
- package/src/components/InvalidAPIKey/APIKeyForm/Bedrock.tsx +8 -13
- package/src/config/featureFlags/schema.test.ts +0 -2
- package/src/config/featureFlags/schema.ts +0 -6
- package/src/config/modelProviders/ai21.ts +1 -16
- package/src/config/modelProviders/ai302.ts +1 -128
- package/src/config/modelProviders/ai360.ts +1 -32
- package/src/config/modelProviders/anthropic.ts +1 -71
- package/src/config/modelProviders/azure.ts +1 -51
- package/src/config/modelProviders/baichuan.ts +1 -57
- package/src/config/modelProviders/bedrock.ts +1 -276
- package/src/config/modelProviders/cloudflare.ts +1 -64
- package/src/config/modelProviders/deepseek.ts +1 -19
- package/src/config/modelProviders/fireworksai.ts +1 -174
- package/src/config/modelProviders/giteeai.ts +1 -135
- package/src/config/modelProviders/github.ts +1 -254
- package/src/config/modelProviders/google.ts +1 -130
- package/src/config/modelProviders/groq.ts +1 -119
- package/src/config/modelProviders/higress.ts +1 -1713
- package/src/config/modelProviders/huggingface.ts +1 -54
- package/src/config/modelProviders/hunyuan.ts +1 -83
- package/src/config/modelProviders/infiniai.ts +1 -74
- package/src/config/modelProviders/internlm.ts +1 -20
- package/src/config/modelProviders/mistral.ts +1 -95
- package/src/config/modelProviders/modelscope.ts +1 -27
- package/src/config/modelProviders/moonshot.ts +1 -29
- package/src/config/modelProviders/novita.ts +1 -105
- package/src/config/modelProviders/ollama.ts +1 -325
- package/src/config/modelProviders/openai.ts +1 -242
- package/src/config/modelProviders/openrouter.ts +1 -240
- package/src/config/modelProviders/perplexity.ts +1 -45
- package/src/config/modelProviders/ppio.ts +1 -152
- package/src/config/modelProviders/qiniu.ts +1 -18
- package/src/config/modelProviders/qwen.ts +1 -245
- package/src/config/modelProviders/search1api.ts +1 -34
- package/src/config/modelProviders/sensenova.ts +1 -69
- package/src/config/modelProviders/siliconcloud.ts +1 -417
- package/src/config/modelProviders/spark.ts +1 -59
- package/src/config/modelProviders/stepfun.ts +1 -98
- package/src/config/modelProviders/taichu.ts +1 -18
- package/src/config/modelProviders/togetherai.ts +1 -274
- package/src/config/modelProviders/upstage.ts +1 -28
- package/src/config/modelProviders/wenxin.ts +1 -140
- package/src/config/modelProviders/xai.ts +1 -38
- package/src/config/modelProviders/zeroone.ts +1 -81
- package/src/config/modelProviders/zhipu.ts +1 -108
- package/src/helpers/isCanUseFC.ts +0 -8
- package/src/hooks/useEnabledChatModels.ts +0 -8
- package/src/hooks/useModelContextWindowTokens.ts +0 -8
- package/src/hooks/useModelHasContextWindowToken.ts +1 -10
- package/src/hooks/useModelSupportFiles.ts +1 -11
- package/src/hooks/useModelSupportReasoning.ts +1 -11
- package/src/hooks/useModelSupportToolUse.ts +1 -11
- package/src/hooks/useModelSupportVision.ts +1 -11
- package/src/layout/AuthProvider/Clerk/index.tsx +2 -16
- package/src/server/globalConfig/index.ts +0 -23
- package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap +175 -12
- package/src/server/routers/lambda/config/index.test.ts +36 -28
- package/src/services/chat/chat.test.ts +12 -0
- package/src/services/chat/helper.ts +7 -31
- package/src/services/models.ts +2 -11
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +41 -14
- package/src/store/global/store.ts +1 -7
- package/src/store/user/initialState.ts +1 -7
- package/src/store/user/selectors.ts +1 -5
- package/src/store/user/slices/common/action.ts +5 -4
- package/src/store/user/slices/settings/selectors/index.ts +1 -0
- package/src/store/user/slices/settings/selectors/keyVaults.ts +21 -0
- package/src/store/user/store.ts +0 -3
- package/src/tools/web-browsing/Render/Search/ConfigForm/Form.tsx +1 -1
- package/packages/utils/src/_deprecated/__snapshots__/parseModels.test.ts.snap +0 -104
- package/packages/utils/src/_deprecated/parseModels.test.ts +0 -287
- package/packages/utils/src/_deprecated/parseModels.ts +0 -165
- package/src/hooks/_header.ts +0 -23
- package/src/server/globalConfig/_deprecated.test.ts +0 -92
- package/src/server/globalConfig/_deprecated.ts +0 -41
- package/src/store/global/actions/clientDb.ts +0 -67
- package/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap +0 -12
- package/src/store/user/slices/modelList/action.test.ts +0 -359
- package/src/store/user/slices/modelList/action.ts +0 -223
- package/src/store/user/slices/modelList/initialState.ts +0 -15
- package/src/store/user/slices/modelList/reducers/customModelCard.test.ts +0 -204
- package/src/store/user/slices/modelList/reducers/customModelCard.ts +0 -64
- package/src/store/user/slices/modelList/selectors/index.ts +0 -3
- package/src/store/user/slices/modelList/selectors/keyVaults.test.ts +0 -201
- package/src/store/user/slices/modelList/selectors/keyVaults.ts +0 -50
- package/src/store/user/slices/modelList/selectors/modelConfig.test.ts +0 -219
- package/src/store/user/slices/modelList/selectors/modelConfig.ts +0 -95
- package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +0 -138
- package/src/store/user/slices/modelList/selectors/modelProvider.ts +0 -170
@@ -1,359 +0,0 @@
-import { act, renderHook, waitFor } from '@testing-library/react';
-import { describe, expect, it, vi } from 'vitest';
-
-import { modelsService } from '@/services/models';
-import { userService } from '@/services/user';
-import { useUserStore } from '@/store/user';
-import { ProviderConfig } from '@/types/user/settings';
-
-import { settingsSelectors } from '../settings/selectors';
-import { CustomModelCardDispatch } from './reducers/customModelCard';
-import { modelProviderSelectors } from './selectors';
-
-// Mock userService
-vi.mock('@/services/user', () => ({
-  userService: {
-    updateUserSettings: vi.fn(),
-    resetUserSettings: vi.fn(),
-  },
-}));
-
-vi.mock('zustand/traditional');
-
-describe('LLMSettingsSliceAction', () => {
-  describe('setModelProviderConfig', () => {
-    it('should set OpenAI configuration', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const openAIConfig: Partial<ProviderConfig> = { fetchOnClient: true };
-
-      // Perform the action
-      await act(async () => {
-        await result.current.setModelProviderConfig('openai', openAIConfig);
-      });
-
-      // Assert that updateUserSettings was called with the correct OpenAI configuration
-      expect(userService.updateUserSettings).toHaveBeenCalledWith(
-        { languageModel: { openai: openAIConfig } },
-        expect.any(AbortSignal),
-      );
-    });
-  });
-
-  describe('dispatchCustomModelCards', () => {
-    it('should return early when prevState does not exist', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const provider = 'openai';
-      const payload: CustomModelCardDispatch = { type: 'add', modelCard: { id: 'test-id' } };
-
-      // Mock the selector to return undefined
-      vi.spyOn(settingsSelectors, 'providerConfig').mockReturnValueOnce(() => undefined);
-      vi.spyOn(result.current, 'setModelProviderConfig');
-
-      await act(async () => {
-        await result.current.dispatchCustomModelCards(provider, payload);
-      });
-
-      // Assert that setModelProviderConfig was not called
-      expect(result.current.setModelProviderConfig).not.toHaveBeenCalled();
-    });
-  });
-
-  describe('refreshDefaultModelProviderList', () => {
-    it('default', async () => {
-      const { result } = renderHook(() => useUserStore());
-
-      act(() => {
-        useUserStore.setState({
-          serverLanguageModel: {
-            azure: { serverModelCards: [{ id: 'abc', deploymentName: 'abc' }] },
-          },
-        });
-      });
-
-      await act(async () => {
-        await result.current.refreshDefaultModelProviderList();
-      });
-
-      // Assert that setModelProviderConfig was not called
-      const azure = result.current.defaultModelProviderList.find((m) => m.id === 'azure');
-      expect(azure?.chatModels).toEqual([{ id: 'abc', deploymentName: 'abc' }]);
-    });
-
-    it('openai', async () => {
-      const { result } = renderHook(() => useUserStore());
-      act(() => {
-        useUserStore.setState({
-          serverLanguageModel: {
-            openai: {
-              enabled: true,
-              enabledModels: ['gpt-4-0125-preview', 'gpt-4-turbo-2024-04-09'],
-              serverModelCards: [
-                {
-                  displayName: 'ChatGPT-4',
-                  functionCall: true,
-                  id: 'gpt-4-0125-preview',
-                  contextWindowTokens: 128000,
-                  enabled: true,
-                },
-                {
-                  displayName: 'ChatGPT-4 Vision',
-                  functionCall: true,
-                  id: 'gpt-4-turbo-2024-04-09',
-                  contextWindowTokens: 128000,
-                  vision: true,
-                  enabled: true,
-                },
-              ],
-            },
-          },
-        });
-      });
-
-      await act(async () => {
-        await result.current.refreshDefaultModelProviderList();
-      });
-
-      // Assert that setModelProviderConfig was not called
-      const openai = result.current.defaultModelProviderList.find((m) => m.id === 'openai');
-      expect(openai?.chatModels).toEqual([
-        {
-          displayName: 'ChatGPT-4',
-          enabled: true,
-          functionCall: true,
-          id: 'gpt-4-0125-preview',
-          contextWindowTokens: 128000,
-        },
-        {
-          displayName: 'ChatGPT-4 Vision',
-          enabled: true,
-          functionCall: true,
-          id: 'gpt-4-turbo-2024-04-09',
-          contextWindowTokens: 128000,
-          vision: true,
-        },
-      ]);
-    });
-  });
-
-  describe('refreshModelProviderList', () => {
-    it('visible', async () => {
-      const { result } = renderHook(() => useUserStore());
-      act(() => {
-        useUserStore.setState({
-          settings: {
-            languageModel: {
-              ollama: { enabledModels: ['llava'] },
-            },
-          },
-        });
-      });
-
-      act(() => {
-        result.current.refreshModelProviderList();
-      });
-
-      const ollamaList = result.current.modelProviderList.find((r) => r.id === 'ollama');
-      // Assert that setModelProviderConfig was not called
-      const model = ollamaList?.chatModels.find((c) => c.id === 'llava');
-
-      expect(model).toMatchSnapshot();
-    });
-
-    it('modelProviderListForModelSelect should return only enabled providers', () => {
-      const { result } = renderHook(() => useUserStore());
-
-      act(() => {
-        useUserStore.setState({
-          settings: {
-            languageModel: {
-              perplexity: { enabled: true },
-              azure: { enabled: false },
-            },
-          },
-        });
-      });
-
-      act(() => {
-        result.current.refreshModelProviderList();
-      });
-
-      const enabledProviders = modelProviderSelectors.modelProviderListForModelSelect(
-        result.current,
-      );
-      expect(enabledProviders).toHaveLength(3);
-      expect(enabledProviders.at(-1)!.id).toBe('perplexity');
-    });
-  });
-
-  describe('removeEnabledModels', () => {
-    it('should remove the specified model from enabledModels', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const model = 'gpt-3.5-turbo';
-
-      const spyOn = vi.spyOn(userService, 'updateUserSettings');
-
-      act(() => {
-        useUserStore.setState({
-          settings: {
-            languageModel: {
-              azure: { enabledModels: ['gpt-3.5-turbo', 'gpt-4'] },
-            },
-          },
-        });
-      });
-
-      await act(async () => {
-        console.log(JSON.stringify(result.current.settings));
-        await result.current.removeEnabledModels('azure', model);
-      });
-
-      expect(spyOn).toHaveBeenCalledWith(
-        { languageModel: { azure: { enabledModels: ['gpt-4'] } } },
-        expect.any(AbortSignal),
-      );
-    });
-  });
-
-  describe('toggleEditingCustomModelCard', () => {
-    it('should update editingCustomCardModel when params are provided', () => {
-      const { result } = renderHook(() => useUserStore());
-
-      act(() => {
-        result.current.toggleEditingCustomModelCard({ id: 'test-id', provider: 'openai' });
-      });
-
-      expect(result.current.editingCustomCardModel).toEqual({ id: 'test-id', provider: 'openai' });
-    });
-
-    it('should reset editingCustomCardModel when no params are provided', () => {
-      const { result } = renderHook(() => useUserStore());
-
-      act(() => {
-        result.current.toggleEditingCustomModelCard();
-      });
-
-      expect(result.current.editingCustomCardModel).toBeUndefined();
-    });
-  });
-
-  describe('toggleProviderEnabled', () => {
-    it('should enable the provider', async () => {
-      const { result } = renderHook(() => useUserStore());
-
-      await act(async () => {
-        await result.current.toggleProviderEnabled('minimax', true);
-      });
-
-      expect(userService.updateUserSettings).toHaveBeenCalledWith(
-        { languageModel: { minimax: { enabled: true } } },
-        expect.any(AbortSignal),
-      );
-    });
-
-    it('should disable the provider', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const provider = 'openai';
-
-      await act(async () => {
-        await result.current.toggleProviderEnabled(provider, false);
-      });
-
-      expect(userService.updateUserSettings).toHaveBeenCalledWith(
-        { languageModel: { openai: { enabled: false } } },
-        expect.any(AbortSignal),
-      );
-    });
-  });
-
-  describe('updateEnabledModels', () => {
-    // TODO: pending the synchronous refactor of the updateEnabledModels implementation
-    it('should add new custom model to customModelCards', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const provider = 'openai';
-      const modelKeys = ['gpt-3.5-turbo', 'custom-model'];
-      const options = [{ value: 'gpt-3.5-turbo' }, {}];
-
-      await act(async () => {
-        await result.current.updateEnabledModels(provider, modelKeys, options);
-      });
-
-      expect(userService.updateUserSettings).toHaveBeenCalledWith(
-        {
-          languageModel: {
-            openai: {
-              customModelCards: [{ id: 'custom-model' }],
-              // TODO: the target unit test needs to include the line below
-              // enabledModels: ['gpt-3.5-turbo', 'custom-model'],
-            },
-          },
-        },
-        expect.any(AbortSignal),
-      );
-    });
-
-    it('should not add removed model to customModelCards', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const provider = 'openai';
-      const modelKeys = ['gpt-3.5-turbo'];
-      const options = [{ value: 'gpt-3.5-turbo' }];
-
-      act(() => {
-        useUserStore.setState({
-          settings: {
-            languageModel: {
-              openai: { enabledModels: ['gpt-3.5-turbo', 'gpt-4'] },
-            },
-          },
-        });
-      });
-
-      await act(async () => {
-        await result.current.updateEnabledModels(provider, modelKeys, options);
-      });
-
-      expect(userService.updateUserSettings).toHaveBeenCalledWith(
-        {
-          languageModel: { openai: { enabledModels: ['gpt-3.5-turbo'] } },
-        },
-        expect.any(AbortSignal),
-      );
-    });
-  });
-
-  describe('useFetchProviderModelList', () => {
-    it('should fetch data when enabledAutoFetch is true', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const provider = 'openai';
-      const enabledAutoFetch = true;
-
-      const spyOn = vi.spyOn(result.current, 'refreshDefaultModelProviderList');
-
-      vi.spyOn(modelsService, 'getModels').mockResolvedValueOnce([]);
-
-      renderHook(() => result.current.useFetchProviderModelList(provider, enabledAutoFetch));
-
-      await waitFor(() => {
-        expect(spyOn).toHaveBeenCalled();
-      });
-
-      // expect(result.current.settings.languageModel.openai?.latestFetchTime).toBeDefined();
-      // expect(result.current.settings.languageModel.openai?.remoteModelCards).toBeDefined();
-    });
-
-    it('should not fetch data when enabledAutoFetch is false', async () => {
-      const { result } = renderHook(() => useUserStore());
-      const provider = 'openai';
-      const enabledAutoFetch = false;
-
-      const spyOn = vi.spyOn(result.current, 'refreshDefaultModelProviderList');
-
-      vi.spyOn(modelsService, 'getModels').mockResolvedValueOnce([]);
-
-      renderHook(() => result.current.useFetchProviderModelList(provider, enabledAutoFetch));
-
-      await waitFor(() => {
-        expect(spyOn).not.toHaveBeenCalled();
-      });
-    });
-  });
-});
@@ -1,223 +0,0 @@
-import type {
-  ChatModelCard,
-  GlobalLLMProviderKey,
-  ModelProviderCard,
-  UserKeyVaults,
-  UserModelProviderConfig,
-} from '@lobechat/types';
-import { produce } from 'immer';
-import { ModelProvider } from 'model-bank';
-import useSWR, { SWRResponse } from 'swr';
-import type { StateCreator } from 'zustand/vanilla';
-
-import type { UserStore } from '@/store/user';
-
-import { settingsSelectors } from '../settings/selectors';
-import { CustomModelCardDispatch, customModelCardsReducer } from './reducers/customModelCard';
-import { modelProviderSelectors } from './selectors/modelProvider';
-
-/**
- * Settings actions
- */
-export interface ModelListAction {
-  clearObtainedModels: (provider: GlobalLLMProviderKey) => Promise<void>;
-  dispatchCustomModelCards: (
-    provider: GlobalLLMProviderKey,
-    payload: CustomModelCardDispatch,
-  ) => Promise<void>;
-  /**
-   * make sure the default model provider list is sync to latest state
-   */
-  refreshDefaultModelProviderList: (params?: { trigger?: string }) => Promise<void>;
-  refreshModelProviderList: (params?: { trigger?: string }) => void;
-  removeEnabledModels: (provider: GlobalLLMProviderKey, model: string) => Promise<void>;
-  setModelProviderConfig: <T extends GlobalLLMProviderKey>(
-    provider: T,
-    config: Partial<UserModelProviderConfig[T]>,
-  ) => Promise<void>;
-  toggleEditingCustomModelCard: (params?: { id: string; provider: GlobalLLMProviderKey }) => void;
-
-  toggleProviderEnabled: (provider: GlobalLLMProviderKey, enabled: boolean) => Promise<void>;
-
-  updateEnabledModels: (
-    provider: GlobalLLMProviderKey,
-    modelKeys: string[],
-    options: { label?: string; value?: string }[],
-  ) => Promise<void>;
-
-  updateKeyVaultConfig: <T extends GlobalLLMProviderKey>(
-    provider: T,
-    config: Partial<UserKeyVaults[T]>,
-  ) => Promise<void>;
-
-  updateKeyVaultSettings: (key: string, config: any) => Promise<void>;
-
-  useFetchProviderModelList: (
-    provider: GlobalLLMProviderKey,
-    enabledAutoFetch: boolean,
-  ) => SWRResponse;
-}
-
-export const createModelListSlice: StateCreator<
-  UserStore,
-  [['zustand/devtools', never]],
-  [],
-  ModelListAction
-> = (set, get) => ({
-  clearObtainedModels: async (provider: GlobalLLMProviderKey) => {
-    await get().setModelProviderConfig(provider, {
-      remoteModelCards: [],
-    });
-
-    await get().refreshDefaultModelProviderList();
-  },
-  dispatchCustomModelCards: async (provider, payload) => {
-    const prevState = settingsSelectors.providerConfig(provider)(get());
-
-    if (!prevState) return;
-
-    const nextState = customModelCardsReducer(prevState.customModelCards, payload);
-
-    await get().setModelProviderConfig(provider, { customModelCards: nextState });
-  },
-  refreshDefaultModelProviderList: async (params) => {
-    /**
-     * Because we have several model cards sources, we need to merge the model cards
-     * the priority is below:
-     * 1 - server side model cards
-     * 2 - remote model cards
-     * 3 - default model cards
-     */
-
-    const mergeModels = (providerKey: GlobalLLMProviderKey, providerCard: ModelProviderCard) => {
-      // if the chat model is config in the server side, use the server side model cards
-      const serverChatModels = modelProviderSelectors.serverProviderModelCards(providerKey)(get());
-      const remoteChatModels = providerCard.modelList?.showModelFetcher
-        ? modelProviderSelectors.remoteProviderModelCards(providerKey)(get())
-        : undefined;
-
-      if (serverChatModels && serverChatModels.length > 0) {
-        return serverChatModels;
-      }
-      if (remoteChatModels && remoteChatModels.length > 0) {
-        return remoteChatModels;
-      }
-
-      return providerCard.chatModels;
-    };
-
-    const { DEFAULT_MODEL_PROVIDER_LIST } = await import('@/config/modelProviders');
-    const defaultModelProviderList = produce(DEFAULT_MODEL_PROVIDER_LIST, (draft) => {
-      Object.values(ModelProvider).forEach((id) => {
-        const provider = draft.find((d) => d.id === id);
-        if (provider) provider.chatModels = mergeModels(id as any, provider);
-      });
-    });
-
-    set({ defaultModelProviderList }, false, `refreshDefaultModelList - ${params?.trigger}`);
-
-    get().refreshModelProviderList({ trigger: 'refreshDefaultModelList' });
-  },
-  refreshModelProviderList: (params) => {
-    const modelProviderList = get().defaultModelProviderList.map((list) => {
-      const enabledModels = modelProviderSelectors.getEnableModelsById(list.id)(get());
-      return {
-        ...list,
-        chatModels: modelProviderSelectors
-          .getModelCardsById(list.id)(get())
-          ?.map((model) => {
-            if (!enabledModels) return model;
-
-            return {
-              ...model,
-              enabled: enabledModels?.some((m) => m === model.id),
-            };
-          }),
-        enabled: modelProviderSelectors.isProviderEnabled(list.id as any)(get()),
-      };
-    });
-
-    set({ modelProviderList }, false, `refreshModelList - ${params?.trigger}`);
-  },
-
-  removeEnabledModels: async (provider, model) => {
-    const config = settingsSelectors.providerConfig(provider)(get());
-
-    await get().setModelProviderConfig(provider, {
-      enabledModels: config?.enabledModels?.filter((s) => s !== model).filter(Boolean),
-    });
-  },
-
-  setModelProviderConfig: async (provider, config) => {
-    await get().setSettings({ languageModel: { [provider]: config } });
-  },
-
-  toggleEditingCustomModelCard: (params) => {
-    set({ editingCustomCardModel: params }, false, 'toggleEditingCustomModelCard');
-  },
-
-  toggleProviderEnabled: async (provider, enabled) => {
-    await get().setSettings({ languageModel: { [provider]: { enabled } } });
-  },
-  updateEnabledModels: async (provider, value, options) => {
-    const { dispatchCustomModelCards, setModelProviderConfig } = get();
-    const enabledModels = modelProviderSelectors.getEnableModelsById(provider)(get());
-
-    // if there is a new model, add it to `customModelCards`
-    const pools = options.map(async (option: { label?: string; value?: string }, index: number) => {
-      // if is a known model, it should have value
-      // if is an unknown model, the option will be {}
-      if (option.value) return;
-
-      const modelId = value[index];
-
-      // if is in enabledModels, it means it's a removed model
-      if (enabledModels?.some((m) => modelId === m)) return;
-
-      await dispatchCustomModelCards(provider, {
-        modelCard: { id: modelId },
-        type: 'add',
-      });
-    });
-
-    // TODO: this pool approach is not the best implementation, because it triggers multiple setModelProviderConfig updates.
-    // In theory these changes should be merged and applied in a single update at the end.
-    // The follow-up is to make dispatchCustomModelCards a synchronous method and perform one async update at the end,
-    // which correspondingly requires reworking the 'should add new custom model to customModelCards' unit test.
-    await Promise.all(pools);
-
-    await setModelProviderConfig(provider, { enabledModels: value.filter(Boolean) });
-  },
-
-  updateKeyVaultConfig: async (provider, config) => {
-    await get().setSettings({ keyVaults: { [provider]: config } });
-  },
-
-  updateKeyVaultSettings: async (provider, config) => {
-    await get().setSettings({ keyVaults: { [provider]: config } });
-  },
-
-  useFetchProviderModelList: (provider, enabledAutoFetch) =>
-    useSWR<ChatModelCard[] | undefined>(
-      [provider, enabledAutoFetch],
-      async ([p]) => {
-        const { modelsService } = await import('@/services/models');
-
-        return modelsService.getModels(p);
-      },
-      {
-        onSuccess: async (data) => {
-          if (data) {
-            await get().setModelProviderConfig(provider, {
-              latestFetchTime: Date.now(),
-              remoteModelCards: data,
-            });
-
-            get().refreshDefaultModelProviderList();
-          }
-        },
-        revalidateOnFocus: false,
-        revalidateOnMount: enabledAutoFetch,
-      },
-    ),
-});
@@ -1,15 +0,0 @@
-import { DEFAULT_MODEL_PROVIDER_LIST } from '@/config/modelProviders';
-import { ModelProviderCard } from '@/types/llm';
-import { ServerLanguageModel } from '@/types/serverConfig';
-
-export interface ModelListState {
-  defaultModelProviderList: ModelProviderCard[];
-  editingCustomCardModel?: { id: string; provider: string } | undefined;
-  modelProviderList: ModelProviderCard[];
-  serverLanguageModel?: ServerLanguageModel;
-}
-
-export const initialModelListState: ModelListState = {
-  defaultModelProviderList: DEFAULT_MODEL_PROVIDER_LIST,
-  modelProviderList: DEFAULT_MODEL_PROVIDER_LIST,
-};