@lobehub/chat 1.53.8 → 1.53.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.53.10](https://github.com/lobehub/lobe-chat/compare/v1.53.9...v1.53.10)
+
+<sup>Released on **2025-02-13**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix api key input issue.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix api key input issue, closes [#6112](https://github.com/lobehub/lobe-chat/issues/6112) ([48e3b85](https://github.com/lobehub/lobe-chat/commit/48e3b85))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
+### [Version 1.53.9](https://github.com/lobehub/lobe-chat/compare/v1.53.8...v1.53.9)
+
+<sup>Released on **2025-02-13**</sup>
+
+#### 💄 Styles
+
+- **misc**: Support select check models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Support select check models, closes [#6106](https://github.com/lobehub/lobe-chat/issues/6106) ([2243bbb](https://github.com/lobehub/lobe-chat/commit/2243bbb))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.53.8](https://github.com/lobehub/lobe-chat/compare/v1.53.7...v1.53.8)
 
 <sup>Released on **2025-02-13**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Fix api key input issue."
+      ]
+    },
+    "date": "2025-02-13",
+    "version": "1.53.10"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Support select check models."
+      ]
+    },
+    "date": "2025-02-13",
+    "version": "1.53.9"
+  },
   {
     "children": {
       "fixes": [
@@ -17,7 +17,7 @@ LobeChat supports customizing the model list during deployment. This configurati
 You can use `+` to add a model, `-` to hide a model, and use `model name=display name<extension configuration>` to customize the display name of a model, separated by English commas. The basic syntax is as follows:
 
 ```text
-id=displayName<maxToken:vision:fc:file>,model2,model3
+id=displayName<maxToken:vision:reasoning:fc:file>,model2,model3
 ```
 
 For example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
@@ -29,7 +29,7 @@ In the above example, it adds `qwen-7b-chat` and `glm-6b` to the model list, rem
 Considering the diversity of model capabilities, we started to add extension configuration in version `0.147.8`, with the following rules:
 
 ```shell
-id=displayName<maxToken:vision:fc:file>
+id=displayName<maxToken:vision:reasoning:fc:file>
 ```
 
 The first value in angle brackets is designated as the `maxToken` for this model. The second value and beyond are the model's extension capabilities, separated by colons `:`, and the order is not important.
@@ -39,12 +39,14 @@ Examples are as follows:
 - `chatglm-6b=ChatGLM 6B<4096>`: ChatGLM 6B, maximum context of 4k, no advanced capabilities;
 - `spark-v3.5=讯飞星火 v3.5<8192:fc>`: Xunfei Spark 3.5 model, maximum context of 8k, supports Function Call;
 - `gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>`: Google Vision model, maximum context of 16k, supports image recognition;
+- `o3-mini=OpenAI o3-mini<200000:reasoning:fc>`: OpenAI o3-mini model, maximum context of 200k, supports reasoning and Function Call;
 - `gpt-4-all=ChatGPT Plus<128000:fc:vision:file>`, hacked version of ChatGPT Plus web, context of 128k, supports image recognition, Function Call, file upload.
 
 Currently supported extension capabilities are:
 
-| --- | Description |
-| -------- | -------------------------------------------------------- |
-| `fc` | Function Calling |
-| `vision` | Image Recognition |
-| `file` | File Upload (a bit hacky, not recommended for daily use) |
+| --- | Description |
+| ----------- | -------------------------------------------------------- |
+| `fc` | Function Calling |
+| `vision` | Image Recognition |
+| `reasoning` | Support Reasoning |
+| `file` | File Upload (a bit hacky, not recommended for daily use) |
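Read together, the add/hide/rename operators and the capability flags compose into a single comma-separated value. The following is an illustrative sketch only: it assumes the `OPENAI_MODEL_LIST` environment variable LobeChat uses for the OpenAI provider's model list, and the model ids are examples.

```shell
# Hypothetical deployment value: hide the built-in gpt-3.5-turbo, add o3-mini
# with a 200k context plus reasoning and Function Call, and rename
# gpt-4-0125-preview to gpt-4-turbo.
OPENAI_MODEL_LIST="-gpt-3.5-turbo,+o3-mini=OpenAI o3-mini<200000:reasoning:fc>,gpt-4-0125-preview=gpt-4-turbo"
```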
@@ -16,7 +16,7 @@ LobeChat 支持在部署时自定义模型列表,详情请参考 [模型提供
 你可以使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。通过 `<>` 来添加扩展配置。基本语法如下:
 
 ```text
-id=displayName<maxToken:vision:fc:file>,model2,model3
+id=displayName<maxToken:vision:reasoning:fc:file>,model2,model3
 ```
 
 例如: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo`
@@ -28,7 +28,7 @@ id=displayName<maxToken:vision:fc:file>,model2,model3
 考虑到模型的能力多样性,我们在 `0.147.8` 版本开始增加扩展性配置,它的规则如下:
 
 ```shell
-id=displayName<maxToken:vision:fc:file>
+id=displayName<maxToken:vision:reasoning:fc:file>
 ```
 
 尖括号第一个值约定为这个模型的 `maxToken` 。第二个及以后作为模型的扩展能力,能力与能力之间用冒号 `:` 作为分隔符,顺序不重要。
@@ -38,12 +38,14 @@ id=displayName<maxToken:vision:fc:file>
 - `chatglm-6b=ChatGLM 6B<4096>`:ChatGLM 6B,最大上下文 4k,没有高阶能力;
 - `spark-v3.5=讯飞星火 v3.5<8192:fc>`:讯飞星火 3.5 模型,最大上下文 8k,支持 Function Call;
 - `gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>`:Google 视觉模型,最大上下文 16k,支持图像识别;
+- `o3-mini=OpenAI o3-mini<200000:reasoning:fc>`:OpenAI o3-mini 模型,最大上下文 200k,支持推理及 Function Call;
 - `gpt-4-all=ChatGPT Plus<128000:fc:vision:file>`,hack 的 ChatGPT Plus 网页版,上下文 128k,支持图像识别、Function Call、文件上传
 
 目前支持的扩展能力有:
 
-| --- | 描述 |
-| -------- | ---------------------- |
-| `fc` | 函数调用(function calling) |
-| `vision` | 视觉识别 |
-| `file` | 文件上传(比较 hack,不建议日常使用) |
+| --- | 描述 |
+| ----------- | ------------------------------------- |
+| `fc` | 函数调用(function calling) |
+| `vision` | 视觉识别 |
+| `reasoning` | 支持推理 |
+| `file` | 文件上传(比较 hack,不建议日常使用) |
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.53.8",
+  "version": "1.53.10",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -45,5 +45,3 @@ const Page = async (props: PagePropsWithId) => {
 };
 
 export default Page;
-
-export const dynamic = 'auto';
@@ -77,15 +77,11 @@ const useProviderCard = (): ProviderItem => {
     ) : (
       <AutoComplete
         options={[
+          '2024-10-21',
           '2024-06-01',
-          '2024-02-01',
-          '2024-05-01-preview',
-          '2024-04-01-preview',
-          '2024-03-01-preview',
-          '2024-02-15-preview',
-          '2023-10-01-preview',
-          '2023-06-01-preview',
-          '2023-05-15',
+          '2025-01-01-preview',
+          '2024-09-01-preview',
+          '2024-10-01-preview',
         ].map((i) => ({ label: i, value: i }))}
         placeholder={'20XX-XX-XX'}
       />
@@ -1,18 +1,19 @@
 'use client';
 
 import { CheckCircleFilled } from '@ant-design/icons';
-import { Alert, Highlighter } from '@lobehub/ui';
-import { Button } from 'antd';
+import { ModelIcon } from '@lobehub/icons';
+import { Alert, Highlighter, Icon } from '@lobehub/ui';
+import { Button, Select, Space } from 'antd';
 import { useTheme } from 'antd-style';
+import { Loader2Icon } from 'lucide-react';
 import { ReactNode, memo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
 import { TraceNameMap } from '@/const/trace';
-import { useIsMobile } from '@/hooks/useIsMobile';
 import { useProviderName } from '@/hooks/useProviderName';
 import { chatService } from '@/services/chat';
-import { aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
+import { aiModelSelectors, aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
 import { ChatMessageError } from '@/types/message';
 
 const Error = memo<{ error: ChatMessageError }>(({ error }) => {
@@ -20,9 +21,8 @@ const Error = memo<{ error: ChatMessageError }>(({ error }) => {
   const providerName = useProviderName(error.body?.provider);
 
   return (
-    <Flexbox gap={8} style={{ maxWidth: '600px', width: '100%' }}>
+    <Flexbox gap={8} style={{ width: '100%' }}>
       <Alert
-        banner
         extra={
           <Flexbox>
             <Highlighter copyButtonSize={'small'} language={'json'} type={'pure'}>
@@ -54,10 +54,15 @@
   ({ model, provider, checkErrorRender: CheckErrorRender }) => {
     const { t } = useTranslation('setting');
 
-    const disabled = useAiInfraStore(aiProviderSelectors.isProviderConfigUpdating(provider));
+    const isProviderConfigUpdating = useAiInfraStore(
+      aiProviderSelectors.isProviderConfigUpdating(provider),
+    );
+    const totalModels = useAiInfraStore(aiModelSelectors.aiProviderChatModelListIds);
+    const updateAiProviderConfig = useAiInfraStore((s) => s.updateAiProviderConfig);
 
     const [loading, setLoading] = useState(false);
     const [pass, setPass] = useState(false);
+    const [checkModel, setCheckModel] = useState(model);
 
     const theme = useTheme();
     const [error, setError] = useState<ChatMessageError | undefined>();
@@ -71,6 +76,7 @@ const Checker = memo<ConnectionCheckerProps>(
         setPass(false);
         isError = true;
       },
+
       onFinish: async (value) => {
         if (!isError && value) {
           setError(undefined);
@@ -104,7 +110,6 @@ const Checker = memo<ConnectionCheckerProps>(
       },
     });
   };
-  const isMobile = useIsMobile();
 
   const defaultError = error ? <Error error={error as ChatMessageError} /> : null;
 
@@ -115,26 +120,42 @@
     );
 
     return (
-      <Flexbox align={isMobile ? 'flex-start' : 'flex-end'} gap={8}>
-        <Flexbox
-          align={'center'}
-          direction={isMobile ? 'horizontal-reverse' : 'horizontal'}
-          gap={12}
-        >
-          {pass && (
-            <Flexbox gap={4} horizontal>
-              <CheckCircleFilled
-                style={{
-                  color: theme.colorSuccess,
-                }}
-              />
-              {t('llm.checker.pass')}
-            </Flexbox>
-          )}
-          <Button disabled={disabled} loading={loading} onClick={checkConnection}>
+      <Flexbox gap={8}>
+        <Space.Compact block>
+          <Select
+            listItemHeight={36}
+            onSelect={async (value) => {
+              setCheckModel(value);
+              await updateAiProviderConfig(provider, { checkModel: value });
+            }}
+            optionRender={({ value }) => {
+              return (
+                <Flexbox align={'center'} gap={6} horizontal>
+                  <ModelIcon model={value as string} size={20} />
+                  {value}
+                </Flexbox>
+              );
+            }}
+            options={totalModels.map((id) => ({ label: id, value: id }))}
+            suffixIcon={isProviderConfigUpdating && <Icon icon={Loader2Icon} spin />}
+            value={checkModel}
+            virtual
+          />
+          <Button disabled={isProviderConfigUpdating} loading={loading} onClick={checkConnection}>
            {t('llm.checker.button')}
          </Button>
-        </Flexbox>
+        </Space.Compact>
+
+        {pass && (
+          <Flexbox gap={4} horizontal>
+            <CheckCircleFilled
+              style={{
+                color: theme.colorSuccess,
+              }}
+            />
+            {t('llm.checker.pass')}
+          </Flexbox>
+        )}
         {error && errorContent}
       </Flexbox>
     );
@@ -275,7 +275,11 @@
       children: isLoading ? (
         <Skeleton.Button active />
       ) : (
-        <Checker checkErrorRender={checkErrorRender} model={checkModel!} provider={id} />
+        <Checker
+          checkErrorRender={checkErrorRender}
+          model={data?.checkModel || checkModel!}
+          provider={id}
+        />
       ),
       desc: t('providerModels.config.checker.desc'),
       label: t('providerModels.config.checker.title'),
@@ -0,0 +1,11 @@
+import { createContext } from 'react';
+
+interface LoadingContextValue {
+  loading: boolean;
+  setLoading: (loading: boolean) => void;
+}
+
+export const LoadingContext = createContext<LoadingContextValue>({
+  loading: false,
+  setLoading: () => {},
+});
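The new file above defines a plain React context with a no-op default. As a minimal consumption sketch (the `SavingHint` component here is hypothetical and for illustration only; the real provider and consumers, `APIKeyForm`, `ProviderApiKeyForm`, and `useApiKey`, appear in the hunks that follow):

```tsx
// Hypothetical consumer, for illustration only.
import { useContext } from 'react';

import { LoadingContext } from '@/features/Conversation/Error/APIKeyForm/LoadingContext';

const SavingHint = () => {
  // Reads the nearest provider; falls back to the context default
  // ({ loading: false, setLoading: () => {} }) when none is mounted above.
  const { loading } = useContext(LoadingContext);
  return loading ? <span>Saving…</span> : null;
};

export default SavingHint;
```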
@@ -1,9 +1,11 @@
 import { Icon } from '@lobehub/ui';
-import { Button, Input } from 'antd';
-import { Network } from 'lucide-react';
-import { ReactNode, memo, useState } from 'react';
+import { Button } from 'antd';
+import { Loader2Icon, Network } from 'lucide-react';
+import { ReactNode, memo, useContext, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 
+import { FormInput, FormPassword } from '@/components/FormInput';
+import { LoadingContext } from '@/features/Conversation/Error/APIKeyForm/LoadingContext';
 import { useProviderName } from '@/hooks/useProviderName';
 import { featureFlagsSelectors, useServerConfigStore } from '@/store/serverConfig';
 import { GlobalLLMProviderKey } from '@/types/user/settings';
@@ -27,6 +29,7 @@ const ProviderApiKeyForm = memo<ProviderApiKeyFormProps>(
     const { apiKey, baseURL, setConfig } = useApiKey(provider);
     const { showOpenAIProxyUrl } = useServerConfigStore(featureFlagsSelectors);
     const providerName = useProviderName(provider);
+    const { loading } = useContext(LoadingContext);
 
     return (
       <FormAction
@@ -34,25 +37,25 @@ const ProviderApiKeyForm = memo<ProviderApiKeyFormProps>(
         description={t(`unlock.apiKey.description`, { name: providerName, ns: 'error' })}
         title={t(`unlock.apiKey.title`, { name: providerName, ns: 'error' })}
       >
-        <Input.Password
+        <FormPassword
           autoComplete={'new-password'}
-          onChange={(e) => {
-            setConfig(provider, { apiKey: e.target.value });
+          onChange={(value) => {
+            setConfig(provider, { apiKey: value });
           }}
           placeholder={apiKeyPlaceholder || 'sk-***********************'}
-          type={'block'}
+          suffix={<div>{loading && <Icon icon={Loader2Icon} spin />}</div>}
           value={apiKey}
         />
 
         {showEndpoint &&
           showOpenAIProxyUrl &&
           (showProxy ? (
-            <Input
-              onChange={(e) => {
-                setConfig(provider, { baseURL: e.target.value });
+            <FormInput
+              onChange={(value) => {
+                setConfig(provider, { baseURL: value });
               }}
               placeholder={'https://api.openai.com/v1'}
-              type={'block'}
+              suffix={<div>{loading && <Icon icon={Loader2Icon} spin />}</div>}
               value={baseURL}
             />
           ) : (
@@ -1,6 +1,6 @@
 import { ProviderIcon } from '@lobehub/icons';
 import { Button } from 'antd';
-import { memo, useMemo } from 'react';
+import { memo, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Center, Flexbox } from 'react-layout-kit';
 
@@ -9,6 +9,7 @@ import { useChatStore } from '@/store/chat';
 import { GlobalLLMProviderKey } from '@/types/user/settings';
 
 import BedrockForm from './Bedrock';
+import { LoadingContext } from './LoadingContext';
 import ProviderApiKeyForm from './ProviderApiKeyForm';
 
 interface APIKeyFormProps {
@@ -18,6 +19,7 @@ interface APIKeyFormProps {
 
 const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
   const { t } = useTranslation('error');
+  const [loading, setLoading] = useState(false);
 
   const [resend, deleteMessage] = useChatStore((s) => [s.regenerateMessage, s.deleteMessage]);
 
@@ -62,38 +64,41 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
   }, [provider]);
 
   return (
-    <Center gap={16} style={{ maxWidth: 300 }}>
-      {provider === ModelProvider.Bedrock ? (
-        <BedrockForm />
-      ) : (
-        <ProviderApiKeyForm
-          apiKeyPlaceholder={apiKeyPlaceholder}
-          avatar={<ProviderIcon provider={provider} size={80} type={'avatar'} />}
-          provider={provider as GlobalLLMProviderKey}
-          showEndpoint={provider === ModelProvider.OpenAI}
-        />
-      )}
-      <Flexbox gap={12} width={'100%'}>
-        <Button
-          block
-          onClick={() => {
-            resend(id);
-            deleteMessage(id);
-          }}
-          style={{ marginTop: 8 }}
-          type={'primary'}
-        >
-          {t('unlock.confirm')}
-        </Button>
-        <Button
-          onClick={() => {
-            deleteMessage(id);
-          }}
-        >
-          {t('unlock.closeMessage')}
-        </Button>
-      </Flexbox>
-    </Center>
+    <LoadingContext value={{ loading, setLoading }}>
+      <Center gap={16} style={{ maxWidth: 300 }}>
+        {provider === ModelProvider.Bedrock ? (
+          <BedrockForm />
+        ) : (
+          <ProviderApiKeyForm
+            apiKeyPlaceholder={apiKeyPlaceholder}
+            avatar={<ProviderIcon provider={provider} size={80} type={'avatar'} />}
+            provider={provider as GlobalLLMProviderKey}
+            showEndpoint={provider === ModelProvider.OpenAI}
+          />
+        )}
+        <Flexbox gap={12} width={'100%'}>
+          <Button
+            block
+            disabled={loading}
+            onClick={() => {
+              resend(id);
+              deleteMessage(id);
+            }}
+            style={{ marginTop: 8 }}
+            type={'primary'}
+          >
+            {t('unlock.confirm')}
+          </Button>
+          <Button
+            onClick={() => {
+              deleteMessage(id);
+            }}
+          >
+            {t('unlock.closeMessage')}
+          </Button>
+        </Flexbox>
+      </Center>
+    </LoadingContext>
   );
 });
 
@@ -1,6 +1,8 @@
 import isEqual from 'fast-deep-equal';
+import { useContext } from 'react';
 
 import { isDeprecatedEdition } from '@/const/version';
+import { LoadingContext } from '@/features/Conversation/Error/APIKeyForm/LoadingContext';
 import { aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
 import { useUserStore } from '@/store/user';
 import { keyVaultsConfigSelectors } from '@/store/user/selectors';
@@ -11,7 +13,7 @@ export const useApiKey = (provider: string) => {
     keyVaultsConfigSelectors.getVaultByProvider(provider as any)(s)?.baseURL,
     s.updateKeyVaultConfig,
   ]);
-
+  const { setLoading } = useContext(LoadingContext);
   const updateAiProviderConfig = useAiInfraStore((s) => s.updateAiProviderConfig);
   const data = useAiInfraStore(aiProviderSelectors.providerConfigById(provider), isEqual);
 
@@ -23,12 +25,14 @@
     apiKey: data?.keyVaults.apiKey,
     baseURL: data?.keyVaults?.baseURL,
     setConfig: async (id: string, params: Record<string, string>) => {
+      const next = { ...data?.keyVaults, ...params };
+      if (isEqual(data?.keyVaults, next)) return;
+
+      setLoading(true);
       await updateAiProviderConfig(id, {
-        keyVaults: {
-          ...data?.keyVaults,
-          ...params,
-        },
+        keyVaults: { ...data?.keyVaults, ...params },
       });
+      setLoading(false);
     },
   };
 };
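The early return added to `setConfig` above, together with the new `FormInput`/`FormPassword` components, appears to be the heart of the "Fix api key input issue" change: identical values no longer trigger a server write, and the loading flag only toggles around real updates. A standalone sketch of the same guard pattern, using hypothetical names:

```ts
// Standalone sketch of the no-op write guard (hypothetical names).
import isEqual from 'fast-deep-equal';

export const saveIfChanged = async (
  current: Record<string, string> | undefined,
  params: Record<string, string>,
  persist: (next: Record<string, string>) => Promise<void>,
) => {
  const next = { ...current, ...params };
  // Skip the round-trip entirely when the merged value is deep-equal.
  if (isEqual(current, next)) return;
  await persist(next);
};
```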
@@ -0,0 +1,122 @@
+import { describe, expect, it, vi } from 'vitest';
+
+import { lambdaClient } from '@/libs/trpc/client';
+import { AiProviderModelListItem } from '@/types/aiModel';
+
+import { ServerService } from './server';
+
+vi.mock('@/libs/trpc/client', () => ({
+  lambdaClient: {
+    aiModel: {
+      createAiModel: { mutate: vi.fn() },
+      getAiProviderModelList: { query: vi.fn() },
+      getAiModelById: { query: vi.fn() },
+      toggleModelEnabled: { mutate: vi.fn() },
+      updateAiModel: { mutate: vi.fn() },
+      batchUpdateAiModels: { mutate: vi.fn() },
+      batchToggleAiModels: { mutate: vi.fn() },
+      clearModelsByProvider: { mutate: vi.fn() },
+      clearRemoteModels: { mutate: vi.fn() },
+      updateAiModelOrder: { mutate: vi.fn() },
+      removeAiModel: { mutate: vi.fn() },
+    },
+  },
+}));
+
+describe('ServerService', () => {
+  const service = new ServerService();
+
+  it('should create AI model', async () => {
+    const params = {
+      id: 'test-id',
+      providerId: 'test-provider',
+      displayName: 'Test Model',
+    };
+    await service.createAiModel(params);
+    expect(vi.mocked(lambdaClient.aiModel.createAiModel.mutate)).toHaveBeenCalledWith(params);
+  });
+
+  it('should get AI provider model list', async () => {
+    await service.getAiProviderModelList('123');
+    expect(vi.mocked(lambdaClient.aiModel.getAiProviderModelList.query)).toHaveBeenCalledWith({
+      id: '123',
+    });
+  });
+
+  it('should get AI model by id', async () => {
+    await service.getAiModelById('123');
+    expect(vi.mocked(lambdaClient.aiModel.getAiModelById.query)).toHaveBeenCalledWith({
+      id: '123',
+    });
+  });
+
+  it('should toggle model enabled', async () => {
+    const params = { id: '123', providerId: 'test', enabled: true };
+    await service.toggleModelEnabled(params);
+    expect(vi.mocked(lambdaClient.aiModel.toggleModelEnabled.mutate)).toHaveBeenCalledWith(params);
+  });
+
+  it('should update AI model', async () => {
+    const value = { contextWindowTokens: 4000, displayName: 'Updated Model' };
+    await service.updateAiModel('123', 'openai', value);
+    expect(vi.mocked(lambdaClient.aiModel.updateAiModel.mutate)).toHaveBeenCalledWith({
+      id: '123',
+      providerId: 'openai',
+      value,
+    });
+  });
+
+  it('should batch update AI models', async () => {
+    const models: AiProviderModelListItem[] = [
+      {
+        id: '123',
+        enabled: true,
+        type: 'chat',
+      },
+    ];
+    await service.batchUpdateAiModels('provider1', models);
+    expect(vi.mocked(lambdaClient.aiModel.batchUpdateAiModels.mutate)).toHaveBeenCalledWith({
+      id: 'provider1',
+      models,
+    });
+  });
+
+  it('should batch toggle AI models', async () => {
+    const models = ['123', '456'];
+    await service.batchToggleAiModels('provider1', models, true);
+    expect(vi.mocked(lambdaClient.aiModel.batchToggleAiModels.mutate)).toHaveBeenCalledWith({
+      id: 'provider1',
+      models,
+      enabled: true,
+    });
+  });
+
+  it('should clear models by provider', async () => {
+    await service.clearModelsByProvider('provider1');
+    expect(vi.mocked(lambdaClient.aiModel.clearModelsByProvider.mutate)).toHaveBeenCalledWith({
+      providerId: 'provider1',
+    });
+  });
+
+  it('should clear remote models', async () => {
+    await service.clearRemoteModels('provider1');
+    expect(vi.mocked(lambdaClient.aiModel.clearRemoteModels.mutate)).toHaveBeenCalledWith({
+      providerId: 'provider1',
+    });
+  });
+
+  it('should update AI model order', async () => {
+    const items = [{ id: '123', sort: 1 }];
+    await service.updateAiModelOrder('provider1', items);
+    expect(vi.mocked(lambdaClient.aiModel.updateAiModelOrder.mutate)).toHaveBeenCalledWith({
+      providerId: 'provider1',
+      sortMap: items,
+    });
+  });
+
+  it('should delete AI model', async () => {
+    const params = { id: '123', providerId: 'openai' };
+    await service.deleteAiModel(params);
+    expect(vi.mocked(lambdaClient.aiModel.removeAiModel.mutate)).toHaveBeenCalledWith(params);
+  });
+});
@@ -1,6 +1,8 @@
 import { AIProviderStoreState } from '@/store/aiInfra/initialState';
 import { AiModelSourceEnum } from '@/types/aiModel';
 
+const aiProviderChatModelListIds = (s: AIProviderStoreState) =>
+  s.aiProviderModelList.filter((item) => item.type === 'chat').map((item) => item.id);
 // List
 const enabledAiProviderModelList = (s: AIProviderStoreState) =>
   s.aiProviderModelList.filter((item) => item.enabled);
@@ -68,6 +70,7 @@ const modelContextWindowTokens = (id: string, provider: string) => (s: AIProvide
 };
 
 export const aiModelSelectors = {
+  aiProviderChatModelListIds,
   disabledAiProviderModelList,
   enabledAiProviderModelList,
   filteredAiProviderModelList,