@lobehub/chat 1.49.9 → 1.49.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/components.json +24 -0
  4. package/locales/ar/modelProvider.json +0 -24
  5. package/locales/ar/models.json +15 -0
  6. package/locales/bg-BG/components.json +24 -0
  7. package/locales/bg-BG/modelProvider.json +0 -24
  8. package/locales/bg-BG/models.json +15 -0
  9. package/locales/de-DE/components.json +24 -0
  10. package/locales/de-DE/modelProvider.json +0 -24
  11. package/locales/de-DE/models.json +15 -0
  12. package/locales/en-US/components.json +24 -0
  13. package/locales/en-US/modelProvider.json +0 -24
  14. package/locales/en-US/models.json +15 -0
  15. package/locales/es-ES/components.json +24 -0
  16. package/locales/es-ES/modelProvider.json +0 -24
  17. package/locales/es-ES/models.json +15 -0
  18. package/locales/fa-IR/components.json +24 -0
  19. package/locales/fa-IR/modelProvider.json +0 -24
  20. package/locales/fa-IR/models.json +15 -0
  21. package/locales/fr-FR/components.json +24 -0
  22. package/locales/fr-FR/modelProvider.json +0 -24
  23. package/locales/fr-FR/models.json +15 -0
  24. package/locales/it-IT/components.json +24 -0
  25. package/locales/it-IT/modelProvider.json +0 -24
  26. package/locales/it-IT/models.json +15 -0
  27. package/locales/ja-JP/components.json +24 -0
  28. package/locales/ja-JP/modelProvider.json +0 -24
  29. package/locales/ja-JP/models.json +15 -0
  30. package/locales/ko-KR/components.json +24 -0
  31. package/locales/ko-KR/modelProvider.json +0 -24
  32. package/locales/ko-KR/models.json +4 -0
  33. package/locales/nl-NL/components.json +24 -0
  34. package/locales/nl-NL/modelProvider.json +0 -24
  35. package/locales/nl-NL/models.json +15 -0
  36. package/locales/pl-PL/components.json +24 -0
  37. package/locales/pl-PL/modelProvider.json +0 -24
  38. package/locales/pl-PL/models.json +15 -0
  39. package/locales/pt-BR/components.json +24 -0
  40. package/locales/pt-BR/modelProvider.json +0 -24
  41. package/locales/pt-BR/models.json +15 -0
  42. package/locales/ru-RU/components.json +24 -0
  43. package/locales/ru-RU/modelProvider.json +0 -24
  44. package/locales/ru-RU/models.json +15 -0
  45. package/locales/tr-TR/components.json +24 -0
  46. package/locales/tr-TR/modelProvider.json +0 -24
  47. package/locales/tr-TR/models.json +15 -0
  48. package/locales/vi-VN/components.json +24 -0
  49. package/locales/vi-VN/modelProvider.json +0 -24
  50. package/locales/vi-VN/models.json +15 -0
  51. package/locales/zh-CN/components.json +24 -0
  52. package/locales/zh-CN/modelProvider.json +0 -24
  53. package/locales/zh-CN/models.json +16 -1
  54. package/locales/zh-TW/components.json +24 -0
  55. package/locales/zh-TW/modelProvider.json +0 -24
  56. package/locales/zh-TW/models.json +15 -0
  57. package/package.json +1 -1
  58. package/src/app/(main)/settings/provider/(detail)/[id]/page.tsx +10 -3
  59. package/src/app/(main)/settings/provider/(detail)/ollama/CheckError.tsx +70 -0
  60. package/src/app/(main)/settings/provider/(detail)/ollama/Container.tsx +57 -0
  61. package/src/app/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/index.tsx +127 -0
  62. package/src/app/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/useDownloadMonitor.ts +29 -0
  63. package/src/app/(main)/settings/provider/(detail)/ollama/page.tsx +2 -7
  64. package/src/app/(main)/settings/provider/features/ProviderConfig/Checker.tsx +90 -69
  65. package/src/app/(main)/settings/provider/features/ProviderConfig/index.tsx +6 -6
  66. package/src/components/FormAction/index.tsx +66 -0
  67. package/src/components/OllamaSetupGuide/index.tsx +217 -0
  68. package/src/config/aiModels/ollama.ts +12 -19
  69. package/src/config/modelProviders/ollama.ts +1 -0
  70. package/src/database/repositories/aiInfra/index.ts +33 -2
  71. package/src/database/server/models/aiProvider.ts +5 -1
  72. package/src/features/Conversation/Error/OllamaBizError/SetupGuide.tsx +2 -209
  73. package/src/features/Conversation/components/MarkdownElements/Thinking/remarkPlugin.ts +8 -1
  74. package/src/libs/agent-runtime/ollama/index.ts +1 -1
  75. package/src/locales/default/components.ts +26 -0
  76. package/src/locales/default/modelProvider.ts +0 -26
  77. package/src/server/routers/lambda/aiProvider.ts +2 -10
  78. package/src/services/aiProvider/client.ts +2 -8
  79. package/src/store/serverConfig/selectors.test.ts +3 -0
  80. package/src/store/serverConfig/store.test.ts +3 -2
  81. package/src/store/serverConfig/store.ts +1 -1
  82. package/src/store/user/slices/common/action.test.ts +1 -0
  83. package/src/types/serverConfig.ts +1 -1
  84. package/src/app/(main)/settings/provider/(detail)/ollama/Checker.tsx +0 -73
package/src/app/(main)/settings/provider/features/ProviderConfig/Checker.tsx
@@ -4,7 +4,7 @@ import { CheckCircleFilled } from '@ant-design/icons';
 import { Alert, Highlighter } from '@lobehub/ui';
 import { Button } from 'antd';
 import { useTheme } from 'antd-style';
-import { memo, useState } from 'react';
+import { ReactNode, memo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
@@ -38,86 +38,107 @@ const Error = memo<{ error: ChatMessageError }>(({ error }) => {
   );
 });
 
+export type CheckErrorRender = (props: {
+  defaultError: ReactNode;
+  error?: ChatMessageError;
+  setError: (error?: ChatMessageError) => void;
+}) => ReactNode;
+
 interface ConnectionCheckerProps {
+  checkErrorRender?: CheckErrorRender;
   model: string;
   provider: string;
 }
 
-const Checker = memo<ConnectionCheckerProps>(({ model, provider }) => {
-  const { t } = useTranslation('setting');
+const Checker = memo<ConnectionCheckerProps>(
+  ({ model, provider, checkErrorRender: CheckErrorRender }) => {
+    const { t } = useTranslation('setting');
 
-  const disabled = useAiInfraStore(aiProviderSelectors.isProviderConfigUpdating(provider));
+    const disabled = useAiInfraStore(aiProviderSelectors.isProviderConfigUpdating(provider));
 
-  const [loading, setLoading] = useState(false);
-  const [pass, setPass] = useState(false);
+    const [loading, setLoading] = useState(false);
+    const [pass, setPass] = useState(false);
 
-  const theme = useTheme();
-  const [error, setError] = useState<ChatMessageError | undefined>();
+    const theme = useTheme();
+    const [error, setError] = useState<ChatMessageError | undefined>();
 
-  const checkConnection = async () => {
-    let isError = false;
+    const checkConnection = async () => {
+      let isError = false;
 
-    await chatService.fetchPresetTaskResult({
-      onError: (_, rawError) => {
-        setError(rawError);
-        setPass(false);
-        isError = true;
-      },
-      onFinish: async (value) => {
-        if (!isError && value) {
-          setError(undefined);
-          setPass(true);
-        } else {
+      await chatService.fetchPresetTaskResult({
+        onError: (_, rawError) => {
+          setError(rawError);
           setPass(false);
-          setError({
-            body: value,
-            message: t('response.ConnectionCheckFailed', { ns: 'error' }),
-            type: 'ConnectionCheckFailed',
-          });
-        }
-      },
-      onLoadingChange: (loading) => {
-        setLoading(loading);
-      },
-      params: {
-        messages: [
-          {
-            content: '你好',
-            role: 'user',
-          },
-        ],
-        model,
-        provider,
-      },
-      trace: {
-        sessionId: `connection:${provider}`,
-        topicId: model,
-        traceName: TraceNameMap.ConnectivityChecker,
-      },
-    });
-  };
-  const isMobile = useIsMobile();
+          isError = true;
+        },
+        onFinish: async (value) => {
+          if (!isError && value) {
+            setError(undefined);
+            setPass(true);
+          } else {
+            setPass(false);
+            setError({
+              body: value,
+              message: t('response.ConnectionCheckFailed', { ns: 'error' }),
+              type: 'ConnectionCheckFailed',
+            });
+          }
+        },
+        onLoadingChange: (loading) => {
+          setLoading(loading);
+        },
+        params: {
+          messages: [
+            {
+              content: 'hello',
+              role: 'user',
+            },
+          ],
+          model,
+          provider,
+        },
+        trace: {
+          sessionId: `connection:${provider}`,
+          topicId: model,
+          traceName: TraceNameMap.ConnectivityChecker,
+        },
+      });
+    };
+    const isMobile = useIsMobile();
 
-  return (
-    <Flexbox align={isMobile ? 'flex-start' : 'flex-end'} gap={8}>
-      <Flexbox align={'center'} direction={isMobile ? 'horizontal-reverse' : 'horizontal'} gap={12}>
-        {pass && (
-          <Flexbox gap={4} horizontal>
-            <CheckCircleFilled
-              style={{
-                color: theme.colorSuccess,
-              }}
-            />
-            {t('llm.checker.pass')}
-          </Flexbox>
-        )}
-        <Button disabled={disabled} loading={loading} onClick={checkConnection}>
-          {t('llm.checker.button')}
-        </Button>
+    const defaultError = error ? <Error error={error as ChatMessageError} /> : null;
+
+    const errorContent = CheckErrorRender ? (
+      <CheckErrorRender defaultError={defaultError} error={error} setError={setError} />
+    ) : (
+      defaultError
+    );
+
+    return (
+      <Flexbox align={isMobile ? 'flex-start' : 'flex-end'} gap={8}>
+        <Flexbox
+          align={'center'}
+          direction={isMobile ? 'horizontal-reverse' : 'horizontal'}
+          gap={12}
+        >
+          {pass && (
+            <Flexbox gap={4} horizontal>
+              <CheckCircleFilled
+                style={{
+                  color: theme.colorSuccess,
+                }}
+              />
+              {t('llm.checker.pass')}
+            </Flexbox>
+          )}
+          <Button disabled={disabled} loading={loading} onClick={checkConnection}>
+            {t('llm.checker.button')}
+          </Button>
+        </Flexbox>
+        {error && errorContent}
       </Flexbox>
-      {error && <Error error={error} />}
-    </Flexbox>
-  );
-});
+    );
+  },
+);
 
 export default Checker;
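The interesting piece here is the new `CheckErrorRender` contract: a provider page can now substitute its own UI for a failed connectivity check, receiving the default alert, the raw error, and a setter to clear it. A minimal sketch of a custom renderer against this type — the `OllamaServiceUnavailable` branch, the `SetupHint` component, and the import paths are illustrative assumptions, not code from the package:

```tsx
import { ChatMessageError } from '@/types/message';

import { CheckErrorRender } from './Checker';

// Hypothetical replacement UI shown instead of the default alert.
const SetupHint = ({ onClose }: { onClose: () => void }) => (
  <div onClick={onClose}>Start the local service, then run the check again.</div>
);

// Intercept one known error type; fall back to the default alert otherwise.
const checkErrorRender: CheckErrorRender = ({ defaultError, error, setError }) => {
  if (error?.type === 'OllamaServiceUnavailable')
    return <SetupHint onClose={() => setError(undefined)} />;

  return defaultError;
};

export default checkErrorRender;
```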
package/src/app/(main)/settings/provider/features/ProviderConfig/index.tsx
@@ -25,7 +25,7 @@ import {
 } from '@/types/aiProvider';
 
 import { KeyVaultsConfigKey, LLMProviderApiTokenKey, LLMProviderBaseUrlKey } from '../../const';
-import Checker from './Checker';
+import Checker, { CheckErrorRender } from './Checker';
 import EnableSwitch from './EnableSwitch';
 import { SkeletonInput } from './SkeletonInput';
 import UpdateProviderInfo from './UpdateProviderInfo';
@@ -91,7 +91,7 @@ const useStyles = createStyles(({ css, prefixCls, responsive, token }) => ({
 export interface ProviderConfigProps extends Omit<AiProviderDetailItem, 'enabled' | 'source'> {
   apiKeyItems?: FormItemProps[];
   canDeactivate?: boolean;
-  checkerItem?: FormItemProps;
+  checkErrorRender?: CheckErrorRender;
   className?: string;
   enabled?: boolean;
   extra?: ReactNode;
@@ -113,9 +113,9 @@ const ProviderConfig = memo<ProviderConfigProps>(
     id,
     settings,
     checkModel,
-    checkerItem,
     logo,
     className,
+    checkErrorRender,
     name,
     showAceGcm = true,
     extra,
@@ -271,16 +271,16 @@ const ProviderConfig = memo<ProviderConfigProps>(
       endpointItem,
       clientFetchItem,
       showChecker
-        ? (checkerItem ?? {
+        ? {
             children: isLoading ? (
              <Skeleton.Button active />
            ) : (
-              <Checker model={checkModel!} provider={id} />
+              <Checker checkErrorRender={checkErrorRender} model={checkModel!} provider={id} />
            ),
            desc: t('providerModels.config.checker.desc'),
            label: t('providerModels.config.checker.title'),
            minWidth: undefined,
-          })
+          }
        : undefined,
       showAceGcm && isServerMode && aceGcmItem,
     ].filter(Boolean) as FormItemProps[];
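Downstream, `ProviderConfig` simply forwards the renderer, so per-provider error UI is now configured at the page level instead of overriding the whole checker form item. Judging from the file list (`ollama/CheckError.tsx`, `ollama/page.tsx`), the Ollama detail page plugs in its own renderer roughly like this — a sketch with the prop set trimmed; export shape and any names beyond the listed files are assumptions:

```tsx
import ProviderConfig from '../../features/ProviderConfig';
// Listed in this release: the Ollama-specific check-error renderer.
import CheckError from './CheckError';

// Sketch: wire the custom renderer into the shared provider detail form.
const OllamaConfig = () => (
  <ProviderConfig
    checkErrorRender={CheckError}
    checkModel={'deepseek-r1'}
    id={'ollama'}
    name={'Ollama'}
  />
);

export default OllamaConfig;
```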
package/src/components/FormAction/index.tsx
@@ -0,0 +1,66 @@
+import { Avatar } from '@lobehub/ui';
+import { createStyles } from 'antd-style';
+import { ReactNode, memo } from 'react';
+import { Center, CenterProps, Flexbox } from 'react-layout-kit';
+
+export const useStyles = createStyles(({ css, token }) => ({
+  container: css`
+    border: 1px solid ${token.colorSplit};
+    border-radius: 8px;
+    color: ${token.colorText};
+    background: ${token.colorBgContainer};
+  `,
+  desc: css`
+    color: ${token.colorTextTertiary};
+    text-align: center;
+  `,
+  form: css`
+    width: 100%;
+    max-width: 300px;
+  `,
+}));
+
+const FormAction = memo<
+  {
+    animation?: boolean;
+    avatar: ReactNode;
+    background?: string;
+    description: string;
+    title: string;
+  } & CenterProps
+>(
+  ({
+    children,
+    background,
+    title,
+    description,
+    avatar,
+    animation,
+    className,
+    gap = 16,
+    ...rest
+  }) => {
+    const { cx, styles, theme } = useStyles();
+
+    return (
+      <Center className={cx(styles.form, className)} gap={gap} {...rest}>
+        <Avatar
+          animation={animation}
+          avatar={avatar}
+          background={background ?? theme.colorFillContent}
+          gap={12}
+          size={80}
+        />
+        <Flexbox gap={8} width={'100%'}>
+          <Flexbox style={{ fontSize: 18, fontWeight: 'bold', textAlign: 'center' }}>
+            {title}
+          </Flexbox>
+          <Flexbox className={styles.desc}>{description}</Flexbox>
+        </Flexbox>
+        {children}
+      </Center>
+    );
+  },
+);
+
+export default FormAction;
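`FormAction` is a small presentational card: avatar on top, bold centered title, tertiary description, then whatever children the caller appends (typically action buttons). A usage sketch — the component name, copy, and emoji avatar are invented for illustration:

```tsx
import { Button } from 'antd';

import FormAction from '@/components/FormAction';

// Sketch: an error panel built from FormAction, with a retry action as children.
const ServiceUnavailable = ({ onRetry }: { onRetry: () => void }) => (
  <FormAction
    avatar={'🦙'}
    description={'The local service is not responding. Start it and try again.'}
    title={'Service unavailable'}
  >
    <Button onClick={onRetry} type={'primary'}>
      Retry
    </Button>
  </FormAction>
);

export default ServiceUnavailable;
```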
package/src/components/OllamaSetupGuide/index.tsx
@@ -0,0 +1,217 @@
+import { Highlighter, Snippet, TabsNav } from '@lobehub/ui';
+import { Steps } from 'antd';
+import { createStyles } from 'antd-style';
+import Link from 'next/link';
+import { readableColor } from 'polished';
+import { memo } from 'react';
+import { Trans, useTranslation } from 'react-i18next';
+import { Flexbox } from 'react-layout-kit';
+
+const useStyles = createStyles(({ css, prefixCls, token }) => ({
+  steps: css`
+    margin-block-start: 32px;
+    &.${prefixCls}-steps-small .${prefixCls}-steps-item-title {
+      margin-block-end: 16px;
+      font-size: 16px;
+      font-weight: bold;
+    }
+
+    .${prefixCls}-steps-item-description {
+      margin-block-end: 24px;
+    }
+
+    .${prefixCls}-steps-icon {
+      color: ${readableColor(token.colorPrimary)} !important;
+    }
+  `,
+}));
+
+const SetupGuide = memo(() => {
+  const { styles } = useStyles();
+  const { t } = useTranslation('components');
+  return (
+    <TabsNav
+      items={[
+        {
+          children: (
+            <Steps
+              className={styles.steps}
+              direction={'vertical'}
+              items={[
+                {
+                  description: (
+                    <Trans i18nKey={'OllamaSetupGuide.install.description'} ns={'components'}>
+                      请确认你已经开启 Ollama ,如果没有安装 Ollama ,请前往官网
+                      <Link href={'https://ollama.com/download'}>下载</Link>
+                    </Trans>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.install.title'),
+                },
+                {
+                  description: (
+                    <Flexbox gap={8}>
+                      {t('OllamaSetupGuide.cors.description')}
+
+                      <Flexbox gap={8}>
+                        {t('OllamaSetupGuide.cors.macos')}
+                        <Snippet language={'bash'}>
+                          {/* eslint-disable-next-line react/no-unescaped-entities */}
+                          launchctl setenv OLLAMA_ORIGINS "*"
+                        </Snippet>
+                        {t('OllamaSetupGuide.cors.reboot')}
+                      </Flexbox>
+                    </Flexbox>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.cors.title'),
+                },
+              ]}
+              size={'small'}
+            />
+          ),
+          key: 'macos',
+          label: 'macOS',
+        },
+        {
+          children: (
+            <Steps
+              className={styles.steps}
+              direction={'vertical'}
+              items={[
+                {
+                  description: (
+                    <Trans i18nKey={'OllamaSetupGuide.install.description'} ns={'components'}>
+                      请确认你已经开启 Ollama ,如果没有安装 Ollama ,请前往官网
+                      <Link href={'https://ollama.com/download'}>下载</Link>
+                    </Trans>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.install.title'),
+                },
+                {
+                  description: (
+                    <Flexbox gap={8}>
+                      {t('OllamaSetupGuide.cors.description')}
+                      <div>{t('OllamaSetupGuide.cors.windows')}</div>
+                      <div>{t('OllamaSetupGuide.cors.reboot')}</div>
+                    </Flexbox>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.cors.title'),
+                },
+              ]}
+              size={'small'}
+            />
+          ),
+          key: 'windows',
+          label: t('OllamaSetupGuide.install.windowsTab'),
+        },
+        {
+          children: (
+            <Steps
+              className={styles.steps}
+              direction={'vertical'}
+              items={[
+                {
+                  description: (
+                    <Flexbox gap={8}>
+                      {t('OllamaSetupGuide.install.linux.command')}
+                      <Snippet language={'bash'}>
+                        curl -fsSL https://ollama.com/install.sh | sh
+                      </Snippet>
+                      <div>
+                        <Trans i18nKey={'OllamaSetupGuide.install.linux.manual'} ns={'components'}>
+                          或者,你也可以参考
+                          <Link href={'https://github.com/ollama/ollama/blob/main/docs/linux.md'}>
+                            Linux 手动安装指南
+                          </Link>
+
+                        </Trans>
+                      </div>
+                    </Flexbox>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.install.title'),
+                },
+                {
+                  description: (
+                    <Flexbox gap={8}>
+                      <div>{t('OllamaSetupGuide.cors.description')}</div>
+
+                      <div>{t('OllamaSetupGuide.cors.linux.systemd')}</div>
+                      {/* eslint-disable-next-line react/no-unescaped-entities */}
+                      <Snippet language={'bash'}> sudo systemctl edit ollama.service</Snippet>
+                      {t('OllamaSetupGuide.cors.linux.env')}
+                      <Highlighter
+                        // eslint-disable-next-line react/no-children-prop
+                        children={`[Service]
+
+Environment="OLLAMA_ORIGINS=*"`}
+                        fileName={'ollama.service'}
+                        fullFeatured
+                        language={'bash'}
+                        showLanguage
+                      />
+                      {t('OllamaSetupGuide.cors.linux.reboot')}
+                    </Flexbox>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.cors.title'),
+                },
+              ]}
+              size={'small'}
+            />
+          ),
+          key: 'linux',
+          label: 'Linux',
+        },
+        {
+          children: (
+            <Steps
+              className={styles.steps}
+              direction={'vertical'}
+              items={[
+                {
+                  description: (
+                    <Flexbox gap={8}>
+                      {t('OllamaSetupGuide.install.description')}
+                      <div>{t('OllamaSetupGuide.install.docker')}</div>
+                      <Snippet language={'bash'}>docker pull ollama/ollama</Snippet>
+                    </Flexbox>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.install.title'),
+                },
+                {
+                  description: (
+                    <Flexbox gap={8}>
+                      {t('OllamaSetupGuide.cors.description')}
+                      <Highlighter
+                        fileName={'ollama.service'}
+                        fullFeatured
+                        language={'bash'}
+                        showLanguage
+                      >
+                        {/* eslint-disable-next-line react/no-unescaped-entities */}
+                        docker run -d --gpus=all -v ollama:/root/.ollama -e OLLAMA_ORIGINS="*" -p
+                        11434:11434 --name ollama ollama/ollama
+                      </Highlighter>
+                    </Flexbox>
+                  ),
+                  status: 'process',
+                  title: t('OllamaSetupGuide.cors.title'),
+                },
+              ]}
+              size={'small'}
+            />
+          ),
+          key: 'docker',
+          label: 'Docker',
+        },
+      ]}
+    />
+  );
+});
+
+export default SetupGuide;
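All of the guide's copy comes from the new `OllamaSetupGuide` namespace added to `src/locales/default/components.ts` (+26 lines in the file list). The `t()`/`Trans` keys consumed above imply a structure roughly like the following sketch; the keys are taken from the component, but the English strings are paraphrases of the intent, not the shipped wording:

```ts
// Sketch of the locale shape inferred from the keys used by OllamaSetupGuide.
const components = {
  OllamaSetupGuide: {
    cors: {
      description: 'Ollama must allow cross-origin requests before the browser can reach it.',
      linux: {
        env: 'Add the environment variable under the [Service] section:',
        reboot: 'Reload systemd and restart Ollama.',
        systemd: 'Edit the systemd unit for the ollama service:',
      },
      macos: 'Set the variable via launchctl:',
      reboot: 'Restart the Ollama app once it takes effect.',
      title: 'Configure Ollama for cross-origin access',
      windows: 'Set OLLAMA_ORIGINS to * in the system environment variables.',
    },
    install: {
      description: 'Make sure Ollama is running; if not, download it from the official site.',
      docker: 'Pull the official image:',
      linux: {
        command: 'Install with the official script:',
        manual: 'Alternatively, follow the Linux manual installation guide.',
      },
      title: 'Install and start Ollama',
      windowsTab: 'Windows',
    },
  },
};

export default components;
```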
package/src/config/aiModels/ollama.ts
@@ -1,6 +1,18 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
 const ollamaChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek-R1 是一款强化学习(RL)驱动的推理模型,解决了模型中的重复性和可读性问题。在 RL 之前,DeepSeek-R1 引入了冷启动数据,进一步优化了推理性能。它在数学、代码和推理任务中与 OpenAI-o1 表现相当,并且通过精心设计的训练方法,提升了整体效果。',
+    displayName: 'DeepSeek R1',
+    enabled: true,
+    id: 'deepseek-r1',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -9,7 +21,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Llama 3.1 是 Meta 推出的领先模型,支持高达 405B 参数,可应用于复杂对话、多语言翻译和数据分析领域。',
     displayName: 'Llama 3.1 8B',
-    enabled: true,
     id: 'llama3.1',
     type: 'chat',
   },
@@ -34,7 +45,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Code Llama 是一款专注于代码生成和讨论的 LLM,结合广泛的编程语言支持,适用于开发者环境。',
     displayName: 'Code Llama 7B',
-    enabled: true,
     id: 'codellama',
     type: 'chat',
   },
@@ -69,7 +79,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'QwQ 是一个实验研究模型,专注于提高 AI 推理能力。',
     displayName: 'QwQ 32B',
-    enabled: true,
     id: 'qwq',
     releasedAt: '2024-11-28',
     type: 'chat',
@@ -95,7 +104,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'Qwen2.5 是阿里巴巴的新一代大规模语言模型,以优异的性能支持多元化的应用需求。',
     displayName: 'Qwen2.5 7B',
-    enabled: true,
     id: 'qwen2.5',
     type: 'chat',
   },
@@ -195,7 +203,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'Phi-3 是微软推出的轻量级开放模型,适用于高效集成和大规模知识推理。',
     displayName: 'Phi-3 3.8B',
-    enabled: true,
     id: 'phi3',
     type: 'chat',
   },
@@ -211,7 +218,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'WizardLM 2 是微软AI提供的语言模型,在复杂对话、多语言、推理和智能助手领域表现尤为出色。',
     displayName: 'WizardLM 2 7B',
-    enabled: true,
     id: 'wizardlm2',
     type: 'chat',
   },
@@ -227,7 +233,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'MathΣtral 专为科学研究和数学推理设计,提供有效的计算能力和结果解释。',
     displayName: 'MathΣtral 7B',
-    enabled: true,
     id: 'mathstral',
     type: 'chat',
   },
@@ -238,7 +243,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'Mistral 是 Mistral AI 发布的 7B 模型,适合多变的语言处理需求。',
     displayName: 'Mistral 7B',
-    enabled: true,
     id: 'mistral',
     type: 'chat',
   },
@@ -250,7 +254,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Mixtral 是 Mistral AI 的专家模型,具有开源权重,并在代码生成和语言理解方面提供支持。',
     displayName: 'Mixtral 8x7B',
-    enabled: true,
     id: 'mixtral',
     type: 'chat',
   },
@@ -270,7 +273,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Mixtral Large 是 Mistral 的旗舰模型,结合代码生成、数学和推理的能力,支持 128k 上下文窗口。',
     displayName: 'Mixtral Large 123B',
-    enabled: true,
     id: 'mistral-large',
     type: 'chat',
   },
@@ -281,7 +283,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'Mistral Nemo 由 Mistral AI 和 NVIDIA 合作推出,是高效性能的 12B 模型。',
     displayName: 'Mixtral Nemo 12B',
-    enabled: true,
     id: 'mistral-nemo',
     type: 'chat',
   },
@@ -289,7 +290,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'Codestral 是 Mistral AI 的首款代码模型,为代码生成任务提供优异支持。',
     displayName: 'Codestral 22B',
-    enabled: true,
     id: 'codestral',
     type: 'chat',
   },
@@ -297,7 +297,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 8192,
     description: 'Aya 23 是 Cohere 推出的多语言模型,支持 23 种语言,为多元化语言应用提供便利。',
     displayName: 'Aya 23 8B',
-    enabled: true,
     id: 'aya',
     type: 'chat',
   },
@@ -315,7 +314,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 131_072,
     description: 'Command R 是优化用于对话和长上下文任务的LLM,特别适合动态交互与知识管理。',
     displayName: 'Command R 35B',
-    enabled: true,
     id: 'command-r',
     type: 'chat',
   },
@@ -326,7 +324,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 131_072,
     description: 'Command R+ 是一款高性能的大型语言模型,专为真实企业场景和复杂应用而设计。',
     displayName: 'Command R+ 104B',
-    enabled: true,
     id: 'command-r-plus',
     type: 'chat',
   },
@@ -334,7 +331,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'DeepSeek V2 是高效的 Mixture-of-Experts 语言模型,适用于经济高效的处理需求。',
     displayName: 'DeepSeek V2 16B',
-    enabled: true,
     id: 'deepseek-v2',
     type: 'chat',
   },
@@ -350,7 +346,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'DeepSeek Coder V2 是开源的混合专家代码模型,在代码任务方面表现优异,与 GPT4-Turbo 相媲美。',
     displayName: 'DeepSeek Coder V2 16B',
-    enabled: true,
     id: 'deepseek-coder-v2',
     type: 'chat',
   },
@@ -369,7 +364,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 4096,
     description: 'LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。',
     displayName: 'LLaVA 7B',
-    enabled: true,
     id: 'llava',
     type: 'chat',
   },
@@ -401,7 +395,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'MiniCPM-V 是 OpenBMB 推出的新一代多模态大模型,具备卓越的 OCR 识别和多模态理解能力,支持广泛的应用场景。',
     displayName: 'MiniCPM-V 8B',
-    enabled: true,
     id: 'minicpm-v',
     type: 'chat',
   },
package/src/config/modelProviders/ollama.ts
@@ -326,6 +326,7 @@ const Ollama: ModelProviderCard = {
       vision: true,
     },
   ],
+  checkModel: 'deepseek-r1',
   defaultShowBrowserRequest: true,
   description:
     'Ollama 提供的模型广泛涵盖代码生成、数学运算、多语种处理和对话互动等领域,支持企业级和本地化部署的多样化需求。',
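Together with the aiModels change above — which makes DeepSeek R1 the only Ollama chat model enabled by default — `checkModel: 'deepseek-r1'` means the Checker's one-message probe now targets DeepSeek R1. For a local Ollama, that probe boils down to something like the following raw call; this is a sketch against Ollama's public /api/chat endpoint, not how the app actually issues it (the app routes through chatService.fetchPresetTaskResult as shown in the Checker diff):

```ts
// Sketch: the connectivity check reduced to a direct Ollama API call.
const checkOllama = async () => {
  const res = await fetch('http://127.0.0.1:11434/api/chat', {
    body: JSON.stringify({
      messages: [{ content: 'hello', role: 'user' }],
      model: 'deepseek-r1',
      stream: false,
    }),
    method: 'POST',
  });
  if (!res.ok) throw new Error(`Connectivity check failed: ${res.status}`);
  return res.json(); // contains the assistant message when the model responds
};
```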