@lobehub/chat 1.49.10 → 1.49.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/components.json +24 -0
  4. package/locales/ar/modelProvider.json +0 -24
  5. package/locales/ar/models.json +15 -0
  6. package/locales/bg-BG/components.json +24 -0
  7. package/locales/bg-BG/modelProvider.json +0 -24
  8. package/locales/bg-BG/models.json +15 -0
  9. package/locales/de-DE/components.json +24 -0
  10. package/locales/de-DE/modelProvider.json +0 -24
  11. package/locales/de-DE/models.json +15 -0
  12. package/locales/en-US/components.json +24 -0
  13. package/locales/en-US/modelProvider.json +0 -24
  14. package/locales/en-US/models.json +15 -0
  15. package/locales/es-ES/components.json +24 -0
  16. package/locales/es-ES/modelProvider.json +0 -24
  17. package/locales/es-ES/models.json +15 -0
  18. package/locales/fa-IR/components.json +24 -0
  19. package/locales/fa-IR/modelProvider.json +0 -24
  20. package/locales/fa-IR/models.json +15 -0
  21. package/locales/fr-FR/components.json +24 -0
  22. package/locales/fr-FR/modelProvider.json +0 -24
  23. package/locales/fr-FR/models.json +15 -0
  24. package/locales/it-IT/components.json +24 -0
  25. package/locales/it-IT/modelProvider.json +0 -24
  26. package/locales/it-IT/models.json +15 -0
  27. package/locales/ja-JP/components.json +24 -0
  28. package/locales/ja-JP/modelProvider.json +0 -24
  29. package/locales/ja-JP/models.json +15 -0
  30. package/locales/ko-KR/components.json +24 -0
  31. package/locales/ko-KR/modelProvider.json +0 -24
  32. package/locales/ko-KR/models.json +4 -0
  33. package/locales/nl-NL/components.json +24 -0
  34. package/locales/nl-NL/modelProvider.json +0 -24
  35. package/locales/nl-NL/models.json +15 -0
  36. package/locales/pl-PL/components.json +24 -0
  37. package/locales/pl-PL/modelProvider.json +0 -24
  38. package/locales/pl-PL/models.json +15 -0
  39. package/locales/pt-BR/components.json +24 -0
  40. package/locales/pt-BR/modelProvider.json +0 -24
  41. package/locales/pt-BR/models.json +15 -0
  42. package/locales/ru-RU/components.json +24 -0
  43. package/locales/ru-RU/modelProvider.json +0 -24
  44. package/locales/ru-RU/models.json +15 -0
  45. package/locales/tr-TR/components.json +24 -0
  46. package/locales/tr-TR/modelProvider.json +0 -24
  47. package/locales/tr-TR/models.json +15 -0
  48. package/locales/vi-VN/components.json +24 -0
  49. package/locales/vi-VN/modelProvider.json +0 -24
  50. package/locales/vi-VN/models.json +15 -0
  51. package/locales/zh-CN/components.json +24 -0
  52. package/locales/zh-CN/modelProvider.json +0 -24
  53. package/locales/zh-CN/models.json +16 -1
  54. package/locales/zh-TW/components.json +24 -0
  55. package/locales/zh-TW/modelProvider.json +0 -24
  56. package/locales/zh-TW/models.json +15 -0
  57. package/package.json +1 -1
  58. package/src/app/(main)/chat/(workspace)/@portal/_layout/Mobile.tsx +1 -0
  59. package/src/app/(main)/chat/(workspace)/_layout/Desktop/Portal.tsx +26 -2
  60. package/src/app/(main)/settings/provider/(detail)/[id]/page.tsx +10 -3
  61. package/src/app/(main)/settings/provider/(detail)/ollama/CheckError.tsx +70 -0
  62. package/src/app/(main)/settings/provider/(detail)/ollama/Container.tsx +57 -0
  63. package/src/app/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/index.tsx +127 -0
  64. package/src/app/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/useDownloadMonitor.ts +29 -0
  65. package/src/app/(main)/settings/provider/(detail)/ollama/page.tsx +2 -7
  66. package/src/app/(main)/settings/provider/features/ProviderConfig/Checker.tsx +90 -69
  67. package/src/app/(main)/settings/provider/features/ProviderConfig/index.tsx +6 -6
  68. package/src/components/FormAction/index.tsx +66 -0
  69. package/src/components/OllamaSetupGuide/index.tsx +217 -0
  70. package/src/components/Thinking/index.tsx +14 -16
  71. package/src/config/aiModels/ollama.ts +12 -19
  72. package/src/config/modelProviders/ollama.ts +1 -0
  73. package/src/config/modelProviders/siliconcloud.ts +2 -2
  74. package/src/database/repositories/aiInfra/index.ts +33 -2
  75. package/src/database/server/models/aiProvider.ts +5 -1
  76. package/src/features/Conversation/Error/OllamaBizError/SetupGuide.tsx +2 -209
  77. package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx +7 -58
  78. package/src/libs/agent-runtime/ollama/index.ts +1 -1
  79. package/src/libs/agent-runtime/siliconcloud/index.ts +33 -1
  80. package/src/locales/default/components.ts +26 -0
  81. package/src/locales/default/modelProvider.ts +0 -26
  82. package/src/server/routers/lambda/aiProvider.ts +2 -10
  83. package/src/services/aiProvider/client.ts +2 -8
  84. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +10 -10
  85. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +4 -3
  86. package/src/store/chat/slices/aiChat/initialState.ts +1 -1
  87. package/src/store/chat/slices/message/action.ts +4 -3
  88. package/src/store/global/initialState.ts +2 -0
  89. package/src/store/global/selectors.ts +2 -0
  90. package/src/store/serverConfig/selectors.test.ts +3 -0
  91. package/src/store/serverConfig/store.test.ts +3 -2
  92. package/src/store/serverConfig/store.ts +1 -1
  93. package/src/store/user/slices/common/action.test.ts +1 -0
  94. package/src/types/serverConfig.ts +1 -1
  95. package/src/app/(main)/settings/provider/(detail)/ollama/Checker.tsx +0 -73
package/src/config/aiModels/ollama.ts
@@ -1,6 +1,18 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
 const ollamaChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek-R1 是一款强化学习(RL)驱动的推理模型,解决了模型中的重复性和可读性问题。在 RL 之前,DeepSeek-R1 引入了冷启动数据,进一步优化了推理性能。它在数学、代码和推理任务中与 OpenAI-o1 表现相当,并且通过精心设计的训练方法,提升了整体效果。',
+    displayName: 'DeepSeek R1',
+    enabled: true,
+    id: 'deepseek-r1',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -9,7 +21,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Llama 3.1 是 Meta 推出的领先模型,支持高达 405B 参数,可应用于复杂对话、多语言翻译和数据分析领域。',
     displayName: 'Llama 3.1 8B',
-    enabled: true,
     id: 'llama3.1',
     type: 'chat',
   },
@@ -34,7 +45,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Code Llama 是一款专注于代码生成和讨论的 LLM,结合广泛的编程语言支持,适用于开发者环境。',
     displayName: 'Code Llama 7B',
-    enabled: true,
     id: 'codellama',
     type: 'chat',
   },
@@ -69,7 +79,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'QwQ 是一个实验研究模型,专注于提高 AI 推理能力。',
     displayName: 'QwQ 32B',
-    enabled: true,
     id: 'qwq',
     releasedAt: '2024-11-28',
     type: 'chat',
@@ -95,7 +104,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'Qwen2.5 是阿里巴巴的新一代大规模语言模型,以优异的性能支持多元化的应用需求。',
     displayName: 'Qwen2.5 7B',
-    enabled: true,
     id: 'qwen2.5',
     type: 'chat',
   },
@@ -195,7 +203,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'Phi-3 是微软推出的轻量级开放模型,适用于高效集成和大规模知识推理。',
     displayName: 'Phi-3 3.8B',
-    enabled: true,
     id: 'phi3',
     type: 'chat',
   },
@@ -211,7 +218,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'WizardLM 2 是微软AI提供的语言模型,在复杂对话、多语言、推理和智能助手领域表现尤为出色。',
     displayName: 'WizardLM 2 7B',
-    enabled: true,
     id: 'wizardlm2',
     type: 'chat',
   },
@@ -227,7 +233,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'MathΣtral 专为科学研究和数学推理设计,提供有效的计算能力和结果解释。',
     displayName: 'MathΣtral 7B',
-    enabled: true,
     id: 'mathstral',
     type: 'chat',
   },
@@ -238,7 +243,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'Mistral 是 Mistral AI 发布的 7B 模型,适合多变的语言处理需求。',
     displayName: 'Mistral 7B',
-    enabled: true,
     id: 'mistral',
     type: 'chat',
   },
@@ -250,7 +254,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Mixtral 是 Mistral AI 的专家模型,具有开源权重,并在代码生成和语言理解方面提供支持。',
     displayName: 'Mixtral 8x7B',
-    enabled: true,
     id: 'mixtral',
     type: 'chat',
   },
@@ -270,7 +273,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'Mixtral Large 是 Mistral 的旗舰模型,结合代码生成、数学和推理的能力,支持 128k 上下文窗口。',
     displayName: 'Mixtral Large 123B',
-    enabled: true,
     id: 'mistral-large',
     type: 'chat',
   },
@@ -281,7 +283,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     description: 'Mistral Nemo 由 Mistral AI 和 NVIDIA 合作推出,是高效性能的 12B 模型。',
     displayName: 'Mixtral Nemo 12B',
-    enabled: true,
     id: 'mistral-nemo',
     type: 'chat',
   },
@@ -289,7 +290,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'Codestral 是 Mistral AI 的首款代码模型,为代码生成任务提供优异支持。',
     displayName: 'Codestral 22B',
-    enabled: true,
     id: 'codestral',
     type: 'chat',
   },
@@ -297,7 +297,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 8192,
     description: 'Aya 23 是 Cohere 推出的多语言模型,支持 23 种语言,为多元化语言应用提供便利。',
     displayName: 'Aya 23 8B',
-    enabled: true,
     id: 'aya',
     type: 'chat',
   },
@@ -315,7 +314,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 131_072,
     description: 'Command R 是优化用于对话和长上下文任务的LLM,特别适合动态交互与知识管理。',
     displayName: 'Command R 35B',
-    enabled: true,
     id: 'command-r',
     type: 'chat',
   },
@@ -326,7 +324,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 131_072,
     description: 'Command R+ 是一款高性能的大型语言模型,专为真实企业场景和复杂应用而设计。',
     displayName: 'Command R+ 104B',
-    enabled: true,
     id: 'command-r-plus',
     type: 'chat',
   },
@@ -334,7 +331,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'DeepSeek V2 是高效的 Mixture-of-Experts 语言模型,适用于经济高效的处理需求。',
     displayName: 'DeepSeek V2 16B',
-    enabled: true,
     id: 'deepseek-v2',
     type: 'chat',
   },
@@ -350,7 +346,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'DeepSeek Coder V2 是开源的混合专家代码模型,在代码任务方面表现优异,与 GPT4-Turbo 相媲美。',
     displayName: 'DeepSeek Coder V2 16B',
-    enabled: true,
     id: 'deepseek-coder-v2',
     type: 'chat',
   },
@@ -369,7 +364,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     contextWindowTokens: 4096,
     description: 'LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。',
     displayName: 'LLaVA 7B',
-    enabled: true,
     id: 'llava',
     type: 'chat',
   },
@@ -401,7 +395,6 @@ const ollamaChatModels: AIChatModelCard[] = [
     description:
       'MiniCPM-V 是 OpenBMB 推出的新一代多模态大模型,具备卓越的 OCR 识别和多模态理解能力,支持广泛的应用场景。',
     displayName: 'MiniCPM-V 8B',
-    enabled: true,
     id: 'minicpm-v',
     type: 'chat',
   },
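The net effect in this file: DeepSeek R1 becomes the only Ollama model that ships enabled by default, while `enabled: true` is dropped from every other entry. A minimal sketch of how such a list is typically consumed (the helper below is hypothetical, not part of the diff):

import { AIChatModelCard } from '@/types/aiModel';

// Hypothetical helper: after this change, only 'deepseek-r1' survives the
// filter for the Ollama provider's default-enabled model set.
const defaultEnabledIds = (models: AIChatModelCard[]): string[] =>
  models.filter((m) => m.enabled).map((m) => m.id);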
package/src/config/modelProviders/ollama.ts
@@ -326,6 +326,7 @@ const Ollama: ModelProviderCard = {
       vision: true,
     },
   ],
+  checkModel: 'deepseek-r1',
   defaultShowBrowserRequest: true,
   description:
     'Ollama 提供的模型广泛涵盖代码生成、数学运算、多语种处理和对话互动等领域,支持企业级和本地化部署的多样化需求。',
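`checkModel` names the model the provider's connectivity check runs against, so the check now targets the newly added `deepseek-r1`. A sketch of what such a check amounts to against a local Ollama endpoint (illustrative only — the app routes this through its own checker UI; the HTTP shape below follows Ollama's public /api/chat endpoint):

// Illustrative sketch: verify the endpoint answers for the configured checkModel.
const checkOllama = async (checkModel: string): Promise<boolean> => {
  const res = await fetch('http://127.0.0.1:11434/api/chat', {
    body: JSON.stringify({
      messages: [{ content: 'hi', role: 'user' }],
      model: checkModel,
      stream: false,
    }),
    headers: { 'Content-Type': 'application/json' },
    method: 'POST',
  });
  return res.ok;
};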
package/src/config/modelProviders/siliconcloud.ts
@@ -1,6 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';
 
-// ref :https://siliconflow.cn/zh-cn/pricing
+// ref: https://siliconflow.cn/zh-cn/pricing
 const SiliconCloud: ModelProviderCard = {
   chatModels: [
     {
@@ -582,7 +582,7 @@ const SiliconCloud: ModelProviderCard = {
       vision: true,
     },
   ],
-  checkModel: 'Qwen/Qwen2.5-7B-Instruct',
+  checkModel: 'Pro/Qwen/Qwen2-1.5B-Instruct',
   description: 'SiliconCloud,基于优秀开源基础模型的高性价比 GenAI 云服务',
   id: 'siliconcloud',
   modelList: { showModelFetcher: true },
package/src/database/repositories/aiInfra/index.ts
@@ -5,9 +5,16 @@ import { AiModelModel } from '@/database/server/models/aiModel';
 import { AiProviderModel } from '@/database/server/models/aiProvider';
 import { LobeChatDatabase } from '@/database/type';
 import { AIChatModelCard, AiModelSourceEnum, AiProviderModelListItem } from '@/types/aiModel';
-import { AiProviderListItem, EnabledAiModel } from '@/types/aiProvider';
+import {
+  AiProviderDetailItem,
+  AiProviderListItem,
+  AiProviderRuntimeState,
+  EnabledAiModel,
+} from '@/types/aiProvider';
 import { ProviderConfig } from '@/types/user/settings';
-import { mergeArrayById } from '@/utils/merge';
+import { merge, mergeArrayById } from '@/utils/merge';
+
+type DecryptUserKeyVaults = (encryptKeyVaultsStr: string | null) => Promise<any>;
 
 export class AiInfraRepos {
   private userId: string;
@@ -112,6 +119,30 @@ export class AiInfraRepos {
     return mergeArrayById(defaultModels, aiModels) as AiProviderModelListItem[];
   };
 
+  getAiProviderRuntimeState = async (
+    decryptor?: DecryptUserKeyVaults,
+  ): Promise<AiProviderRuntimeState> => {
+    const result = await this.aiProviderModel.getAiProviderRuntimeConfig(decryptor);
+
+    const runtimeConfig = result;
+
+    Object.entries(result).forEach(([key, value]) => {
+      runtimeConfig[key] = merge(this.providerConfigs[key] || {}, value);
+    });
+
+    const enabledAiProviders = await this.getUserEnabledProviderList();
+
+    const enabledAiModels = await this.getEnabledModels();
+
+    return { enabledAiModels, enabledAiProviders, runtimeConfig };
+  };
+
+  getAiProviderDetail = async (id: string, decryptor?: DecryptUserKeyVaults) => {
+    const config = await this.aiProviderModel.getAiProviderById(id, decryptor);
+
+    return merge(this.providerConfigs[id] || {}, config) as AiProviderDetailItem;
+  };
+
   /**
    * Fetch builtin models from config
    */
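The two new repository methods layer user-level provider rows over the builtin `providerConfigs` defaults. A minimal usage sketch (assuming an `AiInfraRepos` instance; `JSON.parse` stands in for real key-vault decryption):

import { AiInfraRepos } from '@/database/repositories/aiInfra';

// Sketch only: a trivial decryptor satisfying DecryptUserKeyVaults.
const decryptor = async (encrypted: string | null) => (encrypted ? JSON.parse(encrypted) : {});

const loadProviderState = async (repos: AiInfraRepos) => {
  // Aggregated client state: enabled models, enabled providers, merged runtime config.
  const state = await repos.getAiProviderRuntimeState(decryptor);

  // Single provider detail, with builtin defaults merged beneath the user's row.
  const ollama = await repos.getAiProviderDetail('ollama', decryptor);

  return { ollama, state };
};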
package/src/database/server/models/aiProvider.ts
@@ -202,7 +202,11 @@ export class AiProviderModel {
 
     const keyVaults = !!result.keyVaults ? await decrypt(result.keyVaults) : {};
 
-    return { ...result, keyVaults } as AiProviderDetailItem;
+    return {
+      ...result,
+      fetchOnClient: typeof result.fetchOnClient === 'boolean' ? result.fetchOnClient : undefined,
+      keyVaults,
+    } as AiProviderDetailItem;
   };
 
   getAiProviderRuntimeConfig = async (decryptor?: DecryptUserKeyVaults) => {
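The `fetchOnClient` normalization matters because of how merging treats the value downstream: with a lodash-style merge (the repo's `@/utils/merge` is assumed to behave this way), an explicit `null` from the database row would clobber a builtin default, whereas `undefined` lets the default win. A small illustration using `lodash-es` directly:

import { merge } from 'lodash-es';

// An explicit null from the DB row overrides the builtin default...
merge({ fetchOnClient: true }, { fetchOnClient: null }); // => { fetchOnClient: null }

// ...while undefined sources are skipped, so the default survives.
merge({ fetchOnClient: true }, { fetchOnClient: undefined }); // => { fetchOnClient: true }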
package/src/features/Conversation/Error/OllamaBizError/SetupGuide.tsx
@@ -1,219 +1,12 @@
-import { Highlighter, Snippet, TabsNav } from '@lobehub/ui';
-import { Steps } from 'antd';
-import { createStyles } from 'antd-style';
-import Link from 'next/link';
-import { readableColor } from 'polished';
 import { memo } from 'react';
-import { Trans, useTranslation } from 'react-i18next';
-import { Flexbox } from 'react-layout-kit';
 
+import OllamaSetupGuide from '@/components/OllamaSetupGuide';
 import { ErrorActionContainer } from '@/features/Conversation/Error/style';
 
-const useStyles = createStyles(({ css, prefixCls, token }) => ({
-  steps: css`
-    margin-block-start: 32px;
-    &.${prefixCls}-steps-small .${prefixCls}-steps-item-title {
-      margin-block-end: 16px;
-      font-size: 16px;
-      font-weight: bold;
-    }
-
-    .${prefixCls}-steps-item-description {
-      margin-block-end: 24px;
-    }
-
-    .${prefixCls}-steps-icon {
-      color: ${readableColor(token.colorPrimary)} !important;
-    }
-  `,
-}));
-
 const SetupGuide = memo(() => {
-  const { styles } = useStyles();
-  const { t } = useTranslation('modelProvider');
   return (
     <ErrorActionContainer style={{ paddingBlock: 0 }}>
-      <TabsNav
-        items={[
-          {
-            children: (
-              <Steps
-                className={styles.steps}
-                direction={'vertical'}
-                items={[
-                  {
-                    description: (
-                      <Trans i18nKey={'ollama.setup.install.description'} ns={'modelProvider'}>
-                        请确认你已经开启 Ollama ,如果没有安装 Ollama ,请前往官网
-                        <Link href={'https://ollama.com/download'}>下载</Link>
-                      </Trans>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.install.title'),
-                  },
-                  {
-                    description: (
-                      <Flexbox gap={8}>
-                        {t('ollama.setup.cors.description')}
-
-                        <Flexbox gap={8}>
-                          {t('ollama.setup.cors.macos')}
-                          <Snippet language={'bash'}>
-                            {/* eslint-disable-next-line react/no-unescaped-entities */}
-                            launchctl setenv OLLAMA_ORIGINS "*"
-                          </Snippet>
-                          {t('ollama.setup.cors.reboot')}
-                        </Flexbox>
-                      </Flexbox>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.cors.title'),
-                  },
-                ]}
-                size={'small'}
-              />
-            ),
-            key: 'macos',
-            label: 'macOS',
-          },
-          {
-            children: (
-              <Steps
-                className={styles.steps}
-                direction={'vertical'}
-                items={[
-                  {
-                    description: (
-                      <Trans i18nKey={'ollama.setup.install.description'} ns={'modelProvider'}>
-                        请确认你已经开启 Ollama ,如果没有安装 Ollama ,请前往官网
-                        <Link href={'https://ollama.com/download'}>下载</Link>
-                      </Trans>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.install.title'),
-                  },
-                  {
-                    description: (
-                      <Flexbox gap={8}>
-                        {t('ollama.setup.cors.description')}
-                        <div>{t('ollama.setup.cors.windows')}</div>
-                        <div>{t('ollama.setup.cors.reboot')}</div>
-                      </Flexbox>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.cors.title'),
-                  },
-                ]}
-                size={'small'}
-              />
-            ),
-            key: 'windows',
-            label: t('ollama.setup.install.windowsTab'),
-          },
-          {
-            children: (
-              <Steps
-                className={styles.steps}
-                direction={'vertical'}
-                items={[
-                  {
-                    description: (
-                      <Flexbox gap={8}>
-                        {t('ollama.setup.install.linux.command')}
-                        <Snippet language={'bash'}>
-                          curl -fsSL https://ollama.com/install.sh | sh
-                        </Snippet>
-                        <div>
-                          <Trans i18nKey={'ollama.setup.install.linux.manual'} ns={'modelProvider'}>
-                            或者,你也可以参考
-                            <Link href={'https://github.com/ollama/ollama/blob/main/docs/linux.md'}>
-                              Linux 手动安装指南
-                            </Link>
-                            自行安装
-                          </Trans>
-                        </div>
-                      </Flexbox>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.install.title'),
-                  },
-                  {
-                    description: (
-                      <Flexbox gap={8}>
-                        <div>{t('ollama.setup.cors.description')}</div>
-
-                        <div>{t('ollama.setup.cors.linux.systemd')}</div>
-                        {/* eslint-disable-next-line react/no-unescaped-entities */}
-                        <Snippet language={'bash'}> sudo systemctl edit ollama.service</Snippet>
-                        {t('ollama.setup.cors.linux.env')}
-                        <Highlighter
-                          // eslint-disable-next-line react/no-children-prop
-                          children={`[Service]
-
-Environment="OLLAMA_ORIGINS=*"`}
-                          fileName={'ollama.service'}
-                          fullFeatured
-                          language={'bash'}
-                          showLanguage
-                        />
-                        {t('ollama.setup.cors.linux.reboot')}
-                      </Flexbox>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.cors.title'),
-                  },
-                ]}
-                size={'small'}
-              />
-            ),
-            key: 'linux',
-            label: 'Linux',
-          },
-          {
-            children: (
-              <Steps
-                className={styles.steps}
-                direction={'vertical'}
-                items={[
-                  {
-                    description: (
-                      <Flexbox gap={8}>
-                        {t('ollama.setup.install.description')}
-                        <div>{t('ollama.setup.install.docker')}</div>
-                        <Snippet language={'bash'}>docker pull ollama/ollama</Snippet>
-                      </Flexbox>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.install.title'),
-                  },
-                  {
-                    description: (
-                      <Flexbox gap={8}>
-                        {t('ollama.setup.cors.description')}
-                        <Highlighter
-                          fileName={'ollama.service'}
-                          fullFeatured
-                          language={'bash'}
-                          showLanguage
-                        >
-                          {/* eslint-disable-next-line react/no-unescaped-entities */}
-                          docker run -d --gpus=all -v ollama:/root/.ollama -e OLLAMA_ORIGINS="*" -p
-                          11434:11434 --name ollama ollama/ollama
-                        </Highlighter>
-                      </Flexbox>
-                    ),
-                    status: 'process',
-                    title: t('ollama.setup.cors.title'),
-                  },
-                ]}
-                size={'small'}
-              />
-            ),
-            key: 'docker',
-            label: 'Docker',
-          },
-        ]}
-      />
+      <OllamaSetupGuide />
     </ErrorActionContainer>
   );
 });
package/src/features/Conversation/components/MarkdownElements/LobeThinking/Render.tsx
@@ -1,14 +1,9 @@
-import { Icon } from '@lobehub/ui';
-import { createStyles } from 'antd-style';
-import { BringToFrontIcon, ChevronDown, ChevronRight, Loader2Icon } from 'lucide-react';
-import { memo, useState } from 'react';
-import { useTranslation } from 'react-i18next';
-import { Flexbox } from 'react-layout-kit';
+import { memo } from 'react';
 
+import Thinking from '@/components/Thinking';
 import { ARTIFACT_THINKING_TAG } from '@/const/plugin';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
-import { dotLoading } from '@/styles/loading';
 
 import { MarkdownElementProps } from '../type';
 
@@ -22,64 +17,18 @@ export const isLobeThinkingClosed = (input: string = '') => {
   return input.includes(openTag) && input.includes(closeTag);
 };
 
-const useStyles = createStyles(({ css, token }) => ({
-  container: css`
-    cursor: pointer;
-
-    padding-block: 8px;
-    padding-inline: 12px;
-    padding-inline-end: 12px;
-    border-radius: 8px;
-
-    color: ${token.colorText};
-
-    background: ${token.colorFillQuaternary};
-  `,
-  title: css`
-    overflow: hidden;
-    display: -webkit-box;
-    -webkit-box-orient: vertical;
-    -webkit-line-clamp: 1;
-
-    font-size: 12px;
-    text-overflow: ellipsis;
-  `,
-}));
-
 const Render = memo<MarkdownElementProps>(({ children, id }) => {
-  const { t } = useTranslation('chat');
-  const { styles, cx } = useStyles();
-
   const [isGenerating] = useChatStore((s) => {
     const message = chatSelectors.getMessageById(id)(s);
     return [!isLobeThinkingClosed(message?.content)];
   });
 
-  const [showDetail, setShowDetail] = useState(false);
-
-  const expand = showDetail || isGenerating;
   return (
-    <Flexbox
-      className={styles.container}
-      gap={16}
-      onClick={() => {
-        setShowDetail(!showDetail);
-      }}
-      width={'100%'}
-    >
-      <Flexbox distribution={'space-between'} flex={1} horizontal>
-        <Flexbox gap={8} horizontal>
-          <Icon icon={isGenerating ? Loader2Icon : BringToFrontIcon} spin={isGenerating} />
-          {isGenerating ? (
-            <span className={cx(dotLoading)}>{t('artifact.thinking')}</span>
-          ) : (
-            t('artifact.thought')
-          )}
-        </Flexbox>
-        <Icon icon={expand ? ChevronDown : ChevronRight} />
-      </Flexbox>
-      {expand && children}
-    </Flexbox>
+    <Thinking
+      content={children as string}
+      style={{ width: isGenerating ? '100%' : undefined }}
+      thinking={isGenerating}
+    />
   );
 });
 
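With the inline styles and expand state gone, the renderer delegates entirely to the shared `Thinking` component, using the prop shape visible in the diff. A usage sketch (the wrapper component and its props are illustrative):

import { memo } from 'react';

import Thinking from '@/components/Thinking';

// While streaming, the block stretches to full width and shows the in-progress
// state; once the thinking tag closes it collapses into an expandable summary.
const ReasoningBlock = memo<{ streaming: boolean; text: string }>(({ streaming, text }) => (
  <Thinking content={text} style={{ width: streaming ? '100%' : undefined }} thinking={streaming} />
));

export default ReasoningBlock;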
package/src/libs/agent-runtime/ollama/index.ts
@@ -79,7 +79,7 @@ export class LobeOllamaAI implements LobeRuntimeAI {
 
       throw AgentRuntimeError.chat({
         error: {
-          ...e.error,
+          ...(typeof e.error !== 'string' ? e.error : undefined),
           message: String(e.error?.message || e.message),
           name: e.name,
           status_code: e.status_code,
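The guard exists because spreading a string spreads its characters: when Ollama reports a plain-string error, the old `...e.error` scattered index keys into the payload. A minimal illustration:

const e: { error: any } = { error: 'model "foo" not found' };

// Before the fix: spreading a string scatters its characters into index keys,
// e.g. { 0: 'm', 1: 'o', 2: 'd', ... }.
const before = { ...e.error };

// After: string errors are skipped, so only the explicit fields remain.
const after = { ...(typeof e.error !== 'string' ? e.error : undefined) }; // {}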
package/src/libs/agent-runtime/siliconcloud/index.ts
@@ -1,4 +1,5 @@
-import { ModelProvider } from '../types';
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompletionErrorPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
@@ -10,6 +11,33 @@ export interface SiliconCloudModelCard {
 export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.siliconflow.cn/v1',
   chatCompletion: {
+    handleError: (error: any): Omit<ChatCompletionErrorPayload, 'provider'> | undefined => {
+      let errorResponse: Response | undefined;
+      if (error instanceof Response) {
+        errorResponse = error;
+      } else if ('status' in (error as any)) {
+        errorResponse = error as Response;
+      }
+      if (errorResponse) {
+        if (errorResponse.status === 401) {
+          return {
+            error: errorResponse.status,
+            errorType: AgentRuntimeErrorType.InvalidProviderAPIKey,
+          };
+        }
+
+        if (errorResponse.status === 403) {
+          return {
+            error: errorResponse.status,
+            errorType: AgentRuntimeErrorType.ProviderBizError,
+            message: '请检查 API Key 余额是否充足,或者是否在用未实名的 API Key 访问需要实名的模型。',
+          };
+        }
+      }
+      return {
+        error,
+      };
+    },
     handlePayload: (payload) => {
       return {
         ...payload,
@@ -20,6 +48,10 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_SILICONCLOUD_CHAT_COMPLETION === '1',
   },
+  errorType: {
+    bizError: AgentRuntimeErrorType.ProviderBizError,
+    invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
+  },
   models: {
     transformModel: (m) => {
       const functionCallKeywords = [
package/src/locales/default/components.ts
@@ -88,6 +88,32 @@ export default {
     emptyModel: '没有启用的模型,请前往设置开启',
     provider: '提供商',
   },
+  OllamaSetupGuide: {
+    cors: {
+      description: '因浏览器安全限制,你需要为 Ollama 进行跨域配置后方可正常使用。',
+      linux: {
+        env: '在 [Service] 部分下添加 `Environment`,添加 OLLAMA_ORIGINS 环境变量:',
+        reboot: '重载 systemd 并重启 Ollama',
+        systemd: '调用 systemd 编辑 ollama 服务:',
+      },
+      macos: '请打开「终端」应用程序,并粘贴以下指令,并按回车运行',
+      reboot: '请在执行完成后重启 Ollama 服务',
+      title: '配置 Ollama 允许跨域访问',
+      windows:
+        '在 Windows 上,点击「控制面板」,进入编辑系统环境变量。为您的用户账户新建名为 「OLLAMA_ORIGINS」 的环境变量,值为 * ,点击 「OK/应用」 保存',
+    },
+    install: {
+      description: '请确认你已经开启 Ollama ,如果没有下载 Ollama ,请前往官网<1>下载</1>',
+      docker:
+        '如果你更倾向于使用 Docker,Ollama 也提供了官方 Docker 镜像,你可以通过以下命令拉取:',
+      linux: {
+        command: '通过以下命令安装:',
+        manual: '或者,你也可以参考 <1>Linux 手动安装指南</1> 自行安装',
+      },
+      title: '在本地安装并开启 Ollama 应用',
+      windowsTab: 'Windows (预览版)',
+    },
+  },
   Thinking: {
     thinking: '深度思考中...',
     thought: '已深度思考(用时 {{duration}} 秒)',
  thought: '已深度思考(用时 {{duration}} 秒)',