@lobehub/chat 1.53.12 → 1.54.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/CHANGELOG.md +33 -0
  2. package/Dockerfile +2 -0
  3. package/Dockerfile.database +2 -0
  4. package/changelog/v1.json +12 -0
  5. package/locales/ar/modelProvider.json +0 -1
  6. package/locales/ar/setting.json +12 -9
  7. package/locales/bg-BG/modelProvider.json +0 -1
  8. package/locales/bg-BG/setting.json +12 -9
  9. package/locales/de-DE/modelProvider.json +0 -1
  10. package/locales/de-DE/setting.json +13 -10
  11. package/locales/en-US/modelProvider.json +0 -1
  12. package/locales/en-US/setting.json +12 -9
  13. package/locales/es-ES/modelProvider.json +0 -1
  14. package/locales/es-ES/setting.json +12 -9
  15. package/locales/fa-IR/modelProvider.json +0 -1
  16. package/locales/fa-IR/setting.json +12 -9
  17. package/locales/fr-FR/modelProvider.json +0 -1
  18. package/locales/fr-FR/setting.json +12 -9
  19. package/locales/it-IT/modelProvider.json +0 -1
  20. package/locales/it-IT/setting.json +13 -10
  21. package/locales/ja-JP/modelProvider.json +0 -1
  22. package/locales/ja-JP/setting.json +12 -9
  23. package/locales/ko-KR/modelProvider.json +0 -1
  24. package/locales/ko-KR/setting.json +12 -9
  25. package/locales/nl-NL/modelProvider.json +0 -1
  26. package/locales/nl-NL/setting.json +12 -9
  27. package/locales/pl-PL/modelProvider.json +0 -1
  28. package/locales/pl-PL/setting.json +12 -9
  29. package/locales/pt-BR/modelProvider.json +0 -1
  30. package/locales/pt-BR/setting.json +13 -10
  31. package/locales/ru-RU/modelProvider.json +0 -1
  32. package/locales/ru-RU/setting.json +12 -9
  33. package/locales/tr-TR/modelProvider.json +0 -1
  34. package/locales/tr-TR/setting.json +12 -9
  35. package/locales/vi-VN/modelProvider.json +0 -1
  36. package/locales/vi-VN/setting.json +12 -9
  37. package/locales/zh-CN/modelProvider.json +0 -1
  38. package/locales/zh-CN/setting.json +13 -10
  39. package/locales/zh-TW/modelProvider.json +0 -1
  40. package/locales/zh-TW/setting.json +12 -9
  41. package/package.json +1 -1
  42. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -1
  43. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  44. package/src/components/InfoTooltip/index.tsx +25 -0
  45. package/src/components/Loading/UpdateLoading/index.tsx +19 -0
  46. package/src/config/aiModels/index.ts +3 -0
  47. package/src/config/aiModels/nvidia.ts +155 -0
  48. package/src/config/llm.ts +6 -0
  49. package/src/config/modelProviders/index.ts +4 -0
  50. package/src/config/modelProviders/nvidia.ts +21 -0
  51. package/src/features/ChatInput/ActionBar/Params/ParamsControls.tsx +95 -0
  52. package/src/features/ChatInput/ActionBar/Params/index.tsx +47 -0
  53. package/src/features/ChatInput/ActionBar/config.ts +3 -2
  54. package/src/features/ChatInput/Mobile/index.tsx +1 -1
  55. package/src/features/ModelParamsControl/FrequencyPenalty.tsx +37 -0
  56. package/src/features/ModelParamsControl/PresencePenalty.tsx +35 -0
  57. package/src/features/ModelParamsControl/Temperature.tsx +71 -0
  58. package/src/features/ModelParamsControl/TopP.tsx +39 -0
  59. package/src/features/ModelParamsControl/index.ts +4 -0
  60. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  61. package/src/libs/agent-runtime/nvidia/index.ts +44 -0
  62. package/src/libs/agent-runtime/types/type.ts +1 -0
  63. package/src/locales/default/setting.ts +12 -9
  64. package/src/types/user/settings/keyVaults.ts +1 -0
  65. package/src/features/ChatInput/ActionBar/Temperature.tsx +0 -49
@@ -0,0 +1,155 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const nvidiaChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ functionCall: true,
7
+ },
8
+ contextWindowTokens: 128_000,
9
+ description: '先进的 LLM,擅长推理、数学、常识和函数调用。',
10
+ displayName: 'Llama 3.3 70B Instruct',
11
+ enabled: true,
12
+ id: 'meta/llama-3.3-70b-instruct',
13
+ type: 'chat'
14
+ },
15
+ {
16
+ contextWindowTokens: 128_000,
17
+ description: '先进的最尖端小型语言模型,具备语言理解、卓越的推理能力和文本生成能力。',
18
+ displayName: 'Llama 3.2 1B Instruct',
19
+ id: 'meta/llama-3.2-1b-instruct',
20
+ type: 'chat'
21
+ },
22
+ {
23
+ contextWindowTokens: 128_000,
24
+ description: '先进的最尖端小型语言模型,具备语言理解、卓越的推理能力和文本生成能力。',
25
+ displayName: 'Llama 3.2 3B Instruct',
26
+ id: 'meta/llama-3.2-3b-instruct',
27
+ type: 'chat'
28
+ },
29
+ {
30
+ abilities: {
31
+ vision: true,
32
+ },
33
+ contextWindowTokens: 128_000,
34
+ description: '尖端的视觉-语言模型,擅长从图像中进行高质量推理。',
35
+ displayName: 'Llama 3.2 11B Vision Instruct',
36
+ enabled: true,
37
+ id: 'meta/llama-3.2-11b-vision-instruct',
38
+ type: 'chat'
39
+ },
40
+ {
41
+ abilities: {
42
+ vision: true,
43
+ },
44
+ contextWindowTokens: 128_000,
45
+ description: '尖端的视觉-语言模型,擅长从图像中进行高质量推理。',
46
+ displayName: 'Llama 3.2 90B Vision Instruct',
47
+ id: 'meta/llama-3.2-90b-vision-instruct',
48
+ type: 'chat'
49
+ },
50
+ {
51
+ abilities: {
52
+ functionCall: true,
53
+ },
54
+ contextWindowTokens: 128_000,
55
+ description: '先进的最尖端模型,具备语言理解、卓越的推理能力和文本生成能力。',
56
+ displayName: 'Llama 3.1 8B Instruct',
57
+ id: 'meta/llama-3.1-8b-instruct',
58
+ type: 'chat'
59
+ },
60
+ {
61
+ abilities: {
62
+ functionCall: true,
63
+ },
64
+ contextWindowTokens: 128_000,
65
+ description: '赋能复杂对话,具备卓越的上下文理解、推理能力和文本生成能力。',
66
+ displayName: 'Llama 3.1 70B Instruct',
67
+ id: 'meta/llama-3.1-70b-instruct',
68
+ type: 'chat'
69
+ },
70
+ {
71
+ abilities: {
72
+ functionCall: true,
73
+ },
74
+ contextWindowTokens: 128_000,
75
+ description: '高级 LLM,支持合成数据生成、知识蒸馏和推理,适用于聊天机器人、编程和特定领域任务。',
76
+ displayName: 'Llama 3.1 405B Instruct',
77
+ id: 'meta/llama-3.1-405b-instruct',
78
+ type: 'chat'
79
+ },
80
+ {
81
+ contextWindowTokens: 32_768,
82
+ description: '独特的语言模型,提供无与伦比的准确性和效率表现。',
83
+ displayName: 'Llama 3.1 Nemotron 51B Instruct',
84
+ id: 'nvidia/llama-3.1-nemotron-51b-instruct',
85
+ type: 'chat'
86
+ },
87
+ {
88
+ contextWindowTokens: 32_768,
89
+ description: 'Llama-3.1-Nemotron-70B-Instruct 是 NVIDIA 定制的大型语言模型,旨在提高 LLM 生成的响应的帮助性。',
90
+ displayName: 'Llama 3.1 Nemotron 70B Instruct',
91
+ id: 'nvidia/llama-3.1-nemotron-70b-instruct',
92
+ type: 'chat'
93
+ },
94
+ {
95
+ contextWindowTokens: 8192,
96
+ description: '面向边缘应用的高级小型语言生成 AI 模型。',
97
+ displayName: 'Gemma 2 2B Instruct',
98
+ id: 'google/gemma-2-2b-it',
99
+ type: 'chat'
100
+ },
101
+ {
102
+ contextWindowTokens: 8192,
103
+ description: '尖端文本生成模型,擅长文本理解、转换和代码生成。',
104
+ displayName: 'Gemma 2 9B Instruct',
105
+ id: 'google/gemma-2-9b-it',
106
+ type: 'chat'
107
+ },
108
+ {
109
+ contextWindowTokens: 8192,
110
+ description: '尖端文本生成模型,擅长文本理解、转换和代码生成。',
111
+ displayName: 'Gemma 2 27B Instruct',
112
+ id: 'google/gemma-2-27b-it',
113
+ type: 'chat'
114
+ },
115
+ {
116
+ abilities: {
117
+ reasoning: true,
118
+ },
119
+ contextWindowTokens: 128_000,
120
+ description: '最先进的高效 LLM,擅长推理、数学和编程。',
121
+ displayName: 'DeepSeek R1',
122
+ enabled: true,
123
+ id: 'deepseek-ai/deepseek-r1',
124
+ type: 'chat'
125
+ },
126
+ {
127
+ abilities: {
128
+ functionCall: true,
129
+ },
130
+ contextWindowTokens: 32_768,
131
+ description: '面向中文和英文的 LLM,针对语言、编程、数学、推理等领域。',
132
+ displayName: 'Qwen2.5 7B Instruct',
133
+ enabled: true,
134
+ id: 'qwen/qwen2.5-7b-instruct',
135
+ type: 'chat'
136
+ },
137
+ {
138
+ contextWindowTokens: 32_768,
139
+ description: '强大的中型代码模型,支持 32K 上下文长度,擅长多语言编程。',
140
+ displayName: 'Qwen2.5 Coder 7B Instruct',
141
+ id: 'qwen/qwen2.5-coder-7b-instruct',
142
+ type: 'chat'
143
+ },
144
+ {
145
+ contextWindowTokens: 32_768,
146
+ description: '高级 LLM,支持代码生成、推理和修复,涵盖主流编程语言。',
147
+ displayName: 'Qwen2.5 Coder 32B Instruct',
148
+ id: 'qwen/qwen2.5-coder-32b-instruct',
149
+ type: 'chat'
150
+ },
151
+ ]
152
+
153
+ export const allModels = [...nvidiaChatModels];
154
+
155
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -77,6 +77,9 @@ export const getLLMConfig = () => {
77
77
  ENABLED_NOVITA: z.boolean(),
78
78
  NOVITA_API_KEY: z.string().optional(),
79
79
 
80
+ ENABLED_NVIDIA: z.boolean(),
81
+ NVIDIA_API_KEY: z.string().optional(),
82
+
80
83
  ENABLED_BAICHUAN: z.boolean(),
81
84
  BAICHUAN_API_KEY: z.string().optional(),
82
85
 
@@ -202,6 +205,9 @@ export const getLLMConfig = () => {
202
205
  ENABLED_NOVITA: !!process.env.NOVITA_API_KEY,
203
206
  NOVITA_API_KEY: process.env.NOVITA_API_KEY,
204
207
 
208
+ ENABLED_NVIDIA: !!process.env.NVIDIA_API_KEY,
209
+ NVIDIA_API_KEY: process.env.NVIDIA_API_KEY,
210
+
205
211
  ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
206
212
  BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
207
213
 
@@ -23,6 +23,7 @@ import MinimaxProvider from './minimax';
23
23
  import MistralProvider from './mistral';
24
24
  import MoonshotProvider from './moonshot';
25
25
  import NovitaProvider from './novita';
26
+ import NvidiaProvider from './nvidia';
26
27
  import OllamaProvider from './ollama';
27
28
  import OpenAIProvider from './openai';
28
29
  import OpenRouterProvider from './openrouter';
@@ -67,6 +68,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
67
68
  ZeroOneProvider.chatModels,
68
69
  StepfunProvider.chatModels,
69
70
  NovitaProvider.chatModels,
71
+ NvidiaProvider.chatModels,
70
72
  BaichuanProvider.chatModels,
71
73
  TaichuProvider.chatModels,
72
74
  CloudflareProvider.chatModels,
@@ -96,6 +98,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
96
98
  CloudflareProvider,
97
99
  GithubProvider,
98
100
  NovitaProvider,
101
+ NvidiaProvider,
99
102
  TogetherAIProvider,
100
103
  FireworksAIProvider,
101
104
  GroqProvider,
@@ -158,6 +161,7 @@ export { default as MinimaxProviderCard } from './minimax';
158
161
  export { default as MistralProviderCard } from './mistral';
159
162
  export { default as MoonshotProviderCard } from './moonshot';
160
163
  export { default as NovitaProviderCard } from './novita';
164
+ export { default as NvidiaProviderCard } from './nvidia';
161
165
  export { default as OllamaProviderCard } from './ollama';
162
166
  export { default as OpenAIProviderCard } from './openai';
163
167
  export { default as OpenRouterProviderCard } from './openrouter';
@@ -0,0 +1,21 @@
import { ModelProviderCard } from '@/types/llm';

// Provider card for NVIDIA NIM (OpenAI-compatible endpoint).
const Nvidia: ModelProviderCard = {
  // Empty on purpose: `showModelFetcher` below exposes runtime model
  // discovery, so no static model list is shipped here.
  chatModels: [],
  // Small, cheap model used for the connectivity check.
  checkModel: 'meta/llama-3.2-1b-instruct',
  description: 'NVIDIA NIM™ 提供容器,可用于自托管 GPU 加速推理微服务,支持在云端、数据中心、RTX™ AI 个人电脑和工作站上部署预训练和自定义 AI 模型。',
  id: 'nvidia',
  modelList: { showModelFetcher: true },
  modelsUrl: 'https://build.nvidia.com/models',
  name: 'Nvidia',
  settings: {
    proxyUrl: {
      // Default NIM API endpoint; users may point this at a self-hosted NIM.
      placeholder: 'https://integrate.api.nvidia.com/v1',
    },
    // NIM speaks the OpenAI wire protocol, so reuse the openai SDK runtime.
    sdkType: 'openai',
    showModelFetcher: true,
  },
  url: 'https://build.nvidia.com',
};

export default Nvidia;
@@ -0,0 +1,95 @@
import { Form, Tag } from '@lobehub/ui';
import type { FormItemProps } from '@lobehub/ui/es/Form/components/FormItem';
import isEqual from 'fast-deep-equal';
import { debounce } from 'lodash-es';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { Flexbox } from 'react-layout-kit';

import InfoTooltip from '@/components/InfoTooltip';
import {
  FrequencyPenalty,
  PresencePenalty,
  Temperature,
  TopP,
} from '@/features/ModelParamsControl';
import { useAgentStore } from '@/store/agent';
import { agentSelectors } from '@/store/agent/selectors';

interface ParamsControlsProps {
  // Lets the parent (the popover) show a saving indicator while the
  // debounced update is in flight.
  setUpdating: (updating: boolean) => void;
}

/**
 * Form with the four sampling parameters (temperature / top_p /
 * presence_penalty / frequency_penalty) of the current agent.
 * Changes are debounced and persisted through `updateAgentConfig`.
 */
const ParamsControls = memo<ParamsControlsProps>(({ setUpdating }) => {
  const { t } = useTranslation('setting');

  const updateAgentConfig = useAgentStore((s) => s.updateAgentConfig);

  const config = useAgentStore(agentSelectors.currentAgentConfig, isEqual);

  const items: FormItemProps[] = [
    {
      children: <Temperature />,
      desc: <Tag>temperature</Tag>,
      label: (
        <Flexbox gap={8} horizontal>
          {t('settingModel.temperature.title')}
          <InfoTooltip title={t('settingModel.temperature.desc')} />
        </Flexbox>
      ),
      name: ['params', 'temperature'],
    },
    {
      children: <TopP />,
      desc: <Tag>top_p</Tag>,
      label: (
        <Flexbox gap={8} horizontal>
          {t('settingModel.topP.title')}
          <InfoTooltip title={t('settingModel.topP.desc')} />
        </Flexbox>
      ),
      name: ['params', 'top_p'],
    },
    {
      children: <PresencePenalty />,
      desc: <Tag>presence_penalty</Tag>,
      label: (
        <Flexbox gap={8} horizontal>
          {t('settingModel.presencePenalty.title')}
          <InfoTooltip title={t('settingModel.presencePenalty.desc')} />
        </Flexbox>
      ),
      name: ['params', 'presence_penalty'],
    },
    {
      children: <FrequencyPenalty />,
      desc: <Tag>frequency_penalty</Tag>,
      label: (
        <Flexbox gap={8} horizontal>
          {t('settingModel.frequencyPenalty.title')}
          <InfoTooltip title={t('settingModel.frequencyPenalty.desc')} />
        </Flexbox>
      ),
      name: ['params', 'frequency_penalty'],
    },
  ];

  return (
    <Form
      initialValues={config}
      itemMinWidth={200}
      items={items}
      itemsType={'flat'}
      // NOTE(review): the debounced handler is re-created on every render,
      // which resets the 500ms window across re-renders — acceptable here
      // since the component rarely re-renders while the user drags a slider.
      onValuesChange={debounce(async (values) => {
        setUpdating(true);
        await updateAgentConfig(values);
        setUpdating(false);
      }, 500)}
      size={'small'}
      style={{ fontSize: 12 }}
      variant={'pure'}
    />
  );
});

export default ParamsControls;
@@ -0,0 +1,47 @@
1
+ import { ActionIcon } from '@lobehub/ui';
2
+ import { Popover } from 'antd';
3
+ import { useTheme } from 'antd-style';
4
+ import { Settings2Icon } from 'lucide-react';
5
+ import { memo, useState } from 'react';
6
+ import { useTranslation } from 'react-i18next';
7
+ import { Flexbox } from 'react-layout-kit';
8
+
9
+ import UpdateLoading from '@/components/Loading/UpdateLoading';
10
+
11
+ import ParamsControls from './ParamsControls';
12
+
13
+ const Params = memo(() => {
14
+ const { t } = useTranslation('setting');
15
+ const [popoverOpen, setPopoverOpen] = useState(false);
16
+ const [isUpdating, setUpdating] = useState(false);
17
+
18
+ const theme = useTheme();
19
+ return (
20
+ <Popover
21
+ arrow={false}
22
+ content={<ParamsControls setUpdating={setUpdating} />}
23
+ onOpenChange={setPopoverOpen}
24
+ open={popoverOpen}
25
+ placement={'top'}
26
+ styles={{
27
+ body: { minWidth: 400 },
28
+ }}
29
+ title={
30
+ <Flexbox horizontal justify={'space-between'}>
31
+ {t('settingModel.params.title')}
32
+
33
+ {isUpdating && <UpdateLoading style={{ color: theme.colorTextSecondary }} />}
34
+ </Flexbox>
35
+ }
36
+ trigger={'click'}
37
+ >
38
+ <ActionIcon
39
+ icon={Settings2Icon}
40
+ placement={'bottom'}
41
+ title={popoverOpen ? undefined : t('settingModel.params.title')}
42
+ />
43
+ </Popover>
44
+ );
45
+ });
46
+
47
+ export default Params;
@@ -3,7 +3,7 @@ import Clear from './Clear';
3
3
  import History from './History';
4
4
  import Knowledge from './Knowledge';
5
5
  import ModelSwitch from './ModelSwitch';
6
- import Temperature from './Temperature';
6
+ import Params from './Params';
7
7
  import { MainToken, PortalToken } from './Token';
8
8
  import Tools from './Tools';
9
9
  import Upload from './Upload';
@@ -15,9 +15,10 @@ export const actionMap = {
15
15
  knowledgeBase: Knowledge,
16
16
  mainToken: MainToken,
17
17
  model: ModelSwitch,
18
+ params: Params,
18
19
  portalToken: PortalToken,
19
20
  stt: STT,
20
- temperature: Temperature,
21
+ temperature: Params,
21
22
  tools: Tools,
22
23
  } as const;
23
24
 
@@ -20,9 +20,9 @@ const defaultLeftActions: ActionKeys[] = [
20
20
  'model',
21
21
  'fileUpload',
22
22
  'knowledgeBase',
23
- 'temperature',
24
23
  'history',
25
24
  'tools',
25
+ 'params',
26
26
  'mainToken',
27
27
  ];
28
28
 
@@ -0,0 +1,37 @@
import { Icon, SliderWithInput } from '@lobehub/ui';
import { useTheme } from 'antd-style';
import { BookOpenText, FileIcon } from 'lucide-react';
import { memo } from 'react';
import { Flexbox } from 'react-layout-kit';

interface FrequencyPenaltyProps {
  onChange?: (value: number) => void;
  value?: number;
}

/**
 * Slider (+ input) for the `frequency_penalty` sampling parameter,
 * ranging -2 … 2 in 0.1 steps. Controlled via `value` / `onChange`.
 */
const FrequencyPenalty = memo<FrequencyPenaltyProps>(({ value, onChange }) => {
  const theme = useTheme();
  const markStyle = { color: theme.colorTextQuaternary };

  const marks = {
    '-2': <Icon icon={FileIcon} size={'small'} style={markStyle} />,
    0: <div />,
    2: <Icon icon={BookOpenText} size={'small'} style={markStyle} />,
  };

  return (
    <Flexbox style={{ paddingInlineStart: 8 }}>
      <SliderWithInput
        marks={marks}
        max={2}
        min={-2}
        onChange={onChange}
        size={'small'}
        step={0.1}
        value={value}
      />
    </Flexbox>
  );
});

export default FrequencyPenalty;
@@ -0,0 +1,35 @@
import { Icon, SliderWithInput } from '@lobehub/ui';
import { useTheme } from 'antd-style';
import { AtomIcon, RepeatIcon } from 'lucide-react';
import { memo } from 'react';
import { Flexbox } from 'react-layout-kit';

interface PresencePenaltyProps {
  onChange?: (value: number) => void;
  value?: number;
}

/**
 * Slider (+ input) for the `presence_penalty` sampling parameter,
 * ranging -2 … 2 in 0.1 steps. Controlled via `value` / `onChange`.
 */
const PresencePenalty = memo<PresencePenaltyProps>(({ value, onChange }) => {
  const theme = useTheme();
  const markStyle = { color: theme.colorTextQuaternary };

  const marks = {
    '-2': <Icon icon={RepeatIcon} size={'small'} style={markStyle} />,
    0: <div />,
    2: <Icon icon={AtomIcon} size={'small'} style={markStyle} />,
  };

  return (
    <Flexbox style={{ paddingInlineStart: 8 }}>
      <SliderWithInput
        marks={marks}
        max={2}
        min={-2}
        onChange={onChange}
        size={'small'}
        step={0.1}
        value={value}
      />
    </Flexbox>
  );
});

export default PresencePenalty;
@@ -0,0 +1,71 @@
1
+ import { Alert, Icon, SliderWithInput } from '@lobehub/ui';
2
+ import { css, cx, useTheme } from 'antd-style';
3
+ import { Sparkle, Sparkles } from 'lucide-react';
4
+ import { memo } from 'react';
5
+ import { useTranslation } from 'react-i18next';
6
+ import { Flexbox } from 'react-layout-kit';
7
+
8
+ import { useAgentStore } from '@/store/agent';
9
+ import { agentSelectors } from '@/store/agent/selectors';
10
+
11
+ const alertCls = css`
12
+ .ant-alert-message {
13
+ font-size: 12px;
14
+ line-height: 18px !important;
15
+ }
16
+
17
+ .ant-alert-icon {
18
+ height: 18px !important;
19
+ }
20
+ `;
21
+
22
+ const Warning = memo(() => {
23
+ const { t } = useTranslation('setting');
24
+ const [temperature] = useAgentStore((s) => {
25
+ const config = agentSelectors.currentAgentConfig(s);
26
+ return [config.params?.temperature];
27
+ });
28
+
29
+ return (
30
+ typeof temperature === 'number' &&
31
+ temperature >= 1.5 && (
32
+ <Alert
33
+ classNames={{ alert: cx(alertCls) }}
34
+ message={t('settingModel.temperature.warning')}
35
+ style={{ fontSize: 12 }}
36
+ type={'warning'}
37
+ variant={'pure'}
38
+ />
39
+ )
40
+ );
41
+ });
42
+
43
+ interface TemperatureProps {
44
+ onChange?: (value: number) => void;
45
+ value?: number;
46
+ }
47
+
48
+ const Temperature = memo<TemperatureProps>(({ value, onChange }) => {
49
+ const theme = useTheme();
50
+ return (
51
+ <Flexbox gap={4} style={{ paddingInlineStart: 8 }}>
52
+ <SliderWithInput
53
+ controls={false}
54
+ marks={{
55
+ 0: <Icon icon={Sparkle} size={'small'} style={{ color: theme.colorTextQuaternary }} />,
56
+ 1: <div />,
57
+ 2: <Icon icon={Sparkles} size={'small'} style={{ color: theme.colorTextQuaternary }} />,
58
+ }}
59
+ max={2}
60
+ onChange={onChange}
61
+ size={'small'}
62
+ step={0.1}
63
+ style={{ height: 48 }}
64
+ value={value}
65
+ />
66
+ <Warning />
67
+ </Flexbox>
68
+ );
69
+ });
70
+
71
+ export default Temperature;
@@ -0,0 +1,39 @@
import { Icon, SliderWithInput } from '@lobehub/ui';
import { useTheme } from 'antd-style';
import { FlowerIcon, TrainFrontTunnel } from 'lucide-react';
import { memo } from 'react';
import { Flexbox } from 'react-layout-kit';

interface TopPProps {
  onChange?: (value: number) => void;
  value?: number;
}

/**
 * Slider (+ input) for the `top_p` nucleus-sampling parameter,
 * ranging 0 … 1 in 0.1 steps. Controlled via `value` / `onChange`.
 */
const TopP = memo<TopPProps>(({ value, onChange }) => {
  const theme = useTheme();
  const markStyle = { color: theme.colorTextQuaternary };

  const marks = {
    0: <Icon icon={TrainFrontTunnel} size={'small'} style={markStyle} />,
    0.9: <div />,
    1: <Icon icon={FlowerIcon} size={'small'} style={markStyle} />,
  };

  return (
    <Flexbox style={{ paddingInlineStart: 8 }}>
      <SliderWithInput
        marks={marks}
        max={1}
        min={0}
        onChange={onChange}
        size={'small'}
        step={0.1}
        value={value}
      />
    </Flexbox>
  );
});

export default TopP;
@@ -0,0 +1,4 @@
// Barrel exports for the model sampling-parameter slider controls.
export { default as FrequencyPenalty } from './FrequencyPenalty';
export { default as PresencePenalty } from './PresencePenalty';
export { default as Temperature } from './Temperature';
export { default as TopP } from './TopP';
@@ -26,6 +26,7 @@ import { LobeMinimaxAI } from './minimax';
26
26
  import { LobeMistralAI } from './mistral';
27
27
  import { LobeMoonshotAI } from './moonshot';
28
28
  import { LobeNovitaAI } from './novita';
29
+ import { LobeNvidiaAI } from './nvidia';
29
30
  import { LobeOllamaAI } from './ollama';
30
31
  import { LobeOpenAI } from './openai';
31
32
  import { LobeOpenRouterAI } from './openrouter';
@@ -157,6 +158,7 @@ class AgentRuntime {
157
158
  mistral: Partial<ClientOptions>;
158
159
  moonshot: Partial<ClientOptions>;
159
160
  novita: Partial<ClientOptions>;
161
+ nvidia: Partial<ClientOptions>;
160
162
  ollama: Partial<ClientOptions>;
161
163
  openai: Partial<ClientOptions>;
162
164
  openrouter: Partial<ClientOptions>;
@@ -300,6 +302,11 @@ class AgentRuntime {
300
302
  break;
301
303
  }
302
304
 
305
+ case ModelProvider.Nvidia: {
306
+ runtimeModel = new LobeNvidiaAI(params.nvidia);
307
+ break;
308
+ }
309
+
303
310
  case ModelProvider.Baichuan: {
304
311
  runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
305
312
  break;
@@ -0,0 +1,44 @@
1
+ import { ModelProvider } from '../types';
2
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
+
4
+ import type { ChatModelCard } from '@/types/llm';
5
+
6
+ export interface NvidiaModelCard {
7
+ id: string;
8
+ }
9
+
10
+ export const LobeNvidiaAI = LobeOpenAICompatibleFactory({
11
+ baseURL: 'https://integrate.api.nvidia.com/v1',
12
+ debug: {
13
+ chatCompletion: () => process.env.DEBUG_NVIDIA_CHAT_COMPLETION === '1',
14
+ },
15
+ models: async ({ client }) => {
16
+ const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
17
+
18
+ const modelsPage = await client.models.list() as any;
19
+ const modelList: NvidiaModelCard[] = modelsPage.data;
20
+
21
+ return modelList
22
+ .map((model) => {
23
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
24
+
25
+ return {
26
+ contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
27
+ displayName: knownModel?.displayName ?? undefined,
28
+ enabled: knownModel?.enabled || false,
29
+ functionCall:
30
+ knownModel?.abilities?.functionCall
31
+ || false,
32
+ id: model.id,
33
+ reasoning:
34
+ knownModel?.abilities?.reasoning
35
+ || false,
36
+ vision:
37
+ knownModel?.abilities?.vision
38
+ || false,
39
+ };
40
+ })
41
+ .filter(Boolean) as ChatModelCard[];
42
+ },
43
+ provider: ModelProvider.Nvidia,
44
+ });
@@ -45,6 +45,7 @@ export enum ModelProvider {
45
45
  Mistral = 'mistral',
46
46
  Moonshot = 'moonshot',
47
47
  Novita = 'novita',
48
+ Nvidia = 'nvidia',
48
49
  Ollama = 'ollama',
49
50
  OpenAI = 'openai',
50
51
  OpenRouter = 'openrouter',