@lobehub/chat 1.57.1 → 1.59.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docker-compose/local/docker-compose.yml +1 -0
  4. package/locales/ar/modelProvider.json +24 -0
  5. package/locales/ar/models.json +60 -0
  6. package/locales/ar/providers.json +15 -0
  7. package/locales/bg-BG/modelProvider.json +24 -0
  8. package/locales/bg-BG/models.json +60 -0
  9. package/locales/bg-BG/providers.json +15 -0
  10. package/locales/de-DE/modelProvider.json +24 -0
  11. package/locales/de-DE/models.json +60 -0
  12. package/locales/de-DE/providers.json +15 -0
  13. package/locales/en-US/modelProvider.json +24 -0
  14. package/locales/en-US/models.json +60 -0
  15. package/locales/en-US/providers.json +15 -0
  16. package/locales/es-ES/modelProvider.json +24 -0
  17. package/locales/es-ES/models.json +60 -0
  18. package/locales/es-ES/providers.json +15 -0
  19. package/locales/fa-IR/modelProvider.json +24 -0
  20. package/locales/fa-IR/models.json +60 -0
  21. package/locales/fa-IR/providers.json +15 -0
  22. package/locales/fr-FR/modelProvider.json +24 -0
  23. package/locales/fr-FR/models.json +60 -0
  24. package/locales/fr-FR/providers.json +15 -0
  25. package/locales/it-IT/modelProvider.json +24 -0
  26. package/locales/it-IT/models.json +60 -0
  27. package/locales/it-IT/providers.json +15 -0
  28. package/locales/ja-JP/modelProvider.json +24 -0
  29. package/locales/ja-JP/models.json +60 -0
  30. package/locales/ja-JP/providers.json +15 -0
  31. package/locales/ko-KR/modelProvider.json +24 -0
  32. package/locales/ko-KR/models.json +60 -0
  33. package/locales/ko-KR/providers.json +15 -0
  34. package/locales/nl-NL/modelProvider.json +24 -0
  35. package/locales/nl-NL/models.json +60 -0
  36. package/locales/nl-NL/providers.json +15 -0
  37. package/locales/pl-PL/modelProvider.json +24 -0
  38. package/locales/pl-PL/models.json +60 -0
  39. package/locales/pl-PL/providers.json +15 -0
  40. package/locales/pt-BR/modelProvider.json +24 -0
  41. package/locales/pt-BR/models.json +60 -0
  42. package/locales/pt-BR/providers.json +15 -0
  43. package/locales/ru-RU/modelProvider.json +24 -0
  44. package/locales/ru-RU/models.json +60 -0
  45. package/locales/ru-RU/providers.json +15 -0
  46. package/locales/tr-TR/modelProvider.json +24 -0
  47. package/locales/tr-TR/models.json +60 -0
  48. package/locales/tr-TR/providers.json +15 -0
  49. package/locales/vi-VN/modelProvider.json +24 -0
  50. package/locales/vi-VN/models.json +60 -0
  51. package/locales/vi-VN/providers.json +15 -0
  52. package/locales/zh-CN/modelProvider.json +24 -0
  53. package/locales/zh-CN/models.json +1109 -1049
  54. package/locales/zh-CN/providers.json +82 -67
  55. package/locales/zh-TW/modelProvider.json +24 -0
  56. package/locales/zh-TW/models.json +60 -0
  57. package/locales/zh-TW/providers.json +15 -0
  58. package/next.config.ts +5 -0
  59. package/package.json +4 -2
  60. package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +58 -0
  61. package/src/app/[variants]/(main)/settings/provider/(list)/ProviderGrid/index.tsx +6 -3
  62. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +13 -2
  63. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +6 -8
  64. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/index.tsx +5 -6
  65. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx +4 -3
  66. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext.ts +2 -0
  67. package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx +6 -7
  68. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +1 -1
  69. package/src/config/aiModels/azureai.ts +18 -0
  70. package/src/config/aiModels/index.ts +6 -0
  71. package/src/config/aiModels/volcengine.ts +83 -0
  72. package/src/config/llm.ts +6 -0
  73. package/src/config/modelProviders/azure.ts +2 -1
  74. package/src/config/modelProviders/azureai.ts +19 -0
  75. package/src/config/modelProviders/index.ts +6 -0
  76. package/src/config/modelProviders/volcengine.ts +23 -0
  77. package/src/database/server/models/aiProvider.ts +2 -0
  78. package/src/libs/agent-runtime/AgentRuntime.ts +17 -8
  79. package/src/libs/agent-runtime/azureai/index.ts +109 -0
  80. package/src/libs/agent-runtime/baichuan/index.test.ts +8 -250
  81. package/src/libs/agent-runtime/cloudflare/index.ts +22 -18
  82. package/src/libs/agent-runtime/index.ts +2 -1
  83. package/src/libs/agent-runtime/types/type.ts +5 -0
  84. package/src/libs/agent-runtime/utils/streams/__snapshots__/protocol.test.ts.snap +331 -0
  85. package/src/libs/agent-runtime/utils/streams/protocol.test.ts +137 -0
  86. package/src/libs/agent-runtime/utils/streams/protocol.ts +34 -0
  87. package/src/libs/agent-runtime/{doubao → volcengine}/index.ts +3 -3
  88. package/src/locales/default/modelProvider.ts +25 -0
  89. package/src/server/modules/AgentRuntime/index.ts +8 -1
  90. package/src/services/chat.ts +16 -4
  91. package/src/types/aiProvider.ts +5 -0
  92. package/src/types/user/settings/keyVaults.ts +2 -0
@@ -1,11 +1,11 @@
1
1
  import { Modal } from '@lobehub/ui';
2
2
  import { Button, FormInstance } from 'antd';
3
- import { memo, useState } from 'react';
3
+ import { memo, use, useState } from 'react';
4
4
  import { useTranslation } from 'react-i18next';
5
5
 
6
- import { ModelProvider } from '@/libs/agent-runtime';
7
6
  import { useAiInfraStore } from '@/store/aiInfra';
8
7
 
8
+ import { ProviderSettingsContext } from '../ProviderSettingsContext';
9
9
  import ModelConfigForm from './Form';
10
10
 
11
11
  interface ModelConfigModalProps {
@@ -26,6 +26,8 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ open, setOpen }) => {
26
26
  setOpen(false);
27
27
  };
28
28
 
29
+ const { showDeployName } = use(ProviderSettingsContext);
30
+
29
31
  return (
30
32
  <Modal
31
33
  destroyOnClose
@@ -65,10 +67,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ open, setOpen }) => {
65
67
  title={t('providerModels.createNew.title')}
66
68
  zIndex={1251} // Select is 1150
67
69
  >
68
- <ModelConfigForm
69
- onFormInstanceReady={setFormInstance}
70
- showAzureDeployName={editingProvider === ModelProvider.Azure}
71
- />
70
+ <ModelConfigForm onFormInstanceReady={setFormInstance} showDeployName={showDeployName} />
72
71
  </Modal>
73
72
  );
74
73
  });
@@ -1,13 +1,13 @@
1
1
  import { Modal } from '@lobehub/ui';
2
2
  import { Button, FormInstance } from 'antd';
3
3
  import isEqual from 'fast-deep-equal';
4
- import { memo, useState } from 'react';
4
+ import { memo, use, useState } from 'react';
5
5
  import { useTranslation } from 'react-i18next';
6
6
 
7
- import { ModelProvider } from '@/libs/agent-runtime';
8
7
  import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
9
8
 
10
9
  import ModelConfigForm from '../CreateNewModelModal/Form';
10
+ import { ProviderSettingsContext } from '../ProviderSettingsContext';
11
11
 
12
12
  interface ModelConfigModalProps {
13
13
  id: string;
@@ -28,6 +28,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ id, open, setOpen }) =>
28
28
  const closeModal = () => {
29
29
  setOpen(false);
30
30
  };
31
+ const { showDeployName } = use(ProviderSettingsContext);
31
32
 
32
33
  return (
33
34
  <Modal
@@ -66,7 +67,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ id, open, setOpen }) =>
66
67
  idEditable={false}
67
68
  initialValues={model}
68
69
  onFormInstanceReady={setFormInstance}
69
- showAzureDeployName={editingProvider === ModelProvider.Azure || editingProvider === ModelProvider.Doubao}
70
+ showDeployName={showDeployName}
70
71
  type={model?.type}
71
72
  />
72
73
  </Modal>
@@ -2,7 +2,9 @@ import { createContext } from 'react';
2
2
 
3
3
  export interface ProviderSettingsContextValue {
4
4
  modelEditable?: boolean;
5
+ sdkType?: string;
5
6
  showAddNewModel?: boolean;
7
+ showDeployName?: boolean;
6
8
  showModelFetcher?: boolean;
7
9
  }
8
10
 
@@ -3,7 +3,6 @@
3
3
  import { Suspense, memo } from 'react';
4
4
  import { Flexbox } from 'react-layout-kit';
5
5
 
6
- import { ProviderSettingsContext } from '@/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext';
7
6
  import { useIsMobile } from '@/hooks/useIsMobile';
8
7
  import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
9
8
 
@@ -11,6 +10,7 @@ import DisabledModels from './DisabledModels';
11
10
  import EmptyModels from './EmptyModels';
12
11
  import EnabledModelList from './EnabledModelList';
13
12
  import ModelTitle from './ModelTitle';
13
+ import { ProviderSettingsContext, ProviderSettingsContextValue } from './ProviderSettingsContext';
14
14
  import SearchResult from './SearchResult';
15
15
  import SkeletonList from './SkeletonList';
16
16
 
@@ -41,19 +41,18 @@ const Content = memo<ContentProps>(({ id }) => {
41
41
  );
42
42
  });
43
43
 
44
- interface ModelListProps {
44
+ interface ModelListProps extends ProviderSettingsContextValue {
45
45
  id: string;
46
- modelEditable?: boolean;
47
- showAddNewModel?: boolean;
48
- showModelFetcher?: boolean;
49
46
  }
50
47
 
51
48
  const ModelList = memo<ModelListProps>(
52
- ({ id, showModelFetcher, showAddNewModel, modelEditable = true }) => {
49
+ ({ id, showModelFetcher, sdkType, showAddNewModel, showDeployName, modelEditable = true }) => {
53
50
  const mobile = useIsMobile();
54
51
 
55
52
  return (
56
- <ProviderSettingsContext value={{ modelEditable, showAddNewModel, showModelFetcher }}>
53
+ <ProviderSettingsContext
54
+ value={{ modelEditable, sdkType, showAddNewModel, showDeployName, showModelFetcher }}
55
+ >
57
56
  <Flexbox gap={16} paddingInline={mobile ? 12 : 0}>
58
57
  <ModelTitle
59
58
  provider={id}
@@ -127,7 +127,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
127
127
  defaultShowBrowserRequest,
128
128
  disableBrowserRequest,
129
129
  showChecker = true,
130
- } = settings;
130
+ } = settings || {};
131
131
  const { t } = useTranslation('modelProvider');
132
132
  const [form] = Form.useForm();
133
133
  const { cx, styles, theme } = useStyles();
@@ -0,0 +1,18 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const azureChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ reasoning: true,
7
+ },
8
+ contextWindowTokens: 128_000,
9
+ displayName: 'DeepSeek R1',
10
+ id: 'DeepSeek-R1',
11
+ maxOutput: 4096,
12
+ type: 'chat',
13
+ },
14
+ ];
15
+
16
+ export const allModels = [...azureChatModels];
17
+
18
+ export default allModels;
@@ -4,6 +4,7 @@ import { default as ai21 } from './ai21';
4
4
  import { default as ai360 } from './ai360';
5
5
  import { default as anthropic } from './anthropic';
6
6
  import { default as azure } from './azure';
7
+ import { default as azureai } from './azureai';
7
8
  import { default as baichuan } from './baichuan';
8
9
  import { default as bedrock } from './bedrock';
9
10
  import { default as cloudflare } from './cloudflare';
@@ -39,6 +40,7 @@ import { default as tencentcloud } from './tencentcloud';
39
40
  import { default as togetherai } from './togetherai';
40
41
  import { default as upstage } from './upstage';
41
42
  import { default as vllm } from './vllm';
43
+ import { default as volcengine } from './volcengine';
42
44
  import { default as wenxin } from './wenxin';
43
45
  import { default as xai } from './xai';
44
46
  import { default as zeroone } from './zeroone';
@@ -68,6 +70,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
68
70
  ai360,
69
71
  anthropic,
70
72
  azure,
73
+ azureai,
71
74
  baichuan,
72
75
  bedrock,
73
76
  cloudflare,
@@ -103,6 +106,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
103
106
  togetherai,
104
107
  upstage,
105
108
  vllm,
109
+ volcengine,
106
110
  wenxin,
107
111
  xai,
108
112
  zeroone,
@@ -113,6 +117,7 @@ export { default as ai21 } from './ai21';
113
117
  export { default as ai360 } from './ai360';
114
118
  export { default as anthropic } from './anthropic';
115
119
  export { default as azure } from './azure';
120
+ export { default as azureai } from './azureai';
116
121
  export { default as baichuan } from './baichuan';
117
122
  export { default as bedrock } from './bedrock';
118
123
  export { default as cloudflare } from './cloudflare';
@@ -148,6 +153,7 @@ export { default as tencentcloud } from './tencentcloud';
148
153
  export { default as togetherai } from './togetherai';
149
154
  export { default as upstage } from './upstage';
150
155
  export { default as vllm } from './vllm';
156
+ export { default as volcengine } from './volcengine';
151
157
  export { default as wenxin } from './wenxin';
152
158
  export { default as xai } from './xai';
153
159
  export { default as zeroone } from './zeroone';
@@ -0,0 +1,83 @@
1
+ import { AIChatModelCard } from '@/types/aiModel';
2
+
3
+ const doubaoChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ reasoning: true,
7
+ },
8
+ contextWindowTokens: 65_536,
9
+ description:
10
+ '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 4k 上下文窗口的推理和精调。',
11
+ displayName: 'DeepSeek R1',
12
+ enabled: true,
13
+ id: 'deepseek-r1',
14
+ type: 'chat',
15
+ },
16
+ {
17
+ abilities: {
18
+ functionCall: true,
19
+ },
20
+ contextWindowTokens: 65_536,
21
+ description:
22
+ 'DeepSeek-V3 是一款由深度求索公司自研的MoE模型。DeepSeek-V3 多项评测成绩超越了 Qwen2.5-72B 和 Llama-3.1-405B 等其他开源模型,并在性能上和世界顶尖的闭源模型 GPT-4o 以及 Claude-3.5-Sonnet 不分伯仲。',
23
+ displayName: 'DeepSeek V3',
24
+ enabled: true,
25
+ id: 'deepseek-v3',
26
+ type: 'chat',
27
+ },
28
+ {
29
+ contextWindowTokens: 4096,
30
+ description:
31
+ '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 4k 上下文窗口的推理和精调。',
32
+ displayName: 'Doubao Lite 4k',
33
+ id: 'Doubao-lite-4k',
34
+ type: 'chat',
35
+ },
36
+ {
37
+ contextWindowTokens: 32_768,
38
+ description:
39
+ '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 32k 上下文窗口的推理和精调。',
40
+ displayName: 'Doubao Lite 32k',
41
+ id: 'Doubao-lite-32k',
42
+ type: 'chat',
43
+ },
44
+ {
45
+ contextWindowTokens: 128_000,
46
+ description:
47
+ '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 128k 上下文窗口的推理和精调。',
48
+ displayName: 'Doubao Lite 128k',
49
+ id: 'Doubao-lite-128k',
50
+ type: 'chat',
51
+ },
52
+ {
53
+ contextWindowTokens: 4096,
54
+ description:
55
+ '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 4k 上下文窗口的推理和精调。',
56
+ displayName: 'Doubao Pro 4k',
57
+ id: 'Doubao-pro-4k',
58
+ type: 'chat',
59
+ },
60
+ {
61
+ config: {
62
+ deploymentName: 'Doubao-pro-test',
63
+ },
64
+ contextWindowTokens: 32_768,
65
+ description:
66
+ '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 32k 上下文窗口的推理和精调。',
67
+ displayName: 'Doubao Pro 32k',
68
+ id: 'Doubao-pro-32k',
69
+ type: 'chat',
70
+ },
71
+ {
72
+ contextWindowTokens: 128_000,
73
+ description:
74
+ '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 128k 上下文窗口的推理和精调。',
75
+ displayName: 'Doubao Pro 128k',
76
+ id: 'Doubao-pro-128k',
77
+ type: 'chat',
78
+ },
79
+ ];
80
+
81
+ export const allModels = [...doubaoChatModels];
82
+
83
+ export default allModels;
package/src/config/llm.ts CHANGED
@@ -132,6 +132,9 @@ export const getLLMConfig = () => {
132
132
  ENABLED_DOUBAO: z.boolean(),
133
133
  DOUBAO_API_KEY: z.string().optional(),
134
134
 
135
+ ENABLED_VOLCENGINE: z.boolean(),
136
+ VOLCENGINE_API_KEY: z.string().optional(),
137
+
135
138
  ENABLED_TENCENT_CLOUD: z.boolean(),
136
139
  TENCENT_CLOUD_API_KEY: z.string().optional(),
137
140
 
@@ -158,6 +161,9 @@ export const getLLMConfig = () => {
158
161
  ENABLED_GOOGLE: !!process.env.GOOGLE_API_KEY,
159
162
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
160
163
 
164
+ ENABLED_VOLCENGINE: !!process.env.VOLCENGINE_API_KEY,
165
+ VOLCENGINE_API_KEY: process.env.VOLCENGINE_API_KEY,
166
+
161
167
  ENABLED_PERPLEXITY: !!process.env.PERPLEXITY_API_KEY,
162
168
  PERPLEXITY_API_KEY: process.env.PERPLEXITY_API_KEY,
163
169
 
@@ -58,10 +58,11 @@ const Azure: ModelProviderCard = {
58
58
  'Azure 提供多种先进的AI模型,包括GPT-3.5和最新的GPT-4系列,支持多种数据类型和复杂任务,致力于安全、可靠和可持续的AI解决方案。',
59
59
  id: 'azure',
60
60
  modelsUrl: 'https://learn.microsoft.com/azure/ai-services/openai/concepts/models',
61
- name: 'Azure',
61
+ name: 'Azure OpenAI',
62
62
  settings: {
63
63
  defaultShowBrowserRequest: true,
64
64
  sdkType: 'azure',
65
+ showDeployName: true,
65
66
  },
66
67
  url: 'https://azure.microsoft.com',
67
68
  };
@@ -0,0 +1,19 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref: https://learn.microsoft.com/azure/ai-services/openai/concepts/models
4
+ const Azure: ModelProviderCard = {
5
+ chatModels: [],
6
+ description:
7
+ 'Azure 提供多种先进的AI模型,包括GPT-3.5和最新的GPT-4系列,支持多种数据类型和复杂任务,致力于安全、可靠和可持续的AI解决方案。',
8
+ id: 'azureai',
9
+ modelsUrl: 'https://ai.azure.com/explore/models',
10
+ name: 'Azure AI',
11
+ settings: {
12
+ defaultShowBrowserRequest: true,
13
+ sdkType: 'azureai',
14
+ showDeployName: true,
15
+ },
16
+ url: 'https://ai.azure.com',
17
+ };
18
+
19
+ export default Azure;
@@ -4,6 +4,7 @@ import Ai21Provider from './ai21';
4
4
  import Ai360Provider from './ai360';
5
5
  import AnthropicProvider from './anthropic';
6
6
  import AzureProvider from './azure';
7
+ import AzureAIProvider from './azureai';
7
8
  import BaichuanProvider from './baichuan';
8
9
  import BedrockProvider from './bedrock';
9
10
  import CloudflareProvider from './cloudflare';
@@ -39,6 +40,7 @@ import TencentcloudProvider from './tencentcloud';
39
40
  import TogetherAIProvider from './togetherai';
40
41
  import UpstageProvider from './upstage';
41
42
  import VLLMProvider from './vllm';
43
+ import VolcengineProvider from './volcengine';
42
44
  import WenxinProvider from './wenxin';
43
45
  import XAIProvider from './xai';
44
46
  import ZeroOneProvider from './zeroone';
@@ -92,6 +94,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
92
94
  export const DEFAULT_MODEL_PROVIDER_LIST = [
93
95
  OpenAIProvider,
94
96
  { ...AzureProvider, chatModels: [] },
97
+ AzureAIProvider,
95
98
  OllamaProvider,
96
99
  VLLMProvider,
97
100
  AnthropicProvider,
@@ -125,6 +128,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
125
128
  StepfunProvider,
126
129
  MoonshotProvider,
127
130
  BaichuanProvider,
131
+ VolcengineProvider,
128
132
  MinimaxProvider,
129
133
  LMStudioProvider,
130
134
  InternLMProvider,
@@ -148,6 +152,7 @@ export { default as Ai21ProviderCard } from './ai21';
148
152
  export { default as Ai360ProviderCard } from './ai360';
149
153
  export { default as AnthropicProviderCard } from './anthropic';
150
154
  export { default as AzureProviderCard } from './azure';
155
+ export { default as AzureAIProviderCard } from './azureai';
151
156
  export { default as BaichuanProviderCard } from './baichuan';
152
157
  export { default as BedrockProviderCard } from './bedrock';
153
158
  export { default as CloudflareProviderCard } from './cloudflare';
@@ -183,6 +188,7 @@ export { default as TencentCloudProviderCard } from './tencentcloud';
183
188
  export { default as TogetherAIProviderCard } from './togetherai';
184
189
  export { default as UpstageProviderCard } from './upstage';
185
190
  export { default as VLLMProviderCard } from './vllm';
191
+ export { default as VolcengineProviderCard } from './volcengine';
186
192
  export { default as WenxinProviderCard } from './wenxin';
187
193
  export { default as XAIProviderCard } from './xai';
188
194
  export { default as ZeroOneProviderCard } from './zeroone';
@@ -0,0 +1,23 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref https://www.volcengine.com/docs/82379/1330310
4
+ const Doubao: ModelProviderCard = {
5
+ chatModels: [],
6
+ description:
7
+ '字节跳动推出的大模型服务的开发平台,提供功能丰富、安全以及具备价格竞争力的模型调用服务,同时提供模型数据、精调、推理、评测等端到端功能,全方位保障您的 AI 应用开发落地。',
8
+ id: 'volcengine',
9
+ modelsUrl: 'https://www.volcengine.com/docs/82379/1330310',
10
+ name: '火山引擎',
11
+ settings: {
12
+ disableBrowserRequest: true, // CORS error
13
+ sdkType: 'openai',
14
+ showDeployName: true,
15
+ smoothing: {
16
+ speed: 2,
17
+ text: true,
18
+ },
19
+ },
20
+ url: 'https://www.volcengine.com/product/ark',
21
+ };
22
+
23
+ export default Doubao;
@@ -1,4 +1,5 @@
1
1
  import { and, asc, desc, eq } from 'drizzle-orm/expressions';
2
+ import { isEmpty } from 'lodash-es';
2
3
 
3
4
  import { LobeChatDatabase } from '@/database/type';
4
5
  import { ModelProvider } from '@/libs/agent-runtime';
@@ -207,6 +208,7 @@ export class AiProviderModel {
207
208
  ...result,
208
209
  fetchOnClient: typeof result.fetchOnClient === 'boolean' ? result.fetchOnClient : undefined,
209
210
  keyVaults,
211
+ settings: isEmpty(result.settings) ? undefined : result.settings,
210
212
  } as AiProviderDetailItem;
211
213
  };
212
214
 
@@ -7,11 +7,11 @@ import { LobeAi21AI } from './ai21';
7
7
  import { LobeAi360AI } from './ai360';
8
8
  import { LobeAnthropicAI } from './anthropic';
9
9
  import { LobeAzureOpenAI } from './azureOpenai';
10
+ import { LobeAzureAI } from './azureai';
10
11
  import { LobeBaichuanAI } from './baichuan';
11
12
  import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
12
13
  import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
13
14
  import { LobeDeepSeekAI } from './deepseek';
14
- import { LobeDoubaoAI } from './doubao';
15
15
  import { LobeFireworksAI } from './fireworksai';
16
16
  import { LobeGiteeAI } from './giteeai';
17
17
  import { LobeGithubAI } from './github';
@@ -51,6 +51,7 @@ import {
51
51
  } from './types';
52
52
  import { LobeUpstageAI } from './upstage';
53
53
  import { LobeVLLMAI } from './vllm';
54
+ import { LobeVolcengineAI } from './volcengine';
54
55
  import { LobeWenxinAI } from './wenxin';
55
56
  import { LobeXAI } from './xai';
56
57
  import { LobeZeroOneAI } from './zeroone';
@@ -141,6 +142,7 @@ class AgentRuntime {
141
142
  ai360: Partial<ClientOptions>;
142
143
  anthropic: Partial<ClientOptions>;
143
144
  azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
145
+ azureai: { apiKey?: string; apiVersion?: string; baseURL?: string };
144
146
  baichuan: Partial<ClientOptions>;
145
147
  bedrock: Partial<LobeBedrockAIParams>;
146
148
  cloudflare: Partial<LobeCloudflareParams>;
@@ -176,6 +178,7 @@ class AgentRuntime {
176
178
  togetherai: Partial<ClientOptions>;
177
179
  upstage: Partial<ClientOptions>;
178
180
  vllm: Partial<ClientOptions>;
181
+ volcengine: Partial<ClientOptions>;
179
182
  wenxin: Partial<ClientOptions>;
180
183
  xai: Partial<ClientOptions>;
181
184
  zeroone: Partial<ClientOptions>;
@@ -201,6 +204,11 @@ class AgentRuntime {
201
204
  break;
202
205
  }
203
206
 
207
+ case ModelProvider.AzureAI: {
208
+ runtimeModel = new LobeAzureAI(params.azureai);
209
+ break;
210
+ }
211
+
204
212
  case ModelProvider.ZhiPu: {
205
213
  runtimeModel = new LobeZhipuAI(params.zhipu);
206
214
  break;
@@ -307,7 +315,7 @@ class AgentRuntime {
307
315
  }
308
316
 
309
317
  case ModelProvider.Novita: {
310
- runtimeModel = new LobeNovitaAI(params.novita ?? {});
318
+ runtimeModel = new LobeNovitaAI(params.novita);
311
319
  break;
312
320
  }
313
321
 
@@ -317,7 +325,7 @@ class AgentRuntime {
317
325
  }
318
326
 
319
327
  case ModelProvider.Baichuan: {
320
- runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
328
+ runtimeModel = new LobeBaichuanAI(params.baichuan);
321
329
  break;
322
330
  }
323
331
 
@@ -327,12 +335,12 @@ class AgentRuntime {
327
335
  }
328
336
 
329
337
  case ModelProvider.Ai360: {
330
- runtimeModel = new LobeAi360AI(params.ai360 ?? {});
338
+ runtimeModel = new LobeAi360AI(params.ai360);
331
339
  break;
332
340
  }
333
341
 
334
342
  case ModelProvider.SiliconCloud: {
335
- runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
343
+ runtimeModel = new LobeSiliconCloudAI(params.siliconcloud);
336
344
  break;
337
345
  }
338
346
 
@@ -372,12 +380,12 @@ class AgentRuntime {
372
380
  }
373
381
 
374
382
  case ModelProvider.Jina: {
375
- runtimeModel = new LobeJinaAI(params.jina ?? {});
383
+ runtimeModel = new LobeJinaAI(params.jina);
376
384
  break;
377
385
  }
378
386
 
379
387
  case ModelProvider.Cloudflare: {
380
- runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
388
+ runtimeModel = new LobeCloudflareAI(params.cloudflare);
381
389
  break;
382
390
  }
383
391
 
@@ -396,8 +404,9 @@ class AgentRuntime {
396
404
  break;
397
405
  }
398
406
 
407
+ case ModelProvider.Volcengine:
399
408
  case ModelProvider.Doubao: {
400
- runtimeModel = new LobeDoubaoAI(params.doubao);
409
+ runtimeModel = new LobeVolcengineAI(params.volcengine || params.doubao);
401
410
  break;
402
411
  }
403
412
 
@@ -0,0 +1,109 @@
1
+ import createClient, { ModelClient } from '@azure-rest/ai-inference';
2
+ import { AzureKeyCredential } from '@azure/core-auth';
3
+ import OpenAI from 'openai';
4
+
5
+ import { LobeRuntimeAI } from '../BaseAI';
6
+ import { AgentRuntimeErrorType } from '../error';
7
+ import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
8
+ import { AgentRuntimeError } from '../utils/createError';
9
+ import { debugStream } from '../utils/debugStream';
10
+ import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
11
+ import { StreamingResponse } from '../utils/response';
12
+ import { OpenAIStream, createSSEDataExtractor } from '../utils/streams';
13
+
14
+ export class LobeAzureAI implements LobeRuntimeAI {
15
+ client: ModelClient;
16
+
17
+ constructor(params?: { apiKey?: string; apiVersion?: string; baseURL?: string }) {
18
+ if (!params?.apiKey || !params?.baseURL)
19
+ throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
20
+
21
+ this.client = createClient(params?.baseURL, new AzureKeyCredential(params?.apiKey));
22
+
23
+ this.baseURL = params?.baseURL;
24
+ }
25
+
26
+ baseURL: string;
27
+
28
+ async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
29
+ const { messages, model, ...params } = payload;
30
+ // o1 series models on Azure OpenAI does not support streaming currently
31
+ const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
32
+ try {
33
+ const response = this.client.path('/chat/completions').post({
34
+ body: {
35
+ messages: messages as OpenAI.ChatCompletionMessageParam[],
36
+ model,
37
+ ...params,
38
+ stream: enableStreaming,
39
+ tool_choice: params.tools ? 'auto' : undefined,
40
+ },
41
+ });
42
+
43
+ if (enableStreaming) {
44
+ const stream = await response.asBrowserStream();
45
+
46
+ const [prod, debug] = stream.body!.tee();
47
+
48
+ if (process.env.DEBUG_AZURE_AI_CHAT_COMPLETION === '1') {
49
+ debugStream(debug).catch(console.error);
50
+ }
51
+
52
+ return StreamingResponse(
53
+ OpenAIStream(prod.pipeThrough(createSSEDataExtractor()), {
54
+ callbacks: options?.callback,
55
+ }),
56
+ {
57
+ headers: options?.headers,
58
+ },
59
+ );
60
+ } else {
61
+ const res = await response;
62
+
63
+ // the azure AI inference response is openai compatible
64
+ const stream = transformResponseToStream(res.body as OpenAI.ChatCompletion);
65
+ return StreamingResponse(OpenAIStream(stream, { callbacks: options?.callback }), {
66
+ headers: options?.headers,
67
+ });
68
+ }
69
+ } catch (e) {
70
+ let error = e as { [key: string]: any; code: string; message: string };
71
+
72
+ if (error.code) {
73
+ switch (error.code) {
74
+ case 'DeploymentNotFound': {
75
+ error = { ...error, deployId: model };
76
+ }
77
+ }
78
+ } else {
79
+ error = {
80
+ cause: error.cause,
81
+ message: error.message,
82
+ name: error.name,
83
+ } as any;
84
+ }
85
+
86
+ const errorType = error.code
87
+ ? AgentRuntimeErrorType.ProviderBizError
88
+ : AgentRuntimeErrorType.AgentRuntimeError;
89
+
90
+ throw AgentRuntimeError.chat({
91
+ endpoint: this.maskSensitiveUrl(this.baseURL),
92
+ error,
93
+ errorType,
94
+ provider: ModelProvider.Azure,
95
+ });
96
+ }
97
+ }
98
+
99
+ private maskSensitiveUrl = (url: string) => {
100
+ // 使用正则表达式匹配 'https://' 后面和 '.azure.com/' 前面的内容
101
+ const regex = /^(https:\/\/)([^.]+)(\.azure\.com\/.*)$/;
102
+
103
+ // 使用替换函数
104
+ return url.replace(regex, (match, protocol, subdomain, rest) => {
105
+ // 将子域名替换为 '***'
106
+ return `${protocol}***${rest}`;
107
+ });
108
+ };
109
+ }