@lobehub/lobehub 2.0.0-next.1 → 2.0.0-next.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +61 -0
  2. package/changelog/v1.json +18 -0
  3. package/package.json +1 -1
  4. package/packages/const/src/models.ts +13 -0
  5. package/packages/model-bank/src/aiModels/azure.ts +155 -0
  6. package/packages/model-bank/src/aiModels/bedrock.ts +44 -0
  7. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.ts +1 -1
  8. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +33 -3
  9. package/packages/model-runtime/src/core/parameterResolver.ts +3 -0
  10. package/packages/model-runtime/src/providers/azureOpenai/index.ts +2 -1
  11. package/src/app/(backend)/oidc/consent/route.ts +0 -1
  12. package/src/app/[variants]/(main)/settings/_layout/SettingsContent.tsx +0 -3
  13. package/src/features/AgentSetting/AgentPlugin/index.tsx +20 -12
  14. package/src/app/[variants]/(main)/settings/llm/ProviderList/Azure/index.tsx +0 -93
  15. package/src/app/[variants]/(main)/settings/llm/ProviderList/Bedrock/index.tsx +0 -70
  16. package/src/app/[variants]/(main)/settings/llm/ProviderList/Cloudflare/index.tsx +0 -39
  17. package/src/app/[variants]/(main)/settings/llm/ProviderList/Github/index.tsx +0 -52
  18. package/src/app/[variants]/(main)/settings/llm/ProviderList/HuggingFace/index.tsx +0 -52
  19. package/src/app/[variants]/(main)/settings/llm/ProviderList/Ollama/index.tsx +0 -20
  20. package/src/app/[variants]/(main)/settings/llm/ProviderList/OpenAI/index.tsx +0 -17
  21. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +0 -132
  22. package/src/app/[variants]/(main)/settings/llm/components/Checker.tsx +0 -118
  23. package/src/app/[variants]/(main)/settings/llm/components/ProviderConfig/index.tsx +0 -303
  24. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/CustomModelOption.tsx +0 -98
  25. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/ModelConfigModal/Form.tsx +0 -104
  26. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/ModelConfigModal/index.tsx +0 -77
  27. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx +0 -105
  28. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/Option.tsx +0 -68
  29. package/src/app/[variants]/(main)/settings/llm/components/ProviderModelList/index.tsx +0 -146
  30. package/src/app/[variants]/(main)/settings/llm/const.ts +0 -20
  31. package/src/app/[variants]/(main)/settings/llm/features/Footer.tsx +0 -35
  32. package/src/app/[variants]/(main)/settings/llm/index.tsx +0 -30
  33. package/src/app/[variants]/(main)/settings/llm/type.ts +0 -5
package/CHANGELOG.md CHANGED
@@ -2,6 +2,67 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 2.0.0-next.3](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.2...v2.0.0-next.3)
6
+
7
+ <sup>Released on **2025-10-30**</sup>
8
+
9
+ #### ♻ Code Refactoring
10
+
11
+ - **misc**: Remove llm page.
12
+
13
+ #### 💄 Styles
14
+
15
+ - **misc**: Add new bedrock model support, add pricing info for Azure GPT-5 series models.
16
+
17
+ <br/>
18
+
19
+ <details>
20
+ <summary><kbd>Improvements and Fixes</kbd></summary>
21
+
22
+ #### Code refactoring
23
+
24
+ - **misc**: Remove llm page, closes [#9940](https://github.com/lobehub/lobe-chat/issues/9940) ([6ec01a3](https://github.com/lobehub/lobe-chat/commit/6ec01a3))
25
+
26
+ #### Styles
27
+
28
+ - **misc**: Add new bedrock model support, closes [#9826](https://github.com/lobehub/lobe-chat/issues/9826) ([1b8a981](https://github.com/lobehub/lobe-chat/commit/1b8a981))
29
+ - **misc**: Add pricing info for Azure GPT-5 series models, closes [#9833](https://github.com/lobehub/lobe-chat/issues/9833) ([39a80c5](https://github.com/lobehub/lobe-chat/commit/39a80c5))
30
+
31
+ </details>
32
+
33
+ <div align="right">
34
+
35
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
36
+
37
+ </div>
38
+
39
+ ## [Version 2.0.0-next.2](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.1...v2.0.0-next.2)
40
+
41
+ <sup>Released on **2025-10-30**</sup>
42
+
43
+ #### 🐛 Bug Fixes
44
+
45
+ - **misc**: Hide marketplace link from Plugin List when market disabled, OIDC error when connecting to self-host instance, only include input_fidelity parameter for gpt-image-1.
46
+
47
+ <br/>
48
+
49
+ <details>
50
+ <summary><kbd>Improvements and Fixes</kbd></summary>
51
+
52
+ #### What's fixed
53
+
54
+ - **misc**: Hide marketplace link from Plugin List when market disabled, closes [#9929](https://github.com/lobehub/lobe-chat/issues/9929) ([e303979](https://github.com/lobehub/lobe-chat/commit/e303979))
55
+ - **misc**: OIDC error when connecting to self-host instance, closes [#9916](https://github.com/lobehub/lobe-chat/issues/9916) ([7a2ca19](https://github.com/lobehub/lobe-chat/commit/7a2ca19))
56
+ - **misc**: Only include input_fidelity parameter for gpt-image-1., closes [#9920](https://github.com/lobehub/lobe-chat/issues/9920) ([65dbc63](https://github.com/lobehub/lobe-chat/commit/65dbc63))
57
+
58
+ </details>
59
+
60
+ <div align="right">
61
+
62
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
63
+
64
+ </div>
65
+
5
66
  ## [Version 2.0.0-next.1](https://github.com/lobehub/lobe-chat/compare/v1.143.0-next.2...v2.0.0-next.1)
6
67
 
7
68
  <sup>Released on **2025-10-30**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "improvements": [
5
+ "Add new bedrock model support, add pricing info for Azure GPT-5 series models."
6
+ ]
7
+ },
8
+ "date": "2025-10-30",
9
+ "version": "2.0.0-next.3"
10
+ },
11
+ {
12
+ "children": {
13
+ "fixes": [
14
+ "Hide marketplace link from Plugin List when market disabled, OIDC error when connecting to self-host instance, only include input_fidelity parameter for gpt-image-1."
15
+ ]
16
+ },
17
+ "date": "2025-10-30",
18
+ "version": "2.0.0-next.2"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "features": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/lobehub",
3
- "version": "2.0.0-next.1",
3
+ "version": "2.0.0-next.3",
4
4
  "description": "LobeHub - an open-source, comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -35,6 +35,9 @@ export const responsesAPIModels = new Set([
35
35
  'codex-mini-latest',
36
36
  'computer-use-preview',
37
37
  'computer-use-preview-2025-03-11',
38
+ 'gpt-5-codex',
39
+ 'gpt-5-pro',
40
+ 'gpt-5-pro-2025-10-06',
38
41
  ]);
39
42
 
40
43
  /**
@@ -56,6 +59,11 @@ export const contextCachingModels = new Set([
56
59
  'claude-3-5-sonnet-20240620',
57
60
  'claude-3-5-haiku-latest',
58
61
  'claude-3-5-haiku-20241022',
62
+ // Bedrock model IDs
63
+ 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
64
+ 'anthropic.claude-sonnet-4-5-20250929-v1:0',
65
+ 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
66
+ 'anthropic.claude-haiku-4-5-20251001-v1:0',
59
67
  ]);
60
68
 
61
69
  export const thinkingWithToolClaudeModels = new Set([
@@ -69,4 +77,9 @@ export const thinkingWithToolClaudeModels = new Set([
69
77
  'anthropic/claude-sonnet-4.5',
70
78
  'claude-3-7-sonnet-latest',
71
79
  'claude-3-7-sonnet-20250219',
80
+ // Bedrock model IDs
81
+ 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
82
+ 'anthropic.claude-sonnet-4-5-20250929-v1:0',
83
+ 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
84
+ 'anthropic.claude-haiku-4-5-20251001-v1:0',
72
85
  ]);
@@ -1,6 +1,161 @@
1
1
  import { AIChatModelCard, AIImageModelCard } from '../types/aiModel';
2
2
 
3
3
  const azureChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ functionCall: true,
7
+ reasoning: true,
8
+ structuredOutput: true,
9
+ vision: true,
10
+ },
11
+ config: {
12
+ deploymentName: 'gpt-5-pro',
13
+ },
14
+ contextWindowTokens: 400_000,
15
+ description:
16
+ 'GPT-5 Pro 是 GPT-5 系列的高级版本,具备增强的推理能力。支持结构化输出、函数调用和文本/图像处理,适用于复杂的专业任务。',
17
+ displayName: 'GPT-5 Pro',
18
+ enabled: true,
19
+ id: 'gpt-5-pro',
20
+ maxOutput: 128_000,
21
+ pricing: {
22
+ units: [
23
+ { name: 'textInput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
24
+ { name: 'textOutput', rate: 120, strategy: 'fixed', unit: 'millionTokens' },
25
+ ],
26
+ },
27
+ releasedAt: '2025-10-06',
28
+ type: 'chat',
29
+ },
30
+ {
31
+ abilities: {
32
+ functionCall: true,
33
+ structuredOutput: true,
34
+ },
35
+ config: {
36
+ deploymentName: 'gpt-5-codex',
37
+ },
38
+ contextWindowTokens: 400_000,
39
+ description:
40
+ 'GPT-5 Codex 专为编程任务优化,针对 Codex CLI 和 VS Code 扩展进行了优化。支持结构化输出和函数调用,适用于代码生成和分析。',
41
+ displayName: 'GPT-5 Codex',
42
+ enabled: true,
43
+ id: 'gpt-5-codex',
44
+ maxOutput: 128_000,
45
+ pricing: {
46
+ units: [
47
+ { name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
48
+ { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
49
+ { name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
50
+ ],
51
+ },
52
+ releasedAt: '2025-09-11',
53
+ type: 'chat',
54
+ },
55
+ {
56
+ abilities: {
57
+ functionCall: true,
58
+ reasoning: true,
59
+ structuredOutput: true,
60
+ vision: true,
61
+ },
62
+ config: {
63
+ deploymentName: 'gpt-5',
64
+ },
65
+ contextWindowTokens: 400_000,
66
+ description:
67
+ 'GPT-5 是 OpenAI 最新的旗舰模型,具备卓越的推理能力。支持文本和图像输入,结构化输出和并行工具调用,适用于需要深度理解和分析的复杂任务。',
68
+ displayName: 'GPT-5',
69
+ enabled: true,
70
+ id: 'gpt-5',
71
+ maxOutput: 128_000,
72
+ pricing: {
73
+ units: [
74
+ { name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
75
+ { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
76
+ { name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
77
+ ],
78
+ },
79
+ releasedAt: '2025-08-07',
80
+ type: 'chat',
81
+ },
82
+ {
83
+ abilities: {
84
+ functionCall: true,
85
+ reasoning: true,
86
+ structuredOutput: true,
87
+ vision: true,
88
+ },
89
+ config: {
90
+ deploymentName: 'gpt-5-mini',
91
+ },
92
+ contextWindowTokens: 400_000,
93
+ description:
94
+ 'GPT-5 Mini 提供与 GPT-5 相似的能力,但更加高效和经济。支持推理、函数调用和视觉功能,适合大规模部署和对成本敏感的应用场景。',
95
+ displayName: 'GPT-5 Mini',
96
+ enabled: true,
97
+ id: 'gpt-5-mini',
98
+ maxOutput: 128_000,
99
+ pricing: {
100
+ units: [
101
+ { name: 'textInput', rate: 0.25, strategy: 'fixed', unit: 'millionTokens' },
102
+ { name: 'textOutput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
103
+ { name: 'textInput_cacheRead', rate: 0.025, strategy: 'fixed', unit: 'millionTokens' },
104
+ ],
105
+ },
106
+ releasedAt: '2025-08-07',
107
+ type: 'chat',
108
+ },
109
+ {
110
+ abilities: {
111
+ functionCall: true,
112
+ reasoning: true,
113
+ structuredOutput: true,
114
+ vision: true,
115
+ },
116
+ config: {
117
+ deploymentName: 'gpt-5-nano',
118
+ },
119
+ contextWindowTokens: 400_000,
120
+ description:
121
+ 'GPT-5 Nano 是 GPT-5 系列中最小、最快的版本。在保持核心能力的同时,提供超低延迟和成本效益,适合边缘计算和实时应用。',
122
+ displayName: 'GPT-5 Nano',
123
+ enabled: true,
124
+ id: 'gpt-5-nano',
125
+ maxOutput: 128_000,
126
+ pricing: {
127
+ units: [
128
+ { name: 'textInput', rate: 0.05, strategy: 'fixed', unit: 'millionTokens' },
129
+ { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
130
+ { name: 'textInput_cacheRead', rate: 0.005, strategy: 'fixed', unit: 'millionTokens' },
131
+ ],
132
+ },
133
+ releasedAt: '2025-08-07',
134
+ type: 'chat',
135
+ },
136
+ {
137
+ abilities: {
138
+ vision: true,
139
+ },
140
+ config: {
141
+ deploymentName: 'gpt-5-chat',
142
+ },
143
+ contextWindowTokens: 128_000,
144
+ description:
145
+ 'GPT-5 Chat 专为对话场景优化的预览版本。支持文本和图像输入,仅输出文本,适用于聊天机器人和对话式AI应用。',
146
+ displayName: 'GPT-5 Chat',
147
+ id: 'gpt-5-chat',
148
+ maxOutput: 16_384,
149
+ pricing: {
150
+ units: [
151
+ { name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
152
+ { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
153
+ { name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
154
+ ],
155
+ },
156
+ releasedAt: '2025-08-07',
157
+ type: 'chat',
158
+ },
4
159
  {
5
160
  abilities: {
6
161
  functionCall: true,
@@ -1,6 +1,50 @@
1
1
  import { AIChatModelCard } from '../types/aiModel';
2
2
 
3
3
  const bedrockChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ functionCall: true,
7
+ reasoning: true,
8
+ structuredOutput: true,
9
+ vision: true,
10
+ },
11
+ contextWindowTokens: 200_000,
12
+ description: 'Claude Sonnet 4.5 是 Anthropic 迄今为止最智能的模型。',
13
+ displayName: 'Claude Sonnet 4.5',
14
+ enabled: true,
15
+ id: 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
16
+ maxOutput: 64_000,
17
+ pricing: {
18
+ units: [
19
+ { name: 'textInput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
20
+ { name: 'textOutput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
21
+ ],
22
+ },
23
+ releasedAt: '2025-09-29',
24
+ type: 'chat',
25
+ },
26
+ {
27
+ abilities: {
28
+ functionCall: true,
29
+ reasoning: true,
30
+ structuredOutput: true,
31
+ vision: true,
32
+ },
33
+ contextWindowTokens: 200_000,
34
+ description: 'Claude Haiku 4.5 是 Anthropic 最快且最智能的 Haiku 模型,具有闪电般的速度和扩展思考能力。',
35
+ displayName: 'Claude Haiku 4.5',
36
+ enabled: true,
37
+ id: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
38
+ maxOutput: 64_000,
39
+ pricing: {
40
+ units: [
41
+ { name: 'textInput', rate: 1, strategy: 'fixed', unit: 'millionTokens' },
42
+ { name: 'textOutput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
43
+ ],
44
+ },
45
+ releasedAt: '2025-10-15',
46
+ type: 'chat',
47
+ },
4
48
  /*
5
49
  // TODO: Not support for now
6
50
  {
@@ -67,7 +67,7 @@ async function generateByImageMode(
67
67
  const defaultInput = {
68
68
  n: 1,
69
69
  ...(model.includes('dall-e') ? { response_format: 'b64_json' } : {}),
70
- ...(isImageEdit ? { input_fidelity: 'high' } : {}),
70
+ ...(isImageEdit && model === 'gpt-image-1' ? { input_fidelity: 'high' } : {}),
71
71
  };
72
72
 
73
73
  const options = cleanObject({
@@ -1254,7 +1254,6 @@ describe('LobeOpenAICompatibleFactory', () => {
1254
1254
  );
1255
1255
  expect(instance['client'].images.edit).toHaveBeenCalledWith({
1256
1256
  image: expect.any(File),
1257
- input_fidelity: 'high',
1258
1257
  mask: 'https://example.com/mask.jpg',
1259
1258
  model: 'dall-e-2',
1260
1259
  n: 1,
@@ -1301,7 +1300,6 @@ describe('LobeOpenAICompatibleFactory', () => {
1301
1300
 
1302
1301
  expect(instance['client'].images.edit).toHaveBeenCalledWith({
1303
1302
  image: [mockFile1, mockFile2],
1304
- input_fidelity: 'high',
1305
1303
  model: 'dall-e-2',
1306
1304
  n: 1,
1307
1305
  prompt: 'Merge these images',
@@ -1330,6 +1328,39 @@ describe('LobeOpenAICompatibleFactory', () => {
1330
1328
  'Failed to convert image URLs to File objects: Error: Failed to download image',
1331
1329
  );
1332
1330
  });
1331
+
1332
+ it('should include input_fidelity parameter for gpt-image-1 model', async () => {
1333
+ const mockResponse = {
1334
+ data: [{ b64_json: 'gpt-image-edited-base64' }],
1335
+ };
1336
+
1337
+ const mockFile = new File(['content'], 'test-image.jpg', { type: 'image/jpeg' });
1338
+
1339
+ vi.mocked(openaiHelpers.convertImageUrlToFile).mockResolvedValue(mockFile);
1340
+ vi.spyOn(instance['client'].images, 'edit').mockResolvedValue(mockResponse as any);
1341
+
1342
+ const payload = {
1343
+ model: 'gpt-image-1',
1344
+ params: {
1345
+ imageUrl: 'https://example.com/image.jpg',
1346
+ prompt: 'Edit this image with gpt-image-1',
1347
+ },
1348
+ };
1349
+
1350
+ const result = await (instance as any).createImage(payload);
1351
+
1352
+ expect(instance['client'].images.edit).toHaveBeenCalledWith({
1353
+ image: expect.any(File),
1354
+ input_fidelity: 'high',
1355
+ model: 'gpt-image-1',
1356
+ n: 1,
1357
+ prompt: 'Edit this image with gpt-image-1',
1358
+ });
1359
+
1360
+ expect(result).toEqual({
1361
+ imageUrl: 'data:image/png;base64,gpt-image-edited-base64',
1362
+ });
1363
+ });
1333
1364
  });
1334
1365
 
1335
1366
  describe('error handling', () => {
@@ -1431,7 +1462,6 @@ describe('LobeOpenAICompatibleFactory', () => {
1431
1462
  expect(instance['client'].images.edit).toHaveBeenCalledWith({
1432
1463
  customParam: 'should remain unchanged',
1433
1464
  image: expect.any(File),
1434
- input_fidelity: 'high',
1435
1465
  model: 'dall-e-2',
1436
1466
  n: 1,
1437
1467
  prompt: 'Test prompt',
@@ -263,6 +263,7 @@ export const MODEL_PARAMETER_CONFLICTS = {
263
263
  'claude-opus-4-20250514',
264
264
  'claude-sonnet-4-20250514',
265
265
  'claude-sonnet-4-5-20250929',
266
+ 'claude-haiku-4-5-20251001',
266
267
  // Bedrock model IDs
267
268
  'anthropic.claude-opus-4-1-20250805-v1:0',
268
269
  'us.anthropic.claude-opus-4-1-20250805-v1:0',
@@ -272,5 +273,7 @@ export const MODEL_PARAMETER_CONFLICTS = {
272
273
  'us.anthropic.claude-sonnet-4-20250514-v1:0',
273
274
  'anthropic.claude-sonnet-4-5-20250929-v1:0',
274
275
  'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
276
+ 'anthropic.claude-haiku-4-5-20251001-v1:0',
277
+ 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
275
278
  ]),
276
279
  };
@@ -51,7 +51,8 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
51
51
  ...message,
52
52
  role:
53
53
  // Convert 'system' role to 'user' or 'developer' based on the model
54
- (model.includes('o1') || model.includes('o3')) && message.role === 'system'
54
+ (model.includes('o1') || model.includes('o3') || model.includes('gpt-5')) &&
55
+ message.role === 'system'
55
56
  ? [...systemToUserModels].some((sub) => model.includes(sub))
56
57
  ? 'user'
57
58
  : 'developer'
@@ -122,7 +122,6 @@ export async function POST(request: NextRequest) {
122
122
  }
123
123
 
124
124
  return NextResponse.redirect(finalRedirectUrl, {
125
- headers: request.headers,
126
125
  status: 303,
127
126
  });
128
127
  } catch (error) {
@@ -15,9 +15,6 @@ const componentMap = {
15
15
  [SettingsTabs.Agent]: dynamic(() => import('../agent'), {
16
16
  loading: () => <Loading />,
17
17
  }),
18
- [SettingsTabs.LLM]: dynamic(() => import('../llm'), {
19
- loading: () => <Loading />,
20
- }),
21
18
  [SettingsTabs.Provider]: dynamic(() => import('../provider'), {
22
19
  loading: () => <Loading />,
23
20
  }),
@@ -34,7 +34,7 @@ const AgentPlugin = memo(() => {
34
34
  s.toggleAgentPlugin,
35
35
  ]);
36
36
 
37
- const { showDalle } = useServerConfigStore(featureFlagsSelectors);
37
+ const { showDalle, showMarket } = useServerConfigStore(featureFlagsSelectors);
38
38
  const installedPlugins = useToolStore(toolSelectors.metaList(showDalle), isEqual);
39
39
 
40
40
  const { isLoading } = useFetchInstalledPlugins();
@@ -112,16 +112,18 @@ const AgentPlugin = memo(() => {
112
112
  />
113
113
  </Tooltip>
114
114
  ) : null}
115
- <Tooltip title={t('plugin.store')}>
116
- <Button
117
- icon={Store}
118
- onClick={(e) => {
119
- e.stopPropagation();
120
- setShowStore(true);
121
- }}
122
- size={'small'}
123
- />
124
- </Tooltip>
115
+ {showMarket ? (
116
+ <Tooltip title={t('plugin.store')}>
117
+ <Button
118
+ icon={Store}
119
+ onClick={(e) => {
120
+ e.stopPropagation();
121
+ setShowStore(true);
122
+ }}
123
+ size={'small'}
124
+ />
125
+ </Tooltip>
126
+ ) : null}
125
127
  </Space.Compact>
126
128
  );
127
129
 
@@ -149,7 +151,13 @@ const AgentPlugin = memo(() => {
149
151
  );
150
152
 
151
153
  const plugin: FormGroupItemType = {
152
- children: isLoading ? loadingSkeleton : isEmpty ? empty : [...deprecatedList, ...list],
154
+ children: isLoading
155
+ ? loadingSkeleton
156
+ : isEmpty
157
+ ? showMarket
158
+ ? empty
159
+ : []
160
+ : [...deprecatedList, ...list],
153
161
  extra,
154
162
  title: t('settingPlugin.title'),
155
163
  };
@@ -1,93 +0,0 @@
1
- 'use client';
2
-
3
- import { AutoComplete, Input, InputPassword, Markdown } from '@lobehub/ui';
4
- import { createStyles } from 'antd-style';
5
- import { ModelProvider } from 'model-bank';
6
- import { useTranslation } from 'react-i18next';
7
-
8
- import { AzureProviderCard } from '@/config/modelProviders';
9
- import { useUserStore } from '@/store/user';
10
- import { modelProviderSelectors } from '@/store/user/selectors';
11
-
12
- import { KeyVaultsConfigKey, LLMProviderApiTokenKey } from '../../const';
13
- import { ProviderItem } from '../../type';
14
-
15
- const useStyles = createStyles(({ css, token }) => ({
16
- markdown: css`
17
- p {
18
- color: ${token.colorTextDescription} !important;
19
- }
20
- `,
21
- tip: css`
22
- font-size: 12px;
23
- color: ${token.colorTextDescription};
24
- `,
25
- }));
26
-
27
- const providerKey = ModelProvider.Azure;
28
-
29
- export const useAzureProvider = (): ProviderItem => {
30
- const { t } = useTranslation('modelProvider');
31
- const { styles } = useStyles();
32
-
33
- // Get the first model card's deployment name as the check model
34
- const checkModel = useUserStore((s) => {
35
- const chatModelCards = modelProviderSelectors.getModelCardsById(providerKey)(s);
36
-
37
- if (chatModelCards.length > 0) {
38
- return chatModelCards[0].deploymentName;
39
- }
40
-
41
- return 'gpt-35-turbo';
42
- });
43
- return {
44
- ...AzureProviderCard,
45
- apiKeyItems: [
46
- {
47
- children: (
48
- <InputPassword autoComplete={'new-password'} placeholder={t('azure.token.placeholder')} />
49
- ),
50
- desc: t('azure.token.desc'),
51
- label: t('azure.token.title'),
52
- name: [KeyVaultsConfigKey, providerKey, LLMProviderApiTokenKey],
53
- },
54
- {
55
- children: <Input allowClear placeholder={t('azure.endpoint.placeholder')} />,
56
- desc: t('azure.endpoint.desc'),
57
- label: t('azure.endpoint.title'),
58
- name: [KeyVaultsConfigKey, providerKey, 'endpoint'],
59
- },
60
- {
61
- children: (
62
- <AutoComplete
63
- options={[
64
- '2024-06-01',
65
- '2024-02-01',
66
- '2024-05-01-preview',
67
- '2024-04-01-preview',
68
- '2024-03-01-preview',
69
- '2024-02-15-preview',
70
- '2023-10-01-preview',
71
- '2023-06-01-preview',
72
- '2023-05-15',
73
- ].map((i) => ({ label: i, value: i }))}
74
- placeholder={'20XX-XX-XX'}
75
- />
76
- ),
77
- desc: (
78
- <Markdown className={styles.markdown} fontSize={12} variant={'chat'}>
79
- {t('azure.azureApiVersion.desc')}
80
- </Markdown>
81
- ),
82
- label: t('azure.azureApiVersion.title'),
83
- name: [KeyVaultsConfigKey, providerKey, 'apiVersion'],
84
- },
85
- ],
86
- checkModel,
87
- modelList: {
88
- azureDeployName: true,
89
- notFoundContent: t('azure.empty'),
90
- placeholder: t('azure.modelListPlaceholder'),
91
- },
92
- };
93
- };
@@ -1,70 +0,0 @@
1
- 'use client';
2
-
3
- import { InputPassword, Select } from '@lobehub/ui';
4
- import { useTranslation } from 'react-i18next';
5
-
6
- import { BedrockProviderCard } from '@/config/modelProviders';
7
- import { GlobalLLMProviderKey } from '@/types/user/settings';
8
-
9
- import { KeyVaultsConfigKey } from '../../const';
10
- import { ProviderItem } from '../../type';
11
-
12
- const providerKey: GlobalLLMProviderKey = 'bedrock';
13
-
14
- export const useBedrockProvider = (): ProviderItem => {
15
- const { t } = useTranslation('modelProvider');
16
-
17
- return {
18
- ...BedrockProviderCard,
19
- apiKeyItems: [
20
- {
21
- children: (
22
- <InputPassword
23
- autoComplete={'new-password'}
24
- placeholder={t(`${providerKey}.accessKeyId.placeholder`)}
25
- />
26
- ),
27
- desc: t(`${providerKey}.accessKeyId.desc`),
28
- label: t(`${providerKey}.accessKeyId.title`),
29
- name: [KeyVaultsConfigKey, providerKey, 'accessKeyId'],
30
- },
31
- {
32
- children: (
33
- <InputPassword
34
- autoComplete={'new-password'}
35
- placeholder={t(`${providerKey}.secretAccessKey.placeholder`)}
36
- />
37
- ),
38
- desc: t(`${providerKey}.secretAccessKey.desc`),
39
- label: t(`${providerKey}.secretAccessKey.title`),
40
- name: [KeyVaultsConfigKey, providerKey, 'secretAccessKey'],
41
- },
42
- {
43
- children: (
44
- <InputPassword
45
- autoComplete={'new-password'}
46
- placeholder={t(`${providerKey}.sessionToken.placeholder`)}
47
- />
48
- ),
49
- desc: t(`${providerKey}.sessionToken.desc`),
50
- label: t(`${providerKey}.sessionToken.title`),
51
- name: [KeyVaultsConfigKey, providerKey, 'sessionToken'],
52
- },
53
- {
54
- children: (
55
- <Select
56
- allowClear
57
- options={['us-east-1', 'us-west-2', 'ap-southeast-1', 'eu-central-1'].map((i) => ({
58
- label: i,
59
- value: i,
60
- }))}
61
- placeholder={'us-east-1'}
62
- />
63
- ),
64
- desc: t(`${providerKey}.region.desc`),
65
- label: t(`${providerKey}.region.title`),
66
- name: [KeyVaultsConfigKey, providerKey, 'region'],
67
- },
68
- ],
69
- };
70
- };