@lobehub/lobehub 2.0.0-next.101 → 2.0.0-next.102

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/package.json +1 -1
  4. package/packages/model-bank/package.json +1 -0
  5. package/packages/model-bank/src/aiModels/aihubmix.ts +27 -0
  6. package/packages/model-bank/src/aiModels/google.ts +69 -10
  7. package/packages/model-bank/src/aiModels/index.ts +3 -0
  8. package/packages/model-bank/src/aiModels/infiniai.ts +5 -22
  9. package/packages/model-bank/src/aiModels/ollamacloud.ts +12 -0
  10. package/packages/model-bank/src/aiModels/siliconcloud.ts +0 -61
  11. package/packages/model-bank/src/aiModels/vertexai.ts +88 -1
  12. package/packages/model-bank/src/aiModels/zenmux.ts +1423 -0
  13. package/packages/model-bank/src/const/modelProvider.ts +1 -0
  14. package/packages/model-bank/src/standard-parameters/index.ts +9 -0
  15. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +2 -2
  16. package/packages/model-runtime/src/core/streams/google/index.ts +7 -2
  17. package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +166 -166
  18. package/packages/model-runtime/src/index.ts +1 -1
  19. package/packages/model-runtime/src/providers/google/createImage.ts +1 -0
  20. package/packages/model-runtime/src/providers/google/index.ts +11 -1
  21. package/packages/model-runtime/src/providers/zenmux/index.test.ts +320 -0
  22. package/packages/model-runtime/src/providers/zenmux/index.ts +84 -0
  23. package/packages/model-runtime/src/runtimeMap.ts +2 -0
  24. package/packages/types/src/user/settings/keyVaults.ts +1 -0
  25. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/components/ResolutionSelect.tsx +88 -0
  26. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/index.tsx +9 -0
  27. package/src/config/modelProviders/index.ts +3 -0
  28. package/src/config/modelProviders/zenmux.ts +21 -0
  29. package/src/envs/llm.ts +6 -0
  30. package/src/locales/default/image.ts +8 -0
  31. package/src/store/chat/slices/aiChat/actions/__tests__/conversationLifecycle.test.ts +3 -0
  32. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +11 -0
@@ -31,6 +31,7 @@ export { LobeQwenAI } from './providers/qwen';
31
31
  export { LobeStepfunAI } from './providers/stepfun';
32
32
  export { LobeTogetherAI } from './providers/togetherai';
33
33
  export { LobeVolcengineAI } from './providers/volcengine';
34
+ export { LobeZenMuxAI } from './providers/zenmux';
34
35
  export { LobeZeroOneAI } from './providers/zeroone';
35
36
  export { LobeZhipuAI } from './providers/zhipu';
36
37
  export * from './types';
@@ -40,4 +41,3 @@ export { AgentRuntimeError } from './utils/createError';
40
41
  export { getModelPropertyWithFallback } from './utils/getFallbackModelProperty';
41
42
  export { getModelPricing } from './utils/getModelPricing';
42
43
  export { parseDataUri } from './utils/uriParser';
43
-
@@ -147,6 +147,7 @@ async function generateImageByChatModel(
147
147
  ? {
148
148
  imageConfig: {
149
149
  aspectRatio: params.aspectRatio,
150
+ imageSize: params.resolution,
150
151
  },
151
152
  }
152
153
  : {}),
@@ -38,6 +38,8 @@ const modelsWithModalities = new Set([
38
38
  'gemini-2.0-flash-preview-image-generation',
39
39
  'gemini-2.5-flash-image-preview',
40
40
  'gemini-2.5-flash-image',
41
+ 'gemini-3-pro-image-preview',
42
+ 'nano-banana-pro-preview',
41
43
  ]);
42
44
 
43
45
  const modelsDisableInstuction = new Set([
@@ -51,6 +53,11 @@ const modelsDisableInstuction = new Set([
51
53
  'gemma-3-12b-it',
52
54
  'gemma-3-27b-it',
53
55
  'gemma-3n-e4b-it',
56
+ // ZenMux
57
+ 'google/gemini-2.5-flash-image-free',
58
+ 'google/gemini-2.5-flash-image',
59
+ 'google/gemini-3-pro-image-preview-free',
60
+ 'google/gemini-3-pro-image-preview',
54
61
  ]);
55
62
 
56
63
  const PRO_THINKING_MIN = 128;
@@ -203,7 +210,10 @@ export class LobeGoogleAI implements LobeRuntimeAI {
203
210
  includeThoughts:
204
211
  (!!thinkingBudget ||
205
212
  !!thinkingLevel ||
206
- (model && (model.includes('-3-pro-image') || model.includes('thinking')))) &&
213
+ (model &&
214
+ (model.includes('-3-pro-image') ||
215
+ model.includes('nano-banana-pro') ||
216
+ model.includes('thinking')))) &&
207
217
  resolvedThinkingBudget !== 0
208
218
  ? true
209
219
  : undefined,
@@ -0,0 +1,320 @@
1
+ // @vitest-environment node
2
+ import { ModelProvider } from 'model-bank';
3
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
+
5
+ import * as modelParseModule from '../../utils/modelParse';
6
+ import { LobeZenMuxAI, params } from './index';
7
+
8
+ // Mock external dependencies
9
+ vi.mock('../../utils/modelParse');
10
+
11
+ // Mock console methods
12
+ vi.spyOn(console, 'error').mockImplementation(() => {});
13
+ vi.spyOn(console, 'debug').mockImplementation(() => {});
14
+
15
+ describe('ZenMux Runtime', () => {
16
+ let mockFetch: Mock;
17
+ let mockProcessMultiProviderModelList: Mock;
18
+ let mockDetectModelProvider: Mock;
19
+
20
+ beforeEach(() => {
21
+ // Setup fetch mock
22
+ mockFetch = vi.fn();
23
+ global.fetch = mockFetch;
24
+
25
+ // Setup utility function mocks
26
+ mockProcessMultiProviderModelList = vi.mocked(modelParseModule.processMultiProviderModelList);
27
+ mockDetectModelProvider = vi.mocked(modelParseModule.detectModelProvider);
28
+
29
+ // Clear environment variables
30
+ delete process.env.DEBUG_ZENMUX_CHAT_COMPLETION;
31
+ });
32
+
33
+ afterEach(() => {
34
+ vi.clearAllMocks();
35
+ delete process.env.DEBUG_ZENMUX_CHAT_COMPLETION;
36
+ });
37
+
38
+ describe('Debug Configuration', () => {
39
+ it('should return false when DEBUG_ZENMUX_CHAT_COMPLETION is not set', () => {
40
+ delete process.env.DEBUG_ZENMUX_CHAT_COMPLETION;
41
+ const debugResult = process.env.DEBUG_ZENMUX_CHAT_COMPLETION === '1';
42
+ expect(debugResult).toBe(false);
43
+ });
44
+
45
+ it('should return true when DEBUG_ZENMUX_CHAT_COMPLETION is set to 1', () => {
46
+ process.env.DEBUG_ZENMUX_CHAT_COMPLETION = '1';
47
+ const debugResult = process.env.DEBUG_ZENMUX_CHAT_COMPLETION === '1';
48
+ expect(debugResult).toBe(true);
49
+ });
50
+ });
51
+
52
+ describe('LobeZenMuxAI - custom features', () => {
53
+ describe('Params Export', () => {
54
+ it('should export params object', () => {
55
+ expect(params).toBeDefined();
56
+ expect(params.id).toBe('zenmux');
57
+ });
58
+
59
+ it('should have routers configuration', () => {
60
+ expect(params.routers).toBeDefined();
61
+ expect(typeof params.routers).toBe('function');
62
+ });
63
+
64
+ it('should have models function', () => {
65
+ expect(params.models).toBeDefined();
66
+ expect(typeof params.models).toBe('function');
67
+ });
68
+
69
+ it('should have correct provider ID', () => {
70
+ expect(params.id).toBe(ModelProvider.ZenMux);
71
+ });
72
+
73
+ it('should have chatCompletion handlePayload function', () => {
74
+ expect(params.chatCompletion).toBeDefined();
75
+ expect(params.chatCompletion?.handlePayload).toBeDefined();
76
+ expect(typeof params.chatCompletion?.handlePayload).toBe('function');
77
+ });
78
+ });
79
+
80
+ describe('ChatCompletion HandlePayload', () => {
81
+ it('should map reasoning_effort to reasoning.effort', () => {
82
+ const payload = {
83
+ model: 'gpt-4o',
84
+ messages: [],
85
+ reasoning_effort: 'high' as const,
86
+ } as any;
87
+
88
+ const result = params.chatCompletion?.handlePayload?.(payload);
89
+
90
+ expect(result).toBeDefined();
91
+ expect(result?.reasoning).toBeDefined();
92
+ expect(result?.reasoning?.effort).toBe('high');
93
+ expect(result?.reasoning_effort).toBeUndefined();
94
+ });
95
+
96
+ it('should map thinking.budget_tokens to reasoning.max_tokens', () => {
97
+ const payload = {
98
+ model: 'gpt-4o',
99
+ messages: [],
100
+ thinking: { budget_tokens: 2048, type: 'enabled' as const },
101
+ } as any;
102
+
103
+ const result = params.chatCompletion?.handlePayload?.(payload);
104
+
105
+ expect(result).toBeDefined();
106
+ expect(result?.reasoning).toBeDefined();
107
+ expect(result?.reasoning?.max_tokens).toBe(2048);
108
+ expect(result?.thinking).toBeUndefined();
109
+ });
110
+
111
+ it('should map thinking.type=enabled to reasoning.enabled=true', () => {
112
+ const payload = {
113
+ model: 'gpt-4o',
114
+ messages: [],
115
+ thinking: { budget_tokens: 1024, type: 'enabled' as const },
116
+ } as any;
117
+
118
+ const result = params.chatCompletion?.handlePayload?.(payload);
119
+
120
+ expect(result).toBeDefined();
121
+ expect(result?.reasoning).toBeDefined();
122
+ expect(result?.reasoning?.enabled).toBe(true);
123
+ expect(result?.thinking).toBeUndefined();
124
+ });
125
+
126
+ it('should not set reasoning.enabled when thinking.type=disabled', () => {
127
+ const payload = {
128
+ model: 'gpt-4o',
129
+ messages: [],
130
+ thinking: { budget_tokens: 1024, type: 'disabled' as const },
131
+ } as any;
132
+
133
+ const result = params.chatCompletion?.handlePayload?.(payload);
134
+
135
+ expect(result).toBeDefined();
136
+ // When thinking.type is 'disabled', max_tokens should be mapped and enabled should be false
137
+ expect(result?.reasoning?.max_tokens).toBe(1024);
138
+ expect(result?.reasoning?.enabled).toBe(false);
139
+ });
140
+
141
+ it('should map both reasoning_effort and thinking.budget_tokens together', () => {
142
+ const payload = {
143
+ model: 'gpt-4o',
144
+ messages: [],
145
+ reasoning_effort: 'high' as const,
146
+ thinking: { budget_tokens: 2048, type: 'enabled' as const },
147
+ } as any;
148
+
149
+ const result = params.chatCompletion?.handlePayload?.(payload);
150
+
151
+ expect(result).toBeDefined();
152
+ expect(result?.reasoning).toBeDefined();
153
+ expect(result?.reasoning?.effort).toBe('high');
154
+ expect(result?.reasoning?.max_tokens).toBe(2048);
155
+ });
156
+
157
+ it('should preserve existing reasoning properties', () => {
158
+ const payload = {
159
+ model: 'gpt-4o',
160
+ messages: [],
161
+ reasoning: { summary: 'auto' },
162
+ reasoning_effort: 'medium' as const,
163
+ } as any;
164
+
165
+ const result = params.chatCompletion?.handlePayload?.(payload);
166
+
167
+ expect(result).toBeDefined();
168
+ expect(result?.reasoning).toBeDefined();
169
+ expect(result?.reasoning?.summary).toBe('auto');
170
+ expect(result?.reasoning?.effort).toBe('medium');
171
+ });
172
+
173
+ it('should not include reasoning when no reasoning-related properties', () => {
174
+ const payload = {
175
+ model: 'gpt-4o',
176
+ messages: [],
177
+ temperature: 0.7,
178
+ } as any;
179
+
180
+ const result = params.chatCompletion?.handlePayload?.(payload);
181
+
182
+ expect(result).toBeDefined();
183
+ expect(result?.reasoning).toBeUndefined();
184
+ });
185
+
186
+ it('should preserve other payload properties', () => {
187
+ const payload = {
188
+ model: 'gpt-4o',
189
+ messages: [],
190
+ temperature: 0.7,
191
+ top_p: 0.9,
192
+ max_tokens: 1024,
193
+ reasoning_effort: 'high' as const,
194
+ } as any;
195
+
196
+ const result = params.chatCompletion?.handlePayload?.(payload);
197
+
198
+ expect(result).toBeDefined();
199
+ expect(result?.temperature).toBe(0.7);
200
+ expect(result?.top_p).toBe(0.9);
201
+ expect(result?.max_tokens).toBe(1024);
202
+ expect(result?.reasoning?.effort).toBe('high');
203
+ });
204
+ });
205
+
206
+ describe('Routers Configuration', () => {
207
+ it('should configure routers with correct endpoints', () => {
208
+ const mockOptions = { baseURL: 'https://zenmux.ai/api/v1' };
209
+ const routers = params.routers(mockOptions);
210
+
211
+ expect(routers).toBeDefined();
212
+ expect(Array.isArray(routers)).toBe(true);
213
+
214
+ // Check anthropic router
215
+ const anthropicRouter = routers.find((r) => r.apiType === 'anthropic');
216
+ expect(anthropicRouter).toBeDefined();
217
+ expect(anthropicRouter?.options.baseURL).toContain('/api/anthropic');
218
+
219
+ // Check google router
220
+ const googleRouter = routers.find((r) => r.apiType === 'google');
221
+ expect(googleRouter).toBeDefined();
222
+ expect(googleRouter?.options.baseURL).toContain('/api/vertex-ai');
223
+
224
+ // Check openai router (default)
225
+ const openaiRouter = routers.find((r) => r.apiType === 'openai');
226
+ expect(openaiRouter).toBeDefined();
227
+ expect(openaiRouter?.options.baseURL).toContain('/api/v1');
228
+ });
229
+
230
+ it('should strip version paths from baseURL', () => {
231
+ const mockOptions = { baseURL: 'https://zenmux.ai/v1' };
232
+ const routers = params.routers(mockOptions);
233
+
234
+ const anthropicRouter = routers.find((r) => r.apiType === 'anthropic');
235
+ expect(anthropicRouter?.options.baseURL).toBe('https://zenmux.ai/api/anthropic');
236
+ });
237
+
238
+ it('should use default baseURL when not provided', () => {
239
+ const mockOptions = {}; // No baseURL provided
240
+ const routers = params.routers(mockOptions);
241
+
242
+ const anthropicRouter = routers.find((r) => r.apiType === 'anthropic');
243
+ expect(anthropicRouter?.options.baseURL).toBe('https://zenmux.ai/api/anthropic');
244
+
245
+ const googleRouter = routers.find((r) => r.apiType === 'google');
246
+ expect(googleRouter?.options.baseURL).toBe('https://zenmux.ai/api/vertex-ai');
247
+
248
+ const openaiRouter = routers.find((r) => r.apiType === 'openai');
249
+ expect(openaiRouter?.options.baseURL).toBe('https://zenmux.ai/api/v1');
250
+ });
251
+ });
252
+ describe('Models Function', () => {
253
+ it('should fetch and process models correctly', async () => {
254
+ const mockClient = {
255
+ apiKey: 'test-key',
256
+ baseURL: 'https://zenmux.ai/api/v1',
257
+ models: {
258
+ list: vi.fn().mockResolvedValue({
259
+ data: [
260
+ {
261
+ id: 'openai/gpt-4o-mini',
262
+ object: 'model',
263
+ created: 1755177025,
264
+ owned_by: 'openai',
265
+ },
266
+ {
267
+ id: 'anthropic/claude-3-5-sonnet-20241022',
268
+ object: 'model',
269
+ created: 1755177025,
270
+ owned_by: 'anthropic',
271
+ },
272
+ ],
273
+ }),
274
+ },
275
+ } as any;
276
+
277
+ // Mock processMultiProviderModelList to return processed models
278
+ mockProcessMultiProviderModelList.mockResolvedValue([
279
+ {
280
+ id: 'openai/gpt-4o-mini',
281
+ displayName: 'GPT-4o Mini',
282
+ providerId: 'openai',
283
+ },
284
+ {
285
+ id: 'anthropic/claude-3-5-sonnet-20241022',
286
+ displayName: 'Claude 3.5 Sonnet',
287
+ providerId: 'anthropic',
288
+ },
289
+ ] as any);
290
+
291
+ const models = await params.models({ client: mockClient });
292
+
293
+ expect(models).toBeDefined();
294
+ expect(Array.isArray(models)).toBe(true);
295
+ expect(models.length).toBeGreaterThan(0);
296
+ });
297
+
298
+ it('should handle empty model list', async () => {
299
+ const mockClient = {
300
+ apiKey: 'test-key',
301
+ baseURL: 'https://zenmux.ai/api/v1',
302
+ models: {
303
+ list: vi.fn().mockResolvedValue({
304
+ data: [],
305
+ }),
306
+ },
307
+ } as any;
308
+
309
+ // Mock processMultiProviderModelList
310
+ mockProcessMultiProviderModelList.mockResolvedValue([]);
311
+
312
+ const models = await params.models({ client: mockClient });
313
+
314
+ expect(models).toBeDefined();
315
+ expect(Array.isArray(models)).toBe(true);
316
+ expect(models.length).toBe(0);
317
+ });
318
+ });
319
+ });
320
+ });
@@ -0,0 +1,84 @@
1
+ import { LOBE_DEFAULT_MODEL_LIST, ModelProvider } from 'model-bank';
2
+ import urlJoin from 'url-join';
3
+
4
+ import { createRouterRuntime } from '../../core/RouterRuntime';
5
+ import { CreateRouterRuntimeOptions } from '../../core/RouterRuntime/createRuntime';
6
+ import { detectModelProvider, processMultiProviderModelList } from '../../utils/modelParse';
7
+
8
+ export interface ZenMuxModelCard {
9
+ created: number;
10
+ id: string;
11
+ object: string;
12
+ owned_by: string;
13
+ }
14
+
15
+ const DEFAULT_BASE_URL = 'https://zenmux.ai';
16
+
17
+ export const params = {
18
+ chatCompletion: {
19
+ handlePayload: (payload) => {
20
+ const { reasoning_effort, thinking, reasoning, ...rest } = payload;
21
+
22
+ const finalReasoning = {
23
+ ...reasoning,
24
+ ...(reasoning_effort && { effort: reasoning_effort }),
25
+ ...(thinking?.budget_tokens && { max_tokens: thinking.budget_tokens }),
26
+ ...(thinking?.type === 'enabled' && { enabled: true }),
27
+ ...(thinking?.type === 'disabled' && { enabled: false }),
28
+ };
29
+
30
+ const hasReasoning = Object.keys(finalReasoning).length > 0;
31
+
32
+ return {
33
+ ...rest,
34
+ ...(hasReasoning && { reasoning: finalReasoning }),
35
+ } as any;
36
+ },
37
+ },
38
+ debug: {
39
+ chatCompletion: () => process.env.DEBUG_ZENMUX_CHAT_COMPLETION === '1',
40
+ },
41
+ id: ModelProvider.ZenMux,
42
+ models: async ({ client: openAIClient }) => {
43
+ const modelsPage = (await openAIClient.models.list()) as any;
44
+ const modelList: ZenMuxModelCard[] = modelsPage.data || [];
45
+
46
+ return processMultiProviderModelList(modelList, 'zenmux');
47
+ },
48
+ routers: (options) => {
49
+ const baseURL = options.baseURL || DEFAULT_BASE_URL;
50
+ const userBaseURL = baseURL.replace(/\/v\d+[a-z]*\/?$/, '').replace(/\/api\/?$/, '');
51
+
52
+ return [
53
+ {
54
+ apiType: 'anthropic',
55
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
56
+ (id) => detectModelProvider(id) === 'anthropic',
57
+ ),
58
+ options: {
59
+ ...options,
60
+ baseURL: urlJoin(userBaseURL, '/api/anthropic'),
61
+ },
62
+ },
63
+ {
64
+ apiType: 'google',
65
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
66
+ (id) => detectModelProvider(id) === 'google',
67
+ ),
68
+ options: {
69
+ ...options,
70
+ baseURL: urlJoin(userBaseURL, '/api/vertex-ai'),
71
+ },
72
+ },
73
+ {
74
+ apiType: 'openai',
75
+ options: {
76
+ ...options,
77
+ baseURL: urlJoin(userBaseURL, '/api/v1'),
78
+ },
79
+ },
80
+ ];
81
+ },
82
+ } satisfies CreateRouterRuntimeOptions;
83
+
84
+ export const LobeZenMuxAI = createRouterRuntime(params);
@@ -61,6 +61,7 @@ import { LobeVolcengineAI } from './providers/volcengine';
61
61
  import { LobeWenxinAI } from './providers/wenxin';
62
62
  import { LobeXAI } from './providers/xai';
63
63
  import { LobeXinferenceAI } from './providers/xinference';
64
+ import { LobeZenMuxAI } from './providers/zenmux';
64
65
  import { LobeZeroOneAI } from './providers/zeroone';
65
66
  import { LobeZhipuAI } from './providers/zhipu';
66
67
 
@@ -128,6 +129,7 @@ export const providerRuntimeMap = {
128
129
  wenxin: LobeWenxinAI,
129
130
  xai: LobeXAI,
130
131
  xinference: LobeXinferenceAI,
132
+ zenmux: LobeZenMuxAI,
131
133
  zeroone: LobeZeroOneAI,
132
134
  zhipu: LobeZhipuAI,
133
135
  };
@@ -117,6 +117,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
117
117
  wenxin?: OpenAICompatibleKeyVault;
118
118
  xai?: OpenAICompatibleKeyVault;
119
119
  xinference?: OpenAICompatibleKeyVault;
120
+ zenmux?: OpenAICompatibleKeyVault;
120
121
  zeroone?: OpenAICompatibleKeyVault;
121
122
  zhipu?: OpenAICompatibleKeyVault;
122
123
  }
@@ -0,0 +1,88 @@
1
+ import { createStyles } from 'antd-style';
2
+ import { memo, useCallback } from 'react';
3
+ import { useTranslation } from 'react-i18next';
4
+ import { Flexbox } from 'react-layout-kit';
5
+
6
+ import { useGenerationConfigParam } from '@/store/image/slices/generationConfig/hooks';
7
+
8
+ const useStyles = createStyles(({ css, token }) => ({
9
+ button: css`
10
+ cursor: pointer;
11
+
12
+ display: flex;
13
+ align-items: center;
14
+ justify-content: center;
15
+
16
+ min-width: 60px;
17
+ height: 32px;
18
+ padding-block: 0;
19
+ padding-inline: 16px;
20
+ border: 1px solid ${token.colorBorder};
21
+ border-radius: ${token.borderRadius}px;
22
+
23
+ font-size: 14px;
24
+ font-weight: 500;
25
+ color: ${token.colorText};
26
+
27
+ background: ${token.colorBgContainer};
28
+
29
+ transition: all 0.2s ease;
30
+
31
+ &:hover {
32
+ border-color: ${token.colorPrimary};
33
+ background: ${token.colorBgTextHover};
34
+ }
35
+ `,
36
+
37
+ container: css`
38
+ display: flex;
39
+ gap: 8px;
40
+ align-items: center;
41
+ `,
42
+
43
+ selectedButton: css`
44
+ border-color: ${token.colorPrimary};
45
+ color: ${token.colorPrimary};
46
+ background: ${token.colorPrimaryBg};
47
+
48
+ &:hover {
49
+ border-color: ${token.colorPrimary};
50
+ color: ${token.colorPrimary};
51
+ background: ${token.colorPrimaryBgHover};
52
+ }
53
+ `,
54
+ }));
55
+
56
+ const ResolutionSelect = memo(() => {
57
+ const { t } = useTranslation('image');
58
+ const { value, setValue, enumValues } = useGenerationConfigParam('resolution');
59
+ const { styles, cx } = useStyles();
60
+
61
+ const handleClick = useCallback(
62
+ (resolution: string) => {
63
+ setValue(resolution);
64
+ },
65
+ [setValue],
66
+ );
67
+
68
+ if (!enumValues || enumValues.length === 0) {
69
+ return null;
70
+ }
71
+
72
+ return (
73
+ <Flexbox className={styles.container} horizontal>
74
+ {enumValues.map((resolution) => (
75
+ <button
76
+ className={cx(styles.button, value === resolution && styles.selectedButton)}
77
+ key={resolution}
78
+ onClick={() => handleClick(resolution)}
79
+ type="button"
80
+ >
81
+ {t(`config.resolution.options.${resolution}`, { defaultValue: resolution })}
82
+ </button>
83
+ ))}
84
+ </Flexbox>
85
+ );
86
+ });
87
+
88
+ export default ResolutionSelect;
@@ -19,6 +19,7 @@ import ImageUrl from './components/ImageUrl';
19
19
  import ImageUrlsUpload from './components/ImageUrlsUpload';
20
20
  import ModelSelect from './components/ModelSelect';
21
21
  import QualitySelect from './components/QualitySelect';
22
+ import ResolutionSelect from './components/ResolutionSelect';
22
23
  import SeedNumberInput from './components/SeedNumberInput';
23
24
  import SizeSelect from './components/SizeSelect';
24
25
  import StepsSliderInput from './components/StepsSliderInput';
@@ -54,6 +55,7 @@ const ConfigPanel = memo(() => {
54
55
  const isSupportImageUrl = useImageStore(isSupportedParamSelector('imageUrl'));
55
56
  const isSupportSize = useImageStore(isSupportedParamSelector('size'));
56
57
  const isSupportQuality = useImageStore(isSupportedParamSelector('quality'));
58
+ const isSupportResolution = useImageStore(isSupportedParamSelector('resolution'));
57
59
  const isSupportSeed = useImageStore(isSupportedParamSelector('seed'));
58
60
  const isSupportSteps = useImageStore(isSupportedParamSelector('steps'));
59
61
  const isSupportCfg = useImageStore(isSupportedParamSelector('cfg'));
@@ -78,6 +80,7 @@ const ConfigPanel = memo(() => {
78
80
  isSupportImageUrl,
79
81
  isSupportSize,
80
82
  isSupportQuality,
83
+ isSupportResolution,
81
84
  isSupportSeed,
82
85
  isSupportSteps,
83
86
  isSupportCfg,
@@ -168,6 +171,12 @@ const ConfigPanel = memo(() => {
168
171
  </ConfigItemLayout>
169
172
  )}
170
173
 
174
+ {isSupportResolution && (
175
+ <ConfigItemLayout label={t('config.resolution.label')}>
176
+ <ResolutionSelect />
177
+ </ConfigItemLayout>
178
+ )}
179
+
171
180
  {showDimensionControl && <DimensionControlGroup />}
172
181
 
173
182
  {isSupportSteps && (
@@ -64,6 +64,7 @@ import VolcengineProvider from './volcengine';
64
64
  import WenxinProvider from './wenxin';
65
65
  import XAIProvider from './xai';
66
66
  import XinferenceProvider from './xinference';
67
+ import ZenMuxProvider from './zenmux';
67
68
  import ZeroOneProvider from './zeroone';
68
69
  import ZhiPuProvider from './zhipu';
69
70
 
@@ -190,6 +191,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
190
191
  CometAPIProvider,
191
192
  VercelAIGatewayProvider,
192
193
  CerebrasProvider,
194
+ ZenMuxProvider,
193
195
  ];
194
196
 
195
197
  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -266,5 +268,6 @@ export { default as VolcengineProviderCard } from './volcengine';
266
268
  export { default as WenxinProviderCard } from './wenxin';
267
269
  export { default as XAIProviderCard } from './xai';
268
270
  export { default as XinferenceProviderCard } from './xinference';
271
+ export { default as ZenMuxProviderCard } from './zenmux';
269
272
  export { default as ZeroOneProviderCard } from './zeroone';
270
273
  export { default as ZhiPuProviderCard } from './zhipu';
import { ModelProviderCard } from '@/types/llm';

// Provider card for ZenMux — an aggregation gateway exposing OpenAI,
// Anthropic, Google Vertex AI and other upstream APIs behind one endpoint.
const ZenMux: ModelProviderCard = {
  // Empty on purpose: the model list is fetched at runtime (see showModelFetcher).
  chatModels: [],
  // Inexpensive model used for the connectivity check.
  checkModel: 'openai/gpt-5-nano',
  description:
    'ZenMux 是一个统一的 AI 服务聚合平台,支持 OpenAI、Anthropic、Google VertexAI 等多种主流 AI 服务接口。提供灵活的路由能力,让您可以轻松切换和管理不同的 AI 模型。',
  id: 'zenmux',
  name: 'ZenMux',
  settings: {
    // Requests must go through the server side — direct browser calls fail on CORS.
    disableBrowserRequest: true, // CORS error
    proxyUrl: {
      placeholder: 'https://zenmux.ai',
    },
    // Served by the multi-API router runtime rather than a single-SDK runtime.
    sdkType: 'router',
    // Expose the "fetch model list" button in the provider settings UI.
    showModelFetcher: true,
  },
  url: 'https://zenmux.ai',
};

export default ZenMux;
package/src/envs/llm.ts CHANGED
@@ -209,6 +209,9 @@ export const getLLMConfig = () => {
209
209
 
210
210
  ENABLED_CEREBRAS: z.boolean(),
211
211
  CEREBRAS_API_KEY: z.string().optional(),
212
+
213
+ ENABLED_ZENMUX: z.boolean(),
214
+ ZENMUX_API_KEY: z.string().optional(),
212
215
  },
213
216
  runtimeEnv: {
214
217
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -416,6 +419,9 @@ export const getLLMConfig = () => {
416
419
 
417
420
  ENABLED_CEREBRAS: !!process.env.CEREBRAS_API_KEY,
418
421
  CEREBRAS_API_KEY: process.env.CEREBRAS_API_KEY,
422
+
423
+ ENABLED_ZENMUX: !!process.env.ZENMUX_API_KEY,
424
+ ZENMUX_API_KEY: process.env.ZENMUX_API_KEY,
419
425
  },
420
426
  });
421
427
  };
@@ -37,6 +37,14 @@ export default {
37
37
  standard: '标准',
38
38
  },
39
39
  },
40
+ resolution: {
41
+ label: '分辨率',
42
+ options: {
43
+ '1K': '1K',
44
+ '2K': '2K',
45
+ '4K': '4K',
46
+ },
47
+ },
40
48
  seed: {
41
49
  label: '种子',
42
50
  random: '随机种子',