@lobehub/lobehub 2.0.0-next.203 → 2.0.0-next.204

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
  # Changelog
 
+ ## [Version 2.0.0-next.204](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.203...v2.0.0-next.204)
+
+ <sup>Released on **2026-01-04**</sup>
+
+ #### ✨ Features
+
+ - **misc**: Add new provider Xiaomi MiMo.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's improved
+
+ - **misc**: Add new provider Xiaomi MiMo, closes [#10834](https://github.com/lobehub/lobe-chat/issues/10834) ([62f7858](https://github.com/lobehub/lobe-chat/commit/62f7858))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 2.0.0-next.203](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.202...v2.0.0-next.203)
 
  <sup>Released on **2026-01-04**</sup>
@@ -39,7 +39,6 @@ export default class SystemController extends ControllerModule {
  isMac: platform === 'darwin',
  isWindows: platform === 'win32',
  platform: platform as 'darwin' | 'win32' | 'linux',
- systemAppearance: nativeTheme.shouldUseDarkColors ? 'dark' : 'light',
  userPath: {
  // User Paths (ensure keys match UserPathData / DesktopAppState interface)
  desktop: app.getPath('desktop'),
@@ -265,11 +264,6 @@ export default class SystemController extends ControllerModule {
 
  logger.info('Initializing system theme listener');
 
- // Get initial system theme
- const initialDarkMode = nativeTheme.shouldUseDarkColors;
- const initialSystemTheme: ThemeMode = initialDarkMode ? 'dark' : 'light';
- logger.info(`Initial system theme: ${initialSystemTheme}`);
-
  // Listen for system theme changes
  nativeTheme.on('updated', () => {
  const isDarkMode = nativeTheme.shouldUseDarkColors;
@@ -72,6 +72,7 @@ vi.mock('electron', () => ({
  nativeTheme: {
  on: vi.fn(),
  shouldUseDarkColors: false,
+ themeSource: 'system',
  },
  shell: {
  openExternal: vi.fn().mockResolvedValue(undefined),
@@ -138,7 +139,6 @@ describe('SystemController', () => {
  expect(result).toMatchObject({
  arch: expect.any(String),
  platform: expect.any(String),
- systemAppearance: 'light',
  userPath: {
  desktop: '/mock/path/desktop',
  documents: '/mock/path/documents',
@@ -151,18 +151,6 @@ describe('SystemController', () => {
  },
  });
  });
-
- it('should return dark appearance when nativeTheme is dark', async () => {
- const { nativeTheme } = await import('electron');
- Object.defineProperty(nativeTheme, 'shouldUseDarkColors', { value: true });
-
- const result = await invokeIpc('system.getAppState');
-
- expect(result.systemAppearance).toBe('dark');
-
- // Reset
- Object.defineProperty(nativeTheme, 'shouldUseDarkColors', { value: false });
- });
  });
 
  describe('accessibility', () => {
@@ -6,7 +6,7 @@ import {
  RouteVariants,
  } from '@lobechat/desktop-bridge';
  import { ElectronIPCEventHandler, ElectronIPCServer } from '@lobechat/electron-server-ipc';
- import { app, protocol, session } from 'electron';
+ import { app, nativeTheme, protocol, session } from 'electron';
  import installExtension, { REACT_DEVELOPER_TOOLS } from 'electron-devtools-installer';
  import { macOS, windows } from 'electron-is';
  import { pathExistsSync } from 'fs-extra';
@@ -154,9 +154,27 @@ export class App {
  // Handle the before-quit event in one place
  app.on('before-quit', this.handleBeforeQuit);
 
+ // Initialize theme mode from store
+ this.initializeThemeMode();
+
  logger.info('App initialization completed');
  }
 
+ /**
+ * Initialize nativeTheme.themeSource from stored themeMode preference
+ * This allows nativeTheme.shouldUseDarkColors to be used consistently everywhere
+ */
+ private initializeThemeMode() {
+ const themeMode = this.storeManager.get('themeMode');
+
+ if (themeMode) {
+ nativeTheme.themeSource = themeMode === 'auto' ? 'system' : themeMode;
+ logger.debug(
+ `Theme mode initialized to: ${themeMode} (themeSource: ${nativeTheme.themeSource})`,
+ );
+ }
+ }
+
  bootstrap = async () => {
  logger.info('Bootstrapping application');
  // make single instance
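The new `initializeThemeMode` step pushes the stored preference into Electron's `nativeTheme`, so the rest of the app can rely on `nativeTheme.shouldUseDarkColors` alone. A minimal sketch of the mapping it applies (the local `ThemeMode` union and helper name are illustrative, not part of this diff):

```ts
import { nativeTheme } from 'electron';

// Illustrative helper mirroring initializeThemeMode above:
// the stored 'auto' preference becomes Electron's 'system' theme source.
type ThemeMode = 'auto' | 'dark' | 'light';

const toThemeSource = (mode: ThemeMode): typeof nativeTheme.themeSource =>
  mode === 'auto' ? 'system' : mode;

nativeTheme.themeSource = toThemeSource('auto'); // 'system'
nativeTheme.themeSource = toThemeSource('dark'); // 'dark'
```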
@@ -28,7 +28,7 @@ vi.mock('electron', () => ({
  },
  nativeTheme: {
  on: vi.fn(),
- shouldUseDarkColors: false,
+ themeSource: 'system',
  },
  protocol: {
  registerSchemesAsPrivileged: vi.fn(),
@@ -78,11 +78,9 @@ export default class Browser {
  /**
  * Get platform-specific theme configuration for window creation
  */
- private getPlatformThemeConfig(isDarkMode?: boolean): Record<string, any> {
- const darkMode = isDarkMode ?? nativeTheme.shouldUseDarkColors;
-
+ private getPlatformThemeConfig(): Record<string, any> {
  if (isWindows) {
- return this.getWindowsThemeConfig(darkMode);
+ return this.getWindowsThemeConfig(this.isDarkMode);
  }
 
  return {};
@@ -164,10 +162,7 @@ export default class Browser {
  }
 
  private get isDarkMode() {
- const themeMode = this.app.storeManager.get('themeMode');
- if (themeMode === 'auto') return nativeTheme.shouldUseDarkColors;
-
- return themeMode === 'dark';
+ return nativeTheme.shouldUseDarkColors;
  }
 
  loadUrl = async (path: string) => {
@@ -328,13 +323,11 @@ export default class Browser {
  `[${this.identifier}] Saved window state (only size used): ${JSON.stringify(savedState)}`,
  );
 
- const isDarkMode = nativeTheme.shouldUseDarkColors;
-
  const browserWindow = new BrowserWindow({
  ...res,
  autoHideMenuBar: true,
  backgroundColor: '#00000000',
- darkTheme: isDarkMode,
+ darkTheme: this.isDarkMode,
  frame: false,
  height: savedState?.height || height,
  show: false,
@@ -348,7 +341,7 @@ export default class Browser {
  sandbox: false,
  },
  width: savedState?.width || width,
- ...this.getPlatformThemeConfig(isDarkMode),
+ ...this.getPlatformThemeConfig(),
  });
 
  this._browserWindow = browserWindow;
@@ -53,6 +53,7 @@ const { mockBrowserWindow, mockNativeTheme, mockIpcMain, mockScreen, MockBrowser
  off: vi.fn(),
  on: vi.fn(),
  shouldUseDarkColors: false,
+ themeSource: 'system',
  },
  mockScreen: {
  getDisplayNearestPoint: vi.fn().mockReturnValue({
@@ -272,7 +273,7 @@ describe('Browser', () => {
 
  describe('theme management', () => {
  describe('getPlatformThemeConfig', () => {
- it('should return Windows dark theme config', () => {
+ it('should return Windows dark theme config when shouldUseDarkColors is true', () => {
  mockNativeTheme.shouldUseDarkColors = true;
 
  // Create browser with dark mode
@@ -289,7 +290,7 @@ describe('Browser', () => {
  );
  });
 
- it('should return Windows light theme config', () => {
+ it('should return Windows light theme config when shouldUseDarkColors is false', () => {
  mockNativeTheme.shouldUseDarkColors = false;
 
  expect(MockBrowserWindow).toHaveBeenCalledWith(
@@ -334,11 +335,8 @@ describe('Browser', () => {
  });
 
  describe('isDarkMode', () => {
- it('should return true when themeMode is dark', () => {
- mockStoreManagerGet.mockImplementation((key: string) => {
- if (key === 'themeMode') return 'dark';
- return undefined;
- });
+ it('should return true when shouldUseDarkColors is true', () => {
+ mockNativeTheme.shouldUseDarkColors = true;
 
  const darkBrowser = new Browser(defaultOptions, mockApp);
  // Access private getter through handleAppThemeChange which uses isDarkMode
@@ -348,18 +346,14 @@ describe('Browser', () => {
  expect(mockBrowserWindow.setBackgroundColor).toHaveBeenCalledWith('#1a1a1a');
  });
 
- it('should use system theme when themeMode is auto', () => {
- mockStoreManagerGet.mockImplementation((key: string) => {
- if (key === 'themeMode') return 'auto';
- return undefined;
- });
- mockNativeTheme.shouldUseDarkColors = true;
+ it('should return false when shouldUseDarkColors is false', () => {
+ mockNativeTheme.shouldUseDarkColors = false;
 
- const autoBrowser = new Browser(defaultOptions, mockApp);
- autoBrowser.handleAppThemeChange();
+ const lightBrowser = new Browser(defaultOptions, mockApp);
+ lightBrowser.handleAppThemeChange();
  vi.advanceTimersByTime(0);
 
- expect(mockBrowserWindow.setBackgroundColor).toHaveBeenCalledWith('#1a1a1a');
+ expect(mockBrowserWindow.setBackgroundColor).toHaveBeenCalledWith('#ffffff');
  });
  });
  });
@@ -57,7 +57,7 @@ export class TrayManager {
  logger.debug('Initializing main tray');
  return this.retrieveOrInitialize({
  iconPath: isMac
- ? nativeTheme.shouldUseDarkColors
+ ? nativeTheme.shouldUseDarkColorsForSystemIntegratedUI
  ? 'tray-dark.png'
  : 'tray-light.png'
  : 'tray.png',
@@ -8,7 +8,7 @@ import { TrayManager } from '../TrayManager';
  // Mock electron modules
  vi.mock('electron', () => ({
  nativeTheme: {
- shouldUseDarkColors: false,
+ shouldUseDarkColorsForSystemIntegratedUI: false,
  },
  }));
 
@@ -90,7 +90,7 @@ describe('TrayManager', () => {
 
  describe('initializeMainTray', () => {
  it('should create main tray with dark icon on macOS when dark mode is enabled', () => {
- Object.defineProperty(nativeTheme, 'shouldUseDarkColors', {
+ Object.defineProperty(nativeTheme, 'shouldUseDarkColorsForSystemIntegratedUI', {
  value: true,
  writable: true,
  configurable: true,
@@ -110,7 +110,7 @@ describe('TrayManager', () => {
  });
 
  it('should create main tray with light icon on macOS when light mode is enabled', () => {
- Object.defineProperty(nativeTheme, 'shouldUseDarkColors', {
+ Object.defineProperty(nativeTheme, 'shouldUseDarkColorsForSystemIntegratedUI', {
  value: false,
  writable: true,
  configurable: true,
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+ {
+ "children": {
+ "features": [
+ "Add new provider Xiaomi MiMo."
+ ]
+ },
+ "date": "2026-01-04",
+ "version": "2.0.0-next.204"
+ },
  {
  "children": {
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/lobehub",
- "version": "2.0.0-next.203",
+ "version": "2.0.0-next.204",
  "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -71,6 +71,7 @@
  "./volcengine": "./src/aiModels/volcengine.ts",
  "./wenxin": "./src/aiModels/wenxin.ts",
  "./xai": "./src/aiModels/xai.ts",
+ "./xiaomimimo": "./src/aiModels/xiaomimimo.ts",
  "./xinference": "./src/aiModels/xinference.ts",
  "./zenmux": "./src/aiModels/zenmux.ts",
  "./zeroone": "./src/aiModels/zeroone.ts",
@@ -66,6 +66,7 @@ import { default as vllm } from './vllm';
  import { default as volcengine } from './volcengine';
  import { default as wenxin } from './wenxin';
  import { default as xai } from './xai';
+ import { default as xiaomimimo } from './xiaomimimo';
  import { default as xinference } from './xinference';
  import { default as zenmux } from './zenmux';
  import { default as zeroone } from './zeroone';
@@ -156,6 +157,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
  volcengine,
  wenxin,
  xai,
+ xiaomimimo,
  xinference,
  zenmux,
  zeroone,
@@ -227,6 +229,7 @@ export { default as vllm } from './vllm';
  export { default as volcengine } from './volcengine';
  export { default as wenxin } from './wenxin';
  export { default as xai } from './xai';
+ export { default as xiaomimimo } from './xiaomimimo';
  export { default as xinference } from './xinference';
  export { default as zenmux } from './zenmux';
  export { default as zeroone } from './zeroone';
@@ -0,0 +1,24 @@
+ import { AIChatModelCard } from '../types/aiModel';
+
+ const xiaomimimoChatModels: AIChatModelCard[] = [
+ {
+ abilities: {
+ functionCall: true,
+ reasoning: true,
+ },
+ contextWindowTokens: 262_144,
+ description: 'MiMo-V2-Flash: An efficient model for reasoning, coding, and agent foundations.',
+ displayName: 'MiMo-V2 Flash',
+ enabled: true,
+ id: 'mimo-v2-flash',
+ maxOutput: 131_072,
+ settings: {
+ extendParams: ['enableReasoning'],
+ },
+ type: 'chat',
+ },
+ ];
+
+ export const allModels = [...xiaomimimoChatModels];
+
+ export default allModels;
@@ -64,6 +64,7 @@ export enum ModelProvider {
  Volcengine = 'volcengine',
  Wenxin = 'wenxin',
  XAI = 'xai',
+ XiaomiMiMo = 'xiaomimimo',
  Xinference = 'xinference',
  ZenMux = 'zenmux',
  ZeroOne = 'zeroone',
@@ -67,6 +67,7 @@ import VLLMProvider from './vllm';
  import VolcengineProvider from './volcengine';
  import WenxinProvider from './wenxin';
  import XAIProvider from './xai';
+ import XiaomiMiMoProvider from './xiaomimimo';
  import XinferenceProvider from './xinference';
  import ZenMuxProvider from './zenmux';
  import ZeroOneProvider from './zeroone';
@@ -198,6 +199,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
  VercelAIGatewayProvider,
  CerebrasProvider,
  ZenMuxProvider,
+ XiaomiMiMoProvider,
  ];
 
  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -274,6 +276,7 @@ export { default as VLLMProviderCard } from './vllm';
  export { default as VolcengineProviderCard } from './volcengine';
  export { default as WenxinProviderCard } from './wenxin';
  export { default as XAIProviderCard } from './xai';
+ export { default as XiaomiMiMoProviderCard } from './xiaomimimo';
  export { default as XinferenceProviderCard } from './xinference';
  export { default as ZenMuxProviderCard } from './zenmux';
  export { default as ZeroOneProviderCard } from './zeroone';
@@ -0,0 +1,22 @@
+ import { type ModelProviderCard } from '@/types/llm';
+
+ const XiaomiMiMo: ModelProviderCard = {
+ chatModels: [],
+ checkModel: 'mimo-v2-flash',
+ description:
+ 'Xiaomi MiMo provides a conversational model service with an OpenAI-compatible API. The mimo-v2-flash model supports deep reasoning, streaming output, function calling, a 256K context window, and a maximum output of 128K.',
+ id: 'xiaomimimo',
+ modelList: { showModelFetcher: true },
+ name: 'Xiaomi MiMo',
+ settings: {
+ disableBrowserRequest: true, // CORS error
+ proxyUrl: {
+ placeholder: 'https://api.xiaomimimo.com/v1',
+ },
+ sdkType: 'openai',
+ showModelFetcher: true,
+ },
+ url: 'https://platform.xiaomimimo.com/',
+ };
+
+ export default XiaomiMiMo;
@@ -32,6 +32,7 @@ export { LobeQwenAI } from './providers/qwen';
  export { LobeStepfunAI } from './providers/stepfun';
  export { LobeTogetherAI } from './providers/togetherai';
  export { LobeVolcengineAI } from './providers/volcengine';
+ export { LobeXiaomiMiMoAI } from './providers/xiaomimimo';
  export { LobeZenMuxAI } from './providers/zenmux';
  export { LobeZeroOneAI } from './providers/zeroone';
  export { LobeZhipuAI } from './providers/zhipu';
@@ -0,0 +1,147 @@
+ // @vitest-environment node
+ import { ModelProvider } from 'model-bank';
+ import { describe, expect, it, vi } from 'vitest';
+
+ import { testProvider } from '../../providerTestUtils';
+ import { LobeXiaomiMiMoAI, params } from './index';
+
+ const provider = ModelProvider.XiaomiMiMo;
+ const defaultBaseURL = 'https://api.xiaomimimo.com/v1';
+
+ testProvider({
+ Runtime: LobeXiaomiMiMoAI,
+ provider,
+ defaultBaseURL,
+ chatDebugEnv: 'DEBUG_XIAOMIMIMO_CHAT_COMPLETION',
+ chatModel: 'gpt-4o',
+ test: {
+ skipAPICall: true,
+ },
+ });
+
+ describe('LobeXiaomiMiMoAI - custom features', () => {
+ describe('chatCompletion.handlePayload', () => {
+ it('should map max_tokens to max_completion_tokens', () => {
+ const payload = {
+ max_tokens: 1000,
+ model: 'gpt-4o',
+ };
+
+ const result = params.chatCompletion!.handlePayload!(payload as any);
+
+ expect(result.max_completion_tokens).toBe(1000);
+ expect(result.max_tokens).toBeUndefined();
+ });
+
+ it('should set stream to true by default', () => {
+ const payload = {
+ model: 'gpt-4o',
+ };
+
+ const result = params.chatCompletion!.handlePayload!(payload as any);
+
+ expect(result.stream).toBe(true);
+ });
+
+ it('should preserve existing stream value', () => {
+ const payload = {
+ model: 'gpt-4o',
+ stream: false,
+ };
+
+ const result = params.chatCompletion!.handlePayload!(payload as any);
+
+ expect(result.stream).toBe(false);
+ });
+
+ it('should clamp temperature between 0 and 1.5', () => {
+ const payloadLow = {
+ temperature: -1,
+ model: 'gpt-4o',
+ };
+ const resultLow = params.chatCompletion!.handlePayload!(payloadLow as any);
+ expect(resultLow.temperature).toBe(0);
+
+ const payloadHigh = {
+ temperature: 2,
+ model: 'gpt-4o',
+ };
+ const resultHigh = params.chatCompletion!.handlePayload!(payloadHigh as any);
+ expect(resultHigh.temperature).toBe(1.5);
+
+ const payloadNormal = {
+ temperature: 0.7,
+ model: 'gpt-4o',
+ };
+ const resultNormal = params.chatCompletion!.handlePayload!(payloadNormal as any);
+ expect(resultNormal.temperature).toBe(0.7);
+ });
+
+ it('should clamp top_p between 0.01 and 1', () => {
+ const payloadLow = {
+ top_p: 0,
+ model: 'gpt-4o',
+ };
+ const resultLow = params.chatCompletion!.handlePayload!(payloadLow as any);
+ expect(resultLow.top_p).toBe(0.01);
+
+ const payloadHigh = {
+ top_p: 1.5,
+ model: 'gpt-4o',
+ };
+ const resultHigh = params.chatCompletion!.handlePayload!(payloadHigh as any);
+ expect(resultHigh.top_p).toBe(1);
+
+ const payloadNormal = {
+ top_p: 0.5,
+ model: 'gpt-4o',
+ };
+ const resultNormal = params.chatCompletion!.handlePayload!(payloadNormal as any);
+ expect(resultNormal.top_p).toBe(0.5);
+ });
+
+ it('should handle thinking type enabled/disabled', () => {
+ const payloadEnabled = {
+ thinking: { type: 'enabled' },
+ model: 'gpt-4o',
+ };
+ const resultEnabled = params.chatCompletion!.handlePayload!(payloadEnabled as any);
+ expect(resultEnabled.thinking).toEqual({ type: 'enabled' });
+
+ const payloadDisabled = {
+ thinking: { type: 'disabled' },
+ model: 'gpt-4o',
+ };
+ const resultDisabled = params.chatCompletion!.handlePayload!(payloadDisabled as any);
+ expect(resultDisabled.thinking).toEqual({ type: 'disabled' });
+
+ const payloadOther = {
+ thinking: { type: 'other' },
+ model: 'gpt-4o',
+ };
+ const resultOther = params.chatCompletion!.handlePayload!(payloadOther as any);
+ expect(resultOther.thinking).toBeUndefined();
+ });
+ });
+
+ describe('models', () => {
+ it('should fetch and process model list', async () => {
+ const mockModels = [{ id: 'model-1' }, { id: 'model-2' }];
+ const client = {
+ models: {
+ list: vi.fn().mockResolvedValue({ data: mockModels }),
+ },
+ };
+
+ const result = await params.models!({ client: client as any });
+
+ expect(client.models.list).toHaveBeenCalled();
+ expect(result).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({ id: 'model-1' }),
+ expect.objectContaining({ id: 'model-2' }),
+ ]),
+ );
+ });
+ });
+ });
@@ -0,0 +1,48 @@
+ import { ModelProvider } from 'model-bank';
+
+ import {
+ type OpenAICompatibleFactoryOptions,
+ createOpenAICompatibleRuntime,
+ } from '../../core/openaiCompatibleFactory';
+ import { MODEL_LIST_CONFIGS, processModelList } from '../../utils/modelParse';
+
+ const clamp = (value: number, min: number, max: number) => Math.min(max, Math.max(min, value));
+
+ export interface XiaomiMiMoModelCard {
+ id: string;
+ }
+
+ export const params = {
+ baseURL: 'https://api.xiaomimimo.com/v1',
+ chatCompletion: {
+ handlePayload: (payload) => {
+ const { thinking, temperature, top_p, max_tokens, stream, ...rest } = payload as any;
+ const thinkingType = thinking?.type;
+
+ return {
+ ...rest,
+ max_completion_tokens: max_tokens,
+ stream: stream ?? true,
+ ...(typeof temperature === 'number'
+ ? { temperature: clamp(temperature, 0, 1.5) }
+ : undefined),
+ ...(typeof top_p === 'number' ? { top_p: clamp(top_p, 0.01, 1) } : undefined),
+ ...(thinkingType === 'enabled' || thinkingType === 'disabled'
+ ? { thinking: { type: thinkingType } }
+ : undefined),
+ } as any;
+ },
+ },
+ debug: {
+ chatCompletion: () => process.env.DEBUG_XIAOMIMIMO_CHAT_COMPLETION === '1',
+ },
+ models: async ({ client }) => {
+ const modelsPage = (await client.models.list()) as any;
+ const modelList: XiaomiMiMoModelCard[] = modelsPage.data;
+
+ return processModelList(modelList, MODEL_LIST_CONFIGS.xiaomimimo, 'xiaomimimo');
+ },
+ provider: ModelProvider.XiaomiMiMo,
+ } satisfies OpenAICompatibleFactoryOptions;
+
+ export const LobeXiaomiMiMoAI = createOpenAICompatibleRuntime(params);
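For readers skimming the new runtime, here is a sketch of what `handlePayload` produces for a typical request, based on the factory options and the tests above (the relative import path assumes a file placed next to the new `providers/xiaomimimo/index.ts` module):

```ts
import { params } from './index';

// max_tokens is renamed, temperature/top_p are clamped, and streaming defaults to on.
const body = params.chatCompletion!.handlePayload!({
  max_tokens: 1000,
  model: 'mimo-v2-flash',
  temperature: 2, // above the allowed range, clamped to 1.5
  thinking: { type: 'enabled' },
  top_p: 1.5, // clamped to 1
} as any);

// body is roughly:
// {
//   model: 'mimo-v2-flash',
//   max_completion_tokens: 1000,
//   stream: true,
//   temperature: 1.5,
//   thinking: { type: 'enabled' },
//   top_p: 1,
// }
```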
@@ -62,6 +62,7 @@ import { LobeVLLMAI } from './providers/vllm';
  import { LobeVolcengineAI } from './providers/volcengine';
  import { LobeWenxinAI } from './providers/wenxin';
  import { LobeXAI } from './providers/xai';
+ import { LobeXiaomiMiMoAI } from './providers/xiaomimimo';
  import { LobeXinferenceAI } from './providers/xinference';
  import { LobeZenMuxAI } from './providers/zenmux';
  import { LobeZeroOneAI } from './providers/zeroone';
@@ -133,6 +134,7 @@ export const providerRuntimeMap = {
  volcengine: LobeVolcengineAI,
  wenxin: LobeWenxinAI,
  xai: LobeXAI,
+ xiaomimimo: LobeXiaomiMiMoAI,
  xinference: LobeXinferenceAI,
  zenmux: LobeZenMuxAI,
  zeroone: LobeZeroOneAI,
@@ -120,6 +120,11 @@ export const MODEL_LIST_CONFIGS = {
  reasoningKeywords: ['mini', 'grok-4', 'grok-code-fast', '!non-reasoning'],
  visionKeywords: ['vision', 'grok-4'],
  },
+ xiaomimimo: {
+ functionCallKeywords: ['mimo'],
+ reasoningKeywords: ['mimo'],
+ visionKeywords: [],
+ },
  zeroone: {
  functionCallKeywords: ['fc'],
  visionKeywords: ['vision'],
@@ -149,6 +154,7 @@ export const MODEL_OWNER_DETECTION_CONFIG = {
  volcengine: ['doubao'],
  wenxin: ['ernie', 'qianfan'],
  xai: ['grok'],
+ xiaomimimo: ['mimo-'],
  zeroone: ['yi-'],
  zhipu: ['glm'],
  } as const;
package/src/envs/llm.ts CHANGED
@@ -216,6 +216,9 @@ export const getLLMConfig = () => {
  ZENMUX_API_KEY: z.string().optional(),
 
  ENABLED_LOBEHUB: z.boolean(),
+
+ ENABLED_XIAOMIMIMO: z.boolean(),
+ XIAOMIMIMO_API_KEY: z.string().optional(),
  },
  runtimeEnv: {
  API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -430,6 +433,9 @@ export const getLLMConfig = () => {
  ZENMUX_API_KEY: process.env.ZENMUX_API_KEY,
 
  ENABLED_LOBEHUB: !!process.env.ENABLED_LOBEHUB,
+
+ ENABLED_XIAOMIMIMO: !!process.env.XIAOMIMIMO_API_KEY,
+ XIAOMIMIMO_API_KEY: process.env.XIAOMIMIMO_API_KEY,
  },
  });
  };
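Note that `ENABLED_XIAOMIMIMO` is derived from the presence of the API key rather than from its own flag, so enabling the provider server-side should only require setting `XIAOMIMIMO_API_KEY`. A minimal sketch of that behavior (the `@/envs/llm` import path and the key value are assumptions for illustration):

```ts
import { getLLMConfig } from '@/envs/llm';

// Placeholder key value; any non-empty string flips the derived boolean.
process.env.XIAOMIMIMO_API_KEY = 'sk-xxxx';

const config = getLLMConfig();
console.log(config.ENABLED_XIAOMIMIMO); // true, with no separate ENABLED_XIAOMIMIMO env var set
```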
@@ -7,6 +7,10 @@ import { useGlobalStore } from '@/store/global';
  import { systemStatusSelectors } from '@/store/global/selectors';
  import { ensureElectronIpc } from '@/utils/electron/ipc';
 
+ const sidebarColors = {
+ dark: '#000',
+ light: '#f8f8f8',
+ };
  export const useWatchThemeUpdate = () => {
  const [isAppStateInit, systemAppearance, updateElectronAppState, isMac] = useElectronStore(
  (s) => [
@@ -43,8 +47,9 @@ export const useWatchThemeUpdate = () => {
 
  const lobeApp = document.querySelector('#' + LOBE_THEME_APP_ID);
  if (!lobeApp) return;
- const hexColor = getComputedStyle(lobeApp).getPropertyValue('--ant-color-bg-layout');
 
- document.body.style.background = `color-mix(in srgb, ${hexColor} 86%, transparent)`;
+ if (systemAppearance) {
+ document.body.style.background = `color-mix(in srgb, ${sidebarColors[systemAppearance as 'dark' | 'light']} 86%, transparent)`;
+ }
  }, [systemAppearance, isAppStateInit, isMac]);
  };