@lobehub/lobehub 2.0.0-next.202 → 2.0.0-next.204
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/apps/desktop/src/main/controllers/SystemCtr.ts +0 -6
- package/apps/desktop/src/main/controllers/__tests__/SystemCtr.test.ts +1 -13
- package/apps/desktop/src/main/core/App.ts +19 -1
- package/apps/desktop/src/main/core/__tests__/App.test.ts +1 -1
- package/apps/desktop/src/main/core/browser/Browser.ts +5 -12
- package/apps/desktop/src/main/core/browser/__tests__/Browser.test.ts +10 -16
- package/apps/desktop/src/main/core/ui/TrayManager.ts +1 -1
- package/apps/desktop/src/main/core/ui/__tests__/TrayManager.test.ts +3 -3
- package/changelog/v1.json +18 -0
- package/locales/ar/models.json +48 -7
- package/locales/ar/providers.json +0 -1
- package/locales/bg-BG/models.json +5 -7
- package/locales/bg-BG/providers.json +0 -1
- package/locales/de-DE/models.json +47 -6
- package/locales/de-DE/providers.json +0 -1
- package/locales/en-US/models.json +10 -10
- package/locales/en-US/providers.json +0 -1
- package/locales/es-ES/models.json +36 -10
- package/locales/es-ES/providers.json +0 -1
- package/locales/fa-IR/models.json +5 -7
- package/locales/fa-IR/providers.json +0 -1
- package/locales/fr-FR/models.json +36 -7
- package/locales/fr-FR/providers.json +0 -1
- package/locales/it-IT/models.json +5 -7
- package/locales/it-IT/providers.json +0 -1
- package/locales/ja-JP/models.json +49 -10
- package/locales/ja-JP/providers.json +0 -1
- package/locales/ko-KR/models.json +48 -7
- package/locales/ko-KR/providers.json +0 -1
- package/locales/nl-NL/models.json +4 -4
- package/locales/nl-NL/providers.json +0 -1
- package/locales/pl-PL/models.json +30 -5
- package/locales/pl-PL/providers.json +0 -1
- package/locales/pt-BR/models.json +51 -6
- package/locales/pt-BR/providers.json +0 -1
- package/locales/ru-RU/models.json +5 -7
- package/locales/ru-RU/providers.json +0 -1
- package/locales/tr-TR/models.json +5 -7
- package/locales/tr-TR/providers.json +0 -1
- package/locales/vi-VN/models.json +5 -7
- package/locales/vi-VN/providers.json +0 -1
- package/locales/zh-CN/models.json +61 -6
- package/locales/zh-CN/providers.json +0 -1
- package/locales/zh-TW/models.json +32 -7
- package/locales/zh-TW/providers.json +0 -1
- package/package.json +1 -1
- package/packages/model-bank/package.json +1 -0
- package/packages/model-bank/src/aiModels/index.ts +3 -0
- package/packages/model-bank/src/aiModels/xiaomimimo.ts +24 -0
- package/packages/model-bank/src/const/modelProvider.ts +1 -0
- package/packages/model-bank/src/modelProviders/index.ts +3 -0
- package/packages/model-bank/src/modelProviders/xiaomimimo.ts +22 -0
- package/packages/model-runtime/src/index.ts +1 -0
- package/packages/model-runtime/src/providers/xiaomimimo/index.test.ts +147 -0
- package/packages/model-runtime/src/providers/xiaomimimo/index.ts +48 -0
- package/packages/model-runtime/src/runtimeMap.ts +2 -0
- package/packages/model-runtime/src/utils/modelParse.ts +6 -0
- package/src/envs/llm.ts +6 -0
- package/src/features/ElectronTitlebar/hooks/useWatchThemeUpdate.ts +7 -2
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.202",
+  "version": "2.0.0-next.204",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",

package/packages/model-bank/package.json
CHANGED
@@ -71,6 +71,7 @@
     "./volcengine": "./src/aiModels/volcengine.ts",
     "./wenxin": "./src/aiModels/wenxin.ts",
     "./xai": "./src/aiModels/xai.ts",
+    "./xiaomimimo": "./src/aiModels/xiaomimimo.ts",
     "./xinference": "./src/aiModels/xinference.ts",
     "./zenmux": "./src/aiModels/zenmux.ts",
     "./zeroone": "./src/aiModels/zeroone.ts",

package/packages/model-bank/src/aiModels/index.ts
CHANGED
@@ -66,6 +66,7 @@ import { default as vllm } from './vllm';
 import { default as volcengine } from './volcengine';
 import { default as wenxin } from './wenxin';
 import { default as xai } from './xai';
+import { default as xiaomimimo } from './xiaomimimo';
 import { default as xinference } from './xinference';
 import { default as zenmux } from './zenmux';
 import { default as zeroone } from './zeroone';
@@ -156,6 +157,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   volcengine,
   wenxin,
   xai,
+  xiaomimimo,
   xinference,
   zenmux,
   zeroone,
@@ -227,6 +229,7 @@ export { default as vllm } from './vllm';
 export { default as volcengine } from './volcengine';
 export { default as wenxin } from './wenxin';
 export { default as xai } from './xai';
+export { default as xiaomimimo } from './xiaomimimo';
 export { default as xinference } from './xinference';
 export { default as zenmux } from './zenmux';
 export { default as zeroone } from './zeroone';

package/packages/model-bank/src/aiModels/xiaomimimo.ts
ADDED
@@ -0,0 +1,24 @@
+import { AIChatModelCard } from '../types/aiModel';
+
+const xiaomimimoChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 262_144,
+    description: 'MiMo-V2-Flash: An efficient model for reasoning, coding, and agent foundations.',
+    displayName: 'MiMo-V2 Flash',
+    enabled: true,
+    id: 'mimo-v2-flash',
+    maxOutput: 131_072,
+    settings: {
+      extendParams: ['enableReasoning'],
+    },
+    type: 'chat',
+  },
+];
+
+export const allModels = [...xiaomimimoChatModels];
+
+export default allModels;

package/packages/model-bank/src/modelProviders/index.ts
CHANGED
@@ -67,6 +67,7 @@ import VLLMProvider from './vllm';
 import VolcengineProvider from './volcengine';
 import WenxinProvider from './wenxin';
 import XAIProvider from './xai';
+import XiaomiMiMoProvider from './xiaomimimo';
 import XinferenceProvider from './xinference';
 import ZenMuxProvider from './zenmux';
 import ZeroOneProvider from './zeroone';
@@ -198,6 +199,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   VercelAIGatewayProvider,
   CerebrasProvider,
   ZenMuxProvider,
+  XiaomiMiMoProvider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -274,6 +276,7 @@ export { default as VLLMProviderCard } from './vllm';
 export { default as VolcengineProviderCard } from './volcengine';
 export { default as WenxinProviderCard } from './wenxin';
 export { default as XAIProviderCard } from './xai';
+export { default as XiaomiMiMoProviderCard } from './xiaomimimo';
 export { default as XinferenceProviderCard } from './xinference';
 export { default as ZenMuxProviderCard } from './zenmux';
 export { default as ZeroOneProviderCard } from './zeroone';

package/packages/model-bank/src/modelProviders/xiaomimimo.ts
ADDED
@@ -0,0 +1,22 @@
+import { type ModelProviderCard } from '@/types/llm';
+
+const XiaomiMiMo: ModelProviderCard = {
+  chatModels: [],
+  checkModel: 'mimo-v2-flash',
+  description:
+    'Xiaomi MiMo provides a conversational model service with an OpenAI-compatible API. The mimo-v2-flash model supports deep reasoning, streaming output, function calling, a 256K context window, and a maximum output of 128K.',
+  id: 'xiaomimimo',
+  modelList: { showModelFetcher: true },
+  name: 'Xiaomi MiMo',
+  settings: {
+    disableBrowserRequest: true, // CORS error
+    proxyUrl: {
+      placeholder: 'https://api.xiaomimimo.com/v1',
+    },
+    sdkType: 'openai',
+    showModelFetcher: true,
+  },
+  url: 'https://platform.xiaomimimo.com/',
+};
+
+export default XiaomiMiMo;

package/packages/model-runtime/src/index.ts
CHANGED
@@ -32,6 +32,7 @@ export { LobeQwenAI } from './providers/qwen';
 export { LobeStepfunAI } from './providers/stepfun';
 export { LobeTogetherAI } from './providers/togetherai';
 export { LobeVolcengineAI } from './providers/volcengine';
+export { LobeXiaomiMiMoAI } from './providers/xiaomimimo';
 export { LobeZenMuxAI } from './providers/zenmux';
 export { LobeZeroOneAI } from './providers/zeroone';
 export { LobeZhipuAI } from './providers/zhipu';

package/packages/model-runtime/src/providers/xiaomimimo/index.test.ts
ADDED
@@ -0,0 +1,147 @@
+// @vitest-environment node
+import { ModelProvider } from 'model-bank';
+import { describe, expect, it, vi } from 'vitest';
+
+import { testProvider } from '../../providerTestUtils';
+import { LobeXiaomiMiMoAI, params } from './index';
+
+const provider = ModelProvider.XiaomiMiMo;
+const defaultBaseURL = 'https://api.xiaomimimo.com/v1';
+
+testProvider({
+  Runtime: LobeXiaomiMiMoAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_XIAOMIMIMO_CHAT_COMPLETION',
+  chatModel: 'gpt-4o',
+  test: {
+    skipAPICall: true,
+  },
+});
+
+describe('LobeXiaomiMiMoAI - custom features', () => {
+  describe('chatCompletion.handlePayload', () => {
+    it('should map max_tokens to max_completion_tokens', () => {
+      const payload = {
+        max_tokens: 1000,
+        model: 'gpt-4o',
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result.max_completion_tokens).toBe(1000);
+      expect(result.max_tokens).toBeUndefined();
+    });
+
+    it('should set stream to true by default', () => {
+      const payload = {
+        model: 'gpt-4o',
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result.stream).toBe(true);
+    });
+
+    it('should preserve existing stream value', () => {
+      const payload = {
+        model: 'gpt-4o',
+        stream: false,
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result.stream).toBe(false);
+    });
+
+    it('should clamp temperature between 0 and 1.5', () => {
+      const payloadLow = {
+        temperature: -1,
+        model: 'gpt-4o',
+      };
+      const resultLow = params.chatCompletion!.handlePayload!(payloadLow as any);
+      expect(resultLow.temperature).toBe(0);
+
+      const payloadHigh = {
+        temperature: 2,
+        model: 'gpt-4o',
+      };
+      const resultHigh = params.chatCompletion!.handlePayload!(payloadHigh as any);
+      expect(resultHigh.temperature).toBe(1.5);
+
+      const payloadNormal = {
+        temperature: 0.7,
+        model: 'gpt-4o',
+      };
+      const resultNormal = params.chatCompletion!.handlePayload!(payloadNormal as any);
+      expect(resultNormal.temperature).toBe(0.7);
+    });
+
+    it('should clamp top_p between 0.01 and 1', () => {
+      const payloadLow = {
+        top_p: 0,
+        model: 'gpt-4o',
+      };
+      const resultLow = params.chatCompletion!.handlePayload!(payloadLow as any);
+      expect(resultLow.top_p).toBe(0.01);
+
+      const payloadHigh = {
+        top_p: 1.5,
+        model: 'gpt-4o',
+      };
+      const resultHigh = params.chatCompletion!.handlePayload!(payloadHigh as any);
+      expect(resultHigh.top_p).toBe(1);
+
+      const payloadNormal = {
+        top_p: 0.5,
+        model: 'gpt-4o',
+      };
+      const resultNormal = params.chatCompletion!.handlePayload!(payloadNormal as any);
+      expect(resultNormal.top_p).toBe(0.5);
+    });
+
+    it('should handle thinking type enabled/disabled', () => {
+      const payloadEnabled = {
+        thinking: { type: 'enabled' },
+        model: 'gpt-4o',
+      };
+      const resultEnabled = params.chatCompletion!.handlePayload!(payloadEnabled as any);
+      expect(resultEnabled.thinking).toEqual({ type: 'enabled' });
+
+      const payloadDisabled = {
+        thinking: { type: 'disabled' },
+        model: 'gpt-4o',
+      };
+      const resultDisabled = params.chatCompletion!.handlePayload!(payloadDisabled as any);
+      expect(resultDisabled.thinking).toEqual({ type: 'disabled' });
+
+      const payloadOther = {
+        thinking: { type: 'other' },
+        model: 'gpt-4o',
+      };
+      const resultOther = params.chatCompletion!.handlePayload!(payloadOther as any);
+      expect(resultOther.thinking).toBeUndefined();
+    });
+  });
+
+  describe('models', () => {
+    it('should fetch and process model list', async () => {
+      const mockModels = [{ id: 'model-1' }, { id: 'model-2' }];
+      const client = {
+        models: {
+          list: vi.fn().mockResolvedValue({ data: mockModels }),
+        },
+      };
+
+      const result = await params.models!({ client: client as any });
+
+      expect(client.models.list).toHaveBeenCalled();
+      expect(result).toEqual(
+        expect.arrayContaining([
+          expect.objectContaining({ id: 'model-1' }),
+          expect.objectContaining({ id: 'model-2' }),
+        ]),
+      );
+    });
+  });
+});

package/packages/model-runtime/src/providers/xiaomimimo/index.ts
ADDED
@@ -0,0 +1,48 @@
+import { ModelProvider } from 'model-bank';
+
+import {
+  type OpenAICompatibleFactoryOptions,
+  createOpenAICompatibleRuntime,
+} from '../../core/openaiCompatibleFactory';
+import { MODEL_LIST_CONFIGS, processModelList } from '../../utils/modelParse';
+
+const clamp = (value: number, min: number, max: number) => Math.min(max, Math.max(min, value));
+
+export interface XiaomiMiMoModelCard {
+  id: string;
+}
+
+export const params = {
+  baseURL: 'https://api.xiaomimimo.com/v1',
+  chatCompletion: {
+    handlePayload: (payload) => {
+      const { thinking, temperature, top_p, max_tokens, stream, ...rest } = payload as any;
+      const thinkingType = thinking?.type;
+
+      return {
+        ...rest,
+        max_completion_tokens: max_tokens,
+        stream: stream ?? true,
+        ...(typeof temperature === 'number'
+          ? { temperature: clamp(temperature, 0, 1.5) }
+          : undefined),
+        ...(typeof top_p === 'number' ? { top_p: clamp(top_p, 0.01, 1) } : undefined),
+        ...(thinkingType === 'enabled' || thinkingType === 'disabled'
+          ? { thinking: { type: thinkingType } }
+          : undefined),
+      } as any;
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_XIAOMIMIMO_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const modelsPage = (await client.models.list()) as any;
+    const modelList: XiaomiMiMoModelCard[] = modelsPage.data;
+
+    return processModelList(modelList, MODEL_LIST_CONFIGS.xiaomimimo, 'xiaomimimo');
+  },
+  provider: ModelProvider.XiaomiMiMo,
+} satisfies OpenAICompatibleFactoryOptions;
+
+export const LobeXiaomiMiMoAI = createOpenAICompatibleRuntime(params);
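
For reference, the exported params.chatCompletion.handlePayload above normalizes requests before they reach the OpenAI-compatible endpoint. A minimal TypeScript sketch of the transformation, using hypothetical payload values (illustration only, not part of the diff):

import { params } from './index'; // the provider module added above

// Hypothetical request: out-of-range sampling values plus a legacy max_tokens.
const out = params.chatCompletion!.handlePayload!({
  max_tokens: 1024,
  model: 'mimo-v2-flash',
  temperature: 2, // above the 1.5 cap
  top_p: 0, // below the 0.01 floor
} as any);

// Per the implementation (and the tests above):
// out.max_completion_tokens === 1024, while out.max_tokens is dropped
// out.stream === true (defaulted when unset)
// out.temperature === 1.5 and out.top_p === 0.01 (clamped)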

package/packages/model-runtime/src/runtimeMap.ts
CHANGED
@@ -62,6 +62,7 @@ import { LobeVLLMAI } from './providers/vllm';
 import { LobeVolcengineAI } from './providers/volcengine';
 import { LobeWenxinAI } from './providers/wenxin';
 import { LobeXAI } from './providers/xai';
+import { LobeXiaomiMiMoAI } from './providers/xiaomimimo';
 import { LobeXinferenceAI } from './providers/xinference';
 import { LobeZenMuxAI } from './providers/zenmux';
 import { LobeZeroOneAI } from './providers/zeroone';
@@ -133,6 +134,7 @@ export const providerRuntimeMap = {
   volcengine: LobeVolcengineAI,
   wenxin: LobeWenxinAI,
   xai: LobeXAI,
+  xiaomimimo: LobeXiaomiMiMoAI,
   xinference: LobeXinferenceAI,
   zenmux: LobeZenMuxAI,
   zeroone: LobeZeroOneAI,

package/packages/model-runtime/src/utils/modelParse.ts
CHANGED
@@ -120,6 +120,11 @@ export const MODEL_LIST_CONFIGS = {
     reasoningKeywords: ['mini', 'grok-4', 'grok-code-fast', '!non-reasoning'],
     visionKeywords: ['vision', 'grok-4'],
   },
+  xiaomimimo: {
+    functionCallKeywords: ['mimo'],
+    reasoningKeywords: ['mimo'],
+    visionKeywords: [],
+  },
   zeroone: {
     functionCallKeywords: ['fc'],
     visionKeywords: ['vision'],
@@ -149,6 +154,7 @@ export const MODEL_OWNER_DETECTION_CONFIG = {
   volcengine: ['doubao'],
   wenxin: ['ernie', 'qianfan'],
   xai: ['grok'],
+  xiaomimimo: ['mimo-'],
   zeroone: ['yi-'],
   zhipu: ['glm'],
 } as const;
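
processModelList itself is not part of this diff, so the following is only a simplified sketch of how a keyword config such as the xiaomimimo entry above typically drives ability detection (assumptions: plain substring matching on the model id; '!'-prefixed exclusion keywords like '!non-reasoning' are ignored here):

interface KeywordConfig {
  functionCallKeywords?: readonly string[];
  reasoningKeywords?: readonly string[];
  visionKeywords?: readonly string[];
}

// Hypothetical helper: flag an ability when any keyword appears in the model id.
const detectAbilities = (modelId: string, config: KeywordConfig) => {
  const id = modelId.toLowerCase();
  const hit = (keywords?: readonly string[]) => !!keywords?.some((k) => id.includes(k));
  return {
    functionCall: hit(config.functionCallKeywords),
    reasoning: hit(config.reasoningKeywords),
    vision: hit(config.visionKeywords),
  };
};

// With the config added above, 'mimo-v2-flash' contains the keyword 'mimo':
// detectAbilities('mimo-v2-flash', MODEL_LIST_CONFIGS.xiaomimimo)
// → { functionCall: true, reasoning: true, vision: false }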
package/src/envs/llm.ts
CHANGED
@@ -216,6 +216,9 @@ export const getLLMConfig = () => {
       ZENMUX_API_KEY: z.string().optional(),
 
       ENABLED_LOBEHUB: z.boolean(),
+
+      ENABLED_XIAOMIMIMO: z.boolean(),
+      XIAOMIMIMO_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -430,6 +433,9 @@ export const getLLMConfig = () => {
       ZENMUX_API_KEY: process.env.ZENMUX_API_KEY,
 
       ENABLED_LOBEHUB: !!process.env.ENABLED_LOBEHUB,
+
+      ENABLED_XIAOMIMIMO: !!process.env.XIAOMIMIMO_API_KEY,
+      XIAOMIMIMO_API_KEY: process.env.XIAOMIMIMO_API_KEY,
     },
   });
 };
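
Note that ENABLED_XIAOMIMIMO is derived from the presence of the API key rather than from a dedicated flag, so setting the key is enough to switch the provider on. Sketch (placeholder key, illustration only):

// Any non-empty string enables the provider; the value here is a placeholder.
process.env.XIAOMIMIMO_API_KEY = 'sk-placeholder';
// In getLLMConfig(): ENABLED_XIAOMIMIMO = !!process.env.XIAOMIMIMO_API_KEY → true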

package/src/features/ElectronTitlebar/hooks/useWatchThemeUpdate.ts
CHANGED
@@ -7,6 +7,10 @@ import { useGlobalStore } from '@/store/global';
 import { systemStatusSelectors } from '@/store/global/selectors';
 import { ensureElectronIpc } from '@/utils/electron/ipc';
 
+const sidebarColors = {
+  dark: '#000',
+  light: '#f8f8f8',
+};
 export const useWatchThemeUpdate = () => {
   const [isAppStateInit, systemAppearance, updateElectronAppState, isMac] = useElectronStore(
     (s) => [
@@ -43,8 +47,9 @@ export const useWatchThemeUpdate = () => {
 
     const lobeApp = document.querySelector('#' + LOBE_THEME_APP_ID);
     if (!lobeApp) return;
-    const hexColor = getComputedStyle(lobeApp).getPropertyValue('--ant-color-bg-layout');
 
-
+    if (systemAppearance) {
+      document.body.style.background = `color-mix(in srgb, ${sidebarColors[systemAppearance as 'dark' | 'light']} 86%, transparent)`;
+    }
   }, [systemAppearance, isAppStateInit, isMac]);
 };
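
With this change the hook paints the document body with a fixed per-appearance sidebar color at 86% opacity instead of reading --ant-color-bg-layout from the app element. The resolved values (for reference, not part of the diff):

// systemAppearance === 'dark'  → background: 'color-mix(in srgb, #000 86%, transparent)'
// systemAppearance === 'light' → background: 'color-mix(in srgb, #f8f8f8 86%, transparent)'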