@lobehub/chat 1.77.11 → 1.77.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/.github/scripts/pr-comment.js +111 -55
  2. package/CHANGELOG.md +51 -0
  3. package/changelog/v1.json +18 -0
  4. package/docs/self-hosting/environment-variables/basic.mdx +39 -0
  5. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +40 -1
  6. package/locales/ar/hotkey.json +4 -0
  7. package/locales/ar/models.json +6 -0
  8. package/locales/bg-BG/hotkey.json +4 -0
  9. package/locales/bg-BG/models.json +6 -0
  10. package/locales/de-DE/hotkey.json +4 -0
  11. package/locales/de-DE/models.json +6 -0
  12. package/locales/en-US/hotkey.json +4 -0
  13. package/locales/en-US/models.json +6 -0
  14. package/locales/es-ES/hotkey.json +4 -0
  15. package/locales/es-ES/models.json +6 -0
  16. package/locales/fa-IR/hotkey.json +4 -0
  17. package/locales/fa-IR/models.json +6 -0
  18. package/locales/fr-FR/hotkey.json +4 -0
  19. package/locales/fr-FR/models.json +6 -0
  20. package/locales/it-IT/hotkey.json +4 -0
  21. package/locales/it-IT/models.json +6 -0
  22. package/locales/ja-JP/hotkey.json +4 -0
  23. package/locales/ja-JP/models.json +6 -0
  24. package/locales/ko-KR/hotkey.json +4 -0
  25. package/locales/ko-KR/models.json +6 -0
  26. package/locales/nl-NL/hotkey.json +4 -0
  27. package/locales/nl-NL/models.json +6 -0
  28. package/locales/pl-PL/hotkey.json +4 -0
  29. package/locales/pl-PL/models.json +6 -0
  30. package/locales/pt-BR/hotkey.json +4 -0
  31. package/locales/pt-BR/models.json +6 -0
  32. package/locales/ru-RU/hotkey.json +4 -0
  33. package/locales/ru-RU/models.json +6 -0
  34. package/locales/tr-TR/hotkey.json +4 -0
  35. package/locales/tr-TR/models.json +6 -0
  36. package/locales/vi-VN/hotkey.json +4 -0
  37. package/locales/vi-VN/models.json +6 -0
  38. package/locales/zh-CN/hotkey.json +4 -0
  39. package/locales/zh-CN/models.json +7 -1
  40. package/locales/zh-TW/hotkey.json +4 -0
  41. package/locales/zh-TW/models.json +6 -0
  42. package/package.json +1 -1
  43. package/packages/electron-client-ipc/src/dispatch.ts +29 -0
  44. package/packages/electron-client-ipc/src/events/index.ts +9 -2
  45. package/packages/electron-client-ipc/src/events/menu.ts +5 -0
  46. package/packages/electron-client-ipc/src/events/search.ts +4 -0
  47. package/packages/electron-client-ipc/src/events/system.ts +3 -0
  48. package/packages/electron-client-ipc/src/events/windows.ts +8 -0
  49. package/packages/electron-client-ipc/src/index.ts +1 -0
  50. package/scripts/migrateServerDB/index.ts +3 -2
  51. package/src/app/[variants]/(main)/(mobile)/me/(home)/__tests__/UserBanner.test.tsx +1 -0
  52. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/TopActions.tsx +3 -2
  53. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/index.tsx +24 -5
  54. package/src/app/[variants]/(main)/_layout/Desktop/Titlebar.tsx +27 -0
  55. package/src/app/[variants]/(main)/_layout/Desktop/index.tsx +32 -3
  56. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/index.tsx +12 -2
  57. package/src/app/[variants]/(main)/chat/(workspace)/page.tsx +2 -1
  58. package/src/app/[variants]/(main)/settings/hotkey/features/HotkeySetting.tsx +15 -6
  59. package/src/app/[variants]/(main)/settings/hotkey/page.tsx +10 -1
  60. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +3 -1
  61. package/src/app/desktop/devtools/page.tsx +37 -38
  62. package/src/config/aiModels/google.ts +25 -1
  63. package/src/config/aiModels/novita.ts +1 -32
  64. package/src/config/aiModels/ollama.ts +12 -1
  65. package/src/config/aiModels/qwen.ts +1 -1
  66. package/src/const/hotkeys.ts +10 -0
  67. package/src/features/DevPanel/PostgresViewer/SchemaSidebar/index.tsx +1 -1
  68. package/src/features/User/UserPanel/index.tsx +1 -1
  69. package/src/features/User/__tests__/PanelContent.test.tsx +1 -0
  70. package/src/libs/trpc/index.ts +1 -0
  71. package/src/libs/trpc/middleware/userAuth.ts +2 -3
  72. package/src/locales/default/hotkey.ts +4 -0
  73. package/src/server/globalConfig/genServerAiProviderConfig.ts +12 -3
  74. package/src/server/globalConfig/index.ts +6 -1
  75. package/src/server/globalConfig/parseSystemAgent.test.ts +56 -0
  76. package/src/server/globalConfig/parseSystemAgent.ts +25 -0
  77. package/src/server/services/changelog/index.test.ts +2 -0
  78. package/src/server/services/changelog/index.ts +6 -2
  79. package/src/services/__tests__/chat.test.ts +3 -0
  80. package/src/services/electron/devtools.ts +1 -1
  81. package/src/store/user/slices/modelList/selectors/modelConfig.ts +4 -0
  82. package/src/styles/global.ts +14 -0
  83. package/src/types/hotkey.ts +1 -0
  84. package/tsconfig.json +1 -6
  85. package/packages/electron-client-ipc/src/events/devtools.ts +0 -6
  86. package/src/app/[variants]/(main)/settings/hotkey/index.tsx +0 -9
  87. package/src/types/electron.ts +0 -11
  88. package/src/utils/electron/dispatch.ts +0 -10
@@ -4,6 +4,7 @@ import { ChatHeader } from '@lobehub/ui/chat';
4
4
 
5
5
  import { useGlobalStore } from '@/store/global';
6
6
  import { systemStatusSelectors } from '@/store/global/selectors';
7
+ import { electronStylish } from '@/styles/electron';
7
8
 
8
9
  import HeaderAction from './HeaderAction';
9
10
  import Main from './Main';
@@ -14,8 +15,17 @@ const Header = () => {
14
15
  return (
15
16
  showHeader && (
16
17
  <ChatHeader
17
- left={<Main />}
18
- right={<HeaderAction />}
18
+ className={electronStylish.draggable}
19
+ left={
20
+ <div className={electronStylish.nodrag}>
21
+ <Main />
22
+ </div>
23
+ }
24
+ right={
25
+ <div className={electronStylish.nodrag}>
26
+ <HeaderAction />
27
+ </div>
28
+ }
19
29
  style={{ height: 48, minHeight: 48, paddingInline: 8, position: 'initial', zIndex: 11 }}
20
30
  />
21
31
  )
@@ -3,6 +3,7 @@ import { Suspense } from 'react';
3
3
  import StructuredData from '@/components/StructuredData';
4
4
  import { serverFeatureFlags } from '@/config/featureFlags';
5
5
  import { BRANDING_NAME } from '@/const/branding';
6
+ import { isDesktop } from '@/const/version';
6
7
  import { ldModule } from '@/server/ld';
7
8
  import { metadataModule } from '@/server/metadata';
8
9
  import { translation } from '@/server/translation';
@@ -38,7 +39,7 @@ const Page = async (props: DynamicLayoutProps) => {
38
39
  <StructuredData ld={ld} />
39
40
  <PageTitle />
40
41
  <TelemetryNotification mobile={isMobile} />
41
- {showChangelog && !hideDocs && !isMobile && (
42
+ {!isDesktop && showChangelog && !hideDocs && !isMobile && (
42
43
  <Suspense>
43
44
  <Changelog />
44
45
  </Suspense>
@@ -7,11 +7,19 @@ import { useTranslation } from 'react-i18next';
7
7
 
8
8
  import { HOTKEYS_REGISTRATION } from '@/const/hotkeys';
9
9
  import { FORM_STYLE } from '@/const/layoutTokens';
10
+ import { isDesktop } from '@/const/version';
10
11
  import hotkeyMeta from '@/locales/default/hotkey';
11
12
  import { useUserStore } from '@/store/user';
12
13
  import { settingsSelectors } from '@/store/user/selectors';
13
14
  import { HotkeyGroupEnum, HotkeyItem } from '@/types/hotkey';
14
15
 
16
+ const filterByDesktop = (item: HotkeyItem) => {
17
+ if (isDesktop) return true;
18
+
19
+ // is not desktop, filter out desktop only items
20
+ if (!isDesktop) return !item.isDesktop;
21
+ };
22
+
15
23
  type SettingItemGroup = ItemGroup;
16
24
 
17
25
  const HOTKEY_SETTING_KEY = 'hotkey';
@@ -30,6 +38,7 @@ const HotkeySetting = memo(() => {
30
38
  return value;
31
39
  })
32
40
  .filter(Boolean) as string[];
41
+
33
42
  return {
34
43
  children: (
35
44
  <HotkeyInput
@@ -51,16 +60,16 @@ const HotkeySetting = memo(() => {
51
60
  };
52
61
 
53
62
  const essential: SettingItemGroup = {
54
- children: HOTKEYS_REGISTRATION.filter((item) => item.group === HotkeyGroupEnum.Essential).map(
55
- (item) => mapHotkeyItem(item),
56
- ),
63
+ children: HOTKEYS_REGISTRATION.filter((item) => item.group === HotkeyGroupEnum.Essential)
64
+ .filter((item) => filterByDesktop(item))
65
+ .map((item) => mapHotkeyItem(item)),
57
66
  title: t('hotkey.group.essential'),
58
67
  };
59
68
 
60
69
  const conversation: SettingItemGroup = {
61
- children: HOTKEYS_REGISTRATION.filter(
62
- (item) => item.group === HotkeyGroupEnum.Conversation,
63
- ).map((item) => mapHotkeyItem(item)),
70
+ children: HOTKEYS_REGISTRATION.filter((item) => item.group === HotkeyGroupEnum.Conversation)
71
+ .filter((item) => filterByDesktop(item))
72
+ .map((item) => mapHotkeyItem(item)),
64
73
  title: t('hotkey.group.conversation'),
65
74
  };
66
75
 
@@ -3,6 +3,8 @@ import { translation } from '@/server/translation';
3
3
  import { DynamicLayoutProps } from '@/types/next';
4
4
  import { RouteVariants } from '@/utils/server/routeVariants';
5
5
 
6
+ import HotkeySetting from './features/HotkeySetting';
7
+
6
8
  export const generateMetadata = async (props: DynamicLayoutProps) => {
7
9
  const locale = await RouteVariants.getLocale(props);
8
10
  const { t } = await translation('setting', locale);
@@ -12,4 +14,11 @@ export const generateMetadata = async (props: DynamicLayoutProps) => {
12
14
  url: '/settings/hotkey',
13
15
  });
14
16
  };
15
- export { default } from './index';
17
+
18
+ const Page = () => {
19
+ return <HotkeySetting />;
20
+ };
21
+
22
+ Page.displayName = 'HotkeySetting';
23
+
24
+ export default Page;
@@ -16,7 +16,7 @@ import { z } from 'zod';
16
16
  import { FormInput, FormPassword } from '@/components/FormInput';
17
17
  import { FORM_STYLE } from '@/const/layoutTokens';
18
18
  import { AES_GCM_URL, BASE_PROVIDER_DOC_URL } from '@/const/url';
19
- import { isServerMode } from '@/const/version';
19
+ import { isDesktop, isServerMode } from '@/const/version';
20
20
  import { aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
21
21
  import {
22
22
  AiProviderDetailItem,
@@ -244,12 +244,14 @@ const ProviderConfig = memo<ProviderConfigProps>(
244
244
 
245
245
  /*
246
246
  * Conditions to show Client Fetch Switch
247
+ * 0. is not desktop app
247
248
  * 1. provider is not disabled browser request
248
249
  * 2. provider show browser request by default
249
250
  * 3. Provider allow to edit endpoint and the value of endpoint is not empty
250
251
  * 4. There is an apikey provided by user
251
252
  */
252
253
  const showClientFetch =
254
+ !isDesktop &&
253
255
  !disableBrowserRequest &&
254
256
  (defaultShowBrowserRequest ||
255
257
  (showEndpoint && isProviderEndpointNotEmpty) ||
@@ -1,6 +1,6 @@
1
1
  'use client';
2
2
 
3
- import { ActionIcon, FluentEmoji, SideNav } from '@lobehub/ui';
3
+ import { ActionIcon, SideNav } from '@lobehub/ui';
4
4
  import { Cog, DatabaseIcon } from 'lucide-react';
5
5
  import { memo, useState } from 'react';
6
6
  import { Flexbox } from 'react-layout-kit';
@@ -30,44 +30,43 @@ const DevTools = memo(() => {
30
30
  const [tab, setTab] = useState<string>(items[0].key);
31
31
 
32
32
  return (
33
- <Flexbox
34
- height={'100%'}
35
- horizontal
36
- style={{ overflow: 'hidden', position: 'relative' }}
37
- width={'100%'}
38
- >
39
- <SideNav
40
- avatar={<FluentEmoji emoji={'🧰'} size={24} />}
41
- bottomActions={[]}
42
- style={{
43
- paddingBlock: 32,
44
- width: 48,
45
- }}
46
- topActions={items.map((item) => (
47
- <ActionIcon
48
- active={tab === item.key}
49
- key={item.key}
50
- onClick={() => setTab(item.key)}
51
- placement={'right'}
52
- title={item.key}
53
- >
54
- {item.icon}
55
- </ActionIcon>
56
- ))}
57
- />
58
- <Flexbox height={'100%'} style={{ overflow: 'hidden', position: 'relative' }} width={'100%'}>
59
- <Flexbox
60
- align={'center'}
61
- className={cx(`panel-drag-handle`, styles.header, electronStylish.draggable)}
62
- horizontal
63
- justify={'center'}
64
- >
65
- <Flexbox align={'baseline'} gap={6} horizontal>
66
- <b>{BRANDING_NAME} Dev Tools</b>
67
- <span style={{ color: theme.colorTextDescription }}>/</span>
68
- <span style={{ color: theme.colorTextDescription }}>{tab}</span>
69
- </Flexbox>
33
+ <Flexbox height={'100%'} style={{ overflow: 'hidden', position: 'relative' }} width={'100%'}>
34
+ <Flexbox
35
+ align={'center'}
36
+ className={cx(`panel-drag-handle`, styles.header, electronStylish.draggable)}
37
+ horizontal
38
+ justify={'center'}
39
+ >
40
+ <Flexbox align={'baseline'} gap={6} horizontal>
41
+ <b>{BRANDING_NAME} Dev Tools</b>
42
+ <span style={{ color: theme.colorTextDescription }}>/</span>
43
+ <span style={{ color: theme.colorTextDescription }}>{tab}</span>
70
44
  </Flexbox>
45
+ </Flexbox>
46
+ <Flexbox
47
+ height={'100%'}
48
+ horizontal
49
+ style={{ background: theme.colorBgLayout, overflow: 'hidden', position: 'relative' }}
50
+ width={'100%'}
51
+ >
52
+ <SideNav
53
+ bottomActions={[]}
54
+ style={{
55
+ background: 'transparent',
56
+ width: 48,
57
+ }}
58
+ topActions={items.map((item) => (
59
+ <ActionIcon
60
+ active={tab === item.key}
61
+ key={item.key}
62
+ onClick={() => setTab(item.key)}
63
+ placement={'right'}
64
+ title={item.key}
65
+ >
66
+ {item.icon}
67
+ </ActionIcon>
68
+ ))}
69
+ />
71
70
  {items.map((item) => (
72
71
  <Flexbox
73
72
  flex={1}
@@ -10,7 +10,7 @@ const googleChatModels: AIChatModelCard[] = [
10
10
  },
11
11
  contextWindowTokens: 1_048_576 + 65_536,
12
12
  description:
13
- 'Gemini 2.5 Pro Experimental 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,还能利用长上下文来分析大型数据集、代码库和文档。',
13
+ 'Gemini 2.5 Pro Experimental 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
14
14
  displayName: 'Gemini 2.5 Pro Experimental 03-25',
15
15
  enabled: true,
16
16
  id: 'gemini-2.5-pro-exp-03-25',
@@ -27,6 +27,30 @@ const googleChatModels: AIChatModelCard[] = [
27
27
  },
28
28
  type: 'chat',
29
29
  },
30
+ {
31
+ abilities: {
32
+ functionCall: true,
33
+ reasoning: true,
34
+ search: true,
35
+ vision: true,
36
+ },
37
+ contextWindowTokens: 1_048_576 + 65_536,
38
+ description:
39
+ 'Gemini 2.5 Pro Preview 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
40
+ displayName: 'Gemini 2.5 Pro Preview 03-25 (Paid)',
41
+ id: 'gemini-2.5-pro-preview-03-25',
42
+ maxOutput: 65_536,
43
+ pricing: {
44
+ input: 1.25, // prompts <= 200k tokens
45
+ output: 10, // prompts <= 200k tokens
46
+ },
47
+ releasedAt: '2025-03-25',
48
+ settings: {
49
+ searchImpl: 'params',
50
+ searchProvider: 'google',
51
+ },
52
+ type: 'chat',
53
+ },
30
54
  {
31
55
  abilities: {
32
56
  reasoning: true,
@@ -9,7 +9,7 @@ const novitaChatModels: AIChatModelCard[] = [
9
9
  enabled: true,
10
10
  id: 'meta-llama/llama-3.3-70b-instruct',
11
11
  pricing: {
12
- input: 0.39,
12
+ input: 0.13,
13
13
  output: 0.39,
14
14
  },
15
15
  type: 'chat',
@@ -147,17 +147,6 @@ const novitaChatModels: AIChatModelCard[] = [
147
147
  },
148
148
  type: 'chat',
149
149
  },
150
- {
151
- contextWindowTokens: 4096,
152
- description: 'OpenChat 7B 是经过“C-RLFT(条件强化学习微调)”策略精调的开源语言模型库。',
153
- displayName: 'OpenChat 7B',
154
- id: 'openchat/openchat-7b',
155
- pricing: {
156
- input: 0.06,
157
- output: 0.06,
158
- },
159
- type: 'chat',
160
- },
161
150
  {
162
151
  contextWindowTokens: 64_000,
163
152
  displayName: 'Deepseek V3 Turbo',
@@ -308,26 +297,6 @@ const novitaChatModels: AIChatModelCard[] = [
308
297
  },
309
298
  type: 'chat',
310
299
  },
311
- {
312
- contextWindowTokens: 4096,
313
- displayName: 'Nous Hermes Llama2 13B',
314
- id: 'nousresearch/nous-hermes-llama2-13b',
315
- pricing: {
316
- input: 0.17,
317
- output: 0.17,
318
- },
319
- type: 'chat',
320
- },
321
- {
322
- contextWindowTokens: 4096,
323
- displayName: 'OpenHermes 2.5 Mistral 7B',
324
- id: 'teknium/openhermes-2.5-mistral-7b',
325
- pricing: {
326
- input: 0.17,
327
- output: 0.17,
328
- },
329
- type: 'chat',
330
- },
331
300
  {
332
301
  contextWindowTokens: 4096,
333
302
  displayName: 'Midnight Rose 70B',
@@ -13,6 +13,14 @@ const ollamaChatModels: AIChatModelCard[] = [
13
13
  id: 'deepseek-r1',
14
14
  type: 'chat',
15
15
  },
16
+ {
17
+ contextWindowTokens: 65_536,
18
+ description:
19
+ 'DeepSeek-V3 是一个强大的专家混合(MoE)语言模型,总参数量为 671B,每个 Token 激活 37B 参数。该模型采用多头潜在注意力(MLA)和 DeepSeekMoE 架构,实现了高效推理和经济训练,并在前代 DeepSeek-V3 的基础上显著提升了性能。',
20
+ displayName: 'DeepSeek V3 671B',
21
+ id: 'deepseek-v3',
22
+ type: 'chat',
23
+ },
16
24
  {
17
25
  abilities: {
18
26
  functionCall: true,
@@ -78,8 +86,10 @@ const ollamaChatModels: AIChatModelCard[] = [
78
86
  reasoning: true,
79
87
  },
80
88
  contextWindowTokens: 128_000,
81
- description: 'QwQ 是一个实验研究模型,专注于提高 AI 推理能力。',
89
+ description:
90
+ 'QwQ 是 Qwen 系列的推理模型。与传统的指令调优模型相比,QwQ 具备思考和推理的能力,能够在下游任务中,尤其是困难问题上,显著提升性能。QwQ-32B 是中型推理模型,能够在与最先进的推理模型(如 DeepSeek-R1、o1-mini)竞争时取得可观的表现。',
82
91
  displayName: 'QwQ 32B',
92
+ enabled: true,
83
93
  id: 'qwq',
84
94
  releasedAt: '2024-11-28',
85
95
  type: 'chat',
@@ -105,6 +115,7 @@ const ollamaChatModels: AIChatModelCard[] = [
105
115
  contextWindowTokens: 128_000,
106
116
  description: 'Qwen2.5 是阿里巴巴的新一代大规模语言模型,以优异的性能支持多元化的应用需求。',
107
117
  displayName: 'Qwen2.5 7B',
118
+ enabled: true,
108
119
  id: 'qwen2.5',
109
120
  type: 'chat',
110
121
  },
@@ -302,7 +302,7 @@ const qwenChatModels: AIChatModelCard[] = [
302
302
  description:
303
303
  '通义千问QVQ视觉推理模型,支持视觉输入及思维链输出,在数学、编程、视觉分析、创作以及通用任务上都表现了更强的能力。',
304
304
  displayName: 'QVQ Max',
305
- id: 'qvq-max-latest',
305
+ id: 'qvq-max', // Unsupported model `qvq-max-latest` for OpenAI compatibility mode
306
306
  maxOutput: 8192,
307
307
  organization: 'Qwen',
308
308
  pricing: {
@@ -10,6 +10,7 @@ import {
10
10
 
11
11
  // mod 在 Mac 上是 command 键,alt 在 Win 上是 ctrl 键
12
12
  export const HOTKEYS_REGISTRATION: HotkeyRegistration = [
13
+ // basic
13
14
  {
14
15
  group: HotkeyGroupEnum.Essential,
15
16
  id: HotkeyEnum.Search,
@@ -47,6 +48,15 @@ export const HOTKEYS_REGISTRATION: HotkeyRegistration = [
47
48
  keys: combineKeys([KeyEnum.Ctrl, KeyEnum.Shift, KeyEnum.QuestionMark]),
48
49
  scopes: [HotkeyScopeEnum.Global],
49
50
  },
51
+ {
52
+ group: HotkeyGroupEnum.Essential,
53
+ id: HotkeyEnum.OpenSettings,
54
+ isDesktop: true,
55
+ keys: combineKeys([KeyEnum.Mod, KeyEnum.Comma]),
56
+ nonEditable: true,
57
+ scopes: [HotkeyScopeEnum.Global],
58
+ },
59
+ // Chat
50
60
  {
51
61
  group: HotkeyGroupEnum.Conversation,
52
62
  id: HotkeyEnum.OpenChatSettings,
@@ -129,7 +129,7 @@ const SchemaPanel = ({ onTableSelect, selectedTable }: SchemaPanelProps) => {
129
129
  };
130
130
 
131
131
  return (
132
- <DraggablePanel placement={'left'}>
132
+ <DraggablePanel minWidth={264} placement={'left'}>
133
133
  <Flexbox height={'100%'} style={{ overflow: 'hidden', position: 'relative' }}>
134
134
  <Flexbox
135
135
  align={'center'}
@@ -13,7 +13,7 @@ import { useNewVersion } from './useNewVersion';
13
13
  const useStyles = createStyles(({ css }) => {
14
14
  return {
15
15
  popover: css`
16
- inset-block-start: ${isDesktop ? 24 : 8}px !important;
16
+ inset-block-start: ${isDesktop ? 32 : 8}px !important;
17
17
  inset-inline-start: 8px !important;
18
18
  `,
19
19
  };
@@ -63,6 +63,7 @@ vi.mock('../DataStatistics', () => ({
63
63
 
64
64
  vi.mock('@/const/version', () => ({
65
65
  isDeprecatedEdition: false,
66
+ isDesktop: false,
66
67
  }));
67
68
 
68
69
  // 定义一个变量来存储 enableAuth 的值
@@ -7,6 +7,7 @@
7
7
  * @link https://trpc.io/docs/v11/router
8
8
  * @link https://trpc.io/docs/v11/procedures
9
9
  */
10
+
10
11
  import { trpc } from './init';
11
12
  import { jwtPayloadChecker } from './middleware/jwtPayload';
12
13
  import { userAuth } from './middleware/userAuth';
@@ -9,11 +9,10 @@ import { trpc } from '../init';
9
9
  export const userAuth = trpc.middleware(async (opts) => {
10
10
  const { ctx } = opts;
11
11
 
12
+ // 桌面端模式下,跳过默认鉴权逻辑
12
13
  if (isDesktop) {
13
14
  return opts.next({
14
- ctx: {
15
- userId: DESKTOP_USER_ID,
16
- },
15
+ ctx: { userId: DESKTOP_USER_ID },
17
16
  });
18
17
  }
19
18
  // `ctx.user` is nullable
@@ -17,6 +17,10 @@ const hotkey: HotkeyI18nTranslations = {
17
17
  desc: '查看所有快捷键的使用说明',
18
18
  title: '打开快捷键帮助',
19
19
  },
20
+ openSettings: {
21
+ desc: '打开应用设置页面',
22
+ title: '应用设置',
23
+ },
20
24
  regenerateMessage: {
21
25
  desc: '重新生成最后一条消息',
22
26
  title: '重新生成消息',
@@ -5,7 +5,15 @@ import { AiFullModelCard } from '@/types/aiModel';
5
5
  import { ProviderConfig } from '@/types/user/settings';
6
6
  import { extractEnabledModels, transformToAiChatModelList } from '@/utils/parseModels';
7
7
 
8
- export const genServerAiProvidersConfig = (specificConfig: Record<any, any>) => {
8
+ interface ProviderSpecificConfig {
9
+ enabled?: boolean;
10
+ enabledKey?: string;
11
+ fetchOnClient?: boolean;
12
+ modelListKey?: string;
13
+ withDeploymentName?: boolean;
14
+ }
15
+
16
+ export const genServerAiProvidersConfig = (specificConfig: Record<any, ProviderSpecificConfig>) => {
9
17
  const llmConfig = getLLMConfig() as Record<string, any>;
10
18
 
11
19
  return Object.values(ModelProvider).reduce(
@@ -26,8 +34,9 @@ export const genServerAiProvidersConfig = (specificConfig: Record<any, any>) =>
26
34
 
27
35
  config[provider] = {
28
36
  enabled:
29
- providerConfig.enabled ||
30
- llmConfig[providerConfig.enabledKey || `ENABLED_${providerUpperCase}`],
37
+ typeof providerConfig.enabled !== 'undefined'
38
+ ? providerConfig.enabled
39
+ : llmConfig[providerConfig.enabledKey || `ENABLED_${providerUpperCase}`],
31
40
 
32
41
  enabledModels: extractEnabledModels(
33
42
  providerModelList,
@@ -4,6 +4,7 @@ import { fileEnv } from '@/config/file';
4
4
  import { knowledgeEnv } from '@/config/knowledge';
5
5
  import { langfuseEnv } from '@/config/langfuse';
6
6
  import { enableNextAuth } from '@/const/auth';
7
+ import { isDesktop } from '@/const/version';
7
8
  import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
8
9
  import { GlobalServerConfig } from '@/types/serverConfig';
9
10
 
@@ -36,7 +37,11 @@ export const getServerGlobalConfig = async () => {
36
37
 
37
38
  /* ↑ cloud slot ↑ */
38
39
  ollama: {
39
- fetchOnClient: !process.env.OLLAMA_PROXY_URL,
40
+ enabled: isDesktop ? true : undefined,
41
+ fetchOnClient: isDesktop ? false : !process.env.OLLAMA_PROXY_URL,
42
+ },
43
+ openai: {
44
+ enabled: isDesktop ? false : undefined,
40
45
  },
41
46
  volcengine: {
42
47
  withDeploymentName: true,
@@ -92,4 +92,60 @@ describe('parseSystemAgent', () => {
92
92
 
93
93
  expect(parseSystemAgent(envValue)).toEqual(expected);
94
94
  });
95
+
96
+ it('should apply default setting to all system agents when default is specified', () => {
97
+ const envValue = 'default=ollama/deepseek-v3';
98
+
99
+ const result = parseSystemAgent(envValue);
100
+
101
+ expect(result.topic).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
102
+ expect(result.translation).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
103
+ expect(result.agentMeta).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
104
+ expect(result.historyCompress).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
105
+ expect(result.thread).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
106
+ expect(result.queryRewrite).toEqual({
107
+ provider: 'ollama',
108
+ model: 'deepseek-v3',
109
+ enabled: true,
110
+ });
111
+ });
112
+
113
+ it('should override default setting with specific settings', () => {
114
+ const envValue = 'default=ollama/deepseek-v3,topic=openai/gpt-4';
115
+
116
+ const result = parseSystemAgent(envValue);
117
+
118
+ expect(result.topic).toEqual({ provider: 'openai', model: 'gpt-4' });
119
+
120
+ expect(result.translation).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
121
+ expect(result.agentMeta).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
122
+ expect(result.historyCompress).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
123
+ expect(result.thread).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
124
+ expect(result.queryRewrite).toEqual({
125
+ provider: 'ollama',
126
+ model: 'deepseek-v3',
127
+ enabled: true,
128
+ });
129
+ });
130
+
131
+ it('should properly handle priority when topic appears before default in the string', () => {
132
+ // 即使 topic 在 default 之前出现,topic 的设置仍然应该优先
133
+ const envValue = 'topic=openai/gpt-4,default=ollama/deepseek-v3';
134
+
135
+ const result = parseSystemAgent(envValue);
136
+
137
+ // topic 应该保持自己的设置而不被 default 覆盖
138
+ expect(result.topic).toEqual({ provider: 'openai', model: 'gpt-4' });
139
+
140
+ // 其他系统智能体应该使用默认配置
141
+ expect(result.translation).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
142
+ expect(result.agentMeta).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
143
+ expect(result.historyCompress).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
144
+ expect(result.thread).toEqual({ provider: 'ollama', model: 'deepseek-v3' });
145
+ expect(result.queryRewrite).toEqual({
146
+ provider: 'ollama',
147
+ model: 'deepseek-v3',
148
+ enabled: true,
149
+ });
150
+ });
95
151
  });
@@ -13,6 +13,9 @@ export const parseSystemAgent = (envString: string = ''): Partial<UserSystemAgen
13
13
 
14
14
  const pairs = envValue.split(',');
15
15
 
16
+ // 用于存储默认设置,如果有 default=provider/model 的情况
17
+ let defaultSetting: { model: string; provider: string } | undefined;
18
+
16
19
  for (const pair of pairs) {
17
20
  const [key, value] = pair.split('=').map((s) => s.trim());
18
21
 
@@ -24,6 +27,15 @@ export const parseSystemAgent = (envString: string = ''): Partial<UserSystemAgen
24
27
  throw new Error('Missing model or provider value');
25
28
  }
26
29
 
30
+ // 如果是 default 键,保存默认设置
31
+ if (key === 'default') {
32
+ defaultSetting = {
33
+ model: model.trim(),
34
+ provider: provider.trim(),
35
+ };
36
+ continue;
37
+ }
38
+
27
39
  if (protectedKeys.includes(key)) {
28
40
  config[key as keyof UserSystemAgentConfig] = {
29
41
  enabled: key === 'queryRewrite' ? true : undefined,
@@ -36,5 +48,18 @@ export const parseSystemAgent = (envString: string = ''): Partial<UserSystemAgen
36
48
  }
37
49
  }
38
50
 
51
+ // 如果有默认设置,应用到所有未设置的系统智能体
52
+ if (defaultSetting) {
53
+ for (const key of protectedKeys) {
54
+ if (!config[key as keyof UserSystemAgentConfig]) {
55
+ config[key as keyof UserSystemAgentConfig] = {
56
+ enabled: key === 'queryRewrite' ? true : undefined,
57
+ model: defaultSetting.model,
58
+ provider: defaultSetting.provider,
59
+ } as any;
60
+ }
61
+ }
62
+ }
63
+
39
64
  return config;
40
65
  };
@@ -79,6 +79,7 @@ describe('ChangelogService', () => {
79
79
  describe('getChangelogIndex', () => {
80
80
  it('should fetch and merge changelog data', async () => {
81
81
  const mockResponse = {
82
+ ok: true,
82
83
  json: vi.fn().mockResolvedValue({
83
84
  cloud: [{ id: 'cloud1', date: '2023-01-01', versionRange: ['1.0.0'] }],
84
85
  community: [{ id: 'community1', date: '2023-01-02', versionRange: ['1.1.0'] }],
@@ -104,6 +105,7 @@ describe('ChangelogService', () => {
104
105
  it('should return only community items when config type is community', async () => {
105
106
  service.config.type = 'community';
106
107
  const mockResponse = {
108
+ ok: true,
107
109
  json: vi.fn().mockResolvedValue({
108
110
  cloud: [{ id: 'cloud1', date: '2023-01-01', versionRange: ['1.0.0'] }],
109
111
  community: [{ id: 'community1', date: '2023-01-02', versionRange: ['1.1.0'] }],
@@ -55,9 +55,13 @@ export class ChangelogService {
55
55
  next: { revalidate: 3600, tags: [FetchCacheTag.Changelog] },
56
56
  });
57
57
 
58
- const data = await res.json();
58
+ if (res.ok) {
59
+ const data = await res.json();
60
+
61
+ return this.mergeChangelogs(data.cloud, data.community).slice(0, 5);
62
+ }
59
63
 
60
- return this.mergeChangelogs(data.cloud, data.community).slice(0, 5);
64
+ return [];
61
65
  } catch (e) {
62
66
  const cause = (e as Error).cause as { code: string };
63
67
  if (cause?.code.includes('ETIMEDOUT')) {