@lobehub/lobehub 2.0.0-next.147 → 2.0.0-next.148

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,32 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 2.0.0-next.148](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.147...v2.0.0-next.148)
6
+
7
+ <sup>Released on **2025-12-03**</sup>
8
+
9
+ #### 🐛 Bug Fixes
10
+
11
+ - **misc**: Remove apiMode param from Azure and Cloudflare provider requests; fix contextMenu not working on desktop.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's fixed
19
+
20
+ - **misc**: Remove apiMode param from Azure and Cloudflare provider requests, closes [#10571](https://github.com/lobehub/lobe-chat/issues/10571) ([7e44faa](https://github.com/lobehub/lobe-chat/commit/7e44faa))
21
+ - **misc**: Fix contextMenu not working on desktop, closes [#10545](https://github.com/lobehub/lobe-chat/issues/10545) ([43c4db7](https://github.com/lobehub/lobe-chat/commit/43c4db7))
22
+
23
+ </details>
24
+
25
+ <div align="right">
26
+
27
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
28
+
29
+ </div>
30
+
5
31
  ## [Version 2.0.0-next.147](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.146...v2.0.0-next.147)
6
32
 
7
33
  <sup>Released on **2025-12-02**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "fixes": [
5
+ "Remove apiMode param from Azure and Cloudflare provider requests; fix contextMenu not working on desktop."
6
+ ]
7
+ },
8
+ "date": "2025-12-03",
9
+ "version": "2.0.0-next.148"
10
+ },
2
11
  {
3
12
  "children": {
4
13
  "features": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/lobehub",
3
- "version": "2.0.0-next.147",
3
+ "version": "2.0.0-next.148",
4
4
  "description": "LobeHub - an open-source, comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -863,17 +863,18 @@ const aihubmixModels: AIChatModelCard[] = [
863
863
  },
864
864
  contextWindowTokens: 131_072,
865
865
  description:
866
- 'DeepSeek V3.2 DeepSeek 最新发布的通用大模型,支持混合推理架构,具备更强的 Agent 能力。',
867
- displayName: 'DeepSeek V3.2 Exp',
868
- id: 'DeepSeek-V3.2-Exp',
866
+ 'DeepSeek-V3.2 是一款高效的大语言模型,具备 DSA 稀疏注意力与强化推理能力,其核心亮点在于强大的 Agent 能力——通过大规模任务合成,将推理与真实工具调用深度融合,实现更稳健、合规、可泛化的智能体表现。',
867
+ displayName: 'DeepSeek V3.2',
868
+ id: 'deepseek-chat',
869
869
  maxOutput: 8192,
870
870
  pricing: {
871
871
  units: [
872
- { name: 'textInput', rate: 0.28, strategy: 'fixed', unit: 'millionTokens' },
873
- { name: 'textOutput', rate: 0.42, strategy: 'fixed', unit: 'millionTokens' },
872
+ { name: 'textInput', rate: 0.3, strategy: 'fixed', unit: 'millionTokens' },
873
+ { name: 'textOutput', rate: 0.45, strategy: 'fixed', unit: 'millionTokens' },
874
+ { name: 'textInput_cacheRead', rate: 0.03, strategy: 'fixed', unit: 'millionTokens' },
874
875
  ],
875
876
  },
876
- releasedAt: '2025-09-29',
877
+ releasedAt: '2025-12-01',
877
878
  type: 'chat',
878
879
  },
879
880
  {
@@ -884,34 +885,18 @@ const aihubmixModels: AIChatModelCard[] = [
884
885
  contextWindowTokens: 131_072,
885
886
  description:
886
887
  'DeepSeek V3.2 思考模式。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。',
887
- displayName: 'DeepSeek V3.2 Exp Thinking',
888
+ displayName: 'DeepSeek V3.2 Thinking',
888
889
  enabled: true,
889
- id: 'DeepSeek-V3.2-Exp-Think',
890
+ id: 'deepseek-reasoner',
890
891
  maxOutput: 65_536,
891
892
  pricing: {
892
893
  units: [
893
- { name: 'textInput', rate: 0.28, strategy: 'fixed', unit: 'millionTokens' },
894
- { name: 'textOutput', rate: 0.42, strategy: 'fixed', unit: 'millionTokens' },
895
- ],
896
- },
897
- releasedAt: '2025-09-29',
898
- type: 'chat',
899
- },
900
- {
901
- abilities: {
902
- functionCall: true,
903
- },
904
- contextWindowTokens: 131_072,
905
- description:
906
- 'DeepSeek-V3.1-非思考模式;DeepSeek-V3.1 是深度求索全新推出的混合推理模型,支持思考与非思考2种推理模式,较 DeepSeek-R1-0528 思考效率更高。经 Post-Training 优化,Agent 工具使用与智能体任务表现大幅提升。',
907
- displayName: 'DeepSeek V3.1 (non-Think)',
908
- id: 'DeepSeek-V3.1',
909
- pricing: {
910
- units: [
911
- { name: 'textInput', rate: 0.56, strategy: 'fixed', unit: 'millionTokens' },
912
- { name: 'textOutput', rate: 1.68, strategy: 'fixed', unit: 'millionTokens' },
894
+ { name: 'textInput', rate: 0.3, strategy: 'fixed', unit: 'millionTokens' },
895
+ { name: 'textOutput', rate: 0.45, strategy: 'fixed', unit: 'millionTokens' },
896
+ { name: 'textInput_cacheRead', rate: 0.03, strategy: 'fixed', unit: 'millionTokens' },
913
897
  ],
914
898
  },
899
+ releasedAt: '2025-12-01',
915
900
  type: 'chat',
916
901
  },
917
902
  {
@@ -963,8 +948,8 @@ const aihubmixModels: AIChatModelCard[] = [
963
948
  id: 'DeepSeek-R1',
964
949
  pricing: {
965
950
  units: [
966
- { name: 'textInput', rate: 0.546, strategy: 'fixed', unit: 'millionTokens' },
967
- { name: 'textOutput', rate: 2.184, strategy: 'fixed', unit: 'millionTokens' },
951
+ { name: 'textInput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
952
+ { name: 'textOutput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
968
953
  ],
969
954
  },
970
955
  type: 'chat',
@@ -22,7 +22,7 @@ const deepseekChatModels: AIChatModelCard[] = [
22
22
  { name: 'textOutput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
23
23
  ],
24
24
  },
25
- releasedAt: '2025-09-29',
25
+ releasedAt: '2025-12-01',
26
26
  type: 'chat',
27
27
  },
28
28
  {
@@ -45,7 +45,7 @@ const deepseekChatModels: AIChatModelCard[] = [
45
45
  { name: 'textOutput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
46
46
  ],
47
47
  },
48
- releasedAt: '2025-09-29',
48
+ releasedAt: '2025-12-01',
49
49
  type: 'chat',
50
50
  },
51
51
  ];
@@ -1012,7 +1012,7 @@ const qwenChatModels: AIChatModelCard[] = [
1012
1012
  search: true,
1013
1013
  },
1014
1014
  config: {
1015
- deploymentName: 'qwen-plus-2025-09-11',
1015
+ deploymentName: 'qwen-plus-2025-12-01',
1016
1016
  },
1017
1017
  contextWindowTokens: 1_000_000,
1018
1018
  description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入。',
@@ -1,6 +1,7 @@
1
1
  import { LobeAnthropicAI } from '../../providers/anthropic';
2
2
  import { LobeAzureAI } from '../../providers/azureai';
3
3
  import { LobeCloudflareAI } from '../../providers/cloudflare';
4
+ import { LobeDeepSeekAI } from '../../providers/deepseek';
4
5
  import { LobeFalAI } from '../../providers/fal';
5
6
  import { LobeGoogleAI } from '../../providers/google';
6
7
  import { LobeOpenAI } from '../../providers/openai';
@@ -11,6 +12,7 @@ export const baseRuntimeMap = {
11
12
  anthropic: LobeAnthropicAI,
12
13
  azure: LobeAzureAI,
13
14
  cloudflare: LobeCloudflareAI,
15
+ deepseek: LobeDeepSeekAI,
14
16
  fal: LobeFalAI,
15
17
  google: LobeGoogleAI,
16
18
  openai: LobeOpenAI,
@@ -59,6 +59,11 @@ export const params: CreateRouterRuntimeOptions = {
59
59
  ),
60
60
  options: { baseURL: urlJoin(baseURL, '/v1') },
61
61
  },
62
+ {
63
+ apiType: 'deepseek',
64
+ models: ['deepseek-chat', 'deepseek-reasoner'],
65
+ options: { baseURL: urlJoin(baseURL, '/v1') },
66
+ },
62
67
  {
63
68
  apiType: 'openai',
64
69
  options: {
@@ -43,7 +43,9 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
43
43
  baseURL: string;
44
44
 
45
45
  async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
46
- const { messages, model, ...params } = payload;
46
+ // Remove internal apiMode parameter to prevent sending to Azure OpenAI API
47
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
48
+ const { messages, model, apiMode: _, ...params } = payload;
47
49
  // o1 series models on Azure OpenAI does not support streaming currently
48
50
  const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
49
51
 
@@ -36,7 +36,9 @@ export class LobeAzureAI implements LobeRuntimeAI {
36
36
  baseURL: string;
37
37
 
38
38
  async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
39
- const { messages, model, temperature, top_p, ...params } = payload;
39
+ // Remove internal apiMode parameter to prevent sending to Azure AI API
40
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
41
+ const { messages, model, temperature, top_p, apiMode: _, ...params } = payload;
40
42
  // o1 series models on Azure OpenAI does not support streaming currently
41
43
  const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
42
44
 
@@ -57,7 +57,9 @@ export class LobeCloudflareAI implements LobeRuntimeAI {
57
57
 
58
58
  async chat(payload: ChatStreamPayload, options?: ChatMethodOptions): Promise<Response> {
59
59
  try {
60
- const { model, tools, ...restPayload } = payload;
60
+ // Remove internal apiMode parameter to prevent sending to Cloudflare API
61
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
62
+ const { model, tools, apiMode: _, ...restPayload } = payload;
61
63
  const functions = tools?.map((tool) => tool.function);
62
64
  const headers = options?.headers || {};
63
65
  if (this.apiKey) {
@@ -7,7 +7,7 @@ import { Flexbox } from 'react-layout-kit';
7
7
  import { useChatStore } from '@/store/chat';
8
8
  import { aiChatSelectors, messageStateSelectors } from '@/store/chat/selectors';
9
9
 
10
- import { DefaultMessage } from '../Default';
10
+ import { DefaultMessage, MessageContentClassName } from '../Default';
11
11
  import ImageFileListViewer from '../User/ImageFileListViewer';
12
12
  import { CollapsedMessage } from './CollapsedMessage';
13
13
  import MessageContent from './DisplayContent';
@@ -71,7 +71,7 @@ export const AssistantMessageBody = memo<
71
71
  if (isCollapsed) return <CollapsedMessage content={content} id={id} />;
72
72
 
73
73
  return (
74
- <Flexbox gap={8} id={id}>
74
+ <Flexbox className={MessageContentClassName} gap={8} id={id}>
75
75
  {showSearch && (
76
76
  <SearchGrounding citations={search?.citations} searchQueries={search?.searchQueries} />
77
77
  )}
@@ -80,6 +80,7 @@ const Item = memo<ChatListItemProps>(
80
80
  contextMenuState,
81
81
  handleContextMenu,
82
82
  hideContextMenu,
83
+ contextMenuMode,
83
84
  } = useChatItemContextMenu({
84
85
  editing,
85
86
  onActionClick: () => {},
@@ -140,6 +141,10 @@ const Item = memo<ChatListItemProps>(
140
141
  if (!item) return;
141
142
 
142
143
  if (isDesktop) {
144
+ if (contextMenuMode !== 'disabled') {
145
+ handleContextMenu(event);
146
+ return;
147
+ }
143
148
  const { electronSystemService } = await import('@/services/electron/system');
144
149
 
145
150
  electronSystemService.showContextMenu('chat', {
@@ -127,6 +127,7 @@ export const useChatItemContextMenu = ({
127
127
 
128
128
  return {
129
129
  containerRef,
130
+ contextMenuMode,
130
131
  contextMenuState,
131
132
  handleContextMenu,
132
133
  handleMenuClick,