@lobehub/chat 1.88.16 → 1.88.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@
 
  # Changelog
 
+ ### [Version 1.88.18](https://github.com/lobehub/lobe-chat/compare/v1.88.17...v1.88.18)
+
+ <sup>Released on **2025-05-29**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Close historySummary correctly, Enable thinking output only for supported Gemini thinking models.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Close historySummary correctly, closes [#7010](https://github.com/lobehub/lobe-chat/issues/7010) ([90a6f68](https://github.com/lobehub/lobe-chat/commit/90a6f68))
+ - **misc**: Enable thinking output only for supported Gemini thinking models, closes [#7987](https://github.com/lobehub/lobe-chat/issues/7987) ([f503c53](https://github.com/lobehub/lobe-chat/commit/f503c53))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.88.17](https://github.com/lobehub/lobe-chat/compare/v1.88.16...v1.88.17)
+
+ <sup>Released on **2025-05-29**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Increase the history limit.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Increase the history limit, closes [#8007](https://github.com/lobehub/lobe-chat/issues/8007) ([5ec7c8d](https://github.com/lobehub/lobe-chat/commit/5ec7c8d))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.88.16](https://github.com/lobehub/lobe-chat/compare/v1.88.15...v1.88.16)
 
  <sup>Released on **2025-05-29**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Close historySummary correctly, Enable thinking output only for supported Gemini thinking models."
+       ]
+     },
+     "date": "2025-05-29",
+     "version": "1.88.18"
+   },
+   {
+     "children": {
+       "improvements": [
+         "Increase the history limit."
+       ]
+     },
+     "date": "2025-05-29",
+     "version": "1.88.17"
+   },
    {
      "children": {
        "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.88.16",
+   "version": "1.88.18",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -24,7 +24,7 @@ export const DEFAULT_AGENT_CHAT_CONFIG: LobeAgentChatConfig = {
  enableCompressHistory: true,
  enableHistoryCount: true,
  enableReasoning: false,
- historyCount: 8,
+ historyCount: 20,
  reasoningBudgetToken: 1024,
  searchFCModel: DEFAULT_AGENT_SEARCH_FC_MODEL,
  searchMode: 'off',
@@ -7,6 +7,8 @@ import { memo } from 'react';
  import { useTranslation } from 'react-i18next';
  import { Center, Flexbox } from 'react-layout-kit';
 
+ import { agentChatConfigSelectors } from '@/store/agent/selectors';
+ import { useAgentStore } from '@/store/agent/store';
  import { useChatStore } from '@/store/chat';
  import { topicSelectors } from '@/store/chat/selectors';
 
@@ -35,10 +37,14 @@ const History = memo(() => {
      return [history?.content, history?.model];
    });
 
+   const enableCompressHistory = useAgentStore(
+     (s) => agentChatConfigSelectors.currentChatConfig(s).enableCompressHistory,
+   );
+
    return (
      <Flexbox paddingInline={16} style={{ paddingBottom: 8 }}>
        <HistoryDivider enable />
-       {!!content && (
+       {enableCompressHistory && !!content && (
          <Flexbox className={styles.container} gap={8}>
            <Flexbox align={'flex-start'} gap={8} horizontal>
              <Center height={20} width={20}>
@@ -117,8 +117,17 @@ export class LobeGoogleAI implements LobeRuntimeAI {
    const { model, thinking } = payload;
 
    const thinkingConfig: GoogleAIThinkingConfig = {
-     includeThoughts: true,
-     thinkingBudget: thinking?.type === 'enabled' ? Math.min(thinking.budget_tokens, 24_576) : 0,
+     includeThoughts:
+       (thinking?.type === 'enabled') ||
+       (!thinking && model && (model.includes('-2.5-') || model.includes('thinking')))
+         ? true
+         : undefined,
+     thinkingBudget:
+       thinking?.type === 'enabled'
+         ? Math.min(thinking.budget_tokens, 24_576)
+         : thinking?.type === 'disabled'
+           ? 0
+           : undefined,
    };
 
    const contents = await this.buildGoogleMessages(payload.messages);
@@ -132,8 +141,8 @@ export class LobeGoogleAI implements LobeRuntimeAI {
      // @ts-expect-error - Google SDK 0.24.0 doesn't have this property for now with
      response_modalities: modelsWithModalities.has(model) ? ['Text', 'Image'] : undefined,
      temperature: payload.temperature,
-     thinkingConfig,
      topP: payload.top_p,
+     ...(modelsDisableInstuction.has(model) || model.toLowerCase().includes('learnlm') ? {} : { thinkingConfig }),
    },
    model,
    // avoid wide sensitive words
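
The two hunks above change how `LobeGoogleAI` assembles its `thinkingConfig`: thought output is requested only when thinking is explicitly enabled or when an unconfigured model id looks like a Gemini thinking model (`-2.5-` or `thinking`), the budget is pinned only for an explicit enabled/disabled choice, and the whole config is omitted for models that do not accept it. Below is a minimal standalone sketch of that decision logic; the `ThinkingPayload` type and `buildThinkingConfig` helper are illustrative only, not part of the package's API.

```typescript
// Sketch of the thinking-config decision introduced in v1.88.18 (names are illustrative).
type ThinkingPayload = { budget_tokens: number; type: 'enabled' | 'disabled' };

interface GoogleAIThinkingConfig {
  includeThoughts?: boolean;
  thinkingBudget?: number;
}

const buildThinkingConfig = (
  model: string,
  thinking?: ThinkingPayload,
): GoogleAIThinkingConfig => ({
  // Request thought output when thinking is explicitly enabled, or when no
  // preference is given and the model id looks like a Gemini thinking model.
  includeThoughts:
    thinking?.type === 'enabled' ||
    (!thinking && (model.includes('-2.5-') || model.includes('thinking')))
      ? true
      : undefined,
  // Pin a budget only when the caller made an explicit choice; otherwise leave
  // it unset instead of forcing 0 as the previous code did.
  thinkingBudget:
    thinking?.type === 'enabled'
      ? Math.min(thinking.budget_tokens, 24_576)
      : thinking?.type === 'disabled'
        ? 0
        : undefined,
});

console.log(buildThinkingConfig('gemini-2.5-flash-preview'));
// { includeThoughts: true, thinkingBudget: undefined }
console.log(buildThinkingConfig('gemini-2.0-flash', { budget_tokens: 2048, type: 'disabled' }));
// { includeThoughts: undefined, thinkingBudget: 0 }
```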
@@ -225,12 +225,20 @@ class ChatService {
    )(getAiInfraStoreState());
    // if model has extended params, then we need to check if the model can use reasoning
 
-   if (modelExtendParams!.includes('enableReasoning') && chatConfig.enableReasoning) {
-     extendParams.thinking = {
-       budget_tokens: chatConfig.reasoningBudgetToken || 1024,
-       type: 'enabled',
-     };
+   if (modelExtendParams!.includes('enableReasoning')) {
+     if (chatConfig.enableReasoning) {
+       extendParams.thinking = {
+         budget_tokens: chatConfig.reasoningBudgetToken || 1024,
+         type: 'enabled',
+       };
+     } else {
+       extendParams.thinking = {
+         budget_tokens: 0,
+         type: 'disabled',
+       };
+     }
    }
+
    if (
      modelExtendParams!.includes('disableContextCaching') &&
      chatConfig.disableContextCaching
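
The ChatService hunk above pairs with that provider change: for models that advertise `enableReasoning`, the client now always sends an explicit `thinking` parameter, `enabled` with the configured budget when the user turns reasoning on, and `disabled` with a zero budget when they turn it off, instead of omitting the field. A reduced sketch of that branch follows, with the `chatConfig` and `extendParams` shapes simplified for illustration.

```typescript
// Reduced sketch of the new extendParams branch (shapes simplified for illustration).
interface ChatConfigLike {
  enableReasoning: boolean;
  reasoningBudgetToken?: number;
}

interface ExtendParams {
  thinking?: { budget_tokens: number; type: 'enabled' | 'disabled' };
}

const applyReasoningParams = (
  modelExtendParams: string[],
  chatConfig: ChatConfigLike,
  extendParams: ExtendParams,
) => {
  if (modelExtendParams.includes('enableReasoning')) {
    extendParams.thinking = chatConfig.enableReasoning
      ? { budget_tokens: chatConfig.reasoningBudgetToken || 1024, type: 'enabled' }
      : // Previously the field was simply omitted when reasoning was off; sending an
        // explicit 'disabled' lets the Gemini runtime suppress thinking output.
        { budget_tokens: 0, type: 'disabled' };
  }
};

const params: ExtendParams = {};
applyReasoningParams(['enableReasoning'], { enableReasoning: false }, params);
console.log(params.thinking); // { budget_tokens: 0, type: 'disabled' }
```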
@@ -9,7 +9,7 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
  "enableCompressHistory": true,
  "enableHistoryCount": true,
  "enableReasoning": false,
- "historyCount": 8,
+ "historyCount": 20,
  "reasoningBudgetToken": 1024,
  "searchFCModel": {
    "model": "gpt-4.1-mini",
@@ -88,7 +88,7 @@ describe('agentChatConfigSelectors', () => {
  describe('historyCount', () => {
    it('should return undefined when historyCount is not defined', () => {
      const state = createMockState();
-     expect(agentChatConfigSelectors.historyCount(state)).toBe(8);
+     expect(agentChatConfigSelectors.historyCount(state)).toBe(20);
    });
 
    it('should return the historyCount value when defined', () => {
@@ -552,7 +552,9 @@ export const generateAIChat: StateCreator<
    // to upload image
    const uploadTasks: Map<string, Promise<{ id?: string; url?: string }>> = new Map();
 
-   const historySummary = topicSelectors.currentActiveTopicSummary(get());
+   const historySummary = chatConfig.enableCompressHistory
+     ? topicSelectors.currentActiveTopicSummary(get())
+     : undefined;
    await chatService.createAssistantMessageStream({
      abortController,
      params: {
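
The `History` component and `generateAIChat` hunks above gate history compression on the same flag: when `enableCompressHistory` is off, no topic summary is computed or sent, and the summary block is no longer rendered. A minimal sketch of the send-side behavior, with the selector stubbed out for illustration:

```typescript
// Minimal sketch of the historySummary gate (the topic-summary selector is stubbed).
interface ChatConfigLike {
  enableCompressHistory: boolean;
}

const currentActiveTopicSummary = (): { content: string; model: string } | undefined => ({
  content: 'Earlier messages summarized…',
  model: 'gpt-4.1-mini',
});

const buildStreamPayload = (chatConfig: ChatConfigLike) => {
  // Attach a compressed-history summary only when the feature is enabled;
  // otherwise the stream request carries no summary at all.
  const historySummary = chatConfig.enableCompressHistory
    ? currentActiveTopicSummary()
    : undefined;

  return { historySummary };
};

console.log(buildStreamPayload({ enableCompressHistory: false })); // { historySummary: undefined }
```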
@@ -108,7 +108,7 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
  "enableCompressHistory": true,
  "enableHistoryCount": true,
  "enableReasoning": false,
- "historyCount": 8,
+ "historyCount": 20,
  "reasoningBudgetToken": 1024,
  "searchFCModel": {
    "model": "gpt-4.1-mini",
@@ -152,7 +152,7 @@ exports[`settingsSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CON
  "enableCompressHistory": true,
  "enableHistoryCount": true,
  "enableReasoning": false,
- "historyCount": 8,
+ "historyCount": 20,
  "reasoningBudgetToken": 1024,
  "searchFCModel": {
    "model": "gpt-4.1-mini",