@lobehub/chat 1.88.17 → 1.88.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.88.19](https://github.com/lobehub/lobe-chat/compare/v1.88.18...v1.88.19)
6
+
7
+ <sup>Released on **2025-05-30**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Support Web Search Tools and Beta Header from Anthropic.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Support Web Search Tools and Beta Header from Anthropic, closes [#7964](https://github.com/lobehub/lobe-chat/issues/7964) ([a47ddc5](https://github.com/lobehub/lobe-chat/commit/a47ddc5))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.88.18](https://github.com/lobehub/lobe-chat/compare/v1.88.17...v1.88.18)
31
+
32
+ <sup>Released on **2025-05-29**</sup>
33
+
34
+ #### 🐛 Bug Fixes
35
+
36
+ - **misc**: Close historySummary correctly, Enable thinking output only for supported Gemini thinking models.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's fixed
44
+
45
+ - **misc**: Close historySummary correctly, closes [#7010](https://github.com/lobehub/lobe-chat/issues/7010) ([90a6f68](https://github.com/lobehub/lobe-chat/commit/90a6f68))
46
+ - **misc**: Enable thinking output only for supported Gemini thinking models, closes [#7987](https://github.com/lobehub/lobe-chat/issues/7987) ([f503c53](https://github.com/lobehub/lobe-chat/commit/f503c53))
47
+
48
+ </details>
49
+
50
+ <div align="right">
51
+
52
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
53
+
54
+ </div>
55
+
5
56
  ### [Version 1.88.17](https://github.com/lobehub/lobe-chat/compare/v1.88.16...v1.88.17)
6
57
 
7
58
  <sup>Released on **2025-05-29**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "improvements": [
5
+ "Support Web Search Tools and Beta Header from Anthropic."
6
+ ]
7
+ },
8
+ "date": "2025-05-30",
9
+ "version": "1.88.19"
10
+ },
11
+ {
12
+ "children": {
13
+ "fixes": [
14
+ "Close historySummary correctly, Enable thinking output only for supported Gemini thinking models."
15
+ ]
16
+ },
17
+ "date": "2025-05-29",
18
+ "version": "1.88.18"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.88.17",
3
+ "version": "1.88.19",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -5,6 +5,7 @@ const anthropicChatModels: AIChatModelCard[] = [
5
5
  abilities: {
6
6
  functionCall: true,
7
7
  reasoning: true,
8
+ search: true,
8
9
  vision: true,
9
10
  },
10
11
  contextWindowTokens: 200_000,
@@ -23,6 +24,7 @@ const anthropicChatModels: AIChatModelCard[] = [
23
24
  releasedAt: '2025-05-23',
24
25
  settings: {
25
26
  extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
27
+ searchImpl: 'params',
26
28
  },
27
29
  type: 'chat',
28
30
  },
@@ -30,6 +32,7 @@ const anthropicChatModels: AIChatModelCard[] = [
30
32
  abilities: {
31
33
  functionCall: true,
32
34
  reasoning: true,
35
+ search: true,
33
36
  vision: true,
34
37
  },
35
38
  contextWindowTokens: 200_000,
@@ -48,6 +51,7 @@ const anthropicChatModels: AIChatModelCard[] = [
48
51
  releasedAt: '2025-05-23',
49
52
  settings: {
50
53
  extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
54
+ searchImpl: 'params',
51
55
  },
52
56
  type: 'chat',
53
57
  },
@@ -55,6 +59,7 @@ const anthropicChatModels: AIChatModelCard[] = [
55
59
  abilities: {
56
60
  functionCall: true,
57
61
  reasoning: true,
62
+ search: true,
58
63
  vision: true,
59
64
  },
60
65
  contextWindowTokens: 200_000,
@@ -73,12 +78,14 @@ const anthropicChatModels: AIChatModelCard[] = [
73
78
  releasedAt: '2025-02-24',
74
79
  settings: {
75
80
  extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
81
+ searchImpl: 'params',
76
82
  },
77
83
  type: 'chat',
78
84
  },
79
85
  {
80
86
  abilities: {
81
87
  functionCall: true,
88
+ search: true,
82
89
  vision: true,
83
90
  },
84
91
  contextWindowTokens: 200_000,
@@ -96,6 +103,7 @@ const anthropicChatModels: AIChatModelCard[] = [
96
103
  releasedAt: '2024-10-22',
97
104
  settings: {
98
105
  extendParams: ['disableContextCaching'],
106
+ searchImpl: 'params',
99
107
  },
100
108
  type: 'chat',
101
109
  },
@@ -119,6 +127,7 @@ const anthropicChatModels: AIChatModelCard[] = [
119
127
  releasedAt: '2024-06-20',
120
128
  settings: {
121
129
  extendParams: ['disableContextCaching'],
130
+ searchImpl: 'params',
122
131
  },
123
132
  type: 'chat',
124
133
  },
@@ -7,6 +7,8 @@ import { memo } from 'react';
7
7
  import { useTranslation } from 'react-i18next';
8
8
  import { Center, Flexbox } from 'react-layout-kit';
9
9
 
10
+ import { agentChatConfigSelectors } from '@/store/agent/selectors';
11
+ import { useAgentStore } from '@/store/agent/store';
10
12
  import { useChatStore } from '@/store/chat';
11
13
  import { topicSelectors } from '@/store/chat/selectors';
12
14
 
@@ -35,10 +37,14 @@ const History = memo(() => {
35
37
  return [history?.content, history?.model];
36
38
  });
37
39
 
40
+ const enableCompressHistory = useAgentStore(
41
+ (s) => agentChatConfigSelectors.currentChatConfig(s).enableCompressHistory,
42
+ );
43
+
38
44
  return (
39
45
  <Flexbox paddingInline={16} style={{ paddingBottom: 8 }}>
40
46
  <HistoryDivider enable />
41
- {!!content && (
47
+ {enableCompressHistory && !!content && (
42
48
  <Flexbox className={styles.container} gap={8}>
43
49
  <Flexbox align={'flex-start'} gap={8} horizontal>
44
50
  <Center height={20} width={20}>
@@ -302,6 +302,64 @@ describe('LobeAnthropicAI', () => {
302
302
  { enabledContextCaching: true },
303
303
  );
304
304
  });
305
+
306
+ it('should build payload with tools and web search enabled', async () => {
307
+ const tools: ChatCompletionTool[] = [
308
+ { function: { name: 'tool1', description: 'desc1' }, type: 'function' }
309
+ ];
310
+
311
+ const mockAnthropicTools = [{ name: 'tool1', description: 'desc1' }];
312
+
313
+ vi.spyOn(anthropicHelpers, 'buildAnthropicTools').mockReturnValue(mockAnthropicTools as any);
314
+
315
+ const payload: ChatStreamPayload = {
316
+ messages: [{ content: 'Search and get info', role: 'user' }],
317
+ model: 'claude-3-haiku-20240307',
318
+ temperature: 0.5,
319
+ tools,
320
+ enabledSearch: true,
321
+ };
322
+
323
+ const result = await instance['buildAnthropicPayload'](payload);
324
+
325
+ expect(anthropicHelpers.buildAnthropicTools).toHaveBeenCalledWith(tools, {
326
+ enabledContextCaching: true,
327
+ });
328
+
329
+ // Should include both the converted tools and web search tool
330
+ expect(result.tools).toEqual([
331
+ ...mockAnthropicTools,
332
+ {
333
+ name: 'web_search',
334
+ type: 'web_search_20250305',
335
+ },
336
+ ]);
337
+ });
338
+
339
+ it('should build payload with web search enabled but no other tools', async () => {
340
+ vi.spyOn(anthropicHelpers, 'buildAnthropicTools').mockReturnValue(undefined);
341
+
342
+ const payload: ChatStreamPayload = {
343
+ messages: [{ content: 'Search for information', role: 'user' }],
344
+ model: 'claude-3-haiku-20240307',
345
+ temperature: 0.5,
346
+ enabledSearch: true,
347
+ };
348
+
349
+ const result = await instance['buildAnthropicPayload'](payload);
350
+
351
+ expect(anthropicHelpers.buildAnthropicTools).toHaveBeenCalledWith(undefined, {
352
+ enabledContextCaching: true,
353
+ });
354
+
355
+ // Should only include web search tool
356
+ expect(result.tools).toEqual([
357
+ {
358
+ name: 'web_search',
359
+ type: 'web_search_20250305',
360
+ },
361
+ ]);
362
+ });
305
363
  });
306
364
 
307
365
  describe('Error', () => {
@@ -23,6 +23,8 @@ export interface AnthropicModelCard {
23
23
  id: string;
24
24
  }
25
25
 
26
+ type anthropicTools = Anthropic.Tool | Anthropic.WebSearchTool20250305;
27
+
26
28
  const modelsWithSmallContextWindow = new Set(['claude-3-opus-20240229', 'claude-3-haiku-20240307']);
27
29
 
28
30
  const DEFAULT_BASE_URL = 'https://api.anthropic.com';
@@ -45,7 +47,14 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
45
47
  constructor({ apiKey, baseURL = DEFAULT_BASE_URL, id, ...res }: AnthropicAIParams = {}) {
46
48
  if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
47
49
 
48
- this.client = new Anthropic({ apiKey, baseURL, ...res });
50
+ const betaHeaders = process.env.ANTHROPIC_BETA_HEADERS;
51
+
52
+ this.client = new Anthropic({
53
+ apiKey,
54
+ baseURL,
55
+ ...(betaHeaders ? { defaultHeaders: { "anthropic-beta": betaHeaders } } : {}),
56
+ ...res
57
+ });
49
58
  this.baseURL = this.client.baseURL;
50
59
  this.apiKey = apiKey;
51
60
  this.id = id || ModelProvider.Anthropic;
@@ -99,6 +108,7 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
99
108
  tools,
100
109
  thinking,
101
110
  enabledContextCaching = true,
111
+ enabledSearch,
102
112
  } = payload;
103
113
 
104
114
  const { default: anthropicModels } = await import('@/config/aiModels/anthropic');
@@ -127,7 +137,27 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
127
137
 
128
138
  const postMessages = await buildAnthropicMessages(user_messages, { enabledContextCaching });
129
139
 
130
- const postTools = buildAnthropicTools(tools, { enabledContextCaching });
140
+ let postTools: anthropicTools[] | undefined = buildAnthropicTools(tools, { enabledContextCaching });
141
+
142
+ if (enabledSearch) {
143
+ // Limit the number of searches per request
144
+ const maxUses = process.env.ANTHROPIC_MAX_USES;
145
+
146
+ const webSearchTool: Anthropic.WebSearchTool20250305 = {
147
+ name: 'web_search',
148
+ type: 'web_search_20250305',
149
+ ...(maxUses && Number.isInteger(Number(maxUses)) && Number(maxUses) > 0 && {
150
+ max_uses: Number(maxUses)
151
+ }),
152
+ };
153
+
154
+ // If tools already exist, append the web search tool to the existing list; otherwise create a new tool list
155
+ if (postTools && postTools.length > 0) {
156
+ postTools = [...postTools, webSearchTool];
157
+ } else {
158
+ postTools = [webSearchTool];
159
+ }
160
+ }
131
161
 
132
162
  if (!!thinking && thinking.type === 'enabled') {
133
163
  const maxTokens = getMaxTokens() || 32_000; // Claude Opus 4 has minimum maxOutput
@@ -117,8 +117,17 @@ export class LobeGoogleAI implements LobeRuntimeAI {
117
117
  const { model, thinking } = payload;
118
118
 
119
119
  const thinkingConfig: GoogleAIThinkingConfig = {
120
- includeThoughts: true,
121
- thinkingBudget: thinking?.type === 'enabled' ? Math.min(thinking.budget_tokens, 24_576) : 0,
120
+ includeThoughts:
121
+ (thinking?.type === 'enabled') ||
122
+ (!thinking && model && (model.includes('-2.5-') || model.includes('thinking')))
123
+ ? true
124
+ : undefined,
125
+ thinkingBudget:
126
+ thinking?.type === 'enabled'
127
+ ? Math.min(thinking.budget_tokens, 24_576)
128
+ : thinking?.type === 'disabled'
129
+ ? 0
130
+ : undefined,
122
131
  };
123
132
 
124
133
  const contents = await this.buildGoogleMessages(payload.messages);
@@ -132,8 +141,8 @@ export class LobeGoogleAI implements LobeRuntimeAI {
132
141
  // @ts-expect-error - Google SDK 0.24.0 doesn't have this property for now with
133
142
  response_modalities: modelsWithModalities.has(model) ? ['Text', 'Image'] : undefined,
134
143
  temperature: payload.temperature,
135
- thinkingConfig,
136
144
  topP: payload.top_p,
145
+ ...(modelsDisableInstuction.has(model) || model.toLowerCase().includes('learnlm') ? {} : { thinkingConfig }),
137
146
  },
138
147
  model,
139
148
  // avoid wide sensitive words
@@ -1,7 +1,7 @@
1
1
  import Anthropic from '@anthropic-ai/sdk';
2
2
  import type { Stream } from '@anthropic-ai/sdk/streaming';
3
3
 
4
- import { ModelTokensUsage } from '@/types/message';
4
+ import { ModelTokensUsage, CitationItem } from '@/types/message';
5
5
 
6
6
  import { ChatStreamCallbacks } from '../../types';
7
7
  import {
@@ -23,6 +23,7 @@ export const transformAnthropicStream = (
23
23
  switch (chunk.type) {
24
24
  case 'message_start': {
25
25
  context.id = chunk.message.id;
26
+ context.returnedCitationArray = [];
26
27
  let totalInputTokens = chunk.message.usage?.input_tokens;
27
28
 
28
29
  if (
@@ -59,6 +60,7 @@ export const transformAnthropicStream = (
59
60
  return { data: chunk.content_block.text, id: context.id, type: 'data' };
60
61
  }
61
62
 
63
+ case 'server_tool_use':
62
64
  case 'tool_use': {
63
65
  const toolChunk = chunk.content_block;
64
66
 
@@ -85,6 +87,29 @@ export const transformAnthropicStream = (
85
87
 
86
88
  return { data: [toolCall], id: context.id, type: 'tool_calls' };
87
89
  }
90
+
91
+ /*
92
+ case 'web_search_tool_result': {
93
+ const citations = chunk.content_block.content;
94
+
95
+ return [
96
+ {
97
+ data: {
98
+ citations: (citations as any[]).map(
99
+ (item) =>
100
+ ({
101
+ title: item.title,
102
+ url: item.url,
103
+ }) as CitationItem,
104
+ ),
105
+ },
106
+ id: context.id,
107
+ type: 'grounding',
108
+ },
109
+ ];
110
+ }
111
+ */
112
+
88
113
  case 'thinking': {
89
114
  const thinkingChunk = chunk.content_block;
90
115
 
@@ -148,6 +173,19 @@ export const transformAnthropicStream = (
148
173
  };
149
174
  }
150
175
 
176
+ case 'citations_delta': {
177
+ const citations = (chunk as any).delta.citation;
178
+
179
+ if (context.returnedCitationArray) {
180
+ context.returnedCitationArray.push({
181
+ title: citations.title,
182
+ url: citations.url,
183
+ } as CitationItem)
184
+ }
185
+
186
+ return { data: null, id: context.id, type: 'text' };
187
+ }
188
+
151
189
  default: {
152
190
  break;
153
191
  }
@@ -180,7 +218,17 @@ export const transformAnthropicStream = (
180
218
  }
181
219
 
182
220
  case 'message_stop': {
183
- return { data: 'message_stop', id: context.id, type: 'stop' };
221
+ return [
222
+ ...(context.returnedCitationArray?.length
223
+ ? [{
224
+ data: { citations: context.returnedCitationArray },
225
+ id: context.id,
226
+ type: 'grounding'
227
+ }]
228
+ : []
229
+ ),
230
+ { data: 'message_stop', id: context.id, type: 'stop' }
231
+ ] as any;
184
232
  }
185
233
 
186
234
  default: {
@@ -1,4 +1,4 @@
1
- import { ModelSpeed, ModelTokensUsage } from '@/types/message';
1
+ import { CitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message';
2
2
  import { safeParseJSON } from '@/utils/safeParseJSON';
3
3
 
4
4
  import { AgentRuntimeErrorType } from '../../error';
@@ -16,6 +16,13 @@ export interface StreamContext {
16
16
  * Same as Hunyuan and Wenxin
17
17
  */
18
18
  returnedCitation?: boolean;
19
+ /**
20
+ * Claude's citations are inline and interleaved with text output.
21
+ * Each text segment may carry references to sources (e.g., web search results)
22
+ * relevant to that specific portion of the generated content.
23
+ * This array accumulates all citation items received during the streaming response.
24
+ */
25
+ returnedCitationArray?: CitationItem[];
19
26
  thinking?: {
20
27
  id: string;
21
28
  name: string;
@@ -225,12 +225,20 @@ class ChatService {
225
225
  )(getAiInfraStoreState());
226
226
  // if model has extended params, then we need to check if the model can use reasoning
227
227
 
228
- if (modelExtendParams!.includes('enableReasoning') && chatConfig.enableReasoning) {
229
- extendParams.thinking = {
230
- budget_tokens: chatConfig.reasoningBudgetToken || 1024,
231
- type: 'enabled',
232
- };
228
+ if (modelExtendParams!.includes('enableReasoning')) {
229
+ if (chatConfig.enableReasoning) {
230
+ extendParams.thinking = {
231
+ budget_tokens: chatConfig.reasoningBudgetToken || 1024,
232
+ type: 'enabled',
233
+ };
234
+ } else {
235
+ extendParams.thinking = {
236
+ budget_tokens: 0,
237
+ type: 'disabled',
238
+ };
239
+ }
233
240
  }
241
+
234
242
  if (
235
243
  modelExtendParams!.includes('disableContextCaching') &&
236
244
  chatConfig.disableContextCaching
@@ -552,7 +552,9 @@ export const generateAIChat: StateCreator<
552
552
  // to upload image
553
553
  const uploadTasks: Map<string, Promise<{ id?: string; url?: string }>> = new Map();
554
554
 
555
- const historySummary = topicSelectors.currentActiveTopicSummary(get());
555
+ const historySummary = chatConfig.enableCompressHistory
556
+ ? topicSelectors.currentActiveTopicSummary(get())
557
+ : undefined;
556
558
  await chatService.createAssistantMessageStream({
557
559
  abortController,
558
560
  params: {