@lobehub/chat 1.96.7 → 1.96.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/package.json +1 -1
- package/src/config/aiModels/google.ts +6 -6
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +12 -0
- package/src/features/ChatInput/ActionBar/Model/ThinkingBudgetSlider.tsx +133 -0
- package/src/libs/model-runtime/google/index.test.ts +53 -1
- package/src/libs/model-runtime/google/index.ts +66 -20
- package/src/libs/model-runtime/types/chat.ts +1 -0
- package/src/libs/model-runtime/utils/streams/google-ai.test.ts +2 -2
- package/src/libs/model-runtime/utils/streams/ollama.test.ts +4 -4
- package/src/libs/model-runtime/utils/streams/protocol.ts +2 -1
- package/src/libs/model-runtime/utils/streams/vertex-ai.test.ts +2 -2
- package/src/services/chat.ts +7 -0
- package/src/types/agent/chatConfig.ts +1 -1
- package/src/types/aiModel.ts +2 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@

# Changelog

+ ### [Version 1.96.9](https://github.com/lobehub/lobe-chat/compare/v1.96.8...v1.96.9)
+
+ <sup>Released on **2025-06-23**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Google Gemini tools declarations.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Google Gemini tools declarations, closes [#8256](https://github.com/lobehub/lobe-chat/issues/8256) ([08f5d73](https://github.com/lobehub/lobe-chat/commit/08f5d73))
+
+ </details>
+
+ <div align="right">
+
+ [](#readme-top)
+
+ </div>
+
+ ### [Version 1.96.8](https://github.com/lobehub/lobe-chat/compare/v1.96.7...v1.96.8)
+
+ <sup>Released on **2025-06-23**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Optimized Gemini thinkingBudget configuration.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Optimized Gemini thinkingBudget configuration, closes [#8224](https://github.com/lobehub/lobe-chat/issues/8224) ([03625e8](https://github.com/lobehub/lobe-chat/commit/03625e8))
+
+ </details>
+
+ <div align="right">
+
+ [](#readme-top)
+
+ </div>
+
### [Version 1.96.7](https://github.com/lobehub/lobe-chat/compare/v1.96.6...v1.96.7)

<sup>Released on **2025-06-23**</sup>
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
[
+   {
+     "children": {
+       "fixes": [
+         "Google Gemini tools declarations."
+       ]
+     },
+     "date": "2025-06-23",
+     "version": "1.96.9"
+   },
+   {
+     "children": {
+       "improvements": [
+         "Optimized Gemini thinkingBudget configuration."
+       ]
+     },
+     "date": "2025-06-23",
+     "version": "1.96.8"
+   },
  {
    "children": {
      "improvements": [
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
  "name": "@lobehub/chat",
- "version": "1.96.7",
+ "version": "1.96.9",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
    "framework",

package/src/config/aiModels/google.ts
CHANGED
@@ -20,7 +20,7 @@ const googleChatModels: AIChatModelCard[] = [
    },
    releasedAt: '2025-06-17',
    settings: {
-     extendParams: ['
+     extendParams: ['thinkingBudget'],
      searchImpl: 'params',
      searchProvider: 'google',
    },
@@ -45,7 +45,7 @@ const googleChatModels: AIChatModelCard[] = [
    },
    releasedAt: '2025-06-05',
    settings: {
-     extendParams: ['
+     extendParams: ['thinkingBudget'],
      searchImpl: 'params',
      searchProvider: 'google',
    },
@@ -119,7 +119,7 @@ const googleChatModels: AIChatModelCard[] = [
    },
    releasedAt: '2025-06-17',
    settings: {
-     extendParams: ['
+     extendParams: ['thinkingBudget'],
      searchImpl: 'params',
      searchProvider: 'google',
    },
@@ -143,7 +143,7 @@ const googleChatModels: AIChatModelCard[] = [
    },
    releasedAt: '2025-05-20',
    settings: {
-     extendParams: ['
+     extendParams: ['thinkingBudget'],
      searchImpl: 'params',
      searchProvider: 'google',
    },
@@ -167,7 +167,7 @@ const googleChatModels: AIChatModelCard[] = [
    },
    releasedAt: '2025-04-17',
    settings: {
-     extendParams: ['
+     extendParams: ['thinkingBudget'],
      searchImpl: 'params',
      searchProvider: 'google',
    },
@@ -215,7 +215,7 @@ const googleChatModels: AIChatModelCard[] = [
    },
    releasedAt: '2025-06-11',
    settings: {
-     extendParams: ['
+     extendParams: ['thinkingBudget'],
      searchImpl: 'params',
      searchProvider: 'google',
    },
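For orientation, after this change a Gemini model card's settings block reads like the sketch below. The entry is illustrative: the id and surrounding fields are placeholders rather than copies from google.ts; the point is that listing 'thinkingBudget' in extendParams is what later surfaces the new slider in the chat controls and lets the value be forwarded.

// Illustrative model card sketch; only the settings block mirrors the diff above.
const exampleGeminiModel = {
  id: 'gemini-2.5-flash', // placeholder id
  releasedAt: '2025-06-17',
  settings: {
    extendParams: ['thinkingBudget'], // enables the ThinkingBudgetSlider control
    searchImpl: 'params',
    searchProvider: 'google',
  },
};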
package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx
CHANGED
@@ -13,6 +13,7 @@ import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
import ContextCachingSwitch from './ContextCachingSwitch';
import ReasoningEffortSlider from './ReasoningEffortSlider';
import ReasoningTokenSlider from './ReasoningTokenSlider';
+ import ThinkingBudgetSlider from './ThinkingBudgetSlider';

const ControlsForm = memo(() => {
  const { t } = useTranslation('chat');
@@ -93,6 +94,17 @@ const ControlsForm = memo(() => {
        paddingBottom: 0,
      },
    },
+   {
+     children: <ThinkingBudgetSlider />,
+     label: t('extendParams.reasoningBudgetToken.title'),
+     layout: 'vertical',
+     minWidth: 500,
+     name: 'thinkingBudget',
+     style: {
+       paddingBottom: 0,
+     },
+     tag: 'thinkingBudget',
+   },
  ].filter(Boolean) as FormItemProps[];

  return (
package/src/features/ChatInput/ActionBar/Model/ThinkingBudgetSlider.tsx
ADDED
@@ -0,0 +1,133 @@
+ import { InputNumber } from '@lobehub/ui';
+ import { Slider } from 'antd';
+ import { memo, useMemo } from 'react';
+ import { Flexbox } from 'react-layout-kit';
+ import useMergeState from 'use-merge-value';
+
+ // Special value mapping
+ const SPECIAL_VALUES = {
+   AUTO: -1,
+   OFF: 0,
+ };
+
+ // Map slider positions to actual values
+ const SLIDER_TO_VALUE_MAP = [
+   SPECIAL_VALUES.AUTO, // position 0 -> -1 (Auto)
+   SPECIAL_VALUES.OFF, // position 1 -> 0 (OFF)
+   128, // position 2 -> 128
+   512, // position 3 -> 512
+   1024, // position 4 -> 1024
+   2048, // position 5 -> 2048
+   4096, // position 6 -> 4096
+   8192, // position 7 -> 8192
+   16_384, // position 8 -> 16384
+   24_576, // position 9 -> 24576
+   32_768, // position 10 -> 32768
+ ];
+
+ // Get the slider position from an actual value
+ const getSliderPosition = (value: number): number => {
+   const index = SLIDER_TO_VALUE_MAP.indexOf(value);
+   return index === -1 ? 0 : index;
+ };
+
+ // Get the actual value from a slider position (0 must not be treated as falsy)
+ const getValueFromPosition = (position: number): number => {
+   const v = SLIDER_TO_VALUE_MAP[position];
+   return v === undefined ? SPECIAL_VALUES.AUTO : v;
+ };
+
+ interface ThinkingBudgetSliderProps {
+   defaultValue?: number;
+   onChange?: (value: number) => void;
+   value?: number;
+ }
+
+ const ThinkingBudgetSlider = memo<ThinkingBudgetSliderProps>(
+   ({ value, onChange, defaultValue }) => {
+     // Determine the initial budget value
+     const initialBudget = value ?? defaultValue ?? SPECIAL_VALUES.AUTO;
+
+     const [budget, setBudget] = useMergeState(initialBudget, {
+       defaultValue,
+       onChange,
+       value,
+     });
+
+     const sliderPosition = getSliderPosition(budget);
+
+     const updateWithSliderPosition = (position: number) => {
+       const newValue = getValueFromPosition(position);
+       setBudget(newValue);
+     };
+
+     const updateWithRealValue = (value: number) => {
+       setBudget(value);
+     };
+
+     const marks = useMemo(() => {
+       return {
+         0: 'Auto',
+         1: 'OFF',
+         2: '128',
+         3: '512',
+         4: '1K',
+         5: '2K',
+         6: '4K',
+         7: '8K',
+         8: '16K',
+         9: '24K',
+         // eslint-disable-next-line sort-keys-fix/sort-keys-fix
+         10: '32K',
+       };
+     }, []);
+
+     return (
+       <Flexbox align={'center'} gap={12} horizontal paddingInline={'4px 0'}>
+         <Flexbox flex={1}>
+           <Slider
+             marks={marks}
+             max={10}
+             min={0}
+             onChange={updateWithSliderPosition}
+             step={null}
+             tooltip={{ open: false }}
+             value={sliderPosition}
+           />
+         </Flexbox>
+         <div>
+           <InputNumber
+             // eslint-disable-next-line @typescript-eslint/no-unused-vars
+             formatter={(value, _info) => {
+               if (value === SPECIAL_VALUES.AUTO) return 'Auto';
+               if (value === SPECIAL_VALUES.OFF) return 'OFF';
+               return `${value}`;
+             }}
+             max={32_768}
+             min={-1}
+             onChange={(e) => {
+               if (e === null || e === undefined) return;
+               updateWithRealValue(e as number);
+             }}
+             parser={(value) => {
+               if (typeof value === 'string') {
+                 if (value.toLowerCase() === 'auto') return SPECIAL_VALUES.AUTO;
+                 if (value.toLowerCase() === 'off') return SPECIAL_VALUES.OFF;
+                 return parseInt(value.replaceAll(/[^\d-]/g, ''), 10) || 0;
+               }
+               if (typeof value === 'number') {
+                 return value;
+               }
+               return SPECIAL_VALUES.AUTO;
+             }}
+             step={128}
+             style={{ width: 80 }}
+             value={budget}
+           />
+         </div>
+       </Flexbox>
+     );
+   },
+ );
+
+ export default ThinkingBudgetSlider;
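A minimal usage sketch, not part of the package: the slider is a controlled component (value/onChange bridged through use-merge-value), so plain React state is enough to drive it; -1 maps to Auto and 0 to OFF.

import { useState } from 'react';

import ThinkingBudgetSlider from './ThinkingBudgetSlider';

const ThinkingBudgetDemo = () => {
  // -1 = Auto, 0 = OFF, any other value is an explicit thinking-token budget
  const [budget, setBudget] = useState(-1);
  return <ThinkingBudgetSlider onChange={setBudget} value={budget} />;
};

export default ThinkingBudgetDemo;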
package/src/libs/model-runtime/google/index.test.ts
CHANGED
@@ -560,6 +560,58 @@ describe('LobeGoogleAI', () => {
        },
      ]);
    });
+
+   it('should correctly convert function response message', async () => {
+     const messages: OpenAIChatMessage[] = [
+       {
+         content: '',
+         role: 'assistant',
+         tool_calls: [
+           {
+             id: 'call_1',
+             function: {
+               name: 'get_current_weather',
+               arguments: JSON.stringify({ location: 'London', unit: 'celsius' }),
+             },
+             type: 'function',
+           },
+         ],
+       },
+       {
+         content: '{"success":true,"data":{"temperature":"14°C"}}',
+         name: 'get_current_weather',
+         role: 'tool',
+         tool_call_id: 'call_1',
+       },
+     ];
+
+     const contents = await instance['buildGoogleMessages'](messages);
+     expect(contents).toHaveLength(2);
+     expect(contents).toEqual([
+       {
+         parts: [
+           {
+             functionCall: {
+               args: { location: 'London', unit: 'celsius' },
+               name: 'get_current_weather',
+             },
+           },
+         ],
+         role: 'model',
+       },
+       {
+         parts: [
+           {
+             functionResponse: {
+               name: 'get_current_weather',
+               response: { result: '{"success":true,"data":{"temperature":"14°C"}}' },
+             },
+           },
+         ],
+         role: 'user',
+       },
+     ]);
+   });
  });

  describe('buildGoogleTools', () => {
@@ -690,7 +742,7 @@ describe('LobeGoogleAI', () => {

    const converted = await instance['convertOAIMessagesToGoogleMessage'](message);
    expect(converted).toEqual({
-     role: '
+     role: 'model',
      parts: [
        {
          functionCall: {
package/src/libs/model-runtime/google/index.ts
CHANGED
@@ -120,28 +120,37 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  async chat(rawPayload: ChatStreamPayload, options?: ChatMethodOptions) {
    try {
      const payload = this.buildPayload(rawPayload);
-     const { model,
+     const { model, thinkingBudget } = payload;

      const thinkingConfig: GoogleAIThinkingConfig = {
        includeThoughts:
-         (!
+         !!thinkingBudget ||
+         (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
            ? true
            : undefined,
-       … (the remaining removed lines are truncated in this diff view)
+       // https://ai.google.dev/gemini-api/docs/thinking#set-budget
+       thinkingBudget: (() => {
+         if (thinkingBudget !== undefined && thinkingBudget !== null) {
+           if (model.includes('-2.5-flash-lite')) {
+             if (thinkingBudget === 0 || thinkingBudget === -1) {
+               return thinkingBudget;
+             }
+             return Math.max(512, Math.min(thinkingBudget, 24_576));
+           } else if (model.includes('-2.5-flash')) {
+             return Math.min(thinkingBudget, 24_576);
+           } else if (model.includes('-2.5-pro')) {
+             return Math.max(128, Math.min(thinkingBudget, 32_768));
+           }
+           return Math.min(thinkingBudget, 24_576);
+         }
+
+         if (model.includes('-2.5-pro') || model.includes('-2.5-flash')) {
+           return -1;
+         } else if (model.includes('-2.5-flash-lite')) {
+           return 0;
+         }
+         return undefined;
+       })(),
      };

      const contents = await this.buildGoogleMessages(payload.messages);
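Read on its own, the budget resolution above amounts to a per-model clamp with defaults. The sketch below is an illustrative restatement rather than code from the package; resolveThinkingBudget is a hypothetical helper name, and the clamp bounds are taken directly from the diff.

// Illustrative restatement of the IIFE above (hypothetical helper, same logic).
const resolveThinkingBudget = (model: string, thinkingBudget?: number | null) => {
  if (thinkingBudget !== undefined && thinkingBudget !== null) {
    if (model.includes('-2.5-flash-lite')) {
      // -1 (auto) and 0 (off) pass through; everything else is clamped to [512, 24576]
      if (thinkingBudget === 0 || thinkingBudget === -1) return thinkingBudget;
      return Math.max(512, Math.min(thinkingBudget, 24_576));
    }
    if (model.includes('-2.5-flash')) return Math.min(thinkingBudget, 24_576);
    if (model.includes('-2.5-pro')) return Math.max(128, Math.min(thinkingBudget, 32_768));
    return Math.min(thinkingBudget, 24_576);
  }
  // No explicit budget: fall back to per-family defaults (branch order mirrors the diff)
  if (model.includes('-2.5-pro') || model.includes('-2.5-flash')) return -1; // dynamic thinking
  if (model.includes('-2.5-flash-lite')) return 0;
  return undefined;
};

// resolveThinkingBudget('gemini-2.5-pro', 50_000)      === 32_768 (clamped)
// resolveThinkingBudget('gemini-2.5-flash', 30_000)    === 24_576 (clamped)
// resolveThinkingBudget('gemini-2.5-flash', undefined) === -1     (dynamic)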
@@ -344,6 +353,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
      system: system_message?.content,
    };
  }
+
  private convertContentToGooglePart = async (
    content: UserMessageContentPart,
  ): Promise<Part | undefined> => {
@@ -390,6 +400,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {

  private convertOAIMessagesToGoogleMessage = async (
    message: OpenAIChatMessage,
+   toolCallNameMap?: Map<string, string>,
  ): Promise<Content> => {
    const content = message.content as string | UserMessageContentPart[];
    if (!!message.tool_calls) {
@@ -400,10 +411,28 @@ export class LobeGoogleAI implements LobeRuntimeAI {
          name: tool.function.name,
        },
      })),
-     role: '
+     role: 'model',
    };
  }

+   // Convert a tool_call result into a functionResponse part
+   if (message.role === 'tool' && toolCallNameMap && message.tool_call_id) {
+     const functionName = toolCallNameMap.get(message.tool_call_id);
+     if (functionName) {
+       return {
+         parts: [
+           {
+             functionResponse: {
+               name: functionName,
+               response: { result: message.content },
+             },
+           },
+         ],
+         role: 'user',
+       };
+     }
+   }
+
  const getParts = async () => {
    if (typeof content === 'string') return [{ text: content }];
@@ -421,9 +450,20 @@ export class LobeGoogleAI implements LobeRuntimeAI {

  // convert messages from the OpenAI format to Google GenAI SDK
  private buildGoogleMessages = async (messages: OpenAIChatMessage[]): Promise<Content[]> => {
+   const toolCallNameMap = new Map<string, string>();
+   messages.forEach((message) => {
+     if (message.role === 'assistant' && message.tool_calls) {
+       message.tool_calls.forEach((toolCall) => {
+         if (toolCall.type === 'function') {
+           toolCallNameMap.set(toolCall.id, toolCall.function.name);
+         }
+       });
+     }
+   });
+
    const pools = messages
      .filter((message) => message.role !== 'function')
-     .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg));
+     .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg, toolCallNameMap));

    return Promise.all(pools);
  };
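A worked example of the new tool-result path (data shapes mirror the test added in index.test.ts above): buildGoogleMessages first records each assistant tool_call id in toolCallNameMap, and a later role 'tool' message is then emitted as a functionResponse part under role 'user'.

// Data shapes only, mirroring the new test above.
const toolCallNameMap = new Map([['call_1', 'get_current_weather']]);

const toolMessage = {
  content: '{"success":true,"data":{"temperature":"14°C"}}',
  role: 'tool' as const,
  tool_call_id: 'call_1',
};

// convertOAIMessagesToGoogleMessage(toolMessage, toolCallNameMap) now yields:
const expectedGoogleContent = {
  parts: [
    {
      functionResponse: {
        name: 'get_current_weather', // looked up via toolCallNameMap.get('call_1')
        response: { result: '{"success":true,"data":{"temperature":"14°C"}}' },
      },
    },
  ],
  role: 'user' as const,
};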
@@ -484,12 +524,18 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  ): GoogleFunctionCallTool[] | undefined {
    // Currently Tools (e.g. googleSearch) cannot be used together with other FunctionCall tools
    if (payload?.messages?.some((m) => m.tool_calls?.length)) {
-     return;
+     return this.buildFunctionDeclarations(tools);
    }
    if (payload?.enabledSearch) {
      return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
    }

+   return this.buildFunctionDeclarations(tools);
+ }
+
+ private buildFunctionDeclarations(
+   tools: ChatCompletionTool[] | undefined,
+ ): GoogleFunctionCallTool[] | undefined {
    if (!tools || tools.length === 0) return;

    return [
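This hunk is the v1.96.9 bug fix (#8256): previously the method returned undefined as soon as the history contained any tool_calls, so the function declarations were dropped on follow-up turns; now they are always built from tools, and googleSearch is only chosen when search is enabled and no tool calls are present. A compact restatement of the decision order (illustrative only, hypothetical helper name):

// Illustrative decision order for buildGoogleTools after the fix.
const pickGoogleTools = (hasToolCallsInHistory: boolean, enabledSearch: boolean) => {
  if (hasToolCallsInHistory) return 'functionDeclarations'; // previously: undefined (declarations dropped)
  if (enabledSearch) return 'googleSearch';
  return 'functionDeclarations';
};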
package/src/libs/model-runtime/utils/streams/google-ai.test.ts
CHANGED
@@ -7,7 +7,7 @@ import { GoogleGenerativeAIStream } from './google-ai';

describe('GoogleGenerativeAIStream', () => {
  it('should transform Google Generative AI stream to protocol stream', async () => {
-   vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+   vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');

    const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
      ({
@@ -59,7 +59,7 @@ describe('GoogleGenerativeAIStream', () => {
      // tool call
      'id: chat_1\n',
      'event: tool_calls\n',
-     `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"
+     `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0_abcd1234","index":0,"type":"function"}]\n\n`,

      // text
      'id: chat_1\n',
package/src/libs/model-runtime/utils/streams/ollama.test.ts
CHANGED
@@ -30,7 +30,7 @@ describe('OllamaStream', () => {
    });

    const protocolStream = OllamaStream(mockOllamaStream);
-
+
    const decoder = new TextDecoder();
    const chunks = [];

@@ -62,7 +62,7 @@ describe('OllamaStream', () => {
      'id: chat_2',
      'event: stop',
      `data: "finished"\n`,
-   ].map((line) => `${line}\n`)
+   ].map((line) => `${line}\n`),
    );
  });

@@ -116,7 +116,7 @@ describe('OllamaStream', () => {
    });

    it('tools use', async () => {
-     vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+     vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');

      const mockOllamaStream = new ReadableStream<ChatResponse>({
        start(controller) {
@@ -178,7 +178,7 @@ describe('OllamaStream', () => {
      [
        'id: chat_1',
        'event: tool_calls',
-       `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-
+       `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n`,
        'id: chat_1',
        'event: stop',
        `data: "finished"\n`,
package/src/libs/model-runtime/utils/streams/protocol.ts
CHANGED
@@ -1,5 +1,6 @@
import { CitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message';
import { safeParseJSON } from '@/utils/safeParseJSON';
+ import { nanoid } from '@/utils/uuid';

import { AgentRuntimeErrorType } from '../../error';
import { parseToolCalls } from '../../helpers';
@@ -98,7 +99,7 @@ export interface StreamProtocolToolCallChunk {
}

export const generateToolCallId = (index: number, functionName?: string) =>
- `${functionName || 'unknown_tool_call'}_${index}`;
+ `${functionName || 'unknown_tool_call'}_${index}_${nanoid()}`;

const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
  for await (const response of stream) {
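The nanoid suffix makes each generated tool call id unique per call; the collision rationale is inferred rather than stated in the changelog, and 'abcd1234' below is simply the value the updated stream tests mock nanoid() to return (the import path is shown for orientation only).

import { generateToolCallId } from './protocol'; // path for orientation only

const id = generateToolCallId(0, 'testFunction');
// before this change: 'testFunction_0'
// after this change:  'testFunction_0_<nanoid()>', e.g. 'testFunction_0_abcd1234' with nanoid mocked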
package/src/libs/model-runtime/utils/streams/vertex-ai.test.ts
CHANGED
@@ -137,7 +137,7 @@ describe('VertexAIStream', () => {
  });

  it('tool_calls', async () => {
-   vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+   vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');

    const rawChunks = [
      {
@@ -227,7 +227,7 @@ describe('VertexAIStream', () => {
      // text
      'id: chat_1\n',
      'event: tool_calls\n',
-     `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-
+     `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n\n`,
      'id: chat_1\n',
      'event: stop\n',
      'data: "STOP"\n\n',
package/src/services/chat.ts
CHANGED
@@ -263,6 +263,13 @@ class ChatService {
      if (modelExtendParams!.includes('reasoningEffort') && chatConfig.reasoningEffort) {
        extendParams.reasoning_effort = chatConfig.reasoningEffort;
      }
+
+     if (
+       modelExtendParams!.includes('thinkingBudget') &&
+       chatConfig.thinkingBudget !== undefined
+     ) {
+       extendParams.thinkingBudget = chatConfig.thinkingBudget;
+     }
    }

    return this.getChatCompletion(
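Putting the pieces together, the likely end-to-end flow (inferred from the hunks above, with illustrative values): the ThinkingBudgetSlider writes chatConfig.thinkingBudget, this guard copies it into extendParams when the model card declares 'thinkingBudget', and LobeGoogleAI.chat() then clamps the value per Gemini model family.

// Inferred flow with illustrative values.
const chatConfig = { thinkingBudget: 2048 };  // set via ThinkingBudgetSlider (-1 = Auto, 0 = OFF)
const modelExtendParams = ['thinkingBudget']; // from the model card in google.ts

const extendParams: Record<string, unknown> = {};
if (modelExtendParams.includes('thinkingBudget') && chatConfig.thinkingBudget !== undefined) {
  extendParams.thinkingBudget = chatConfig.thinkingBudget;
}
// extendParams is merged into the chat payload; LobeGoogleAI.chat() later clamps the
// value per model (see the google/index.ts hunk above).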
package/src/types/aiModel.ts
CHANGED
@@ -147,7 +147,8 @@ export type ExtendParamsType =
  | 'reasoningBudgetToken'
  | 'enableReasoning'
  | 'disableContextCaching'
- | 'reasoningEffort'
+ | 'reasoningEffort'
+ | 'thinkingBudget';

export interface AiModelSettings {
  extendParams?: ExtendParamsType[];