@lobehub/chat 1.64.3 → 1.65.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +33 -0
- package/README.md +1 -1
- package/changelog/v1.json +12 -0
- package/locales/ar/chat.json +7 -1
- package/locales/ar/models.json +6 -9
- package/locales/bg-BG/chat.json +7 -1
- package/locales/bg-BG/models.json +6 -9
- package/locales/de-DE/chat.json +7 -1
- package/locales/de-DE/models.json +6 -9
- package/locales/en-US/chat.json +7 -1
- package/locales/en-US/models.json +6 -9
- package/locales/es-ES/chat.json +8 -2
- package/locales/es-ES/models.json +6 -9
- package/locales/fa-IR/chat.json +7 -1
- package/locales/fa-IR/models.json +6 -3
- package/locales/fr-FR/chat.json +7 -1
- package/locales/fr-FR/models.json +6 -9
- package/locales/it-IT/chat.json +7 -1
- package/locales/it-IT/models.json +6 -9
- package/locales/ja-JP/chat.json +7 -1
- package/locales/ja-JP/models.json +6 -9
- package/locales/ko-KR/chat.json +7 -1
- package/locales/ko-KR/models.json +6 -9
- package/locales/nl-NL/chat.json +8 -2
- package/locales/nl-NL/models.json +6 -9
- package/locales/pl-PL/chat.json +7 -1
- package/locales/pl-PL/models.json +6 -9
- package/locales/pt-BR/chat.json +7 -1
- package/locales/pt-BR/models.json +6 -9
- package/locales/ru-RU/chat.json +8 -2
- package/locales/ru-RU/models.json +6 -9
- package/locales/tr-TR/chat.json +7 -1
- package/locales/tr-TR/models.json +6 -9
- package/locales/vi-VN/chat.json +7 -1
- package/locales/vi-VN/models.json +6 -9
- package/locales/zh-CN/chat.json +7 -1
- package/locales/zh-CN/models.json +6 -9
- package/locales/zh-TW/chat.json +7 -1
- package/locales/zh-TW/models.json +6 -9
- package/package.json +2 -2
- package/src/config/aiModels/anthropic.ts +5 -2
- package/src/config/aiModels/google.ts +7 -0
- package/src/const/settings/agent.ts +2 -0
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -13
- package/src/features/ChatInput/ActionBar/Model/ReasoningTokenSlider.tsx +92 -0
- package/src/features/ChatInput/ActionBar/Model/index.tsx +13 -18
- package/src/libs/agent-runtime/anthropic/index.ts +32 -14
- package/src/libs/agent-runtime/types/chat.ts +7 -1
- package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +126 -0
- package/src/libs/agent-runtime/utils/streams/anthropic.ts +46 -16
- package/src/libs/agent-runtime/utils/streams/protocol.ts +4 -0
- package/src/locales/default/chat.ts +7 -1
- package/src/services/chat.ts +26 -0
- package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +2 -0
- package/src/store/aiInfra/slices/aiModel/selectors.ts +6 -6
- package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +2 -0
- package/src/types/agent/index.ts +23 -9
- package/src/types/aiModel.ts +3 -8
- package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +0 -40
package/src/config/aiModels/google.ts

@@ -4,6 +4,7 @@ const googleChatModels: AIChatModelCard[] = [
   {
     abilities: {
       functionCall: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 2_097_152 + 8192,
@@ -19,6 +20,10 @@ const googleChatModels: AIChatModelCard[] = [
       output: 0,
     },
     releasedAt: '2025-02-05',
+    settings: {
+      searchImpl: 'params',
+      searchProvider: 'google',
+    },
     type: 'chat',
   },
   {
@@ -49,6 +54,7 @@ const googleChatModels: AIChatModelCard[] = [
   {
     abilities: {
       functionCall: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 1_048_576 + 8192,
@@ -65,6 +71,7 @@ const googleChatModels: AIChatModelCard[] = [
     releasedAt: '2025-02-05',
     settings: {
       searchImpl: 'params',
+      searchProvider: 'google',
     },
     type: 'chat',
   },
package/src/const/settings/agent.ts

@@ -19,7 +19,9 @@ export const DEFAULT_AGENT_CHAT_CONFIG: LobeAgentChatConfig = {
   enableAutoCreateTopic: true,
   enableCompressHistory: true,
   enableHistoryCount: true,
+  enableReasoning: true,
   historyCount: 8,
+  reasoningBudgetToken: 1024,
   searchMode: 'off',
 };

package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx

@@ -1,32 +1,57 @@
 import { Form } from '@lobehub/ui';
+import type { FormItemProps } from '@lobehub/ui';
 import { Switch } from 'antd';
+import isEqual from 'fast-deep-equal';
 import { memo } from 'react';
+import { useTranslation } from 'react-i18next';

 import { useAgentStore } from '@/store/agent';
 import { agentSelectors } from '@/store/agent/slices/chat';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';

+import ReasoningTokenSlider from './ReasoningTokenSlider';
+
 const ControlsForm = memo(() => {
-  const
+  const { t } = useTranslation('chat');
+  const [model, provider, updateAgentChatConfig] = useAgentStore((s) => [
     agentSelectors.currentAgentModel(s),
     agentSelectors.currentAgentModelProvider(s),
+    s.updateAgentChatConfig,
   ]);
-  const
-
-  );
+  const config = useAgentStore(agentSelectors.currentAgentChatConfig, isEqual);
+
+  const modelExtendParams = useAiInfraStore(aiModelSelectors.modelExtendParams(model, provider));
+
+  const items: FormItemProps[] = [
+    {
+      children: <Switch />,
+      label: t('extendParams.enableReasoning.title'),
+      minWidth: undefined,
+      name: 'enableReasoning',
+    },
+    {
+      children: <ReasoningTokenSlider />,
+      label: t('extendParams.reasoningBudgetToken.title'),
+      layout: 'vertical',
+      minWidth: undefined,
+      name: 'reasoningBudgetToken',
+      style: {
+        paddingBottom: 0,
+      },
+    },
+  ];

   return (
     <Form
-
-      items={
-
-
-
-
-      }))}
+      initialValues={config}
+      items={
+        (modelExtendParams || [])
+          .map((item: any) => items.find((i) => i.name === item))
+          .filter(Boolean) as FormItemProps[]
+      }
       itemsType={'flat'}
-      onValuesChange={(_, values) => {
-
+      onValuesChange={async (_, values) => {
+        await updateAgentChatConfig(values);
       }}
       size={'small'}
       style={{ fontSize: 12 }}
package/src/features/ChatInput/ActionBar/Model/ReasoningTokenSlider.tsx

@@ -0,0 +1,92 @@
+import { InputNumber, Slider } from 'antd';
+import { memo, useMemo } from 'react';
+import { Flexbox } from 'react-layout-kit';
+import useMergeState from 'use-merge-value';
+
+const Kibi = 1024;
+
+const exponent = (num: number) => Math.log2(num);
+const getRealValue = (num: number) => Math.round(Math.pow(2, num));
+const powerKibi = (num: number) => Math.round(Math.pow(2, num) * Kibi);
+
+interface MaxTokenSliderProps {
+  defaultValue?: number;
+  onChange?: (value: number) => void;
+  value?: number;
+}
+
+const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValue }) => {
+  const [token, setTokens] = useMergeState(0, {
+    defaultValue,
+    onChange,
+    value: value,
+  });
+
+  const [powValue, setPowValue] = useMergeState(0, {
+    defaultValue: exponent(typeof defaultValue === 'undefined' ? 0 : defaultValue / 1024),
+    value: exponent(typeof value === 'undefined' ? 0 : value / Kibi),
+  });
+
+  const updateWithPowValue = (value: number) => {
+    setPowValue(value);
+
+    setTokens(powerKibi(value));
+  };
+
+  const updateWithRealValue = (value: number) => {
+    setTokens(Math.round(value));
+
+    setPowValue(exponent(value / Kibi));
+  };
+
+  const marks = useMemo(() => {
+    return {
+      [exponent(1)]: '1k',
+      [exponent(2)]: '2k',
+      [exponent(4)]: '4k', // 4 kibi = 4096
+      [exponent(8)]: '8k',
+      [exponent(16)]: '16k',
+      [exponent(32)]: '32k',
+      [exponent(64)]: '64k',
+    };
+  }, []);
+
+  return (
+    <Flexbox align={'center'} gap={12} horizontal>
+      <Flexbox flex={1}>
+        <Slider
+          marks={marks}
+          max={exponent(64)}
+          min={exponent(1)}
+          onChange={updateWithPowValue}
+          step={null}
+          tooltip={{
+            formatter: (x) => {
+              if (typeof x === 'undefined') return;
+
+              let value = getRealValue(x);
+
+              if (value < Kibi) return ((value * Kibi) / 1000).toFixed(0) + 'k';
+            },
+          }}
+          value={powValue}
+        />
+      </Flexbox>
+      <div>
+        <InputNumber
+          changeOnWheel
+          min={0}
+          onChange={(e) => {
+            if (!e && e !== 0) return;
+
+            updateWithRealValue(e);
+          }}
+          step={4 * Kibi}
+          style={{ width: 60 }}
+          value={token}
+        />
+      </div>
+    </Flexbox>
+  );
+});
+export default MaxTokenSlider;
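Note: the slider above stores the real token budget but positions the handle on a log2 scale, so the 1k–64k marks sit evenly. A minimal standalone sketch of that mapping, mirroring the helpers in the new file (sample values are illustrative only, not part of the package):

// Helpers mirrored from ReasoningTokenSlider.tsx above.
const Kibi = 1024;
const exponent = (num: number) => Math.log2(num);
const powerKibi = (num: number) => Math.round(Math.pow(2, num) * Kibi);

// 4096 tokens sit at slider position log2(4096 / 1024) = 2 ...
console.log(exponent(4096 / Kibi)); // 2
// ...and converting the position back recovers the token count.
console.log(powerKibi(2)); // 4096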
package/src/features/ChatInput/ActionBar/Model/index.tsx

@@ -1,5 +1,5 @@
 import { ModelIcon } from '@lobehub/icons';
-import { ActionIcon
+import { ActionIcon } from '@lobehub/ui';
 import { Popover } from 'antd';
 import { createStyles } from 'antd-style';
 import { Settings2Icon } from 'lucide-react';
@@ -63,8 +63,8 @@ const ModelSwitch = memo(() => {
     agentSelectors.currentAgentModelProvider(s),
   ]);

-  const
-    aiModelSelectors.
+  const isModelHasExtendParams = useAiInfraStore(
+    aiModelSelectors.isModelHasExtendParams(model, provider),
   );

   const isMobile = useIsMobile();
@@ -82,34 +82,29 @@ const ModelSwitch = memo(() => {
   // );

   return (
-    <Flexbox
-      align={'center'}
-      className={isModelHasExtendControls ? styles.container : ''}
-      horizontal
-    >
+    <Flexbox align={'center'} className={isModelHasExtendParams ? styles.container : ''} horizontal>
       <ModelSwitchPanel>
         <Center
-          className={cx(styles.model,
+          className={cx(styles.model, isModelHasExtendParams && styles.modelWithControl)}
           height={36}
           width={36}
         >
-          <
-          <
-
-          </div>
-          </Tooltip>
+          <div className={styles.icon}>
+            <ModelIcon model={model} size={22} />
+          </div>
         </Center>
       </ModelSwitchPanel>

-      {
+      {isModelHasExtendParams && (
         <Flexbox style={{ marginInlineStart: -4 }}>
           <Popover
             arrow={false}
             content={<ControlsForm />}
-
+            placement={'topLeft'}
             styles={{
               body: {
-                minWidth: isMobile ? undefined :
+                minWidth: isMobile ? undefined : 350,
+                paddingBlock: 4,
                 width: isMobile ? '100vw' : undefined,
               },
             }}
@@ -118,7 +113,7 @@ const ModelSwitch = memo(() => {
               icon={Settings2Icon}
               placement={'bottom'}
               style={{ borderRadius: 20 }}
-              title={t('
+              title={t('extendParams.title')}
             />
           </Popover>
         </Flexbox>
package/src/libs/agent-runtime/anthropic/index.ts

@@ -97,12 +97,29 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
   }

   private async buildAnthropicPayload(payload: ChatStreamPayload) {
-    const { messages, model, max_tokens
+    const { messages, model, max_tokens, temperature, top_p, tools, thinking } = payload;
     const system_message = messages.find((m) => m.role === 'system');
     const user_messages = messages.filter((m) => m.role !== 'system');

+    if (!!thinking) {
+      const maxTokens =
+        max_tokens ?? (thinking?.budget_tokens ? thinking?.budget_tokens + 4096 : 4096);
+
+      // `temperature` may only be set to 1 when thinking is enabled.
+      // `top_p` must be unset when thinking is enabled.
+      return {
+        max_tokens: maxTokens,
+        messages: await buildAnthropicMessages(user_messages),
+        model,
+        system: system_message?.content as string,
+
+        thinking,
+        tools: buildAnthropicTools(tools),
+      } satisfies Anthropic.MessageCreateParams;
+    }
+
     return {
-      max_tokens,
+      max_tokens: max_tokens ?? 4096,
       messages: await buildAnthropicMessages(user_messages),
       model,
       system: system_message?.content as string,
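A minimal sketch (not part of the package) of a payload that would take the new thinking branch above; with no explicit max_tokens, buildAnthropicPayload falls back to budget_tokens + 4096, here 1024 + 4096 = 5120:

// Illustrative values only; model id taken from the test below, budget from the new default config.
const payload = {
  messages: [{ content: 'Which is larger, 9.8 or 9.11?', role: 'user' as const }],
  model: 'claude-3-7-sonnet-20250219',
  temperature: 1, // per the comment above, temperature may only be 1 while thinking is enabled
  thinking: { budget_tokens: 1024, type: 'enabled' as const },
};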
@@ -124,29 +141,30 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
       method: 'GET',
     });
     const json = await response.json();
-
+
     const modelList: AnthropicModelCard[] = json['data'];
-
+
     return modelList
       .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+        );

         return {
           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
           displayName: model.display_name,
           enabled: knownModel?.enabled || false,
           functionCall:
-            model.id.toLowerCase().includes('claude-3')
-
-
+            model.id.toLowerCase().includes('claude-3') ||
+            knownModel?.abilities?.functionCall ||
+            false,
           id: model.id,
-          reasoning:
-            knownModel?.abilities?.reasoning
-            || false,
+          reasoning: knownModel?.abilities?.reasoning || false,
           vision:
-            model.id.toLowerCase().includes('claude-3') &&
-
-            ||
+            (model.id.toLowerCase().includes('claude-3') &&
+              !model.id.toLowerCase().includes('claude-3-5-haiku')) ||
+            knownModel?.abilities?.vision ||
+            false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
package/src/libs/agent-runtime/types/chat.ts

@@ -88,8 +88,14 @@ export interface ChatStreamPayload {
    * @default 1
    */
   temperature: number;
+  /**
+   * use for Claude
+   */
+  thinking?: {
+    budget_tokens: number;
+    type: 'enabled' | 'disabled';
+  };
   tool_choice?: string;
-
   tools?: ChatCompletionTool[];
   /**
    * @title 控制生成文本中最高概率的单个令牌
package/src/libs/agent-runtime/utils/streams/anthropic.test.ts

@@ -384,6 +384,132 @@ describe('AnthropicStream', () => {
     expect(onToolCallMock).toHaveBeenCalledTimes(6);
   });

+  it('should handle thinking ', async () => {
+    const streams = [
+      {
+        type: 'message_start',
+        message: {
+          id: 'msg_01MNsLe7n1uVLtu6W8rCFujD',
+          type: 'message',
+          role: 'assistant',
+          model: 'claude-3-7-sonnet-20250219',
+          content: [],
+          stop_reason: null,
+          stop_sequence: null,
+          usage: {
+            input_tokens: 46,
+            cache_creation_input_tokens: 0,
+            cache_read_input_tokens: 0,
+            output_tokens: 11,
+          },
+        },
+      },
+      {
+        type: 'content_block_start',
+        index: 0,
+        content_block: { type: 'thinking', thinking: '', signature: '' },
+      },
+      {
+        type: 'content_block_delta',
+        index: 0,
+        delta: { type: 'thinking_delta', thinking: '我需要比较两个数字的' },
+      },
+      {
+        type: 'content_block_delta',
+        index: 0,
+        delta: { type: 'thinking_delta', thinking: '大小:9.8和9' },
+      },
+      {
+        type: 'content_block_delta',
+        index: 0,
+        delta: { type: 'thinking_delta', thinking: '11\n\n所以9.8比9.11大。' },
+      },
+      {
+        type: 'content_block_delta',
+        index: 0,
+        delta: {
+          type: 'signature_delta',
+          signature:
+            'EuYBCkQYAiJAHnHRJG4nPBrdTlo6CmXoyE8WYoQeoPiLnXaeuaM8ExdiIEkVvxK1DYXOz5sCubs2s/G1NsST8A003Zb8XmuhYBIMwDGMZSZ3+gxOEBpVGgzdpOlDNBTxke31SngiMKUk6WcSiA11OSVBuInNukoAhnRd5jPAEg7e5mIoz/qJwnQHV8I+heKUreP77eJdFipQaM3FHn+avEHuLa/Z/fu0O9BftDi+caB1UWDwJakNeWX1yYTvK+N1v4gRpKbj4AhctfYHMjq8qX9XTnXme5AGzCYC6HgYw2/RfalWzwNxI6k=',
+        },
+      },
+      { type: 'content_block_stop', index: 0 },
+      { type: 'content_block_start', index: 1, content_block: { type: 'text', text: '' } },
+      {
+        type: 'content_block_delta',
+        index: 1,
+        delta: { type: 'text_delta', text: '9.8比9.11大。' },
+      },
+      { type: 'content_block_stop', index: 1 },
+      {
+        type: 'message_delta',
+        delta: { stop_reason: 'end_turn', stop_sequence: null },
+        usage: { output_tokens: 354 },
+      },
+      { type: 'message_stop' },
+    ];
+
+    const mockReadableStream = new ReadableStream({
+      start(controller) {
+        streams.forEach((chunk) => {
+          controller.enqueue(chunk);
+        });
+        controller.close();
+      },
+    });
+
+    const protocolStream = AnthropicStream(mockReadableStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(
+      [
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: data',
+        'data: {"id":"msg_01MNsLe7n1uVLtu6W8rCFujD","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":46,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":11}}\n',
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: reasoning',
+        'data: ""\n',
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: reasoning',
+        'data: "我需要比较两个数字的"\n',
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: reasoning',
+        'data: "大小:9.8和9"\n',
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: reasoning',
+        'data: "11\\n\\n所以9.8比9.11大。"\n',
+        // Tool calls
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: reasoning_signature',
+        `data: "EuYBCkQYAiJAHnHRJG4nPBrdTlo6CmXoyE8WYoQeoPiLnXaeuaM8ExdiIEkVvxK1DYXOz5sCubs2s/G1NsST8A003Zb8XmuhYBIMwDGMZSZ3+gxOEBpVGgzdpOlDNBTxke31SngiMKUk6WcSiA11OSVBuInNukoAhnRd5jPAEg7e5mIoz/qJwnQHV8I+heKUreP77eJdFipQaM3FHn+avEHuLa/Z/fu0O9BftDi+caB1UWDwJakNeWX1yYTvK+N1v4gRpKbj4AhctfYHMjq8qX9XTnXme5AGzCYC6HgYw2/RfalWzwNxI6k="\n`,
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: data',
+        `data: {"type":"content_block_stop","index":0}\n`,
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: data',
+        `data: ""\n`,
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: text',
+        `data: "9.8比9.11大。"\n`,
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: data',
+        `data: {"type":"content_block_stop","index":1}\n`,
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: stop',
+        'data: "end_turn"\n',
+        'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
+        'event: stop',
+        'data: "message_stop"\n',
+      ].map((item) => `${item}\n`),
+    );
+  });
   it('should handle ReadableStream input', async () => {
     const mockReadableStream = new ReadableStream({
       start(controller) {
package/src/libs/agent-runtime/utils/streams/anthropic.ts

@@ -14,12 +14,12 @@ import {

 export const transformAnthropicStream = (
   chunk: Anthropic.MessageStreamEvent,
-
+  context: StreamContext,
 ): StreamProtocolChunk => {
   // maybe need another structure to add support for multiple choices
   switch (chunk.type) {
     case 'message_start': {
-
+      context.id = chunk.message.id;
       return { data: chunk.message, id: chunk.message.id, type: 'data' };
     }
     case 'content_block_start': {
@@ -27,12 +27,12 @@ export const transformAnthropicStream = (
         const toolChunk = chunk.content_block;

         // if toolIndex is not defined, set it to 0
-        if (typeof
-
+        if (typeof context.toolIndex === 'undefined') {
+          context.toolIndex = 0;
         }
         // if toolIndex is defined, increment it
         else {
-
+          context.toolIndex += 1;
         }

         const toolCall: StreamToolCallChunkData = {
@@ -41,22 +41,36 @@ export const transformAnthropicStream = (
             name: toolChunk.name,
           },
           id: toolChunk.id,
-          index:
+          index: context.toolIndex,
           type: 'function',
         };

-
+        context.tool = { id: toolChunk.id, index: context.toolIndex, name: toolChunk.name };

-        return { data: [toolCall], id:
+        return { data: [toolCall], id: context.id, type: 'tool_calls' };
       }

-
+      if (chunk.content_block.type === 'thinking') {
+        const thinkingChunk = chunk.content_block;
+
+        return { data: thinkingChunk.thinking, id: context.id, type: 'reasoning' };
+      }
+
+      if (chunk.content_block.type === 'redacted_thinking') {
+        return {
+          data: chunk.content_block.data,
+          id: context.id,
+          type: 'reasoning',
+        };
+      }
+
+      return { data: chunk.content_block.text, id: context.id, type: 'data' };
     }

     case 'content_block_delta': {
       switch (chunk.delta.type) {
         case 'text_delta': {
-          return { data: chunk.delta.text, id:
+          return { data: chunk.delta.text, id: context.id, type: 'text' };
         }

         case 'input_json_delta': {
@@ -64,34 +78,50 @@ export const transformAnthropicStream = (

           const toolCall: StreamToolCallChunkData = {
             function: { arguments: delta },
-            index:
+            index: context.toolIndex || 0,
             type: 'function',
           };

           return {
             data: [toolCall],
-            id:
+            id: context.id,
             type: 'tool_calls',
           } as StreamProtocolToolCallChunk;
         }

+        case 'signature_delta': {
+          return {
+            data: chunk.delta.signature,
+            id: context.id,
+            type: 'reasoning_signature' as any,
+          };
+        }
+
+        case 'thinking_delta': {
+          return {
+            data: chunk.delta.thinking,
+            id: context.id,
+            type: 'reasoning',
+          };
+        }
+
         default: {
           break;
         }
       }
-      return { data: chunk, id:
+      return { data: chunk, id: context.id, type: 'data' };
     }

     case 'message_delta': {
-      return { data: chunk.delta.stop_reason, id:
+      return { data: chunk.delta.stop_reason, id: context.id, type: 'stop' };
     }

     case 'message_stop': {
-      return { data: 'message_stop', id:
+      return { data: 'message_stop', id: context.id, type: 'stop' };
     }

     default: {
-      return { data: chunk, id:
+      return { data: chunk, id: context.id, type: 'data' };
     }
   }
 };
package/src/libs/agent-runtime/utils/streams/protocol.ts

@@ -12,6 +12,10 @@ export interface StreamContext {
    * this flag is used to check if the pplx citation is returned,and then not return it again
    */
   returnedPplxCitation?: boolean;
+  thinking?: {
+    id: string;
+    name: string;
+  };
   tool?: {
     id: string;
     index: number;
package/src/locales/default/chat.ts

@@ -32,7 +32,13 @@ export default {
   },
   duplicateTitle: '{{title}} 副本',
   emptyAgent: '暂无助手',
-
+  extendParams: {
+    enableReasoning: {
+      title: '开启深度思考',
+    },
+    reasoningBudgetToken: {
+      title: '思考消耗 Token',
+    },
     title: '模型扩展功能',
   },
   historyRange: '历史范围',