@librechat/agents 3.0.78 → 3.0.80
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/llm/bedrock/index.cjs +98 -25
- package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
- package/dist/cjs/messages/core.cjs +1 -1
- package/dist/cjs/messages/core.cjs.map +1 -1
- package/dist/cjs/stream.cjs +4 -2
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/esm/llm/bedrock/index.mjs +97 -24
- package/dist/esm/llm/bedrock/index.mjs.map +1 -1
- package/dist/esm/messages/core.mjs +1 -1
- package/dist/esm/messages/core.mjs.map +1 -1
- package/dist/esm/stream.mjs +4 -2
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/types/llm/bedrock/index.d.ts +86 -7
- package/dist/types/llm/bedrock/types.d.ts +27 -0
- package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
- package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
- package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
- package/package.json +5 -3
- package/src/llm/bedrock/index.ts +180 -43
- package/src/llm/bedrock/llm.spec.ts +616 -0
- package/src/llm/bedrock/types.ts +51 -0
- package/src/llm/bedrock/utils/index.ts +18 -0
- package/src/llm/bedrock/utils/message_inputs.ts +563 -0
- package/src/llm/bedrock/utils/message_outputs.ts +310 -0
- package/src/messages/core.ts +1 -1
- package/src/scripts/thinking-bedrock.ts +159 -0
- package/src/scripts/thinking.ts +39 -18
- package/src/scripts/tools.ts +7 -3
- package/src/stream.ts +4 -2
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Utility functions for converting Bedrock Converse responses to LangChain messages.
|
|
3
|
+
* Ported from @langchain/aws common.js
|
|
4
|
+
*/
|
|
5
|
+
import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
|
|
6
|
+
import { ChatGenerationChunk } from '@langchain/core/outputs';
|
|
7
|
+
import type {
|
|
8
|
+
BedrockMessage,
|
|
9
|
+
ConverseResponse,
|
|
10
|
+
ContentBlockDeltaEvent,
|
|
11
|
+
ConverseStreamMetadataEvent,
|
|
12
|
+
ContentBlockStartEvent,
|
|
13
|
+
ReasoningContentBlock,
|
|
14
|
+
ReasoningContentBlockDelta,
|
|
15
|
+
MessageContentReasoningBlock,
|
|
16
|
+
MessageContentReasoningBlockReasoningTextPartial,
|
|
17
|
+
MessageContentReasoningBlockRedacted,
|
|
18
|
+
} from '../types';
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* Convert a Bedrock reasoning block delta to a LangChain partial reasoning block.
|
|
22
|
+
*/
|
|
23
|
+
export function bedrockReasoningDeltaToLangchainPartialReasoningBlock(
|
|
24
|
+
reasoningContent: ReasoningContentBlockDelta
|
|
25
|
+
):
|
|
26
|
+
| MessageContentReasoningBlockReasoningTextPartial
|
|
27
|
+
| MessageContentReasoningBlockRedacted {
|
|
28
|
+
const { text, redactedContent, signature } =
|
|
29
|
+
reasoningContent as ReasoningContentBlockDelta & {
|
|
30
|
+
text?: string;
|
|
31
|
+
redactedContent?: Uint8Array;
|
|
32
|
+
signature?: string;
|
|
33
|
+
};
|
|
34
|
+
|
|
35
|
+
if (typeof text === 'string') {
|
|
36
|
+
return {
|
|
37
|
+
type: 'reasoning_content',
|
|
38
|
+
reasoningText: { text },
|
|
39
|
+
};
|
|
40
|
+
}
|
|
41
|
+
if (signature != null) {
|
|
42
|
+
return {
|
|
43
|
+
type: 'reasoning_content',
|
|
44
|
+
reasoningText: { signature },
|
|
45
|
+
};
|
|
46
|
+
}
|
|
47
|
+
if (redactedContent != null) {
|
|
48
|
+
return {
|
|
49
|
+
type: 'reasoning_content',
|
|
50
|
+
redactedContent: Buffer.from(redactedContent).toString('base64'),
|
|
51
|
+
};
|
|
52
|
+
}
|
|
53
|
+
throw new Error('Invalid reasoning content');
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Convert a Bedrock reasoning block to a LangChain reasoning block.
|
|
58
|
+
*/
|
|
59
|
+
export function bedrockReasoningBlockToLangchainReasoningBlock(
|
|
60
|
+
reasoningContent: ReasoningContentBlock
|
|
61
|
+
): MessageContentReasoningBlock {
|
|
62
|
+
const { reasoningText, redactedContent } =
|
|
63
|
+
reasoningContent as ReasoningContentBlock & {
|
|
64
|
+
reasoningText?: { text?: string; signature?: string };
|
|
65
|
+
redactedContent?: Uint8Array;
|
|
66
|
+
};
|
|
67
|
+
|
|
68
|
+
if (reasoningText != null) {
|
|
69
|
+
return {
|
|
70
|
+
type: 'reasoning_content',
|
|
71
|
+
reasoningText: reasoningText,
|
|
72
|
+
};
|
|
73
|
+
}
|
|
74
|
+
if (redactedContent != null) {
|
|
75
|
+
return {
|
|
76
|
+
type: 'reasoning_content',
|
|
77
|
+
redactedContent: Buffer.from(redactedContent).toString('base64'),
|
|
78
|
+
};
|
|
79
|
+
}
|
|
80
|
+
throw new Error('Invalid reasoning content');
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Convert a Bedrock Converse message to a LangChain message.
|
|
85
|
+
*/
|
|
86
|
+
export function convertConverseMessageToLangChainMessage(
|
|
87
|
+
message: BedrockMessage,
|
|
88
|
+
responseMetadata: Omit<ConverseResponse, 'output'>
|
|
89
|
+
): AIMessage {
|
|
90
|
+
if (message.content == null) {
|
|
91
|
+
throw new Error('No message content found in response.');
|
|
92
|
+
}
|
|
93
|
+
if (message.role !== 'assistant') {
|
|
94
|
+
throw new Error(
|
|
95
|
+
`Unsupported message role received in ChatBedrockConverse response: ${message.role}`
|
|
96
|
+
);
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
let requestId: string | undefined;
|
|
100
|
+
if (
|
|
101
|
+
'$metadata' in responseMetadata &&
|
|
102
|
+
responseMetadata.$metadata != null &&
|
|
103
|
+
typeof responseMetadata.$metadata === 'object' &&
|
|
104
|
+
'requestId' in responseMetadata.$metadata
|
|
105
|
+
) {
|
|
106
|
+
requestId = responseMetadata.$metadata.requestId as string;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
let tokenUsage:
|
|
110
|
+
| { input_tokens: number; output_tokens: number; total_tokens: number }
|
|
111
|
+
| undefined;
|
|
112
|
+
if (responseMetadata.usage != null) {
|
|
113
|
+
const input_tokens = responseMetadata.usage.inputTokens ?? 0;
|
|
114
|
+
const output_tokens = responseMetadata.usage.outputTokens ?? 0;
|
|
115
|
+
tokenUsage = {
|
|
116
|
+
input_tokens,
|
|
117
|
+
output_tokens,
|
|
118
|
+
total_tokens:
|
|
119
|
+
responseMetadata.usage.totalTokens ?? input_tokens + output_tokens,
|
|
120
|
+
};
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
if (
|
|
124
|
+
message.content.length === 1 &&
|
|
125
|
+
'text' in message.content[0] &&
|
|
126
|
+
typeof message.content[0].text === 'string'
|
|
127
|
+
) {
|
|
128
|
+
return new AIMessage({
|
|
129
|
+
content: message.content[0].text,
|
|
130
|
+
response_metadata: responseMetadata,
|
|
131
|
+
usage_metadata: tokenUsage,
|
|
132
|
+
id: requestId,
|
|
133
|
+
});
|
|
134
|
+
} else {
|
|
135
|
+
const toolCalls: Array<{
|
|
136
|
+
id?: string;
|
|
137
|
+
name: string;
|
|
138
|
+
args: Record<string, unknown>;
|
|
139
|
+
type: 'tool_call';
|
|
140
|
+
}> = [];
|
|
141
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
142
|
+
const content: any[] = [];
|
|
143
|
+
|
|
144
|
+
message.content.forEach((c) => {
|
|
145
|
+
if (
|
|
146
|
+
'toolUse' in c &&
|
|
147
|
+
c.toolUse != null &&
|
|
148
|
+
c.toolUse.name != null &&
|
|
149
|
+
c.toolUse.name !== '' &&
|
|
150
|
+
c.toolUse.input != null &&
|
|
151
|
+
typeof c.toolUse.input === 'object'
|
|
152
|
+
) {
|
|
153
|
+
toolCalls.push({
|
|
154
|
+
id: c.toolUse.toolUseId,
|
|
155
|
+
name: c.toolUse.name,
|
|
156
|
+
args: c.toolUse.input as Record<string, unknown>,
|
|
157
|
+
type: 'tool_call',
|
|
158
|
+
});
|
|
159
|
+
} else if ('text' in c && typeof c.text === 'string') {
|
|
160
|
+
content.push({ type: 'text', text: c.text });
|
|
161
|
+
} else if ('reasoningContent' in c && c.reasoningContent != null) {
|
|
162
|
+
content.push(
|
|
163
|
+
bedrockReasoningBlockToLangchainReasoningBlock(c.reasoningContent)
|
|
164
|
+
);
|
|
165
|
+
} else {
|
|
166
|
+
content.push(c);
|
|
167
|
+
}
|
|
168
|
+
});
|
|
169
|
+
|
|
170
|
+
return new AIMessage({
|
|
171
|
+
content: content.length ? content : '',
|
|
172
|
+
tool_calls: toolCalls.length ? toolCalls : undefined,
|
|
173
|
+
response_metadata: responseMetadata,
|
|
174
|
+
usage_metadata: tokenUsage,
|
|
175
|
+
id: requestId,
|
|
176
|
+
});
|
|
177
|
+
}
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
/**
|
|
181
|
+
* Handle a content block delta event from Bedrock Converse stream.
|
|
182
|
+
*/
|
|
183
|
+
export function handleConverseStreamContentBlockDelta(
|
|
184
|
+
contentBlockDelta: ContentBlockDeltaEvent
|
|
185
|
+
): ChatGenerationChunk {
|
|
186
|
+
if (contentBlockDelta.delta == null) {
|
|
187
|
+
throw new Error('No delta found in content block.');
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
if (typeof contentBlockDelta.delta.text === 'string') {
|
|
191
|
+
return new ChatGenerationChunk({
|
|
192
|
+
text: contentBlockDelta.delta.text,
|
|
193
|
+
message: new AIMessageChunk({
|
|
194
|
+
content: contentBlockDelta.delta.text,
|
|
195
|
+
response_metadata: {
|
|
196
|
+
contentBlockIndex: contentBlockDelta.contentBlockIndex,
|
|
197
|
+
},
|
|
198
|
+
}),
|
|
199
|
+
});
|
|
200
|
+
} else if (contentBlockDelta.delta.toolUse != null) {
|
|
201
|
+
const index = contentBlockDelta.contentBlockIndex;
|
|
202
|
+
return new ChatGenerationChunk({
|
|
203
|
+
text: '',
|
|
204
|
+
message: new AIMessageChunk({
|
|
205
|
+
content: '',
|
|
206
|
+
tool_call_chunks: [
|
|
207
|
+
{
|
|
208
|
+
args: contentBlockDelta.delta.toolUse.input as string,
|
|
209
|
+
index,
|
|
210
|
+
type: 'tool_call_chunk',
|
|
211
|
+
},
|
|
212
|
+
],
|
|
213
|
+
response_metadata: {
|
|
214
|
+
contentBlockIndex: contentBlockDelta.contentBlockIndex,
|
|
215
|
+
},
|
|
216
|
+
}),
|
|
217
|
+
});
|
|
218
|
+
} else if (contentBlockDelta.delta.reasoningContent != null) {
|
|
219
|
+
const reasoningBlock =
|
|
220
|
+
bedrockReasoningDeltaToLangchainPartialReasoningBlock(
|
|
221
|
+
contentBlockDelta.delta.reasoningContent
|
|
222
|
+
);
|
|
223
|
+
// Extract the text for additional_kwargs.reasoning_content (for stream handler compatibility)
|
|
224
|
+
const reasoningText =
|
|
225
|
+
'reasoningText' in reasoningBlock
|
|
226
|
+
? (reasoningBlock.reasoningText.text ??
|
|
227
|
+
reasoningBlock.reasoningText.signature ??
|
|
228
|
+
('redactedContent' in reasoningBlock
|
|
229
|
+
? reasoningBlock.redactedContent
|
|
230
|
+
: ''))
|
|
231
|
+
: '';
|
|
232
|
+
return new ChatGenerationChunk({
|
|
233
|
+
text: '',
|
|
234
|
+
message: new AIMessageChunk({
|
|
235
|
+
content: [reasoningBlock],
|
|
236
|
+
additional_kwargs: {
|
|
237
|
+
// Set reasoning_content for stream handler to detect reasoning mode
|
|
238
|
+
reasoning_content: reasoningText,
|
|
239
|
+
},
|
|
240
|
+
response_metadata: {
|
|
241
|
+
contentBlockIndex: contentBlockDelta.contentBlockIndex,
|
|
242
|
+
},
|
|
243
|
+
}),
|
|
244
|
+
});
|
|
245
|
+
} else {
|
|
246
|
+
throw new Error(
|
|
247
|
+
`Unsupported content block type(s): ${JSON.stringify(contentBlockDelta.delta, null, 2)}`
|
|
248
|
+
);
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
/**
|
|
253
|
+
* Handle a content block start event from Bedrock Converse stream.
|
|
254
|
+
*/
|
|
255
|
+
export function handleConverseStreamContentBlockStart(
|
|
256
|
+
contentBlockStart: ContentBlockStartEvent
|
|
257
|
+
): ChatGenerationChunk | null {
|
|
258
|
+
const index = contentBlockStart.contentBlockIndex;
|
|
259
|
+
|
|
260
|
+
if (contentBlockStart.start?.toolUse != null) {
|
|
261
|
+
return new ChatGenerationChunk({
|
|
262
|
+
text: '',
|
|
263
|
+
message: new AIMessageChunk({
|
|
264
|
+
content: '',
|
|
265
|
+
tool_call_chunks: [
|
|
266
|
+
{
|
|
267
|
+
name: contentBlockStart.start.toolUse.name,
|
|
268
|
+
id: contentBlockStart.start.toolUse.toolUseId,
|
|
269
|
+
index,
|
|
270
|
+
type: 'tool_call_chunk',
|
|
271
|
+
},
|
|
272
|
+
],
|
|
273
|
+
response_metadata: {
|
|
274
|
+
contentBlockIndex: index,
|
|
275
|
+
},
|
|
276
|
+
}),
|
|
277
|
+
});
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
// Return null for non-tool content block starts (text blocks don't need special handling)
|
|
281
|
+
return null;
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
/**
|
|
285
|
+
* Handle a metadata event from Bedrock Converse stream.
|
|
286
|
+
*/
|
|
287
|
+
export function handleConverseStreamMetadata(
|
|
288
|
+
metadata: ConverseStreamMetadataEvent,
|
|
289
|
+
extra: { streamUsage: boolean }
|
|
290
|
+
): ChatGenerationChunk {
|
|
291
|
+
const inputTokens = metadata.usage?.inputTokens ?? 0;
|
|
292
|
+
const outputTokens = metadata.usage?.outputTokens ?? 0;
|
|
293
|
+
const usage_metadata = {
|
|
294
|
+
input_tokens: inputTokens,
|
|
295
|
+
output_tokens: outputTokens,
|
|
296
|
+
total_tokens: metadata.usage?.totalTokens ?? inputTokens + outputTokens,
|
|
297
|
+
};
|
|
298
|
+
|
|
299
|
+
return new ChatGenerationChunk({
|
|
300
|
+
text: '',
|
|
301
|
+
message: new AIMessageChunk({
|
|
302
|
+
content: '',
|
|
303
|
+
usage_metadata: extra.streamUsage ? usage_metadata : undefined,
|
|
304
|
+
response_metadata: {
|
|
305
|
+
// Use the same key as returned from the Converse API
|
|
306
|
+
metadata,
|
|
307
|
+
},
|
|
308
|
+
}),
|
|
309
|
+
});
|
|
310
|
+
}
|
package/src/messages/core.ts
CHANGED
|
@@ -41,7 +41,7 @@ User: ${userMessage[1]}
|
|
|
41
41
|
const _allowedTypes = ['image_url', 'text', 'tool_use', 'tool_result'];
|
|
42
42
|
const allowedTypesByProvider: Record<string, string[]> = {
|
|
43
43
|
default: _allowedTypes,
|
|
44
|
-
[Providers.ANTHROPIC]: [..._allowedTypes, 'thinking'],
|
|
44
|
+
[Providers.ANTHROPIC]: [..._allowedTypes, 'thinking', 'redacted_thinking'],
|
|
45
45
|
[Providers.BEDROCK]: [..._allowedTypes, 'reasoning_content'],
|
|
46
46
|
[Providers.OPENAI]: _allowedTypes,
|
|
47
47
|
};
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
// src/scripts/thinking-bedrock.ts
|
|
2
|
+
import { config } from 'dotenv';
|
|
3
|
+
config();
|
|
4
|
+
import { HumanMessage, BaseMessage } from '@langchain/core/messages';
|
|
5
|
+
import type { UsageMetadata } from '@langchain/core/messages';
|
|
6
|
+
import * as t from '@/types';
|
|
7
|
+
import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
|
|
8
|
+
import { createCodeExecutionTool } from '@/tools/CodeExecutor';
|
|
9
|
+
import { ToolEndHandler, ModelEndHandler } from '@/events';
|
|
10
|
+
import { GraphEvents, Providers } from '@/common';
|
|
11
|
+
import { getLLMConfig } from '@/utils/llmConfig';
|
|
12
|
+
import { getArgs } from '@/scripts/args';
|
|
13
|
+
import { Run } from '@/run';
|
|
14
|
+
|
|
15
|
+
// Module-level mutable state, kept here (not inside the test function) so the
// process-level error handlers at the bottom of this script can dump it when a
// run fails.
const conversationHistory: BaseMessage[] = [];
let _contentParts: t.MessageContentComplex[] = [];
const collectedUsage: UsageMetadata[] = [];

/**
 * Manual test script for Bedrock "thinking" (extended reasoning).
 *
 * Enables thinking via `additionalModelRequestFields` on a Claude 3.7 Sonnet
 * Bedrock model and runs two streamed queries through `Run.processStream`:
 * a single-turn tool-using query, then a multi-turn follow-up. Results and
 * aggregated content parts are dumped to the console for inspection.
 */
async function testBedrockThinking(): Promise<void> {
  const { userName } = await getArgs();
  const instructions = `You are a helpful AI assistant for ${userName}. When answering questions, be thorough in your reasoning.`;
  // Aggregator collects streamed content parts; keep a module-level reference
  // so the error handlers can print them.
  const { contentParts, aggregateContent } = createContentAggregator();
  _contentParts = contentParts as t.MessageContentComplex[];

  // Set up event handlers: tool/model lifecycle handlers plus delta handlers
  // that feed every streamed event into the content aggregator.
  const customHandlers = {
    [GraphEvents.TOOL_END]: new ToolEndHandler(),
    [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
    [GraphEvents.ON_RUN_STEP_COMPLETED]: {
      handle: (
        event: GraphEvents.ON_RUN_STEP_COMPLETED,
        data: t.StreamEventData
      ): void => {
        console.log('====== ON_RUN_STEP_COMPLETED ======');
        aggregateContent({
          event,
          data: data as unknown as { result: t.ToolEndEvent },
        });
      },
    },
    [GraphEvents.ON_RUN_STEP]: {
      handle: (event: GraphEvents.ON_RUN_STEP, data: t.RunStep) => {
        aggregateContent({ event, data });
      },
    },
    [GraphEvents.ON_RUN_STEP_DELTA]: {
      handle: (
        event: GraphEvents.ON_RUN_STEP_DELTA,
        data: t.RunStepDeltaEvent
      ) => {
        aggregateContent({ event, data });
      },
    },
    [GraphEvents.ON_MESSAGE_DELTA]: {
      handle: (
        event: GraphEvents.ON_MESSAGE_DELTA,
        data: t.MessageDeltaEvent
      ) => {
        aggregateContent({ event, data });
      },
    },
    [GraphEvents.ON_REASONING_DELTA]: {
      handle: (
        event: GraphEvents.ON_REASONING_DELTA,
        data: t.ReasoningDeltaEvent
      ) => {
        aggregateContent({ event, data });
      },
    },
  };

  const baseLlmConfig = getLLMConfig(Providers.BEDROCK);

  // Enable thinking with token budget for Bedrock. Unlike the direct
  // Anthropic provider, Bedrock takes the `thinking` option through
  // `additionalModelRequestFields`.
  const llmConfig = {
    ...baseLlmConfig,
    model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
    maxTokens: 5000,
    additionalModelRequestFields: {
      thinking: { type: 'enabled', budget_tokens: 2000 },
    },
  };

  const run = await Run.create<t.IState>({
    runId: 'test-bedrock-thinking-id',
    graphConfig: {
      instructions,
      type: 'standard',
      tools: [createCodeExecutionTool()],
      llmConfig,
    },
    returnContent: true,
    customHandlers: customHandlers as t.RunConfig['customHandlers'],
  });

  const config = {
    configurable: {
      thread_id: 'bedrock-thinking-test-thread',
    },
    streamMode: 'values',
    version: 'v2' as const,
  };

  // Test 1: Regular thinking mode — a simple tool-using (code execution)
  // prompt, streamed with thinking enabled.
  console.log('\n\nTest 1: Bedrock Regular thinking mode');
  const userMessage1 = `Please print 'hello world' in python`;
  conversationHistory.push(new HumanMessage(userMessage1));

  console.log('Running first query with Bedrock thinking enabled...');
  const firstInputs = { messages: [...conversationHistory] };
  await run.processStream(firstInputs, config);

  // Extract and display thinking blocks
  const finalMessages = run.getRunMessages();
  console.log('\n\nFinal messages after Test 1:');
  console.dir(finalMessages, { depth: null });

  // Test 2: Try multi-turn conversation — the follow-up references the first
  // answer, exercising thinking across history.
  console.log(
    '\n\nTest 2: Multi-turn conversation with Bedrock thinking enabled'
  );
  const userMessage2 = `Given your previous analysis, what would be the most significant technical challenges in making this transition?`;
  conversationHistory.push(new HumanMessage(userMessage2));

  console.log('Running second query with Bedrock thinking enabled...');
  const secondInputs = { messages: [...conversationHistory] };
  await run.processStream(secondInputs, config);

  // Display thinking blocks for second response
  const finalMessages2 = run.getRunMessages();
  console.log('\n\nBedrock thinking feature test completed!');
  console.dir(finalMessages2, { depth: null });

  console.log('\n\nContent parts:');
  console.dir(_contentParts, { depth: null });
}
|
|
138
|
+
|
|
139
|
+
// On an unhandled promise rejection, dump the accumulated conversation and
// content-part state for debugging, then exit non-zero.
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  console.log('Conversation history:');
  console.dir(conversationHistory, { depth: null });
  console.log('Content parts:');
  console.dir(_contentParts, { depth: null });
  process.exit(1);
});

// Log synchronous uncaught exceptions; note this handler does NOT exit.
process.on('uncaughtException', (err) => {
  console.error('Uncaught Exception:', err);
});

// Script entry point: run the test; on failure, dump state and exit non-zero.
testBedrockThinking().catch((err) => {
  console.error(err);
  console.log('Conversation history:');
  console.dir(conversationHistory, { depth: null });
  console.log('Content parts:');
  console.dir(_contentParts, { depth: null });
  process.exit(1);
});
|
package/src/scripts/thinking.ts
CHANGED
|
@@ -1,7 +1,11 @@
|
|
|
1
1
|
// src/scripts/test-thinking.ts
|
|
2
2
|
import { config } from 'dotenv';
|
|
3
3
|
config();
|
|
4
|
-
import {
|
|
4
|
+
import {
|
|
5
|
+
HumanMessage,
|
|
6
|
+
SystemMessage,
|
|
7
|
+
BaseMessage,
|
|
8
|
+
} from '@langchain/core/messages';
|
|
5
9
|
import type { UsageMetadata } from '@langchain/core/messages';
|
|
6
10
|
import * as t from '@/types';
|
|
7
11
|
import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
|
|
@@ -21,17 +25,23 @@ async function testThinking(): Promise<void> {
|
|
|
21
25
|
const instructions = `You are a helpful AI assistant for ${userName}. When answering questions, be thorough in your reasoning.`;
|
|
22
26
|
const { contentParts, aggregateContent } = createContentAggregator();
|
|
23
27
|
_contentParts = contentParts as t.MessageContentComplex[];
|
|
24
|
-
|
|
28
|
+
|
|
25
29
|
// Set up event handlers
|
|
26
30
|
const customHandlers = {
|
|
27
31
|
[GraphEvents.TOOL_END]: new ToolEndHandler(),
|
|
28
32
|
[GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
|
|
29
33
|
[GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
|
|
30
34
|
[GraphEvents.ON_RUN_STEP_COMPLETED]: {
|
|
31
|
-
handle: (
|
|
35
|
+
handle: (
|
|
36
|
+
event: GraphEvents.ON_RUN_STEP_COMPLETED,
|
|
37
|
+
data: t.StreamEventData
|
|
38
|
+
): void => {
|
|
32
39
|
console.log('====== ON_RUN_STEP_COMPLETED ======');
|
|
33
|
-
aggregateContent({
|
|
34
|
-
|
|
40
|
+
aggregateContent({
|
|
41
|
+
event,
|
|
42
|
+
data: data as unknown as { result: t.ToolEndEvent },
|
|
43
|
+
});
|
|
44
|
+
},
|
|
35
45
|
},
|
|
36
46
|
[GraphEvents.ON_RUN_STEP]: {
|
|
37
47
|
handle: (event: GraphEvents.ON_RUN_STEP, data: t.RunStep) => {
|
|
@@ -39,29 +49,38 @@ async function testThinking(): Promise<void> {
|
|
|
39
49
|
},
|
|
40
50
|
},
|
|
41
51
|
[GraphEvents.ON_RUN_STEP_DELTA]: {
|
|
42
|
-
handle: (
|
|
52
|
+
handle: (
|
|
53
|
+
event: GraphEvents.ON_RUN_STEP_DELTA,
|
|
54
|
+
data: t.RunStepDeltaEvent
|
|
55
|
+
) => {
|
|
43
56
|
aggregateContent({ event, data });
|
|
44
57
|
},
|
|
45
58
|
},
|
|
46
59
|
[GraphEvents.ON_MESSAGE_DELTA]: {
|
|
47
|
-
handle: (
|
|
60
|
+
handle: (
|
|
61
|
+
event: GraphEvents.ON_MESSAGE_DELTA,
|
|
62
|
+
data: t.MessageDeltaEvent
|
|
63
|
+
) => {
|
|
48
64
|
aggregateContent({ event, data });
|
|
49
65
|
},
|
|
50
66
|
},
|
|
51
67
|
[GraphEvents.ON_REASONING_DELTA]: {
|
|
52
|
-
handle: (
|
|
68
|
+
handle: (
|
|
69
|
+
event: GraphEvents.ON_REASONING_DELTA,
|
|
70
|
+
data: t.ReasoningDeltaEvent
|
|
71
|
+
) => {
|
|
53
72
|
aggregateContent({ event, data });
|
|
54
73
|
},
|
|
55
74
|
},
|
|
56
75
|
};
|
|
57
76
|
|
|
58
77
|
const baseLlmConfig: t.LLMConfig = getLLMConfig(Providers.ANTHROPIC);
|
|
59
|
-
|
|
78
|
+
|
|
60
79
|
// Enable thinking with token budget
|
|
61
80
|
const llmConfig = {
|
|
62
81
|
...baseLlmConfig,
|
|
63
82
|
model: 'claude-3-7-sonnet-latest',
|
|
64
|
-
thinking: { type:
|
|
83
|
+
thinking: { type: 'enabled', budget_tokens: 2000 },
|
|
65
84
|
};
|
|
66
85
|
|
|
67
86
|
const run = await Run.create<t.IState>({
|
|
@@ -93,7 +112,7 @@ async function testThinking(): Promise<void> {
|
|
|
93
112
|
console.log('Running first query with thinking enabled...');
|
|
94
113
|
const firstInputs = { messages: [...conversationHistory] };
|
|
95
114
|
await run.processStream(firstInputs, config);
|
|
96
|
-
|
|
115
|
+
|
|
97
116
|
// Extract and display thinking blocks
|
|
98
117
|
const finalMessages = run.getRunMessages();
|
|
99
118
|
|
|
@@ -101,30 +120,32 @@ async function testThinking(): Promise<void> {
|
|
|
101
120
|
console.log('\n\nTest 2: Multi-turn conversation with thinking enabled');
|
|
102
121
|
const userMessage2 = `Given your previous analysis, what would be the most significant technical challenges in making this transition?`;
|
|
103
122
|
conversationHistory.push(new HumanMessage(userMessage2));
|
|
104
|
-
|
|
123
|
+
|
|
105
124
|
console.log('Running second query with thinking enabled...');
|
|
106
125
|
const secondInputs = { messages: [...conversationHistory] };
|
|
107
126
|
await run.processStream(secondInputs, config);
|
|
108
|
-
|
|
127
|
+
|
|
109
128
|
// Display thinking blocks for second response
|
|
110
129
|
const finalMessages2 = run.getRunMessages();
|
|
111
130
|
|
|
112
131
|
// Test 3: Redacted thinking mode
|
|
113
132
|
console.log('\n\nTest 3: Redacted thinking mode');
|
|
114
|
-
const magicString =
|
|
133
|
+
const magicString =
|
|
134
|
+
'ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB';
|
|
115
135
|
const userMessage3 = `${magicString}\n\nExplain how quantum computing works in simple terms.`;
|
|
116
|
-
|
|
136
|
+
|
|
117
137
|
// Reset conversation for clean test
|
|
118
138
|
conversationHistory.length = 0;
|
|
119
139
|
conversationHistory.push(new HumanMessage(userMessage3));
|
|
120
|
-
|
|
140
|
+
|
|
121
141
|
console.log('Running query with redacted thinking...');
|
|
122
142
|
const thirdInputs = { messages: [...conversationHistory] };
|
|
123
143
|
await run.processStream(thirdInputs, config);
|
|
124
|
-
|
|
144
|
+
|
|
125
145
|
// Display redacted thinking blocks
|
|
126
146
|
const finalMessages3 = run.getRunMessages();
|
|
127
147
|
console.log('\n\nThinking feature test completed!');
|
|
148
|
+
console.dir(finalMessages3, { depth: null });
|
|
128
149
|
}
|
|
129
150
|
|
|
130
151
|
process.on('unhandledRejection', (reason, promise) => {
|
|
@@ -147,4 +168,4 @@ testThinking().catch((err) => {
|
|
|
147
168
|
console.log('Content parts:');
|
|
148
169
|
console.dir(_contentParts, { depth: null });
|
|
149
170
|
process.exit(1);
|
|
150
|
-
});
|
|
171
|
+
});
|
package/src/scripts/tools.ts
CHANGED
|
@@ -18,9 +18,13 @@ async function testStandardStreaming(): Promise<void> {
|
|
|
18
18
|
const { userName, location, provider, currentDate } = await getArgs();
|
|
19
19
|
const { contentParts, aggregateContent } = createContentAggregator();
|
|
20
20
|
const customHandlers = {
|
|
21
|
-
[GraphEvents.TOOL_END]: new ToolEndHandler(
|
|
22
|
-
|
|
23
|
-
|
|
21
|
+
[GraphEvents.TOOL_END]: new ToolEndHandler(
|
|
22
|
+
undefined,
|
|
23
|
+
undefined,
|
|
24
|
+
(name?: string) => {
|
|
25
|
+
return true;
|
|
26
|
+
}
|
|
27
|
+
),
|
|
24
28
|
[GraphEvents.CHAT_MODEL_END]: {
|
|
25
29
|
handle: (
|
|
26
30
|
_event: string,
|
package/src/stream.ts
CHANGED
|
@@ -339,7 +339,8 @@ hasToolCallChunks: ${hasToolCallChunks}
|
|
|
339
339
|
(c) =>
|
|
340
340
|
(c.type?.startsWith(ContentTypes.THINKING) ?? false) ||
|
|
341
341
|
(c.type?.startsWith(ContentTypes.REASONING) ?? false) ||
|
|
342
|
-
(c.type?.startsWith(ContentTypes.REASONING_CONTENT) ?? false)
|
|
342
|
+
(c.type?.startsWith(ContentTypes.REASONING_CONTENT) ?? false) ||
|
|
343
|
+
c.type === 'redacted_thinking'
|
|
343
344
|
)
|
|
344
345
|
) {
|
|
345
346
|
await graph.dispatchReasoningDelta(stepId, {
|
|
@@ -365,7 +366,8 @@ hasToolCallChunks: ${hasToolCallChunks}
|
|
|
365
366
|
Array.isArray(chunk.content) &&
|
|
366
367
|
(chunk.content[0]?.type === ContentTypes.THINKING ||
|
|
367
368
|
chunk.content[0]?.type === ContentTypes.REASONING ||
|
|
368
|
-
chunk.content[0]?.type === ContentTypes.REASONING_CONTENT
|
|
369
|
+
chunk.content[0]?.type === ContentTypes.REASONING_CONTENT ||
|
|
370
|
+
chunk.content[0]?.type === 'redacted_thinking')
|
|
369
371
|
) {
|
|
370
372
|
reasoning_content = 'valid';
|
|
371
373
|
} else if (
|