@librechat/agents 3.0.776 → 3.1.0

This diff shows the changes between the two published package versions as they appear in their public registry and is provided for informational purposes only.
Files changed (42)
  1. package/dist/cjs/graphs/Graph.cjs +19 -5
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/llm/bedrock/index.cjs +98 -25
  4. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  5. package/dist/cjs/messages/core.cjs +1 -1
  6. package/dist/cjs/messages/core.cjs.map +1 -1
  7. package/dist/cjs/stream.cjs +4 -2
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/cjs/tools/ToolNode.cjs +9 -5
  10. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  11. package/dist/esm/graphs/Graph.mjs +19 -5
  12. package/dist/esm/graphs/Graph.mjs.map +1 -1
  13. package/dist/esm/llm/bedrock/index.mjs +97 -24
  14. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  15. package/dist/esm/messages/core.mjs +1 -1
  16. package/dist/esm/messages/core.mjs.map +1 -1
  17. package/dist/esm/stream.mjs +4 -2
  18. package/dist/esm/stream.mjs.map +1 -1
  19. package/dist/esm/tools/ToolNode.mjs +9 -5
  20. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  21. package/dist/types/llm/bedrock/index.d.ts +86 -7
  22. package/dist/types/llm/bedrock/types.d.ts +27 -0
  23. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  24. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  25. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  26. package/dist/types/types/tools.d.ts +2 -0
  27. package/package.json +5 -2
  28. package/src/graphs/Graph.ts +23 -5
  29. package/src/llm/bedrock/index.ts +180 -43
  30. package/src/llm/bedrock/llm.spec.ts +616 -0
  31. package/src/llm/bedrock/types.ts +51 -0
  32. package/src/llm/bedrock/utils/index.ts +18 -0
  33. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  34. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  35. package/src/messages/core.ts +1 -1
  36. package/src/scripts/code_exec_multi_session.ts +241 -0
  37. package/src/scripts/thinking-bedrock.ts +159 -0
  38. package/src/scripts/thinking.ts +39 -18
  39. package/src/scripts/tools.ts +7 -3
  40. package/src/stream.ts +4 -2
  41. package/src/tools/ToolNode.ts +9 -5
  42. package/src/types/tools.ts +2 -0
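
The spec added in this release (the llm.spec.ts hunk below) exercises two new CustomChatBedrockConverse constructor options: applicationInferenceProfile, which is sent as the ConverseCommand modelId in place of the model ID, and serviceTier, which surfaces as { type: ... } in invocationParams and can be overridden per call. A minimal usage sketch follows; the import paths mirror the spec file, and the region, model ID, and ARN are illustrative placeholders taken from the tests, not values shipped by the package.

import { HumanMessage } from '@langchain/core/messages';
import { CustomChatBedrockConverse } from './index'; // path as used in llm.spec.ts

// Sketch only: values below are placeholders borrowed from the test file.
const model = new CustomChatBedrockConverse({
  region: 'us-east-1',
  model: 'anthropic.claude-3-haiku-20240307-v1:0',
  // When set, this ARN replaces `model` as the ConverseCommand modelId.
  applicationInferenceProfile:
    'arn:aws:bedrock:eu-west-1:123456789012:application-inference-profile/test-profile',
  // Surfaces as { type: 'priority' } in invocationParams; call options can override it.
  serviceTier: 'priority',
});

const res = await model.invoke([new HumanMessage('Hello')]);
console.log(res.content);
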
package/src/llm/bedrock/llm.spec.ts
@@ -0,0 +1,616 @@
+ /* eslint-disable no-process-env */
+ /* eslint-disable @typescript-eslint/no-explicit-any */
+ import { config } from 'dotenv';
+ config();
+ import { expect, test, describe, jest } from '@jest/globals';
+ import {
+   AIMessage,
+   AIMessageChunk,
+   HumanMessage,
+   SystemMessage,
+   ToolMessage,
+ } from '@langchain/core/messages';
+ import { concat } from '@langchain/core/utils/stream';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
+ import { CustomChatBedrockConverse, ServiceTierType } from './index';
+ import { convertToConverseMessages } from './utils';
+
+ jest.setTimeout(120000);
+
+ // Base constructor args for tests
+ const baseConstructorArgs = {
+   region: 'us-east-1',
+   credentials: {
+     secretAccessKey: 'test-secret-key',
+     accessKeyId: 'test-access-key',
+   },
+ };
+
+ describe('CustomChatBedrockConverse', () => {
+   describe('applicationInferenceProfile parameter', () => {
+     test('should initialize applicationInferenceProfile from constructor', () => {
+       const testArn =
+         'arn:aws:bedrock:eu-west-1:123456789012:application-inference-profile/test-profile';
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         model: 'anthropic.claude-3-haiku-20240307-v1:0',
+         applicationInferenceProfile: testArn,
+       });
+       expect(model.model).toBe('anthropic.claude-3-haiku-20240307-v1:0');
+       expect(model.applicationInferenceProfile).toBe(testArn);
+     });
+
+     test('should be undefined when not provided in constructor', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         model: 'anthropic.claude-3-haiku-20240307-v1:0',
+       });
+       expect(model.model).toBe('anthropic.claude-3-haiku-20240307-v1:0');
+       expect(model.applicationInferenceProfile).toBeUndefined();
+     });
+
+     test('should send applicationInferenceProfile as modelId in ConverseCommand when provided', async () => {
+       const testArn =
+         'arn:aws:bedrock:eu-west-1:123456789012:application-inference-profile/test-profile';
+       const mockSend = jest.fn<any>().mockResolvedValue({
+         output: {
+           message: {
+             role: 'assistant',
+             content: [{ text: 'Test response' }],
+           },
+         },
+         stopReason: 'end_turn',
+         usage: {
+           inputTokens: 10,
+           outputTokens: 5,
+           totalTokens: 15,
+         },
+       });
+
+       const mockClient = {
+         send: mockSend,
+       } as unknown as BedrockRuntimeClient;
+
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         model: 'anthropic.claude-3-haiku-20240307-v1:0',
+         applicationInferenceProfile: testArn,
+         client: mockClient,
+       });
+
+       await model.invoke([new HumanMessage('Hello')]);
+
+       expect(mockSend).toHaveBeenCalledTimes(1);
+       const commandArg = mockSend.mock.calls[0][0] as {
+         input: { modelId: string };
+       };
+       expect(commandArg.input.modelId).toBe(testArn);
+       expect(commandArg.input.modelId).not.toBe(
+         'anthropic.claude-3-haiku-20240307-v1:0'
+       );
+     });
+
+     test('should send model as modelId in ConverseCommand when applicationInferenceProfile is not provided', async () => {
+       const mockSend = jest.fn<any>().mockResolvedValue({
+         output: {
+           message: {
+             role: 'assistant',
+             content: [{ text: 'Test response' }],
+           },
+         },
+         stopReason: 'end_turn',
+         usage: {
+           inputTokens: 10,
+           outputTokens: 5,
+           totalTokens: 15,
+         },
+       });
+
+       const mockClient = {
+         send: mockSend,
+       } as unknown as BedrockRuntimeClient;
+
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         model: 'anthropic.claude-3-haiku-20240307-v1:0',
+         client: mockClient,
+       });
+
+       await model.invoke([new HumanMessage('Hello')]);
+
+       expect(mockSend).toHaveBeenCalledTimes(1);
+       const commandArg = mockSend.mock.calls[0][0] as {
+         input: { modelId: string };
+       };
+       expect(commandArg.input.modelId).toBe(
+         'anthropic.claude-3-haiku-20240307-v1:0'
+       );
+     });
+   });
+
+   describe('serviceTier configuration', () => {
+     test('should set serviceTier in constructor', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         serviceTier: 'priority',
+       });
+       expect(model.serviceTier).toBe('priority');
+     });
+
+     test('should set serviceTier as undefined when not provided', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+       });
+       expect(model.serviceTier).toBeUndefined();
+     });
+
+     test.each(['priority', 'default', 'flex', 'reserved'])(
+       'should include serviceTier in invocationParams when set to %s',
+       (serviceTier) => {
+         const model = new CustomChatBedrockConverse({
+           ...baseConstructorArgs,
+           serviceTier: serviceTier as ServiceTierType,
+         });
+         const params = model.invocationParams({});
+         expect(params.serviceTier).toEqual({ type: serviceTier });
+       }
+     );
+
+     test('should not include serviceTier in invocationParams when not set', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+       });
+       const params = model.invocationParams({});
+       expect(params.serviceTier).toBeUndefined();
+     });
+
+     test('should override serviceTier from call options in invocationParams', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         serviceTier: 'default',
+       });
+       const params = model.invocationParams({
+         serviceTier: 'priority',
+       });
+       expect(params.serviceTier).toEqual({ type: 'priority' });
+     });
+
+     test('should use class-level serviceTier when call options do not override it', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         serviceTier: 'flex',
+       });
+       const params = model.invocationParams({});
+       expect(params.serviceTier).toEqual({ type: 'flex' });
+     });
+
+     test('should handle serviceTier in invocationParams with other config options', () => {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         serviceTier: 'reserved',
+         temperature: 0.5,
+         maxTokens: 100,
+       });
+       const params = model.invocationParams({
+         stop: ['stop_sequence'],
+       });
+       expect(params.serviceTier).toEqual({ type: 'reserved' });
+       expect(params.inferenceConfig?.temperature).toBe(0.5);
+       expect(params.inferenceConfig?.maxTokens).toBe(100);
+       expect(params.inferenceConfig?.stopSequences).toEqual(['stop_sequence']);
+     });
+   });
+
+   describe('contentBlockIndex cleanup', () => {
+     // Access private methods for testing via any cast
+     function getModelWithCleanMethods() {
+       const model = new CustomChatBedrockConverse({
+         ...baseConstructorArgs,
+         model: 'anthropic.claude-3-haiku-20240307-v1:0',
+       });
+       return model as any;
+     }
+
+     test('should detect contentBlockIndex at top level', () => {
+       const model = getModelWithCleanMethods();
+       const objWithIndex = { contentBlockIndex: 0, text: 'hello' };
+       const objWithoutIndex = { text: 'hello' };
+
+       expect(model.hasContentBlockIndex(objWithIndex)).toBe(true);
+       expect(model.hasContentBlockIndex(objWithoutIndex)).toBe(false);
+     });
+
+     test('should detect contentBlockIndex in nested objects', () => {
+       const model = getModelWithCleanMethods();
+       const nestedWithIndex = {
+         outer: {
+           inner: {
+             contentBlockIndex: 1,
+             data: 'test',
+           },
+         },
+       };
+       const nestedWithoutIndex = {
+         outer: {
+           inner: {
+             data: 'test',
+           },
+         },
+       };
+
+       expect(model.hasContentBlockIndex(nestedWithIndex)).toBe(true);
+       expect(model.hasContentBlockIndex(nestedWithoutIndex)).toBe(false);
+     });
+
+     test('should return false for null, undefined, and primitives', () => {
+       const model = getModelWithCleanMethods();
+
+       expect(model.hasContentBlockIndex(null)).toBe(false);
+       expect(model.hasContentBlockIndex(undefined)).toBe(false);
+       expect(model.hasContentBlockIndex('string')).toBe(false);
+       expect(model.hasContentBlockIndex(123)).toBe(false);
+       expect(model.hasContentBlockIndex(true)).toBe(false);
+     });
+
+     test('should remove contentBlockIndex from top level', () => {
+       const model = getModelWithCleanMethods();
+       const obj = {
+         contentBlockIndex: 0,
+         text: 'hello',
+         other: 'data',
+       };
+
+       const cleaned = model.removeContentBlockIndex(obj);
+
+       expect(cleaned).toEqual({ text: 'hello', other: 'data' });
+       expect(cleaned.contentBlockIndex).toBeUndefined();
+     });
+
+     test('should remove contentBlockIndex from nested objects', () => {
+       const model = getModelWithCleanMethods();
+       const obj = {
+         outer: {
+           contentBlockIndex: 1,
+           inner: {
+             contentBlockIndex: 2,
+             data: 'test',
+           },
+         },
+         topLevel: 'value',
+       };
+
+       const cleaned = model.removeContentBlockIndex(obj);
+
+       expect(cleaned).toEqual({
+         outer: {
+           inner: {
+             data: 'test',
+           },
+         },
+         topLevel: 'value',
+       });
+     });
+
+     test('should handle arrays when removing contentBlockIndex', () => {
+       const model = getModelWithCleanMethods();
+       const obj = {
+         items: [
+           { contentBlockIndex: 0, text: 'first' },
+           { contentBlockIndex: 1, text: 'second' },
+         ],
+       };
+
+       const cleaned = model.removeContentBlockIndex(obj);
+
+       expect(cleaned).toEqual({
+         items: [{ text: 'first' }, { text: 'second' }],
+       });
+     });
+
+     test('should preserve null and undefined values', () => {
+       const model = getModelWithCleanMethods();
+
+       expect(model.removeContentBlockIndex(null)).toBeNull();
+       expect(model.removeContentBlockIndex(undefined)).toBeUndefined();
+     });
+
+     test('cleanChunk should remove contentBlockIndex from AIMessageChunk response_metadata', () => {
+       const model = getModelWithCleanMethods();
+
+       const chunkWithIndex = new ChatGenerationChunk({
+         text: 'Hello',
+         message: new AIMessageChunk({
+           content: 'Hello',
+           response_metadata: {
+             contentBlockIndex: 0,
+             stopReason: null,
+           },
+         }),
+       });
+
+       const cleaned = model.cleanChunk(chunkWithIndex);
+
+       expect(cleaned.message.response_metadata).toEqual({
+         stopReason: null,
+       });
+       expect(
+         (cleaned.message.response_metadata as any).contentBlockIndex
+       ).toBeUndefined();
+       expect(cleaned.text).toBe('Hello');
+     });
+
+     test('cleanChunk should pass through chunks without contentBlockIndex unchanged', () => {
+       const model = getModelWithCleanMethods();
+
+       const chunkWithoutIndex = new ChatGenerationChunk({
+         text: 'Hello',
+         message: new AIMessageChunk({
+           content: 'Hello',
+           response_metadata: {
+             stopReason: 'end_turn',
+             usage: { inputTokens: 10, outputTokens: 5 },
+           },
+         }),
+       });
+
+       const cleaned = model.cleanChunk(chunkWithoutIndex);
+
+       expect(cleaned.message.response_metadata).toEqual({
+         stopReason: 'end_turn',
+         usage: { inputTokens: 10, outputTokens: 5 },
+       });
+     });
+
+     test('cleanChunk should handle deeply nested contentBlockIndex in response_metadata', () => {
+       const model = getModelWithCleanMethods();
+
+       const chunkWithNestedIndex = new ChatGenerationChunk({
+         text: 'Test',
+         message: new AIMessageChunk({
+           content: 'Test',
+           response_metadata: {
+             amazon: {
+               bedrock: {
+                 contentBlockIndex: 0,
+                 trace: { something: 'value' },
+               },
+             },
+             otherData: 'preserved',
+           },
+         }),
+       });
+
+       const cleaned = model.cleanChunk(chunkWithNestedIndex);
+
+       expect(cleaned.message.response_metadata).toEqual({
+         amazon: {
+           bedrock: {
+             trace: { something: 'value' },
+           },
+         },
+         otherData: 'preserved',
+       });
+     });
+   });
+ });
+
+ describe('convertToConverseMessages', () => {
+   test('should convert basic messages', () => {
+     const { converseMessages, converseSystem } = convertToConverseMessages([
+       new SystemMessage("You're an AI assistant."),
+       new HumanMessage('Hello!'),
+     ]);
+
+     expect(converseSystem).toEqual([{ text: "You're an AI assistant." }]);
+     expect(converseMessages).toHaveLength(1);
+     expect(converseMessages[0].role).toBe('user');
+     expect(converseMessages[0].content).toEqual([{ text: 'Hello!' }]);
+   });
+
+   test('should handle standard v1 format with tool_call blocks (e.g., from Anthropic provider)', () => {
+     const { converseMessages, converseSystem } = convertToConverseMessages([
+       new SystemMessage("You're an advanced AI assistant."),
+       new HumanMessage("What's the weather in SF?"),
+       new AIMessage({
+         content: [
+           { type: 'text', text: 'Let me check the weather for you.' },
+           {
+             type: 'tool_call',
+             id: 'call_123',
+             name: 'get_weather',
+             args: { location: 'San Francisco' },
+           },
+         ],
+         response_metadata: {
+           output_version: 'v1',
+           model_provider: 'anthropic',
+         },
+       }),
+       new ToolMessage({
+         tool_call_id: 'call_123',
+         content: '72°F and sunny',
+       }),
+     ]);
+
+     expect(converseSystem).toEqual([
+       { text: "You're an advanced AI assistant." },
+     ]);
+     expect(converseMessages).toHaveLength(3);
+
+     // Check user message
+     expect(converseMessages[0].role).toBe('user');
+     expect(converseMessages[0].content).toEqual([
+       { text: "What's the weather in SF?" },
+     ]);
+
+     // Check AI message with tool use
+     expect(converseMessages[1].role).toBe('assistant');
+     expect(converseMessages[1].content).toHaveLength(2);
+     expect(converseMessages[1].content?.[0]).toEqual({
+       text: 'Let me check the weather for you.',
+     });
+     expect(converseMessages[1].content?.[1]).toEqual({
+       toolUse: {
+         toolUseId: 'call_123',
+         name: 'get_weather',
+         input: { location: 'San Francisco' },
+       },
+     });
+
+     // Check tool result
+     expect(converseMessages[2].role).toBe('user');
+     expect(converseMessages[2].content).toHaveLength(1);
+     expect((converseMessages[2].content?.[0] as any).toolResult).toBeDefined();
+     expect((converseMessages[2].content?.[0] as any).toolResult.toolUseId).toBe(
+       'call_123'
+     );
+   });
+
+   test('should handle standard v1 format with reasoning blocks (e.g., from Anthropic provider)', () => {
+     const { converseMessages, converseSystem } = convertToConverseMessages([
+       new SystemMessage("You're an advanced AI assistant."),
+       new HumanMessage('What is 2+2?'),
+       new AIMessage({
+         content: [
+           {
+             type: 'reasoning',
+             reasoning: 'I need to add 2 and 2 together.',
+           },
+           { type: 'text', text: 'The answer is 4.' },
+         ],
+         response_metadata: {
+           output_version: 'v1',
+           model_provider: 'anthropic',
+         },
+       }),
+       new HumanMessage('Thanks! What about 3+3?'),
+     ]);
+
+     expect(converseSystem).toEqual([
+       { text: "You're an advanced AI assistant." },
+     ]);
+     expect(converseMessages).toHaveLength(3);
+
+     // Check AI message with reasoning
+     expect(converseMessages[1].role).toBe('assistant');
+     expect(converseMessages[1].content).toHaveLength(2);
+     expect(
+       (converseMessages[1].content?.[0] as any).reasoningContent
+     ).toBeDefined();
+     expect(
+       (converseMessages[1].content?.[0] as any).reasoningContent.reasoningText
+         .text
+     ).toBe('I need to add 2 and 2 together.');
+     expect(converseMessages[1].content?.[1]).toEqual({
+       text: 'The answer is 4.',
+     });
+   });
+
+   test('should handle messages without v1 format', () => {
+     const { converseMessages } = convertToConverseMessages([
+       new HumanMessage('Hello'),
+       new AIMessage({
+         content: 'Hi there!',
+         tool_calls: [],
+       }),
+     ]);
+
+     expect(converseMessages).toHaveLength(2);
+     expect(converseMessages[1].role).toBe('assistant');
+     expect(converseMessages[1].content).toEqual([{ text: 'Hi there!' }]);
+   });
+
+   test('should combine consecutive tool result messages', () => {
+     const { converseMessages } = convertToConverseMessages([
+       new HumanMessage('Get weather for SF and NYC'),
+       new AIMessage({
+         content: 'I will check both cities.',
+         tool_calls: [
+           { id: 'call_1', name: 'get_weather', args: { city: 'SF' } },
+           { id: 'call_2', name: 'get_weather', args: { city: 'NYC' } },
+         ],
+       }),
+       new ToolMessage({
+         tool_call_id: 'call_1',
+         content: 'SF: 72°F',
+       }),
+       new ToolMessage({
+         tool_call_id: 'call_2',
+         content: 'NYC: 65°F',
+       }),
+     ]);
+
+     // Tool messages should be combined into one user message
+     expect(converseMessages).toHaveLength(3);
+     const toolResultMessage = converseMessages[2];
+     expect(toolResultMessage.role).toBe('user');
+     expect(toolResultMessage.content).toHaveLength(2);
+     expect((toolResultMessage.content?.[0] as any).toolResult.toolUseId).toBe(
+       'call_1'
+     );
+     expect((toolResultMessage.content?.[1] as any).toolResult.toolUseId).toBe(
+       'call_2'
+     );
+   });
+ });
+
+ // Integration tests (require AWS credentials)
+ describe.skip('Integration tests', () => {
+   const integrationArgs = {
+     region: process.env.BEDROCK_AWS_REGION ?? 'us-east-1',
+     credentials: {
+       secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
+       accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
+     },
+   };
+
+   test('basic invoke', async () => {
+     const model = new CustomChatBedrockConverse({
+       ...integrationArgs,
+       model: 'anthropic.claude-3-haiku-20240307-v1:0',
+       maxRetries: 0,
+     });
+     const message = new HumanMessage('Hello!');
+     const res = await model.invoke([message]);
+     expect(res.response_metadata.usage).toBeDefined();
+   });
+
+   test('basic streaming', async () => {
+     const model = new CustomChatBedrockConverse({
+       ...integrationArgs,
+       model: 'anthropic.claude-3-haiku-20240307-v1:0',
+       maxRetries: 0,
+     });
+
+     let fullMessage: AIMessageChunk | undefined;
+     for await (const chunk of await model.stream('Hello!')) {
+       fullMessage = fullMessage ? concat(fullMessage, chunk) : chunk;
+     }
+
+     expect(fullMessage).toBeDefined();
+     expect(fullMessage?.content).toBeDefined();
+   });
+
+   test('with thinking/reasoning enabled', async () => {
+     const model = new CustomChatBedrockConverse({
+       ...integrationArgs,
+       model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
+       maxTokens: 5000,
+       additionalModelRequestFields: {
+         thinking: { type: 'enabled', budget_tokens: 2000 },
+       },
+     });
+
+     const result = await model.invoke('What is 2 + 2?');
+     expect(result.content).toBeDefined();
+
+     // Should have reasoning content if the model supports it
+     if (Array.isArray(result.content)) {
+       const reasoningBlocks = result.content.filter(
+         (b: any) => b.type === 'reasoning_content' || b.type === 'reasoning'
+       );
+       expect(reasoningBlocks.length).toBeGreaterThanOrEqual(0);
+     }
+   });
+ });
package/src/llm/bedrock/types.ts
@@ -0,0 +1,51 @@
+ /**
+  * Type definitions for Bedrock Converse utilities.
+  */
+ import type {
+   Message as BedrockMessage,
+   SystemContentBlock as BedrockSystemContentBlock,
+   ContentBlock as BedrockContentBlock,
+   ConverseResponse,
+   ContentBlockDeltaEvent,
+   ConverseStreamMetadataEvent,
+   ContentBlockStartEvent,
+   ReasoningContentBlock,
+   ReasoningContentBlockDelta,
+ } from '@aws-sdk/client-bedrock-runtime';
+
+ /**
+  * Reasoning content block type for LangChain messages.
+  */
+ export interface MessageContentReasoningBlock {
+   type: 'reasoning_content';
+   reasoningText?: {
+     text?: string;
+     signature?: string;
+   };
+   redactedContent?: string;
+ }
+
+ export interface MessageContentReasoningBlockReasoningTextPartial {
+   type: 'reasoning_content';
+   reasoningText: {
+     text?: string;
+     signature?: string;
+   };
+ }
+
+ export interface MessageContentReasoningBlockRedacted {
+   type: 'reasoning_content';
+   redactedContent: string;
+ }
+
+ export type {
+   BedrockMessage,
+   BedrockSystemContentBlock,
+   BedrockContentBlock,
+   ConverseResponse,
+   ContentBlockDeltaEvent,
+   ConverseStreamMetadataEvent,
+   ContentBlockStartEvent,
+   ReasoningContentBlock,
+   ReasoningContentBlockDelta,
+ };
package/src/llm/bedrock/utils/index.ts
@@ -0,0 +1,18 @@
+ /**
+  * Bedrock Converse utility exports.
+  */
+ export {
+   convertToConverseMessages,
+   extractImageInfo,
+   langchainReasoningBlockToBedrockReasoningBlock,
+   concatenateLangchainReasoningBlocks,
+ } from './message_inputs';
+
+ export {
+   convertConverseMessageToLangChainMessage,
+   handleConverseStreamContentBlockStart,
+   handleConverseStreamContentBlockDelta,
+   handleConverseStreamMetadata,
+   bedrockReasoningBlockToLangchainReasoningBlock,
+   bedrockReasoningDeltaToLangchainPartialReasoningBlock,
+ } from './message_outputs';