bedrock-wrapper 2.3.1 → 2.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,6 +1,16 @@
1
1
  # Changelog
2
2
  All notable changes to this project will be documented in this file.
3
3
 
4
+ ## [2.4.0] - 2025-07-24 (AWS Nova Models)
5
+ ### Added
6
+ - Support for AWS Nova models
7
+ - Nova-Pro (300K context, multimodal, 5K output tokens)
8
+ - Nova-Lite (300K context, multimodal, optimized for speed)
9
+ - Nova-Micro (128K context, text-only, lowest latency)
10
+ - Nova-specific API format handling with schemaVersion "messages-v1"
11
+ - Proper inferenceConfig parameter structure for Nova models
12
+ - Automatic content array formatting for Nova message compatibility
13
+
4
14
  ## [2.3.1] - 2025-05-22 (Claude 4 Opus / Sonnet)
5
15
  ### Added
6
16
  - Support for Claude 4 Opus & Claude 4 Sonnet models
package/README.md CHANGED
@@ -103,12 +103,19 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
103
103
 
104
104
  | modelName | AWS Model Id | Image |
105
105
  |----------------------------|----------------------------------------------|-------|
106
+ | Claude-4-Opus | us.anthropic.claude-opus-4-20250514-v1:0 | ✅ |
107
+ | Claude-4-Opus-Thinking | us.anthropic.claude-opus-4-20250514-v1:0 | ✅ |
108
+ | Claude-4-Sonnet | us.anthropic.claude-sonnet-4-20250514-v1:0 | ✅ |
109
+ | Claude-4-Sonnet-Thinking | us.anthropic.claude-sonnet-4-20250514-v1:0 | ✅ |
106
110
  | Claude-3-7-Sonnet-Thinking | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | ✅ |
107
111
  | Claude-3-7-Sonnet | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | ✅ |
108
112
  | Claude-3-5-Sonnet-v2 | anthropic.claude-3-5-sonnet-20241022-v2:0 | ✅ |
109
113
  | Claude-3-5-Sonnet | anthropic.claude-3-5-sonnet-20240620-v1:0 | ✅ |
110
114
  | Claude-3-5-Haiku | anthropic.claude-3-5-haiku-20241022-v1:0 | ❌ |
111
- | Claude-3-Haiku | anthropic.claude-3-haiku-20240307-v1:0 | |
115
+ | Claude-3-Haiku | anthropic.claude-3-haiku-20240307-v1:0 | ✅ |
116
+ | Nova-Pro | us.amazon.nova-pro-v1:0 | ✅ |
117
+ | Nova-Lite | us.amazon.nova-lite-v1:0 | ✅ |
118
+ | Nova-Micro | us.amazon.nova-micro-v1:0 | ❌ |
112
119
  | Llama-3-3-70b | us.meta.llama3-3-70b-instruct-v1:0 | ❌ |
113
120
  | Llama-3-2-1b | us.meta.llama3-2-1b-instruct-v1:0 | ❌ |
114
121
  | Llama-3-2-3b | us.meta.llama3-2-3b-instruct-v1:0 | ❌ |
@@ -136,7 +143,7 @@ Please modify the `bedrock_models.js` file and submit a PR 🏆 or create an Iss
136
143
 
137
144
  ### Image Support
138
145
 
139
- For models with image support (Claude 3.5 Sonnet, Claude 3.7 Sonnet, and Claude 3.7 Sonnet Thinking), you can include images in your messages using the following format:
146
+ For models with image support (Claude 4 series, Claude 3.7 Sonnet, Claude 3.5 Sonnet, Claude 3 Haiku, Nova Pro, and Nova Lite), you can include images in your messages using the following format:
140
147
 
141
148
  ```javascript
142
149
  messages = [
package/bedrock-models.js CHANGED
@@ -248,7 +248,7 @@ export const bedrock_models = [
248
248
  // ====================
249
249
  "modelName": "Claude-3-Haiku",
250
250
  "modelId": "anthropic.claude-3-haiku-20240307-v1:0",
251
- "vision": false,
251
+ "vision": true,
252
252
  "messages_api": true,
253
253
  "system_as_separate_field": true,
254
254
  "display_role_names": true,
@@ -540,6 +540,73 @@ export const bedrock_models = [
540
540
  "max_supported_response_tokens": 2048,
541
541
  "response_chunk_element": "generation"
542
542
  },
543
+ {
544
+ // ===============
545
+ // == Nova Pro ==
546
+ // ===============
547
+ "modelName": "Nova-Pro",
548
+ "modelId": "us.amazon.nova-pro-v1:0",
549
+ "vision": true,
550
+ "messages_api": true,
551
+ "system_as_separate_field": true,
552
+ "display_role_names": true,
553
+ "max_tokens_param_name": "maxTokens",
554
+ "max_supported_response_tokens": 5000,
555
+ "response_chunk_element": "contentBlockDelta.delta.text",
556
+ "response_nonchunk_element": "output.message.content[0].text",
557
+ "special_request_schema": {
558
+ "schemaVersion": "messages-v1",
559
+ "inferenceConfig": {}
560
+ },
561
+ "image_support": {
562
+ "max_image_size": 5242880, // 5MB per image
563
+ "supported_formats": ["jpeg", "png", "gif", "webp"],
564
+ "max_images_per_request": 10
565
+ }
566
+ },
567
+ {
568
+ // ================
569
+ // == Nova Lite ==
570
+ // ================
571
+ "modelName": "Nova-Lite",
572
+ "modelId": "us.amazon.nova-lite-v1:0",
573
+ "vision": true,
574
+ "messages_api": true,
575
+ "system_as_separate_field": true,
576
+ "display_role_names": true,
577
+ "max_tokens_param_name": "maxTokens",
578
+ "max_supported_response_tokens": 5000,
579
+ "response_chunk_element": "contentBlockDelta.delta.text",
580
+ "response_nonchunk_element": "output.message.content[0].text",
581
+ "special_request_schema": {
582
+ "schemaVersion": "messages-v1",
583
+ "inferenceConfig": {}
584
+ },
585
+ "image_support": {
586
+ "max_image_size": 5242880, // 5MB per image
587
+ "supported_formats": ["jpeg", "png", "gif", "webp"],
588
+ "max_images_per_request": 10
589
+ }
590
+ },
591
+ {
592
+ // =================
593
+ // == Nova Micro ==
594
+ // =================
595
+ "modelName": "Nova-Micro",
596
+ "modelId": "us.amazon.nova-micro-v1:0",
597
+ "vision": false,
598
+ "messages_api": true,
599
+ "system_as_separate_field": true,
600
+ "display_role_names": true,
601
+ "max_tokens_param_name": "maxTokens",
602
+ "max_supported_response_tokens": 5000,
603
+ "response_chunk_element": "contentBlockDelta.delta.text",
604
+ "response_nonchunk_element": "output.message.content[0].text",
605
+ "special_request_schema": {
606
+ "schemaVersion": "messages-v1",
607
+ "inferenceConfig": {}
608
+ }
609
+ },
543
610
  {
544
611
  // ================
545
612
  // == Mistral-7b ==
@@ -232,14 +232,71 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
232
232
  // }
233
233
 
234
234
  // Format the request payload using the model's native structure.
235
- const request = awsModel.messages_api ? {
236
- messages: prompt,
237
- ...(awsModel.system_as_separate_field && system_message && { system: system_message }), // Only add system field if model requires it and there's a system message
238
- [awsModel.max_tokens_param_name]: max_gen_tokens,
239
- temperature: temperature,
240
- top_p: top_p,
241
- ...awsModel.special_request_schema
242
- } : {
235
+ const request = awsModel.messages_api ? (() => {
236
+ // Check if this is a Nova model (has schemaVersion in special_request_schema)
237
+ if (awsModel.special_request_schema?.schemaVersion === "messages-v1") {
238
+ // Nova model format - convert messages to Nova's expected format
239
+ const novaMessages = prompt.map(msg => {
240
+ let content;
241
+
242
+ // Convert content to array format for Nova
243
+ if (typeof msg.content === 'string') {
244
+ content = [{ text: msg.content }];
245
+ } else if (Array.isArray(msg.content)) {
246
+ // Already in array format, ensure proper structure
247
+ content = msg.content.map(item => {
248
+ if (item.type === 'text') {
249
+ return { text: item.text || item };
250
+ } else if (item.type === 'image') {
251
+ return {
252
+ image: {
253
+ format: 'jpeg',
254
+ source: {
255
+ bytes: item.source.data
256
+ }
257
+ }
258
+ };
259
+ }
260
+ return item;
261
+ });
262
+ } else {
263
+ content = [{ text: String(msg.content) }];
264
+ }
265
+
266
+ return {
267
+ role: msg.role,
268
+ content: content
269
+ };
270
+ });
271
+
272
+ const novaRequest = {
273
+ ...awsModel.special_request_schema,
274
+ messages: novaMessages,
275
+ inferenceConfig: {
276
+ [awsModel.max_tokens_param_name]: max_gen_tokens,
277
+ temperature: temperature,
278
+ topP: top_p
279
+ }
280
+ };
281
+
282
+ // Add system message if present
283
+ if (awsModel.system_as_separate_field && system_message) {
284
+ novaRequest.system = [{ text: system_message }];
285
+ }
286
+
287
+ return novaRequest;
288
+ } else {
289
+ // Standard messages API format (Claude, etc.)
290
+ return {
291
+ messages: prompt,
292
+ ...(awsModel.system_as_separate_field && system_message && { system: system_message }),
293
+ [awsModel.max_tokens_param_name]: max_gen_tokens,
294
+ temperature: temperature,
295
+ top_p: top_p,
296
+ ...awsModel.special_request_schema
297
+ };
298
+ }
299
+ })() : {
243
300
  prompt: typeof prompt === 'string' ? prompt : {
244
301
  messages: prompt.map(msg => ({
245
302
  role: msg.role,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bedrock-wrapper",
3
- "version": "2.3.1",
3
+ "version": "2.4.1",
4
4
  "description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
5
5
  "homepage": "https://www.equilllabs.com/projects/bedrock-wrapper",
6
6
  "repository": {
@@ -32,9 +32,9 @@
32
32
  "author": "",
33
33
  "license": "ISC",
34
34
  "dependencies": {
35
- "@aws-sdk/client-bedrock-runtime": "^3.816.0",
36
- "dotenv": "^16.5.0",
37
- "sharp": "^0.34.2"
35
+ "@aws-sdk/client-bedrock-runtime": "^3.848.0",
36
+ "dotenv": "^17.2.1",
37
+ "sharp": "^0.34.3"
38
38
  },
39
39
  "devDependencies": {
40
40
  "chalk": "^5.4.1"
package/test-vision.js CHANGED
@@ -1,6 +1,8 @@
1
1
  import { bedrockWrapper } from "./bedrock-wrapper.js";
2
+ import { bedrock_models } from "./bedrock-models.js";
2
3
  import dotenv from 'dotenv';
3
4
  import fs from 'fs/promises';
5
+ import chalk from 'chalk';
4
6
 
5
7
  dotenv.config();
6
8
 
@@ -10,10 +12,37 @@ const awsCreds = {
10
12
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
11
13
  };
12
14
 
15
+ async function logOutput(message, type = 'info', writeToFile = true) {
16
+ if (writeToFile) {
17
+ // Log to file
18
+ await fs.appendFile('test-vision-models-output.txt', message + '\n');
19
+ }
20
+
21
+ // Log to console with colors
22
+ switch(type) {
23
+ case 'success':
24
+ console.log(chalk.green('✓ ' + message));
25
+ break;
26
+ case 'error':
27
+ console.log(chalk.red('✗ ' + message));
28
+ break;
29
+ case 'info':
30
+ console.log(chalk.blue('ℹ ' + message));
31
+ break;
32
+ case 'running':
33
+ console.log(chalk.yellow(message));
34
+ break;
35
+ default:
36
+ console.log(message);
37
+ }
38
+ }
39
+
13
40
  async function testVisionCapabilities() {
14
41
  // Read and convert image to base64
15
42
  const imageBuffer = await fs.readFile('./test-image.jpg');
16
43
  const base64Image = imageBuffer.toString('base64');
44
+
45
+ const testPrompt = "What's in this image? Please describe it in detail.";
17
46
 
18
47
  const messages = [
19
48
  {
@@ -21,7 +50,7 @@ async function testVisionCapabilities() {
21
50
  content: [
22
51
  {
23
52
  type: "text",
24
- text: "What's in this image? Please describe it in detail."
53
+ text: testPrompt
25
54
  },
26
55
  {
27
56
  type: "image_url",
@@ -34,11 +63,25 @@ async function testVisionCapabilities() {
34
63
  }
35
64
  ];
36
65
 
37
- // Test with both Claude and Llama models that support vision
38
- const visionModels = ["Claude-3-5-Sonnet-v2", "Claude-3-7-Sonnet", "Claude-4-Sonnet", "Claude-4-Sonnet-Thinking", "Claude-4-Opus", "Claude-4-Opus-Thinking"];
66
+ // Filter vision-capable models from bedrock_models
67
+ const visionModels = bedrock_models
68
+ .filter(model => model.vision === true)
69
+ .map(model => model.modelName);
70
+
71
+ // Clear output file and add header
72
+ await fs.writeFile('test-vision-models-output.txt',
73
+ `Vision Test Results\n` +
74
+ `Test Question: "${testPrompt}"\n` +
75
+ `Test Date: ${new Date().toISOString()}\n` +
76
+ `${'='.repeat(50)}\n\n`
77
+ );
78
+
79
+ console.clear();
80
+ await logOutput(`Starting vision tests with ${visionModels.length} models...`, 'info');
81
+ await logOutput(`Testing image description capabilities\n`, 'info');
39
82
 
40
83
  for (const model of visionModels) {
41
- console.log(`\nTesting vision capabilities with ${model}...`);
84
+ await logOutput(`\n${'-'.repeat(50)}\nTesting ${model} ⇢`, 'running');
42
85
 
43
86
  const openaiChatCompletionsCreateObject = {
44
87
  messages,
@@ -49,23 +92,34 @@ async function testVisionCapabilities() {
49
92
  };
50
93
 
51
94
  try {
52
- console.log(`\nSending request to ${model} with format:`,
53
- JSON.stringify(openaiChatCompletionsCreateObject, null, 2));
95
+ console.log(`\nSending request to ${model}...`);
54
96
 
55
97
  let response = "";
56
- for await (const chunk of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: true })) {
98
+ for await (const chunk of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: false })) {
57
99
  response += chunk;
58
100
  process.stdout.write(chunk);
59
101
  }
60
- console.log("\n-------------------");
102
+
103
+ // Write successful response to file
104
+ await logOutput(`\nModel: ${model}`, 'success');
105
+ await logOutput(`Response: ${response.trim()}\n`, 'info', true);
106
+
61
107
  } catch (error) {
62
- console.error(`Error with ${model}:`, error);
63
- // Log the full error details
108
+ const errorMessage = `Error with ${model}: ${error.message}`;
109
+ await logOutput(errorMessage, 'error');
110
+
111
+ // Log the full error details to file
64
112
  if (error.response) {
65
- console.error('Response error:', error.response);
113
+ await fs.appendFile('test-vision-models-output.txt',
114
+ `Error details: ${JSON.stringify(error.response, null, 2)}\n\n`
115
+ );
66
116
  }
67
117
  }
118
+
119
+ console.log("\n-------------------");
68
120
  }
121
+
122
+ await logOutput('\nVision testing complete! Check test-vision-models-output.txt for full results.', 'info', false);
69
123
  }
70
124
 
71
- testVisionCapabilities().catch(console.error);
125
+ testVisionCapabilities().catch(console.error);
package/tea.yaml DELETED
@@ -1,6 +0,0 @@
1
- # https://tea.xyz/what-is-this-file
2
- ---
3
- version: 1.0.0
4
- codeOwners:
5
- - '0x97C7174b24d637d2624284AbaBEB17b1C04533D0'
6
- quorum: 1
@@ -1,120 +0,0 @@
1
- Test Question: "Respond with exactly one word: What is 1+1?"
2
- ==================================================
3
-
4
- Starting tests with 20 models...
5
- Each model will be tested with streaming and non-streaming calls
6
-
7
-
8
- --------------------------------------------------
9
- Testing Claude-4-Sonnet ⇢
10
- Streaming test passed for Claude-4-Sonnet: "Two"
11
- Non-streaming test passed for Claude-4-Sonnet: "<think>The human is asking me to respond with exactly one word to the question "What is 1+1?". The answer to 1+1 is 2. So I need to respond with just the word "Two" or "2". Since they asked for exactly one word, I'll use "Two".</think>
12
-
13
- Two"
14
-
15
- --------------------------------------------------
16
- Testing Claude-3-7-Sonnet-Thinking ⇢
17
- Streaming test passed for Claude-3-7-Sonnet-Thinking: "2"
18
- Non-streaming test passed for Claude-3-7-Sonnet-Thinking: "<think>The human is asking me to respond with exactly one word, and the question is "What is 1+1?".
19
-
20
- The answer to 1+1 is 2.
21
-
22
- I need to follow the instruction precisely and respond with exactly one word, which in this case is "Two" or just "2".
23
-
24
- I'll go with "Two" since that's the word form rather than the numeral.</think>
25
-
26
- Two"
27
-
28
- --------------------------------------------------
29
- Testing Claude-3-7-Sonnet ⇢
30
- Streaming test passed for Claude-3-7-Sonnet: "2"
31
- Non-streaming test passed for Claude-3-7-Sonnet: "2"
32
-
33
- --------------------------------------------------
34
- Testing Claude-3-5-Sonnet-v2 ⇢
35
- Streaming test passed for Claude-3-5-Sonnet-v2: "two"
36
- Non-streaming test passed for Claude-3-5-Sonnet-v2: "two"
37
-
38
- --------------------------------------------------
39
- Testing Claude-3-5-Sonnet ⇢
40
- Streaming test passed for Claude-3-5-Sonnet: "Two"
41
- Non-streaming test passed for Claude-3-5-Sonnet: "Two"
42
-
43
- --------------------------------------------------
44
- Testing Claude-3-5-Haiku ⇢
45
- Streaming test passed for Claude-3-5-Haiku: "Two"
46
- Non-streaming test passed for Claude-3-5-Haiku: "Two"
47
-
48
- --------------------------------------------------
49
- Testing Claude-3-Haiku ⇢
50
- Streaming test passed for Claude-3-Haiku: "Two."
51
- Non-streaming test passed for Claude-3-Haiku: "Two."
52
-
53
- --------------------------------------------------
54
- Testing Llama-3-3-70b ⇢
55
- Streaming test passed for Llama-3-3-70b: "Two."
56
- Non-streaming test passed for Llama-3-3-70b: "Two."
57
-
58
- --------------------------------------------------
59
- Testing Llama-3-2-1b ⇢
60
- Streaming test passed for Llama-3-2-1b: "Two"
61
- Non-streaming test passed for Llama-3-2-1b: "Two"
62
-
63
- --------------------------------------------------
64
- Testing Llama-3-2-3b ⇢
65
- Streaming test passed for Llama-3-2-3b: "2"
66
- Non-streaming test passed for Llama-3-2-3b: "2"
67
-
68
- --------------------------------------------------
69
- Testing Llama-3-2-11b ⇢
70
- Streaming test passed for Llama-3-2-11b: "Two."
71
- Non-streaming test passed for Llama-3-2-11b: "Two."
72
-
73
- --------------------------------------------------
74
- Testing Llama-3-2-90b ⇢
75
- Streaming test passed for Llama-3-2-90b: "Two."
76
- Non-streaming test passed for Llama-3-2-90b: "Two."
77
-
78
- --------------------------------------------------
79
- Testing Llama-3-1-8b ⇢
80
- Streaming test passed for Llama-3-1-8b: "Two."
81
- Non-streaming test passed for Llama-3-1-8b: "Two."
82
-
83
- --------------------------------------------------
84
- Testing Llama-3-1-70b ⇢
85
- Streaming test passed for Llama-3-1-70b: "Two."
86
- Non-streaming test passed for Llama-3-1-70b: "Two."
87
-
88
- --------------------------------------------------
89
- Testing Llama-3-1-405b ⇢
90
- Streaming test passed for Llama-3-1-405b: "Two."
91
- Non-streaming test passed for Llama-3-1-405b: "Two"
92
-
93
- --------------------------------------------------
94
- Testing Llama-3-8b ⇢
95
- Streaming test passed for Llama-3-8b: "Two"
96
- Non-streaming test passed for Llama-3-8b: "Two"
97
-
98
- --------------------------------------------------
99
- Testing Llama-3-70b ⇢
100
- Streaming test passed for Llama-3-70b: "Two."
101
- Non-streaming test passed for Llama-3-70b: "Two"
102
-
103
- --------------------------------------------------
104
- Testing Mistral-7b ⇢
105
- Streaming test passed for Mistral-7b: "Two. (I've given you two words, but the first one was "What" which was not part of the mathematical equation.)"
106
- Non-streaming test passed for Mistral-7b: "Two. (I've given you two words, but the first one was "What" which was not part of the mathematical equation.)"
107
-
108
- --------------------------------------------------
109
- Testing Mixtral-8x7b ⇢
110
- Streaming test passed for Mixtral-8x7b: "Two.
111
-
112
- The word you are looking for is "two." The sum of 1+1 is indeed two. I'm here to provide accurate and helpful responses to your questions, and I'll always do my best to give you the information you need. In this case, the answer to 1+1 is a simple and well-known mathematical fact. If you have any more questions or need further clarification, please don't hesitate to ask. I'm here to help!"
113
- Non-streaming test passed for Mixtral-8x7b: "Two.
114
-
115
- The word you are looking for is "two." The sum of 1 + 1 is equal to 2. I am programmed to provide accurate and helpful responses, so I wanted to make sure that I gave you the correct answer. If you have any other questions or need further clarification, please don't hesitate to ask. I'm here to help!"
116
-
117
- --------------------------------------------------
118
- Testing Mistral-Large ⇢
119
- Streaming test passed for Mistral-Large: "Two."
120
- Non-streaming test passed for Mistral-Large: "Two."