bedrock-wrapper 2.7.0 → 2.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -115,52 +115,57 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
115
115
  - **Tool use support**: Native support for function calling (where supported)
116
116
  - **Unified multimodal**: Consistent handling of text and image inputs
117
117
 
118
+ **Note**: Some models only support the Converse API and will automatically use it regardless of the `useConverseAPI` flag:
119
+ - DeepSeek-V3.1
120
+
118
121
  ---
119
122
 
120
123
  ### Supported Models
121
124
 
122
125
  | modelName | AWS Model Id | Image |
123
126
  |----------------------------|----------------------------------------------|-------|
124
- | Claude-4-1-Opus | us.anthropic.claude-opus-4-1-20250805-v1:0 | |
125
- | Claude-4-1-Opus-Thinking | us.anthropic.claude-opus-4-1-20250805-v1:0 | ✅ |
127
+ | Claude-3-5-Haiku | anthropic.claude-3-5-haiku-20241022-v1:0 | |
128
+ | Claude-3-5-Sonnet | anthropic.claude-3-5-sonnet-20240620-v1:0 | ✅ |
129
+ | Claude-3-5-Sonnet-v2 | anthropic.claude-3-5-sonnet-20241022-v2:0 | ✅ |
130
+ | Claude-3-7-Sonnet | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | ✅ |
131
+ | Claude-3-7-Sonnet-Thinking | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | ✅ |
132
+ | Claude-3-Haiku | anthropic.claude-3-haiku-20240307-v1:0 | ✅ |
126
133
  | Claude-4-Opus | us.anthropic.claude-opus-4-20250514-v1:0 | ✅ |
127
134
  | Claude-4-Opus-Thinking | us.anthropic.claude-opus-4-20250514-v1:0 | ✅ |
128
- | Claude-4-5-Sonnet | us.anthropic.claude-sonnet-4-5-20250929-v1:0 | ✅ |
129
- | Claude-4-5-Sonnet-Thinking | us.anthropic.claude-sonnet-4-5-20250929-v1:0 | ✅ |
130
- | Claude-4-5-Haiku | us.anthropic.claude-haiku-4-5-20251001-v1:0 | ✅ |
131
- | Claude-4-5-Haiku-Thinking | us.anthropic.claude-haiku-4-5-20251001-v1:0 | ✅ |
132
135
  | Claude-4-Sonnet | us.anthropic.claude-sonnet-4-20250514-v1:0 | ✅ |
133
136
  | Claude-4-Sonnet-Thinking | us.anthropic.claude-sonnet-4-20250514-v1:0 | ✅ |
134
- | Claude-3-7-Sonnet-Thinking | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | ✅ |
135
- | Claude-3-7-Sonnet | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | ✅ |
136
- | Claude-3-5-Sonnet-v2 | anthropic.claude-3-5-sonnet-20241022-v2:0 | ✅ |
137
- | Claude-3-5-Sonnet | anthropic.claude-3-5-sonnet-20240620-v1:0 | ✅ |
138
- | Claude-3-5-Haiku | anthropic.claude-3-5-haiku-20241022-v1:0 | |
139
- | Claude-3-Haiku | anthropic.claude-3-haiku-20240307-v1:0 | ✅ |
140
- | Nova-Pro | us.amazon.nova-pro-v1:0 | |
141
- | Nova-Lite | us.amazon.nova-lite-v1:0 | |
142
- | Nova-Micro | us.amazon.nova-micro-v1:0 | ❌ |
137
+ | Claude-4-1-Opus | us.anthropic.claude-opus-4-1-20250805-v1:0 | ✅ |
138
+ | Claude-4-1-Opus-Thinking | us.anthropic.claude-opus-4-1-20250805-v1:0 | ✅ |
139
+ | Claude-4-5-Haiku | us.anthropic.claude-haiku-4-5-20251001-v1:0 | ✅ |
140
+ | Claude-4-5-Haiku-Thinking | us.anthropic.claude-haiku-4-5-20251001-v1:0 | ✅ |
141
+ | Claude-4-5-Sonnet | us.anthropic.claude-sonnet-4-5-20250929-v1:0 | |
142
+ | Claude-4-5-Sonnet-Thinking | us.anthropic.claude-sonnet-4-5-20250929-v1:0 | ✅ |
143
+ | DeepSeek-R1 | us.deepseek.r1-v1:0 | |
144
+ | DeepSeek-V3.1 | deepseek.v3-v1:0 | |
143
145
  | GPT-OSS-120B | openai.gpt-oss-120b-1:0 | ❌ |
144
146
  | GPT-OSS-120B-Thinking | openai.gpt-oss-120b-1:0 | ❌ |
145
147
  | GPT-OSS-20B | openai.gpt-oss-20b-1:0 | ❌ |
146
148
  | GPT-OSS-20B-Thinking | openai.gpt-oss-20b-1:0 | ❌ |
147
- | Llama-3-3-70b | us.meta.llama3-3-70b-instruct-v1:0 | ❌ |
149
+ | Llama-3-8b | meta.llama3-8b-instruct-v1:0 | ❌ |
150
+ | Llama-3-70b | meta.llama3-70b-instruct-v1:0 | ❌ |
151
+ | Llama-3-1-8b | meta.llama3-1-8b-instruct-v1:0 | ❌ |
152
+ | Llama-3-1-70b | meta.llama3-1-70b-instruct-v1:0 | ❌ |
153
+ | Llama-3-1-405b | meta.llama3-1-405b-instruct-v1:0 | ❌ |
148
154
  | Llama-3-2-1b | us.meta.llama3-2-1b-instruct-v1:0 | ❌ |
149
155
  | Llama-3-2-3b | us.meta.llama3-2-3b-instruct-v1:0 | ❌ |
150
156
  | Llama-3-2-11b | us.meta.llama3-2-11b-instruct-v1:0 | ❌ |
151
157
  | Llama-3-2-90b | us.meta.llama3-2-90b-instruct-v1:0 | ❌ |
152
- | Llama-3-1-8b | meta.llama3-1-8b-instruct-v1:0 | ❌ |
153
- | Llama-3-1-70b | meta.llama3-1-70b-instruct-v1:0 | ❌ |
154
- | Llama-3-1-405b | meta.llama3-1-405b-instruct-v1:0 | ❌ |
155
- | Llama-3-8b | meta.llama3-8b-instruct-v1:0 | ❌ |
156
- | Llama-3-70b | meta.llama3-70b-instruct-v1:0 | ❌ |
158
+ | Llama-3-3-70b | us.meta.llama3-3-70b-instruct-v1:0 | ❌ |
157
159
  | Mistral-7b | mistral.mistral-7b-instruct-v0:2 | ❌ |
158
160
  | Mixtral-8x7b | mistral.mixtral-8x7b-instruct-v0:1 | ❌ |
159
161
  | Mistral-Large | mistral.mistral-large-2402-v1:0 | ❌ |
160
- | Qwen3-32B | alibaba.qwen3-32b-instruct-v1:0 | ❌ |
161
- | Qwen3-Coder-30B-A3B | alibaba.qwen3-coder-30b-a3b-instruct-v1:0 | |
162
- | Qwen3-235B-A22B-2507 | alibaba.qwen3-235b-a22b-instruct-2507-v1:0 | |
163
- | Qwen3-Coder-480B-A35B | alibaba.qwen3-coder-480b-a35b-instruct-v1:0 | ❌ |
162
+ | Nova-Micro | us.amazon.nova-micro-v1:0 | ❌ |
163
+ | Nova-Lite | us.amazon.nova-lite-v1:0 | |
164
+ | Nova-Pro | us.amazon.nova-pro-v1:0 | |
165
+ | Qwen3-32B | qwen.qwen3-32b-v1:0 | ❌ |
166
+ | Qwen3-235B-A22B-2507 | qwen.qwen3-235b-a22b-2507-v1:0 | ❌ |
167
+ | Qwen3-Coder-30B-A3B | qwen.qwen3-coder-30b-a3b-v1:0 | ❌ |
168
+ | Qwen3-Coder-480B-A35B | qwen.qwen3-coder-480b-a35b-v1:0 | ❌ |
164
169
 
165
170
 To return the list programmatically you can import and call `listBedrockWrapperSupportedModels`:
166
171
  ```javascript
@@ -173,6 +178,39 @@ Please modify the `bedrock_models.js` file and submit a PR 🏆 or create an Iss
173
178
 
174
179
  ---
175
180
 
181
+ ### Thinking Models
182
+
183
+ Some models support extended reasoning capabilities through "thinking mode". These models include:
184
+ - **Claude models**: Claude-4-1-Opus-Thinking, Claude-4-Opus-Thinking, Claude-4-5-Sonnet-Thinking, Claude-4-5-Haiku-Thinking, Claude-4-Sonnet-Thinking, Claude-3-7-Sonnet-Thinking
185
+ - **GPT-OSS models**: GPT-OSS-120B-Thinking, GPT-OSS-20B-Thinking
186
+
187
+ To use thinking mode and see the model's reasoning process, set `include_thinking_data: true` in your request:
188
+
189
+ ```javascript
190
+ const openaiChatCompletionsCreateObject = {
191
+ "messages": messages,
192
+ "model": "Claude-4-5-Sonnet-Thinking",
193
+ "max_tokens": 4000,
194
+ "stream": true,
195
+ "temperature": 1.0, // Thinking models require temperature of 1.0
196
+ "include_thinking_data": true // Enable thinking output
197
+ };
198
+
199
+ let completeResponse = "";
200
+ for await (const chunk of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject)) {
201
+ completeResponse += chunk;
202
+ process.stdout.write(chunk); // Shows both thinking and response
203
+ }
204
+ ```
205
+
206
+ **Features:**
207
+ - Thinking content appears in `<think>...</think>` tags for Claude models
208
+ - Thinking content appears in `<reasoning>...</reasoning>` tags for GPT-OSS models
209
+ - Temperature is automatically set to 1.0 for optimal thinking performance
210
+ - Budget tokens are automatically calculated based on max_tokens
211
+
212
+ ---
213
+
176
214
  ### Image Support
177
215
 
178
216
  For models with image support (Claude 4+ series including Claude 4.5 Sonnet, Claude 4.5 Haiku, Claude 3.7 Sonnet, Claude 3.5 Sonnet, Claude 3 Haiku, Nova Pro, and Nova Lite), you can include images in your messages using the following format (not all models support system prompts):
@@ -312,6 +350,12 @@ npm run test-stop:invoke
312
350
  # Test stop sequences functionality with Converse API
313
351
  npm run test-stop:converse
314
352
 
353
+ # Test Converse API specifically
354
+ npm run test-converse
355
+
356
+ # Run all test suites
357
+ npm run test:all
358
+
315
359
  # Interactive testing
316
360
  npm run interactive
317
361
  ```
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bedrock-wrapper",
3
- "version": "2.7.0",
3
+ "version": "2.7.1",
4
4
  "description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
5
5
  "homepage": "https://www.equilllabs.com/projects/bedrock-wrapper",
6
6
  "repository": {
@@ -1,66 +0,0 @@
1
- // Quick test for DeepSeek models using interactive example logic
2
- import dotenv from 'dotenv';
3
- dotenv.config();
4
-
5
- import { bedrockWrapper } from "./bedrock-wrapper.js";
6
-
7
- const AWS_REGION = process.env.AWS_REGION;
8
- const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
9
- const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
10
-
11
- const awsCreds = {
12
- region: AWS_REGION,
13
- accessKeyId: AWS_ACCESS_KEY_ID,
14
- secretAccessKey: AWS_SECRET_ACCESS_KEY,
15
- };
16
-
17
- async function quickTest(modelName, useConverseAPI) {
18
- const apiType = useConverseAPI ? "Converse API" : "Invoke API";
19
- console.log(`\nTesting ${modelName} with ${apiType}...`);
20
-
21
- const messages = [
22
- { role: "user", content: "What is 2+2? Answer with just the number." }
23
- ];
24
-
25
- const requestObject = {
26
- messages,
27
- model: modelName,
28
- max_tokens: 50,
29
- stream: false,
30
- temperature: 0.1,
31
- include_thinking_data: true,
32
- };
33
-
34
- try {
35
- const response = await bedrockWrapper(awsCreds, requestObject, { logging: false, useConverseAPI });
36
- let completeResponse = "";
37
- for await (const data of response) {
38
- completeResponse += data;
39
- }
40
- console.log(`✓ SUCCESS: ${completeResponse.trim().substring(0, 100)}...`);
41
- return true;
42
- } catch (error) {
43
- console.log(`✗ FAILED: ${error.message}`);
44
- return false;
45
- }
46
- }
47
-
48
- async function main() {
49
- console.log("\n" + "=".repeat(60));
50
- console.log("QUICK DEEPSEEK MODEL TEST");
51
- console.log("=".repeat(60));
52
-
53
- const models = ["DeepSeek-R1", "DeepSeek-V3.1"];
54
-
55
- for (const model of models) {
56
- console.log(`\n--- Testing ${model} ---`);
57
- await quickTest(model, false); // Invoke API
58
- await quickTest(model, true); // Converse API
59
- }
60
-
61
- console.log("\n" + "=".repeat(60));
62
- console.log("Tests completed!");
63
- console.log("=".repeat(60) + "\n");
64
- }
65
-
66
- main().catch(console.error);