bedrock-wrapper 2.4.4 → 2.4.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +20 -0
- package/README.md +5 -0
- package/bedrock-models.js +78 -0
- package/bedrock-wrapper.js +29 -1
- package/logs/7aa436f5-0b5d-44bd-8860-e1a898f87df2/notification.json +65 -0
- package/logs/7aa436f5-0b5d-44bd-8860-e1a898f87df2/post_tool_use.json +5194 -0
- package/logs/7aa436f5-0b5d-44bd-8860-e1a898f87df2/pre_tool_use.json +1919 -0
- package/logs/7aa436f5-0b5d-44bd-8860-e1a898f87df2/stop.json +44 -0
- package/logs/7aa436f5-0b5d-44bd-8860-e1a898f87df2/user_prompt_submit.json +44 -0
- package/package.json +1 -1
- package/test-stop-sequences.js +4 -0
package/CHANGELOG.md
CHANGED
|
@@ -1,6 +1,26 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
All notable changes to this project will be documented in this file.
|
|
3
3
|
|
|
4
|
+
## [2.4.5] - 2025-08-06 (GPT-OSS Models)
|
|
5
|
+
### Added
|
|
6
|
+
- Support for OpenAI GPT-OSS models on AWS Bedrock
|
|
7
|
+
- GPT-OSS-120B (120B parameter open weight model)
|
|
8
|
+
- GPT-OSS-20B (20B parameter open weight model)
|
|
9
|
+
- GPT-OSS-120B-Thinking (with reasoning tag preservation)
|
|
10
|
+
- GPT-OSS-20B-Thinking (with reasoning tag preservation)
|
|
11
|
+
- `<reasoning>` tag processing for GPT-OSS thinking variants
|
|
12
|
+
- Regular GPT-OSS models automatically strip `<reasoning>` tags
|
|
13
|
+
- Thinking variants preserve `<reasoning>` tags (similar to Claude's `<think>` tags)
|
|
14
|
+
- Non-streaming support for GPT-OSS models (streaming not supported by AWS Bedrock)
|
|
15
|
+
- OpenAI-compatible API format with `max_completion_tokens` parameter
|
|
16
|
+
|
|
17
|
+
### Technical Details
|
|
18
|
+
- **Model Configuration**: All GPT-OSS models use standard messages API format
|
|
19
|
+
- **API Compatibility**: Supports OpenAI-style requests with Apache 2.0 licensed models
|
|
20
|
+
- **Response Processing**: Automatic reasoning tag handling based on model variant
|
|
21
|
+
- **Streaming Fallback**: Automatic detection and fallback to non-streaming for unsupported models
|
|
22
|
+
- **Testing Coverage**: Full integration with existing test suites and interactive example
|
|
23
|
+
|
|
4
24
|
## [2.4.4] - 2025-08-05 (Claude 4.1 Opus)
|
|
5
25
|
### Added
|
|
6
26
|
- Support for Claude 4.1 Opus models
|
package/README.md
CHANGED
|
@@ -119,6 +119,10 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
|
|
|
119
119
|
| Nova-Pro | us.amazon.nova-pro-v1:0 | ✅ |
|
|
120
120
|
| Nova-Lite | us.amazon.nova-lite-v1:0 | ✅ |
|
|
121
121
|
| Nova-Micro | us.amazon.nova-micro-v1:0 | ❌ |
|
|
122
|
+
| GPT-OSS-120B | openai.gpt-oss-120b-1:0 | ❌ |
|
|
123
|
+
| GPT-OSS-120B-Thinking | openai.gpt-oss-120b-1:0 | ❌ |
|
|
124
|
+
| GPT-OSS-20B | openai.gpt-oss-20b-1:0 | ❌ |
|
|
125
|
+
| GPT-OSS-20B-Thinking | openai.gpt-oss-20b-1:0 | ❌ |
|
|
122
126
|
| Llama-3-3-70b | us.meta.llama3-3-70b-instruct-v1:0 | ❌ |
|
|
123
127
|
| Llama-3-2-1b | us.meta.llama3-2-1b-instruct-v1:0 | ❌ |
|
|
124
128
|
| Llama-3-2-3b | us.meta.llama3-2-3b-instruct-v1:0 | ❌ |
|
|
@@ -210,6 +214,7 @@ const openaiChatCompletionsCreateObject = {
|
|
|
210
214
|
**Model Support:**
|
|
211
215
|
- ✅ **Claude models**: Fully supported (up to 8,191 sequences)
|
|
212
216
|
- ✅ **Nova models**: Fully supported (up to 4 sequences)
|
|
217
|
+
- ✅ **GPT-OSS models**: Fully supported
|
|
213
218
|
- ✅ **Mistral models**: Fully supported (up to 10 sequences)
|
|
214
219
|
- ❌ **Llama models**: Not supported (AWS Bedrock limitation)
|
|
215
220
|
|
package/bedrock-models.js
CHANGED
|
@@ -678,6 +678,84 @@ export const bedrock_models = [
|
|
|
678
678
|
"inferenceConfig": {}
|
|
679
679
|
}
|
|
680
680
|
},
|
|
681
|
+
{
|
|
682
|
+
// ====================
|
|
683
|
+
// == GPT-OSS-120B ==
|
|
684
|
+
// ====================
|
|
685
|
+
"modelName": "GPT-OSS-120B",
|
|
686
|
+
"modelId": "openai.gpt-oss-120b-1:0",
|
|
687
|
+
// "modelId": "us.openai.gpt-oss-120b-1:0",
|
|
688
|
+
"vision": false,
|
|
689
|
+
"messages_api": true,
|
|
690
|
+
"system_as_separate_field": false,
|
|
691
|
+
"display_role_names": true,
|
|
692
|
+
"max_tokens_param_name": "max_completion_tokens",
|
|
693
|
+
"max_supported_response_tokens": 5000,
|
|
694
|
+
"stop_sequences_param_name": "stop_sequences",
|
|
695
|
+
"response_chunk_element": "choices[0].delta.content",
|
|
696
|
+
"response_nonchunk_element": "choices[0].message.content",
|
|
697
|
+
"streaming_supported": false,
|
|
698
|
+
"special_request_schema": {}
|
|
699
|
+
},
|
|
700
|
+
{
|
|
701
|
+
// ===================
|
|
702
|
+
// == GPT-OSS-20B ==
|
|
703
|
+
// ===================
|
|
704
|
+
"modelName": "GPT-OSS-20B",
|
|
705
|
+
"modelId": "openai.gpt-oss-20b-1:0",
|
|
706
|
+
// "modelId": "us.openai.gpt-oss-20b-1:0",
|
|
707
|
+
"vision": false,
|
|
708
|
+
"messages_api": true,
|
|
709
|
+
"system_as_separate_field": false,
|
|
710
|
+
"display_role_names": true,
|
|
711
|
+
"max_tokens_param_name": "max_completion_tokens",
|
|
712
|
+
"max_supported_response_tokens": 5000,
|
|
713
|
+
"stop_sequences_param_name": "stop_sequences",
|
|
714
|
+
"response_chunk_element": "choices[0].delta.content",
|
|
715
|
+
"response_nonchunk_element": "choices[0].message.content",
|
|
716
|
+
"streaming_supported": false,
|
|
717
|
+
"special_request_schema": {}
|
|
718
|
+
},
|
|
719
|
+
{
|
|
720
|
+
// ==============================
|
|
721
|
+
// == GPT-OSS-120B-Thinking ==
|
|
722
|
+
// ==============================
|
|
723
|
+
"modelName": "GPT-OSS-120B-Thinking",
|
|
724
|
+
"modelId": "openai.gpt-oss-120b-1:0",
|
|
725
|
+
// "modelId": "us.openai.gpt-oss-120b-1:0",
|
|
726
|
+
"vision": false,
|
|
727
|
+
"messages_api": true,
|
|
728
|
+
"system_as_separate_field": false,
|
|
729
|
+
"display_role_names": true,
|
|
730
|
+
"max_tokens_param_name": "max_completion_tokens",
|
|
731
|
+
"max_supported_response_tokens": 5000,
|
|
732
|
+
"stop_sequences_param_name": "stop_sequences",
|
|
733
|
+
"response_chunk_element": "choices[0].delta.content",
|
|
734
|
+
"response_nonchunk_element": "choices[0].message.content",
|
|
735
|
+
"streaming_supported": false,
|
|
736
|
+
"preserve_reasoning": true,
|
|
737
|
+
"special_request_schema": {}
|
|
738
|
+
},
|
|
739
|
+
{
|
|
740
|
+
// =============================
|
|
741
|
+
// == GPT-OSS-20B-Thinking ==
|
|
742
|
+
// =============================
|
|
743
|
+
"modelName": "GPT-OSS-20B-Thinking",
|
|
744
|
+
"modelId": "openai.gpt-oss-20b-1:0",
|
|
745
|
+
// "modelId": "us.openai.gpt-oss-20b-1:0",
|
|
746
|
+
"vision": false,
|
|
747
|
+
"messages_api": true,
|
|
748
|
+
"system_as_separate_field": false,
|
|
749
|
+
"display_role_names": true,
|
|
750
|
+
"max_tokens_param_name": "max_completion_tokens",
|
|
751
|
+
"max_supported_response_tokens": 5000,
|
|
752
|
+
"stop_sequences_param_name": "stop_sequences",
|
|
753
|
+
"response_chunk_element": "choices[0].delta.content",
|
|
754
|
+
"response_nonchunk_element": "choices[0].message.content",
|
|
755
|
+
"streaming_supported": false,
|
|
756
|
+
"preserve_reasoning": true,
|
|
757
|
+
"special_request_schema": {}
|
|
758
|
+
},
|
|
681
759
|
{
|
|
682
760
|
// ================
|
|
683
761
|
// == Mistral-7b ==
|
package/bedrock-wrapper.js
CHANGED
|
@@ -62,6 +62,25 @@ async function processImage(imageInput) {
|
|
|
62
62
|
return processedImage.toString('base64');
|
|
63
63
|
}
|
|
64
64
|
|
|
65
|
+
/**
 * Handle `<reasoning>` tags emitted by GPT-OSS models.
 *
 * Non-thinking GPT-OSS variants should not expose the model's internal
 * reasoning, so every `<reasoning>…</reasoning>` block is removed. Thinking
 * variants (flagged via `awsModel.preserve_reasoning`) keep the tags intact,
 * mirroring how Claude's `<think>` tags are surfaced.
 *
 * @param {string} text - Raw model output (may be null/empty).
 * @param {object} awsModel - Model config; `preserve_reasoning` controls stripping.
 * @returns {string} The text, with reasoning blocks stripped unless preserved.
 */
function processReasoningTags(text, awsModel) {
    if (!text) return text;

    // Only GPT-OSS output carries a complete reasoning block; anything else
    // (including a truncated, unclosed tag) passes through untouched.
    const containsReasoningBlock =
        text.includes('<reasoning>') && text.includes('</reasoning>');
    if (!containsReasoningBlock) {
        return text;
    }

    // Thinking variants expose the reasoning to the caller as-is.
    if (awsModel.preserve_reasoning) {
        return text;
    }

    // Non-thinking variants: drop every reasoning block, then tidy whitespace
    // left behind at the edges.
    const stripped = text.replace(/<reasoning>[\s\S]*?<\/reasoning>/g, '');
    return stripped.trim();
}
|
|
83
|
+
|
|
65
84
|
export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging = false } = {} ) {
|
|
66
85
|
const { region, accessKeyId, secretAccessKey } = awsCreds;
|
|
67
86
|
let { messages, model, max_tokens, stream, temperature, top_p, include_thinking_data, stop, stop_sequences } = openaiChatCompletionsCreateObject;
|
|
@@ -341,7 +360,11 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
|
|
|
341
360
|
console.log("\nFinal request:", JSON.stringify(request, null, 2));
|
|
342
361
|
}
|
|
343
362
|
|
|
344
|
-
if
|
|
363
|
+
// Check if model supports streaming, override stream parameter if not
|
|
364
|
+
const modelSupportsStreaming = awsModel.streaming_supported !== false;
|
|
365
|
+
const shouldStream = stream && modelSupportsStreaming;
|
|
366
|
+
|
|
367
|
+
if (shouldStream) {
|
|
345
368
|
const responseStream = await client.send(
|
|
346
369
|
new InvokeModelWithResponseStreamCommand({
|
|
347
370
|
contentType: "application/json",
|
|
@@ -361,6 +384,8 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
|
|
|
361
384
|
is_thinking = false;
|
|
362
385
|
result = `</think>\n\n${result}`;
|
|
363
386
|
}
|
|
387
|
+
// Process reasoning tags for GPT-OSS models
|
|
388
|
+
result = processReasoningTags(result, awsModel);
|
|
364
389
|
yield result;
|
|
365
390
|
} else {
|
|
366
391
|
if (include_thinking_data && awsModel.thinking_response_chunk_element) {
|
|
@@ -417,6 +442,9 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
|
|
|
417
442
|
text_result = "";
|
|
418
443
|
}
|
|
419
444
|
|
|
445
|
+
// Process reasoning tags for GPT-OSS models
|
|
446
|
+
text_result = processReasoningTags(text_result, awsModel);
|
|
447
|
+
|
|
420
448
|
let result = thinking_result ? `<think>${thinking_result}</think>\n\n${text_result}` : text_result;
|
|
421
449
|
|
|
422
450
|
// Ensure final result is a string, in case thinking_result was also empty
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
[
|
|
2
|
+
{
|
|
3
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
4
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
5
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
6
|
+
"hook_event_name": "Notification",
|
|
7
|
+
"message": "Claude needs your permission to use Fetch"
|
|
8
|
+
},
|
|
9
|
+
{
|
|
10
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
11
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
12
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
13
|
+
"hook_event_name": "Notification",
|
|
14
|
+
"message": "Claude needs your permission to use Fetch"
|
|
15
|
+
},
|
|
16
|
+
{
|
|
17
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
18
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
19
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
20
|
+
"hook_event_name": "Notification",
|
|
21
|
+
"message": "Claude needs your permission to use "
|
|
22
|
+
},
|
|
23
|
+
{
|
|
24
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
25
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
26
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
27
|
+
"hook_event_name": "Notification",
|
|
28
|
+
"message": "Claude is waiting for your input"
|
|
29
|
+
},
|
|
30
|
+
{
|
|
31
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
32
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
33
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
34
|
+
"hook_event_name": "Notification",
|
|
35
|
+
"message": "Claude is waiting for your input"
|
|
36
|
+
},
|
|
37
|
+
{
|
|
38
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
39
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
40
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
41
|
+
"hook_event_name": "Notification",
|
|
42
|
+
"message": "Claude is waiting for your input"
|
|
43
|
+
},
|
|
44
|
+
{
|
|
45
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
46
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
47
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
48
|
+
"hook_event_name": "Notification",
|
|
49
|
+
"message": "Claude needs your permission to use "
|
|
50
|
+
},
|
|
51
|
+
{
|
|
52
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
53
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
54
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
55
|
+
"hook_event_name": "Notification",
|
|
56
|
+
"message": "Claude is waiting for your input"
|
|
57
|
+
},
|
|
58
|
+
{
|
|
59
|
+
"session_id": "7aa436f5-0b5d-44bd-8860-e1a898f87df2",
|
|
60
|
+
"transcript_path": "C:\\Users\\Justin.Parker\\.claude\\projects\\C--git-bedrock-wrapper\\7aa436f5-0b5d-44bd-8860-e1a898f87df2.jsonl",
|
|
61
|
+
"cwd": "C:\\git\\bedrock-wrapper",
|
|
62
|
+
"hook_event_name": "Notification",
|
|
63
|
+
"message": "Claude is waiting for your input"
|
|
64
|
+
}
|
|
65
|
+
]
|