bedrock-wrapper 1.3.1 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/README.md +24 -18
- package/bedrock-models.js +80 -0
- package/bedrock-wrapper.js +72 -38
- package/{example-test.js → interactive-example.js} +152 -98
- package/package.json +8 -5
- package/test-models-output.txt +90 -0
- package/test-models.js +145 -0
package/CHANGELOG.md
ADDED
@@ -0,0 +1,59 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+
+## [2.1.0] - 2024-11-21 (Claude 3.5 Haiku)
+### Added
+- Support for Claude 3.5 Haiku
+
+## [2.0.0] - 2024-10-31 (Claude Sonnet & Haiku)
+### Added
+- Support for Anthropic Sonnet & Haiku models
+  - Claude-3-5-Sonnet-v2
+  - Claude-3-5-Sonnet
+  - Claude-3-Haiku
+- Interactive example script for testing models
+- Testing script with streaming and non-streaming support for all models
+- Standardize output to be a string for both streamed and non-streamed responses
+  > **NOTE:** This is a breaking change for previous non-streaming responses. Existing streaming responses will remain unchanged.
+
+### Changed
+- Complete architecture overhaul for better model support
+- Improved message handling with role-based formatting
+- Enhanced error handling and response processing
+- Standardized model configuration format
+- Updated AWS SDK integration
+
+### Technical Details
+- Implemented messages API support for compatible models
+- Added system message handling as a separate field where supported
+- Configurable token limits per model
+- Flexible response parsing with chunk/non-chunk handling
+- Cross-region profile support for certain models
+
+## [1.3.0] - 2024-07-24 (Llama3.2)
+### Added
+- Support for Llama 3.2 series models
+  - Llama-3-2-1b
+  - Llama-3-2-3b
+  - Llama-3-2-11b
+  - Llama-3-2-90b
+
+## [1.1.0] - 2024-07-24 (Llama3.1)
+### Added
+- Support for Llama 3.1 series models
+  - Llama-3-1-8b
+  - Llama-3-1-70b
+
+
+## [1.0.14] - 2024-05-06 (Initial Stable Release)
+### Added
+- Initial stable release of Bedrock Wrapper
+- Basic AWS Bedrock integration
+- OpenAI-compatible API object support
+- Basic model support
+  - Llama-3-8b
+  - Llama-3-70b
+  - Mistral-7b
+  - Mixtral-8x7b
+  - Mistral-Large
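The 2.0.0 breaking-change note above is what the README diff below reflects: non-streaming consumers used to parse JSON out of each yielded value, and from 2.0.0 on every yielded value is already a plain string. A minimal before/after sketch of a non-streaming consumer (the "before" shape is inferred from the removed README lines, so treat it as an assumption):

```javascript
import { bedrockWrapper } from "bedrock-wrapper";
// awsCreds and openaiChatCompletionsCreateObject are assumed to be configured as in the README

// Before 2.0.0 (inferred from the removed README lines — an assumption):
//   for await (const data of response) {
//       const jsonResponse = JSON.parse(jsonString);
//       completeResponse += jsonResponse.generation;
//   }

// From 2.0.0 on, streamed and non-streamed iterations both yield plain strings:
let completeResponse = "";
const response = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject);
for await (const data of response) {
    completeResponse += data; // each `data` is already a string
}
```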
package/README.md
CHANGED
@@ -76,13 +76,10 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
 ```javascript
 // create a variable to hold the complete response
 let completeResponse = "";
-// invoke the streamed bedrock api response
 if (!openaiChatCompletionsCreateObject.stream){ // invoke the unstreamed bedrock api response
     const response = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject);
     for await (const data of response) {
-
-        const jsonResponse = JSON.parse(jsonString);
-        completeResponse += jsonResponse.generation;
+        completeResponse += data;
     }
 // ----------------------------------------------------
 // -- unstreamed complete response is available here --
@@ -94,20 +91,24 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
 
 ### Supported Models
 
-| modelName
-
-
-
-
-
-| Llama-3-
-| Llama-3-
-| Llama-3-
-| Llama-3-
-| Llama-3-
-
-
-
+| modelName            | modelId                                   |
+|----------------------|-------------------------------------------|
+| Claude-3-5-Sonnet-v2 | anthropic.claude-3-5-sonnet-20241022-v2:0 |
+| Claude-3-5-Sonnet    | anthropic.claude-3-5-sonnet-20240620-v1:0 |
+| Claude-3-5-Haiku     | anthropic.claude-3-5-haiku-20241022-v1:0  |
+| Claude-3-Haiku       | anthropic.claude-3-haiku-20240307-v1:0    |
+| Llama-3-2-1b         | us.meta.llama3-2-1b-instruct-v1:0         |
+| Llama-3-2-3b         | us.meta.llama3-2-3b-instruct-v1:0         |
+| Llama-3-2-11b        | us.meta.llama3-2-11b-instruct-v1:0        |
+| Llama-3-2-90b        | us.meta.llama3-2-90b-instruct-v1:0        |
+| Llama-3-1-8b         | meta.llama3-1-8b-instruct-v1:0            |
+| Llama-3-1-70b        | meta.llama3-1-70b-instruct-v1:0           |
+| Llama-3-1-405b       | meta.llama3-1-405b-instruct-v1:0          |
+| Llama-3-8b           | meta.llama3-8b-instruct-v1:0              |
+| Llama-3-70b          | meta.llama3-70b-instruct-v1:0             |
+| Mistral-7b           | mistral.mistral-7b-instruct-v0:2          |
+| Mixtral-8x7b         | mistral.mixtral-8x7b-instruct-v0:1        |
+| Mistral-Large        | mistral.mistral-large-2402-v1:0           |
 
 To return the list programmatically you can import and call `listBedrockWrapperSupportedModels`:
 ```javascript
@@ -135,3 +136,8 @@ In case you missed it at the beginning of this doc, for an even easier setup, us
 - [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create)
 - [AWS Bedrock](https://aws.amazon.com/bedrock/)
 - [AWS SDK for JavaScript](https://aws.amazon.com/sdk-for-javascript/)
+
+---
+
+Please consider sending me a tip to support my work 😀
+# [🍵 tip me here](https://ko-fi.com/jparkerweb)
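The README's code sample for `listBedrockWrapperSupportedModels` falls outside the hunks shown here; a minimal sketch of the call, mirroring how interactive-example.js below consumes it:

```javascript
import { listBedrockWrapperSupportedModels } from "bedrock-wrapper";

// resolves to an array of modelName/modelId descriptors — one per row of the table above
const supportedModels = await listBedrockWrapperSupportedModels();
console.log(supportedModels);
```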
package/bedrock-models.js
CHANGED
@@ -6,6 +6,74 @@
 // https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/cross-region-inference
 
 export const bedrock_models = [
+    {
+        // ==========================
+        // == Claude 3.5 Sonnet v2 ==
+        // ==========================
+        "modelName": "Claude-3-5-Sonnet-v2",
+        "modelId": "anthropic.claude-3-5-sonnet-20241022-v2:0",
+        "messages_api": true,
+        "system_as_separate_field": true,
+        "display_role_names": true,
+        "max_tokens_param_name": "max_tokens",
+        "max_supported_response_tokens": 8192,
+        "response_chunk_element": "delta.text",
+        "response_nonchunk_element": "content[0].text",
+        "special_request_schema": {
+            "anthropic_version": "bedrock-2023-05-31"
+        }
+    },
+    {
+        // =======================
+        // == Claude 3.5 Sonnet ==
+        // =======================
+        "modelName": "Claude-3-5-Sonnet",
+        "modelId": "anthropic.claude-3-5-sonnet-20240620-v1:0",
+        "messages_api": true,
+        "system_as_separate_field": true,
+        "display_role_names": true,
+        "max_tokens_param_name": "max_tokens",
+        "max_supported_response_tokens": 8192,
+        "response_chunk_element": "delta.text",
+        "response_nonchunk_element": "content[0].text",
+        "special_request_schema": {
+            "anthropic_version": "bedrock-2023-05-31"
+        }
+    },
+    {
+        // ======================
+        // == Claude 3.5 Haiku ==
+        // ======================
+        "modelName": "Claude-3-5-Haiku",
+        "modelId": "anthropic.claude-3-5-haiku-20241022-v1:0",
+        "messages_api": true,
+        "system_as_separate_field": true,
+        "display_role_names": true,
+        "max_tokens_param_name": "max_tokens",
+        "max_supported_response_tokens": 8192,
+        "response_chunk_element": "delta.text",
+        "response_nonchunk_element": "content[0].text",
+        "special_request_schema": {
+            "anthropic_version": "bedrock-2023-05-31"
+        }
+    },
+    {
+        // ====================
+        // == Claude 3 Haiku ==
+        // ====================
+        "modelName": "Claude-3-Haiku",
+        "modelId": "anthropic.claude-3-haiku-20240307-v1:0",
+        "messages_api": true,
+        "system_as_separate_field": true,
+        "display_role_names": true,
+        "max_tokens_param_name": "max_tokens",
+        "max_supported_response_tokens": 8192,
+        "response_chunk_element": "delta.text",
+        "response_nonchunk_element": "content[0].text",
+        "special_request_schema": {
+            "anthropic_version": "bedrock-2023-05-31"
+        }
+    },
     {
         // ==================
         // == Llama 3.2 1b ==
@@ -13,6 +81,7 @@ export const bedrock_models = [
         "modelName": "Llama-3-2-1b",
         // "modelId": "meta.llama3-2-1b-instruct-v1:0",
         "modelId": "us.meta.llama3-2-1b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -39,6 +108,7 @@ export const bedrock_models = [
         "modelName": "Llama-3-2-3b",
         // "modelId": "meta.llama3-2-3b-instruct-v1:0",
         "modelId": "us.meta.llama3-2-3b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -65,6 +135,7 @@ export const bedrock_models = [
         "modelName": "Llama-3-2-11b",
         // "modelId": "meta.llama3-2-11b-instruct-v1:0",
         "modelId": "us.meta.llama3-2-11b-instruct-v1:0",
+        "messages_api": false,
        "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -91,6 +162,7 @@ export const bedrock_models = [
         "modelName": "Llama-3-2-90b",
         // "modelId": "meta.llama3-2-90b-instruct-v1:0",
         "modelId": "us.meta.llama3-2-90b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -116,6 +188,7 @@ export const bedrock_models = [
         // ==================
         "modelName": "Llama-3-1-8b",
         "modelId": "meta.llama3-1-8b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -141,6 +214,7 @@ export const bedrock_models = [
         // ===================
         "modelName": "Llama-3-1-70b",
         "modelId": "meta.llama3-1-70b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -166,6 +240,7 @@ export const bedrock_models = [
         // ====================
         "modelName": "Llama-3-1-405b",
         "modelId": "meta.llama3-1-405b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -191,6 +266,7 @@ export const bedrock_models = [
         // ================
         "modelName": "Llama-3-8b",
         "modelId": "meta.llama3-8b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -216,6 +292,7 @@ export const bedrock_models = [
         // =================
         "modelName": "Llama-3-70b",
         "modelId": "meta.llama3-70b-instruct-v1:0",
+        "messages_api": false,
         "bos_text": "<|begin_of_text|>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -241,6 +318,7 @@ export const bedrock_models = [
         // ================
         "modelName": "Mistral-7b",
         "modelId": "mistral.mistral-7b-instruct-v0:2",
+        "messages_api": false,
         "bos_text": "<s>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -266,6 +344,7 @@ export const bedrock_models = [
         // ==================
         "modelName": "Mixtral-8x7b",
         "modelId": "mistral.mixtral-8x7b-instruct-v0:1",
+        "messages_api": false,
         "bos_text": "<s>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
@@ -291,6 +370,7 @@ export const bedrock_models = [
         // ===================
         "modelName": "Mistral-Large",
         "modelId": "mistral.mistral-large-2402-v1:0",
+        "messages_api": false,
         "bos_text": "<s>",
         "role_system_message_prefix": "",
         "role_system_message_suffix": "",
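The two entry shapes above differ on `messages_api`: the Claude entries send a `messages` array (with the system prompt split into a separate `system` field), while the Llama and Mistral entries instead carry `bos_text` plus role prefix/suffix strings for flattening the conversation into a single prompt string. For illustration, a hypothetical messages-API entry that follows the same schema — the model name and ID below are invented placeholders, not models the package ships:

```javascript
// Hypothetical example only — not an entry that ships in bedrock-models.js.
const exampleModelEntry = {
    "modelName": "Example-Messages-Model",              // invented placeholder
    "modelId": "vendor.example-model-v1:0",             // invented placeholder
    "messages_api": true,                   // request body carries a `messages` array
    "system_as_separate_field": true,       // system prompt moves to a top-level `system` field
    "display_role_names": true,
    "max_tokens_param_name": "max_tokens",  // name of the token-limit parameter in the request
    "max_supported_response_tokens": 8192,
    "response_chunk_element": "delta.text",          // path to the text in streamed chunks
    "response_nonchunk_element": "content[0].text",  // path to the text in full responses
    "special_request_schema": {
        "anthropic_version": "bedrock-2023-05-31"
    }
};
```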
package/bedrock-wrapper.js
CHANGED
@@ -41,9 +41,16 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
 
 // cleanup message content before formatting prompt message
 let message_cleaned = [];
+let system_message = "";
+
 for (let i = 0; i < messages.length; i++) {
     if (messages[i].content !== "") {
-        message_cleaned.push(messages[i]);
+        // Extract system message only if model requires it as separate field
+        if (awsModel.system_as_separate_field && messages[i].role === "system") {
+            system_message = messages[i].content;
+        } else {
+            message_cleaned.push(messages[i]);
+        }
     } else if (awsModel.display_role_names) {
         message_cleaned.push(messages[i]);
     }
@@ -53,58 +60,77 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
     }
 }
 
+let prompt;
+
 // format prompt message from message array
-if
-prompt +=
-if (
-if (
-if (
+if (awsModel.messages_api) {
+    // convert message array to prompt object if model supports messages api
+    prompt = message_cleaned;
+} else {
+    // convert message array to prompt string if model does not support messages api
+    prompt = awsModel.bos_text;
+    let eom_text_inserted = false;
+    for (let i = 0; i < message_cleaned.length; i++) {
+        prompt += "\n";
+        if (message_cleaned[i].role === "system") {
+            prompt += awsModel.role_system_message_prefix;
+            prompt += awsModel.role_system_prefix;
+            if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
+            prompt += awsModel.role_system_suffix;
+            if (awsModel.display_role_names) { prompt += "\n"; }
+            prompt += message_cleaned[i].content;
+            prompt += awsModel.role_system_message_suffix;
+        } else if (message_cleaned[i].role === "user") {
+            prompt += awsModel.role_user_message_prefix;
+            prompt += awsModel.role_user_prefix;
+            if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
+            prompt += awsModel.role_user_suffix;
+            if (awsModel.display_role_names) { prompt += "\n"; }
+            prompt += message_cleaned[i].content;
+            prompt += awsModel.role_user_message_suffix;
+        } else if (message_cleaned[i].role === "assistant") {
+            prompt += awsModel.role_assistant_message_prefix;
+            prompt += awsModel.role_assistant_prefix;
+            if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
+            prompt += awsModel.role_assistant_suffix;
+            if (awsModel.display_role_names) { prompt += "\n"; }
+            prompt += message_cleaned[i].content;
+            prompt += awsModel.role_assistant_message_suffix;
+        }
+        if (message_cleaned[i+1] && message_cleaned[i+1].content === "") {
+            prompt += `\n${awsModel.eom_text}`;
+            eom_text_inserted = true;
+        } else if ((i+1) === (message_cleaned.length - 1) && !eom_text_inserted) {
+            prompt += `\n${awsModel.eom_text}`;
+        }
     }
 }
 
 // logging
 if (logging) {
-
+    if (awsModel.system_as_separate_field && system_message) {
+        console.log(`\nsystem: ${system_message}`);
+    }
+    console.log(`\nprompt: ${typeof prompt === 'object' ? JSON.stringify(prompt) : prompt}\n`);
 }
 
 const max_gen_tokens = max_tokens <= awsModel.max_supported_response_tokens ? max_tokens : awsModel.max_supported_response_tokens;
 
 // Format the request payload using the model's native structure.
-const request = {
+const request = awsModel.messages_api ? {
+    messages: prompt,
+    ...(awsModel.system_as_separate_field && system_message && { system: system_message }), // Only add system field if model requires it and there's a system message
+    [awsModel.max_tokens_param_name]: max_gen_tokens,
+    temperature: temperature,
+    top_p: top_p,
+    ...awsModel.special_request_schema
+} : {
     prompt,
     // Optional inference parameters:
     [awsModel.max_tokens_param_name]: max_gen_tokens,
     temperature: temperature,
     top_p: top_p,
+    ...awsModel.special_request_schema
 };
 
 // Create a Bedrock Runtime client in the AWS Region of your choice
@@ -139,7 +165,15 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
         modelId: awsModel.modelId,
     }),
 );
-
+
+const decodedBodyResponse = JSON.parse(new TextDecoder().decode(apiResponse.body));
+let result;
+if (awsModel.response_nonchunk_element) {
+    result = getValueByPath(decodedBodyResponse, awsModel.response_nonchunk_element);
+} else {
+    result = getValueByPath(decodedBodyResponse, awsModel.response_chunk_element);
+}
+yield result;
 }
 }
 
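The new non-streaming path calls a `getValueByPath` helper that sits outside these hunks. Given the path strings the model configs use ("delta.text", "content[0].text"), a minimal sketch of what such a helper might look like — an assumption, not the package's actual implementation:

```javascript
// Sketch only — the real helper in bedrock-wrapper.js is not shown in this diff.
function getValueByPath(obj, path) {
    // normalize "content[0].text" to "content.0.text", then walk the object key by key
    const keys = path.replace(/\[(\d+)\]/g, '.$1').split('.');
    return keys.reduce((value, key) => (value == null ? undefined : value[key]), obj);
}

// getValueByPath({ content: [{ text: "Two" }] }, "content[0].text") === "Two"
// getValueByPath({ delta: { text: "Tw" } }, "delta.text") === "Tw"
```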
package/{example-test.js → interactive-example.js}
CHANGED
@@ -1,98 +1,152 @@
-[old example-test.js content — 98 lines — not preserved in this diff view]
+// Clear terminal
+console.clear();
+
+// ================================================================================
+// == AWS Bedrock Example: Invoke a Model with a Streamed or Unstreamed Response ==
+// ================================================================================
+
+// ---------------------------------------------------------------------
+// -- import environment variables from .env file or define them here --
+// ---------------------------------------------------------------------
+import dotenv from 'dotenv';
+dotenv.config();
+const AWS_REGION = process.env.AWS_REGION;
+const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
+const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
+const LLM_MAX_GEN_TOKENS = parseInt(process.env.LLM_MAX_GEN_TOKENS);
+const LLM_TEMPERATURE = parseFloat(process.env.LLM_TEMPERATURE);
+const LLM_TOP_P = parseFloat(process.env.LLM_TOP_P);
+
+// --------------------------------------------
+// -- import functions from bedrock-wrapper --
+// --   - bedrockWrapper                     --
+// --   - listBedrockWrapperSupportedModels --
+// --------------------------------------------
+import {
+    bedrockWrapper,
+    listBedrockWrapperSupportedModels
+} from "./bedrock-wrapper.js";
+
+// ----------------------------------------------
+// -- Get and process supported models          --
+// ----------------------------------------------
+const supportedModels = await listBedrockWrapperSupportedModels();
+const availableModels = supportedModels.map(model => {
+    // Fix both modelName and modelId by adding quotes
+    const fixedJson = model
+        .replace(/modelName": ([^,]+),/, 'modelName": "$1",')
+        .replace(/modelId": ([^}]+)}/, 'modelId": "$1"}');
+    return JSON.parse(fixedJson).modelName;
+});
+
+// Display models with numbers
+console.log('\nAvailable Models:');
+availableModels.forEach((model, index) => {
+    console.log(`${index + 1}. ${model}`);
+});
+
+// Prompt user for input
+import readline from 'readline';
+const rl = readline.createInterface({
+    input: process.stdin,
+    output: process.stdout
+});
+
+// Get user selection
+const selectedModel = await new Promise((resolve) => {
+    rl.question('\nEnter the number of the model you want to use: ', (answer) => {
+        const selection = parseInt(answer) - 1;
+        if (selection >= 0 && selection < availableModels.length) {
+            resolve(availableModels[selection]);
+        } else {
+            console.log('Invalid selection, defaulting to Claude-3-5-Sonnet-v2');
+            resolve('Claude-3-5-Sonnet-v2');
+        }
+    });
+});
+
+// Ask for streaming preference
+const shouldStream = await new Promise((resolve) => {
+    rl.question('\nDo you want streamed responses? (Y/n): ', (answer) => {
+        resolve(answer.toLowerCase() !== 'n');
+    });
+});
+
+console.log(`\nUsing model: ${selectedModel}`);
+console.log(`Streaming: ${shouldStream ? 'enabled' : 'disabled'}\n`);
+
+const defaultPrompt = "Describe what the openai api standard used by lots of serverless LLM api providers is and why it has been widely adopted.";
+
+// Get user prompt
+const userPrompt = await new Promise((resolve) => {
+    rl.question(`\nEnter your prompt (press Enter to use default):\n> `, (answer) => {
+        resolve(answer.trim() || defaultPrompt);
+        rl.close(); // Only close after all prompts are complete
+    });
+});
+
+// -----------------------------------------------
+// -- example prompt in `messages` array format --
+// -----------------------------------------------
+const messages = [
+    {
+        role: "system",
+        content: "You are a helpful AI assistant that follows instructions extremely well. Answer the user questions accurately. Think step by step before answering the question.",
+    },
+    {
+        role: "user",
+        content: userPrompt,
+    },
+    {
+        role: "assistant",
+        content: "",
+    },
+];
+
+
+// ---------------------------------------------------
+// -- create an object to hold your AWS credentials --
+// ---------------------------------------------------
+const awsCreds = {
+    region: AWS_REGION,
+    accessKeyId: AWS_ACCESS_KEY_ID,
+    secretAccessKey: AWS_SECRET_ACCESS_KEY,
+};
+// ----------------------------------------------------------------------
+// -- create an object that copies your openai chat completions object --
+// ----------------------------------------------------------------------
+const openaiChatCompletionsCreateObject = {
+    "messages": messages,
+    "model": selectedModel,
+    "max_tokens": LLM_MAX_GEN_TOKENS,
+    "stream": shouldStream,
+    "temperature": LLM_TEMPERATURE,
+    "top_p": LLM_TOP_P,
+};
+
+
+// ------------------------------------------------------------
+// -- invoke the streamed or unstreamed bedrock api response --
+// ------------------------------------------------------------
+// create a variable to hold the complete response
+let completeResponse = "";
+// streamed call
+if (openaiChatCompletionsCreateObject.stream) {
+    for await (const chunk of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: true })) {
+        completeResponse += chunk;
+        // ---------------------------------------------------
+        // -- each chunk is streamed as it is received here --
+        // ---------------------------------------------------
+        process.stdout.write(chunk); // ⇠ do stuff with the streamed chunk
+    }
+} else { // unstreamed call
+    const response = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: true });
+    for await (const data of response) {
+        completeResponse += data;
+    }
+    // ----------------------------------------------------
+    // -- unstreamed complete response is available here --
+    // ----------------------------------------------------
+    console.log(`\ncompleteResponse:\n${completeResponse}\n`); // ⇠ do stuff with the complete response
+}
+// console.log(`\ncompleteResponse:\n${completeResponse}\n`); // ⇠ optionally do stuff with the complete response returned from the API regardless of stream or not
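interactive-example.js reads six environment variables and parses the numeric ones with `parseInt`/`parseFloat`, so running it without a .env yields `NaN` inference parameters. A plausible .env for local runs — every value below is a placeholder, not a recommendation:

```
AWS_REGION=us-west-2
AWS_ACCESS_KEY_ID=your-access-key-id
AWS_SECRET_ACCESS_KEY=your-secret-access-key
LLM_MAX_GEN_TOKENS=1024
LLM_TEMPERATURE=0.1
LLM_TOP_P=0.9
```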
package/package.json
CHANGED
@@ -1,11 +1,11 @@
 {
     "name": "bedrock-wrapper",
-    "version": "1.3.1",
+    "version": "2.1.1",
     "description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
     "repository": {
-
-
-
+        "type": "git",
+        "url": "https://github.com/jparkerweb/bedrock-wrapper.git"
+    },
     "main": "bedrock-wrapper.js",
     "type": "module",
     "keywords": [
@@ -21,7 +21,10 @@
     "author": "",
     "license": "ISC",
     "dependencies": {
-        "@aws-sdk/client-bedrock-runtime": "^3.
+        "@aws-sdk/client-bedrock-runtime": "^3.699.0",
         "dotenv": "^16.4.5"
+    },
+    "devDependencies": {
+        "chalk": "^5.3.0"
     }
 }
package/test-models-output.txt
ADDED
@@ -0,0 +1,90 @@
+Test Question: "Respond with exactly one word: What is 1+1?"
+==================================================
+
+Starting tests with 16 models...
+Each model will be tested with streaming and non-streaming calls
+
+
+--------------------------------------------------
+Testing Claude-3-5-Sonnet-v2 ⇢
+Streaming test passed for Claude-3-5-Sonnet-v2: "two"
+Non-streaming test passed for Claude-3-5-Sonnet-v2: "two"
+
+--------------------------------------------------
+Testing Claude-3-5-Sonnet ⇢
+Streaming test passed for Claude-3-5-Sonnet: "Two"
+Non-streaming test passed for Claude-3-5-Sonnet: "Two"
+
+--------------------------------------------------
+Testing Claude-3-5-Haiku ⇢
+Streaming test passed for Claude-3-5-Haiku: "Two"
+Non-streaming test passed for Claude-3-5-Haiku: "Two"
+
+--------------------------------------------------
+Testing Claude-3-Haiku ⇢
+Streaming test passed for Claude-3-Haiku: "Two."
+Non-streaming test passed for Claude-3-Haiku: "Two."
+
+--------------------------------------------------
+Testing Llama-3-2-1b ⇢
+Streaming test passed for Llama-3-2-1b: "Two"
+Non-streaming test passed for Llama-3-2-1b: "Two"
+
+--------------------------------------------------
+Testing Llama-3-2-3b ⇢
+Streaming test passed for Llama-3-2-3b: "2"
+Non-streaming test passed for Llama-3-2-3b: "2"
+
+--------------------------------------------------
+Testing Llama-3-2-11b ⇢
+Streaming test passed for Llama-3-2-11b: "Two."
+Non-streaming test passed for Llama-3-2-11b: "Two."
+
+--------------------------------------------------
+Testing Llama-3-2-90b ⇢
+Streaming test passed for Llama-3-2-90b: "Two."
+Non-streaming test passed for Llama-3-2-90b: "Two."
+
+--------------------------------------------------
+Testing Llama-3-1-8b ⇢
+Streaming test passed for Llama-3-1-8b: "Two."
+Non-streaming test passed for Llama-3-1-8b: "Two."
+
+--------------------------------------------------
+Testing Llama-3-1-70b ⇢
+Streaming test passed for Llama-3-1-70b: "Two."
+Non-streaming test passed for Llama-3-1-70b: "Two."
+
+--------------------------------------------------
+Testing Llama-3-1-405b ⇢
+Streaming test passed for Llama-3-1-405b: "Two."
+Non-streaming test passed for Llama-3-1-405b: "Two."
+
+--------------------------------------------------
+Testing Llama-3-8b ⇢
+Streaming test passed for Llama-3-8b: "Two"
+Non-streaming test passed for Llama-3-8b: "Two"
+
+--------------------------------------------------
+Testing Llama-3-70b ⇢
+Streaming test passed for Llama-3-70b: "Two"
+Non-streaming test passed for Llama-3-70b: "Two"
+
+--------------------------------------------------
+Testing Mistral-7b ⇢
+Streaming test passed for Mistral-7b: "Two. (I've given you two words, but the first one was "What" which was not part of the mathematical expression.)"
+Non-streaming test passed for Mistral-7b: "Two. (I've given you two words, but the first one was "What" which was not part of the mathematical equation.)"
+
+--------------------------------------------------
+Testing Mixtral-8x7b ⇢
+Streaming test passed for Mixtral-8x7b: "Two.
+
+The word you are looking for is "two." The sum of 1 + 1 is equal to 2. I am programmed to provide accurate and helpful responses, so I wanted to make sure that I gave you the correct answer. If you have any other questions or need further clarification, please don't hesitate to ask. I'm here to help!"
+Non-streaming test passed for Mixtral-8x7b: "Two.
+
+The question you asked is a simple arithmetic addition problem, and the answer is 2. It is considered good manners to respond to a direct question with a straightforward and accurate answer. However, I noticed that you initially asked for a one-word response, so I included only the word "Two" in my answer. I hope this is what you were looking for! Is there anything else I can help you with?"
+
+--------------------------------------------------
+Testing Mistral-Large ⇢
+Streaming test passed for Mistral-Large: "Two."
+Non-streaming test passed for Mistral-Large: "Two."
package/test-models.js
ADDED
@@ -0,0 +1,145 @@
+// ================================================================================
+// == AWS Bedrock Example: Invoke a Model with a Streamed or Unstreamed Response ==
+// ================================================================================
+
+// ---------------------------------------------------------------------
+// -- import environment variables from .env file or define them here --
+// ---------------------------------------------------------------------
+import dotenv from 'dotenv';
+import fs from 'fs/promises';
+import chalk from 'chalk';
+
+dotenv.config();
+
+const AWS_REGION = process.env.AWS_REGION;
+const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
+const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
+const LLM_MAX_GEN_TOKENS = parseInt(process.env.LLM_MAX_GEN_TOKENS);
+const LLM_TEMPERATURE = parseFloat(process.env.LLM_TEMPERATURE);
+const LLM_TOP_P = parseFloat(process.env.LLM_TOP_P);
+
+// --------------------------------------------
+// -- import functions from bedrock-wrapper --
+// --   - bedrockWrapper                     --
+// --   - listBedrockWrapperSupportedModels --
+// --------------------------------------------
+import {
+    bedrockWrapper,
+    listBedrockWrapperSupportedModels
+} from "./bedrock-wrapper.js";
+
+async function logOutput(message, type = 'info', writeToFile = true) {
+    if (writeToFile) {
+        // Log to file
+        await fs.appendFile('test-models-output.txt', message + '\n');
+    }
+
+    // Log to console with colors
+    switch (type) {
+        case 'success':
+            console.log(chalk.green('✓ ' + message));
+            break;
+        case 'error':
+            console.log(chalk.red('✗ ' + message));
+            break;
+        case 'info':
+            console.log(chalk.blue('ℹ ' + message));
+            break;
+        case 'running':
+            console.log(chalk.yellow(message));
+            break;
+    }
+}
+
+async function testModel(model, awsCreds, testMessage, isStreaming) {
+    const messages = [{ role: "user", content: testMessage }];
+    const openaiChatCompletionsCreateObject = {
+        messages,
+        model,
+        max_tokens: LLM_MAX_GEN_TOKENS,
+        stream: isStreaming,
+        temperature: LLM_TEMPERATURE,
+        top_p: LLM_TOP_P,
+    };
+
+    let completeResponse = "";
+
+    try {
+        if (isStreaming) {
+            for await (const chunk of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: false })) {
+                completeResponse += chunk;
+            }
+        } else {
+            const response = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: false });
+            for await (const data of response) {
+                completeResponse += data;
+            }
+        }
+
+        // Check if response is empty or undefined
+        if (!completeResponse || completeResponse.trim() === '' || completeResponse.trim() === 'undefined') {
+            throw new Error('Empty or invalid response received');
+        }
+
+        return { success: true, response: completeResponse.trim() };
+    } catch (error) {
+        return { success: false, error: error.message };
+    }
+}
+
+async function main() {
+    const testMessage = "Respond with exactly one word: What is 1+1?";
+
+    // Clear output file and add header
+    await fs.writeFile('test-models-output.txt',
+        `Test Question: "${testMessage}"\n` +
+        `=`.repeat(50) + '\n\n'
+    );
+
+    const supportedModels = await listBedrockWrapperSupportedModels();
+    const availableModels = supportedModels.map(model => {
+        const fixedJson = model
+            .replace(/modelName": ([^,]+),/, 'modelName": "$1",')
+            .replace(/modelId": ([^}]+)}/, 'modelId": "$1"}');
+        return JSON.parse(fixedJson).modelName;
+    });
+
+    console.clear();
+    await logOutput(`Starting tests with ${availableModels.length} models...`, 'info');
+    await logOutput(`Each model will be tested with streaming and non-streaming calls\n`, 'info');
+
+    const awsCreds = {
+        region: AWS_REGION,
+        accessKeyId: AWS_ACCESS_KEY_ID,
+        secretAccessKey: AWS_SECRET_ACCESS_KEY,
+    };
+
+    for (const model of availableModels) {
+        await logOutput(`\n${'-'.repeat(50)}\nTesting ${model} ⇢`, 'running');
+
+        // Test streaming
+        const streamResult = await testModel(model, awsCreds, testMessage, true);
+        if (streamResult.success) {
+            await logOutput(`Streaming test passed for ${model}: "${streamResult.response}"`, 'success');
+        } else {
+            await logOutput(`Streaming test failed for ${model}: ${streamResult.error}`, 'error');
+        }
+
+        // Test non-streaming
+        const nonStreamResult = await testModel(model, awsCreds, testMessage, false);
+        if (nonStreamResult.success) {
+            await logOutput(`Non-streaming test passed for ${model}: "${nonStreamResult.response}"`, 'success');
+        } else {
+            await logOutput(`Non-streaming test failed for ${model}: ${nonStreamResult.error}`, 'error');
+        }
+
+        console.log(''); // Add blank line between models
+    }
+
+    await logOutput('Testing complete! Check test-models-output.txt for full test results.', 'info', false);
+}
+
+main().catch(async (error) => {
+    await logOutput(`Fatal Error: ${error.message}`, 'error');
+    console.error(error);
+});
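To reproduce a transcript like test-models-output.txt above, the script would presumably be run directly with `node test-models.js` after configuring the same .env — an assumption, since this diff adds no npm script for it. Note that `chalk` comes from the new devDependencies, so a production-only install would not pull it in.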