bedrock-wrapper 1.3.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/FUNDING.yml +15 -0
- package/CHANGELOG.md +55 -0
- package/README.md +23 -18
- package/bedrock-models.js +63 -0
- package/bedrock-wrapper.js +72 -38
- package/{example-test.js → interactive-example.js} +152 -98
- package/package.json +5 -5
- package/test-models-output.txt +85 -0
- package/test-models.js +145 -0
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# These are supported funding model platforms
|
|
2
|
+
|
|
3
|
+
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
|
4
|
+
patreon: # Replace with a single Patreon username
|
|
5
|
+
open_collective: # Replace with a single Open Collective username
|
|
6
|
+
ko_fi: jparkerweb
|
|
7
|
+
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
|
8
|
+
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
|
9
|
+
liberapay: # Replace with a single Liberapay username
|
|
10
|
+
issuehunt: # Replace with a single IssueHunt username
|
|
11
|
+
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
|
|
12
|
+
polar: # Replace with a single Polar username
|
|
13
|
+
buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
|
|
14
|
+
thanks_dev: # Replace with a single thanks.dev username
|
|
15
|
+
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
package/CHANGELOG.md
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
All notable changes to this project will be documented in this file.
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
## [2.0.0] - 2024-10-31 (Claude Sonnet & Haiku)
|
|
6
|
+
### Added
|
|
7
|
+
- Support for Anthropic Sonnet & Haiku models
|
|
8
|
+
- Claude-3-5-Sonnet-v2
|
|
9
|
+
- Claude-3-5-Sonnet
|
|
10
|
+
- Claude-3-Haiku
|
|
11
|
+
- Interactive example script for testing models
|
|
12
|
+
- Testing script with streaming and non-streaming support for all models
|
|
13
|
+
- Stardardize output to be a string via Streamed and non-Streamed responses
|
|
14
|
+
> **NOTE:** This is a breaking change for previous non-streaming responses. Existing streaming responses will remain unchanged.
|
|
15
|
+
|
|
16
|
+
### Changed
|
|
17
|
+
- Complete architecture overhaul for better model support
|
|
18
|
+
- Improved message handling with role-based formatting
|
|
19
|
+
- Enhanced error handling and response processing
|
|
20
|
+
- Standardized model configuration format
|
|
21
|
+
- Updated AWS SDK integration
|
|
22
|
+
|
|
23
|
+
### Technical Details
|
|
24
|
+
- Implemented messages API support for compatible models
|
|
25
|
+
- Added system message handling as separate field where supported
|
|
26
|
+
- Configurable token limits per model
|
|
27
|
+
- Flexible response parsing with chunk/non-chunk handling
|
|
28
|
+
- Cross-region profile support for certain models
|
|
29
|
+
|
|
30
|
+
## [1.3.0] - 2024-07-24 (Llama3.2)
|
|
31
|
+
### Added
|
|
32
|
+
- Support for Llama 3.2 series models
|
|
33
|
+
- Llama-3-2-1b
|
|
34
|
+
- Llama-3-2-3b
|
|
35
|
+
- Llama-3-2-11b
|
|
36
|
+
- Llama-3-2-90b
|
|
37
|
+
|
|
38
|
+
## [1.1.0] - 2024-07-24 (Llama3.1)
|
|
39
|
+
### Added
|
|
40
|
+
- Support for Llama 3.1 series models
|
|
41
|
+
- Llama-3-1-8b
|
|
42
|
+
- Llama-3-1-70b
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
## [1.0.14] - 2024-05-06 (Initial Stable Release)
|
|
46
|
+
### Added
|
|
47
|
+
- Initial stablerelease of Bedrock Wrapper
|
|
48
|
+
- Basic AWS Bedrock integration
|
|
49
|
+
- OpenAI-compatible API object support
|
|
50
|
+
- Basic model support
|
|
51
|
+
- Llama-3-8b
|
|
52
|
+
- Llama-3-70b
|
|
53
|
+
- Mistral-7b
|
|
54
|
+
- Mixtral-8x7b
|
|
55
|
+
- Mistral-Large
|
package/README.md
CHANGED
|
@@ -76,13 +76,10 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
|
|
|
76
76
|
```javascript
|
|
77
77
|
// create a variable to hold the complete response
|
|
78
78
|
let completeResponse = "";
|
|
79
|
-
// invoke the streamed bedrock api response
|
|
80
79
|
if (!openaiChatCompletionsCreateObject.stream){ // invoke the unstreamed bedrock api response
|
|
81
80
|
const response = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject);
|
|
82
81
|
for await (const data of response) {
|
|
83
|
-
|
|
84
|
-
const jsonResponse = JSON.parse(jsonString);
|
|
85
|
-
completeResponse += jsonResponse.generation;
|
|
82
|
+
completeResponse += data;
|
|
86
83
|
}
|
|
87
84
|
// ----------------------------------------------------
|
|
88
85
|
// -- unstreamed complete response is available here --
|
|
@@ -94,20 +91,23 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
|
|
|
94
91
|
|
|
95
92
|
### Supported Models
|
|
96
93
|
|
|
97
|
-
| modelName
|
|
98
|
-
|
|
99
|
-
|
|
|
100
|
-
|
|
|
101
|
-
|
|
|
102
|
-
| Llama-3-2-
|
|
103
|
-
| Llama-3-
|
|
104
|
-
| Llama-3-
|
|
105
|
-
| Llama-3-
|
|
106
|
-
| Llama-3-8b
|
|
107
|
-
| Llama-3-70b
|
|
108
|
-
|
|
|
109
|
-
|
|
|
110
|
-
|
|
|
94
|
+
| modelName | modelId |
|
|
95
|
+
|----------------------|-------------------------------------------|
|
|
96
|
+
| Claude-3-5-Sonnet-v2 | anthropic.claude-3-5-sonnet-20241022-v2:0 |
|
|
97
|
+
| Claude-3-5-Sonnet | anthropic.claude-3-5-sonnet-20240620-v1:0 |
|
|
98
|
+
| Claude-3-Haiku | anthropic.claude-3-haiku-20240307-v1:0 |
|
|
99
|
+
| Llama-3-2-1b | us.meta.llama3-2-1b-instruct-v1:0 |
|
|
100
|
+
| Llama-3-2-3b | us.meta.llama3-2-3b-instruct-v1:0 |
|
|
101
|
+
| Llama-3-2-11b | us.meta.llama3-2-11b-instruct-v1:0 |
|
|
102
|
+
| Llama-3-2-90b | us.meta.llama3-2-90b-instruct-v1:0 |
|
|
103
|
+
| Llama-3-1-8b | meta.llama3-1-8b-instruct-v1:0 |
|
|
104
|
+
| Llama-3-1-70b | meta.llama3-1-70b-instruct-v1:0 |
|
|
105
|
+
| Llama-3-1-405b | meta.llama3-1-405b-instruct-v1:0 |
|
|
106
|
+
| Llama-3-8b | meta.llama3-8b-instruct-v1:0 |
|
|
107
|
+
| Llama-3-70b | meta.llama3-70b-instruct-v1:0 |
|
|
108
|
+
| Mistral-7b | mistral.mistral-7b-instruct-v0:2 |
|
|
109
|
+
| Mixtral-8x7b | mistral.mixtral-8x7b-instruct-v0:1 |
|
|
110
|
+
| Mistral-Large | mistral.mistral-large-2402-v1:0 |
|
|
111
111
|
|
|
112
112
|
To return the list progrmatically you can import and call `listBedrockWrapperSupportedModels`:
|
|
113
113
|
```javascript
|
|
@@ -135,3 +135,8 @@ In case you missed it at the beginning of this doc, for an even easier setup, us
|
|
|
135
135
|
- [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create)
|
|
136
136
|
- [AWS Bedrock](https://aws.amazon.com/bedrock/)
|
|
137
137
|
- [AWS SDK for JavaScript](https://aws.amazon.com/sdk-for-javascript/)
|
|
138
|
+
|
|
139
|
+
---
|
|
140
|
+
|
|
141
|
+
Please consider sending me a tip to support my work 😀
|
|
142
|
+
# [🍵 tip me here](https://ko-fi.com/jparkerweb)
|
package/bedrock-models.js
CHANGED
|
@@ -6,6 +6,57 @@
|
|
|
6
6
|
// https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/cross-region-inference
|
|
7
7
|
|
|
8
8
|
export const bedrock_models = [
|
|
9
|
+
{
|
|
10
|
+
// ==========================
|
|
11
|
+
// == Claude 3.5 Sonnet v2 ==
|
|
12
|
+
// ==========================
|
|
13
|
+
"modelName": "Claude-3-5-Sonnet-v2",
|
|
14
|
+
"modelId": "anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
15
|
+
"messages_api": true,
|
|
16
|
+
"system_as_separate_field": true,
|
|
17
|
+
"display_role_names": true,
|
|
18
|
+
"max_tokens_param_name": "max_tokens",
|
|
19
|
+
"max_supported_response_tokens": 8192,
|
|
20
|
+
"response_chunk_element": "delta.text",
|
|
21
|
+
"response_nonchunk_element": "content[0].text",
|
|
22
|
+
"special_request_schema": {
|
|
23
|
+
"anthropic_version": "bedrock-2023-05-31"
|
|
24
|
+
}
|
|
25
|
+
},
|
|
26
|
+
{
|
|
27
|
+
// =======================
|
|
28
|
+
// == Claude 3.5 Sonnet ==
|
|
29
|
+
// =======================
|
|
30
|
+
"modelName": "Claude-3-5-Sonnet",
|
|
31
|
+
"modelId": "anthropic.claude-3-5-sonnet-20240620-v1:0",
|
|
32
|
+
"messages_api": true,
|
|
33
|
+
"system_as_separate_field": true,
|
|
34
|
+
"display_role_names": true,
|
|
35
|
+
"max_tokens_param_name": "max_tokens",
|
|
36
|
+
"max_supported_response_tokens": 8192,
|
|
37
|
+
"response_chunk_element": "delta.text",
|
|
38
|
+
"response_nonchunk_element": "content[0].text",
|
|
39
|
+
"special_request_schema": {
|
|
40
|
+
"anthropic_version": "bedrock-2023-05-31"
|
|
41
|
+
}
|
|
42
|
+
},
|
|
43
|
+
{
|
|
44
|
+
// ====================
|
|
45
|
+
// == Claude 3 Haiku ==
|
|
46
|
+
// ====================
|
|
47
|
+
"modelName": "Claude-3-Haiku",
|
|
48
|
+
"modelId": "anthropic.claude-3-haiku-20240307-v1:0",
|
|
49
|
+
"messages_api": true,
|
|
50
|
+
"system_as_separate_field": true,
|
|
51
|
+
"display_role_names": true,
|
|
52
|
+
"max_tokens_param_name": "max_tokens",
|
|
53
|
+
"max_supported_response_tokens": 8192,
|
|
54
|
+
"response_chunk_element": "delta.text",
|
|
55
|
+
"response_nonchunk_element": "content[0].text",
|
|
56
|
+
"special_request_schema": {
|
|
57
|
+
"anthropic_version": "bedrock-2023-05-31"
|
|
58
|
+
}
|
|
59
|
+
},
|
|
9
60
|
{
|
|
10
61
|
// ==================
|
|
11
62
|
// == Llama 3.2 1b ==
|
|
@@ -13,6 +64,7 @@ export const bedrock_models = [
|
|
|
13
64
|
"modelName": "Llama-3-2-1b",
|
|
14
65
|
// "modelId": "meta.llama3-2-1b-instruct-v1:0",
|
|
15
66
|
"modelId": "us.meta.llama3-2-1b-instruct-v1:0",
|
|
67
|
+
"messages_api": false,
|
|
16
68
|
"bos_text": "<|begin_of_text|>",
|
|
17
69
|
"role_system_message_prefix": "",
|
|
18
70
|
"role_system_message_suffix": "",
|
|
@@ -39,6 +91,7 @@ export const bedrock_models = [
|
|
|
39
91
|
"modelName": "Llama-3-2-3b",
|
|
40
92
|
// "modelId": "meta.llama3-2-3b-instruct-v1:0",
|
|
41
93
|
"modelId": "us.meta.llama3-2-3b-instruct-v1:0",
|
|
94
|
+
"messages_api": false,
|
|
42
95
|
"bos_text": "<|begin_of_text|>",
|
|
43
96
|
"role_system_message_prefix": "",
|
|
44
97
|
"role_system_message_suffix": "",
|
|
@@ -65,6 +118,7 @@ export const bedrock_models = [
|
|
|
65
118
|
"modelName": "Llama-3-2-11b",
|
|
66
119
|
// "modelId": "meta.llama3-2-11b-instruct-v1:0",
|
|
67
120
|
"modelId": "us.meta.llama3-2-11b-instruct-v1:0",
|
|
121
|
+
"messages_api": false,
|
|
68
122
|
"bos_text": "<|begin_of_text|>",
|
|
69
123
|
"role_system_message_prefix": "",
|
|
70
124
|
"role_system_message_suffix": "",
|
|
@@ -91,6 +145,7 @@ export const bedrock_models = [
|
|
|
91
145
|
"modelName": "Llama-3-2-90b",
|
|
92
146
|
// "modelId": "meta.llama3-2-90b-instruct-v1:0",
|
|
93
147
|
"modelId": "us.meta.llama3-2-90b-instruct-v1:0",
|
|
148
|
+
"messages_api": false,
|
|
94
149
|
"bos_text": "<|begin_of_text|>",
|
|
95
150
|
"role_system_message_prefix": "",
|
|
96
151
|
"role_system_message_suffix": "",
|
|
@@ -116,6 +171,7 @@ export const bedrock_models = [
|
|
|
116
171
|
// ==================
|
|
117
172
|
"modelName": "Llama-3-1-8b",
|
|
118
173
|
"modelId": "meta.llama3-1-8b-instruct-v1:0",
|
|
174
|
+
"messages_api": false,
|
|
119
175
|
"bos_text": "<|begin_of_text|>",
|
|
120
176
|
"role_system_message_prefix": "",
|
|
121
177
|
"role_system_message_suffix": "",
|
|
@@ -141,6 +197,7 @@ export const bedrock_models = [
|
|
|
141
197
|
// ===================
|
|
142
198
|
"modelName": "Llama-3-1-70b",
|
|
143
199
|
"modelId": "meta.llama3-1-70b-instruct-v1:0",
|
|
200
|
+
"messages_api": false,
|
|
144
201
|
"bos_text": "<|begin_of_text|>",
|
|
145
202
|
"role_system_message_prefix": "",
|
|
146
203
|
"role_system_message_suffix": "",
|
|
@@ -166,6 +223,7 @@ export const bedrock_models = [
|
|
|
166
223
|
// ====================
|
|
167
224
|
"modelName": "Llama-3-1-405b",
|
|
168
225
|
"modelId": "meta.llama3-1-405b-instruct-v1:0",
|
|
226
|
+
"messages_api": false,
|
|
169
227
|
"bos_text": "<|begin_of_text|>",
|
|
170
228
|
"role_system_message_prefix": "",
|
|
171
229
|
"role_system_message_suffix": "",
|
|
@@ -191,6 +249,7 @@ export const bedrock_models = [
|
|
|
191
249
|
// ================
|
|
192
250
|
"modelName": "Llama-3-8b",
|
|
193
251
|
"modelId": "meta.llama3-8b-instruct-v1:0",
|
|
252
|
+
"messages_api": false,
|
|
194
253
|
"bos_text": "<|begin_of_text|>",
|
|
195
254
|
"role_system_message_prefix": "",
|
|
196
255
|
"role_system_message_suffix": "",
|
|
@@ -216,6 +275,7 @@ export const bedrock_models = [
|
|
|
216
275
|
// =================
|
|
217
276
|
"modelName": "Llama-3-70b",
|
|
218
277
|
"modelId": "meta.llama3-70b-instruct-v1:0",
|
|
278
|
+
"messages_api": false,
|
|
219
279
|
"bos_text": "<|begin_of_text|>",
|
|
220
280
|
"role_system_message_prefix": "",
|
|
221
281
|
"role_system_message_suffix": "",
|
|
@@ -241,6 +301,7 @@ export const bedrock_models = [
|
|
|
241
301
|
// ================
|
|
242
302
|
"modelName": "Mistral-7b",
|
|
243
303
|
"modelId": "mistral.mistral-7b-instruct-v0:2",
|
|
304
|
+
"messages_api": false,
|
|
244
305
|
"bos_text": "<s>",
|
|
245
306
|
"role_system_message_prefix": "",
|
|
246
307
|
"role_system_message_suffix": "",
|
|
@@ -266,6 +327,7 @@ export const bedrock_models = [
|
|
|
266
327
|
// ==================
|
|
267
328
|
"modelName": "Mixtral-8x7b",
|
|
268
329
|
"modelId": "mistral.mixtral-8x7b-instruct-v0:1",
|
|
330
|
+
"messages_api": false,
|
|
269
331
|
"bos_text": "<s>",
|
|
270
332
|
"role_system_message_prefix": "",
|
|
271
333
|
"role_system_message_suffix": "",
|
|
@@ -291,6 +353,7 @@ export const bedrock_models = [
|
|
|
291
353
|
// ===================
|
|
292
354
|
"modelName": "Mistral-Large",
|
|
293
355
|
"modelId": "mistral.mistral-large-2402-v1:0",
|
|
356
|
+
"messages_api": false,
|
|
294
357
|
"bos_text": "<s>",
|
|
295
358
|
"role_system_message_prefix": "",
|
|
296
359
|
"role_system_message_suffix": "",
|
package/bedrock-wrapper.js
CHANGED
|
@@ -41,9 +41,16 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
|
|
|
41
41
|
|
|
42
42
|
// cleanup message content before formatting prompt message
|
|
43
43
|
let message_cleaned = [];
|
|
44
|
+
let system_message = "";
|
|
45
|
+
|
|
44
46
|
for (let i = 0; i < messages.length; i++) {
|
|
45
47
|
if (messages[i].content !== "") {
|
|
46
|
-
|
|
48
|
+
// Extract system message only if model requires it as separate field
|
|
49
|
+
if (awsModel.system_as_separate_field && messages[i].role === "system") {
|
|
50
|
+
system_message = messages[i].content;
|
|
51
|
+
} else {
|
|
52
|
+
message_cleaned.push(messages[i]);
|
|
53
|
+
}
|
|
47
54
|
} else if (awsModel.display_role_names) {
|
|
48
55
|
message_cleaned.push(messages[i]);
|
|
49
56
|
}
|
|
@@ -53,58 +60,77 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
|
|
|
53
60
|
}
|
|
54
61
|
}
|
|
55
62
|
|
|
63
|
+
let prompt;
|
|
64
|
+
|
|
56
65
|
// format prompt message from message array
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
if
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
prompt +=
|
|
66
|
-
if (
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
if (
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
if (
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
66
|
+
if (awsModel.messages_api) {
|
|
67
|
+
// convert message array to prompt object if model supports messages api
|
|
68
|
+
prompt = message_cleaned;
|
|
69
|
+
} else {
|
|
70
|
+
// convert message array to prompt string if model does not support messages api
|
|
71
|
+
prompt = awsModel.bos_text;
|
|
72
|
+
let eom_text_inserted = false;
|
|
73
|
+
for (let i = 0; i < message_cleaned.length; i++) {
|
|
74
|
+
prompt += "\n";
|
|
75
|
+
if (message_cleaned[i].role === "system") {
|
|
76
|
+
prompt += awsModel.role_system_message_prefix;
|
|
77
|
+
prompt += awsModel.role_system_prefix;
|
|
78
|
+
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
79
|
+
prompt += awsModel.role_system_suffix;
|
|
80
|
+
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
81
|
+
prompt += message_cleaned[i].content;
|
|
82
|
+
prompt += awsModel.role_system_message_suffix;
|
|
83
|
+
} else if (message_cleaned[i].role === "user") {
|
|
84
|
+
prompt += awsModel.role_user_message_prefix;
|
|
85
|
+
prompt += awsModel.role_user_prefix;
|
|
86
|
+
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
87
|
+
prompt += awsModel.role_user_suffix;
|
|
88
|
+
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
89
|
+
prompt += message_cleaned[i].content;
|
|
90
|
+
prompt += awsModel.role_user_message_suffix;
|
|
91
|
+
} else if (message_cleaned[i].role === "assistant") {
|
|
92
|
+
prompt += awsModel.role_assistant_message_prefix;
|
|
93
|
+
prompt += awsModel.role_assistant_prefix;
|
|
94
|
+
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
95
|
+
prompt += awsModel.role_assistant_suffix;
|
|
96
|
+
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
97
|
+
prompt += message_cleaned[i].content;
|
|
98
|
+
prompt += awsModel.role_assistant_message_suffix;
|
|
99
|
+
}
|
|
100
|
+
if (message_cleaned[i+1] && message_cleaned[i+1].content === "") {
|
|
101
|
+
prompt += `\n${awsModel.eom_text}`;
|
|
102
|
+
eom_text_inserted = true;
|
|
103
|
+
} else if ((i+1) === (message_cleaned.length - 1) && !eom_text_inserted) {
|
|
104
|
+
prompt += `\n${awsModel.eom_text}`;
|
|
105
|
+
}
|
|
91
106
|
}
|
|
92
107
|
}
|
|
93
108
|
|
|
94
109
|
// logging
|
|
95
110
|
if (logging) {
|
|
96
|
-
|
|
111
|
+
if (awsModel.system_as_separate_field && system_message) {
|
|
112
|
+
console.log(`\nsystem: ${system_message}`);
|
|
113
|
+
}
|
|
114
|
+
console.log(`\nprompt: ${typeof prompt === 'object' ? JSON.stringify(prompt) : prompt}\n`);
|
|
97
115
|
}
|
|
98
116
|
|
|
99
117
|
const max_gen_tokens = max_tokens <= awsModel.max_supported_response_tokens ? max_tokens : awsModel.max_supported_response_tokens;
|
|
100
118
|
|
|
101
119
|
// Format the request payload using the model's native structure.
|
|
102
|
-
const request = {
|
|
120
|
+
const request = awsModel.messages_api ? {
|
|
121
|
+
messages: prompt,
|
|
122
|
+
...(awsModel.system_as_separate_field && system_message && { system: system_message }), // Only add system field if model requires it and there's a system message
|
|
123
|
+
[awsModel.max_tokens_param_name]: max_gen_tokens,
|
|
124
|
+
temperature: temperature,
|
|
125
|
+
top_p: top_p,
|
|
126
|
+
...awsModel.special_request_schema
|
|
127
|
+
} : {
|
|
103
128
|
prompt,
|
|
104
129
|
// Optional inference parameters:
|
|
105
130
|
[awsModel.max_tokens_param_name]: max_gen_tokens,
|
|
106
131
|
temperature: temperature,
|
|
107
132
|
top_p: top_p,
|
|
133
|
+
...awsModel.special_request_schema
|
|
108
134
|
};
|
|
109
135
|
|
|
110
136
|
// Create a Bedrock Runtime client in the AWS Region of your choice
|
|
@@ -139,7 +165,15 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
|
|
|
139
165
|
modelId: awsModel.modelId,
|
|
140
166
|
}),
|
|
141
167
|
);
|
|
142
|
-
|
|
168
|
+
|
|
169
|
+
const decodedBodyResponse = JSON.parse(new TextDecoder().decode(apiResponse.body));
|
|
170
|
+
let result;
|
|
171
|
+
if (awsModel.response_nonchunk_element) {
|
|
172
|
+
result = getValueByPath(decodedBodyResponse, awsModel.response_nonchunk_element);
|
|
173
|
+
} else {
|
|
174
|
+
result = getValueByPath(decodedBodyResponse, awsModel.response_chunk_element);
|
|
175
|
+
}
|
|
176
|
+
yield result;
|
|
143
177
|
}
|
|
144
178
|
}
|
|
145
179
|
|
|
@@ -1,98 +1,152 @@
|
|
|
1
|
-
//
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
//
|
|
6
|
-
//
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
const
|
|
14
|
-
const
|
|
15
|
-
const
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
//
|
|
21
|
-
//
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
//
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
}
|
|
86
|
-
}
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
1
|
+
// Clear terminal
|
|
2
|
+
console.clear();
|
|
3
|
+
|
|
4
|
+
// ================================================================================
|
|
5
|
+
// == AWS Bedrock Example: Invoke a Model with a Streamed or Unstreamed Response ==
|
|
6
|
+
// ================================================================================
|
|
7
|
+
|
|
8
|
+
// ---------------------------------------------------------------------
|
|
9
|
+
// -- import environment variables from .env file or define them here --
|
|
10
|
+
// ---------------------------------------------------------------------
|
|
11
|
+
import dotenv from 'dotenv';
|
|
12
|
+
dotenv.config();
|
|
13
|
+
const AWS_REGION = process.env.AWS_REGION;
|
|
14
|
+
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
|
|
15
|
+
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
|
|
16
|
+
const LLM_MAX_GEN_TOKENS = parseInt(process.env.LLM_MAX_GEN_TOKENS);
|
|
17
|
+
const LLM_TEMPERATURE = parseFloat(process.env.LLM_TEMPERATURE);
|
|
18
|
+
const LLM_TOP_P = parseFloat(process.env.LLM_TOP_P);
|
|
19
|
+
|
|
20
|
+
// --------------------------------------------
|
|
21
|
+
// -- import functions from bedrock-wrapper --
|
|
22
|
+
// -- - bedrockWrapper --
|
|
23
|
+
// -- - listBedrockWrapperSupportedModels --
|
|
24
|
+
// --------------------------------------------
|
|
25
|
+
import {
|
|
26
|
+
bedrockWrapper,
|
|
27
|
+
listBedrockWrapperSupportedModels
|
|
28
|
+
} from "./bedrock-wrapper.js";
|
|
29
|
+
|
|
30
|
+
// ----------------------------------------------
|
|
31
|
+
// -- Get and process supported models --
|
|
32
|
+
// ----------------------------------------------
|
|
33
|
+
const supportedModels = await listBedrockWrapperSupportedModels();
|
|
34
|
+
const availableModels = supportedModels.map(model => {
|
|
35
|
+
// Fix both modelName and modelId by adding quotes
|
|
36
|
+
const fixedJson = model
|
|
37
|
+
.replace(/modelName": ([^,]+),/, 'modelName": "$1",')
|
|
38
|
+
.replace(/modelId": ([^}]+)}/, 'modelId": "$1"}');
|
|
39
|
+
return JSON.parse(fixedJson).modelName;
|
|
40
|
+
});
|
|
41
|
+
|
|
42
|
+
// Display models with numbers
|
|
43
|
+
console.log('\nAvailable Models:');
|
|
44
|
+
availableModels.forEach((model, index) => {
|
|
45
|
+
console.log(`${index + 1}. ${model}`);
|
|
46
|
+
});
|
|
47
|
+
|
|
48
|
+
// Prompt user for input
|
|
49
|
+
import readline from 'readline';
|
|
50
|
+
const rl = readline.createInterface({
|
|
51
|
+
input: process.stdin,
|
|
52
|
+
output: process.stdout
|
|
53
|
+
});
|
|
54
|
+
|
|
55
|
+
// Get user selection
|
|
56
|
+
const selectedModel = await new Promise((resolve) => {
|
|
57
|
+
rl.question('\nEnter the number of the model you want to use: ', (answer) => {
|
|
58
|
+
const selection = parseInt(answer) - 1;
|
|
59
|
+
if (selection >= 0 && selection < availableModels.length) {
|
|
60
|
+
resolve(availableModels[selection]);
|
|
61
|
+
} else {
|
|
62
|
+
console.log('Invalid selection, defaulting to Claude-3-5-Sonnet');
|
|
63
|
+
resolve('Claude-3-5-Sonnet-v2');
|
|
64
|
+
}
|
|
65
|
+
});
|
|
66
|
+
});
|
|
67
|
+
|
|
68
|
+
// Ask for streaming preference
|
|
69
|
+
const shouldStream = await new Promise((resolve) => {
|
|
70
|
+
rl.question('\nDo you want streamed responses? (Y/n): ', (answer) => {
|
|
71
|
+
resolve(answer.toLowerCase() !== 'n');
|
|
72
|
+
});
|
|
73
|
+
});
|
|
74
|
+
|
|
75
|
+
console.log(`\nUsing model: ${selectedModel}`);
|
|
76
|
+
console.log(`Streaming: ${shouldStream ? 'enabled' : 'disabled'}\n`);
|
|
77
|
+
|
|
78
|
+
const defaultPrompt = "Describe what the openai api standard used by lots of serverless LLM api providers is and why it has been widely adopted.";
|
|
79
|
+
|
|
80
|
+
// Get user prompt
|
|
81
|
+
const userPrompt = await new Promise((resolve) => {
|
|
82
|
+
rl.question(`\nEnter your prompt (press Enter to use default):\n> `, (answer) => {
|
|
83
|
+
resolve(answer.trim() || defaultPrompt);
|
|
84
|
+
rl.close(); // Only close after all prompts are complete
|
|
85
|
+
});
|
|
86
|
+
});
|
|
87
|
+
|
|
88
|
+
// -----------------------------------------------
|
|
89
|
+
// -- example prompt in `messages` array format --
|
|
90
|
+
// -----------------------------------------------
|
|
91
|
+
const messages = [
|
|
92
|
+
{
|
|
93
|
+
role: "system",
|
|
94
|
+
content: "You are a helpful AI assistant that follows instructions extremely well. Answer the user questions accurately. Think step by step before answering the question.",
|
|
95
|
+
},
|
|
96
|
+
{
|
|
97
|
+
role: "user",
|
|
98
|
+
content: userPrompt,
|
|
99
|
+
},
|
|
100
|
+
{
|
|
101
|
+
role: "assistant",
|
|
102
|
+
content: "",
|
|
103
|
+
},
|
|
104
|
+
];
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
// ---------------------------------------------------
|
|
108
|
+
// -- create an object to hold your AWS credentials --
|
|
109
|
+
// ---------------------------------------------------
|
|
110
|
+
const awsCreds = {
|
|
111
|
+
region: AWS_REGION,
|
|
112
|
+
accessKeyId: AWS_ACCESS_KEY_ID,
|
|
113
|
+
secretAccessKey: AWS_SECRET_ACCESS_KEY,
|
|
114
|
+
};
|
|
115
|
+
// ----------------------------------------------------------------------
|
|
116
|
+
// -- create an object that copies your openai chat completions object --
|
|
117
|
+
// ----------------------------------------------------------------------
|
|
118
|
+
const openaiChatCompletionsCreateObject = {
|
|
119
|
+
"messages": messages,
|
|
120
|
+
"model": selectedModel,
|
|
121
|
+
"max_tokens": LLM_MAX_GEN_TOKENS,
|
|
122
|
+
"stream": shouldStream,
|
|
123
|
+
"temperature": LLM_TEMPERATURE,
|
|
124
|
+
"top_p": LLM_TOP_P,
|
|
125
|
+
};
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
// ------------------------------------------------------------
|
|
129
|
+
// -- invoke the streamed or unstreamed bedrock api response --
|
|
130
|
+
// ------------------------------------------------------------
|
|
131
|
+
// create a variable to hold the complete response
|
|
132
|
+
let completeResponse = "";
|
|
133
|
+
// streamed call
|
|
134
|
+
if (openaiChatCompletionsCreateObject.stream) {
|
|
135
|
+
for await (const chunk of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging:true })) {
|
|
136
|
+
completeResponse += chunk;
|
|
137
|
+
// ---------------------------------------------------
|
|
138
|
+
// -- each chunk is streamed as it is received here --
|
|
139
|
+
// ---------------------------------------------------
|
|
140
|
+
process.stdout.write(chunk); // ⇠ do stuff with the streamed chunk
|
|
141
|
+
}
|
|
142
|
+
} else { // unstreamed call
|
|
143
|
+
const response = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging:true });
|
|
144
|
+
for await (const data of response) {
|
|
145
|
+
completeResponse += data;
|
|
146
|
+
}
|
|
147
|
+
// ----------------------------------------------------
|
|
148
|
+
// -- unstreamed complete response is available here --
|
|
149
|
+
// ----------------------------------------------------
|
|
150
|
+
console.log(`\n\completeResponse:\n${completeResponse}\n`); // ⇠ do stuff with the complete response
|
|
151
|
+
}
|
|
152
|
+
// console.log(`\n\completeResponse:\n${completeResponse}\n`); // ⇠ optional do stuff with the complete response returned from the API reguardless of stream or not
|
package/package.json
CHANGED
|
@@ -1,11 +1,11 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "bedrock-wrapper",
|
|
3
|
-
"version": "
|
|
3
|
+
"version": "2.0.0",
|
|
4
4
|
"description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
|
|
5
5
|
"repository": {
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
6
|
+
"type": "git",
|
|
7
|
+
"url": "https://github.com/jparkerweb/bedrock-wrapper.git"
|
|
8
|
+
},
|
|
9
9
|
"main": "bedrock-wrapper.js",
|
|
10
10
|
"type": "module",
|
|
11
11
|
"keywords": [
|
|
@@ -21,7 +21,7 @@
|
|
|
21
21
|
"author": "",
|
|
22
22
|
"license": "ISC",
|
|
23
23
|
"dependencies": {
|
|
24
|
-
"@aws-sdk/client-bedrock-runtime": "^3.
|
|
24
|
+
"@aws-sdk/client-bedrock-runtime": "^3.682.0",
|
|
25
25
|
"dotenv": "^16.4.5"
|
|
26
26
|
}
|
|
27
27
|
}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
Test Question: "Respond with exactly one word: What is 1+1?"
|
|
2
|
+
==================================================
|
|
3
|
+
|
|
4
|
+
Starting tests with 15 models...
|
|
5
|
+
Each model will be tested with streaming and non-streaming calls
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
--------------------------------------------------
|
|
9
|
+
Testing Claude-3-5-Sonnet-v2 ⇢
|
|
10
|
+
Streaming test passed for Claude-3-5-Sonnet-v2: "two"
|
|
11
|
+
Non-streaming test passed for Claude-3-5-Sonnet-v2: "two"
|
|
12
|
+
|
|
13
|
+
--------------------------------------------------
|
|
14
|
+
Testing Claude-3-5-Sonnet ⇢
|
|
15
|
+
Streaming test passed for Claude-3-5-Sonnet: "Two"
|
|
16
|
+
Non-streaming test passed for Claude-3-5-Sonnet: "Two"
|
|
17
|
+
|
|
18
|
+
--------------------------------------------------
|
|
19
|
+
Testing Claude-3-Haiku ⇢
|
|
20
|
+
Streaming test passed for Claude-3-Haiku: "Two."
|
|
21
|
+
Non-streaming test passed for Claude-3-Haiku: "Two."
|
|
22
|
+
|
|
23
|
+
--------------------------------------------------
|
|
24
|
+
Testing Llama-3-2-1b ⇢
|
|
25
|
+
Streaming test passed for Llama-3-2-1b: "Two"
|
|
26
|
+
Non-streaming test passed for Llama-3-2-1b: "Two"
|
|
27
|
+
|
|
28
|
+
--------------------------------------------------
|
|
29
|
+
Testing Llama-3-2-3b ⇢
|
|
30
|
+
Streaming test passed for Llama-3-2-3b: "2"
|
|
31
|
+
Non-streaming test passed for Llama-3-2-3b: "2"
|
|
32
|
+
|
|
33
|
+
--------------------------------------------------
|
|
34
|
+
Testing Llama-3-2-11b ⇢
|
|
35
|
+
Streaming test passed for Llama-3-2-11b: "Two."
|
|
36
|
+
Non-streaming test passed for Llama-3-2-11b: "Two."
|
|
37
|
+
|
|
38
|
+
--------------------------------------------------
|
|
39
|
+
Testing Llama-3-2-90b ⇢
|
|
40
|
+
Streaming test passed for Llama-3-2-90b: "Two."
|
|
41
|
+
Non-streaming test passed for Llama-3-2-90b: "Two."
|
|
42
|
+
|
|
43
|
+
--------------------------------------------------
|
|
44
|
+
Testing Llama-3-1-8b ⇢
|
|
45
|
+
Streaming test passed for Llama-3-1-8b: "Two."
|
|
46
|
+
Non-streaming test passed for Llama-3-1-8b: "Two."
|
|
47
|
+
|
|
48
|
+
--------------------------------------------------
|
|
49
|
+
Testing Llama-3-1-70b ⇢
|
|
50
|
+
Streaming test passed for Llama-3-1-70b: "Two."
|
|
51
|
+
Non-streaming test passed for Llama-3-1-70b: "Two."
|
|
52
|
+
|
|
53
|
+
--------------------------------------------------
|
|
54
|
+
Testing Llama-3-1-405b ⇢
|
|
55
|
+
Streaming test passed for Llama-3-1-405b: "Two"
|
|
56
|
+
Non-streaming test passed for Llama-3-1-405b: "Two."
|
|
57
|
+
|
|
58
|
+
--------------------------------------------------
|
|
59
|
+
Testing Llama-3-8b ⇢
|
|
60
|
+
Streaming test passed for Llama-3-8b: "Two"
|
|
61
|
+
Non-streaming test passed for Llama-3-8b: "Two"
|
|
62
|
+
|
|
63
|
+
--------------------------------------------------
|
|
64
|
+
Testing Llama-3-70b ⇢
|
|
65
|
+
Streaming test passed for Llama-3-70b: "Two"
|
|
66
|
+
Non-streaming test passed for Llama-3-70b: "Two"
|
|
67
|
+
|
|
68
|
+
--------------------------------------------------
|
|
69
|
+
Testing Mistral-7b ⇢
|
|
70
|
+
Streaming test passed for Mistral-7b: "Two. (I've given you two words, but the first one was "What" which was not part of the mathematical equation.)"
|
|
71
|
+
Non-streaming test passed for Mistral-7b: "Two. (I've given you two words, but the first one was "What" which was not part of the mathematical equation.)"
|
|
72
|
+
|
|
73
|
+
--------------------------------------------------
|
|
74
|
+
Testing Mixtral-8x7b ⇢
|
|
75
|
+
Streaming test passed for Mixtral-8x7b: "Two.
|
|
76
|
+
|
|
77
|
+
The word you are looking for is "two." The sum of 1 + 1 is equal to 2. I am programmed to provide accurate and helpful responses, so I wanted to make sure that I gave you the correct answer. If you have any other questions or need further clarification, please don't hesitate to ask. I'm here to help!"
|
|
78
|
+
Non-streaming test passed for Mixtral-8x7b: "Two.
|
|
79
|
+
|
|
80
|
+
The word you are looking for is "two." The sum of 1 + 1 is equal to 2. I am programmed to provide accurate and helpful responses, so I wanted to make sure that I gave you the correct answer. If you have any other questions or need further clarification, please don't hesitate to ask. I'm here to help!"
|
|
81
|
+
|
|
82
|
+
--------------------------------------------------
|
|
83
|
+
Testing Mistral-Large ⇢
|
|
84
|
+
Streaming test passed for Mistral-Large: "Two."
|
|
85
|
+
Non-streaming test passed for Mistral-Large: "Two."
|
package/test-models.js
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
// ================================================================================
|
|
2
|
+
// == AWS Bedrock Example: Invoke a Model with a Streamed or Unstreamed Response ==
|
|
3
|
+
// ================================================================================
|
|
4
|
+
|
|
5
|
+
// ---------------------------------------------------------------------
|
|
6
|
+
// -- import environment variables from .env file or define them here --
|
|
7
|
+
// ---------------------------------------------------------------------
|
|
8
|
+
import dotenv from 'dotenv';
|
|
9
|
+
import fs from 'fs/promises';
|
|
10
|
+
import chalk from 'chalk';
|
|
11
|
+
|
|
12
|
+
dotenv.config();
|
|
13
|
+
|
|
14
|
+
// --- AWS credentials and LLM generation settings, read from the environment ---
const AWS_REGION = process.env.AWS_REGION;
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
// Explicit radix 10 avoids legacy octal/hex surprises; Number.parseInt/parseFloat
// are the non-global idiomatic forms. Unset vars yield NaN, which surfaces
// loudly downstream rather than being silently defaulted.
const LLM_MAX_GEN_TOKENS = Number.parseInt(process.env.LLM_MAX_GEN_TOKENS, 10);
const LLM_TEMPERATURE = Number.parseFloat(process.env.LLM_TEMPERATURE);
const LLM_TOP_P = Number.parseFloat(process.env.LLM_TOP_P);
|
|
20
|
+
|
|
21
|
+
// --------------------------------------------
|
|
22
|
+
// -- import functions from bedrock-wrapper --
|
|
23
|
+
// -- - bedrockWrapper --
|
|
24
|
+
// -- - listBedrockWrapperSupportedModels --
|
|
25
|
+
// --------------------------------------------
|
|
26
|
+
import {
|
|
27
|
+
bedrockWrapper,
|
|
28
|
+
listBedrockWrapperSupportedModels
|
|
29
|
+
} from "./bedrock-wrapper.js";
|
|
30
|
+
|
|
31
|
+
/**
 * Log a message to the console (color-coded by type) and optionally append
 * the plain text to the test-models-output.txt results file.
 *
 * @param {string} message - Text to log.
 * @param {string} [type='info'] - 'success' | 'error' | 'info' | 'running';
 *   selects the console color and prefix.
 * @param {boolean} [writeToFile=true] - When true, also append to the file.
 */
async function logOutput(message, type = 'info', writeToFile = true ) {
    if (writeToFile) {
        // Persist the uncolored message to the results file.
        await fs.appendFile('test-models-output.txt', message + '\n');
    }

    // Log to console with colors
    switch(type) {
        case 'success':
            console.log(chalk.green('✓ ' + message));
            break;
        case 'error':
            console.log(chalk.red('✗ ' + message));
            break;
        case 'info':
            console.log(chalk.blue('ℹ ' + message));
            break;
        case 'running':
            console.log(chalk.yellow(message));
            break;
        default:
            // Previously an unrecognized type was written to the file but
            // silently dropped from the console; echo it unstyled instead.
            console.log(message);
            break;
    }
}
|
|
53
|
+
|
|
54
|
+
/**
 * Send one prompt to a single model via bedrockWrapper and collect the full
 * response text, in either streaming or non-streaming mode.
 *
 * @param {string} model - Model name understood by bedrockWrapper.
 * @param {object} awsCreds - { region, accessKeyId, secretAccessKey }.
 * @param {string} testMessage - User prompt to send.
 * @param {boolean} isStreaming - Request a streamed response when true.
 * @returns {Promise<{success: boolean, response?: string, error?: string}>}
 */
async function testModel(model, awsCreds, testMessage, isStreaming) {
    const openaiChatCompletionsCreateObject = {
        messages: [{ role: "user", content: testMessage }],
        model,
        max_tokens: LLM_MAX_GEN_TOKENS,
        stream: isStreaming,
        temperature: LLM_TEMPERATURE,
        top_p: LLM_TOP_P,
    };

    let collected = "";

    try {
        if (isStreaming) {
            // Streamed call: consume chunks straight off the async iterator.
            for await (const piece of bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: false })) {
                collected += piece;
            }
        } else {
            // Non-streamed call: await the wrapper, then drain its iterator.
            const result = await bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging: false });
            for await (const piece of result) {
                collected += piece;
            }
        }

        const trimmed = collected.trim();
        // An empty body — or the literal text "undefined" — counts as failure.
        if (!collected || trimmed === '' || trimmed === 'undefined') {
            throw new Error('Empty or invalid response received');
        }

        return { success: true, response: trimmed };
    } catch (error) {
        return { success: false, error: error.message };
    }
}
|
|
89
|
+
|
|
90
|
+
/**
 * Entry point: runs every supported model through one streaming and one
 * non-streaming request, logging results to the console and to
 * test-models-output.txt.
 */
async function main() {
    const testMessage = "Respond with exactly one word: What is 1+1?";

    // Reset the output file and write the header (question + separator line).
    const header = [`Test Question: "${testMessage}"`, '='.repeat(50), '', ''].join('\n');
    await fs.writeFile('test-models-output.txt', header);

    // listBedrockWrapperSupportedModels() returns strings whose modelName /
    // modelId values lack quotes; patch them in so each entry parses as JSON.
    const supportedModels = await listBedrockWrapperSupportedModels();
    const availableModels = supportedModels.map((entry) => {
        const fixedJson = entry
            .replace(/modelName": ([^,]+),/, 'modelName": "$1",')
            .replace(/modelId": ([^}]+)}/, 'modelId": "$1"}');
        return JSON.parse(fixedJson).modelName;
    });

    console.clear();
    await logOutput(`Starting tests with ${availableModels.length} models...`, 'info');
    await logOutput(`Each model will be tested with streaming and non-streaming calls\n`, 'info');

    const awsCreds = {
        region: AWS_REGION,
        accessKeyId: AWS_ACCESS_KEY_ID,
        secretAccessKey: AWS_SECRET_ACCESS_KEY,
    };

    for (const model of availableModels) {
        await logOutput(`\n${'-'.repeat(50)}\nTesting ${model} ⇢`, 'running');

        // Streaming pass first, then non-streaming — same order as the report.
        for (const streaming of [true, false]) {
            const label = streaming ? 'Streaming' : 'Non-streaming';
            const result = await testModel(model, awsCreds, testMessage, streaming);
            if (result.success) {
                await logOutput(`${label} test passed for ${model}: "${result.response}"`, 'success');
            } else {
                await logOutput(`${label} test failed for ${model}: ${result.error}`, 'error');
            }
        }

        console.log(''); // Add blank line between models
    }

    await logOutput('Testing complete! Check test-models-output.txt for full test results.', 'info', false);
}
|
|
141
|
+
|
|
142
|
+
// Top-level runner: report any fatal error and exit nonzero so callers
// (shells, CI) see the failure instead of a success status.
main().catch(async (error) => {
    await logOutput(`Fatal Error: ${error.message}`, 'error');
    console.error(error);
    process.exitCode = 1; // set the code without cutting off pending output
});
|