bedrock-wrapper 1.1.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -9
- package/bedrock-models.js +25 -0
- package/bedrock-wrapper.js +156 -156
- package/{example.js → example-test.js} +2 -2
- package/package.json +2 -2
package/README.md
CHANGED
|
@@ -94,15 +94,16 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
|
|
|
94
94
|
|
|
95
95
|
### Supported Models
|
|
96
96
|
|
|
97
|
-
| modelName
|
|
98
|
-
|
|
99
|
-
| Llama-3-1-8b
|
|
100
|
-
| Llama-3-1-70b
|
|
101
|
-
| Llama-3-8b
|
|
102
|
-
| Llama-3-70b
|
|
103
|
-
|
|
|
104
|
-
|
|
|
105
|
-
|
|
|
97
|
+
| modelName | modelId |
|
|
98
|
+
|----------------|------------------------------------|
|
|
99
|
+
| Llama-3-1-8b | meta.llama3-1-8b-instruct-v1:0 |
|
|
100
|
+
| Llama-3-1-70b | meta.llama3-1-70b-instruct-v1:0 |
|
|
101
|
+
| Llama-3-1-405b | meta.llama3-1-405b-instruct-v1:0 |
|
|
102
|
+
| Llama-3-8b | meta.llama3-8b-instruct-v1:0 |
|
|
103
|
+
| Llama-3-70b | meta.llama3-70b-instruct-v1:0 |
|
|
104
|
+
| Mistral-7b | mistral.mistral-7b-instruct-v0:2 |
|
|
105
|
+
| Mixtral-8x7b | mistral.mixtral-8x7b-instruct-v0:1 |
|
|
106
|
+
| Mistral-Large | mistral.mistral-large-2402-v1:0 |
|
|
106
107
|
|
|
107
108
|
To return the list programmatically you can import and call `listBedrockWrapperSupportedModels`:
|
|
108
109
|
```javascript
|
package/bedrock-models.js
CHANGED
|
@@ -50,6 +50,31 @@ export const bedrock_models = [
|
|
|
50
50
|
"max_supported_response_tokens": 2048,
|
|
51
51
|
"response_chunk_element": "generation",
|
|
52
52
|
},
|
|
53
|
+
{
|
|
54
|
+
// ====================
|
|
55
|
+
// == Llama 3.1 405b ==
|
|
56
|
+
// ====================
|
|
57
|
+
"modelName": "Llama-3-1-405b",
|
|
58
|
+
"modelId": "meta.llama3-1-405b-instruct-v1:0",
|
|
59
|
+
"bos_text": "<|begin_of_text|>",
|
|
60
|
+
"role_system_message_prefix": "",
|
|
61
|
+
"role_system_message_suffix": "",
|
|
62
|
+
"role_system_prefix": "<|start_header_id|>",
|
|
63
|
+
"role_system_suffix": "<|end_header_id|>",
|
|
64
|
+
"role_user_message_prefix": "",
|
|
65
|
+
"role_user_message_suffix": "",
|
|
66
|
+
"role_user_prefix": "<|start_header_id|>",
|
|
67
|
+
"role_user_suffix": "<|end_header_id|>",
|
|
68
|
+
"role_assistant_message_prefix": "",
|
|
69
|
+
"role_assistant_message_suffix": "",
|
|
70
|
+
"role_assistant_prefix": "<|start_header_id|>",
|
|
71
|
+
"role_assistant_suffix": "<|end_header_id|>",
|
|
72
|
+
"eom_text": "<|eot_id|>",
|
|
73
|
+
"display_role_names": true,
|
|
74
|
+
"max_tokens_param_name": "max_gen_len",
|
|
75
|
+
"max_supported_response_tokens": 2048,
|
|
76
|
+
"response_chunk_element": "generation",
|
|
77
|
+
},
|
|
53
78
|
{
|
|
54
79
|
// ================
|
|
55
80
|
// == Llama 3 8b ==
|
package/bedrock-wrapper.js
CHANGED
|
@@ -1,156 +1,156 @@
|
|
|
1
|
-
// ======================================================================
|
|
2
|
-
// == 🪨 Bedrock Wrapper ==
|
|
3
|
-
// == ==
|
|
4
|
-
// == Bedrock Wrapper is an npm package that simplifies the integration ==
|
|
5
|
-
// == of existing OpenAI-compatible API objects AWS Bedrock's ==
|
|
6
|
-
// == serverless inference LLMs. ==
|
|
7
|
-
// ======================================================================
|
|
8
|
-
|
|
9
|
-
// -------------
|
|
10
|
-
// -- imports --
|
|
11
|
-
// -------------
|
|
12
|
-
// Bedrock model configurations
|
|
13
|
-
import { bedrock_models } from "./bedrock-models.js";
|
|
14
|
-
// AWS SDK
|
|
15
|
-
import {
|
|
16
|
-
BedrockRuntimeClient,
|
|
17
|
-
InvokeModelCommand, InvokeModelWithResponseStreamCommand,
|
|
18
|
-
} from "@aws-sdk/client-bedrock-runtime";
|
|
19
|
-
// helper functions
|
|
20
|
-
import {
|
|
21
|
-
getValueByPath,
|
|
22
|
-
writeAsciiArt
|
|
23
|
-
} from "./utils.js";
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
// write the ascii art logo on initial load
|
|
27
|
-
writeAsciiArt();
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
// -------------------
|
|
31
|
-
// -- main function --
|
|
32
|
-
// -------------------
|
|
33
|
-
export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging = false } = {} ) {
|
|
34
|
-
const { region, accessKeyId, secretAccessKey } = awsCreds;
|
|
35
|
-
const { messages, model, max_tokens, stream, temperature, top_p } = openaiChatCompletionsCreateObject;
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
// retrieve the model configuration
|
|
39
|
-
const awsModel = bedrock_models.find((x) => (x.modelName.toLowerCase() === model.toLowerCase() || x.modelId.toLowerCase() === model.toLowerCase()));
|
|
40
|
-
if (!awsModel) { throw new Error(`Model configuration not found for model: ${model}`); }
|
|
41
|
-
|
|
42
|
-
// cleanup message content before formatting prompt message
|
|
43
|
-
let message_cleaned = [];
|
|
44
|
-
for (let i = 0; i < messages.length; i++) {
|
|
45
|
-
if (messages[i].content !== "") {
|
|
46
|
-
message_cleaned.push(messages[i]);
|
|
47
|
-
} else if (awsModel.display_role_names) {
|
|
48
|
-
message_cleaned.push(messages[i]);
|
|
49
|
-
}
|
|
50
|
-
|
|
51
|
-
if (i === (messages.length - 1) && messages[i].content !== "" && awsModel.display_role_names) {
|
|
52
|
-
message_cleaned.push({role: "assistant", content: ""});
|
|
53
|
-
}
|
|
54
|
-
}
|
|
55
|
-
|
|
56
|
-
// format prompt message from message array
|
|
57
|
-
let prompt = awsModel.bos_text;
|
|
58
|
-
let eom_text_inserted = false;
|
|
59
|
-
for (let i = 0; i < message_cleaned.length; i++) {
|
|
60
|
-
prompt += "\n";
|
|
61
|
-
if (message_cleaned[i].role === "system") {
|
|
62
|
-
prompt += awsModel.role_system_message_prefix;
|
|
63
|
-
prompt += awsModel.role_system_prefix;
|
|
64
|
-
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
65
|
-
prompt += awsModel.role_system_suffix;
|
|
66
|
-
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
67
|
-
prompt += message_cleaned[i].content;
|
|
68
|
-
prompt += awsModel.role_system_message_suffix;
|
|
69
|
-
} else if (message_cleaned[i].role === "user") {
|
|
70
|
-
prompt += awsModel.role_user_message_prefix;
|
|
71
|
-
prompt += awsModel.role_user_prefix;
|
|
72
|
-
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
73
|
-
prompt += awsModel.role_user_suffix;
|
|
74
|
-
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
75
|
-
prompt += message_cleaned[i].content;
|
|
76
|
-
prompt += awsModel.role_user_message_suffix;
|
|
77
|
-
} else if (message_cleaned[i].role === "assistant") {
|
|
78
|
-
prompt += awsModel.role_assistant_message_prefix;
|
|
79
|
-
prompt += awsModel.role_assistant_prefix;
|
|
80
|
-
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
81
|
-
prompt += awsModel.role_assistant_suffix;
|
|
82
|
-
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
83
|
-
prompt += message_cleaned[i].content;
|
|
84
|
-
prompt += awsModel.role_assistant_message_suffix;
|
|
85
|
-
}
|
|
86
|
-
if (message_cleaned[i+1] && message_cleaned[i+1].content === "") {
|
|
87
|
-
prompt += `\n${awsModel.eom_text}`;
|
|
88
|
-
eom_text_inserted = true;
|
|
89
|
-
} else if ((i+1) === (message_cleaned.length - 1) && !eom_text_inserted) {
|
|
90
|
-
prompt += `\n${awsModel.eom_text}`;
|
|
91
|
-
}
|
|
92
|
-
}
|
|
93
|
-
|
|
94
|
-
// logging
|
|
95
|
-
if (logging) {
|
|
96
|
-
console.log(`\nPrompt: ${prompt}\n`);
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
const max_gen_tokens = max_tokens <= awsModel.max_supported_response_tokens ? max_tokens : awsModel.max_supported_response_tokens;
|
|
100
|
-
|
|
101
|
-
// Format the request payload using the model's native structure.
|
|
102
|
-
const request = {
|
|
103
|
-
prompt,
|
|
104
|
-
// Optional inference parameters:
|
|
105
|
-
[awsModel.max_tokens_param_name]: max_gen_tokens,
|
|
106
|
-
temperature: temperature,
|
|
107
|
-
top_p: top_p,
|
|
108
|
-
};
|
|
109
|
-
|
|
110
|
-
// Create a Bedrock Runtime client in the AWS Region of your choice
|
|
111
|
-
const client = new BedrockRuntimeClient({
|
|
112
|
-
region: region,
|
|
113
|
-
credentials: {
|
|
114
|
-
accessKeyId: accessKeyId,
|
|
115
|
-
secretAccessKey: secretAccessKey,
|
|
116
|
-
},
|
|
117
|
-
});
|
|
118
|
-
|
|
119
|
-
if (stream) {
|
|
120
|
-
const responseStream = await client.send(
|
|
121
|
-
new InvokeModelWithResponseStreamCommand({
|
|
122
|
-
contentType: "application/json",
|
|
123
|
-
body: JSON.stringify(request),
|
|
124
|
-
modelId: awsModel.modelId,
|
|
125
|
-
}),
|
|
126
|
-
);
|
|
127
|
-
for await (const event of responseStream.body) {
|
|
128
|
-
const chunk = JSON.parse(new TextDecoder().decode(event.chunk.bytes));
|
|
129
|
-
let result = getValueByPath(chunk, awsModel.response_chunk_element);
|
|
130
|
-
if (result) {
|
|
131
|
-
yield result;
|
|
132
|
-
}
|
|
133
|
-
}
|
|
134
|
-
} else {
|
|
135
|
-
const apiResponse = await client.send(
|
|
136
|
-
new InvokeModelCommand({
|
|
137
|
-
contentType: "application/json",
|
|
138
|
-
body: JSON.stringify(request),
|
|
139
|
-
modelId: awsModel.modelId,
|
|
140
|
-
}),
|
|
141
|
-
);
|
|
142
|
-
yield apiResponse;
|
|
143
|
-
}
|
|
144
|
-
}
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
// ---------------------------
|
|
148
|
-
// -- list supported models --
|
|
149
|
-
// ---------------------------
|
|
150
|
-
export async function listBedrockWrapperSupportedModels() {
|
|
151
|
-
let supported_models = [];
|
|
152
|
-
for (let i = 0; i < bedrock_models.length; i++) {
|
|
153
|
-
supported_models.push(`{"modelName": ${bedrock_models[i].modelName}, "modelId": ${bedrock_models[i].modelId}}`);
|
|
154
|
-
}
|
|
155
|
-
return supported_models;
|
|
156
|
-
}
|
|
1
|
+
// ======================================================================
|
|
2
|
+
// == 🪨 Bedrock Wrapper ==
|
|
3
|
+
// == ==
|
|
4
|
+
// == Bedrock Wrapper is an npm package that simplifies the integration ==
|
|
5
|
+
// == of existing OpenAI-compatible API objects AWS Bedrock's ==
|
|
6
|
+
// == serverless inference LLMs. ==
|
|
7
|
+
// ======================================================================
|
|
8
|
+
|
|
9
|
+
// -------------
|
|
10
|
+
// -- imports --
|
|
11
|
+
// -------------
|
|
12
|
+
// Bedrock model configurations
|
|
13
|
+
import { bedrock_models } from "./bedrock-models.js";
|
|
14
|
+
// AWS SDK
|
|
15
|
+
import {
|
|
16
|
+
BedrockRuntimeClient,
|
|
17
|
+
InvokeModelCommand, InvokeModelWithResponseStreamCommand,
|
|
18
|
+
} from "@aws-sdk/client-bedrock-runtime";
|
|
19
|
+
// helper functions
|
|
20
|
+
import {
|
|
21
|
+
getValueByPath,
|
|
22
|
+
writeAsciiArt
|
|
23
|
+
} from "./utils.js";
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
// write the ascii art logo on initial load
|
|
27
|
+
writeAsciiArt();
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
// -------------------
|
|
31
|
+
// -- main function --
|
|
32
|
+
// -------------------
|
|
33
|
+
export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObject, { logging = false } = {} ) {
|
|
34
|
+
const { region, accessKeyId, secretAccessKey } = awsCreds;
|
|
35
|
+
const { messages, model, max_tokens, stream, temperature, top_p } = openaiChatCompletionsCreateObject;
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
// retrieve the model configuration
|
|
39
|
+
const awsModel = bedrock_models.find((x) => (x.modelName.toLowerCase() === model.toLowerCase() || x.modelId.toLowerCase() === model.toLowerCase()));
|
|
40
|
+
if (!awsModel) { throw new Error(`Model configuration not found for model: ${model}`); }
|
|
41
|
+
|
|
42
|
+
// cleanup message content before formatting prompt message
|
|
43
|
+
let message_cleaned = [];
|
|
44
|
+
for (let i = 0; i < messages.length; i++) {
|
|
45
|
+
if (messages[i].content !== "") {
|
|
46
|
+
message_cleaned.push(messages[i]);
|
|
47
|
+
} else if (awsModel.display_role_names) {
|
|
48
|
+
message_cleaned.push(messages[i]);
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
if (i === (messages.length - 1) && messages[i].content !== "" && awsModel.display_role_names) {
|
|
52
|
+
message_cleaned.push({role: "assistant", content: ""});
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
// format prompt message from message array
|
|
57
|
+
let prompt = awsModel.bos_text;
|
|
58
|
+
let eom_text_inserted = false;
|
|
59
|
+
for (let i = 0; i < message_cleaned.length; i++) {
|
|
60
|
+
prompt += "\n";
|
|
61
|
+
if (message_cleaned[i].role === "system") {
|
|
62
|
+
prompt += awsModel.role_system_message_prefix;
|
|
63
|
+
prompt += awsModel.role_system_prefix;
|
|
64
|
+
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
65
|
+
prompt += awsModel.role_system_suffix;
|
|
66
|
+
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
67
|
+
prompt += message_cleaned[i].content;
|
|
68
|
+
prompt += awsModel.role_system_message_suffix;
|
|
69
|
+
} else if (message_cleaned[i].role === "user") {
|
|
70
|
+
prompt += awsModel.role_user_message_prefix;
|
|
71
|
+
prompt += awsModel.role_user_prefix;
|
|
72
|
+
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
73
|
+
prompt += awsModel.role_user_suffix;
|
|
74
|
+
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
75
|
+
prompt += message_cleaned[i].content;
|
|
76
|
+
prompt += awsModel.role_user_message_suffix;
|
|
77
|
+
} else if (message_cleaned[i].role === "assistant") {
|
|
78
|
+
prompt += awsModel.role_assistant_message_prefix;
|
|
79
|
+
prompt += awsModel.role_assistant_prefix;
|
|
80
|
+
if (awsModel.display_role_names) { prompt += message_cleaned[i].role; }
|
|
81
|
+
prompt += awsModel.role_assistant_suffix;
|
|
82
|
+
if (awsModel.display_role_names) {prompt += "\n"; }
|
|
83
|
+
prompt += message_cleaned[i].content;
|
|
84
|
+
prompt += awsModel.role_assistant_message_suffix;
|
|
85
|
+
}
|
|
86
|
+
if (message_cleaned[i+1] && message_cleaned[i+1].content === "") {
|
|
87
|
+
prompt += `\n${awsModel.eom_text}`;
|
|
88
|
+
eom_text_inserted = true;
|
|
89
|
+
} else if ((i+1) === (message_cleaned.length - 1) && !eom_text_inserted) {
|
|
90
|
+
prompt += `\n${awsModel.eom_text}`;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
// logging
|
|
95
|
+
if (logging) {
|
|
96
|
+
console.log(`\nPrompt: ${prompt}\n`);
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
const max_gen_tokens = max_tokens <= awsModel.max_supported_response_tokens ? max_tokens : awsModel.max_supported_response_tokens;
|
|
100
|
+
|
|
101
|
+
// Format the request payload using the model's native structure.
|
|
102
|
+
const request = {
|
|
103
|
+
prompt,
|
|
104
|
+
// Optional inference parameters:
|
|
105
|
+
[awsModel.max_tokens_param_name]: max_gen_tokens,
|
|
106
|
+
temperature: temperature,
|
|
107
|
+
top_p: top_p,
|
|
108
|
+
};
|
|
109
|
+
|
|
110
|
+
// Create a Bedrock Runtime client in the AWS Region of your choice
|
|
111
|
+
const client = new BedrockRuntimeClient({
|
|
112
|
+
region: region,
|
|
113
|
+
credentials: {
|
|
114
|
+
accessKeyId: accessKeyId,
|
|
115
|
+
secretAccessKey: secretAccessKey,
|
|
116
|
+
},
|
|
117
|
+
});
|
|
118
|
+
|
|
119
|
+
if (stream) {
|
|
120
|
+
const responseStream = await client.send(
|
|
121
|
+
new InvokeModelWithResponseStreamCommand({
|
|
122
|
+
contentType: "application/json",
|
|
123
|
+
body: JSON.stringify(request),
|
|
124
|
+
modelId: awsModel.modelId,
|
|
125
|
+
}),
|
|
126
|
+
);
|
|
127
|
+
for await (const event of responseStream.body) {
|
|
128
|
+
const chunk = JSON.parse(new TextDecoder().decode(event.chunk.bytes));
|
|
129
|
+
let result = getValueByPath(chunk, awsModel.response_chunk_element);
|
|
130
|
+
if (result) {
|
|
131
|
+
yield result;
|
|
132
|
+
}
|
|
133
|
+
}
|
|
134
|
+
} else {
|
|
135
|
+
const apiResponse = await client.send(
|
|
136
|
+
new InvokeModelCommand({
|
|
137
|
+
contentType: "application/json",
|
|
138
|
+
body: JSON.stringify(request),
|
|
139
|
+
modelId: awsModel.modelId,
|
|
140
|
+
}),
|
|
141
|
+
);
|
|
142
|
+
yield apiResponse;
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
// ---------------------------
|
|
148
|
+
// -- list supported models --
|
|
149
|
+
// ---------------------------
|
|
150
|
+
export async function listBedrockWrapperSupportedModels() {
|
|
151
|
+
let supported_models = [];
|
|
152
|
+
for (let i = 0; i < bedrock_models.length; i++) {
|
|
153
|
+
supported_models.push(`{"modelName": ${bedrock_models[i].modelName}, "modelId": ${bedrock_models[i].modelId}}`);
|
|
154
|
+
}
|
|
155
|
+
return supported_models;
|
|
156
|
+
}
|
|
@@ -22,7 +22,7 @@ const LLM_TOP_P = parseFloat(process.env.LLM_TOP_P);
|
|
|
22
22
|
import {
|
|
23
23
|
bedrockWrapper,
|
|
24
24
|
listBedrockWrapperSupportedModels
|
|
25
|
-
} from "bedrock-wrapper";
|
|
25
|
+
} from "./bedrock-wrapper.js";
|
|
26
26
|
|
|
27
27
|
// ----------------------------------------------
|
|
28
28
|
// -- example call to list of supported models --
|
|
@@ -61,7 +61,7 @@ const awsCreds = {
|
|
|
61
61
|
// ----------------------------------------------------------------------
|
|
62
62
|
const openaiChatCompletionsCreateObject = {
|
|
63
63
|
"messages": messages,
|
|
64
|
-
"model": "Llama-3-1-
|
|
64
|
+
"model": "Llama-3-1-405b",
|
|
65
65
|
"max_tokens": LLM_MAX_GEN_TOKENS,
|
|
66
66
|
"stream": true,
|
|
67
67
|
"temperature": LLM_TEMPERATURE,
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "bedrock-wrapper",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.2.0",
|
|
4
4
|
"description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
|
|
5
5
|
"repository": {
|
|
6
6
|
"type": "git",
|
|
@@ -21,7 +21,7 @@
|
|
|
21
21
|
"author": "",
|
|
22
22
|
"license": "ISC",
|
|
23
23
|
"dependencies": {
|
|
24
|
-
"@aws-sdk/client-bedrock-runtime": "^3.
|
|
24
|
+
"@aws-sdk/client-bedrock-runtime": "^3.623.0",
|
|
25
25
|
"dotenv": "^16.4.5"
|
|
26
26
|
}
|
|
27
27
|
}
|