bedrock-wrapper 1.2.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -1
- package/bedrock-models.js +100 -0
- package/package.json +2 -2
package/README.md
CHANGED
@@ -96,6 +96,10 @@ Bedrock Wrapper is an npm package that simplifies the integration of existing Op
 
 | modelName      | modelId                            |
 |----------------|------------------------------------|
+| Llama-3-2-1b   | meta.llama3-2-1b-instruct-v1:0     |
+| Llama-3-2-3b   | meta.llama3-2-3b-instruct-v1:0     |
+| Llama-3-2-11b  | meta.llama3-2-11b-instruct-v1:0    |
+| Llama-3-2-90b  | meta.llama3-2-90b-instruct-v1:0    |
 | Llama-3-1-8b   | meta.llama3-1-8b-instruct-v1:0     |
 | Llama-3-1-70b  | meta.llama3-1-70b-instruct-v1:0    |
 | Llama-3-1-405b | meta.llama3-1-405b-instruct-v1:0   |
@@ -120,4 +124,14 @@ Please modify the `bedrock_models.js` file and submit a PR 🏆 or create an Iss
 
 In case you missed it at the beginning of this doc, for an even easier setup, use the 🔀 [Bedrock Proxy Endpoint](https://github.com/jparkerweb/bedrock-proxy-endpoint) project to spin up your own custom OpenAI server endpoint (using the standard `baseUrl`, and `apiKey` params).
 
-
+
+
+---
+
+### 📚 References
+
+- [AWS Meta Llama Models User Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html)
+- [AWS Mistral Models User Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral.html)
+- [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create)
+- [AWS Bedrock](https://aws.amazon.com/bedrock/)
+- [AWS SDK for JavaScript](https://aws.amazon.com/sdk-for-javascript/)
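Worth noting for the README additions above: the new `modelName` values plug directly into an OpenAI-style request. Below is a minimal, illustrative sketch using the official `openai` Node client against a self-hosted Bedrock Proxy Endpoint; the `baseURL` value is a placeholder, and the assumption that the proxy accepts these `modelName` strings follows the table above rather than anything verified here.

```javascript
import OpenAI from "openai";

// Standard OpenAI client pointed at a self-hosted Bedrock Proxy Endpoint.
// The baseURL is a placeholder; substitute your deployed proxy's address.
const openai = new OpenAI({
    baseURL: "http://localhost:3000/v1", // hypothetical proxy address
    apiKey: "not-used",                  // AWS credentials live on the proxy side
});

// Request one of the newly added Llama 3.2 models by its modelName.
const completion = await openai.chat.completions.create({
    model: "Llama-3-2-3b",
    messages: [{ role: "user", content: "Summarize AWS Bedrock in one sentence." }],
});

console.log(completion.choices[0].message.content);
```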
package/bedrock-models.js
CHANGED
@@ -1,5 +1,105 @@
 // Description: This file contains the model configurations
 export const bedrock_models = [
+    {
+        // ==================
+        // == Llama 3.2 1b ==
+        // ==================
+        "modelName": "Llama-3-2-1b",
+        "modelId": "meta.llama3-2-1b-instruct-v1:0",
+        "bos_text": "<|begin_of_text|>",
+        "role_system_message_prefix": "",
+        "role_system_message_suffix": "",
+        "role_system_prefix": "<|start_header_id|>",
+        "role_system_suffix": "<|end_header_id|>",
+        "role_user_message_prefix": "",
+        "role_user_message_suffix": "",
+        "role_user_prefix": "<|start_header_id|>",
+        "role_user_suffix": "<|end_header_id|>",
+        "role_assistant_message_prefix": "",
+        "role_assistant_message_suffix": "",
+        "role_assistant_prefix": "<|start_header_id|>",
+        "role_assistant_suffix": "<|end_header_id|>",
+        "eom_text": "<|eot_id|>",
+        "display_role_names": true,
+        "max_tokens_param_name": "max_gen_len",
+        "max_supported_response_tokens": 2048,
+        "response_chunk_element": "generation",
+    },
+    {
+        // ==================
+        // == Llama 3.2 3b ==
+        // ==================
+        "modelName": "Llama-3-2-3b",
+        "modelId": "meta.llama3-2-3b-instruct-v1:0",
+        "bos_text": "<|begin_of_text|>",
+        "role_system_message_prefix": "",
+        "role_system_message_suffix": "",
+        "role_system_prefix": "<|start_header_id|>",
+        "role_system_suffix": "<|end_header_id|>",
+        "role_user_message_prefix": "",
+        "role_user_message_suffix": "",
+        "role_user_prefix": "<|start_header_id|>",
+        "role_user_suffix": "<|end_header_id|>",
+        "role_assistant_message_prefix": "",
+        "role_assistant_message_suffix": "",
+        "role_assistant_prefix": "<|start_header_id|>",
+        "role_assistant_suffix": "<|end_header_id|>",
+        "eom_text": "<|eot_id|>",
+        "display_role_names": true,
+        "max_tokens_param_name": "max_gen_len",
+        "max_supported_response_tokens": 2048,
+        "response_chunk_element": "generation",
+    },
+    {
+        // ===================
+        // == Llama 3.2 11b ==
+        // ===================
+        "modelName": "Llama-3-2-11b",
+        "modelId": "meta.llama3-2-11b-instruct-v1:0",
+        "bos_text": "<|begin_of_text|>",
+        "role_system_message_prefix": "",
+        "role_system_message_suffix": "",
+        "role_system_prefix": "<|start_header_id|>",
+        "role_system_suffix": "<|end_header_id|>",
+        "role_user_message_prefix": "",
+        "role_user_message_suffix": "",
+        "role_user_prefix": "<|start_header_id|>",
+        "role_user_suffix": "<|end_header_id|>",
+        "role_assistant_message_prefix": "",
+        "role_assistant_message_suffix": "",
+        "role_assistant_prefix": "<|start_header_id|>",
+        "role_assistant_suffix": "<|end_header_id|>",
+        "eom_text": "<|eot_id|>",
+        "display_role_names": true,
+        "max_tokens_param_name": "max_gen_len",
+        "max_supported_response_tokens": 2048,
+        "response_chunk_element": "generation",
+    },
+    {
+        // ===================
+        // == Llama 3.2 90b ==
+        // ===================
+        "modelName": "Llama-3-2-90b",
+        "modelId": "meta.llama3-2-90b-instruct-v1:0",
+        "bos_text": "<|begin_of_text|>",
+        "role_system_message_prefix": "",
+        "role_system_message_suffix": "",
+        "role_system_prefix": "<|start_header_id|>",
+        "role_system_suffix": "<|end_header_id|>",
+        "role_user_message_prefix": "",
+        "role_user_message_suffix": "",
+        "role_user_prefix": "<|start_header_id|>",
+        "role_user_suffix": "<|end_header_id|>",
+        "role_assistant_message_prefix": "",
+        "role_assistant_message_suffix": "",
+        "role_assistant_prefix": "<|start_header_id|>",
+        "role_assistant_suffix": "<|end_header_id|>",
+        "eom_text": "<|eot_id|>",
+        "display_role_names": true,
+        "max_tokens_param_name": "max_gen_len",
+        "max_supported_response_tokens": 2048,
+        "response_chunk_element": "generation",
+    },
     {
         // ==================
         // == Llama 3.1 8b ==
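The 100 added lines are four copies of the same template, one per Llama 3.2 size, and every field maps onto a piece of Meta's Llama 3 chat format (`<|begin_of_text|>`, `<|start_header_id|>role<|end_header_id|>`, `<|eot_id|>`). As an illustrative sketch only, assuming a hypothetical `buildPrompt` helper rather than the package's actual code, the fields could be stitched around an OpenAI-style message list like this:

```javascript
// Hypothetical helper, for illustration only: shows how the template
// fields in a bedrock_models entry could assemble a Llama 3 prompt.
// bedrock-wrapper's real implementation may differ.
function buildPrompt(model, messages) {
    let prompt = model.bos_text; // <|begin_of_text|>
    for (const { role, content } of messages) {
        prompt += model[`role_${role}_prefix`];          // <|start_header_id|>
        if (model.display_role_names) prompt += role;    // "system" | "user" | "assistant"
        prompt += model[`role_${role}_suffix`] + "\n\n"; // <|end_header_id|>
        prompt += model[`role_${role}_message_prefix`]
                + content
                + model[`role_${role}_message_suffix`]
                + model.eom_text;                        // <|eot_id|>
    }
    // Open an assistant turn so the model generates the reply.
    return prompt + model.role_assistant_prefix + "assistant"
                  + model.role_assistant_suffix + "\n\n";
}
```

For a single user message, the Llama-3-2-1b entry would then yield `<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n`, which matches Meta's documented Llama 3 instruct layout.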
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "bedrock-wrapper",
-  "version": "1.2.0",
+  "version": "1.3.0",
   "description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
   "repository": {
     "type": "git",
@@ -21,7 +21,7 @@
   "author": "",
   "license": "ISC",
   "dependencies": {
-    "@aws-sdk/client-bedrock-runtime": "^3.
+    "@aws-sdk/client-bedrock-runtime": "^3.658.1",
     "dotenv": "^16.4.5"
   }
 }