bedrock-wrapper 1.0.14 → 1.0.15

This diff shows the changes between publicly available package versions as released to their public registry, and is provided for informational purposes only.
package/bedrock-models.js CHANGED
@@ -22,6 +22,7 @@ export const bedrock_models = [
  "eom_text": "<|eot_id|>",
  "display_role_names": true,
  "max_tokens_param_name": "max_gen_len",
+ "max_supported_response_tokens": 2048,
  "response_chunk_element": "generation",
  },
  {
@@ -46,6 +47,7 @@ export const bedrock_models = [
  "eom_text": "<|eot_id|>",
  "display_role_names": true,
  "max_tokens_param_name": "max_gen_len",
+ "max_supported_response_tokens": 2048,
  "response_chunk_element": "generation",
  },
  {
@@ -70,6 +72,7 @@ export const bedrock_models = [
  "eom_text": "</s>",
  "display_role_names": false,
  "max_tokens_param_name": "max_tokens",
+ "max_supported_response_tokens": 8192,
  "response_chunk_element": "outputs[0].text",
  },
  {
@@ -94,6 +97,7 @@ export const bedrock_models = [
  "eom_text": "</s>",
  "display_role_names": false,
  "max_tokens_param_name": "max_tokens",
+ "max_supported_response_tokens": 4096,
  "response_chunk_element": "outputs[0].text",
  },
  {
@@ -118,6 +122,7 @@ export const bedrock_models = [
  "eom_text": "</s>",
  "display_role_names": false,
  "max_tokens_param_name": "max_tokens",
+ "max_supported_response_tokens": 8192,
  "response_chunk_element": "outputs[0].text",
  },
  ];
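
Every entry in bedrock_models now declares a max_supported_response_tokens value, the largest completion size the wrapper will request from that model. A minimal sketch of reading the new field (the relative import path is an assumption; only the exported bedrock_models array and the field names shown above come from this diff):

    // Sketch: print the response-token cap declared for each configured model.
    // Assumes this script lives next to bedrock-models.js inside the package.
    import { bedrock_models } from "./bedrock-models.js";

    for (const model of bedrock_models) {
        console.log(`${model.max_tokens_param_name} capped at ${model.max_supported_response_tokens}`);
    }
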
package/bedrock-wrapper.js CHANGED
@@ -96,11 +96,13 @@ export async function* bedrockWrapper(awsCreds, openaiChatCompletionsCreateObjec
  console.log(`\nPrompt: ${prompt}\n`);
  }
 
+ const max_gen_tokens = max_tokens <= awsModel.max_supported_response_tokens ? max_tokens : awsModel.max_supported_response_tokens;
+
  // Format the request payload using the model's native structure.
  const request = {
  prompt,
  // Optional inference parameters:
- [awsModel.max_tokens_param_name]: max_tokens,
+ [awsModel.max_tokens_param_name]: max_gen_tokens,
  temperature: temperature,
  top_p: top_p,
  };
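
The wrapper now clamps the caller-supplied max_tokens to the selected model's max_supported_response_tokens before building the native request, so a request asking for more tokens than the model supports is reduced to the cap instead of being passed through unchanged. The ternary added above behaves like Math.min; a standalone sketch of the same clamp (variable names mirror the diff, the surrounding bedrockWrapper generator is not reproduced):

    // Sketch of the clamp in isolation; awsModel, max_tokens, prompt, temperature,
    // and top_p all come from the surrounding bedrockWrapper code shown above.
    const max_gen_tokens = Math.min(max_tokens, awsModel.max_supported_response_tokens);

    const request = {
        prompt,
        [awsModel.max_tokens_param_name]: max_gen_tokens, // "max_gen_len" or "max_tokens", per model
        temperature,
        top_p,
    };
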
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "bedrock-wrapper",
- "version": "1.0.14",
+ "version": "1.0.15",
  "description": "🪨 Bedrock Wrapper is an npm package that simplifies the integration of existing OpenAI-compatible API objects with AWS Bedrock's serverless inference LLMs.",
  "repository": {
  "type": "git",
@@ -21,7 +21,7 @@
  "author": "",
  "license": "ISC",
  "dependencies": {
- "@aws-sdk/client-bedrock-runtime": "^3.567.0",
+ "@aws-sdk/client-bedrock-runtime": "^3.575.0",
  "dotenv": "^16.4.5"
  }
  }