@llumiverse/drivers 1.0.0-dev.20260202.145450Z → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/cjs/adobe/firefly.js +120 -0
- package/lib/cjs/adobe/firefly.js.map +1 -0
- package/lib/cjs/azure/azure_foundry.js +432 -0
- package/lib/cjs/azure/azure_foundry.js.map +1 -0
- package/lib/cjs/bedrock/converse.js +359 -0
- package/lib/cjs/bedrock/converse.js.map +1 -0
- package/lib/cjs/bedrock/index.js +1441 -0
- package/lib/cjs/bedrock/index.js.map +1 -0
- package/lib/cjs/bedrock/nova-image-payload.js +207 -0
- package/lib/cjs/bedrock/nova-image-payload.js.map +1 -0
- package/lib/cjs/bedrock/payloads.js +3 -0
- package/lib/cjs/bedrock/payloads.js.map +1 -0
- package/lib/cjs/bedrock/s3.js +107 -0
- package/lib/cjs/bedrock/s3.js.map +1 -0
- package/lib/cjs/bedrock/twelvelabs.js +87 -0
- package/lib/cjs/bedrock/twelvelabs.js.map +1 -0
- package/lib/cjs/groq/index.js +326 -0
- package/lib/cjs/groq/index.js.map +1 -0
- package/lib/cjs/huggingface_ie.js +201 -0
- package/lib/cjs/huggingface_ie.js.map +1 -0
- package/lib/cjs/index.js +31 -0
- package/lib/cjs/index.js.map +1 -0
- package/lib/cjs/mistral/index.js +176 -0
- package/lib/cjs/mistral/index.js.map +1 -0
- package/lib/cjs/mistral/types.js +83 -0
- package/lib/cjs/mistral/types.js.map +1 -0
- package/lib/cjs/openai/azure_openai.js +72 -0
- package/lib/cjs/openai/azure_openai.js.map +1 -0
- package/lib/cjs/openai/index.js +1100 -0
- package/lib/cjs/openai/index.js.map +1 -0
- package/lib/cjs/openai/openai.js +21 -0
- package/lib/cjs/openai/openai.js.map +1 -0
- package/lib/cjs/openai/openai_compatible.js +63 -0
- package/lib/cjs/openai/openai_compatible.js.map +1 -0
- package/lib/cjs/openai/openai_format.js +131 -0
- package/lib/cjs/openai/openai_format.js.map +1 -0
- package/lib/cjs/package.json +3 -0
- package/lib/cjs/replicate.js +275 -0
- package/lib/cjs/replicate.js.map +1 -0
- package/lib/cjs/test-driver/TestErrorCompletionStream.js +20 -0
- package/lib/cjs/test-driver/TestErrorCompletionStream.js.map +1 -0
- package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js +24 -0
- package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
- package/lib/cjs/test-driver/index.js +109 -0
- package/lib/cjs/test-driver/index.js.map +1 -0
- package/lib/cjs/test-driver/utils.js +30 -0
- package/lib/cjs/test-driver/utils.js.map +1 -0
- package/lib/cjs/togetherai/index.js +126 -0
- package/lib/cjs/togetherai/index.js.map +1 -0
- package/lib/cjs/togetherai/interfaces.js +3 -0
- package/lib/cjs/togetherai/interfaces.js.map +1 -0
- package/lib/cjs/vertexai/debug.js +12 -0
- package/lib/cjs/vertexai/debug.js.map +1 -0
- package/lib/cjs/vertexai/embeddings/embeddings-image.js +27 -0
- package/lib/cjs/vertexai/embeddings/embeddings-image.js.map +1 -0
- package/lib/cjs/vertexai/embeddings/embeddings-text.js +23 -0
- package/lib/cjs/vertexai/embeddings/embeddings-text.js.map +1 -0
- package/lib/cjs/vertexai/index.js +635 -0
- package/lib/cjs/vertexai/index.js.map +1 -0
- package/lib/cjs/vertexai/models/claude.js +842 -0
- package/lib/cjs/vertexai/models/claude.js.map +1 -0
- package/lib/cjs/vertexai/models/gemini.js +1110 -0
- package/lib/cjs/vertexai/models/gemini.js.map +1 -0
- package/lib/cjs/vertexai/models/imagen.js +303 -0
- package/lib/cjs/vertexai/models/imagen.js.map +1 -0
- package/lib/cjs/vertexai/models/llama.js +183 -0
- package/lib/cjs/vertexai/models/llama.js.map +1 -0
- package/lib/cjs/vertexai/models.js +35 -0
- package/lib/cjs/vertexai/models.js.map +1 -0
- package/lib/cjs/watsonx/index.js +161 -0
- package/lib/cjs/watsonx/index.js.map +1 -0
- package/lib/cjs/watsonx/interfaces.js +3 -0
- package/lib/cjs/watsonx/interfaces.js.map +1 -0
- package/lib/cjs/xai/index.js +65 -0
- package/lib/cjs/xai/index.js.map +1 -0
- package/lib/esm/adobe/firefly.js +116 -0
- package/lib/esm/adobe/firefly.js.map +1 -0
- package/lib/esm/azure/azure_foundry.js +426 -0
- package/lib/esm/azure/azure_foundry.js.map +1 -0
- package/lib/esm/bedrock/converse.js +352 -0
- package/lib/esm/bedrock/converse.js.map +1 -0
- package/lib/esm/bedrock/index.js +1434 -0
- package/lib/esm/bedrock/index.js.map +1 -0
- package/lib/esm/bedrock/nova-image-payload.js +203 -0
- package/lib/esm/bedrock/nova-image-payload.js.map +1 -0
- package/lib/esm/bedrock/payloads.js +2 -0
- package/lib/esm/bedrock/payloads.js.map +1 -0
- package/lib/esm/bedrock/s3.js +99 -0
- package/lib/esm/bedrock/s3.js.map +1 -0
- package/lib/esm/bedrock/twelvelabs.js +84 -0
- package/lib/esm/bedrock/twelvelabs.js.map +1 -0
- package/lib/esm/groq/index.js +319 -0
- package/lib/esm/groq/index.js.map +1 -0
- package/lib/esm/huggingface_ie.js +197 -0
- package/lib/esm/huggingface_ie.js.map +1 -0
- package/lib/esm/index.js +15 -0
- package/lib/esm/index.js.map +1 -0
- package/lib/esm/mistral/index.js +172 -0
- package/lib/esm/mistral/index.js.map +1 -0
- package/lib/esm/mistral/types.js +80 -0
- package/lib/esm/mistral/types.js.map +1 -0
- package/lib/esm/openai/azure_openai.js +68 -0
- package/lib/esm/openai/azure_openai.js.map +1 -0
- package/lib/esm/openai/index.js +1093 -0
- package/lib/esm/openai/index.js.map +1 -0
- package/lib/esm/openai/openai.js +14 -0
- package/lib/esm/openai/openai.js.map +1 -0
- package/lib/esm/openai/openai_compatible.js +56 -0
- package/lib/esm/openai/openai_compatible.js.map +1 -0
- package/lib/esm/openai/openai_format.js +127 -0
- package/lib/esm/openai/openai_format.js.map +1 -0
- package/lib/esm/replicate.js +268 -0
- package/lib/esm/replicate.js.map +1 -0
- package/lib/esm/test-driver/TestErrorCompletionStream.js +16 -0
- package/lib/esm/test-driver/TestErrorCompletionStream.js.map +1 -0
- package/lib/esm/test-driver/TestValidationErrorCompletionStream.js +20 -0
- package/lib/esm/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
- package/lib/esm/test-driver/index.js +91 -0
- package/lib/esm/test-driver/index.js.map +1 -0
- package/lib/esm/test-driver/utils.js +25 -0
- package/lib/esm/test-driver/utils.js.map +1 -0
- package/lib/esm/togetherai/index.js +122 -0
- package/lib/esm/togetherai/index.js.map +1 -0
- package/lib/esm/togetherai/interfaces.js +2 -0
- package/lib/esm/togetherai/interfaces.js.map +1 -0
- package/lib/esm/vertexai/debug.js +6 -0
- package/lib/esm/vertexai/debug.js.map +1 -0
- package/lib/esm/vertexai/embeddings/embeddings-image.js +24 -0
- package/lib/esm/vertexai/embeddings/embeddings-image.js.map +1 -0
- package/lib/esm/vertexai/embeddings/embeddings-text.js +20 -0
- package/lib/esm/vertexai/embeddings/embeddings-text.js.map +1 -0
- package/lib/esm/vertexai/index.js +630 -0
- package/lib/esm/vertexai/index.js.map +1 -0
- package/lib/esm/vertexai/models/claude.js +833 -0
- package/lib/esm/vertexai/models/claude.js.map +1 -0
- package/lib/esm/vertexai/models/gemini.js +1104 -0
- package/lib/esm/vertexai/models/gemini.js.map +1 -0
- package/lib/esm/vertexai/models/imagen.js +299 -0
- package/lib/esm/vertexai/models/imagen.js.map +1 -0
- package/lib/esm/vertexai/models/llama.js +179 -0
- package/lib/esm/vertexai/models/llama.js.map +1 -0
- package/lib/esm/vertexai/models.js +32 -0
- package/lib/esm/vertexai/models.js.map +1 -0
- package/lib/esm/watsonx/index.js +157 -0
- package/lib/esm/watsonx/index.js.map +1 -0
- package/lib/esm/watsonx/interfaces.js +2 -0
- package/lib/esm/watsonx/interfaces.js.map +1 -0
- package/lib/esm/xai/index.js +58 -0
- package/lib/esm/xai/index.js.map +1 -0
- package/lib/types/adobe/firefly.d.ts +30 -0
- package/lib/types/adobe/firefly.d.ts.map +1 -0
- package/lib/types/azure/azure_foundry.d.ts +52 -0
- package/lib/types/azure/azure_foundry.d.ts.map +1 -0
- package/lib/types/bedrock/converse.d.ts +8 -0
- package/lib/types/bedrock/converse.d.ts.map +1 -0
- package/lib/types/bedrock/index.d.ts +135 -0
- package/lib/types/bedrock/index.d.ts.map +1 -0
- package/lib/types/bedrock/nova-image-payload.d.ts +74 -0
- package/lib/types/bedrock/nova-image-payload.d.ts.map +1 -0
- package/lib/types/bedrock/payloads.d.ts +12 -0
- package/lib/types/bedrock/payloads.d.ts.map +1 -0
- package/lib/types/bedrock/s3.d.ts +23 -0
- package/lib/types/bedrock/s3.d.ts.map +1 -0
- package/lib/types/bedrock/twelvelabs.d.ts +50 -0
- package/lib/types/bedrock/twelvelabs.d.ts.map +1 -0
- package/lib/types/groq/index.d.ts +27 -0
- package/lib/types/groq/index.d.ts.map +1 -0
- package/lib/types/huggingface_ie.d.ts +35 -0
- package/lib/types/huggingface_ie.d.ts.map +1 -0
- package/lib/types/index.d.ts +15 -0
- package/lib/types/index.d.ts.map +1 -0
- package/lib/types/mistral/index.d.ts +25 -0
- package/lib/types/mistral/index.d.ts.map +1 -0
- package/lib/types/mistral/types.d.ts +127 -0
- package/lib/types/mistral/types.d.ts.map +1 -0
- package/lib/types/openai/azure_openai.d.ts +25 -0
- package/lib/types/openai/azure_openai.d.ts.map +1 -0
- package/lib/types/openai/index.d.ts +126 -0
- package/lib/types/openai/index.d.ts.map +1 -0
- package/lib/types/openai/openai.d.ts +15 -0
- package/lib/types/openai/openai.d.ts.map +1 -0
- package/lib/types/openai/openai_compatible.d.ts +31 -0
- package/lib/types/openai/openai_compatible.d.ts.map +1 -0
- package/lib/types/openai/openai_format.d.ts +21 -0
- package/lib/types/openai/openai_format.d.ts.map +1 -0
- package/lib/types/replicate.d.ts +48 -0
- package/lib/types/replicate.d.ts.map +1 -0
- package/lib/types/test-driver/TestErrorCompletionStream.d.ts +9 -0
- package/lib/types/test-driver/TestErrorCompletionStream.d.ts.map +1 -0
- package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts +9 -0
- package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts.map +1 -0
- package/lib/types/test-driver/index.d.ts +24 -0
- package/lib/types/test-driver/index.d.ts.map +1 -0
- package/lib/types/test-driver/utils.d.ts +5 -0
- package/lib/types/test-driver/utils.d.ts.map +1 -0
- package/lib/types/togetherai/index.d.ts +23 -0
- package/lib/types/togetherai/index.d.ts.map +1 -0
- package/lib/types/togetherai/interfaces.d.ts +96 -0
- package/lib/types/togetherai/interfaces.d.ts.map +1 -0
- package/lib/types/vertexai/debug.d.ts +2 -0
- package/lib/types/vertexai/debug.d.ts.map +1 -0
- package/lib/types/vertexai/embeddings/embeddings-image.d.ts +11 -0
- package/lib/types/vertexai/embeddings/embeddings-image.d.ts.map +1 -0
- package/lib/types/vertexai/embeddings/embeddings-text.d.ts +10 -0
- package/lib/types/vertexai/embeddings/embeddings-text.d.ts.map +1 -0
- package/lib/types/vertexai/index.d.ts +79 -0
- package/lib/types/vertexai/index.d.ts.map +1 -0
- package/lib/types/vertexai/models/claude.d.ts +103 -0
- package/lib/types/vertexai/models/claude.d.ts.map +1 -0
- package/lib/types/vertexai/models/gemini.d.ts +78 -0
- package/lib/types/vertexai/models/gemini.d.ts.map +1 -0
- package/lib/types/vertexai/models/imagen.d.ts +75 -0
- package/lib/types/vertexai/models/imagen.d.ts.map +1 -0
- package/lib/types/vertexai/models/llama.d.ts +20 -0
- package/lib/types/vertexai/models/llama.d.ts.map +1 -0
- package/lib/types/vertexai/models.d.ts +20 -0
- package/lib/types/vertexai/models.d.ts.map +1 -0
- package/lib/types/watsonx/index.d.ts +27 -0
- package/lib/types/watsonx/index.d.ts.map +1 -0
- package/lib/types/watsonx/interfaces.d.ts +65 -0
- package/lib/types/watsonx/interfaces.d.ts.map +1 -0
- package/lib/types/xai/index.d.ts +18 -0
- package/lib/types/xai/index.d.ts.map +1 -0
- package/package.json +18 -18
- package/src/bedrock/converse.ts +85 -10
- package/src/bedrock/error-handling.test.ts +352 -0
- package/src/bedrock/index.ts +293 -16
- package/src/groq/index.ts +9 -4
- package/src/mistral/index.ts +25 -22
- package/src/mistral/types.ts +0 -5
- package/src/openai/error-handling.test.ts +567 -0
- package/src/openai/index.ts +513 -33
- package/src/openai/openai_compatible.ts +7 -0
- package/src/openai/openai_format.ts +1 -1
- package/src/vertexai/index.ts +61 -13
- package/src/vertexai/models/claude-error-handling.test.ts +432 -0
- package/src/vertexai/models/claude.ts +287 -10
- package/src/vertexai/models/gemini-error-handling.test.ts +353 -0
- package/src/vertexai/models/gemini.ts +329 -52
- package/src/vertexai/models.ts +7 -2
|
@@ -0,0 +1,1441 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.BedrockDriver = void 0;
|
|
4
|
+
exports.messagesContainToolBlocks = messagesContainToolBlocks;
|
|
5
|
+
exports.convertToolBlocksToText = convertToolBlocksToText;
|
|
6
|
+
exports.fixOrphanedToolUse = fixOrphanedToolUse;
|
|
7
|
+
const client_bedrock_1 = require("@aws-sdk/client-bedrock");
|
|
8
|
+
const client_bedrock_runtime_1 = require("@aws-sdk/client-bedrock-runtime");
|
|
9
|
+
const client_s3_1 = require("@aws-sdk/client-s3");
|
|
10
|
+
const core_1 = require("@llumiverse/core");
|
|
11
|
+
const async_1 = require("@llumiverse/core/async");
|
|
12
|
+
const formatters_1 = require("@llumiverse/core/formatters");
|
|
13
|
+
const mnemonist_1 = require("mnemonist");
|
|
14
|
+
const converse_js_1 = require("./converse.js");
|
|
15
|
+
const nova_image_payload_js_1 = require("./nova-image-payload.js");
|
|
16
|
+
const s3_js_1 = require("./s3.js");
|
|
17
|
+
const twelvelabs_js_1 = require("./twelvelabs.js");
|
|
18
|
+
// Cache of model-id -> streaming-support flag so canStream() does not have to
// hit the Bedrock control plane on every execution (bounded at 4096 entries).
const supportStreamingCache = new mnemonist_1.LRUCache(4096);
|
|
19
|
+
// Kind of Bedrock resource a model identifier refers to. The string values
// match the resource-type segments used in Bedrock model identifiers/ARNs.
var BedrockModelType;
(function (BedrockModelType) {
    BedrockModelType.FoundationModel = "foundation-model";
    BedrockModelType.InferenceProfile = "inference-profile";
    BedrockModelType.CustomModel = "custom-model";
    BedrockModelType.Unknown = "unknown";
})(BedrockModelType || (BedrockModelType = {}));
|
|
27
|
+
/**
 * Normalize a Bedrock Converse stop reason to the llumiverse finish_reason vocabulary.
 * Bedrock may report: end_turn | tool_use | max_tokens | stop_sequence |
 * guardrail_intervened | content_filtered.
 * "end_turn" becomes "stop" and "max_tokens" becomes "length"; any other
 * reason is passed through unchanged. Falsy input yields undefined.
 */
function converseFinishReason(reason) {
    if (!reason) {
        return undefined;
    }
    if (reason === 'end_turn') {
        return "stop";
    }
    if (reason === 'max_tokens') {
        return "length";
    }
    return reason;
}
|
|
38
|
+
/**
 * Resolve a max_tokens value for Claude models, which require one to be set.
 * Uses the caller-supplied model_options.max_tokens when it is a number;
 * otherwise falls back to the model's known Bedrock token limit (8192 is
 * only a default to satisfy type narrowing), with a 64k default for
 * Claude 3.7 Sonnet when the thinking budget stays under 48k.
 */
function maxTokenFallbackClaude(option) {
    const modelOptions = option.model_options;
    if (typeof modelOptions?.max_tokens === "number") {
        return modelOptions.max_tokens;
    }
    // No explicit value: start from the model's documented limit.
    let fallback = (0, core_1.getMaxTokensLimitBedrock)(option.model) ?? 8192;
    const thinkingBudget = modelOptions?.thinking_budget_tokens ?? 0;
    if (option.model.includes('claude-3-7-sonnet') && thinkingBudget < 48000) {
        // Claude 3.7 can go up to 128k with a beta header, but when no max
        // tokens is specified we default to 64k.
        fallback = 64000;
    }
    return fallback;
}
|
|
53
|
+
/**
 * Parse the Claude version embedded in a model identifier.
 * Matches "claude-[optional variant]-{major}[-{minor}]" where the minor
 * version is limited to 1-2 digits so date suffixes (YYYYMMDD) are not
 * mistaken for a minor version.
 * @param modelString - The model identifier string
 * @returns An object with major and minor version numbers (minor defaults
 *          to 0), or null when no version can be parsed
 */
function parseClaudeVersion(modelString) {
    const match = modelString.match(/claude-(?:[a-z]+-)?(\d+)(?:-(\d{1,2}))?(?:-|\b)/);
    if (!match) {
        return null;
    }
    const [, majorText, minorText] = match;
    return {
        major: parseInt(majorText, 10),
        minor: minorText ? parseInt(minorText, 10) : 0,
    };
}
|
|
70
|
+
/**
 * Check whether the Claude version in a model identifier is greater than or
 * equal to a target version. Model strings with no parseable version
 * compare as false.
 * @param modelString - The model identifier string
 * @param targetMajor - Target major version
 * @param targetMinor - Target minor version
 * @returns true if the model version is >= target version, false otherwise
 */
function isClaudeVersionGTE(modelString, targetMajor, targetMinor) {
    const version = parseClaudeVersion(modelString);
    if (version === null) {
        return false;
    }
    if (version.major !== targetMajor) {
        return version.major > targetMajor;
    }
    return version.minor >= targetMinor;
}
|
|
87
|
+
class BedrockDriver extends core_1.AbstractDriver {
|
|
88
|
+
static PROVIDER = "bedrock";
|
|
89
|
+
provider = BedrockDriver.PROVIDER;
|
|
90
|
+
_executor;
|
|
91
|
+
_service;
|
|
92
|
+
_service_region;
|
|
93
|
+
// Construct the driver. A region is mandatory: it is normally derived from
// the environment's endpoint URL before the driver options are built.
constructor(options) {
    super(options);
    if (!options.region) {
        throw new Error("No region found. Set the region in the environment's endpoint URL.");
    }
}
|
|
99
|
+
// Lazily create (and cache) the BedrockRuntime client used for inference
// calls, bound to the driver's configured region and credentials.
getExecutor() {
    if (!this._executor) {
        this._executor = new client_bedrock_runtime_1.BedrockRuntime({
            region: this.options.region,
            credentials: this.options.credentials,
        });
    }
    return this._executor;
}
|
|
108
|
+
// Lazily create the Bedrock control-plane client for the given region.
// A single-entry cache: the client is rebuilt whenever a different region
// is requested than the one last used.
getService(region = this.options.region) {
    if (!this._service || this._service_region != region) {
        this._service = new client_bedrock_1.Bedrock({
            region: region,
            credentials: this.options.credentials,
        });
        this._service_region = region;
    }
    return this._service;
}
|
|
118
|
+
// Route prompt formatting by model family: Nova Canvas image models and
// TwelveLabs Pegasus take bespoke payloads; everything else uses the
// generic Converse prompt format.
async formatPrompt(segments, opts) {
    if (opts.model.includes("canvas")) {
        return await (0, formatters_1.formatNovaPrompt)(segments, opts.result_schema);
    }
    if (opts.model.includes("twelvelabs.pegasus")) {
        return await (0, twelvelabs_js_1.formatTwelvelabsPegasusPrompt)(segments, opts);
    }
    return await (0, converse_js_1.formatConversePrompt)(segments, opts);
}
|
|
127
|
+
/**
 * Format AWS Bedrock errors into LlumiverseError with proper status codes and retryability.
 *
 * AWS SDK errors provide:
 * - error.name: The exception type (e.g., "ThrottlingException")
 * - error.$metadata.httpStatusCode: The HTTP status code
 * - error.$metadata.requestId: The AWS request ID for tracking
 * - error.$fault: "client" or "server" indicating error category
 *
 * @param error - The AWS SDK error
 * @param context - Context about where the error occurred
 * @returns A standardized LlumiverseError
 */
formatLlumiverseError(error, context) {
    // Check if it's an AWS SDK error with $metadata; anything else falls
    // back to the base class's generic formatting.
    const awsError = error;
    const hasMetadata = awsError?.$metadata !== undefined;
    if (!hasMetadata) {
        // Not an AWS SDK error, use default handling
        return super.formatLlumiverseError(error, context);
    }
    // Extract AWS-specific fields
    const errorName = awsError.name || 'UnknownError';
    const httpStatusCode = awsError.$metadata?.httpStatusCode;
    const requestId = awsError.$metadata?.requestId;
    const fault = awsError.$fault; // "client" or "server"
    // Extract error message - handle both Error instances and plain objects
    let message;
    if (error instanceof Error) {
        message = error.message;
    }
    else if (typeof awsError.message === 'string') {
        message = awsError.message;
    }
    else {
        message = String(error);
    }
    // Build user-facing message with error name and status code.
    // Assembly order matters: "[status] message" first, then the error-name
    // prefix, then the request-id suffix.
    let userMessage = message;
    // Include status code in message if available (for end-user visibility)
    if (httpStatusCode) {
        userMessage = `[${httpStatusCode}] ${userMessage}`;
    }
    // Prefix with error name if it's meaningful (not just "Error")
    if (errorName && errorName !== 'Error' && errorName !== 'UnknownError') {
        userMessage = `${errorName}: ${userMessage}`;
    }
    // Add request ID if available (useful for AWS support)
    if (requestId) {
        userMessage += ` (Request ID: ${requestId})`;
    }
    // Determine retryability based on AWS error types
    const retryable = this.isBedrockErrorRetryable(errorName, httpStatusCode, fault);
    return new core_1.LlumiverseError(`[${this.provider}] ${userMessage}`, retryable, context, error, httpStatusCode, // Only set code if we have numeric status code
    errorName // Preserve AWS error name
    );
}
|
|
184
|
+
/**
|
|
185
|
+
* Determine if a Bedrock error is retryable based on error type and status.
|
|
186
|
+
*
|
|
187
|
+
* Retryable errors:
|
|
188
|
+
* - ThrottlingException: Rate limit exceeded, retry with backoff
|
|
189
|
+
* - ServiceUnavailableException: Service temporarily down
|
|
190
|
+
* - InternalServerException: Server-side error
|
|
191
|
+
* - ServiceQuotaExceededException: Quota exhausted, may recover
|
|
192
|
+
* - 5xx status codes: Server errors
|
|
193
|
+
* - 429, 408 status codes: Rate limit, timeout
|
|
194
|
+
*
|
|
195
|
+
* Non-retryable errors:
|
|
196
|
+
* - ValidationException: Invalid request parameters
|
|
197
|
+
* - AccessDeniedException: Authentication/authorization failure
|
|
198
|
+
* - ResourceNotFoundException: Resource doesn't exist
|
|
199
|
+
* - ConflictException: Resource state conflict
|
|
200
|
+
* - ResourceInUseException: Resource locked by another operation
|
|
201
|
+
* - 4xx status codes (except 429, 408): Client errors
|
|
202
|
+
*
|
|
203
|
+
* @param errorName - The AWS error name (e.g., "ThrottlingException")
|
|
204
|
+
* @param httpStatusCode - The HTTP status code if available
|
|
205
|
+
* @param fault - The fault type ("client" or "server")
|
|
206
|
+
* @returns True if retryable, false if not retryable, undefined if unknown
|
|
207
|
+
*/
|
|
208
|
+
isBedrockErrorRetryable(errorName, httpStatusCode, fault) {
|
|
209
|
+
// Check specific AWS error types first
|
|
210
|
+
switch (errorName) {
|
|
211
|
+
// Retryable errors
|
|
212
|
+
case 'ThrottlingException':
|
|
213
|
+
case 'ServiceUnavailableException':
|
|
214
|
+
case 'InternalServerException':
|
|
215
|
+
case 'ServiceQuotaExceededException':
|
|
216
|
+
return true;
|
|
217
|
+
// Non-retryable errors
|
|
218
|
+
case 'ValidationException':
|
|
219
|
+
case 'AccessDeniedException':
|
|
220
|
+
case 'ResourceNotFoundException':
|
|
221
|
+
case 'ConflictException':
|
|
222
|
+
case 'ResourceInUseException':
|
|
223
|
+
case 'TooManyTagsException':
|
|
224
|
+
return false;
|
|
225
|
+
}
|
|
226
|
+
// If we have HTTP status code, use it
|
|
227
|
+
if (httpStatusCode !== undefined) {
|
|
228
|
+
if (httpStatusCode === 429 || httpStatusCode === 408)
|
|
229
|
+
return true; // Rate limit, timeout
|
|
230
|
+
if (httpStatusCode === 529)
|
|
231
|
+
return true; // Overloaded
|
|
232
|
+
if (httpStatusCode >= 500 && httpStatusCode < 600)
|
|
233
|
+
return true; // Server errors
|
|
234
|
+
if (httpStatusCode >= 400 && httpStatusCode < 500)
|
|
235
|
+
return false; // Client errors
|
|
236
|
+
}
|
|
237
|
+
// Fall back to fault type
|
|
238
|
+
if (fault === 'server')
|
|
239
|
+
return true;
|
|
240
|
+
if (fault === 'client')
|
|
241
|
+
return false;
|
|
242
|
+
// Unknown error type - let consumer decide retry strategy
|
|
243
|
+
return undefined;
|
|
244
|
+
}
|
|
245
|
+
/**
 * Convert a non-streaming Converse response into a completion result.
 * Concatenates all text content blocks, optionally collects reasoning
 * ("thinking") content ahead of the text, and maps token usage and the
 * stop reason into the llumiverse shape.
 */
getExtractedExecution(result, _prompt, options) {
    let resultText = "";
    let reasoning = "";
    if (result.output?.message?.content) {
        for (const content of result.output.message.content) {
            // Get text output
            if (content.text) {
                resultText += content.text;
            }
            else if (content.reasoningContent) {
                // Extract reasoning content if include_thoughts is true, or if it's a
                // reasoning-only model (e.g. DeepSeek R1) that returns no text blocks
                const claudeOptions = options?.model_options;
                const isReasoningModel = options?.model?.includes('deepseek') && options?.model?.includes('r1');
                if (claudeOptions?.include_thoughts || isReasoningModel) {
                    if (content.reasoningContent.reasoningText) {
                        reasoning += content.reasoningContent.reasoningText.text;
                    }
                    else if (content.reasoningContent.redactedContent) {
                        // Handle redacted thinking content
                        const redactedData = new TextDecoder().decode(content.reasoningContent.redactedContent);
                        reasoning += `[Redacted thinking: ${redactedData}]`;
                    }
                }
                else {
                    this.logger.info("[Bedrock] Not outputting reasoning content as include_thoughts is false");
                }
            }
            else {
                // Get content block type
                const type = Object.keys(content).find(key => key !== '$unknown' && content[key] !== undefined);
                this.logger.info({ type }, "[Bedrock] Unsupported content response type:");
            }
        }
        // Add spacing if we have reasoning content
        if (reasoning) {
            reasoning += '\n\n';
        }
    }
    const completionResult = {
        // `+` binds tighter than `?:`, so this is (reasoning + resultText) ? ... : []
        // — an empty array when both strings are empty.
        result: reasoning + resultText ? [{ type: "text", value: reasoning + resultText }] : [],
        token_usage: {
            prompt: result.usage?.inputTokens,
            result: result.usage?.outputTokens,
            total: result.usage?.totalTokens,
        },
        finish_reason: converseFinishReason(result.stopReason),
    };
    return completionResult;
}
;
|
|
296
|
+
/**
 * Convert a single Converse streaming event into a partial completion chunk.
 * Handles reasoning-block starts (redacted content), text/reasoning deltas,
 * block boundaries, the final stop reason, and the trailing usage metadata.
 * Each call produces at most one chunk of text and/or reasoning.
 */
getExtractedStream(result, _prompt, options) {
    let output = "";
    let reasoning = "";
    let stop_reason = "";
    let token_usage;
    // Check if we should include thoughts (always true for reasoning-only models like DeepSeek R1)
    const isReasoningModel = options?.model?.includes('deepseek') && options?.model?.includes('r1');
    const shouldIncludeThoughts = isReasoningModel || (options && options.model_options?.include_thoughts);
    // Handle content block start events (for reasoning blocks)
    if (result.contentBlockStart) {
        // Handle redacted content at block start
        if (result.contentBlockStart.start && 'reasoningContent' in result.contentBlockStart.start && shouldIncludeThoughts) {
            const reasoningStart = result.contentBlockStart.start;
            if (reasoningStart.reasoningContent?.redactedContent) {
                const redactedData = new TextDecoder().decode(reasoningStart.reasoningContent.redactedContent);
                reasoning = `[Redacted thinking: ${redactedData}]`;
            }
        }
    }
    // Handle content block deltas (text and reasoning)
    if (result.contentBlockDelta) {
        const delta = result.contentBlockDelta.delta;
        if (delta?.text) {
            output = delta.text;
        }
        else if (delta?.reasoningContent && shouldIncludeThoughts) {
            if (delta.reasoningContent.text) {
                reasoning = delta.reasoningContent.text;
            }
            else if (delta.reasoningContent.redactedContent) {
                const redactedData = new TextDecoder().decode(delta.reasoningContent.redactedContent);
                reasoning = `[Redacted thinking: ${redactedData}]`;
            }
            else if (delta.reasoningContent.signature) {
                // Handle signature updates for reasoning content - end of thinking
                reasoning = "\n\n";
                // Putting logging here so it only triggers once.
                this.logger.info("[Bedrock] Not outputting reasoning content as include_thoughts is false");
            }
        }
        else if (delta) {
            // Get content block type
            const type = Object.keys(delta).find(key => key !== '$unknown' && delta[key] !== undefined);
            this.logger.info({ type }, "[Bedrock] Unsupported content response type:");
        }
    }
    // Handle content block stop events
    if (result.contentBlockStop) {
        // Content block ended - could be end of reasoning or text block
        // Add minimal spacing for reasoning blocks if not already present
        if (reasoning && !reasoning.endsWith('\n\n') && shouldIncludeThoughts) {
            reasoning += '\n\n';
        }
    }
    if (result.messageStop) {
        stop_reason = result.messageStop.stopReason ?? "";
    }
    // Usage metadata arrives on the final event of the stream.
    if (result.metadata) {
        token_usage = {
            prompt: result.metadata.usage?.inputTokens,
            result: result.metadata.usage?.outputTokens,
            total: result.metadata.usage?.totalTokens,
        };
    }
    const completionResult = {
        result: reasoning + output ? [{ type: "text", value: reasoning + output }] : [],
        token_usage: token_usage,
        finish_reason: converseFinishReason(stop_reason),
    };
    return completionResult;
}
;
|
|
368
|
+
/**
 * Best-effort extraction of an AWS region from a model identifier:
 * first from a full Bedrock ARN, then from a bare region substring,
 * falling back to the supplied default.
 */
extractRegion(modelString, defaultRegion) {
    // Match region in full ARN pattern
    const arnMatch = modelString.match(/arn:aws[^:]*:bedrock:([^:]+):/);
    if (arnMatch) {
        return arnMatch[1];
    }
    // Match common AWS regions directly in string
    const regionMatch = modelString.match(/(?:us|eu|ap|sa|ca|me|af)[-](east|west|central|south|north|southeast|southwest|northeast|northwest)[-][1-9]/);
    if (regionMatch) {
        return regionMatch[0];
    }
    return defaultRegion;
}
|
|
381
|
+
/**
 * Query Bedrock to determine whether `model` supports response streaming.
 * Foundation models are checked directly; inference profiles and custom
 * models are resolved to their underlying foundation model and checked
 * recursively. For Unknown types each lookup is tried in turn; only if
 * none succeeds is the last lookup error logged.
 */
async getCanStream(model, type) {
    let canStream = false;
    let error = null;
    // Control-plane lookups must target the region embedded in the model
    // ARN (if any), which may differ from the driver's default region.
    const region = this.extractRegion(model, this.options.region);
    if (type == BedrockModelType.FoundationModel || type == BedrockModelType.Unknown) {
        try {
            const response = await this.getService(region).getFoundationModel({
                modelIdentifier: model
            });
            canStream = response.modelDetails?.responseStreamingSupported ?? false;
            return canStream;
        }
        catch (e) {
            error = e;
        }
    }
    if (type == BedrockModelType.InferenceProfile || type == BedrockModelType.Unknown) {
        try {
            const response = await this.getService(region).getInferenceProfile({
                inferenceProfileIdentifier: model
            });
            // Recurse on the first model ARN behind the profile.
            canStream = await this.getCanStream(response.models?.[0].modelArn ?? "", BedrockModelType.FoundationModel);
            return canStream;
        }
        catch (e) {
            error = e;
        }
    }
    if (type == BedrockModelType.CustomModel || type == BedrockModelType.Unknown) {
        try {
            const response = await this.getService(region).getCustomModel({
                modelIdentifier: model
            });
            // Recurse on the base model the custom model was trained from.
            canStream = await this.getCanStream(response.baseModelArn ?? "", BedrockModelType.FoundationModel);
            return canStream;
        }
        catch (e) {
            error = e;
        }
    }
    if (error) {
        console.warn("Error on canStream check for model: " + model + " region detected: " + region, error);
    }
    return canStream;
}
|
|
426
|
+
async canStream(options) {
|
|
427
|
+
// // TwelveLabs Pegasus supports streaming according to the documentation
|
|
428
|
+
// if (options.model.includes("twelvelabs.pegasus")) {
|
|
429
|
+
// return true;
|
|
430
|
+
// }
|
|
431
|
+
let canStream = supportStreamingCache.get(options.model);
|
|
432
|
+
if (canStream == null) {
|
|
433
|
+
let type = BedrockModelType.Unknown;
|
|
434
|
+
if (options.model.includes("foundation-model")) {
|
|
435
|
+
type = BedrockModelType.FoundationModel;
|
|
436
|
+
}
|
|
437
|
+
else if (options.model.includes("inference-profile")) {
|
|
438
|
+
type = BedrockModelType.InferenceProfile;
|
|
439
|
+
}
|
|
440
|
+
else if (options.model.includes("custom-model")) {
|
|
441
|
+
type = BedrockModelType.CustomModel;
|
|
442
|
+
}
|
|
443
|
+
canStream = await this.getCanStream(options.model, type);
|
|
444
|
+
supportStreamingCache.set(options.model, canStream);
|
|
445
|
+
}
|
|
446
|
+
return canStream;
|
|
447
|
+
}
|
|
448
|
+
/**
 * Build conversation context after streaming completion.
 * Reconstructs the assistant message from accumulated results and applies stripping.
 *
 * @param prompt - the prompt sent for this turn (only ConverseRequest shapes are handled)
 * @param result - accumulated streamed completion chunks to fold into one assistant message
 * @param toolUse - tool invocation requests collected during streaming, if any
 * @param options - execution options (model id, prior conversation, stripping thresholds)
 * @returns the updated and stripped conversation, or undefined for prompt formats
 *          that do not carry a conversation (Nova Canvas, TwelveLabs Pegasus)
 */
buildStreamingConversation(prompt, result, toolUse, options) {
    // Only handle ConverseRequest prompts (not NovaMessagesPrompt or TwelvelabsPegasusRequest)
    if (options.model.includes("canvas") || options.model.includes("twelvelabs.pegasus")) {
        return undefined;
    }
    const conversePrompt = prompt;
    const completionResults = result;
    // Convert accumulated results to text content for assistant message
    const textContent = completionResults
        .map(r => {
        switch (r.type) {
            case 'text':
                return r.value;
            case 'json':
                // JSON chunks may already arrive as strings; stringify objects.
                return typeof r.value === 'string' ? r.value : JSON.stringify(r.value);
            case 'image':
                // Skip images in conversation - they're in the result
                return '';
            default:
                return String(r.value || '');
        }
    })
        .join('');
    // Deserialize any base64-encoded binary data back to Uint8Array
    const incomingConversation = (0, core_1.deserializeBinaryFromStorage)(options.conversation);
    // Start with the conversation from options combined with the prompt
    let conversation = updateConversation(incomingConversation, conversePrompt);
    // Build assistant message content
    const messageContent = [];
    if (textContent) {
        messageContent.push({ text: textContent });
    }
    // Add tool use blocks if present
    if (toolUse && toolUse.length > 0) {
        for (const tool of toolUse) {
            messageContent.push({
                toolUse: {
                    toolUseId: tool.id,
                    name: tool.tool_name,
                    input: tool.tool_input,
                }
            });
        }
    }
    // Add assistant message (falls back to an empty text block when there is no content)
    const assistantMessage = {
        messages: [{
                content: messageContent.length > 0 ? messageContent : [{ text: '' }],
                role: "assistant"
            }],
        modelId: conversePrompt.modelId,
    };
    conversation = updateConversation(conversation, assistantMessage);
    // Increment turn counter (stripping below is keyed off the new turn number)
    conversation = (0, core_1.incrementConversationTurn)(conversation);
    // Apply stripping based on options
    const currentTurn = (0, core_1.getConversationMeta)(conversation).turnNumber;
    const stripOptions = {
        keepForTurns: options.stripImagesAfterTurns ?? Infinity,
        currentTurn,
        textMaxTokens: options.stripTextMaxTokens
    };
    // Strip binary payloads, then truncate oversized text, then drop stale heartbeats.
    let processedConversation = (0, core_1.stripBinaryFromConversation)(conversation, stripOptions);
    processedConversation = (0, core_1.truncateLargeTextInConversation)(processedConversation, stripOptions);
    processedConversation = (0, core_1.stripHeartbeatsFromConversation)(processedConversation, {
        keepForTurns: options.stripHeartbeatsAfterTurns ?? 1,
        currentTurn,
    });
    return processedConversation;
}
|
|
522
|
+
/**
 * Execute a non-streaming text completion.
 * Routes TwelveLabs Pegasus models to their dedicated invoke path; all other
 * models go through the Bedrock Converse API with full conversation handling
 * (deserialization, assistant-message folding, tool-use extraction, stripping).
 *
 * @param prompt - ConverseRequest-shaped prompt (or Pegasus request for Pegasus models)
 * @param options - execution options (model, conversation, stripping thresholds, tools)
 * @returns completion result including the processed conversation and any tool_use requests
 */
async requestTextCompletion(prompt, options) {
    // Handle Twelvelabs Pegasus models
    if (options.model.includes("twelvelabs.pegasus")) {
        return this.requestTwelvelabsPegasusCompletion(prompt, options);
    }
    // Handle other Bedrock models that use Converse API
    const conversePrompt = prompt;
    // Deserialize any base64-encoded binary data back to Uint8Array before API call
    const incomingConversation = (0, core_1.deserializeBinaryFromStorage)(options.conversation);
    let conversation = updateConversation(incomingConversation, conversePrompt);
    const payload = this.preparePayload(conversation, options);
    const executor = this.getExecutor();
    const res = await executor.converse({
        ...payload,
    });
    // Strip reasoningContent from assistant messages before storing in conversation
    // (DeepSeek R1 returns reasoning blocks but rejects them in subsequent user turns)
    const assistantMsg = res.output?.message ?? { content: [{ text: "" }], role: "assistant" };
    if (assistantMsg.content) {
        assistantMsg.content = assistantMsg.content.filter((c) => !c.reasoningContent);
    }
    conversation = updateConversation(conversation, {
        messages: [assistantMsg],
        modelId: conversePrompt.modelId,
    });
    // Increment turn counter for deferred stripping
    conversation = (0, core_1.incrementConversationTurn)(conversation);
    let tool_use = undefined;
    //Get tool requests, we check tool use regardless of finish reason, as you can hit length and still get a valid response.
    tool_use = res.output?.message?.content?.reduce((tools, c) => {
        if (c.toolUse) {
            tools.push({
                tool_name: c.toolUse.name ?? "",
                tool_input: c.toolUse.input,
                id: c.toolUse.toolUseId ?? "",
            });
        }
        return tools;
    }, []);
    //If no tools were used, set to undefined
    if (tool_use && tool_use.length == 0) {
        tool_use = undefined;
    }
    // Strip/serialize binary data based on options.stripImagesAfterTurns
    const currentTurn = (0, core_1.getConversationMeta)(conversation).turnNumber;
    const stripOptions = {
        keepForTurns: options.stripImagesAfterTurns ?? Infinity,
        currentTurn,
        textMaxTokens: options.stripTextMaxTokens
    };
    let processedConversation = (0, core_1.stripBinaryFromConversation)(conversation, stripOptions);
    // Truncate large text content if configured
    processedConversation = (0, core_1.truncateLargeTextInConversation)(processedConversation, stripOptions);
    // Strip old heartbeat status messages
    processedConversation = (0, core_1.stripHeartbeatsFromConversation)(processedConversation, {
        keepForTurns: options.stripHeartbeatsAfterTurns ?? 1,
        currentTurn,
    });
    const completion = {
        ...this.getExtractedExecution(res, conversePrompt, options),
        original_response: options.include_original_response ? res : undefined,
        conversation: processedConversation,
        tool_use: tool_use,
    };
    return completion;
}
|
|
588
|
+
async requestTwelvelabsPegasusCompletion(prompt, options) {
|
|
589
|
+
const executor = this.getExecutor();
|
|
590
|
+
const res = await executor.invokeModel({
|
|
591
|
+
modelId: options.model,
|
|
592
|
+
contentType: "application/json",
|
|
593
|
+
accept: "application/json",
|
|
594
|
+
body: JSON.stringify(prompt),
|
|
595
|
+
});
|
|
596
|
+
const decoder = new TextDecoder();
|
|
597
|
+
const body = decoder.decode(res.body);
|
|
598
|
+
const result = JSON.parse(body);
|
|
599
|
+
// Extract the response according to TwelveLabs Pegasus format
|
|
600
|
+
let finishReason;
|
|
601
|
+
switch (result.finishReason) {
|
|
602
|
+
case "stop":
|
|
603
|
+
finishReason = "stop";
|
|
604
|
+
break;
|
|
605
|
+
case "length":
|
|
606
|
+
finishReason = "length";
|
|
607
|
+
break;
|
|
608
|
+
default:
|
|
609
|
+
finishReason = result.finishReason;
|
|
610
|
+
}
|
|
611
|
+
return {
|
|
612
|
+
result: result.message ? [{ type: "text", value: result.message }] : [],
|
|
613
|
+
finish_reason: finishReason,
|
|
614
|
+
original_response: options.include_original_response ? result : undefined,
|
|
615
|
+
};
|
|
616
|
+
}
|
|
617
|
+
async requestTwelvelabsPegasusCompletionStream(prompt, options) {
|
|
618
|
+
const executor = this.getExecutor();
|
|
619
|
+
const res = await executor.invokeModelWithResponseStream({
|
|
620
|
+
modelId: options.model,
|
|
621
|
+
contentType: "application/json",
|
|
622
|
+
accept: "application/json",
|
|
623
|
+
body: JSON.stringify(prompt),
|
|
624
|
+
});
|
|
625
|
+
if (!res.body) {
|
|
626
|
+
throw new Error("[Bedrock] Stream not found in response");
|
|
627
|
+
}
|
|
628
|
+
return (0, async_1.transformAsyncIterator)(res.body, (chunk) => {
|
|
629
|
+
if (chunk.chunk?.bytes) {
|
|
630
|
+
const decoder = new TextDecoder();
|
|
631
|
+
const body = decoder.decode(chunk.chunk.bytes);
|
|
632
|
+
try {
|
|
633
|
+
const result = JSON.parse(body);
|
|
634
|
+
// Extract streaming response according to TwelveLabs Pegasus format
|
|
635
|
+
let finishReason;
|
|
636
|
+
if (result.finishReason) {
|
|
637
|
+
switch (result.finishReason) {
|
|
638
|
+
case "stop":
|
|
639
|
+
finishReason = "stop";
|
|
640
|
+
break;
|
|
641
|
+
case "length":
|
|
642
|
+
finishReason = "length";
|
|
643
|
+
break;
|
|
644
|
+
default:
|
|
645
|
+
finishReason = result.finishReason;
|
|
646
|
+
}
|
|
647
|
+
}
|
|
648
|
+
return {
|
|
649
|
+
result: result.delta || result.message ? [{ type: "text", value: result.delta || result.message || "" }] : [],
|
|
650
|
+
finish_reason: finishReason,
|
|
651
|
+
};
|
|
652
|
+
}
|
|
653
|
+
catch (error) {
|
|
654
|
+
// If JSON parsing fails, return empty chunk
|
|
655
|
+
return {
|
|
656
|
+
result: [],
|
|
657
|
+
};
|
|
658
|
+
}
|
|
659
|
+
}
|
|
660
|
+
return {
|
|
661
|
+
result: [],
|
|
662
|
+
};
|
|
663
|
+
});
|
|
664
|
+
}
|
|
665
|
+
async requestTextCompletionStream(prompt, options) {
|
|
666
|
+
// Handle Twelvelabs Pegasus models
|
|
667
|
+
if (options.model.includes("twelvelabs.pegasus")) {
|
|
668
|
+
return this.requestTwelvelabsPegasusCompletionStream(prompt, options);
|
|
669
|
+
}
|
|
670
|
+
// Handle other Bedrock models that use Converse API
|
|
671
|
+
const conversePrompt = prompt;
|
|
672
|
+
// Include conversation history (same as non-streaming)
|
|
673
|
+
// Deserialize any base64-encoded binary data back to Uint8Array before API call
|
|
674
|
+
const incomingConversation = (0, core_1.deserializeBinaryFromStorage)(options.conversation);
|
|
675
|
+
const conversation = updateConversation(incomingConversation, conversePrompt);
|
|
676
|
+
const payload = this.preparePayload(conversation, options);
|
|
677
|
+
const executor = this.getExecutor();
|
|
678
|
+
return executor.converseStream({
|
|
679
|
+
...payload,
|
|
680
|
+
}).then((res) => {
|
|
681
|
+
const stream = res.stream;
|
|
682
|
+
if (!stream) {
|
|
683
|
+
throw new Error("[Bedrock] Stream not found in response");
|
|
684
|
+
}
|
|
685
|
+
return (0, async_1.transformAsyncIterator)(stream, (streamSegment) => {
|
|
686
|
+
return this.getExtractedStream(streamSegment, conversePrompt, options);
|
|
687
|
+
});
|
|
688
|
+
}).catch((err) => {
|
|
689
|
+
this.logger.error({ error: err }, "[Bedrock] Failed to stream");
|
|
690
|
+
throw err;
|
|
691
|
+
});
|
|
692
|
+
}
|
|
693
|
+
preparePayload(prompt, options) {
|
|
694
|
+
const model_options = options.model_options ?? { _option_id: "text-fallback" };
|
|
695
|
+
let additionalField = {};
|
|
696
|
+
let supportsJSONPrefill = false;
|
|
697
|
+
if (options.model.includes("amazon")) {
|
|
698
|
+
supportsJSONPrefill = true;
|
|
699
|
+
//Titan models also exists but does not support any additional options
|
|
700
|
+
if (options.model.includes("nova")) {
|
|
701
|
+
additionalField = { inferenceConfig: { topK: model_options.top_k } };
|
|
702
|
+
}
|
|
703
|
+
}
|
|
704
|
+
else if (options.model.includes("claude")) {
|
|
705
|
+
const claude_options = model_options;
|
|
706
|
+
const thinking = claude_options.thinking_mode ?? false;
|
|
707
|
+
supportsJSONPrefill = !thinking;
|
|
708
|
+
if (options.model.includes("claude-3-7") || options.model.includes("-4-")) {
|
|
709
|
+
additionalField = {
|
|
710
|
+
...additionalField,
|
|
711
|
+
reasoning_config: {
|
|
712
|
+
type: thinking ? "enabled" : "disabled",
|
|
713
|
+
budget_tokens: thinking ? (claude_options.thinking_budget_tokens ?? 1024) : undefined,
|
|
714
|
+
}
|
|
715
|
+
};
|
|
716
|
+
if (thinking && options.model.includes("claude-3-7-sonnet") &&
|
|
717
|
+
((claude_options.max_tokens ?? 0) > 64000 || (claude_options.thinking_budget_tokens ?? 0) > 64000)) {
|
|
718
|
+
additionalField = {
|
|
719
|
+
...additionalField,
|
|
720
|
+
anthropic_beta: ["output-128k-2025-02-19"]
|
|
721
|
+
};
|
|
722
|
+
}
|
|
723
|
+
}
|
|
724
|
+
// Claude 4.6 and later versions don't support JSON prefill
|
|
725
|
+
if (isClaudeVersionGTE(options.model, 4, 6)) {
|
|
726
|
+
supportsJSONPrefill = false;
|
|
727
|
+
}
|
|
728
|
+
//Needs max_tokens to be set
|
|
729
|
+
if (!model_options.max_tokens) {
|
|
730
|
+
model_options.max_tokens = maxTokenFallbackClaude(options);
|
|
731
|
+
}
|
|
732
|
+
additionalField = { ...additionalField, top_k: model_options.top_k };
|
|
733
|
+
}
|
|
734
|
+
else if (options.model.includes("meta")) {
|
|
735
|
+
//LLaMA models support no additional options
|
|
736
|
+
}
|
|
737
|
+
else if (options.model.includes("mistral")) {
|
|
738
|
+
//7B instruct and 8x7B instruct
|
|
739
|
+
if (options.model.includes("7b")) {
|
|
740
|
+
additionalField = { top_k: model_options.top_k };
|
|
741
|
+
//Does not support system messages
|
|
742
|
+
if (prompt.system && prompt.system?.length != 0) {
|
|
743
|
+
prompt.messages?.push((0, converse_js_1.converseSystemToMessages)(prompt.system));
|
|
744
|
+
prompt.system = undefined;
|
|
745
|
+
prompt.messages = (0, converse_js_1.converseConcatMessages)(prompt.messages);
|
|
746
|
+
}
|
|
747
|
+
}
|
|
748
|
+
else {
|
|
749
|
+
//Other models such as Mistral Small,Large and Large 2
|
|
750
|
+
//Support no additional fields.
|
|
751
|
+
}
|
|
752
|
+
}
|
|
753
|
+
else if (options.model.includes("ai21")) {
|
|
754
|
+
//Jamba models support no additional options
|
|
755
|
+
//Jurassic 2 models do.
|
|
756
|
+
if (options.model.includes("j2")) {
|
|
757
|
+
additionalField = {
|
|
758
|
+
presencePenalty: { scale: model_options.presence_penalty },
|
|
759
|
+
frequencyPenalty: { scale: model_options.frequency_penalty },
|
|
760
|
+
};
|
|
761
|
+
//Does not support system messages
|
|
762
|
+
if (prompt.system && prompt.system?.length != 0) {
|
|
763
|
+
prompt.messages?.push((0, converse_js_1.converseSystemToMessages)(prompt.system));
|
|
764
|
+
prompt.system = undefined;
|
|
765
|
+
prompt.messages = (0, converse_js_1.converseConcatMessages)(prompt.messages);
|
|
766
|
+
}
|
|
767
|
+
}
|
|
768
|
+
}
|
|
769
|
+
else if (options.model.includes("cohere.command")) {
|
|
770
|
+
// If last message is "```json", remove it.
|
|
771
|
+
//Command R and R plus
|
|
772
|
+
if (options.model.includes("cohere.command-r")) {
|
|
773
|
+
additionalField = {
|
|
774
|
+
k: model_options.top_k,
|
|
775
|
+
frequency_penalty: model_options.frequency_penalty,
|
|
776
|
+
presence_penalty: model_options.presence_penalty,
|
|
777
|
+
};
|
|
778
|
+
}
|
|
779
|
+
else {
|
|
780
|
+
// Command non-R
|
|
781
|
+
additionalField = { k: model_options.top_k };
|
|
782
|
+
//Does not support system messages
|
|
783
|
+
if (prompt.system && prompt.system?.length != 0) {
|
|
784
|
+
prompt.messages?.push((0, converse_js_1.converseSystemToMessages)(prompt.system));
|
|
785
|
+
prompt.system = undefined;
|
|
786
|
+
prompt.messages = (0, converse_js_1.converseConcatMessages)(prompt.messages);
|
|
787
|
+
}
|
|
788
|
+
}
|
|
789
|
+
}
|
|
790
|
+
else if (options.model.includes("palmyra")) {
|
|
791
|
+
const palmyraOptions = model_options;
|
|
792
|
+
additionalField = {
|
|
793
|
+
seed: palmyraOptions?.seed,
|
|
794
|
+
presence_penalty: palmyraOptions?.presence_penalty,
|
|
795
|
+
frequency_penalty: palmyraOptions?.frequency_penalty,
|
|
796
|
+
min_tokens: palmyraOptions?.min_tokens,
|
|
797
|
+
};
|
|
798
|
+
}
|
|
799
|
+
else if (options.model.includes("deepseek")) {
|
|
800
|
+
// DeepSeek models: no additional options, no stopSequences, only one of temperature/top_p
|
|
801
|
+
model_options.stop_sequence = undefined;
|
|
802
|
+
model_options.top_p = undefined;
|
|
803
|
+
}
|
|
804
|
+
else if (options.model.includes("gpt-oss")) {
|
|
805
|
+
const gptOssOptions = model_options;
|
|
806
|
+
additionalField = {
|
|
807
|
+
reasoning_effort: gptOssOptions?.reasoning_effort,
|
|
808
|
+
};
|
|
809
|
+
}
|
|
810
|
+
//If last message is "```json", add corresponding ``` as a stop sequence.
|
|
811
|
+
if (prompt.messages && prompt.messages.length > 0) {
|
|
812
|
+
if (prompt.messages[prompt.messages.length - 1].content?.[0].text === "```json") {
|
|
813
|
+
const stopSeq = model_options.stop_sequence;
|
|
814
|
+
if (!stopSeq) {
|
|
815
|
+
model_options.stop_sequence = ["```"];
|
|
816
|
+
}
|
|
817
|
+
else if (!stopSeq.includes("```")) {
|
|
818
|
+
stopSeq.push("```");
|
|
819
|
+
model_options.stop_sequence = stopSeq;
|
|
820
|
+
}
|
|
821
|
+
}
|
|
822
|
+
}
|
|
823
|
+
const tool_defs = getToolDefinitions(options.tools);
|
|
824
|
+
// Use prefill when there is a schema and tools are not being used
|
|
825
|
+
if (supportsJSONPrefill && options.result_schema && !tool_defs) {
|
|
826
|
+
prompt.messages = (0, converse_js_1.converseJSONprefill)(prompt.messages);
|
|
827
|
+
}
|
|
828
|
+
// Clean undefined values from additionalField since AWS Bedrock requires valid JSON
|
|
829
|
+
// and will throw an exception for unrecognized parameters
|
|
830
|
+
const cleanedAdditionalFields = removeUndefinedValues(additionalField);
|
|
831
|
+
const cleanedModelOptions = removeUndefinedValues({
|
|
832
|
+
maxTokens: model_options.max_tokens,
|
|
833
|
+
temperature: model_options.temperature,
|
|
834
|
+
topP: model_options.temperature != null ? undefined : model_options.top_p,
|
|
835
|
+
stopSequences: model_options.stop_sequence,
|
|
836
|
+
});
|
|
837
|
+
//Construct the final request payload
|
|
838
|
+
// We only add fields that are defined to avoid AWS errors
|
|
839
|
+
const request = {
|
|
840
|
+
modelId: options.model,
|
|
841
|
+
};
|
|
842
|
+
if (prompt.messages) {
|
|
843
|
+
request.messages = prompt.messages;
|
|
844
|
+
}
|
|
845
|
+
if (prompt.system) {
|
|
846
|
+
request.system = prompt.system;
|
|
847
|
+
}
|
|
848
|
+
if (Object.keys(cleanedModelOptions).length > 0) {
|
|
849
|
+
request.inferenceConfig = cleanedModelOptions;
|
|
850
|
+
}
|
|
851
|
+
if (Object.keys(cleanedAdditionalFields).length > 0) {
|
|
852
|
+
request.additionalModelRequestFields = cleanedAdditionalFields;
|
|
853
|
+
}
|
|
854
|
+
if (tool_defs?.length) {
|
|
855
|
+
request.toolConfig = {
|
|
856
|
+
tools: tool_defs,
|
|
857
|
+
};
|
|
858
|
+
}
|
|
859
|
+
else if (request.messages && messagesContainToolBlocks(request.messages)) {
|
|
860
|
+
// Bedrock requires toolConfig when conversation contains toolUse/toolResult blocks.
|
|
861
|
+
// When no tools are provided (e.g. checkpoint summary calls), convert tool blocks
|
|
862
|
+
// to text representations so the conversation data is preserved while satisfying
|
|
863
|
+
// Bedrock's API requirements without making tools callable.
|
|
864
|
+
request.messages = convertToolBlocksToText(request.messages);
|
|
865
|
+
}
|
|
866
|
+
return request;
|
|
867
|
+
}
|
|
868
|
+
isImageModel(model) {
|
|
869
|
+
return model.includes("titan-image") || model.includes("stable-diffusion") || model.includes("nova-canvas");
|
|
870
|
+
}
|
|
871
|
+
async requestImageGeneration(prompt, options) {
|
|
872
|
+
if (options.model_options?._option_id !== "bedrock-nova-canvas") {
|
|
873
|
+
this.logger.warn({ options: options.model_options }, "Invalid model options");
|
|
874
|
+
}
|
|
875
|
+
const model_options = options.model_options;
|
|
876
|
+
const executor = this.getExecutor();
|
|
877
|
+
const taskType = model_options.taskType ?? nova_image_payload_js_1.NovaImageGenerationTaskType.TEXT_IMAGE;
|
|
878
|
+
this.logger.info("Task type: " + taskType);
|
|
879
|
+
if (typeof prompt === "string") {
|
|
880
|
+
throw new Error("Bad prompt format");
|
|
881
|
+
}
|
|
882
|
+
const payload = await (0, nova_image_payload_js_1.formatNovaImageGenerationPayload)(taskType, prompt, options);
|
|
883
|
+
const res = await executor.invokeModel({
|
|
884
|
+
modelId: options.model,
|
|
885
|
+
contentType: "application/json",
|
|
886
|
+
accept: "application/json",
|
|
887
|
+
body: JSON.stringify(payload),
|
|
888
|
+
}, {
|
|
889
|
+
requestTimeout: 60000 * 5
|
|
890
|
+
});
|
|
891
|
+
const decoder = new TextDecoder();
|
|
892
|
+
const body = decoder.decode(res.body);
|
|
893
|
+
const bedrockResult = JSON.parse(body);
|
|
894
|
+
return {
|
|
895
|
+
error: bedrockResult.error,
|
|
896
|
+
result: bedrockResult.images.map((image) => ({
|
|
897
|
+
type: "image",
|
|
898
|
+
value: image
|
|
899
|
+
}))
|
|
900
|
+
};
|
|
901
|
+
}
|
|
902
|
+
async startTraining(dataset, options) {
|
|
903
|
+
//convert options.params to Record<string, string>
|
|
904
|
+
const params = {};
|
|
905
|
+
for (const [key, value] of Object.entries(options.params || {})) {
|
|
906
|
+
params[key] = String(value);
|
|
907
|
+
}
|
|
908
|
+
if (!this.options.training_bucket) {
|
|
909
|
+
throw new Error("Training cannot nbe used since the 'training_bucket' property was not specified in driver options");
|
|
910
|
+
}
|
|
911
|
+
const s3 = new client_s3_1.S3Client({ region: this.options.region, credentials: this.options.credentials });
|
|
912
|
+
const stream = await dataset.getStream();
|
|
913
|
+
const upload = await (0, s3_js_1.forceUploadFile)(s3, stream, this.options.training_bucket, dataset.name);
|
|
914
|
+
const service = this.getService();
|
|
915
|
+
const response = await service.send(new client_bedrock_1.CreateModelCustomizationJobCommand({
|
|
916
|
+
jobName: options.name + "-job",
|
|
917
|
+
customModelName: options.name,
|
|
918
|
+
roleArn: this.options.training_role_arn || undefined,
|
|
919
|
+
baseModelIdentifier: options.model,
|
|
920
|
+
clientRequestToken: "llumiverse-" + Date.now(),
|
|
921
|
+
trainingDataConfig: {
|
|
922
|
+
s3Uri: `s3://${upload.Bucket}/${upload.Key}`,
|
|
923
|
+
},
|
|
924
|
+
outputDataConfig: undefined,
|
|
925
|
+
hyperParameters: params,
|
|
926
|
+
//TODO not supported?
|
|
927
|
+
//customizationType: "FINE_TUNING",
|
|
928
|
+
}));
|
|
929
|
+
const job = await service.send(new client_bedrock_1.GetModelCustomizationJobCommand({
|
|
930
|
+
jobIdentifier: response.jobArn
|
|
931
|
+
}));
|
|
932
|
+
return jobInfo(job, response.jobArn);
|
|
933
|
+
}
|
|
934
|
+
async cancelTraining(jobId) {
|
|
935
|
+
const service = this.getService();
|
|
936
|
+
await service.send(new client_bedrock_1.StopModelCustomizationJobCommand({
|
|
937
|
+
jobIdentifier: jobId
|
|
938
|
+
}));
|
|
939
|
+
const job = await service.send(new client_bedrock_1.GetModelCustomizationJobCommand({
|
|
940
|
+
jobIdentifier: jobId
|
|
941
|
+
}));
|
|
942
|
+
return jobInfo(job, jobId);
|
|
943
|
+
}
|
|
944
|
+
async getTrainingJob(jobId) {
|
|
945
|
+
const service = this.getService();
|
|
946
|
+
const job = await service.send(new client_bedrock_1.GetModelCustomizationJobCommand({
|
|
947
|
+
jobIdentifier: jobId
|
|
948
|
+
}));
|
|
949
|
+
return jobInfo(job, jobId);
|
|
950
|
+
}
|
|
951
|
+
// ===================== management API ==================
|
|
952
|
+
/**
 * Validate that the Bedrock connection is usable.
 * NOTE(review): no API call is made — this assumes a successfully initialized
 * client implies valid credentials and always returns true.
 */
async validateConnection() {
    const service = this.getService();
    // NOTE(review): `service.config.credentials.name` is an unusual access for an
    // AWS SDK v3 credentials provider — confirm it logs something meaningful.
    this.logger.debug("[Bedrock] validating connection", service.config.credentials.name);
    //return true as if the client has been initialized, it means the connection is valid
    return true;
}
|
|
958
|
+
async listTrainableModels() {
|
|
959
|
+
this.logger.debug("[Bedrock] listing trainable models");
|
|
960
|
+
return this._listModels(m => m.customizationsSupported ? m.customizationsSupported.includes("FINE_TUNING") : false);
|
|
961
|
+
}
|
|
962
|
+
async listModels() {
|
|
963
|
+
this.logger.debug("[Bedrock] listing models");
|
|
964
|
+
// exclude trainable models since they are not executable
|
|
965
|
+
// exclude embedding models, not to be used for typical completions.
|
|
966
|
+
const filter = (m) => (m.inferenceTypesSupported?.includes("ON_DEMAND") && !m.outputModalities?.includes("EMBEDDING")) ?? false;
|
|
967
|
+
return this._listModels(filter);
|
|
968
|
+
}
|
|
969
|
+
async _listModels(foundationFilter) {
|
|
970
|
+
const service = this.getService();
|
|
971
|
+
const [foundationModelsList, customModelsList, inferenceProfilesList] = await Promise.all([
|
|
972
|
+
service.listFoundationModels({}).catch(() => {
|
|
973
|
+
this.logger.warn("[Bedrock] Can't list foundation models. Check if the user has the right permissions.");
|
|
974
|
+
return undefined;
|
|
975
|
+
}),
|
|
976
|
+
service.listCustomModels({}).catch(() => {
|
|
977
|
+
this.logger.warn("[Bedrock] Can't list custom models. Check if the user has the right permissions.");
|
|
978
|
+
return undefined;
|
|
979
|
+
}),
|
|
980
|
+
service.listInferenceProfiles({}).catch(() => {
|
|
981
|
+
this.logger.warn("[Bedrock] Can't list inference profiles. Check if the user has the right permissions.");
|
|
982
|
+
return undefined;
|
|
983
|
+
}),
|
|
984
|
+
]);
|
|
985
|
+
if (!foundationModelsList?.modelSummaries) {
|
|
986
|
+
throw new Error("Foundation models not found");
|
|
987
|
+
}
|
|
988
|
+
let foundationModels = foundationModelsList.modelSummaries || [];
|
|
989
|
+
if (foundationFilter) {
|
|
990
|
+
foundationModels = foundationModels.filter(foundationFilter);
|
|
991
|
+
}
|
|
992
|
+
const supportedPublishers = ["amazon", "anthropic", "cohere", "ai21",
|
|
993
|
+
"mistral", "meta", "deepseek", "writer",
|
|
994
|
+
"openai", "twelvelabs", "qwen"];
|
|
995
|
+
const unsupportedModelsByPublisher = {
|
|
996
|
+
amazon: ["titan-image-generator", "nova-reel", "nova-sonic", "rerank"],
|
|
997
|
+
anthropic: [],
|
|
998
|
+
cohere: ["rerank", "embed"],
|
|
999
|
+
ai21: [],
|
|
1000
|
+
mistral: [],
|
|
1001
|
+
meta: [],
|
|
1002
|
+
deepseek: [],
|
|
1003
|
+
writer: [],
|
|
1004
|
+
openai: [],
|
|
1005
|
+
twelvelabs: ["marengo"],
|
|
1006
|
+
qwen: [],
|
|
1007
|
+
};
|
|
1008
|
+
// Helper function to check if model should be filtered out
|
|
1009
|
+
const shouldIncludeModel = (modelId, providerName) => {
|
|
1010
|
+
if (!modelId || !providerName)
|
|
1011
|
+
return false;
|
|
1012
|
+
const normalizedProvider = providerName.toLowerCase();
|
|
1013
|
+
// Check if provider is supported
|
|
1014
|
+
const isProviderSupported = supportedPublishers.some(provider => normalizedProvider.includes(provider));
|
|
1015
|
+
if (!isProviderSupported)
|
|
1016
|
+
return false;
|
|
1017
|
+
// Check if model is in the unsupported list for its provider
|
|
1018
|
+
for (const provider of supportedPublishers) {
|
|
1019
|
+
if (normalizedProvider.includes(provider)) {
|
|
1020
|
+
const unsupportedModels = unsupportedModelsByPublisher[provider] || [];
|
|
1021
|
+
return !unsupportedModels.some(unsupported => modelId.toLowerCase().includes(unsupported));
|
|
1022
|
+
}
|
|
1023
|
+
}
|
|
1024
|
+
return true;
|
|
1025
|
+
};
|
|
1026
|
+
foundationModels = foundationModels.filter(m => shouldIncludeModel(m.modelId, m.providerName));
|
|
1027
|
+
const aiModels = foundationModels.map((m) => {
|
|
1028
|
+
if (!m.modelId) {
|
|
1029
|
+
throw new Error("modelId not found");
|
|
1030
|
+
}
|
|
1031
|
+
const modelCapability = (0, core_1.getModelCapabilities)(m.modelArn ?? m.modelId, this.provider);
|
|
1032
|
+
const model = {
|
|
1033
|
+
id: m.modelArn ?? m.modelId,
|
|
1034
|
+
name: `${m.providerName} ${m.modelName}`,
|
|
1035
|
+
provider: this.provider,
|
|
1036
|
+
owner: m.providerName,
|
|
1037
|
+
can_stream: m.responseStreamingSupported ?? false,
|
|
1038
|
+
input_modalities: m.inputModalities ? formatAmazonModalities(m.inputModalities) : (0, core_1.modelModalitiesToArray)(modelCapability.input),
|
|
1039
|
+
output_modalities: m.outputModalities ? formatAmazonModalities(m.outputModalities) : (0, core_1.modelModalitiesToArray)(modelCapability.input),
|
|
1040
|
+
tool_support: modelCapability.tool_support,
|
|
1041
|
+
};
|
|
1042
|
+
return model;
|
|
1043
|
+
});
|
|
1044
|
+
//add custom models
|
|
1045
|
+
if (customModelsList?.modelSummaries) {
|
|
1046
|
+
customModelsList.modelSummaries.forEach((m) => {
|
|
1047
|
+
if (!m.modelArn) {
|
|
1048
|
+
throw new Error("Model ID not found");
|
|
1049
|
+
}
|
|
1050
|
+
const modelCapability = (0, core_1.getModelCapabilities)(m.modelArn, this.provider);
|
|
1051
|
+
const model = {
|
|
1052
|
+
id: m.modelArn,
|
|
1053
|
+
name: m.modelName ?? m.modelArn,
|
|
1054
|
+
provider: this.provider,
|
|
1055
|
+
owner: "custom",
|
|
1056
|
+
description: `Custom model from ${m.baseModelName}`,
|
|
1057
|
+
is_custom: true,
|
|
1058
|
+
input_modalities: (0, core_1.modelModalitiesToArray)(modelCapability.input),
|
|
1059
|
+
output_modalities: (0, core_1.modelModalitiesToArray)(modelCapability.output),
|
|
1060
|
+
tool_support: modelCapability.tool_support,
|
|
1061
|
+
};
|
|
1062
|
+
aiModels.push(model);
|
|
1063
|
+
this.validateConnection;
|
|
1064
|
+
});
|
|
1065
|
+
}
|
|
1066
|
+
//add inference profiles
|
|
1067
|
+
if (inferenceProfilesList?.inferenceProfileSummaries) {
|
|
1068
|
+
inferenceProfilesList.inferenceProfileSummaries.forEach((p) => {
|
|
1069
|
+
if (!p.inferenceProfileArn) {
|
|
1070
|
+
throw new Error("Profile ARN not found");
|
|
1071
|
+
}
|
|
1072
|
+
// Apply the same filtering logic to inference profiles based on their name
|
|
1073
|
+
const profileId = p.inferenceProfileId || "";
|
|
1074
|
+
const profileName = p.inferenceProfileName || "";
|
|
1075
|
+
// Extract provider name from profile name or ID
|
|
1076
|
+
let providerName = "";
|
|
1077
|
+
for (const provider of supportedPublishers) {
|
|
1078
|
+
if (profileName.toLowerCase().includes(provider) || profileId.toLowerCase().includes(provider)) {
|
|
1079
|
+
providerName = provider;
|
|
1080
|
+
break;
|
|
1081
|
+
}
|
|
1082
|
+
}
|
|
1083
|
+
const modelCapability = (0, core_1.getModelCapabilities)(p.inferenceProfileArn ?? p.inferenceProfileId, this.provider);
|
|
1084
|
+
if (providerName && shouldIncludeModel(profileId, providerName)) {
|
|
1085
|
+
const model = {
|
|
1086
|
+
id: p.inferenceProfileArn ?? p.inferenceProfileId,
|
|
1087
|
+
name: p.inferenceProfileName ?? p.inferenceProfileArn,
|
|
1088
|
+
provider: this.provider,
|
|
1089
|
+
owner: providerName,
|
|
1090
|
+
input_modalities: (0, core_1.modelModalitiesToArray)(modelCapability.input),
|
|
1091
|
+
output_modalities: (0, core_1.modelModalitiesToArray)(modelCapability.output),
|
|
1092
|
+
tool_support: modelCapability.tool_support,
|
|
1093
|
+
};
|
|
1094
|
+
aiModels.push(model);
|
|
1095
|
+
}
|
|
1096
|
+
});
|
|
1097
|
+
}
|
|
1098
|
+
return aiModels;
|
|
1099
|
+
}
|
|
1100
|
+
async generateEmbeddings({ text, image, model }) {
|
|
1101
|
+
this.logger.info("[Bedrock] Generating embeddings with model " + model);
|
|
1102
|
+
// Handle TwelveLabs Marengo models
|
|
1103
|
+
if (model?.includes("twelvelabs.marengo")) {
|
|
1104
|
+
return this.generateTwelvelabsMarengoEmbeddings({ text, image, model });
|
|
1105
|
+
}
|
|
1106
|
+
// Handle other Bedrock embedding models
|
|
1107
|
+
const defaultModel = image ? "amazon.titan-embed-image-v1" : "amazon.titan-embed-text-v2:0";
|
|
1108
|
+
const modelID = model ?? defaultModel;
|
|
1109
|
+
const invokeBody = {
|
|
1110
|
+
inputText: text,
|
|
1111
|
+
inputImage: image
|
|
1112
|
+
};
|
|
1113
|
+
const executor = this.getExecutor();
|
|
1114
|
+
const res = await executor.invokeModel({
|
|
1115
|
+
modelId: modelID,
|
|
1116
|
+
contentType: "application/json",
|
|
1117
|
+
body: JSON.stringify(invokeBody),
|
|
1118
|
+
});
|
|
1119
|
+
const decoder = new TextDecoder();
|
|
1120
|
+
const body = decoder.decode(res.body);
|
|
1121
|
+
const result = JSON.parse(body);
|
|
1122
|
+
if (!result.embedding) {
|
|
1123
|
+
throw new Error("Embeddings not found");
|
|
1124
|
+
}
|
|
1125
|
+
return {
|
|
1126
|
+
values: result.embedding,
|
|
1127
|
+
model: modelID,
|
|
1128
|
+
token_count: result.inputTextTokenCount
|
|
1129
|
+
};
|
|
1130
|
+
}
|
|
1131
|
+
async generateTwelvelabsMarengoEmbeddings({ text, image, model }) {
|
|
1132
|
+
const executor = this.getExecutor();
|
|
1133
|
+
// Prepare the request payload for TwelveLabs Marengo
|
|
1134
|
+
const invokeBody = {
|
|
1135
|
+
inputType: "text"
|
|
1136
|
+
};
|
|
1137
|
+
if (text) {
|
|
1138
|
+
invokeBody.inputText = text;
|
|
1139
|
+
invokeBody.inputType = "text";
|
|
1140
|
+
}
|
|
1141
|
+
if (image) {
|
|
1142
|
+
// For the embeddings interface, image is expected to be base64
|
|
1143
|
+
invokeBody.mediaSource = {
|
|
1144
|
+
base64String: image
|
|
1145
|
+
};
|
|
1146
|
+
invokeBody.inputType = "image";
|
|
1147
|
+
}
|
|
1148
|
+
const res = await executor.invokeModel({
|
|
1149
|
+
modelId: model,
|
|
1150
|
+
contentType: "application/json",
|
|
1151
|
+
accept: "application/json",
|
|
1152
|
+
body: JSON.stringify(invokeBody),
|
|
1153
|
+
});
|
|
1154
|
+
const decoder = new TextDecoder();
|
|
1155
|
+
const body = decoder.decode(res.body);
|
|
1156
|
+
const result = JSON.parse(body);
|
|
1157
|
+
// TwelveLabs Marengo returns embedding data
|
|
1158
|
+
if (!result.embedding) {
|
|
1159
|
+
throw new Error("Embeddings not found in TwelveLabs Marengo response");
|
|
1160
|
+
}
|
|
1161
|
+
return {
|
|
1162
|
+
values: result.embedding,
|
|
1163
|
+
model: model,
|
|
1164
|
+
// TwelveLabs Marengo doesn't return token count in the same way
|
|
1165
|
+
token_count: undefined
|
|
1166
|
+
};
|
|
1167
|
+
}
|
|
1168
|
+
/**
|
|
1169
|
+
* Cleanup AWS SDK clients when the driver is evicted from the cache.
|
|
1170
|
+
*/
|
|
1171
|
+
destroy() {
|
|
1172
|
+
this._executor?.destroy();
|
|
1173
|
+
this._service?.destroy();
|
|
1174
|
+
}
|
|
1175
|
+
}
|
|
1176
|
+
// CommonJS export: BedrockDriver is this module's public driver class.
exports.BedrockDriver = BedrockDriver;
|
|
1177
|
+
/**
 * Maps an AWS Bedrock model-customization job description to the generic
 * training-job info shape used by llumiverse.
 *
 * @param job the Bedrock job description (GetModelCustomizationJob output)
 * @param jobId identifier reported back to the caller
 * @returns {{id: *, model: *, status: *, details: *}} normalized job info
 */
function jobInfo(job, jobId) {
    const jobStatus = job.status;
    let status = core_1.TrainingJobStatus.running;
    let details;
    if (jobStatus === client_bedrock_1.ModelCustomizationJobStatus.COMPLETED) {
        status = core_1.TrainingJobStatus.succeeded;
    }
    else if (jobStatus === client_bedrock_1.ModelCustomizationJobStatus.FAILED) {
        status = core_1.TrainingJobStatus.failed;
        details = job.failureMessage || "error";
    }
    else if (jobStatus === client_bedrock_1.ModelCustomizationJobStatus.STOPPED) {
        status = core_1.TrainingJobStatus.cancelled;
    }
    else {
        // Any other status (e.g. in progress) is reported as still running,
        // with the raw Bedrock status exposed in details.
        status = core_1.TrainingJobStatus.running;
        details = jobStatus;
    }
    // NOTE: the original code contained a dead expression statement
    // `job.baseModelArn;` here; it had no effect and has been removed.
    return {
        id: jobId,
        model: job.outputModelArn,
        status,
        details
    };
}
|
|
1203
|
+
/**
 * Converts a generic tool list into Bedrock Converse toolSpec definitions.
 * Returns undefined when no tool list was provided.
 */
function getToolDefinitions(tools) {
    if (!tools) {
        return undefined;
    }
    return tools.map(getToolDefinition);
}
/**
 * Wraps a single generic tool definition in the Bedrock Converse
 * toolSpec format (JSON-schema input under inputSchema.json).
 */
function getToolDefinition(tool) {
    const { name, description, input_schema } = tool;
    return {
        toolSpec: {
            name,
            description,
            inputSchema: {
                json: input_schema,
            },
        },
    };
}
|
|
1217
|
+
/**
 * Checks whether any message contains toolUse or toolResult content blocks.
 *
 * @param messages conversation messages (each may or may not have content)
 * @returns {boolean} true if at least one block carries toolUse or toolResult
 */
function messagesContainToolBlocks(messages) {
    return messages.some((message) =>
        (message.content ?? []).some((block) => Boolean(block.toolUse || block.toolResult)));
}
|
|
1233
|
+
/**
 * Converts toolUse and toolResult content blocks to text representations.
 * This preserves the tool call information in the conversation while removing
 * the structured tool blocks that require Bedrock's toolConfig to be set.
 *
 * Used when no tools are provided (e.g. checkpoint summary calls) but the
 * conversation history contains tool interactions from prior turns.
 *
 * Messages without tool blocks are returned unchanged (same reference);
 * messages with tool blocks get a shallow copy with rewritten content.
 */
function convertToolBlocksToText(messages) {
    const LIMIT = 500;
    // Truncate long strings so summaries stay compact.
    const clip = (s) => (s.length > LIMIT ? s.substring(0, LIMIT) + '...' : s);
    return messages.map((message) => {
        if (!message.content) {
            return message;
        }
        const containsToolBlocks = message.content.some((b) => b.toolUse || b.toolResult);
        if (!containsToolBlocks) {
            return message;
        }
        const rewritten = message.content.map((block) => {
            if (block.toolUse) {
                const args = block.toolUse.input ? JSON.stringify(block.toolUse.input) : '';
                return { text: `[Tool call: ${block.toolUse.name}(${clip(args)})]` };
            }
            if (block.toolResult) {
                const texts = (block.toolResult.content ?? [])
                    .filter((c) => c.text)
                    .map((c) => clip(c.text));
                const summary = texts.length > 0 ? texts.join('\n') : 'No text content';
                return { text: `[Tool result: ${summary}]` };
            }
            // Non-tool blocks (plain text, images, ...) pass through untouched.
            return block;
        });
        return { ...message, content: rewritten };
    });
}
|
|
1288
|
+
/**
 * Recursively removes undefined values from an object.
 * AWS Bedrock's additionalModelRequestFields must be valid JSON, and undefined is not valid JSON.
 * Any unrecognized parameters will cause an exception.
 *
 * Non-object inputs (including null and arrays) are returned unchanged;
 * nested plain objects that become empty after cleaning are dropped entirely.
 */
function removeUndefinedValues(obj) {
    const isPlainObject = (v) => v !== null && typeof v === 'object' && !Array.isArray(v);
    if (!isPlainObject(obj)) {
        return obj;
    }
    const result = {};
    for (const [key, value] of Object.entries(obj)) {
        if (value === undefined) {
            continue;
        }
        if (isPlainObject(value)) {
            const nested = removeUndefinedValues(value);
            // Only keep nested objects that still have properties after cleaning.
            if (Object.keys(nested).length > 0) {
                result[key] = nested;
            }
        }
        else {
            result[key] = value;
        }
    }
    return result;
}
|
|
1314
|
+
/**
 * Update the conversation messages by merging prior conversation state
 * with the new prompt, repairing orphaned toolUse blocks along the way.
 *
 * @param conversation prior conversation state (may be undefined)
 * @param prompt the new prompt (messages / system / modelId)
 * @returns merged conversation: modelId, messages, and optional system
 */
function updateConversation(conversation, prompt) {
    const priorMessages = conversation?.messages || [];
    const newMessages = prompt.messages || [];
    // Fix orphaned toolUse blocks before returning.
    const messages = fixOrphanedToolUse([...priorMessages, ...newMessages]);
    const system = prompt.system || conversation?.system;
    return {
        modelId: prompt?.modelId || conversation?.modelId,
        messages: messages.length > 0 ? messages : [],
        system: system && system.length > 0 ? system : undefined,
    };
}
|
|
1331
|
+
/**
 * Fix orphaned toolUse blocks in the conversation.
 *
 * When an agent is stopped mid-tool-execution, the assistant message contains toolUse blocks
 * but no corresponding toolResult was added. The AWS Converse API requires that every toolUse
 * must be followed by a toolResult in the next user message.
 *
 * This function detects such cases and injects synthetic toolResult blocks indicating
 * the tools were interrupted, allowing the conversation to continue.
 *
 * NOTE(review): this function mutates the INPUT array in place (it reassigns
 * messages[i + 1]) so the patched message is picked up on the next loop
 * iteration and pushed into the returned array. Callers here pass a freshly
 * spread array, but sharing a caller-owned array with this function would
 * expose the mutation — confirm before reusing elsewhere.
 */
function fixOrphanedToolUse(messages) {
    // Nothing to repair when there is no assistant/user pair to inspect.
    if (messages.length < 2)
        return messages;
    const result = [];
    for (let i = 0; i < messages.length; i++) {
        const current = messages[i];
        // Push first; a later iteration may have already replaced this slot
        // in `messages` with a patched message (see assignments below).
        result.push(current);
        // Check if this is an assistant message with toolUse blocks
        if (current.role === 'assistant' && current.content) {
            // Extract toolUse blocks using simple property check (same pattern as existing Bedrock code)
            const toolUseBlocks = [];
            for (const block of current.content) {
                if (block.toolUse?.toolUseId) {
                    toolUseBlocks.push({
                        toolUseId: block.toolUse.toolUseId,
                        name: block.toolUse.name ?? 'unknown'
                    });
                }
            }
            if (toolUseBlocks.length > 0) {
                // Only the immediately-following message is checked for results.
                const nextMessage = messages[i + 1];
                if (nextMessage && nextMessage.role === 'user' && nextMessage.content) {
                    // Get toolResult IDs from the next message using simple property check
                    const toolResultIds = new Set();
                    for (const block of nextMessage.content) {
                        if (block.toolResult?.toolUseId) {
                            toolResultIds.add(block.toolResult.toolUseId);
                        }
                    }
                    // Find orphaned toolUse blocks (no matching toolResult)
                    const orphanedToolUse = toolUseBlocks.filter(tu => !toolResultIds.has(tu.toolUseId));
                    if (orphanedToolUse.length > 0) {
                        // Inject synthetic toolResults for orphaned toolUse
                        const syntheticResults = orphanedToolUse.map(tu => ({
                            toolResult: {
                                toolUseId: tu.toolUseId,
                                content: [{
                                        text: `[Tool interrupted: The user stopped the operation before "${tu.name}" could execute.]`
                                    }]
                            }
                        }));
                        // Prepend synthetic results to the next user message
                        const updatedNextMessage = {
                            ...nextMessage,
                            content: [...syntheticResults, ...nextMessage.content]
                        };
                        // Replace the next message in our iteration (mutates input array)
                        messages[i + 1] = updatedNextMessage;
                    }
                }
                else if (nextMessage && nextMessage.role === 'user' && !nextMessage.content) {
                    // Next message is a user message but has no content
                    // We need to add toolResults
                    const syntheticResults = toolUseBlocks.map(tu => ({
                        toolResult: {
                            toolUseId: tu.toolUseId,
                            content: [{
                                    text: `[Tool interrupted: The user stopped the operation before "${tu.name}" could execute.]`
                                }]
                        }
                    }));
                    const updatedNextMessage = {
                        role: 'user',
                        content: syntheticResults
                    };
                    messages[i + 1] = updatedNextMessage;
                }
                // Note: If there's no nextMessage, we leave the conversation as-is.
                // The toolUse blocks are expected to be there - the next turn will provide toolResults.
            }
        }
    }
    return result;
}
|
|
1416
|
+
/**
 * Maps AWS Bedrock modality identifiers to the standardized lowercase
 * modality names used by llumiverse model descriptors.
 *
 * @param modalities list of Bedrock ModelModality values (or raw strings)
 * @returns {string[]} standardized modality names ("text", "image",
 *          "embedding", "audio", "video", or the lowercased input)
 */
function formatAmazonModalities(modalities) {
    const standardizedModalities = [];
    for (const modality of modalities) {
        if (modality === client_bedrock_1.ModelModality.TEXT) {
            standardizedModalities.push("text");
        }
        else if (modality === client_bedrock_1.ModelModality.IMAGE) {
            standardizedModalities.push("image");
        }
        else if (modality === client_bedrock_1.ModelModality.EMBEDDING) {
            standardizedModalities.push("embedding");
        }
        // Strict equality instead of `==`: the SDK modality values are plain
        // strings, so behavior is unchanged while avoiding loose coercion.
        else if (modality === "SPEECH") {
            standardizedModalities.push("audio");
        }
        else if (modality === "VIDEO") {
            standardizedModalities.push("video");
        }
        else {
            // Unknown modalities pass through lowercased as-is.
            standardizedModalities.push(modality.toString().toLowerCase());
        }
    }
    return standardizedModalities;
}
|
|
1441
|
+
//# sourceMappingURL=index.js.map
|