@llumiverse/drivers 0.25.0 → 0.25.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. package/lib/cjs/adobe/firefly.js +120 -0
  2. package/lib/cjs/adobe/firefly.js.map +1 -0
  3. package/lib/cjs/azure/azure_foundry.js +432 -0
  4. package/lib/cjs/azure/azure_foundry.js.map +1 -0
  5. package/lib/cjs/bedrock/converse.js +285 -0
  6. package/lib/cjs/bedrock/converse.js.map +1 -0
  7. package/lib/cjs/bedrock/index.js +1091 -0
  8. package/lib/cjs/bedrock/index.js.map +1 -0
  9. package/lib/cjs/bedrock/nova-image-payload.js +207 -0
  10. package/lib/cjs/bedrock/nova-image-payload.js.map +1 -0
  11. package/lib/cjs/bedrock/payloads.js +3 -0
  12. package/lib/cjs/bedrock/payloads.js.map +1 -0
  13. package/lib/cjs/bedrock/s3.js +107 -0
  14. package/lib/cjs/bedrock/s3.js.map +1 -0
  15. package/lib/cjs/bedrock/twelvelabs.js +87 -0
  16. package/lib/cjs/bedrock/twelvelabs.js.map +1 -0
  17. package/lib/cjs/groq/index.js +323 -0
  18. package/lib/cjs/groq/index.js.map +1 -0
  19. package/lib/cjs/huggingface_ie.js +201 -0
  20. package/lib/cjs/huggingface_ie.js.map +1 -0
  21. package/lib/cjs/index.js +31 -0
  22. package/lib/cjs/index.js.map +1 -0
  23. package/lib/cjs/mistral/index.js +173 -0
  24. package/lib/cjs/mistral/index.js.map +1 -0
  25. package/lib/cjs/mistral/types.js +83 -0
  26. package/lib/cjs/mistral/types.js.map +1 -0
  27. package/lib/cjs/openai/azure_openai.js +72 -0
  28. package/lib/cjs/openai/azure_openai.js.map +1 -0
  29. package/lib/cjs/openai/index.js +665 -0
  30. package/lib/cjs/openai/index.js.map +1 -0
  31. package/lib/cjs/openai/openai.js +21 -0
  32. package/lib/cjs/openai/openai.js.map +1 -0
  33. package/lib/cjs/openai/openai_compatible.js +62 -0
  34. package/lib/cjs/openai/openai_compatible.js.map +1 -0
  35. package/lib/cjs/openai/openai_format.js +131 -0
  36. package/lib/cjs/openai/openai_format.js.map +1 -0
  37. package/lib/cjs/package.json +3 -0
  38. package/lib/cjs/replicate.js +275 -0
  39. package/lib/cjs/replicate.js.map +1 -0
  40. package/lib/cjs/test-driver/TestErrorCompletionStream.js +20 -0
  41. package/lib/cjs/test-driver/TestErrorCompletionStream.js.map +1 -0
  42. package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js +24 -0
  43. package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
  44. package/lib/cjs/test-driver/index.js +109 -0
  45. package/lib/cjs/test-driver/index.js.map +1 -0
  46. package/lib/cjs/test-driver/utils.js +30 -0
  47. package/lib/cjs/test-driver/utils.js.map +1 -0
  48. package/lib/cjs/togetherai/index.js +126 -0
  49. package/lib/cjs/togetherai/index.js.map +1 -0
  50. package/lib/cjs/togetherai/interfaces.js +3 -0
  51. package/lib/cjs/togetherai/interfaces.js.map +1 -0
  52. package/lib/cjs/vertexai/debug.js +12 -0
  53. package/lib/cjs/vertexai/debug.js.map +1 -0
  54. package/lib/cjs/vertexai/embeddings/embeddings-image.js +27 -0
  55. package/lib/cjs/vertexai/embeddings/embeddings-image.js.map +1 -0
  56. package/lib/cjs/vertexai/embeddings/embeddings-text.js +23 -0
  57. package/lib/cjs/vertexai/embeddings/embeddings-text.js.map +1 -0
  58. package/lib/cjs/vertexai/index.js +576 -0
  59. package/lib/cjs/vertexai/index.js.map +1 -0
  60. package/lib/cjs/vertexai/models/claude.js +485 -0
  61. package/lib/cjs/vertexai/models/claude.js.map +1 -0
  62. package/lib/cjs/vertexai/models/gemini.js +871 -0
  63. package/lib/cjs/vertexai/models/gemini.js.map +1 -0
  64. package/lib/cjs/vertexai/models/imagen.js +303 -0
  65. package/lib/cjs/vertexai/models/imagen.js.map +1 -0
  66. package/lib/cjs/vertexai/models/llama.js +183 -0
  67. package/lib/cjs/vertexai/models/llama.js.map +1 -0
  68. package/lib/cjs/vertexai/models.js +35 -0
  69. package/lib/cjs/vertexai/models.js.map +1 -0
  70. package/lib/cjs/watsonx/index.js +161 -0
  71. package/lib/cjs/watsonx/index.js.map +1 -0
  72. package/lib/cjs/watsonx/interfaces.js +3 -0
  73. package/lib/cjs/watsonx/interfaces.js.map +1 -0
  74. package/lib/cjs/xai/index.js +65 -0
  75. package/lib/cjs/xai/index.js.map +1 -0
  76. package/lib/esm/adobe/firefly.js +116 -0
  77. package/lib/esm/adobe/firefly.js.map +1 -0
  78. package/lib/esm/azure/azure_foundry.js +426 -0
  79. package/lib/esm/azure/azure_foundry.js.map +1 -0
  80. package/lib/esm/bedrock/converse.js +278 -0
  81. package/lib/esm/bedrock/converse.js.map +1 -0
  82. package/lib/esm/bedrock/index.js +1087 -0
  83. package/lib/esm/bedrock/index.js.map +1 -0
  84. package/lib/esm/bedrock/nova-image-payload.js +203 -0
  85. package/lib/esm/bedrock/nova-image-payload.js.map +1 -0
  86. package/lib/esm/bedrock/payloads.js +2 -0
  87. package/lib/esm/bedrock/payloads.js.map +1 -0
  88. package/lib/esm/bedrock/s3.js +99 -0
  89. package/lib/esm/bedrock/s3.js.map +1 -0
  90. package/lib/esm/bedrock/twelvelabs.js +84 -0
  91. package/lib/esm/bedrock/twelvelabs.js.map +1 -0
  92. package/lib/esm/groq/index.js +316 -0
  93. package/lib/esm/groq/index.js.map +1 -0
  94. package/lib/esm/huggingface_ie.js +197 -0
  95. package/lib/esm/huggingface_ie.js.map +1 -0
  96. package/lib/esm/index.js +15 -0
  97. package/lib/esm/index.js.map +1 -0
  98. package/lib/esm/mistral/index.js +169 -0
  99. package/lib/esm/mistral/index.js.map +1 -0
  100. package/lib/esm/mistral/types.js +80 -0
  101. package/lib/esm/mistral/types.js.map +1 -0
  102. package/lib/esm/openai/azure_openai.js +68 -0
  103. package/lib/esm/openai/azure_openai.js.map +1 -0
  104. package/lib/esm/openai/index.js +660 -0
  105. package/lib/esm/openai/index.js.map +1 -0
  106. package/lib/esm/openai/openai.js +14 -0
  107. package/lib/esm/openai/openai.js.map +1 -0
  108. package/lib/esm/openai/openai_compatible.js +55 -0
  109. package/lib/esm/openai/openai_compatible.js.map +1 -0
  110. package/lib/esm/openai/openai_format.js +127 -0
  111. package/lib/esm/openai/openai_format.js.map +1 -0
  112. package/lib/esm/replicate.js +268 -0
  113. package/lib/esm/replicate.js.map +1 -0
  114. package/lib/esm/test-driver/TestErrorCompletionStream.js +16 -0
  115. package/lib/esm/test-driver/TestErrorCompletionStream.js.map +1 -0
  116. package/lib/esm/test-driver/TestValidationErrorCompletionStream.js +20 -0
  117. package/lib/esm/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
  118. package/lib/esm/test-driver/index.js +91 -0
  119. package/lib/esm/test-driver/index.js.map +1 -0
  120. package/lib/esm/test-driver/utils.js +25 -0
  121. package/lib/esm/test-driver/utils.js.map +1 -0
  122. package/lib/esm/togetherai/index.js +122 -0
  123. package/lib/esm/togetherai/index.js.map +1 -0
  124. package/lib/esm/togetherai/interfaces.js +2 -0
  125. package/lib/esm/togetherai/interfaces.js.map +1 -0
  126. package/lib/esm/vertexai/debug.js +6 -0
  127. package/lib/esm/vertexai/debug.js.map +1 -0
  128. package/lib/esm/vertexai/embeddings/embeddings-image.js +24 -0
  129. package/lib/esm/vertexai/embeddings/embeddings-image.js.map +1 -0
  130. package/lib/esm/vertexai/embeddings/embeddings-text.js +20 -0
  131. package/lib/esm/vertexai/embeddings/embeddings-text.js.map +1 -0
  132. package/lib/esm/vertexai/index.js +571 -0
  133. package/lib/esm/vertexai/index.js.map +1 -0
  134. package/lib/esm/vertexai/models/claude.js +479 -0
  135. package/lib/esm/vertexai/models/claude.js.map +1 -0
  136. package/lib/esm/vertexai/models/gemini.js +866 -0
  137. package/lib/esm/vertexai/models/gemini.js.map +1 -0
  138. package/lib/esm/vertexai/models/imagen.js +299 -0
  139. package/lib/esm/vertexai/models/imagen.js.map +1 -0
  140. package/lib/esm/vertexai/models/llama.js +179 -0
  141. package/lib/esm/vertexai/models/llama.js.map +1 -0
  142. package/lib/esm/vertexai/models.js +32 -0
  143. package/lib/esm/vertexai/models.js.map +1 -0
  144. package/lib/esm/watsonx/index.js +157 -0
  145. package/lib/esm/watsonx/index.js.map +1 -0
  146. package/lib/esm/watsonx/interfaces.js +2 -0
  147. package/lib/esm/watsonx/interfaces.js.map +1 -0
  148. package/lib/esm/xai/index.js +58 -0
  149. package/lib/esm/xai/index.js.map +1 -0
  150. package/lib/types/adobe/firefly.d.ts +30 -0
  151. package/lib/types/adobe/firefly.d.ts.map +1 -0
  152. package/lib/types/azure/azure_foundry.d.ts +52 -0
  153. package/lib/types/azure/azure_foundry.d.ts.map +1 -0
  154. package/lib/types/bedrock/converse.d.ts +9 -0
  155. package/lib/types/bedrock/converse.d.ts.map +1 -0
  156. package/lib/types/bedrock/index.d.ts +68 -0
  157. package/lib/types/bedrock/index.d.ts.map +1 -0
  158. package/lib/types/bedrock/nova-image-payload.d.ts +74 -0
  159. package/lib/types/bedrock/nova-image-payload.d.ts.map +1 -0
  160. package/lib/types/bedrock/payloads.d.ts +12 -0
  161. package/lib/types/bedrock/payloads.d.ts.map +1 -0
  162. package/lib/types/bedrock/s3.d.ts +23 -0
  163. package/lib/types/bedrock/s3.d.ts.map +1 -0
  164. package/lib/types/bedrock/twelvelabs.d.ts +50 -0
  165. package/lib/types/bedrock/twelvelabs.d.ts.map +1 -0
  166. package/lib/types/groq/index.d.ts +27 -0
  167. package/lib/types/groq/index.d.ts.map +1 -0
  168. package/lib/types/huggingface_ie.d.ts +35 -0
  169. package/lib/types/huggingface_ie.d.ts.map +1 -0
  170. package/lib/types/index.d.ts +15 -0
  171. package/lib/types/index.d.ts.map +1 -0
  172. package/lib/types/mistral/index.d.ts +25 -0
  173. package/lib/types/mistral/index.d.ts.map +1 -0
  174. package/lib/types/mistral/types.d.ts +132 -0
  175. package/lib/types/mistral/types.d.ts.map +1 -0
  176. package/lib/types/openai/azure_openai.d.ts +25 -0
  177. package/lib/types/openai/azure_openai.d.ts.map +1 -0
  178. package/lib/types/openai/index.d.ts +31 -0
  179. package/lib/types/openai/index.d.ts.map +1 -0
  180. package/lib/types/openai/openai.d.ts +15 -0
  181. package/lib/types/openai/openai.d.ts.map +1 -0
  182. package/lib/types/openai/openai_compatible.d.ts +26 -0
  183. package/lib/types/openai/openai_compatible.d.ts.map +1 -0
  184. package/lib/types/openai/openai_format.d.ts +21 -0
  185. package/lib/types/openai/openai_format.d.ts.map +1 -0
  186. package/lib/types/replicate.d.ts +48 -0
  187. package/lib/types/replicate.d.ts.map +1 -0
  188. package/lib/types/test-driver/TestErrorCompletionStream.d.ts +9 -0
  189. package/lib/types/test-driver/TestErrorCompletionStream.d.ts.map +1 -0
  190. package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts +9 -0
  191. package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts.map +1 -0
  192. package/lib/types/test-driver/index.d.ts +24 -0
  193. package/lib/types/test-driver/index.d.ts.map +1 -0
  194. package/lib/types/test-driver/utils.d.ts +5 -0
  195. package/lib/types/test-driver/utils.d.ts.map +1 -0
  196. package/lib/types/togetherai/index.d.ts +23 -0
  197. package/lib/types/togetherai/index.d.ts.map +1 -0
  198. package/lib/types/togetherai/interfaces.d.ts +96 -0
  199. package/lib/types/togetherai/interfaces.d.ts.map +1 -0
  200. package/lib/types/vertexai/debug.d.ts +2 -0
  201. package/lib/types/vertexai/debug.d.ts.map +1 -0
  202. package/lib/types/vertexai/embeddings/embeddings-image.d.ts +11 -0
  203. package/lib/types/vertexai/embeddings/embeddings-image.d.ts.map +1 -0
  204. package/lib/types/vertexai/embeddings/embeddings-text.d.ts +10 -0
  205. package/lib/types/vertexai/embeddings/embeddings-text.d.ts.map +1 -0
  206. package/lib/types/vertexai/index.d.ts +65 -0
  207. package/lib/types/vertexai/index.d.ts.map +1 -0
  208. package/lib/types/vertexai/models/claude.d.ts +28 -0
  209. package/lib/types/vertexai/models/claude.d.ts.map +1 -0
  210. package/lib/types/vertexai/models/gemini.d.ts +18 -0
  211. package/lib/types/vertexai/models/gemini.d.ts.map +1 -0
  212. package/lib/types/vertexai/models/imagen.d.ts +75 -0
  213. package/lib/types/vertexai/models/imagen.d.ts.map +1 -0
  214. package/lib/types/vertexai/models/llama.d.ts +20 -0
  215. package/lib/types/vertexai/models/llama.d.ts.map +1 -0
  216. package/lib/types/vertexai/models.d.ts +15 -0
  217. package/lib/types/vertexai/models.d.ts.map +1 -0
  218. package/lib/types/watsonx/index.d.ts +27 -0
  219. package/lib/types/watsonx/index.d.ts.map +1 -0
  220. package/lib/types/watsonx/interfaces.d.ts +65 -0
  221. package/lib/types/watsonx/interfaces.d.ts.map +1 -0
  222. package/lib/types/xai/index.d.ts +18 -0
  223. package/lib/types/xai/index.d.ts.map +1 -0
  224. package/package.json +3 -3
package/lib/esm/bedrock/index.js
@@ -0,0 +1,1087 @@
+ import { Bedrock, CreateModelCustomizationJobCommand, GetModelCustomizationJobCommand, ModelCustomizationJobStatus, ModelModality, StopModelCustomizationJobCommand } from "@aws-sdk/client-bedrock";
+ import { BedrockRuntime } from "@aws-sdk/client-bedrock-runtime";
+ import { S3Client } from "@aws-sdk/client-s3";
+ import { AbstractDriver, getMaxTokensLimitBedrock, getModelCapabilities, modelModalitiesToArray, stripBinaryFromConversation, truncateLargeTextInConversation, deserializeBinaryFromStorage, getConversationMeta, incrementConversationTurn, TrainingJobStatus } from "@llumiverse/core";
+ import { transformAsyncIterator } from "@llumiverse/core/async";
+ import { formatNovaPrompt } from "@llumiverse/core/formatters";
+ import { LRUCache } from "mnemonist";
+ import { converseConcatMessages, converseJSONprefill, converseSystemToMessages, formatConversePrompt } from "./converse.js";
+ import { formatNovaImageGenerationPayload, NovaImageGenerationTaskType } from "./nova-image-payload.js";
+ import { forceUploadFile } from "./s3.js";
+ import { formatTwelvelabsPegasusPrompt } from "./twelvelabs.js";
+ const supportStreamingCache = new LRUCache(4096);
+ var BedrockModelType;
+ (function (BedrockModelType) {
+ BedrockModelType["FoundationModel"] = "foundation-model";
+ BedrockModelType["InferenceProfile"] = "inference-profile";
+ BedrockModelType["CustomModel"] = "custom-model";
+ BedrockModelType["Unknown"] = "unknown";
+ })(BedrockModelType || (BedrockModelType = {}));
+ ;
+ function converseFinishReason(reason) {
+ //Possible values:
+ //end_turn | tool_use | max_tokens | stop_sequence | guardrail_intervened | content_filtered
+ if (!reason)
+ return undefined;
+ switch (reason) {
+ case 'end_turn': return "stop";
+ case 'max_tokens': return "length";
+ default: return reason;
+ }
+ }
+ //Used to get a max_token value when not specified in the model options. Claude requires it to be set.
+ function maxTokenFallbackClaude(option) {
+ const modelOptions = option.model_options;
+ if (modelOptions && typeof modelOptions.max_tokens === "number") {
+ return modelOptions.max_tokens;
+ }
+ else {
+ const thinking_budget = modelOptions?.thinking_budget_tokens ?? 0;
+ let maxSupportedTokens = getMaxTokensLimitBedrock(option.model) ?? 8192; // Should always return a number for claude, 8192 is to satisfy the TypeScript type checker;
+ // Fallback to the default max tokens limit for the model
+ if (option.model.includes('claude-3-7-sonnet') && (modelOptions?.thinking_budget_tokens ?? 0) < 48000) {
+ maxSupportedTokens = 64000; // Claude 3.7 can go up to 128k with a beta header, but when no max tokens is specified, we default to 64k.
+ }
+ return Math.min(16000 + thinking_budget, maxSupportedTokens); // Cap to 16k, to avoid taking up too much context window and quota.
+ }
+ }
+ export class BedrockDriver extends AbstractDriver {
+ static PROVIDER = "bedrock";
+ provider = BedrockDriver.PROVIDER;
+ _executor;
+ _service;
+ _service_region;
+ constructor(options) {
+ super(options);
+ if (!options.region) {
+ throw new Error("No region found. Set the region in the environment's endpoint URL.");
+ }
+ }
+ getExecutor() {
+ if (!this._executor) {
+ this._executor = new BedrockRuntime({
+ region: this.options.region,
+ credentials: this.options.credentials,
+ });
+ }
+ return this._executor;
+ }
+ getService(region = this.options.region) {
+ if (!this._service || this._service_region != region) {
+ this._service = new Bedrock({
+ region: region,
+ credentials: this.options.credentials,
+ });
+ this._service_region = region;
+ }
+ return this._service;
+ }
+ async formatPrompt(segments, opts) {
+ if (opts.model.includes("canvas")) {
+ return await formatNovaPrompt(segments, opts.result_schema);
+ }
+ if (opts.model.includes("twelvelabs.pegasus")) {
+ return await formatTwelvelabsPegasusPrompt(segments, opts);
+ }
+ return await formatConversePrompt(segments, opts);
+ }
+ getExtractedExecution(result, _prompt, options) {
+ let resultText = "";
+ let reasoning = "";
+ if (result.output?.message?.content) {
+ for (const content of result.output.message.content) {
+ // Get text output
+ if (content.text) {
+ resultText += content.text;
+ }
+ else if (content.reasoningContent) {
+ // Get reasoning content only if include_thoughts is true
+ const claudeOptions = options?.model_options;
+ if (claudeOptions?.include_thoughts) {
+ if (content.reasoningContent.reasoningText) {
+ reasoning += content.reasoningContent.reasoningText.text;
+ }
+ else if (content.reasoningContent.redactedContent) {
+ // Handle redacted thinking content
+ const redactedData = new TextDecoder().decode(content.reasoningContent.redactedContent);
+ reasoning += `[Redacted thinking: ${redactedData}]`;
+ }
+ }
+ else {
+ this.logger.info("[Bedrock] Not outputting reasoning content as include_thoughts is false");
+ }
+ }
+ else {
+ // Get content block type
+ const type = Object.keys(content).find(key => key !== '$unknown' && content[key] !== undefined);
+ this.logger.info({ type }, "[Bedrock] Unsupported content response type:");
+ }
+ }
+ // Add spacing if we have reasoning content
+ if (reasoning) {
+ reasoning += '\n\n';
+ }
+ }
+ const completionResult = {
+ result: reasoning + resultText ? [{ type: "text", value: reasoning + resultText }] : [],
+ token_usage: {
+ prompt: result.usage?.inputTokens,
+ result: result.usage?.outputTokens,
+ total: result.usage?.totalTokens,
+ },
+ finish_reason: converseFinishReason(result.stopReason),
+ };
+ return completionResult;
+ }
+ ;
+ getExtractedStream(result, _prompt, options) {
+ let output = "";
+ let reasoning = "";
+ let stop_reason = "";
+ let token_usage;
+ // Check if we should include thoughts
+ const shouldIncludeThoughts = options && options.model_options?.include_thoughts;
+ // Handle content block start events (for reasoning blocks)
+ if (result.contentBlockStart) {
+ // Handle redacted content at block start
+ if (result.contentBlockStart.start && 'reasoningContent' in result.contentBlockStart.start && shouldIncludeThoughts) {
+ const reasoningStart = result.contentBlockStart.start;
+ if (reasoningStart.reasoningContent?.redactedContent) {
+ const redactedData = new TextDecoder().decode(reasoningStart.reasoningContent.redactedContent);
+ reasoning = `[Redacted thinking: ${redactedData}]`;
+ }
+ }
+ }
+ // Handle content block deltas (text and reasoning)
+ if (result.contentBlockDelta) {
+ const delta = result.contentBlockDelta.delta;
+ if (delta?.text) {
+ output = delta.text;
+ }
+ else if (delta?.reasoningContent && shouldIncludeThoughts) {
+ if (delta.reasoningContent.text) {
+ reasoning = delta.reasoningContent.text;
+ }
+ else if (delta.reasoningContent.redactedContent) {
+ const redactedData = new TextDecoder().decode(delta.reasoningContent.redactedContent);
+ reasoning = `[Redacted thinking: ${redactedData}]`;
+ }
+ else if (delta.reasoningContent.signature) {
+ // Handle signature updates for reasoning content - end of thinking
+ reasoning = "\n\n";
+ // Putting logging here so it only triggers once.
+ this.logger.info("[Bedrock] Not outputting reasoning content as include_thoughts is false");
+ }
+ }
+ else if (delta) {
+ // Get content block type
+ const type = Object.keys(delta).find(key => key !== '$unknown' && delta[key] !== undefined);
+ this.logger.info({ type }, "[Bedrock] Unsupported content response type:");
+ }
+ }
+ // Handle content block stop events
+ if (result.contentBlockStop) {
+ // Content block ended - could be end of reasoning or text block
+ // Add minimal spacing for reasoning blocks if not already present
+ if (reasoning && !reasoning.endsWith('\n\n') && shouldIncludeThoughts) {
+ reasoning += '\n\n';
+ }
+ }
+ if (result.messageStop) {
+ stop_reason = result.messageStop.stopReason ?? "";
+ }
+ if (result.metadata) {
+ token_usage = {
+ prompt: result.metadata.usage?.inputTokens,
+ result: result.metadata.usage?.outputTokens,
+ total: result.metadata.usage?.totalTokens,
+ };
+ }
+ const completionResult = {
+ result: reasoning + output ? [{ type: "text", value: reasoning + output }] : [],
+ token_usage: token_usage,
+ finish_reason: converseFinishReason(stop_reason),
+ };
+ return completionResult;
+ }
+ ;
+ extractRegion(modelString, defaultRegion) {
+ // Match region in full ARN pattern
+ const arnMatch = modelString.match(/arn:aws[^:]*:bedrock:([^:]+):/);
+ if (arnMatch) {
+ return arnMatch[1];
+ }
+ // Match common AWS regions directly in string
+ const regionMatch = modelString.match(/(?:us|eu|ap|sa|ca|me|af)[-](east|west|central|south|north|southeast|southwest|northeast|northwest)[-][1-9]/);
+ if (regionMatch) {
+ return regionMatch[0];
+ }
+ return defaultRegion;
+ }
+ async getCanStream(model, type) {
+ let canStream = false;
+ let error = null;
+ const region = this.extractRegion(model, this.options.region);
+ if (type == BedrockModelType.FoundationModel || type == BedrockModelType.Unknown) {
+ try {
+ const response = await this.getService(region).getFoundationModel({
+ modelIdentifier: model
+ });
+ canStream = response.modelDetails?.responseStreamingSupported ?? false;
+ return canStream;
+ }
+ catch (e) {
+ error = e;
+ }
+ }
+ if (type == BedrockModelType.InferenceProfile || type == BedrockModelType.Unknown) {
+ try {
+ const response = await this.getService(region).getInferenceProfile({
+ inferenceProfileIdentifier: model
+ });
+ canStream = await this.getCanStream(response.models?.[0].modelArn ?? "", BedrockModelType.FoundationModel);
+ return canStream;
+ }
+ catch (e) {
+ error = e;
+ }
+ }
+ if (type == BedrockModelType.CustomModel || type == BedrockModelType.Unknown) {
+ try {
+ const response = await this.getService(region).getCustomModel({
+ modelIdentifier: model
+ });
+ canStream = await this.getCanStream(response.baseModelArn ?? "", BedrockModelType.FoundationModel);
+ return canStream;
+ }
+ catch (e) {
+ error = e;
+ }
+ }
+ if (error) {
+ console.warn("Error on canStream check for model: " + model + " region detected: " + region, error);
+ }
+ return canStream;
+ }
+ async canStream(options) {
+ // // TwelveLabs Pegasus supports streaming according to the documentation
+ // if (options.model.includes("twelvelabs.pegasus")) {
+ // return true;
+ // }
+ let canStream = supportStreamingCache.get(options.model);
+ if (canStream == null) {
+ let type = BedrockModelType.Unknown;
+ if (options.model.includes("foundation-model")) {
+ type = BedrockModelType.FoundationModel;
+ }
+ else if (options.model.includes("inference-profile")) {
+ type = BedrockModelType.InferenceProfile;
+ }
+ else if (options.model.includes("custom-model")) {
+ type = BedrockModelType.CustomModel;
+ }
+ canStream = await this.getCanStream(options.model, type);
+ supportStreamingCache.set(options.model, canStream);
+ }
+ return canStream;
+ }
+ /**
+ * Build conversation context after streaming completion.
+ * Reconstructs the assistant message from accumulated results and applies stripping.
+ */
+ buildStreamingConversation(prompt, result, toolUse, options) {
+ // Only handle ConverseRequest prompts (not NovaMessagesPrompt or TwelvelabsPegasusRequest)
+ if (options.model.includes("canvas") || options.model.includes("twelvelabs.pegasus")) {
+ return undefined;
+ }
+ const conversePrompt = prompt;
+ const completionResults = result;
+ // Convert accumulated results to text content for assistant message
+ const textContent = completionResults
+ .map(r => {
+ switch (r.type) {
+ case 'text':
+ return r.value;
+ case 'json':
+ return typeof r.value === 'string' ? r.value : JSON.stringify(r.value);
+ case 'image':
+ // Skip images in conversation - they're in the result
+ return '';
+ default:
+ return String(r.value || '');
+ }
+ })
+ .join('');
+ // Deserialize any base64-encoded binary data back to Uint8Array
+ const incomingConversation = deserializeBinaryFromStorage(options.conversation);
+ // Start with the conversation from options combined with the prompt
+ let conversation = updateConversation(incomingConversation, conversePrompt);
+ // Build assistant message content
+ const messageContent = [];
+ if (textContent) {
+ messageContent.push({ text: textContent });
+ }
+ // Add tool use blocks if present
+ if (toolUse && toolUse.length > 0) {
+ for (const tool of toolUse) {
+ messageContent.push({
+ toolUse: {
+ toolUseId: tool.id,
+ name: tool.tool_name,
+ input: tool.tool_input,
+ }
+ });
+ }
+ }
+ // Add assistant message
+ const assistantMessage = {
+ messages: [{
+ content: messageContent.length > 0 ? messageContent : [{ text: '' }],
+ role: "assistant"
+ }],
+ modelId: conversePrompt.modelId,
+ };
+ conversation = updateConversation(conversation, assistantMessage);
+ // Increment turn counter
+ conversation = incrementConversationTurn(conversation);
+ // Apply stripping based on options
+ const currentTurn = getConversationMeta(conversation).turnNumber;
+ const stripOptions = {
+ keepForTurns: options.stripImagesAfterTurns ?? Infinity,
+ currentTurn,
+ textMaxTokens: options.stripTextMaxTokens
+ };
+ let processedConversation = stripBinaryFromConversation(conversation, stripOptions);
+ processedConversation = truncateLargeTextInConversation(processedConversation, stripOptions);
+ return processedConversation;
+ }
+ async requestTextCompletion(prompt, options) {
+ // Handle Twelvelabs Pegasus models
+ if (options.model.includes("twelvelabs.pegasus")) {
+ return this.requestTwelvelabsPegasusCompletion(prompt, options);
+ }
+ // Handle other Bedrock models that use Converse API
+ const conversePrompt = prompt;
+ // Deserialize any base64-encoded binary data back to Uint8Array before API call
+ const incomingConversation = deserializeBinaryFromStorage(options.conversation);
+ let conversation = updateConversation(incomingConversation, conversePrompt);
+ const payload = this.preparePayload(conversation, options);
+ const executor = this.getExecutor();
+ const res = await executor.converse({
+ ...payload,
+ });
+ conversation = updateConversation(conversation, {
+ messages: [res.output?.message ?? { content: [{ text: "" }], role: "assistant" }],
+ modelId: conversePrompt.modelId,
+ });
+ // Increment turn counter for deferred stripping
+ conversation = incrementConversationTurn(conversation);
+ let tool_use = undefined;
+ //Get tool requests, we check tool use regardless of finish reason, as you can hit length and still get a valid response.
+ tool_use = res.output?.message?.content?.reduce((tools, c) => {
+ if (c.toolUse) {
+ tools.push({
+ tool_name: c.toolUse.name ?? "",
+ tool_input: c.toolUse.input,
+ id: c.toolUse.toolUseId ?? "",
+ });
+ }
+ return tools;
+ }, []);
+ //If no tools were used, set to undefined
+ if (tool_use && tool_use.length == 0) {
+ tool_use = undefined;
+ }
+ // Strip/serialize binary data based on options.stripImagesAfterTurns
+ const currentTurn = getConversationMeta(conversation).turnNumber;
+ const stripOptions = {
+ keepForTurns: options.stripImagesAfterTurns ?? Infinity,
+ currentTurn,
+ textMaxTokens: options.stripTextMaxTokens
+ };
+ let processedConversation = stripBinaryFromConversation(conversation, stripOptions);
+ // Truncate large text content if configured
+ processedConversation = truncateLargeTextInConversation(processedConversation, stripOptions);
+ const completion = {
+ ...this.getExtractedExecution(res, conversePrompt, options),
+ original_response: options.include_original_response ? res : undefined,
+ conversation: processedConversation,
+ tool_use: tool_use,
+ };
+ return completion;
+ }
+ async requestTwelvelabsPegasusCompletion(prompt, options) {
+ const executor = this.getExecutor();
+ const res = await executor.invokeModel({
+ modelId: options.model,
+ contentType: "application/json",
+ accept: "application/json",
+ body: JSON.stringify(prompt),
+ });
+ const decoder = new TextDecoder();
+ const body = decoder.decode(res.body);
+ const result = JSON.parse(body);
+ // Extract the response according to TwelveLabs Pegasus format
+ let finishReason;
+ switch (result.finishReason) {
+ case "stop":
+ finishReason = "stop";
+ break;
+ case "length":
+ finishReason = "length";
+ break;
+ default:
+ finishReason = result.finishReason;
+ }
+ return {
+ result: result.message ? [{ type: "text", value: result.message }] : [],
+ finish_reason: finishReason,
+ original_response: options.include_original_response ? result : undefined,
+ };
+ }
+ async requestTwelvelabsPegasusCompletionStream(prompt, options) {
+ const executor = this.getExecutor();
+ const res = await executor.invokeModelWithResponseStream({
+ modelId: options.model,
+ contentType: "application/json",
+ accept: "application/json",
+ body: JSON.stringify(prompt),
+ });
+ if (!res.body) {
+ throw new Error("[Bedrock] Stream not found in response");
+ }
+ return transformAsyncIterator(res.body, (chunk) => {
+ if (chunk.chunk?.bytes) {
+ const decoder = new TextDecoder();
+ const body = decoder.decode(chunk.chunk.bytes);
+ try {
+ const result = JSON.parse(body);
+ // Extract streaming response according to TwelveLabs Pegasus format
+ let finishReason;
+ if (result.finishReason) {
+ switch (result.finishReason) {
+ case "stop":
+ finishReason = "stop";
+ break;
+ case "length":
+ finishReason = "length";
+ break;
+ default:
+ finishReason = result.finishReason;
+ }
+ }
+ return {
+ result: result.delta || result.message ? [{ type: "text", value: result.delta || result.message || "" }] : [],
+ finish_reason: finishReason,
+ };
+ }
+ catch (error) {
+ // If JSON parsing fails, return empty chunk
+ return {
+ result: [],
+ };
+ }
+ }
+ return {
+ result: [],
+ };
+ });
+ }
+ async requestTextCompletionStream(prompt, options) {
+ // Handle Twelvelabs Pegasus models
+ if (options.model.includes("twelvelabs.pegasus")) {
+ return this.requestTwelvelabsPegasusCompletionStream(prompt, options);
+ }
+ // Handle other Bedrock models that use Converse API
+ const conversePrompt = prompt;
+ // Include conversation history (same as non-streaming)
+ // Deserialize any base64-encoded binary data back to Uint8Array before API call
+ const incomingConversation = deserializeBinaryFromStorage(options.conversation);
+ const conversation = updateConversation(incomingConversation, conversePrompt);
+ const payload = this.preparePayload(conversation, options);
+ const executor = this.getExecutor();
+ return executor.converseStream({
+ ...payload,
+ }).then((res) => {
+ const stream = res.stream;
+ if (!stream) {
+ throw new Error("[Bedrock] Stream not found in response");
+ }
+ return transformAsyncIterator(stream, (streamSegment) => {
+ return this.getExtractedStream(streamSegment, conversePrompt, options);
+ });
+ }).catch((err) => {
+ this.logger.error({ error: err }, "[Bedrock] Failed to stream");
+ throw err;
+ });
+ }
+ preparePayload(prompt, options) {
+ const model_options = options.model_options ?? { _option_id: "text-fallback" };
+ let additionalField = {};
+ let supportsJSONPrefill = false;
+ if (options.model.includes("amazon")) {
+ supportsJSONPrefill = true;
+ //Titan models also exists but does not support any additional options
+ if (options.model.includes("nova")) {
+ additionalField = { inferenceConfig: { topK: model_options.top_k } };
+ }
+ }
+ else if (options.model.includes("claude")) {
+ const claude_options = model_options;
+ const thinking = claude_options.thinking_mode ?? false;
+ supportsJSONPrefill = !thinking;
+ if (options.model.includes("claude-3-7") || options.model.includes("-4-")) {
+ additionalField = {
+ ...additionalField,
+ reasoning_config: {
+ type: thinking ? "enabled" : "disabled",
+ budget_tokens: thinking ? (claude_options.thinking_budget_tokens ?? 1024) : undefined,
+ }
+ };
+ if (thinking && options.model.includes("claude-3-7-sonnet") &&
+ ((claude_options.max_tokens ?? 0) > 64000 || (claude_options.thinking_budget_tokens ?? 0) > 64000)) {
+ additionalField = {
+ ...additionalField,
+ anthropic_beta: ["output-128k-2025-02-19"]
+ };
+ }
+ }
+ //Needs max_tokens to be set
+ if (!model_options.max_tokens) {
+ model_options.max_tokens = maxTokenFallbackClaude(options);
+ }
+ additionalField = { ...additionalField, top_k: model_options.top_k };
+ }
+ else if (options.model.includes("meta")) {
+ //LLaMA models support no additional options
+ }
+ else if (options.model.includes("mistral")) {
+ //7B instruct and 8x7B instruct
+ if (options.model.includes("7b")) {
+ additionalField = { top_k: model_options.top_k };
+ //Does not support system messages
+ if (prompt.system && prompt.system?.length != 0) {
+ prompt.messages?.push(converseSystemToMessages(prompt.system));
+ prompt.system = undefined;
+ prompt.messages = converseConcatMessages(prompt.messages);
+ }
+ }
+ else {
+ //Other models such as Mistral Small,Large and Large 2
+ //Support no additional fields.
+ }
+ }
+ else if (options.model.includes("ai21")) {
+ //Jamba models support no additional options
+ //Jurassic 2 models do.
+ if (options.model.includes("j2")) {
+ additionalField = {
+ presencePenalty: { scale: model_options.presence_penalty },
+ frequencyPenalty: { scale: model_options.frequency_penalty },
+ };
+ //Does not support system messages
+ if (prompt.system && prompt.system?.length != 0) {
+ prompt.messages?.push(converseSystemToMessages(prompt.system));
+ prompt.system = undefined;
+ prompt.messages = converseConcatMessages(prompt.messages);
+ }
+ }
+ }
+ else if (options.model.includes("cohere.command")) {
+ // If last message is "```json", remove it.
+ //Command R and R plus
+ if (options.model.includes("cohere.command-r")) {
+ additionalField = {
+ k: model_options.top_k,
+ frequency_penalty: model_options.frequency_penalty,
+ presence_penalty: model_options.presence_penalty,
+ };
+ }
+ else {
+ // Command non-R
+ additionalField = { k: model_options.top_k };
+ //Does not support system messages
+ if (prompt.system && prompt.system?.length != 0) {
+ prompt.messages?.push(converseSystemToMessages(prompt.system));
+ prompt.system = undefined;
+ prompt.messages = converseConcatMessages(prompt.messages);
+ }
+ }
+ }
+ else if (options.model.includes("palmyra")) {
+ const palmyraOptions = model_options;
+ additionalField = {
+ seed: palmyraOptions?.seed,
+ presence_penalty: palmyraOptions?.presence_penalty,
+ frequency_penalty: palmyraOptions?.frequency_penalty,
+ min_tokens: palmyraOptions?.min_tokens,
+ };
+ }
+ else if (options.model.includes("deepseek")) {
+ //DeepSeek models support no additional options
+ }
+ else if (options.model.includes("gpt-oss")) {
+ const gptOssOptions = model_options;
+ additionalField = {
+ reasoning_effort: gptOssOptions?.reasoning_effort,
+ };
+ }
+ //If last message is "```json", add corresponding ``` as a stop sequence.
+ if (prompt.messages && prompt.messages.length > 0) {
+ if (prompt.messages[prompt.messages.length - 1].content?.[0].text === "```json") {
+ const stopSeq = model_options.stop_sequence;
+ if (!stopSeq) {
+ model_options.stop_sequence = ["```"];
+ }
+ else if (!stopSeq.includes("```")) {
+ stopSeq.push("```");
+ model_options.stop_sequence = stopSeq;
+ }
+ }
+ }
+ const tool_defs = getToolDefinitions(options.tools);
+ // Use prefill when there is a schema and tools are not being used
+ if (supportsJSONPrefill && options.result_schema && !tool_defs) {
+ prompt.messages = converseJSONprefill(prompt.messages);
+ }
+ // Clean undefined values from additionalField since AWS Bedrock requires valid JSON
+ // and will throw an exception for unrecognized parameters
+ const cleanedAdditionalFields = removeUndefinedValues(additionalField);
+ const cleanedModelOptions = removeUndefinedValues({
+ maxTokens: model_options.max_tokens,
+ temperature: model_options.temperature,
+ topP: model_options.top_p,
+ stopSequences: model_options.stop_sequence,
+ });
+ //Construct the final request payload
+ // We only add fields that are defined to avoid AWS errors
+ const request = {
+ modelId: options.model,
+ };
+ if (prompt.messages) {
+ request.messages = prompt.messages;
+ }
+ if (prompt.system) {
+ request.system = prompt.system;
+ }
+ if (Object.keys(cleanedModelOptions).length > 0) {
+ request.inferenceConfig = cleanedModelOptions;
+ }
+ if (Object.keys(cleanedAdditionalFields).length > 0) {
+ request.additionalModelRequestFields = cleanedAdditionalFields;
+ }
+ if (tool_defs?.length) {
+ request.toolConfig = {
+ tools: tool_defs,
+ };
+ }
+ return request;
+ }
+ isImageModel(model) {
+ return model.includes("titan-image") || model.includes("stable-diffusion") || model.includes("nova-canvas");
+ }
+ async requestImageGeneration(prompt, options) {
+ if (options.model_options?._option_id !== "bedrock-nova-canvas") {
+ this.logger.warn({ options: options.model_options }, "Invalid model options");
+ }
+ const model_options = options.model_options;
+ const executor = this.getExecutor();
+ const taskType = model_options.taskType ?? NovaImageGenerationTaskType.TEXT_IMAGE;
+ this.logger.info("Task type: " + taskType);
+ if (typeof prompt === "string") {
+ throw new Error("Bad prompt format");
+ }
+ const payload = await formatNovaImageGenerationPayload(taskType, prompt, options);
+ const res = await executor.invokeModel({
+ modelId: options.model,
+ contentType: "application/json",
+ accept: "application/json",
+ body: JSON.stringify(payload),
+ }, {
+ requestTimeout: 60000 * 5
+ });
+ const decoder = new TextDecoder();
+ const body = decoder.decode(res.body);
+ const bedrockResult = JSON.parse(body);
+ return {
+ error: bedrockResult.error,
+ result: bedrockResult.images.map((image) => ({
+ type: "image",
+ value: image
+ }))
+ };
+ }
+ async startTraining(dataset, options) {
+ //convert options.params to Record<string, string>
+ const params = {};
+ for (const [key, value] of Object.entries(options.params || {})) {
+ params[key] = String(value);
+ }
+ if (!this.options.training_bucket) {
+ throw new Error("Training cannot be used since the 'training_bucket' property was not specified in driver options");
+ }
+ const s3 = new S3Client({ region: this.options.region, credentials: this.options.credentials });
+ const stream = await dataset.getStream();
+ const upload = await forceUploadFile(s3, stream, this.options.training_bucket, dataset.name);
+ const service = this.getService();
+ const response = await service.send(new CreateModelCustomizationJobCommand({
+ jobName: options.name + "-job",
+ customModelName: options.name,
+ roleArn: this.options.training_role_arn || undefined,
+ baseModelIdentifier: options.model,
+ clientRequestToken: "llumiverse-" + Date.now(),
+ trainingDataConfig: {
+ s3Uri: `s3://${upload.Bucket}/${upload.Key}`,
+ },
+ outputDataConfig: undefined,
+ hyperParameters: params,
+ //TODO not supported?
+ //customizationType: "FINE_TUNING",
+ }));
+ const job = await service.send(new GetModelCustomizationJobCommand({
+ jobIdentifier: response.jobArn
+ }));
+ return jobInfo(job, response.jobArn);
+ }
+ async cancelTraining(jobId) {
+ const service = this.getService();
+ await service.send(new StopModelCustomizationJobCommand({
+ jobIdentifier: jobId
+ }));
+ const job = await service.send(new GetModelCustomizationJobCommand({
+ jobIdentifier: jobId
+ }));
+ return jobInfo(job, jobId);
+ }
+ async getTrainingJob(jobId) {
+ const service = this.getService();
+ const job = await service.send(new GetModelCustomizationJobCommand({
+ jobIdentifier: jobId
+ }));
+ return jobInfo(job, jobId);
+ }
+ // ===================== management API ==================
+ async validateConnection() {
+ const service = this.getService();
+ this.logger.debug("[Bedrock] validating connection", service.config.credentials.name);
+ //return true as if the client has been initialized, it means the connection is valid
+ return true;
+ }
+ async listTrainableModels() {
+ this.logger.debug("[Bedrock] listing trainable models");
+ return this._listModels(m => m.customizationsSupported ? m.customizationsSupported.includes("FINE_TUNING") : false);
+ }
+ async listModels() {
+ this.logger.debug("[Bedrock] listing models");
+ // exclude trainable models since they are not executable
+ // exclude embedding models, not to be used for typical completions.
+ const filter = (m) => (m.inferenceTypesSupported?.includes("ON_DEMAND") && !m.outputModalities?.includes("EMBEDDING")) ?? false;
+ return this._listModels(filter);
+ }
+ async _listModels(foundationFilter) {
+ const service = this.getService();
+ const [foundationModelsList, customModelsList, inferenceProfilesList] = await Promise.all([
+ service.listFoundationModels({}).catch(() => {
+ this.logger.warn("[Bedrock] Can't list foundation models. Check if the user has the right permissions.");
+ return undefined;
+ }),
+ service.listCustomModels({}).catch(() => {
+ this.logger.warn("[Bedrock] Can't list custom models. Check if the user has the right permissions.");
+ return undefined;
+ }),
+ service.listInferenceProfiles({}).catch(() => {
+ this.logger.warn("[Bedrock] Can't list inference profiles. Check if the user has the right permissions.");
+ return undefined;
+ }),
+ ]);
+ if (!foundationModelsList?.modelSummaries) {
+ throw new Error("Foundation models not found");
+ }
+ let foundationModels = foundationModelsList.modelSummaries || [];
+ if (foundationFilter) {
+ foundationModels = foundationModels.filter(foundationFilter);
+ }
+ const supportedPublishers = ["amazon", "anthropic", "cohere", "ai21",
+ "mistral", "meta", "deepseek", "writer",
+ "openai", "twelvelabs", "qwen"];
+ const unsupportedModelsByPublisher = {
+ amazon: ["titan-image-generator", "nova-reel", "nova-sonic", "rerank"],
+ anthropic: [],
+ cohere: ["rerank", "embed"],
+ ai21: [],
+ mistral: [],
+ meta: [],
+ deepseek: [],
+ writer: [],
+ openai: [],
+ twelvelabs: ["marengo"],
+ qwen: [],
+ };
+ // Helper function to check if model should be filtered out
+ const shouldIncludeModel = (modelId, providerName) => {
+ if (!modelId || !providerName)
+ return false;
+ const normalizedProvider = providerName.toLowerCase();
+ // Check if provider is supported
+ const isProviderSupported = supportedPublishers.some(provider => normalizedProvider.includes(provider));
+ if (!isProviderSupported)
+ return false;
+ // Check if model is in the unsupported list for its provider
+ for (const provider of supportedPublishers) {
+ if (normalizedProvider.includes(provider)) {
+ const unsupportedModels = unsupportedModelsByPublisher[provider] || [];
+ return !unsupportedModels.some(unsupported => modelId.toLowerCase().includes(unsupported));
+ }
+ }
+ return true;
+ };
+ foundationModels = foundationModels.filter(m => shouldIncludeModel(m.modelId, m.providerName));
+ const aiModels = foundationModels.map((m) => {
+ if (!m.modelId) {
+ throw new Error("modelId not found");
+ }
+ const modelCapability = getModelCapabilities(m.modelArn ?? m.modelId, this.provider);
+ const model = {
+ id: m.modelArn ?? m.modelId,
+ name: `${m.providerName} ${m.modelName}`,
+ provider: this.provider,
+ owner: m.providerName,
+ can_stream: m.responseStreamingSupported ?? false,
+ input_modalities: m.inputModalities ? formatAmazonModalities(m.inputModalities) : modelModalitiesToArray(modelCapability.input),
+ output_modalities: m.outputModalities ? formatAmazonModalities(m.outputModalities) : modelModalitiesToArray(modelCapability.input),
+ tool_support: modelCapability.tool_support,
+ };
+ return model;
+ });
+ //add custom models
+ if (customModelsList?.modelSummaries) {
+ customModelsList.modelSummaries.forEach((m) => {
+ if (!m.modelArn) {
+ throw new Error("Model ID not found");
+ }
+ const modelCapability = getModelCapabilities(m.modelArn, this.provider);
+ const model = {
+ id: m.modelArn,
+ name: m.modelName ?? m.modelArn,
+ provider: this.provider,
+ owner: "custom",
+ description: `Custom model from ${m.baseModelName}`,
+ is_custom: true,
+ input_modalities: modelModalitiesToArray(modelCapability.input),
+ output_modalities: modelModalitiesToArray(modelCapability.output),
+ tool_support: modelCapability.tool_support,
+ };
+ aiModels.push(model);
+ this.validateConnection;
+ });
+ }
+ //add inference profiles
+ if (inferenceProfilesList?.inferenceProfileSummaries) {
+ inferenceProfilesList.inferenceProfileSummaries.forEach((p) => {
+ if (!p.inferenceProfileArn) {
+ throw new Error("Profile ARN not found");
+ }
+ // Apply the same filtering logic to inference profiles based on their name
+ const profileId = p.inferenceProfileId || "";
+ const profileName = p.inferenceProfileName || "";
+ // Extract provider name from profile name or ID
+ let providerName = "";
+ for (const provider of supportedPublishers) {
+ if (profileName.toLowerCase().includes(provider) || profileId.toLowerCase().includes(provider)) {
+ providerName = provider;
+ break;
+ }
+ }
+ const modelCapability = getModelCapabilities(p.inferenceProfileArn ?? p.inferenceProfileId, this.provider);
+ if (providerName && shouldIncludeModel(profileId, providerName)) {
+ const model = {
+ id: p.inferenceProfileArn ?? p.inferenceProfileId,
+ name: p.inferenceProfileName ?? p.inferenceProfileArn,
+ provider: this.provider,
+ owner: providerName,
+ input_modalities: modelModalitiesToArray(modelCapability.input),
+ output_modalities: modelModalitiesToArray(modelCapability.output),
+ tool_support: modelCapability.tool_support,
+ };
+ aiModels.push(model);
+ }
+ });
+ }
+ return aiModels;
+ }
+ async generateEmbeddings({ text, image, model }) {
+ this.logger.info("[Bedrock] Generating embeddings with model " + model);
+ // Handle TwelveLabs Marengo models
+ if (model?.includes("twelvelabs.marengo")) {
+ return this.generateTwelvelabsMarengoEmbeddings({ text, image, model });
+ }
+ // Handle other Bedrock embedding models
+ const defaultModel = image ? "amazon.titan-embed-image-v1" : "amazon.titan-embed-text-v2:0";
+ const modelID = model ?? defaultModel;
+ const invokeBody = {
+ inputText: text,
+ inputImage: image
+ };
+ const executor = this.getExecutor();
+ const res = await executor.invokeModel({
+ modelId: modelID,
+ contentType: "application/json",
+ body: JSON.stringify(invokeBody),
+ });
+ const decoder = new TextDecoder();
+ const body = decoder.decode(res.body);
+ const result = JSON.parse(body);
+ if (!result.embedding) {
+ throw new Error("Embeddings not found");
+ }
+ return {
+ values: result.embedding,
+ model: modelID,
+ token_count: result.inputTextTokenCount
+ };
+ }
+ async generateTwelvelabsMarengoEmbeddings({ text, image, model }) {
+ const executor = this.getExecutor();
+ // Prepare the request payload for TwelveLabs Marengo
+ let invokeBody = {
+ inputType: "text"
+ };
+ if (text) {
+ invokeBody.inputText = text;
+ invokeBody.inputType = "text";
+ }
+ if (image) {
+ // For the embeddings interface, image is expected to be base64
+ invokeBody.mediaSource = {
+ base64String: image
+ };
+ invokeBody.inputType = "image";
+ }
+ const res = await executor.invokeModel({
+ modelId: model,
+ contentType: "application/json",
+ accept: "application/json",
+ body: JSON.stringify(invokeBody),
+ });
+ const decoder = new TextDecoder();
+ const body = decoder.decode(res.body);
+ const result = JSON.parse(body);
+ // TwelveLabs Marengo returns embedding data
+ if (!result.embedding) {
+ throw new Error("Embeddings not found in TwelveLabs Marengo response");
+ }
+ return {
+ values: result.embedding,
+ model: model,
+ // TwelveLabs Marengo doesn't return token count in the same way
+ token_count: undefined
+ };
+ }
+ }
+ function jobInfo(job, jobId) {
+ const jobStatus = job.status;
+ let status = TrainingJobStatus.running;
+ let details;
+ if (jobStatus === ModelCustomizationJobStatus.COMPLETED) {
+ status = TrainingJobStatus.succeeded;
+ }
+ else if (jobStatus === ModelCustomizationJobStatus.FAILED) {
+ status = TrainingJobStatus.failed;
+ details = job.failureMessage || "error";
+ }
+ else if (jobStatus === ModelCustomizationJobStatus.STOPPED) {
+ status = TrainingJobStatus.cancelled;
+ }
+ else {
+ status = TrainingJobStatus.running;
+ details = jobStatus;
+ }
+ job.baseModelArn;
+ return {
+ id: jobId,
+ model: job.outputModelArn,
+ status,
+ details
+ };
+ }
+ function getToolDefinitions(tools) {
+ return tools ? tools.map(getToolDefinition) : undefined;
+ }
+ function getToolDefinition(tool) {
+ return {
+ toolSpec: {
+ name: tool.name,
+ description: tool.description,
+ inputSchema: {
+ json: tool.input_schema,
+ }
+ }
+ };
+ }
+ /**
+ * Recursively removes undefined values from an object.
+ * AWS Bedrock's additionalModelRequestFields must be valid JSON, and undefined is not valid JSON.
+ * Any unrecognized parameters will cause an exception.
+ */
+ function removeUndefinedValues(obj) {
+ if (obj === null || typeof obj !== 'object' || Array.isArray(obj)) {
+ return obj;
+ }
+ const cleaned = {};
+ for (const [key, value] of Object.entries(obj)) {
+ if (value !== undefined) {
+ if (value !== null && typeof value === 'object' && !Array.isArray(value)) {
+ const cleanedNested = removeUndefinedValues(value);
+ // Only include nested objects if they have properties after cleaning
+ if (Object.keys(cleanedNested).length > 0) {
+ cleaned[key] = cleanedNested;
+ }
+ }
+ else {
+ cleaned[key] = value;
+ }
+ }
+ }
+ return cleaned;
+ }
+ /**
+ * Update the conversation messages
+ * @param prompt
+ * @param response
+ * @returns
+ */
+ function updateConversation(conversation, prompt) {
+ const combinedMessages = [...(conversation?.messages || []), ...(prompt.messages || [])];
+ const combinedSystem = prompt.system || conversation?.system;
+ return {
+ modelId: prompt?.modelId || conversation?.modelId,
+ messages: combinedMessages.length > 0 ? combinedMessages : [],
+ system: combinedSystem && combinedSystem.length > 0 ? combinedSystem : undefined,
+ };
+ }
+ function formatAmazonModalities(modalities) {
+ const standardizedModalities = [];
+ for (const modality of modalities) {
+ if (modality === ModelModality.TEXT) {
+ standardizedModalities.push("text");
+ }
+ else if (modality === ModelModality.IMAGE) {
+ standardizedModalities.push("image");
+ }
+ else if (modality === ModelModality.EMBEDDING) {
+ standardizedModalities.push("embedding");
+ }
+ else if (modality == "SPEECH") {
+ standardizedModalities.push("audio");
+ }
+ else if (modality == "VIDEO") {
+ standardizedModalities.push("video");
+ }
+ else {
+ // Handle other modalities as needed
+ standardizedModalities.push(modality.toString().toLowerCase());
+ }
+ }
+ return standardizedModalities;
+ }
+ //# sourceMappingURL=index.js.map