@llumiverse/drivers 0.22.0 → 0.22.2

Files changed (289)
  1. package/lib/cjs/adobe/firefly.js +8 -7
  2. package/lib/cjs/adobe/firefly.js.map +1 -1
  3. package/lib/cjs/azure/azure_foundry.js +12 -12
  4. package/lib/cjs/azure/azure_foundry.js.map +1 -1
  5. package/lib/cjs/bedrock/index.js +172 -22
  6. package/lib/cjs/bedrock/index.js.map +1 -1
  7. package/lib/cjs/bedrock/twelvelabs.js +87 -0
  8. package/lib/cjs/bedrock/twelvelabs.js.map +1 -0
  9. package/lib/cjs/groq/index.js +91 -16
  10. package/lib/cjs/groq/index.js.map +1 -1
  11. package/lib/cjs/huggingface_ie.js +7 -6
  12. package/lib/cjs/huggingface_ie.js.map +1 -1
  13. package/lib/cjs/index.js +2 -2
  14. package/lib/cjs/index.js.map +1 -1
  15. package/lib/cjs/mistral/index.js +5 -4
  16. package/lib/cjs/mistral/index.js.map +1 -1
  17. package/lib/cjs/openai/azure_openai.js +1 -1
  18. package/lib/cjs/openai/azure_openai.js.map +1 -1
  19. package/lib/cjs/openai/index.js +16 -12
  20. package/lib/cjs/openai/index.js.map +1 -1
  21. package/lib/cjs/replicate.js +6 -6
  22. package/lib/cjs/replicate.js.map +1 -1
  23. package/lib/cjs/test/utils.js +1 -1
  24. package/lib/cjs/test/utils.js.map +1 -1
  25. package/lib/cjs/test-driver/TestErrorCompletionStream.js +20 -0
  26. package/lib/cjs/test-driver/TestErrorCompletionStream.js.map +1 -0
  27. package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js +24 -0
  28. package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
  29. package/lib/cjs/test-driver/index.js +109 -0
  30. package/lib/cjs/test-driver/index.js.map +1 -0
  31. package/lib/cjs/test-driver/utils.js +30 -0
  32. package/lib/cjs/test-driver/utils.js.map +1 -0
  33. package/lib/cjs/togetherai/index.js +4 -4
  34. package/lib/cjs/togetherai/index.js.map +1 -1
  35. package/lib/cjs/vertexai/embeddings/embeddings-text.js +1 -1
  36. package/lib/cjs/vertexai/embeddings/embeddings-text.js.map +1 -1
  37. package/lib/cjs/vertexai/index.js +136 -31
  38. package/lib/cjs/vertexai/index.js.map +1 -1
  39. package/lib/cjs/vertexai/models/claude.js +38 -16
  40. package/lib/cjs/vertexai/models/claude.js.map +1 -1
  41. package/lib/cjs/vertexai/models/gemini.js +131 -41
  42. package/lib/cjs/vertexai/models/gemini.js.map +1 -1
  43. package/lib/cjs/vertexai/models/imagen.js +12 -23
  44. package/lib/cjs/vertexai/models/imagen.js.map +1 -1
  45. package/lib/cjs/vertexai/models/llama.js +4 -3
  46. package/lib/cjs/vertexai/models/llama.js.map +1 -1
  47. package/lib/cjs/vertexai/models.js +13 -2
  48. package/lib/cjs/vertexai/models.js.map +1 -1
  49. package/lib/cjs/watsonx/index.js +5 -5
  50. package/lib/cjs/watsonx/index.js.map +1 -1
  51. package/lib/cjs/xai/index.js +1 -1
  52. package/lib/cjs/xai/index.js.map +1 -1
  53. package/lib/esm/adobe/firefly.js +8 -7
  54. package/lib/esm/adobe/firefly.js.map +1 -1
  55. package/lib/esm/azure/azure_foundry.js +12 -12
  56. package/lib/esm/azure/azure_foundry.js.map +1 -1
  57. package/lib/esm/bedrock/index.js +172 -22
  58. package/lib/esm/bedrock/index.js.map +1 -1
  59. package/lib/esm/bedrock/twelvelabs.js +84 -0
  60. package/lib/esm/bedrock/twelvelabs.js.map +1 -0
  61. package/lib/esm/groq/index.js +91 -16
  62. package/lib/esm/groq/index.js.map +1 -1
  63. package/lib/esm/huggingface_ie.js +8 -7
  64. package/lib/esm/huggingface_ie.js.map +1 -1
  65. package/lib/esm/index.js +2 -2
  66. package/lib/esm/index.js.map +1 -1
  67. package/lib/esm/mistral/index.js +5 -4
  68. package/lib/esm/mistral/index.js.map +1 -1
  69. package/lib/esm/openai/azure_openai.js +1 -1
  70. package/lib/esm/openai/azure_openai.js.map +1 -1
  71. package/lib/esm/openai/index.js +16 -12
  72. package/lib/esm/openai/index.js.map +1 -1
  73. package/lib/esm/replicate.js +6 -6
  74. package/lib/esm/replicate.js.map +1 -1
  75. package/lib/esm/src/adobe/firefly.js +116 -0
  76. package/lib/esm/src/adobe/firefly.js.map +1 -0
  77. package/lib/esm/src/azure/azure_foundry.js +382 -0
  78. package/lib/esm/src/azure/azure_foundry.js.map +1 -0
  79. package/lib/esm/src/bedrock/converse.js +278 -0
  80. package/lib/esm/src/bedrock/converse.js.map +1 -0
  81. package/lib/esm/src/bedrock/index.js +962 -0
  82. package/lib/esm/src/bedrock/index.js.map +1 -0
  83. package/lib/esm/src/bedrock/nova-image-payload.js +203 -0
  84. package/lib/esm/src/bedrock/nova-image-payload.js.map +1 -0
  85. package/lib/esm/src/bedrock/payloads.js +2 -0
  86. package/lib/esm/src/bedrock/payloads.js.map +1 -0
  87. package/lib/esm/src/bedrock/s3.js +99 -0
  88. package/lib/esm/src/bedrock/s3.js.map +1 -0
  89. package/lib/esm/src/bedrock/twelvelabs.js +84 -0
  90. package/lib/esm/src/bedrock/twelvelabs.js.map +1 -0
  91. package/lib/esm/src/groq/index.js +286 -0
  92. package/lib/esm/src/groq/index.js.map +1 -0
  93. package/lib/esm/src/huggingface_ie.js +197 -0
  94. package/lib/esm/src/huggingface_ie.js.map +1 -0
  95. package/lib/esm/src/index.js +14 -0
  96. package/lib/esm/src/index.js.map +1 -0
  97. package/lib/esm/src/mistral/index.js +169 -0
  98. package/lib/esm/src/mistral/index.js.map +1 -0
  99. package/lib/esm/src/mistral/types.js +80 -0
  100. package/lib/esm/src/mistral/types.js.map +1 -0
  101. package/lib/esm/src/openai/azure_openai.js +68 -0
  102. package/lib/esm/src/openai/azure_openai.js.map +1 -0
  103. package/lib/esm/src/openai/index.js +464 -0
  104. package/lib/esm/src/openai/index.js.map +1 -0
  105. package/lib/esm/src/openai/openai.js +14 -0
  106. package/lib/esm/src/openai/openai.js.map +1 -0
  107. package/lib/esm/src/openai/openai_format.js +134 -0
  108. package/lib/esm/src/openai/openai_format.js.map +1 -0
  109. package/lib/esm/src/replicate.js +268 -0
  110. package/lib/esm/src/replicate.js.map +1 -0
  111. package/lib/esm/src/test/TestErrorCompletionStream.js +16 -0
  112. package/lib/esm/src/test/TestErrorCompletionStream.js.map +1 -0
  113. package/lib/esm/src/test/TestValidationErrorCompletionStream.js +20 -0
  114. package/lib/esm/src/test/TestValidationErrorCompletionStream.js.map +1 -0
  115. package/lib/esm/src/test/index.js +91 -0
  116. package/lib/esm/src/test/index.js.map +1 -0
  117. package/lib/esm/src/test/utils.js +25 -0
  118. package/lib/esm/src/test/utils.js.map +1 -0
  119. package/lib/esm/src/test-driver/TestErrorCompletionStream.js +16 -0
  120. package/lib/esm/src/test-driver/TestErrorCompletionStream.js.map +1 -0
  121. package/lib/esm/src/test-driver/TestValidationErrorCompletionStream.js +20 -0
  122. package/lib/esm/src/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
  123. package/lib/esm/src/test-driver/index.js +91 -0
  124. package/lib/esm/src/test-driver/index.js.map +1 -0
  125. package/lib/esm/src/test-driver/utils.js +25 -0
  126. package/lib/esm/src/test-driver/utils.js.map +1 -0
  127. package/lib/esm/src/togetherai/index.js +122 -0
  128. package/lib/esm/src/togetherai/index.js.map +1 -0
  129. package/lib/esm/src/togetherai/interfaces.js +2 -0
  130. package/lib/esm/src/togetherai/interfaces.js.map +1 -0
  131. package/lib/esm/src/vertexai/debug.js +6 -0
  132. package/lib/esm/src/vertexai/debug.js.map +1 -0
  133. package/lib/esm/src/vertexai/embeddings/embeddings-image.js +24 -0
  134. package/lib/esm/src/vertexai/embeddings/embeddings-image.js.map +1 -0
  135. package/lib/esm/src/vertexai/embeddings/embeddings-text.js +20 -0
  136. package/lib/esm/src/vertexai/embeddings/embeddings-text.js.map +1 -0
  137. package/lib/esm/src/vertexai/index.js +383 -0
  138. package/lib/esm/src/vertexai/index.js.map +1 -0
  139. package/lib/esm/src/vertexai/models/claude.js +394 -0
  140. package/lib/esm/src/vertexai/models/claude.js.map +1 -0
  141. package/lib/esm/src/vertexai/models/gemini.js +817 -0
  142. package/lib/esm/src/vertexai/models/gemini.js.map +1 -0
  143. package/lib/esm/src/vertexai/models/imagen.js +302 -0
  144. package/lib/esm/src/vertexai/models/imagen.js.map +1 -0
  145. package/lib/esm/src/vertexai/models/llama.js +179 -0
  146. package/lib/esm/src/vertexai/models/llama.js.map +1 -0
  147. package/lib/esm/src/vertexai/models.js +32 -0
  148. package/lib/esm/src/vertexai/models.js.map +1 -0
  149. package/lib/esm/src/watsonx/index.js +157 -0
  150. package/lib/esm/src/watsonx/index.js.map +1 -0
  151. package/lib/esm/src/watsonx/interfaces.js +2 -0
  152. package/lib/esm/src/watsonx/interfaces.js.map +1 -0
  153. package/lib/esm/src/xai/index.js +64 -0
  154. package/lib/esm/src/xai/index.js.map +1 -0
  155. package/lib/esm/test/utils.js +1 -1
  156. package/lib/esm/test/utils.js.map +1 -1
  157. package/lib/esm/test-driver/TestErrorCompletionStream.js +16 -0
  158. package/lib/esm/test-driver/TestErrorCompletionStream.js.map +1 -0
  159. package/lib/esm/test-driver/TestValidationErrorCompletionStream.js +20 -0
  160. package/lib/esm/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
  161. package/lib/esm/test-driver/index.js +91 -0
  162. package/lib/esm/test-driver/index.js.map +1 -0
  163. package/lib/esm/test-driver/utils.js +25 -0
  164. package/lib/esm/test-driver/utils.js.map +1 -0
  165. package/lib/esm/togetherai/index.js +4 -4
  166. package/lib/esm/togetherai/index.js.map +1 -1
  167. package/lib/esm/tsconfig.tsbuildinfo +1 -0
  168. package/lib/esm/vertexai/embeddings/embeddings-text.js +1 -1
  169. package/lib/esm/vertexai/embeddings/embeddings-text.js.map +1 -1
  170. package/lib/esm/vertexai/index.js +136 -31
  171. package/lib/esm/vertexai/index.js.map +1 -1
  172. package/lib/esm/vertexai/models/claude.js +37 -15
  173. package/lib/esm/vertexai/models/claude.js.map +1 -1
  174. package/lib/esm/vertexai/models/gemini.js +133 -43
  175. package/lib/esm/vertexai/models/gemini.js.map +1 -1
  176. package/lib/esm/vertexai/models/imagen.js +9 -17
  177. package/lib/esm/vertexai/models/imagen.js.map +1 -1
  178. package/lib/esm/vertexai/models/llama.js +4 -3
  179. package/lib/esm/vertexai/models/llama.js.map +1 -1
  180. package/lib/esm/vertexai/models.js +13 -2
  181. package/lib/esm/vertexai/models.js.map +1 -1
  182. package/lib/esm/watsonx/index.js +5 -5
  183. package/lib/esm/watsonx/index.js.map +1 -1
  184. package/lib/esm/xai/index.js +1 -1
  185. package/lib/esm/xai/index.js.map +1 -1
  186. package/lib/types/adobe/firefly.d.ts +3 -3
  187. package/lib/types/adobe/firefly.d.ts.map +1 -1
  188. package/lib/types/azure/azure_foundry.d.ts +2 -2
  189. package/lib/types/azure/azure_foundry.d.ts.map +1 -1
  190. package/lib/types/bedrock/index.d.ts +9 -5
  191. package/lib/types/bedrock/index.d.ts.map +1 -1
  192. package/lib/types/bedrock/twelvelabs.d.ts +50 -0
  193. package/lib/types/bedrock/twelvelabs.d.ts.map +1 -0
  194. package/lib/types/groq/index.d.ts +3 -0
  195. package/lib/types/groq/index.d.ts.map +1 -1
  196. package/lib/types/huggingface_ie.d.ts +8 -5
  197. package/lib/types/huggingface_ie.d.ts.map +1 -1
  198. package/lib/types/index.d.ts +2 -2
  199. package/lib/types/index.d.ts.map +1 -1
  200. package/lib/types/mistral/index.d.ts +2 -2
  201. package/lib/types/mistral/index.d.ts.map +1 -1
  202. package/lib/types/openai/index.d.ts +2 -2
  203. package/lib/types/openai/index.d.ts.map +1 -1
  204. package/lib/types/replicate.d.ts +6 -3
  205. package/lib/types/replicate.d.ts.map +1 -1
  206. package/lib/types/src/adobe/firefly.d.ts +29 -0
  207. package/lib/types/src/azure/azure_foundry.d.ts +49 -0
  208. package/lib/types/src/bedrock/converse.d.ts +8 -0
  209. package/lib/types/src/bedrock/index.d.ts +61 -0
  210. package/lib/types/src/bedrock/nova-image-payload.d.ts +73 -0
  211. package/lib/types/src/bedrock/payloads.d.ts +11 -0
  212. package/lib/types/src/bedrock/s3.d.ts +22 -0
  213. package/lib/types/src/bedrock/twelvelabs.d.ts +49 -0
  214. package/lib/types/src/groq/index.d.ts +26 -0
  215. package/lib/types/src/huggingface_ie.d.ts +34 -0
  216. package/lib/types/src/index.d.ts +13 -0
  217. package/lib/types/src/mistral/index.d.ts +24 -0
  218. package/lib/types/src/mistral/types.d.ts +131 -0
  219. package/lib/types/src/openai/azure_openai.d.ts +24 -0
  220. package/lib/types/src/openai/index.d.ts +24 -0
  221. package/lib/types/src/openai/openai.d.ts +14 -0
  222. package/lib/types/src/openai/openai_format.d.ts +18 -0
  223. package/lib/types/src/replicate.d.ts +47 -0
  224. package/lib/types/src/test/TestErrorCompletionStream.d.ts +8 -0
  225. package/lib/types/src/test/TestValidationErrorCompletionStream.d.ts +8 -0
  226. package/lib/types/src/test/index.d.ts +23 -0
  227. package/lib/types/src/test/utils.d.ts +4 -0
  228. package/lib/types/src/test-driver/TestErrorCompletionStream.d.ts +8 -0
  229. package/lib/types/src/test-driver/TestValidationErrorCompletionStream.d.ts +8 -0
  230. package/lib/types/src/test-driver/index.d.ts +23 -0
  231. package/lib/types/src/test-driver/utils.d.ts +4 -0
  232. package/lib/types/src/togetherai/index.d.ts +22 -0
  233. package/lib/types/src/togetherai/interfaces.d.ts +95 -0
  234. package/lib/types/src/vertexai/debug.d.ts +1 -0
  235. package/lib/types/src/vertexai/embeddings/embeddings-image.d.ts +10 -0
  236. package/lib/types/src/vertexai/embeddings/embeddings-text.d.ts +9 -0
  237. package/lib/types/src/vertexai/index.d.ts +52 -0
  238. package/lib/types/src/vertexai/models/claude.d.ts +19 -0
  239. package/lib/types/src/vertexai/models/gemini.d.ts +17 -0
  240. package/lib/types/src/vertexai/models/imagen.d.ts +74 -0
  241. package/lib/types/src/vertexai/models/llama.d.ts +19 -0
  242. package/lib/types/src/vertexai/models.d.ts +14 -0
  243. package/lib/types/src/watsonx/index.d.ts +26 -0
  244. package/lib/types/src/watsonx/interfaces.d.ts +64 -0
  245. package/lib/types/src/xai/index.d.ts +18 -0
  246. package/lib/types/test-driver/TestErrorCompletionStream.d.ts +9 -0
  247. package/lib/types/test-driver/TestErrorCompletionStream.d.ts.map +1 -0
  248. package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts +9 -0
  249. package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts.map +1 -0
  250. package/lib/types/test-driver/index.d.ts +24 -0
  251. package/lib/types/test-driver/index.d.ts.map +1 -0
  252. package/lib/types/test-driver/utils.d.ts +5 -0
  253. package/lib/types/test-driver/utils.d.ts.map +1 -0
  254. package/lib/types/togetherai/index.d.ts +3 -3
  255. package/lib/types/togetherai/index.d.ts.map +1 -1
  256. package/lib/types/vertexai/index.d.ts +17 -14
  257. package/lib/types/vertexai/index.d.ts.map +1 -1
  258. package/lib/types/vertexai/models/claude.d.ts +2 -0
  259. package/lib/types/vertexai/models/claude.d.ts.map +1 -1
  260. package/lib/types/vertexai/models/gemini.d.ts.map +1 -1
  261. package/lib/types/vertexai/models/imagen.d.ts +2 -2
  262. package/lib/types/vertexai/models/imagen.d.ts.map +1 -1
  263. package/lib/types/vertexai/models/llama.d.ts +2 -2
  264. package/lib/types/vertexai/models/llama.d.ts.map +1 -1
  265. package/lib/types/vertexai/models.d.ts +2 -2
  266. package/lib/types/vertexai/models.d.ts.map +1 -1
  267. package/lib/types/watsonx/index.d.ts +3 -3
  268. package/lib/types/watsonx/index.d.ts.map +1 -1
  269. package/package.json +9 -9
  270. package/src/adobe/firefly.ts +12 -20
  271. package/src/azure/azure_foundry.ts +5 -5
  272. package/src/bedrock/index.ts +203 -24
  273. package/src/bedrock/twelvelabs.ts +150 -0
  274. package/src/groq/index.ts +134 -37
  275. package/src/huggingface_ie.ts +7 -7
  276. package/src/mistral/index.ts +8 -7
  277. package/src/openai/index.ts +16 -10
  278. package/src/replicate.ts +11 -11
  279. package/src/test/utils.ts +1 -1
  280. package/src/togetherai/index.ts +7 -7
  281. package/src/vertexai/embeddings/embeddings-text.ts +2 -2
  282. package/src/vertexai/index.ts +133 -21
  283. package/src/vertexai/models/claude.ts +43 -17
  284. package/src/vertexai/models/gemini.ts +161 -60
  285. package/src/vertexai/models/imagen.ts +15 -26
  286. package/src/vertexai/models/llama.ts +6 -5
  287. package/src/vertexai/models.ts +18 -6
  288. package/src/watsonx/index.ts +5 -5
  289. package/src/xai/index.ts +11 -12
package/lib/esm/src/bedrock/index.js
@@ -0,0 +1,962 @@
+ import { Bedrock, CreateModelCustomizationJobCommand, GetModelCustomizationJobCommand, ModelCustomizationJobStatus, ModelModality, StopModelCustomizationJobCommand } from "@aws-sdk/client-bedrock";
+ import { BedrockRuntime } from "@aws-sdk/client-bedrock-runtime";
+ import { S3Client } from "@aws-sdk/client-s3";
+ import { AbstractDriver, Modalities, TrainingJobStatus, getMaxTokensLimitBedrock, modelModalitiesToArray, getModelCapabilities } from "@llumiverse/core";
+ import { transformAsyncIterator } from "@llumiverse/core/async";
+ import { formatNovaPrompt } from "@llumiverse/core/formatters";
+ import { LRUCache } from "mnemonist";
+ import { converseConcatMessages, converseJSONprefill, converseSystemToMessages, formatConversePrompt } from "./converse.js";
+ import { formatNovaImageGenerationPayload, NovaImageGenerationTaskType } from "./nova-image-payload.js";
+ import { forceUploadFile } from "./s3.js";
+ import { formatTwelvelabsPegasusPrompt } from "./twelvelabs.js";
+ const supportStreamingCache = new LRUCache(4096);
+ var BedrockModelType;
+ (function (BedrockModelType) {
+     BedrockModelType["FoundationModel"] = "foundation-model";
+     BedrockModelType["InferenceProfile"] = "inference-profile";
+     BedrockModelType["CustomModel"] = "custom-model";
+     BedrockModelType["Unknown"] = "unknown";
+ })(BedrockModelType || (BedrockModelType = {}));
+ ;
+ function converseFinishReason(reason) {
+     //Possible values:
+     //end_turn | tool_use | max_tokens | stop_sequence | guardrail_intervened | content_filtered
+     if (!reason)
+         return undefined;
+     switch (reason) {
+         case 'end_turn': return "stop";
+         case 'max_tokens': return "length";
+         default: return reason;
+     }
+ }
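For reference, this mapping normalizes Bedrock's Converse stop reasons onto llumiverse finish reasons; only the two remapped cases change, everything else passes through:

    converseFinishReason("end_turn");   // "stop"
    converseFinishReason("max_tokens"); // "length"
    converseFinishReason("tool_use");   // "tool_use" (passed through)
    converseFinishReason(undefined);    // undefined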
+ //Used to get a max_token value when not specified in the model options. Claude requires it to be set.
+ function maxTokenFallbackClaude(option) {
+     const modelOptions = option.model_options;
+     if (modelOptions && typeof modelOptions.max_tokens === "number") {
+         return modelOptions.max_tokens;
+     }
+     else {
+         const thinking_budget = modelOptions?.thinking_budget_tokens ?? 0;
+         let maxSupportedTokens = getMaxTokensLimitBedrock(option.model) ?? 8192; // Should always return a number for claude; 8192 is to satisfy the TypeScript type checker
+         // Fallback to the default max tokens limit for the model
+         if (option.model.includes('claude-3-7-sonnet') && (modelOptions?.thinking_budget_tokens ?? 0) < 48000) {
+             maxSupportedTokens = 64000; // Claude 3.7 can go up to 128k with a beta header, but when no max tokens is specified, we default to 64k.
+         }
+         return Math.min(16000 + thinking_budget, maxSupportedTokens); // Cap to 16k plus the thinking budget, to avoid taking up too much context window and quota.
+     }
+ }
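A worked example of the fallback above, with illustrative model ids and assuming getMaxTokensLimitBedrock() resolves the limits shown in the comments:

    // No thinking budget, limit 8192: Math.min(16000 + 0, 8192) === 8192
    maxTokenFallbackClaude({ model: "anthropic.claude-3-haiku", model_options: {} });
    // claude-3-7-sonnet with a 2000-token budget (< 48000), so the limit is
    // raised to 64000: Math.min(16000 + 2000, 64000) === 18000
    maxTokenFallbackClaude({
        model: "anthropic.claude-3-7-sonnet",
        model_options: { thinking_budget_tokens: 2000 },
    });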
+ export class BedrockDriver extends AbstractDriver {
+     static PROVIDER = "bedrock";
+     provider = BedrockDriver.PROVIDER;
+     _executor;
+     _service;
+     _service_region;
+     constructor(options) {
+         super(options);
+         if (!options.region) {
+             throw new Error("No region found. Set the region in the environment's endpoint URL.");
+         }
+     }
+     getExecutor() {
+         if (!this._executor) {
+             this._executor = new BedrockRuntime({
+                 region: this.options.region,
+                 credentials: this.options.credentials,
+             });
+         }
+         return this._executor;
+     }
+     getService(region = this.options.region) {
+         if (!this._service || this._service_region != region) {
+             this._service = new Bedrock({
+                 region: region,
+                 credentials: this.options.credentials,
+             });
+             this._service_region = region;
+         }
+         return this._service;
+     }
+     async formatPrompt(segments, opts) {
+         if (opts.model.includes("canvas")) {
+             return await formatNovaPrompt(segments, opts.result_schema);
+         }
+         if (opts.model.includes("twelvelabs.pegasus")) {
+             return await formatTwelvelabsPegasusPrompt(segments, opts);
+         }
+         return await formatConversePrompt(segments, opts);
+     }
+     getExtractedExecution(result, _prompt, options) {
+         let resultText = "";
+         let reasoning = "";
+         if (result.output?.message?.content) {
+             for (const content of result.output.message.content) {
+                 // Get text output
+                 if (content.text) {
+                     resultText += content.text;
+                 }
+                 else if (content.reasoningContent) {
+                     // Get reasoning content only if include_thoughts is true
+                     const claudeOptions = options?.model_options;
+                     if (claudeOptions?.include_thoughts) {
+                         if (content.reasoningContent.reasoningText) {
+                             reasoning += content.reasoningContent.reasoningText.text;
+                         }
+                         else if (content.reasoningContent.redactedContent) {
+                             // Handle redacted thinking content
+                             const redactedData = new TextDecoder().decode(content.reasoningContent.redactedContent);
+                             reasoning += `[Redacted thinking: ${redactedData}]`;
+                         }
+                     }
+                     else {
+                         this.logger.info("[Bedrock] Not outputting reasoning content as include_thoughts is false");
+                     }
+                 }
+                 else {
+                     // Get content block type
+                     const type = Object.keys(content).find(key => key !== '$unknown' && content[key] !== undefined);
+                     this.logger.info({ type }, "[Bedrock] Unsupported content response type:");
+                 }
+             }
+             // Add spacing if we have reasoning content
+             if (reasoning) {
+                 reasoning += '\n\n';
+             }
+         }
+         const completionResult = {
+             result: reasoning + resultText ? [{ type: "text", value: reasoning + resultText }] : [],
+             token_usage: {
+                 prompt: result.usage?.inputTokens,
+                 result: result.usage?.outputTokens,
+                 total: result.usage?.totalTokens,
+             },
+             finish_reason: converseFinishReason(result.stopReason),
+         };
+         return completionResult;
+     }
+     ;
+     getExtractedStream(result, _prompt, options) {
+         let output = "";
+         let reasoning = "";
+         let stop_reason = "";
+         let token_usage;
+         // Check if we should include thoughts
+         const shouldIncludeThoughts = options && options.model_options?.include_thoughts;
+         // Handle content block start events (for reasoning blocks)
+         if (result.contentBlockStart) {
+             // Handle redacted content at block start
+             if (result.contentBlockStart.start && 'reasoningContent' in result.contentBlockStart.start && shouldIncludeThoughts) {
+                 const reasoningStart = result.contentBlockStart.start;
+                 if (reasoningStart.reasoningContent?.redactedContent) {
+                     const redactedData = new TextDecoder().decode(reasoningStart.reasoningContent.redactedContent);
+                     reasoning = `[Redacted thinking: ${redactedData}]`;
+                 }
+             }
+         }
+         // Handle content block deltas (text and reasoning)
+         if (result.contentBlockDelta) {
+             const delta = result.contentBlockDelta.delta;
+             if (delta?.text) {
+                 output = delta.text;
+             }
+             else if (delta?.reasoningContent && shouldIncludeThoughts) {
+                 if (delta.reasoningContent.text) {
+                     reasoning = delta.reasoningContent.text;
+                 }
+                 else if (delta.reasoningContent.redactedContent) {
+                     const redactedData = new TextDecoder().decode(delta.reasoningContent.redactedContent);
+                     reasoning = `[Redacted thinking: ${redactedData}]`;
+                 }
+                 else if (delta.reasoningContent.signature) {
+                     // Handle signature updates for reasoning content - end of thinking
+                     reasoning = "\n\n";
+                     // Putting logging here so it only triggers once.
+                     this.logger.info("[Bedrock] Not outputting reasoning content as include_thoughts is false");
+                 }
+             }
+             else if (delta) {
+                 // Get content block type
+                 const type = Object.keys(delta).find(key => key !== '$unknown' && delta[key] !== undefined);
+                 this.logger.info({ type }, "[Bedrock] Unsupported content response type:");
+             }
+         }
+         // Handle content block stop events
+         if (result.contentBlockStop) {
+             // Content block ended - could be end of reasoning or text block
+             // Add minimal spacing for reasoning blocks if not already present
+             if (reasoning && !reasoning.endsWith('\n\n') && shouldIncludeThoughts) {
+                 reasoning += '\n\n';
+             }
+         }
+         if (result.messageStop) {
+             stop_reason = result.messageStop.stopReason ?? "";
+         }
+         if (result.metadata) {
+             token_usage = {
+                 prompt: result.metadata.usage?.inputTokens,
+                 result: result.metadata.usage?.outputTokens,
+                 total: result.metadata.usage?.totalTokens,
+             };
+         }
+         const completionResult = {
+             result: reasoning + output ? [{ type: "text", value: reasoning + output }] : [],
+             token_usage: token_usage,
+             finish_reason: converseFinishReason(stop_reason),
+         };
+         return completionResult;
+     }
+     ;
+     extractRegion(modelString, defaultRegion) {
+         // Match region in full ARN pattern
+         const arnMatch = modelString.match(/arn:aws[^:]*:bedrock:([^:]+):/);
+         if (arnMatch) {
+             return arnMatch[1];
+         }
+         // Match common AWS regions directly in string
+         const regionMatch = modelString.match(/(?:us|eu|ap|sa|ca|me|af)[-](east|west|central|south|north|southeast|southwest|northeast|northwest)[-][1-9]/);
+         if (regionMatch) {
+             return regionMatch[0];
+         }
+         return defaultRegion;
+     }
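extractRegion() prefers a region embedded in a full ARN, then falls back to a bare region token anywhere in the identifier, then to the default; a sketch with illustrative identifiers:

    const driver = new BedrockDriver({ region: "eu-west-1" });
    driver.extractRegion("arn:aws:bedrock:us-east-1::foundation-model/x", "eu-west-1"); // "us-east-1"
    driver.extractRegion("profile-us-west-2-custom", "eu-west-1");                      // "us-west-2"
    driver.extractRegion("anthropic.claude-3-haiku", "eu-west-1");                      // "eu-west-1"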
+     async getCanStream(model, type) {
+         let canStream = false;
+         let error = null;
+         const region = this.extractRegion(model, this.options.region);
+         if (type == BedrockModelType.FoundationModel || type == BedrockModelType.Unknown) {
+             try {
+                 const response = await this.getService(region).getFoundationModel({
+                     modelIdentifier: model
+                 });
+                 canStream = response.modelDetails?.responseStreamingSupported ?? false;
+                 return canStream;
+             }
+             catch (e) {
+                 error = e;
+             }
+         }
+         if (type == BedrockModelType.InferenceProfile || type == BedrockModelType.Unknown) {
+             try {
+                 const response = await this.getService(region).getInferenceProfile({
+                     inferenceProfileIdentifier: model
+                 });
+                 canStream = await this.getCanStream(response.models?.[0].modelArn ?? "", BedrockModelType.FoundationModel);
+                 return canStream;
+             }
+             catch (e) {
+                 error = e;
+             }
+         }
+         if (type == BedrockModelType.CustomModel || type == BedrockModelType.Unknown) {
+             try {
+                 const response = await this.getService(region).getCustomModel({
+                     modelIdentifier: model
+                 });
+                 canStream = await this.getCanStream(response.baseModelArn ?? "", BedrockModelType.FoundationModel);
+                 return canStream;
+             }
+             catch (e) {
+                 error = e;
+             }
+         }
+         if (error) {
+             console.warn("Error on canStream check for model: " + model + ", region detected: " + region, error);
+         }
+         return canStream;
+     }
+     async canStream(options) {
+         // // TwelveLabs Pegasus supports streaming according to the documentation
+         // if (options.model.includes("twelvelabs.pegasus")) {
+         // return true;
+         // }
+         let canStream = supportStreamingCache.get(options.model);
+         if (canStream == null) {
+             let type = BedrockModelType.Unknown;
+             if (options.model.includes("foundation-model")) {
+                 type = BedrockModelType.FoundationModel;
+             }
+             else if (options.model.includes("inference-profile")) {
+                 type = BedrockModelType.InferenceProfile;
+             }
+             else if (options.model.includes("custom-model")) {
+                 type = BedrockModelType.CustomModel;
+             }
+             canStream = await this.getCanStream(options.model, type);
+             supportStreamingCache.set(options.model, canStream);
+         }
+         return canStream;
+     }
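Because the result is memoized in the module-level supportStreamingCache, only the first canStream() call per model identifier pays for a management-API round trip (model id illustrative):

    const driver = new BedrockDriver({ region: "us-east-1" });
    await driver.canStream({ model: "anthropic.claude-3-haiku-20240307-v1:0" }); // queries Bedrock
    await driver.canStream({ model: "anthropic.claude-3-haiku-20240307-v1:0" }); // served from the cache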
+     async requestTextCompletion(prompt, options) {
+         // Handle Twelvelabs Pegasus models
+         if (options.model.includes("twelvelabs.pegasus")) {
+             return this.requestTwelvelabsPegasusCompletion(prompt, options);
+         }
+         // Handle other Bedrock models that use the Converse API
+         const conversePrompt = prompt;
+         let conversation = updateConversation(options.conversation, conversePrompt);
+         const payload = this.preparePayload(conversation, options);
+         const executor = this.getExecutor();
+         const res = await executor.converse({
+             ...payload,
+         });
+         conversation = updateConversation(conversation, {
+             messages: [res.output?.message ?? { content: [{ text: "" }], role: "assistant" }],
+             modelId: conversePrompt.modelId,
+         });
+         let tool_use = undefined;
+         //Get tool requests; we check tool use regardless of finish reason, as you can hit length and still get a valid response.
+         tool_use = res.output?.message?.content?.reduce((tools, c) => {
+             if (c.toolUse) {
+                 tools.push({
+                     tool_name: c.toolUse.name ?? "",
+                     tool_input: c.toolUse.input,
+                     id: c.toolUse.toolUseId ?? "",
+                 });
+             }
+             return tools;
+         }, []);
+         //If no tools were used, set to undefined
+         if (tool_use && tool_use.length == 0) {
+             tool_use = undefined;
+         }
+         const completion = {
+             ...this.getExtractedExecution(res, conversePrompt, options),
+             original_response: options.include_original_response ? res : undefined,
+             conversation: conversation,
+             tool_use: tool_use,
+         };
+         return completion;
+     }
+     async requestTwelvelabsPegasusCompletion(prompt, options) {
+         const executor = this.getExecutor();
+         const res = await executor.invokeModel({
+             modelId: options.model,
+             contentType: "application/json",
+             accept: "application/json",
+             body: JSON.stringify(prompt),
+         });
+         const decoder = new TextDecoder();
+         const body = decoder.decode(res.body);
+         const result = JSON.parse(body);
+         // Extract the response according to the TwelveLabs Pegasus format
+         let finishReason;
+         switch (result.finishReason) {
+             case "stop":
+                 finishReason = "stop";
+                 break;
+             case "length":
+                 finishReason = "length";
+                 break;
+             default:
+                 finishReason = result.finishReason;
+         }
+         return {
+             result: result.message ? [{ type: "text", value: result.message }] : [],
+             finish_reason: finishReason,
+             original_response: options.include_original_response ? result : undefined,
+         };
+     }
+     async requestTwelvelabsPegasusCompletionStream(prompt, options) {
+         const executor = this.getExecutor();
+         const res = await executor.invokeModelWithResponseStream({
+             modelId: options.model,
+             contentType: "application/json",
+             accept: "application/json",
+             body: JSON.stringify(prompt),
+         });
+         if (!res.body) {
+             throw new Error("[Bedrock] Stream not found in response");
+         }
+         return transformAsyncIterator(res.body, (chunk) => {
+             if (chunk.chunk?.bytes) {
+                 const decoder = new TextDecoder();
+                 const body = decoder.decode(chunk.chunk.bytes);
+                 try {
+                     const result = JSON.parse(body);
+                     // Extract the streaming response according to the TwelveLabs Pegasus format
+                     let finishReason;
+                     if (result.finishReason) {
+                         switch (result.finishReason) {
+                             case "stop":
+                                 finishReason = "stop";
+                                 break;
+                             case "length":
+                                 finishReason = "length";
+                                 break;
+                             default:
+                                 finishReason = result.finishReason;
+                         }
+                     }
+                     return {
+                         result: result.delta || result.message ? [{ type: "text", value: result.delta || result.message || "" }] : [],
+                         finish_reason: finishReason,
+                     };
+                 }
+                 catch (error) {
+                     // If JSON parsing fails, return an empty chunk
+                     return {
+                         result: [],
+                     };
+                 }
+             }
+             return {
+                 result: [],
+             };
+         });
+     }
+     async requestTextCompletionStream(prompt, options) {
+         // Handle Twelvelabs Pegasus models
+         if (options.model.includes("twelvelabs.pegasus")) {
+             return this.requestTwelvelabsPegasusCompletionStream(prompt, options);
+         }
+         // Handle other Bedrock models that use the Converse API
+         const conversePrompt = prompt;
+         const payload = this.preparePayload(conversePrompt, options);
+         const executor = this.getExecutor();
+         return executor.converseStream({
+             ...payload,
+         }).then((res) => {
+             const stream = res.stream;
+             if (!stream) {
+                 throw new Error("[Bedrock] Stream not found in response");
+             }
+             return transformAsyncIterator(stream, (streamSegment) => {
+                 return this.getExtractedStream(streamSegment, conversePrompt, options);
+             });
+         }).catch((err) => {
+             this.logger.error({ error: err }, "[Bedrock] Failed to stream");
+             throw err;
+         });
+     }
+     preparePayload(prompt, options) {
+         const model_options = options.model_options ?? { _option_id: "text-fallback" };
+         let additionalField = {};
+         let supportsJSONPrefill = false;
+         if (options.model.includes("amazon")) {
+             supportsJSONPrefill = true;
+             //Titan models also exist but do not support any additional options
+             if (options.model.includes("nova")) {
+                 additionalField = { inferenceConfig: { topK: model_options.top_k } };
+             }
+         }
+         else if (options.model.includes("claude")) {
+             const claude_options = model_options;
+             const thinking = claude_options.thinking_mode ?? false;
+             supportsJSONPrefill = !thinking;
+             if (options.model.includes("claude-3-7") || options.model.includes("-4-")) {
+                 additionalField = {
+                     ...additionalField,
+                     reasoning_config: {
+                         type: thinking ? "enabled" : "disabled",
+                         budget_tokens: thinking ? (claude_options.thinking_budget_tokens ?? 1024) : undefined,
+                     }
+                 };
+                 if (thinking && options.model.includes("claude-3-7-sonnet") &&
+                     ((claude_options.max_tokens ?? 0) > 64000 || (claude_options.thinking_budget_tokens ?? 0) > 64000)) {
+                     additionalField = {
+                         ...additionalField,
+                         anthropic_beta: ["output-128k-2025-02-19"]
+                     };
+                 }
+             }
+             //Needs max_tokens to be set
+             if (!model_options.max_tokens) {
+                 model_options.max_tokens = maxTokenFallbackClaude(options);
+             }
+             additionalField = { ...additionalField, top_k: model_options.top_k };
+         }
+         else if (options.model.includes("meta")) {
+             //LLaMA models support no additional options
+         }
+         else if (options.model.includes("mistral")) {
+             //7B instruct and 8x7B instruct
+             if (options.model.includes("7b")) {
+                 additionalField = { top_k: model_options.top_k };
+                 //Does not support system messages
+                 if (prompt.system && prompt.system?.length != 0) {
+                     prompt.messages?.push(converseSystemToMessages(prompt.system));
+                     prompt.system = undefined;
+                     prompt.messages = converseConcatMessages(prompt.messages);
+                 }
+             }
+             else {
+                 //Other models such as Mistral Small, Large and Large 2
+                 //support no additional fields.
+             }
+         }
+         else if (options.model.includes("ai21")) {
+             //Jamba models support no additional options;
+             //Jurassic 2 models do.
+             if (options.model.includes("j2")) {
+                 additionalField = {
+                     presencePenalty: { scale: model_options.presence_penalty },
+                     frequencyPenalty: { scale: model_options.frequency_penalty },
+                 };
+                 //Does not support system messages
+                 if (prompt.system && prompt.system?.length != 0) {
+                     prompt.messages?.push(converseSystemToMessages(prompt.system));
+                     prompt.system = undefined;
+                     prompt.messages = converseConcatMessages(prompt.messages);
+                 }
+             }
+         }
+         else if (options.model.includes("cohere.command")) {
+             //Command R and R plus
+             if (options.model.includes("cohere.command-r")) {
+                 additionalField = {
+                     k: model_options.top_k,
+                     frequency_penalty: model_options.frequency_penalty,
+                     presence_penalty: model_options.presence_penalty,
+                 };
+             }
+             else {
+                 // Command non-R
+                 additionalField = { k: model_options.top_k };
+                 //Does not support system messages
+                 if (prompt.system && prompt.system?.length != 0) {
+                     prompt.messages?.push(converseSystemToMessages(prompt.system));
+                     prompt.system = undefined;
+                     prompt.messages = converseConcatMessages(prompt.messages);
+                 }
+             }
+         }
+         else if (options.model.includes("palmyra")) {
+             const palmyraOptions = model_options;
+             additionalField = {
+                 seed: palmyraOptions?.seed,
+                 presence_penalty: palmyraOptions?.presence_penalty,
+                 frequency_penalty: palmyraOptions?.frequency_penalty,
+                 min_tokens: palmyraOptions?.min_tokens,
+             };
+         }
+         else if (options.model.includes("deepseek")) {
+             //DeepSeek models support no additional options
+         }
+         else if (options.model.includes("gpt-oss")) {
+             const gptOssOptions = model_options;
+             additionalField = {
+                 reasoning_effort: gptOssOptions?.reasoning_effort,
+             };
+         }
+         //If the last message is "```json", add the corresponding ``` as a stop sequence.
+         if (prompt.messages && prompt.messages.length > 0) {
+             if (prompt.messages[prompt.messages.length - 1].content?.[0].text === "```json") {
+                 const stopSeq = model_options.stop_sequence;
+                 if (!stopSeq) {
+                     model_options.stop_sequence = ["```"];
+                 }
+                 else if (!stopSeq.includes("```")) {
+                     stopSeq.push("```");
+                     model_options.stop_sequence = stopSeq;
+                 }
+             }
+         }
+         const tool_defs = getToolDefinitions(options.tools);
+         // Use prefill when there is a schema and tools are not being used
+         if (supportsJSONPrefill && options.result_schema && !tool_defs) {
+             prompt.messages = converseJSONprefill(prompt.messages);
+         }
+         const request = {
+             messages: prompt.messages,
+             system: prompt.system,
+             modelId: options.model,
+             inferenceConfig: {
+                 maxTokens: model_options.max_tokens,
+                 temperature: model_options.temperature,
+                 topP: model_options.top_p,
+                 stopSequences: model_options.stop_sequence,
+             },
+             additionalModelRequestFields: {
+                 ...additionalField,
+             }
+         };
+         //Only add tools if they are defined and not empty
+         if (tool_defs?.length) {
+             request.toolConfig = {
+                 tools: tool_defs,
+             };
+         }
+         return request;
+     }
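Putting the branches together, the Converse request returned by preparePayload() has roughly this shape; the values are illustrative and the additionalModelRequestFields shown are from the Claude 3.7 branch:

    const exampleRequest = {
        messages: [{ role: "user", content: [{ text: "Hello" }] }],
        system: [{ text: "Be terse." }],
        modelId: "anthropic.claude-3-7-sonnet-20250219-v1:0", // hypothetical id
        // 17024 === Math.min(16000 + 1024, 64000) from maxTokenFallbackClaude()
        inferenceConfig: { maxTokens: 17024, temperature: 0.7, topP: 0.9 },
        additionalModelRequestFields: {
            reasoning_config: { type: "enabled", budget_tokens: 1024 },
        },
        // toolConfig: { tools: [...] } is only added when tool definitions exist
    };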
+     async requestImageGeneration(prompt, options) {
+         if (options.output_modality !== Modalities.image) {
+             throw new Error(`Image generation requires image output_modality`);
+         }
+         if (options.model_options?._option_id !== "bedrock-nova-canvas") {
+             this.logger.warn({ options: options.model_options }, "Invalid model options");
+         }
+         const model_options = options.model_options;
+         const executor = this.getExecutor();
+         const taskType = model_options.taskType ?? NovaImageGenerationTaskType.TEXT_IMAGE;
+         this.logger.info("Task type: " + taskType);
+         if (typeof prompt === "string") {
+             throw new Error("Bad prompt format");
+         }
+         const payload = await formatNovaImageGenerationPayload(taskType, prompt, options);
+         const res = await executor.invokeModel({
+             modelId: options.model,
+             contentType: "application/json",
+             accept: "application/json",
+             body: JSON.stringify(payload),
+         }, {
+             requestTimeout: 60000 * 5
+         });
+         const decoder = new TextDecoder();
+         const body = decoder.decode(res.body);
+         const bedrockResult = JSON.parse(body);
+         return {
+             error: bedrockResult.error,
+             result: bedrockResult.images.map((image) => ({
+                 type: "image",
+                 value: image
+             }))
+         };
+     }
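A minimal invocation sketch for the Nova Canvas path above (model id hypothetical; the prompt must already be in the object form produced by formatPrompt):

    const images = await driver.requestImageGeneration(prompt, {
        model: "amazon.nova-canvas-v1:0", // hypothetical id
        output_modality: Modalities.image,
        model_options: { _option_id: "bedrock-nova-canvas" }, // taskType defaults to TEXT_IMAGE
    });
    // images.result is an array of { type: "image", value } items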
+     async startTraining(dataset, options) {
+         //convert options.params to Record<string, string>
+         const params = {};
+         for (const [key, value] of Object.entries(options.params || {})) {
+             params[key] = String(value);
+         }
+         if (!this.options.training_bucket) {
+             throw new Error("Training cannot be used since the 'training_bucket' property was not specified in driver options");
+         }
+         const s3 = new S3Client({ region: this.options.region, credentials: this.options.credentials });
+         const stream = await dataset.getStream();
+         const upload = await forceUploadFile(s3, stream, this.options.training_bucket, dataset.name);
+         const service = this.getService();
+         const response = await service.send(new CreateModelCustomizationJobCommand({
+             jobName: options.name + "-job",
+             customModelName: options.name,
+             roleArn: this.options.training_role_arn || undefined,
+             baseModelIdentifier: options.model,
+             clientRequestToken: "llumiverse-" + Date.now(),
+             trainingDataConfig: {
+                 s3Uri: `s3://${upload.Bucket}/${upload.Key}`,
+             },
+             outputDataConfig: undefined,
+             hyperParameters: params,
+             //TODO not supported?
+             //customizationType: "FINE_TUNING",
+         }));
+         const job = await service.send(new GetModelCustomizationJobCommand({
+             jobIdentifier: response.jobArn
+         }));
+         return jobInfo(job, response.jobArn);
+     }
+     async cancelTraining(jobId) {
+         const service = this.getService();
+         await service.send(new StopModelCustomizationJobCommand({
+             jobIdentifier: jobId
+         }));
+         const job = await service.send(new GetModelCustomizationJobCommand({
+             jobIdentifier: jobId
+         }));
+         return jobInfo(job, jobId);
+     }
+     async getTrainingJob(jobId) {
+         const service = this.getService();
+         const job = await service.send(new GetModelCustomizationJobCommand({
+             jobIdentifier: jobId
+         }));
+         return jobInfo(job, jobId);
+     }
+     // ===================== management API ==================
+     async validateConnection() {
+         const service = this.getService();
+         this.logger.debug("[Bedrock] validating connection", service.config.credentials.name);
+         //return true: if the client has been initialized, the connection is valid
+         return true;
+     }
+     async listTrainableModels() {
+         this.logger.debug("[Bedrock] listing trainable models");
+         return this._listModels(m => m.customizationsSupported ? m.customizationsSupported.includes("FINE_TUNING") : false);
+     }
+     async listModels() {
+         this.logger.debug("[Bedrock] listing models");
+         // exclude trainable models since they are not executable
+         // exclude embedding models, not to be used for typical completions
+         const filter = (m) => (m.inferenceTypesSupported?.includes("ON_DEMAND") && !m.outputModalities?.includes("EMBEDDING")) ?? false;
+         return this._listModels(filter);
+     }
+     async _listModels(foundationFilter) {
+         const service = this.getService();
+         const [foundationModelsList, customModelsList, inferenceProfilesList] = await Promise.all([
+             service.listFoundationModels({}).catch(() => {
+                 this.logger.warn("[Bedrock] Can't list foundation models. Check if the user has the right permissions.");
+                 return undefined;
+             }),
+             service.listCustomModels({}).catch(() => {
+                 this.logger.warn("[Bedrock] Can't list custom models. Check if the user has the right permissions.");
+                 return undefined;
+             }),
+             service.listInferenceProfiles({}).catch(() => {
+                 this.logger.warn("[Bedrock] Can't list inference profiles. Check if the user has the right permissions.");
+                 return undefined;
+             }),
+         ]);
+         if (!foundationModelsList?.modelSummaries) {
+             throw new Error("Foundation models not found");
+         }
+         let foundationModels = foundationModelsList.modelSummaries || [];
+         if (foundationFilter) {
+             foundationModels = foundationModels.filter(foundationFilter);
+         }
+         const supportedPublishers = ["amazon", "anthropic", "cohere", "ai21",
+             "mistral", "meta", "deepseek", "writer",
+             "openai", "twelvelabs", "qwen"];
+         const unsupportedModelsByPublisher = {
+             amazon: ["titan-image-generator", "nova-reel", "nova-sonic", "rerank"],
+             anthropic: [],
+             cohere: ["rerank", "embed"],
+             ai21: [],
+             mistral: [],
+             meta: [],
+             deepseek: [],
+             writer: [],
+             openai: [],
+             twelvelabs: ["marengo"],
+             qwen: [],
+         };
+         // Helper function to check if a model should be filtered out
+         const shouldIncludeModel = (modelId, providerName) => {
+             if (!modelId || !providerName)
+                 return false;
+             const normalizedProvider = providerName.toLowerCase();
+             // Check if the provider is supported
+             const isProviderSupported = supportedPublishers.some(provider => normalizedProvider.includes(provider));
+             if (!isProviderSupported)
+                 return false;
+             // Check if the model is in the unsupported list for its provider
+             for (const provider of supportedPublishers) {
+                 if (normalizedProvider.includes(provider)) {
+                     const unsupportedModels = unsupportedModelsByPublisher[provider] || [];
+                     return !unsupportedModels.some(unsupported => modelId.toLowerCase().includes(unsupported));
+                 }
+             }
+             return true;
+         };
+         foundationModels = foundationModels.filter(m => shouldIncludeModel(m.modelId, m.providerName));
+         const aiModels = foundationModels.map((m) => {
+             if (!m.modelId) {
+                 throw new Error("modelId not found");
+             }
+             const modelCapability = getModelCapabilities(m.modelArn ?? m.modelId, this.provider);
+             const model = {
+                 id: m.modelArn ?? m.modelId,
+                 name: `${m.providerName} ${m.modelName}`,
+                 provider: this.provider,
+                 owner: m.providerName,
+                 can_stream: m.responseStreamingSupported ?? false,
+                 input_modalities: m.inputModalities ? formatAmazonModalities(m.inputModalities) : modelModalitiesToArray(modelCapability.input),
+                 output_modalities: m.outputModalities ? formatAmazonModalities(m.outputModalities) : modelModalitiesToArray(modelCapability.output),
+                 tool_support: modelCapability.tool_support,
+             };
+             return model;
+         });
+         //add custom models
+         if (customModelsList?.modelSummaries) {
+             customModelsList.modelSummaries.forEach((m) => {
+                 if (!m.modelArn) {
+                     throw new Error("Model ID not found");
+                 }
+                 const modelCapability = getModelCapabilities(m.modelArn, this.provider);
+                 const model = {
+                     id: m.modelArn,
+                     name: m.modelName ?? m.modelArn,
+                     provider: this.provider,
+                     owner: "custom",
+                     description: `Custom model from ${m.baseModelName}`,
+                     is_custom: true,
+                     input_modalities: modelModalitiesToArray(modelCapability.input),
+                     output_modalities: modelModalitiesToArray(modelCapability.output),
+                     tool_support: modelCapability.tool_support,
+                 };
+                 aiModels.push(model);
+             });
+         }
+         //add inference profiles
+         if (inferenceProfilesList?.inferenceProfileSummaries) {
+             inferenceProfilesList.inferenceProfileSummaries.forEach((p) => {
+                 if (!p.inferenceProfileArn) {
+                     throw new Error("Profile ARN not found");
+                 }
+                 // Apply the same filtering logic to inference profiles based on their name
+                 const profileId = p.inferenceProfileId || "";
+                 const profileName = p.inferenceProfileName || "";
+                 // Extract the provider name from the profile name or ID
+                 let providerName = "";
+                 for (const provider of supportedPublishers) {
+                     if (profileName.toLowerCase().includes(provider) || profileId.toLowerCase().includes(provider)) {
+                         providerName = provider;
+                         break;
+                     }
+                 }
+                 const modelCapability = getModelCapabilities(p.inferenceProfileArn ?? p.inferenceProfileId, this.provider);
+                 if (providerName && shouldIncludeModel(profileId, providerName)) {
+                     const model = {
+                         id: p.inferenceProfileArn ?? p.inferenceProfileId,
+                         name: p.inferenceProfileName ?? p.inferenceProfileArn,
+                         provider: this.provider,
+                         owner: providerName,
+                         input_modalities: modelModalitiesToArray(modelCapability.input),
+                         output_modalities: modelModalitiesToArray(modelCapability.output),
+                         tool_support: modelCapability.tool_support,
+                     };
+                     aiModels.push(model);
+                 }
+             });
+         }
+         return aiModels;
+     }
+     async generateEmbeddings({ text, image, model }) {
+         this.logger.info("[Bedrock] Generating embeddings with model " + model);
+         // Handle TwelveLabs Marengo models
+         if (model?.includes("twelvelabs.marengo")) {
+             return this.generateTwelvelabsMarengoEmbeddings({ text, image, model });
+         }
+         // Handle other Bedrock embedding models
+         const defaultModel = image ? "amazon.titan-embed-image-v1" : "amazon.titan-embed-text-v2:0";
+         const modelID = model ?? defaultModel;
+         const invokeBody = {
+             inputText: text,
+             inputImage: image
+         };
+         const executor = this.getExecutor();
+         const res = await executor.invokeModel({
+             modelId: modelID,
+             contentType: "application/json",
+             body: JSON.stringify(invokeBody),
+         });
+         const decoder = new TextDecoder();
+         const body = decoder.decode(res.body);
+         const result = JSON.parse(body);
+         if (!result.embedding) {
+             throw new Error("Embeddings not found");
+         }
+         return {
+             values: result.embedding,
+             model: modelID,
+             token_count: result.inputTextTokenCount
+         };
+     }
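A usage sketch for the default Titan path; with no model argument the text model falls back as shown:

    const { values, model: usedModel, token_count } = await driver.generateEmbeddings({ text: "hello" });
    // usedModel === "amazon.titan-embed-text-v2:0", values is the embedding vector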
+     async generateTwelvelabsMarengoEmbeddings({ text, image, model }) {
+         const executor = this.getExecutor();
+         // Prepare the request payload for TwelveLabs Marengo
+         let invokeBody = {
+             inputType: "text"
+         };
+         if (text) {
+             invokeBody.inputText = text;
+             invokeBody.inputType = "text";
+         }
+         if (image) {
+             // For the embeddings interface, image is expected to be base64
+             invokeBody.mediaSource = {
+                 base64String: image
+             };
+             invokeBody.inputType = "image";
+         }
+         const res = await executor.invokeModel({
+             modelId: model,
+             contentType: "application/json",
+             accept: "application/json",
+             body: JSON.stringify(invokeBody),
+         });
+         const decoder = new TextDecoder();
+         const body = decoder.decode(res.body);
+         const result = JSON.parse(body);
+         // TwelveLabs Marengo returns embedding data
+         if (!result.embedding) {
+             throw new Error("Embeddings not found in TwelveLabs Marengo response");
+         }
+         return {
+             values: result.embedding,
+             model: model,
+             // TwelveLabs Marengo doesn't return token count in the same way
+             token_count: undefined
+         };
+     }
+ }
+ function jobInfo(job, jobId) {
+     const jobStatus = job.status;
+     let status = TrainingJobStatus.running;
+     let details;
+     if (jobStatus === ModelCustomizationJobStatus.COMPLETED) {
+         status = TrainingJobStatus.succeeded;
+     }
+     else if (jobStatus === ModelCustomizationJobStatus.FAILED) {
+         status = TrainingJobStatus.failed;
+         details = job.failureMessage || "error";
+     }
+     else if (jobStatus === ModelCustomizationJobStatus.STOPPED) {
+         status = TrainingJobStatus.cancelled;
+     }
+     else {
+         status = TrainingJobStatus.running;
+         details = jobStatus;
+     }
+     return {
+         id: jobId,
+         model: job.outputModelArn,
+         status,
+         details
+     };
+ }
+ function getToolDefinitions(tools) {
+     return tools ? tools.map(getToolDefinition) : undefined;
+ }
+ function getToolDefinition(tool) {
+     return {
+         toolSpec: {
+             name: tool.name,
+             description: tool.description,
+             inputSchema: {
+                 json: tool.input_schema,
+             }
+         }
+     };
+ }
+ /**
+  * Update the conversation messages
+  * @param conversation
+  * @param prompt
+  * @returns
+  */
+ function updateConversation(conversation, prompt) {
+     const combinedMessages = [...(conversation?.messages || []), ...(prompt.messages || [])];
+     const combinedSystem = prompt.system || conversation?.system;
+     return {
+         modelId: prompt?.modelId || conversation?.modelId,
+         messages: combinedMessages.length > 0 ? combinedMessages : [],
+         system: combinedSystem && combinedSystem.length > 0 ? combinedSystem : undefined,
+     };
+ }
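For illustration, merging a running conversation with a new prompt keeps the earlier messages and prefers the newest modelId:

    const merged = updateConversation(
        { modelId: "model-a", messages: [{ role: "user", content: [{ text: "Hi" }] }] },
        { modelId: "model-b", messages: [{ role: "assistant", content: [{ text: "Hello" }] }] },
    );
    // merged.modelId === "model-b", merged.messages.length === 2, merged.system === undefined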
+ function formatAmazonModalities(modalities) {
+     const standardizedModalities = [];
+     for (const modality of modalities) {
+         if (modality === ModelModality.TEXT) {
+             standardizedModalities.push("text");
+         }
+         else if (modality === ModelModality.IMAGE) {
+             standardizedModalities.push("image");
+         }
+         else if (modality === ModelModality.EMBEDDING) {
+             standardizedModalities.push("embedding");
+         }
+         else if (modality == "SPEECH") {
+             standardizedModalities.push("audio");
+         }
+         else if (modality == "VIDEO") {
+             standardizedModalities.push("video");
+         }
+         else {
+             // Handle other modalities as needed
+             standardizedModalities.push(modality.toString().toLowerCase());
+         }
+     }
+     return standardizedModalities;
+ }
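Assuming the AWS SDK's string-enum values ("TEXT", "IMAGE", "EMBEDDING"), the normalization behaves as:

    formatAmazonModalities(["TEXT", "IMAGE", "SPEECH", "VIDEO"]); // ["text", "image", "audio", "video"]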
+ //# sourceMappingURL=index.js.map
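Taken together, a minimal end-to-end sketch of the new driver; this assumes BedrockDriver is re-exported from the package root, uses only members visible in this file, and the model id and segment shape are illustrative:

    import { BedrockDriver } from "@llumiverse/drivers";

    const driver = new BedrockDriver({ region: "us-east-1" }); // throws without a region
    const model = "anthropic.claude-3-haiku-20240307-v1:0";    // hypothetical id
    const segments = [{ role: "user", content: "Say hello" }]; // shape assumed from @llumiverse/core
    if (await driver.canStream({ model })) {
        const prompt = await driver.formatPrompt(segments, { model });
        const stream = await driver.requestTextCompletionStream(prompt, { model });
        for await (const chunk of stream) {
            process.stdout.write(chunk.result[0]?.value ?? "");
        }
    }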