@ax-llm/ax-ai-aws-bedrock 14.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.cjs ADDED
@@ -0,0 +1,492 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// index.ts
// Public export map. __export installs lazy getters, so the names below
// resolve correctly even though their definitions (hoisted vars) appear
// later in this bundle.
var index_exports = {};
__export(index_exports, {
  AxAIBedrock: () => AxAIBedrock,
  AxAIBedrockEmbedModel: () => AxAIBedrockEmbedModel,
  AxAIBedrockModel: () => AxAIBedrockModel,
  axModelInfoBedrock: () => axModelInfoBedrock
});
// Publish the namespace as this module's CommonJS exports (adds __esModule).
module.exports = __toCommonJS(index_exports);
29
+
30
+ // api.ts
31
+ var import_client_bedrock_runtime = require("@aws-sdk/client-bedrock-runtime");
32
+ var import_ax = require("@ax-llm/ax");
33
+
34
+ // types.ts
35
// types.ts
// Chat-capable Bedrock model identifiers (compiled form of a TS string enum;
// string enums only get the forward name -> value mapping).
var AxAIBedrockModel = /* @__PURE__ */ ((modelEnum) => {
  const ids = {
    ClaudeSonnet4: "us.anthropic.claude-sonnet-4-20250514-v1:0",
    Claude37Sonnet: "anthropic.claude-3-7-sonnet-20250219-v1:0",
    Claude35Sonnet: "anthropic.claude-3-5-sonnet-20240620-v1:0",
    GptOss120B: "openai.gpt-oss-120b-1:0",
    GptOss20B: "openai.gpt-oss-20b-1:0"
  };
  for (const [key, value] of Object.entries(ids)) {
    modelEnum[key] = value;
  }
  return modelEnum;
})(AxAIBedrockModel || {});
43
// Embedding model identifiers (compiled form of a TS string enum).
var AxAIBedrockEmbedModel = /* @__PURE__ */ ((embedEnum) => {
  embedEnum.TitanEmbedV2 = "amazon.titan-embed-text-v2:0";
  return embedEnum;
})(AxAIBedrockEmbedModel || {});
47
+
48
+ // info.ts
49
+ var axModelInfoBedrock = [
50
+ // ========================================================================
51
+ // Claude Models
52
+ // ========================================================================
53
+ {
54
+ name: "us.anthropic.claude-sonnet-4-20250514-v1:0" /* ClaudeSonnet4 */,
55
+ currency: "usd",
56
+ promptTokenCostPer1M: 3,
57
+ completionTokenCostPer1M: 15,
58
+ maxTokens: 64e3,
59
+ contextWindow: 2e5,
60
+ supported: { thinkingBudget: true, showThoughts: true }
61
+ },
62
+ {
63
+ name: "anthropic.claude-3-7-sonnet-20250219-v1:0" /* Claude37Sonnet */,
64
+ currency: "usd",
65
+ promptTokenCostPer1M: 3,
66
+ completionTokenCostPer1M: 15,
67
+ maxTokens: 64e3,
68
+ contextWindow: 2e5
69
+ },
70
+ {
71
+ name: "anthropic.claude-3-5-sonnet-20240620-v1:0" /* Claude35Sonnet */,
72
+ currency: "usd",
73
+ promptTokenCostPer1M: 3,
74
+ completionTokenCostPer1M: 15,
75
+ maxTokens: 8192,
76
+ contextWindow: 2e5
77
+ },
78
+ // ========================================================================
79
+ // GPT OSS Models
80
+ // ========================================================================
81
+ {
82
+ name: "openai.gpt-oss-120b-1:0" /* GptOss120B */,
83
+ currency: "usd",
84
+ promptTokenCostPer1M: 0.5,
85
+ completionTokenCostPer1M: 1.5,
86
+ maxTokens: 16384,
87
+ contextWindow: 128e3
88
+ },
89
+ {
90
+ name: "openai.gpt-oss-20b-1:0" /* GptOss20B */,
91
+ currency: "usd",
92
+ promptTokenCostPer1M: 0.25,
93
+ completionTokenCostPer1M: 0.75,
94
+ maxTokens: 16384,
95
+ contextWindow: 128e3
96
+ },
97
+ // ========================================================================
98
+ // Embed Models
99
+ // ========================================================================
100
+ {
101
+ name: "amazon.titan-embed-text-v2:0" /* TitanEmbedV2 */,
102
+ currency: "usd",
103
+ promptTokenCostPer1M: 0.02,
104
+ completionTokenCostPer1M: 0,
105
+ maxTokens: 8192,
106
+ contextWindow: 8192
107
+ }
108
+ ];
109
+
110
+ // api.ts
111
+ var AxAIBedrockImpl = class {
112
+ constructor(config, primaryRegion, fallbackRegions, gptRegion, gptFallbackRegions) {
113
+ this.config = config;
114
+ this.primaryRegion = primaryRegion;
115
+ this.fallbackRegions = fallbackRegions;
116
+ this.gptRegion = gptRegion;
117
+ this.gptFallbackRegions = gptFallbackRegions;
118
+ }
119
+ clients = /* @__PURE__ */ new Map();
120
+ tokensUsed;
121
+ getTokenUsage() {
122
+ return this.tokensUsed;
123
+ }
124
+ getModelConfig() {
125
+ return {
126
+ maxTokens: this.config.maxTokens,
127
+ temperature: this.config.temperature,
128
+ topP: this.config.topP,
129
+ stopSequences: this.config.stopSequences
130
+ };
131
+ }
132
+ getClient(region) {
133
+ let client = this.clients.get(region);
134
+ if (!client) {
135
+ client = new import_client_bedrock_runtime.BedrockRuntimeClient({ region });
136
+ this.clients.set(region, client);
137
+ }
138
+ return client;
139
+ }
140
+ /**
141
+ * Detect model family from model ID
142
+ */
143
+ getModelFamily(modelId) {
144
+ if (modelId.includes("anthropic.claude") || modelId.includes("us.anthropic.claude")) {
145
+ return "claude";
146
+ }
147
+ if (modelId.includes("openai.gpt")) {
148
+ return "gpt";
149
+ }
150
+ if (modelId.includes("amazon.titan-embed")) {
151
+ return "titan";
152
+ }
153
+ throw new Error(`Unknown model family for: ${modelId}`);
154
+ }
155
+ /**
156
+ * Get appropriate regions for model
157
+ */
158
+ getRegionsForModel(modelId) {
159
+ const family = this.getModelFamily(modelId);
160
+ if (family === "gpt") {
161
+ return [this.gptRegion, ...this.gptFallbackRegions];
162
+ }
163
+ return [this.primaryRegion, ...this.fallbackRegions];
164
+ }
165
+ /**
166
+ * Regional failover logic - tries primary region, then fallbacks
167
+ */
168
+ async invokeWithFailover(modelId, handler) {
169
+ const regions = this.getRegionsForModel(modelId);
170
+ let lastError;
171
+ for (const region of regions) {
172
+ try {
173
+ const client = this.getClient(region);
174
+ return await handler(client);
175
+ } catch (error) {
176
+ lastError = error;
177
+ console.warn(
178
+ `[Bedrock] Region ${region} failed for ${modelId}:`,
179
+ error
180
+ );
181
+ }
182
+ }
183
+ throw lastError || new Error(`All Bedrock regions failed for ${modelId}`);
184
+ }
185
+ /**
186
+ * Transform AX chat request → Bedrock request (Claude or GPT)
187
+ */
188
+ createChatReq = async (req, _config) => {
189
+ const family = this.getModelFamily(req.model);
190
+ const maxTokens = req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 4096;
191
+ const temperature = req.modelConfig?.temperature ?? this.config.temperature;
192
+ const topP = req.modelConfig?.topP ?? this.config.topP;
193
+ let bedrockRequest;
194
+ if (family === "claude") {
195
+ const systemMessages = req.chatPrompt.filter((msg) => msg.role === "system").map((msg) => msg.content).join("\n\n");
196
+ const messages = req.chatPrompt.filter((msg) => msg.role !== "system").map((msg) => {
197
+ if (msg.role === "user") {
198
+ return {
199
+ role: "user",
200
+ content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
201
+ };
202
+ }
203
+ if (msg.role === "assistant") {
204
+ return {
205
+ role: "assistant",
206
+ content: msg.content || ""
207
+ };
208
+ }
209
+ throw new Error(`Unsupported role: ${msg.role}`);
210
+ });
211
+ bedrockRequest = {
212
+ anthropic_version: "bedrock-2023-05-31",
213
+ max_tokens: maxTokens,
214
+ messages,
215
+ ...systemMessages ? { system: systemMessages } : {},
216
+ ...temperature !== void 0 ? { temperature } : {},
217
+ ...topP !== void 0 ? { top_p: topP } : {}
218
+ };
219
+ } else if (family === "gpt") {
220
+ const messages = req.chatPrompt.filter((msg) => msg.role !== "function").map((msg) => {
221
+ let content;
222
+ if ("content" in msg) {
223
+ content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
224
+ } else {
225
+ content = "";
226
+ }
227
+ return {
228
+ role: msg.role,
229
+ content
230
+ };
231
+ });
232
+ bedrockRequest = {
233
+ messages,
234
+ max_tokens: maxTokens,
235
+ ...temperature !== void 0 ? { temperature } : {},
236
+ ...topP !== void 0 ? { top_p: topP } : {}
237
+ };
238
+ } else {
239
+ throw new Error(`Chat not supported for model family: ${family}`);
240
+ }
241
+ const apiConfig = {
242
+ name: `bedrock-${family}`,
243
+ localCall: async (data) => {
244
+ const reqBody = data;
245
+ const result = await this.invokeWithFailover(
246
+ req.model,
247
+ async (client) => {
248
+ const command = new import_client_bedrock_runtime.InvokeModelCommand({
249
+ modelId: req.model,
250
+ body: JSON.stringify(reqBody),
251
+ contentType: "application/json",
252
+ accept: "application/json"
253
+ });
254
+ const response = await client.send(command);
255
+ return JSON.parse(new TextDecoder().decode(response.body));
256
+ }
257
+ );
258
+ return result;
259
+ }
260
+ };
261
+ return [apiConfig, bedrockRequest];
262
+ };
263
+ /**
264
+ * Transform Bedrock response → AX chat response (Claude or GPT)
265
+ */
266
+ createChatResp(resp) {
267
+ if ("content" in resp && Array.isArray(resp.content)) {
268
+ return this.createClaudeChatResp(resp);
269
+ } else if ("choices" in resp && Array.isArray(resp.choices)) {
270
+ return this.createGptChatResp(resp);
271
+ }
272
+ throw new Error("Unknown response format");
273
+ }
274
+ /**
275
+ * Handle Claude-specific response format
276
+ */
277
+ createClaudeChatResp(resp) {
278
+ let content = "";
279
+ for (const block of resp.content) {
280
+ if (block.type === "text") {
281
+ content += block.text;
282
+ }
283
+ }
284
+ this.tokensUsed = {
285
+ promptTokens: resp.usage.input_tokens,
286
+ completionTokens: resp.usage.output_tokens,
287
+ totalTokens: resp.usage.input_tokens + resp.usage.output_tokens
288
+ };
289
+ let finishReason;
290
+ switch (resp.stop_reason) {
291
+ case "end_turn":
292
+ case "stop_sequence":
293
+ finishReason = "stop";
294
+ break;
295
+ case "max_tokens":
296
+ finishReason = "length";
297
+ break;
298
+ default:
299
+ finishReason = void 0;
300
+ }
301
+ return {
302
+ results: [
303
+ {
304
+ index: 0,
305
+ id: resp.id,
306
+ content,
307
+ finishReason
308
+ }
309
+ ],
310
+ remoteId: resp.id
311
+ };
312
+ }
313
+ /**
314
+ * Handle GPT-specific response format
315
+ */
316
+ createGptChatResp(resp) {
317
+ const choice = resp.choices[0];
318
+ if (!choice) {
319
+ throw new Error("No choices in GPT response");
320
+ }
321
+ let content = "";
322
+ if (typeof choice.message.content === "string") {
323
+ content = choice.message.content;
324
+ } else if (Array.isArray(choice.message.content)) {
325
+ content = choice.message.content.map((part) => {
326
+ if (typeof part === "string") return part;
327
+ return part.text || part.content || "";
328
+ }).join("");
329
+ }
330
+ if (resp.usage) {
331
+ this.tokensUsed = {
332
+ promptTokens: resp.usage.prompt_tokens,
333
+ completionTokens: resp.usage.completion_tokens,
334
+ totalTokens: resp.usage.total_tokens
335
+ };
336
+ }
337
+ let finishReason;
338
+ switch (choice.finish_reason) {
339
+ case "stop":
340
+ finishReason = "stop";
341
+ break;
342
+ case "length":
343
+ finishReason = "length";
344
+ break;
345
+ case "content_filter":
346
+ finishReason = "content_filter";
347
+ break;
348
+ default:
349
+ finishReason = void 0;
350
+ }
351
+ return {
352
+ results: [
353
+ {
354
+ index: choice.index,
355
+ id: resp.id,
356
+ content,
357
+ finishReason
358
+ }
359
+ ],
360
+ remoteId: resp.id
361
+ };
362
+ }
363
+ /**
364
+ * Create embed request for Titan
365
+ */
366
+ createEmbedReq = async (req) => {
367
+ if (!req.texts || req.texts.length === 0) {
368
+ throw new Error("No texts provided for embedding");
369
+ }
370
+ const embedRequest = {
371
+ inputText: req.texts[0],
372
+ // Take first text
373
+ dimensions: 512,
374
+ normalize: true
375
+ };
376
+ const apiConfig = {
377
+ name: "bedrock-titan-embed",
378
+ localCall: async (data) => {
379
+ const reqBody = data;
380
+ const result = await this.invokeWithFailover(
381
+ req.embedModel,
382
+ async (client) => {
383
+ const command = new import_client_bedrock_runtime.InvokeModelCommand({
384
+ modelId: req.embedModel,
385
+ body: JSON.stringify(reqBody),
386
+ contentType: "application/json",
387
+ accept: "application/json"
388
+ });
389
+ const response = await client.send(command);
390
+ return JSON.parse(new TextDecoder().decode(response.body));
391
+ }
392
+ );
393
+ return result;
394
+ }
395
+ };
396
+ return [apiConfig, embedRequest];
397
+ };
398
+ /**
399
+ * Create embed response from Titan
400
+ */
401
+ createEmbedResp(resp) {
402
+ return {
403
+ embeddings: [resp.embedding]
404
+ };
405
+ }
406
+ };
407
/**
 * Public Bedrock provider: wires AxAIBedrockImpl into AxBaseAI with model
 * metadata, default models, and a feature matrix.
 */
var AxAIBedrock = class extends import_ax.AxBaseAI {
  constructor({
    region = "us-east-2",
    fallbackRegions = ["us-west-2", "us-east-1"],
    gptRegion = "us-west-2",
    gptFallbackRegions = ["us-east-1"],
    config,
    options
  }) {
    // User config wins over library defaults; the default chat model is
    // the region-prefixed Claude Sonnet 4 inference-profile ID.
    const mergedConfig = {
      ...(0, import_ax.axBaseAIDefaultConfig)(),
      model: "us.anthropic.claude-sonnet-4-20250514-v1:0" /* ClaudeSonnet4 */,
      region,
      fallbackRegions,
      gptRegion,
      gptFallbackRegions,
      ...config
    };
    const impl = new AxAIBedrockImpl(
      mergedConfig,
      region,
      fallbackRegions,
      gptRegion,
      gptFallbackRegions
    );
    // Feature matrix: everything optional is switched off until it is
    // actually implemented; multi-turn chat is the one advertised ability.
    const supportFor = () => ({
      functions: false,
      // Not implemented yet - add when needed
      streaming: false,
      // Not implemented yet - add when needed
      functionCot: false,
      hasThinkingBudget: false,
      hasShowThoughts: false,
      media: {
        images: { supported: false, formats: [] },
        audio: { supported: false, formats: [] },
        files: { supported: false, formats: [], uploadMethod: "none" },
        urls: { supported: false, webSearch: false, contextFetching: false }
      },
      caching: { supported: false, types: [] },
      thinking: false,
      multiTurn: true
      // All models support multi-turn conversations
    });
    super(impl, {
      name: "Bedrock",
      apiURL: "",
      // Not used - requests go through the AWS SDK
      headers: async () => ({}),
      // Auth handled by the AWS SDK credential chain
      modelInfo: axModelInfoBedrock,
      defaults: {
        model: mergedConfig.model,
        embedModel: mergedConfig.embedModel
      },
      options,
      supportFor
    });
  }
};
485
// Annotate the CommonJS export names for ESM import in node:
// (`0 && ...` never executes at runtime; Node's CJS/ESM interop statically
// scans this pattern so `import { AxAIBedrock } from ...` resolves named
// exports from this CommonJS file)
0 && (module.exports = {
  AxAIBedrock,
  AxAIBedrockEmbedModel,
  AxAIBedrockModel,
  axModelInfoBedrock
});
//# sourceMappingURL=index.cjs.map
package/index.cjs.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../index.ts","../api.ts","../types.ts","../info.ts"],"sourcesContent":["/**\n * AWS Bedrock Provider for AX\n *\n * Production-ready AWS Bedrock integration supporting Claude, GPT OSS, and Titan Embed models.\n *\n * @example\n * ```typescript\n * import { AxAIBedrock, AxAIBedrockModel } from '@ax-llm/ax-ai-aws-bedrock';\n *\n * const ai = new AxAIBedrock({\n * region: 'us-east-2',\n * config: { model: AxAIBedrockModel.ClaudeSonnet4 }\n * });\n *\n * const response = await ai.chat({\n * chatPrompt: [\n * { role: 'system', content: 'You are a helpful assistant.' },\n * { role: 'user', content: 'What is AWS Bedrock?' }\n * ]\n * });\n * ```\n *\n * @packageDocumentation\n */\n\nexport { AxAIBedrock } from './api.js';\nexport { axModelInfoBedrock } from './info.js';\nexport { AxAIBedrockEmbedModel, AxAIBedrockModel } from './types.js';\n\nexport type {\n AxAIBedrockConfig,\n BedrockChatRequest,\n BedrockChatResponse,\n BedrockClaudeRequest,\n BedrockClaudeResponse,\n BedrockGptRequest,\n BedrockGptResponse,\n BedrockTitanEmbedRequest,\n BedrockTitanEmbedResponse,\n} from './types.js';\n","/**\n * AWS Bedrock Provider for AX Library\n *\n * Supports Claude, GPT OSS, and Titan Embed models for use with AX's\n * compiler, signatures, flows, and optimization features.\n *\n * Usage:\n * const ai = new AxAIBedrock({\n * region: 'us-east-2',\n * config: { model: AxAIBedrockModel.ClaudeSonnet4 }\n * });\n *\n * const sig = new AxSignature('input -> output');\n * const gen = new AxGen(sig, { ai });\n * const result = await gen.forward({ input: 'test' });\n */\n\nimport {\n BedrockRuntimeClient,\n InvokeModelCommand,\n} from '@aws-sdk/client-bedrock-runtime';\nimport type {\n AxAIServiceImpl,\n AxAIServiceOptions,\n AxAPI,\n AxChatResponse,\n AxChatResponseResult,\n AxEmbedResponse,\n AxInternalChatRequest,\n AxInternalEmbedRequest,\n AxModelConfig,\n AxTokenUsage,\n} from '@ax-llm/ax';\nimport { type AxAIFeatures, AxBaseAI, axBaseAIDefaultConfig } 
from '@ax-llm/ax';\nimport { axModelInfoBedrock } from './info.js';\nimport {\n type AxAIBedrockConfig,\n type AxAIBedrockEmbedModel,\n AxAIBedrockModel,\n type BedrockChatRequest,\n type BedrockChatResponse,\n type BedrockClaudeRequest,\n type BedrockClaudeResponse,\n type BedrockGptRequest,\n type BedrockGptResponse,\n type BedrockTitanEmbedRequest,\n type BedrockTitanEmbedResponse,\n} from './types.js';\n\n// ============================================================================\n// IMPLEMENTATION - Converts between AX format and Bedrock format\n// ============================================================================\n\ntype ModelFamily = 'claude' | 'gpt' | 'titan';\n\nclass AxAIBedrockImpl\n implements\n AxAIServiceImpl<\n AxAIBedrockModel,\n AxAIBedrockEmbedModel,\n BedrockChatRequest,\n BedrockTitanEmbedRequest,\n BedrockChatResponse,\n never, // No streaming for now\n BedrockTitanEmbedResponse\n >\n{\n private clients: Map<string, BedrockRuntimeClient> = new Map();\n private tokensUsed?: AxTokenUsage;\n\n constructor(\n private config: AxAIBedrockConfig,\n private primaryRegion: string,\n private fallbackRegions: string[],\n private gptRegion: string,\n private gptFallbackRegions: string[]\n ) {}\n\n getTokenUsage(): AxTokenUsage | undefined {\n return this.tokensUsed;\n }\n\n getModelConfig(): AxModelConfig {\n return {\n maxTokens: this.config.maxTokens,\n temperature: this.config.temperature,\n topP: this.config.topP,\n stopSequences: this.config.stopSequences,\n };\n }\n\n private getClient(region: string): BedrockRuntimeClient {\n let client = this.clients.get(region);\n if (!client) {\n client = new BedrockRuntimeClient({ region });\n this.clients.set(region, client);\n }\n return client;\n }\n\n /**\n * Detect model family from model ID\n */\n private getModelFamily(modelId: string): ModelFamily {\n if (\n modelId.includes('anthropic.claude') ||\n modelId.includes('us.anthropic.claude')\n ) {\n return 'claude';\n }\n if 
(modelId.includes('openai.gpt')) {\n return 'gpt';\n }\n if (modelId.includes('amazon.titan-embed')) {\n return 'titan';\n }\n throw new Error(`Unknown model family for: ${modelId}`);\n }\n\n /**\n * Get appropriate regions for model\n */\n private getRegionsForModel(modelId: string): string[] {\n const family = this.getModelFamily(modelId);\n if (family === 'gpt') {\n return [this.gptRegion, ...this.gptFallbackRegions];\n }\n return [this.primaryRegion, ...this.fallbackRegions];\n }\n\n /**\n * Regional failover logic - tries primary region, then fallbacks\n */\n private async invokeWithFailover<T>(\n modelId: string,\n handler: (client: BedrockRuntimeClient) => Promise<T>\n ): Promise<T> {\n const regions = this.getRegionsForModel(modelId);\n let lastError: Error | undefined;\n\n for (const region of regions) {\n try {\n const client = this.getClient(region);\n return await handler(client);\n } catch (error) {\n lastError = error as Error;\n console.warn(\n `[Bedrock] Region ${region} failed for ${modelId}:`,\n error\n );\n }\n }\n\n throw lastError || new Error(`All Bedrock regions failed for ${modelId}`);\n }\n\n /**\n * Transform AX chat request → Bedrock request (Claude or GPT)\n */\n createChatReq = async (\n req: Readonly<AxInternalChatRequest<AxAIBedrockModel>>,\n _config: Readonly<AxAIServiceOptions>\n ): Promise<[AxAPI, BedrockChatRequest]> => {\n const family = this.getModelFamily(req.model);\n const maxTokens =\n req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 4096;\n const temperature = req.modelConfig?.temperature ?? this.config.temperature;\n const topP = req.modelConfig?.topP ?? 
this.config.topP;\n\n let bedrockRequest: BedrockChatRequest;\n\n if (family === 'claude') {\n // Extract system messages for Claude\n const systemMessages = req.chatPrompt\n .filter((msg) => msg.role === 'system')\n .map((msg) => msg.content)\n .join('\\n\\n');\n\n // Convert other messages to Claude format\n const messages = req.chatPrompt\n .filter((msg) => msg.role !== 'system')\n .map((msg) => {\n if (msg.role === 'user') {\n return {\n role: 'user' as const,\n content:\n typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content),\n };\n }\n if (msg.role === 'assistant') {\n return {\n role: 'assistant' as const,\n content: msg.content || '',\n };\n }\n throw new Error(`Unsupported role: ${msg.role}`);\n });\n\n bedrockRequest = {\n anthropic_version: 'bedrock-2023-05-31',\n max_tokens: maxTokens,\n messages,\n ...(systemMessages ? { system: systemMessages } : {}),\n ...(temperature !== undefined ? { temperature } : {}),\n ...(topP !== undefined ? { top_p: topP } : {}),\n } as BedrockClaudeRequest;\n } else if (family === 'gpt') {\n // GPT uses OpenAI-style format with system messages in array\n const messages = req.chatPrompt\n .filter((msg) => msg.role !== 'function') // Skip function messages\n .map((msg) => {\n // Get content based on role\n let content: string;\n if ('content' in msg) {\n content =\n typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content);\n } else {\n content = '';\n }\n\n return {\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n };\n });\n\n bedrockRequest = {\n messages,\n max_tokens: maxTokens,\n ...(temperature !== undefined ? { temperature } : {}),\n ...(topP !== undefined ? 
{ top_p: topP } : {}),\n } as BedrockGptRequest;\n } else {\n throw new Error(`Chat not supported for model family: ${family}`);\n }\n\n // Create API config with local call (uses SDK instead of HTTP)\n const apiConfig: AxAPI = {\n name: `bedrock-${family}`,\n localCall: async <TRequest, TResponse>(data: TRequest) => {\n const reqBody = data as unknown as BedrockChatRequest;\n const result = await this.invokeWithFailover(\n req.model,\n async (client) => {\n const command = new InvokeModelCommand({\n modelId: req.model,\n body: JSON.stringify(reqBody),\n contentType: 'application/json',\n accept: 'application/json',\n });\n const response = await client.send(command);\n return JSON.parse(new TextDecoder().decode(response.body));\n }\n );\n return result as TResponse;\n },\n };\n\n return [apiConfig, bedrockRequest];\n };\n\n /**\n * Transform Bedrock response → AX chat response (Claude or GPT)\n */\n createChatResp(resp: Readonly<BedrockChatResponse>): AxChatResponse {\n // Detect response type\n if ('content' in resp && Array.isArray(resp.content)) {\n // Claude response\n return this.createClaudeChatResp(resp as BedrockClaudeResponse);\n } else if ('choices' in resp && Array.isArray(resp.choices)) {\n // GPT response\n return this.createGptChatResp(resp as BedrockGptResponse);\n }\n throw new Error('Unknown response format');\n }\n\n /**\n * Handle Claude-specific response format\n */\n private createClaudeChatResp(\n resp: Readonly<BedrockClaudeResponse>\n ): AxChatResponse {\n // Extract text content from response\n let content = '';\n for (const block of resp.content) {\n if (block.type === 'text') {\n content += block.text;\n }\n }\n\n // Track token usage for AX's optimizer\n this.tokensUsed = {\n promptTokens: resp.usage.input_tokens,\n completionTokens: resp.usage.output_tokens,\n totalTokens: resp.usage.input_tokens + resp.usage.output_tokens,\n };\n\n // Map finish reason\n let finishReason: AxChatResponseResult['finishReason'];\n switch 
(resp.stop_reason) {\n case 'end_turn':\n case 'stop_sequence':\n finishReason = 'stop';\n break;\n case 'max_tokens':\n finishReason = 'length';\n break;\n default:\n finishReason = undefined;\n }\n\n return {\n results: [\n {\n index: 0,\n id: resp.id,\n content,\n finishReason,\n },\n ],\n remoteId: resp.id,\n };\n }\n\n /**\n * Handle GPT-specific response format\n */\n private createGptChatResp(\n resp: Readonly<BedrockGptResponse>\n ): AxChatResponse {\n const choice = resp.choices[0];\n if (!choice) {\n throw new Error('No choices in GPT response');\n }\n\n // Extract content (can be string or array)\n let content = '';\n if (typeof choice.message.content === 'string') {\n content = choice.message.content;\n } else if (Array.isArray(choice.message.content)) {\n content = choice.message.content\n .map((part) => {\n if (typeof part === 'string') return part;\n return part.text || part.content || '';\n })\n .join('');\n }\n\n // Track token usage if available\n if (resp.usage) {\n this.tokensUsed = {\n promptTokens: resp.usage.prompt_tokens,\n completionTokens: resp.usage.completion_tokens,\n totalTokens: resp.usage.total_tokens,\n };\n }\n\n // Map finish reason\n let finishReason: AxChatResponseResult['finishReason'];\n switch (choice.finish_reason) {\n case 'stop':\n finishReason = 'stop';\n break;\n case 'length':\n finishReason = 'length';\n break;\n case 'content_filter':\n finishReason = 'content_filter';\n break;\n default:\n finishReason = undefined;\n }\n\n return {\n results: [\n {\n index: choice.index,\n id: resp.id,\n content,\n finishReason,\n },\n ],\n remoteId: resp.id,\n };\n }\n\n /**\n * Create embed request for Titan\n */\n createEmbedReq = async (\n req: Readonly<AxInternalEmbedRequest<AxAIBedrockEmbedModel>>\n ): Promise<[AxAPI, BedrockTitanEmbedRequest]> => {\n if (!req.texts || req.texts.length === 0) {\n throw new Error('No texts provided for embedding');\n }\n\n const embedRequest: BedrockTitanEmbedRequest = {\n inputText: 
req.texts[0], // Take first text\n dimensions: 512,\n normalize: true,\n };\n\n const apiConfig: AxAPI = {\n name: 'bedrock-titan-embed',\n localCall: async <TRequest, TResponse>(data: TRequest) => {\n const reqBody = data as unknown as BedrockTitanEmbedRequest;\n const result = await this.invokeWithFailover(\n req.embedModel,\n async (client) => {\n const command = new InvokeModelCommand({\n modelId: req.embedModel,\n body: JSON.stringify(reqBody),\n contentType: 'application/json',\n accept: 'application/json',\n });\n const response = await client.send(command);\n return JSON.parse(new TextDecoder().decode(response.body));\n }\n );\n return result as TResponse;\n },\n };\n\n return [apiConfig, embedRequest];\n };\n\n /**\n * Create embed response from Titan\n */\n createEmbedResp(resp: Readonly<BedrockTitanEmbedResponse>): AxEmbedResponse {\n return {\n embeddings: [resp.embedding],\n };\n }\n}\n\n// ============================================================================\n// PROVIDER CLASS - Main entry point\n// ============================================================================\n\nexport class AxAIBedrock extends AxBaseAI<\n AxAIBedrockModel,\n AxAIBedrockEmbedModel,\n BedrockChatRequest,\n BedrockTitanEmbedRequest,\n BedrockChatResponse,\n never, // No streaming yet\n BedrockTitanEmbedResponse,\n string\n> {\n constructor({\n region = 'us-east-2',\n fallbackRegions = ['us-west-2', 'us-east-1'],\n gptRegion = 'us-west-2',\n gptFallbackRegions = ['us-east-1'],\n config,\n options,\n }: Readonly<{\n region?: string;\n fallbackRegions?: string[];\n gptRegion?: string;\n gptFallbackRegions?: string[];\n config: Readonly<Partial<AxAIBedrockConfig>>;\n options?: Readonly<AxAIServiceOptions>;\n }>) {\n // Merge user config with defaults\n const fullConfig: AxAIBedrockConfig = {\n ...axBaseAIDefaultConfig(),\n model: AxAIBedrockModel.ClaudeSonnet4,\n region,\n fallbackRegions,\n gptRegion,\n gptFallbackRegions,\n ...config,\n };\n\n // Create 
implementation\n const aiImpl = new AxAIBedrockImpl(\n fullConfig,\n region,\n fallbackRegions,\n gptRegion,\n gptFallbackRegions\n );\n\n // Define feature support\n const supportFor = (): AxAIFeatures => ({\n functions: false, // Not implemented yet - add when needed\n streaming: false, // Not implemented yet - add when needed\n functionCot: false,\n hasThinkingBudget: false,\n hasShowThoughts: false,\n media: {\n images: {\n supported: false, // Add when needed\n formats: [],\n },\n audio: {\n supported: false,\n formats: [],\n },\n files: {\n supported: false,\n formats: [],\n uploadMethod: 'none',\n },\n urls: {\n supported: false,\n webSearch: false,\n contextFetching: false,\n },\n },\n caching: {\n supported: false,\n types: [],\n },\n thinking: false,\n multiTurn: true, // All models support multi-turn conversations\n });\n\n // Initialize base class\n super(aiImpl, {\n name: 'Bedrock',\n apiURL: '', // Not used - we use SDK directly\n headers: async () => ({}), // AWS SDK handles auth\n modelInfo: axModelInfoBedrock,\n defaults: {\n model: fullConfig.model,\n embedModel: fullConfig.embedModel,\n },\n options,\n supportFor,\n });\n }\n}\n\nexport type { AxAIBedrockConfig } from './types.js';\n// Re-export types for convenience\nexport { AxAIBedrockEmbedModel, AxAIBedrockModel } from './types.js';\n","/**\n * AWS Bedrock provider types for AX integration\n * Supports Claude, GPT OSS, and Titan models\n */\n\nimport type { AxModelConfig } from '@ax-llm/ax';\n\n// All Bedrock models\nexport enum AxAIBedrockModel {\n // Claude models\n ClaudeSonnet4 = 'us.anthropic.claude-sonnet-4-20250514-v1:0',\n Claude37Sonnet = 'anthropic.claude-3-7-sonnet-20250219-v1:0',\n Claude35Sonnet = 'anthropic.claude-3-5-sonnet-20240620-v1:0',\n\n // GPT OSS models\n GptOss120B = 'openai.gpt-oss-120b-1:0',\n GptOss20B = 'openai.gpt-oss-20b-1:0',\n}\n\n// Embed models\nexport enum AxAIBedrockEmbedModel {\n TitanEmbedV2 = 'amazon.titan-embed-text-v2:0',\n}\n\nexport interface 
AxAIBedrockConfig extends AxModelConfig {\n model: AxAIBedrockModel;\n embedModel?: AxAIBedrockEmbedModel;\n region?: string;\n fallbackRegions?: string[];\n gptRegion?: string;\n gptFallbackRegions?: string[];\n}\n\n// ============================================================================\n// Claude Request/Response Types\n// ============================================================================\n\nexport interface BedrockClaudeRequest {\n anthropic_version: string;\n max_tokens: number;\n messages: Array<{\n role: 'user' | 'assistant';\n content: string | Array<{ type: 'text'; text: string }>;\n }>;\n system?: string;\n temperature?: number;\n top_p?: number;\n}\n\nexport interface BedrockClaudeResponse {\n id: string;\n type: 'message';\n role: 'assistant';\n content: Array<{ type: 'text'; text: string }>;\n model: string;\n stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence';\n usage: {\n input_tokens: number;\n output_tokens: number;\n };\n}\n\n// ============================================================================\n// GPT OSS Request/Response Types (OpenAI-compatible format)\n// ============================================================================\n\nexport interface BedrockGptRequest {\n messages: Array<{\n role: 'system' | 'user' | 'assistant';\n content: string;\n }>;\n max_tokens: number;\n temperature?: number;\n top_p?: number;\n}\n\nexport interface BedrockGptResponse {\n id?: string;\n choices: Array<{\n index: number;\n message: {\n role: 'assistant';\n content:\n | string\n | Array<{ type?: string; text?: string; content?: string }>;\n };\n finish_reason: 'stop' | 'length' | 'content_filter' | null;\n }>;\n usage?: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n}\n\n// ============================================================================\n// Titan Embed Request/Response Types\n// ============================================================================\n\nexport 
interface BedrockTitanEmbedRequest {\n inputText: string;\n dimensions?: number;\n normalize?: boolean;\n}\n\nexport interface BedrockTitanEmbedResponse {\n embedding: number[];\n inputTextTokenCount: number;\n}\n\n// Union types for all models\nexport type BedrockChatRequest = BedrockClaudeRequest | BedrockGptRequest;\nexport type BedrockChatResponse = BedrockClaudeResponse | BedrockGptResponse;\n","/**\n * Bedrock model information (pricing, limits, features)\n */\n\nimport type { AxModelInfo } from '@ax-llm/ax';\nimport { AxAIBedrockEmbedModel, AxAIBedrockModel } from './types.js';\n\nexport const axModelInfoBedrock: AxModelInfo[] = [\n // ========================================================================\n // Claude Models\n // ========================================================================\n {\n name: AxAIBedrockModel.ClaudeSonnet4,\n currency: 'usd',\n promptTokenCostPer1M: 3.0,\n completionTokenCostPer1M: 15.0,\n maxTokens: 64000,\n contextWindow: 200000,\n supported: { thinkingBudget: true, showThoughts: true },\n },\n {\n name: AxAIBedrockModel.Claude37Sonnet,\n currency: 'usd',\n promptTokenCostPer1M: 3.0,\n completionTokenCostPer1M: 15.0,\n maxTokens: 64000,\n contextWindow: 200000,\n },\n {\n name: AxAIBedrockModel.Claude35Sonnet,\n currency: 'usd',\n promptTokenCostPer1M: 3.0,\n completionTokenCostPer1M: 15.0,\n maxTokens: 8192,\n contextWindow: 200000,\n },\n\n // ========================================================================\n // GPT OSS Models\n // ========================================================================\n {\n name: AxAIBedrockModel.GptOss120B,\n currency: 'usd',\n promptTokenCostPer1M: 0.5,\n completionTokenCostPer1M: 1.5,\n maxTokens: 16384,\n contextWindow: 128000,\n },\n {\n name: AxAIBedrockModel.GptOss20B,\n currency: 'usd',\n promptTokenCostPer1M: 0.25,\n completionTokenCostPer1M: 0.75,\n maxTokens: 16384,\n contextWindow: 128000,\n },\n\n // 
========================================================================\n // Embed Models\n // ========================================================================\n {\n name: AxAIBedrockEmbedModel.TitanEmbedV2,\n currency: 'usd',\n promptTokenCostPer1M: 0.02,\n completionTokenCostPer1M: 0,\n maxTokens: 8192,\n contextWindow: 8192,\n },\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACiBA,oCAGO;AAaP,gBAAmE;;;ACzB5D,IAAK,mBAAL,kBAAKA,sBAAL;AAEL,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,oBAAiB;AACjB,EAAAA,kBAAA,oBAAiB;AAGjB,EAAAA,kBAAA,gBAAa;AACb,EAAAA,kBAAA,eAAY;AARF,SAAAA;AAAA,GAAA;AAYL,IAAK,wBAAL,kBAAKC,2BAAL;AACL,EAAAA,uBAAA,kBAAe;AADL,SAAAA;AAAA,GAAA;;;ACbL,IAAM,qBAAoC;AAAA;AAAA;AAAA;AAAA,EAI/C;AAAA,IACE;AAAA,IACA,UAAU;AAAA,IACV,sBAAsB;AAAA,IACtB,0BAA0B;AAAA,IAC1B,WAAW;AAAA,IACX,eAAe;AAAA,IACf,WAAW,EAAE,gBAAgB,MAAM,cAAc,KAAK;AAAA,EACxD;AAAA,EACA;AAAA,IACE;AAAA,IACA,UAAU;AAAA,IACV,sBAAsB;AAAA,IACtB,0BAA0B;AAAA,IAC1B,WAAW;AAAA,IACX,eAAe;AAAA,EACjB;AAAA,EACA;AAAA,IACE;AAAA,IACA,UAAU;AAAA,IACV,sBAAsB;AAAA,IACtB,0BAA0B;AAAA,IAC1B,WAAW;AAAA,IACX,eAAe;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,IACE;AAAA,IACA,UAAU;AAAA,IACV,sBAAsB;AAAA,IACtB,0BAA0B;AAAA,IAC1B,WAAW;AAAA,IACX,eAAe;AAAA,EACjB;AAAA,EACA;AAAA,IACE;AAAA,IACA,UAAU;AAAA,IACV,sBAAsB;AAAA,IACtB,0BAA0B;AAAA,IAC1B,WAAW;AAAA,IACX,eAAe;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,IACE;AAAA,IACA,UAAU;AAAA,IACV,sBAAsB;AAAA,IACtB,0BAA0B;AAAA,IAC1B,WAAW;AAAA,IACX,eAAe;AAAA,EACjB;AACF;;;AFbA,IAAM,kBAAN,MAWA;AAAA,EAIE,YACU,QACA,eACA,iBACA,WACA,oBACR;AALQ;AACA;AACA;AACA;AACA;AAAA,EACP;AAAA,EATK,UAA6C,oBAAI,IAAI;AAAA,EACrD;AAAA,EAUR,gBAA0C;AACxC,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,iBAAgC;AAC9B,WAAO;AAAA,MACL,WAAW,KAAK,OAAO;AAAA,MACvB,aAAa,KAAK,OAAO;AAAA,MACzB,MAAM,KAAK,OAAO;AAAA,MAClB,eAAe,KAAK,OAAO;AAAA,IAC7B;AAAA,EACF;AAAA,EAEQ,UAAU,QAAsC;AACtD,QAAI,SAAS,KAAK,QAAQ,IAAI,MAAM;AACpC,QAAI,CAAC,QAAQ;AACX,eAAS,IAAI,mDAAqB,EAAE,OAAO,CAAC;AAC5C,WAAK,QAAQ,IAAI,QAAQ,MAAM;AAAA,IACjC;AACA,WAAO;AAAA,EACT;AAAA;AAAA
;AAAA;AAAA,EAKQ,eAAe,SAA8B;AACnD,QACE,QAAQ,SAAS,kBAAkB,KACnC,QAAQ,SAAS,qBAAqB,GACtC;AACA,aAAO;AAAA,IACT;AACA,QAAI,QAAQ,SAAS,YAAY,GAAG;AAClC,aAAO;AAAA,IACT;AACA,QAAI,QAAQ,SAAS,oBAAoB,GAAG;AAC1C,aAAO;AAAA,IACT;AACA,UAAM,IAAI,MAAM,6BAA6B,OAAO,EAAE;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,SAA2B;AACpD,UAAM,SAAS,KAAK,eAAe,OAAO;AAC1C,QAAI,WAAW,OAAO;AACpB,aAAO,CAAC,KAAK,WAAW,GAAG,KAAK,kBAAkB;AAAA,IACpD;AACA,WAAO,CAAC,KAAK,eAAe,GAAG,KAAK,eAAe;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,mBACZ,SACA,SACY;AACZ,UAAM,UAAU,KAAK,mBAAmB,OAAO;AAC/C,QAAI;AAEJ,eAAW,UAAU,SAAS;AAC5B,UAAI;AACF,cAAM,SAAS,KAAK,UAAU,MAAM;AACpC,eAAO,MAAM,QAAQ,MAAM;AAAA,MAC7B,SAAS,OAAO;AACd,oBAAY;AACZ,gBAAQ;AAAA,UACN,oBAAoB,MAAM,eAAe,OAAO;AAAA,UAChD;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAEA,UAAM,aAAa,IAAI,MAAM,kCAAkC,OAAO,EAAE;AAAA,EAC1E;AAAA;AAAA;AAAA;AAAA,EAKA,gBAAgB,OACd,KACA,YACyC;AACzC,UAAM,SAAS,KAAK,eAAe,IAAI,KAAK;AAC5C,UAAM,YACJ,IAAI,aAAa,aAAa,KAAK,OAAO,aAAa;AACzD,UAAM,cAAc,IAAI,aAAa,eAAe,KAAK,OAAO;AAChE,UAAM,OAAO,IAAI,aAAa,QAAQ,KAAK,OAAO;AAElD,QAAI;AAEJ,QAAI,WAAW,UAAU;AAEvB,YAAM,iBAAiB,IAAI,WACxB,OAAO,CAAC,QAAQ,IAAI,SAAS,QAAQ,EACrC,IAAI,CAAC,QAAQ,IAAI,OAAO,EACxB,KAAK,MAAM;AAGd,YAAM,WAAW,IAAI,WAClB,OAAO,CAAC,QAAQ,IAAI,SAAS,QAAQ,EACrC,IAAI,CAAC,QAAQ;AACZ,YAAI,IAAI,SAAS,QAAQ;AACvB,iBAAO;AAAA,YACL,MAAM;AAAA,YACN,SACE,OAAO,IAAI,YAAY,WACnB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO;AAAA,UAClC;AAAA,QACF;AACA,YAAI,IAAI,SAAS,aAAa;AAC5B,iBAAO;AAAA,YACL,MAAM;AAAA,YACN,SAAS,IAAI,WAAW;AAAA,UAC1B;AAAA,QACF;AACA,cAAM,IAAI,MAAM,qBAAqB,IAAI,IAAI,EAAE;AAAA,MACjD,CAAC;AAEH,uBAAiB;AAAA,QACf,mBAAmB;AAAA,QACnB,YAAY;AAAA,QACZ;AAAA,QACA,GAAI,iBAAiB,EAAE,QAAQ,eAAe,IAAI,CAAC;AAAA,QACnD,GAAI,gBAAgB,SAAY,EAAE,YAAY,IAAI,CAAC;AAAA,QACnD,GAAI,SAAS,SAAY,EAAE,OAAO,KAAK,IAAI,CAAC;AAAA,MAC9C;AAAA,IACF,WAAW,WAAW,OAAO;AAE3B,YAAM,WAAW,IAAI,WAClB,OAAO,CAAC,QAAQ,IAAI,SAAS,UAAU,EACvC,IAAI,CAAC,QAAQ;AAEZ,YAAI;AACJ,YAAI,aAAa,KAAK;AACpB,oBACE,OAAO,IAAI,YAAY,WACnB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO;AAAA,QAClC,OAAO;AACL,oBAAU;AAAA,QACZ;AAEA,eAAO;AAAA,UACL,MAAM,IAAI;AAAA,UAC
V;AAAA,QACF;AAAA,MACF,CAAC;AAEH,uBAAiB;AAAA,QACf;AAAA,QACA,YAAY;AAAA,QACZ,GAAI,gBAAgB,SAAY,EAAE,YAAY,IAAI,CAAC;AAAA,QACnD,GAAI,SAAS,SAAY,EAAE,OAAO,KAAK,IAAI,CAAC;AAAA,MAC9C;AAAA,IACF,OAAO;AACL,YAAM,IAAI,MAAM,wCAAwC,MAAM,EAAE;AAAA,IAClE;AAGA,UAAM,YAAmB;AAAA,MACvB,MAAM,WAAW,MAAM;AAAA,MACvB,WAAW,OAA4B,SAAmB;AACxD,cAAM,UAAU;AAChB,cAAM,SAAS,MAAM,KAAK;AAAA,UACxB,IAAI;AAAA,UACJ,OAAO,WAAW;AAChB,kBAAM,UAAU,IAAI,iDAAmB;AAAA,cACrC,SAAS,IAAI;AAAA,cACb,MAAM,KAAK,UAAU,OAAO;AAAA,cAC5B,aAAa;AAAA,cACb,QAAQ;AAAA,YACV,CAAC;AACD,kBAAM,WAAW,MAAM,OAAO,KAAK,OAAO;AAC1C,mBAAO,KAAK,MAAM,IAAI,YAAY,EAAE,OAAO,SAAS,IAAI,CAAC;AAAA,UAC3D;AAAA,QACF;AACA,eAAO;AAAA,MACT;AAAA,IACF;AAEA,WAAO,CAAC,WAAW,cAAc;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA,EAKA,eAAe,MAAqD;AAElE,QAAI,aAAa,QAAQ,MAAM,QAAQ,KAAK,OAAO,GAAG;AAEpD,aAAO,KAAK,qBAAqB,IAA6B;AAAA,IAChE,WAAW,aAAa,QAAQ,MAAM,QAAQ,KAAK,OAAO,GAAG;AAE3D,aAAO,KAAK,kBAAkB,IAA0B;AAAA,IAC1D;AACA,UAAM,IAAI,MAAM,yBAAyB;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA,EAKQ,qBACN,MACgB;AAEhB,QAAI,UAAU;AACd,eAAW,SAAS,KAAK,SAAS;AAChC,UAAI,MAAM,SAAS,QAAQ;AACzB,mBAAW,MAAM;AAAA,MACnB;AAAA,IACF;AAGA,SAAK,aAAa;AAAA,MAChB,cAAc,KAAK,MAAM;AAAA,MACzB,kBAAkB,KAAK,MAAM;AAAA,MAC7B,aAAa,KAAK,MAAM,eAAe,KAAK,MAAM;AAAA,IACpD;AAGA,QAAI;AACJ,YAAQ,KAAK,aAAa;AAAA,MACxB,KAAK;AAAA,MACL,KAAK;AACH,uBAAe;AACf;AAAA,MACF,KAAK;AACH,uBAAe;AACf;AAAA,MACF;AACE,uBAAe;AAAA,IACnB;AAEA,WAAO;AAAA,MACL,SAAS;AAAA,QACP;AAAA,UACE,OAAO;AAAA,UACP,IAAI,KAAK;AAAA,UACT;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAAA,MACA,UAAU,KAAK;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBACN,MACgB;AAChB,UAAM,SAAS,KAAK,QAAQ,CAAC;AAC7B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,4BAA4B;AAAA,IAC9C;AAGA,QAAI,UAAU;AACd,QAAI,OAAO,OAAO,QAAQ,YAAY,UAAU;AAC9C,gBAAU,OAAO,QAAQ;AAAA,IAC3B,WAAW,MAAM,QAAQ,OAAO,QAAQ,OAAO,GAAG;AAChD,gBAAU,OAAO,QAAQ,QACtB,IAAI,CAAC,SAAS;AACb,YAAI,OAAO,SAAS,SAAU,QAAO;AACrC,eAAO,KAAK,QAAQ,KAAK,WAAW;AAAA,MACtC,CAAC,EACA,KAAK,EAAE;AAAA,IACZ;AAGA,QAAI,KAAK,OAAO;AACd,WAAK,aAAa;AAAA,QAChB,cAAc,KAAK,MAAM;AAAA,QACzB,kBAAkB,KAAK,MAAM;AAAA,QAC7B,aAAa,KAAK,MAAM;AAAA,MAC1B;
AAAA,IACF;AAGA,QAAI;AACJ,YAAQ,OAAO,eAAe;AAAA,MAC5B,KAAK;AACH,uBAAe;AACf;AAAA,MACF,KAAK;AACH,uBAAe;AACf;AAAA,MACF,KAAK;AACH,uBAAe;AACf;AAAA,MACF;AACE,uBAAe;AAAA,IACnB;AAEA,WAAO;AAAA,MACL,SAAS;AAAA,QACP;AAAA,UACE,OAAO,OAAO;AAAA,UACd,IAAI,KAAK;AAAA,UACT;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAAA,MACA,UAAU,KAAK;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,iBAAiB,OACf,QAC+C;AAC/C,QAAI,CAAC,IAAI,SAAS,IAAI,MAAM,WAAW,GAAG;AACxC,YAAM,IAAI,MAAM,iCAAiC;AAAA,IACnD;AAEA,UAAM,eAAyC;AAAA,MAC7C,WAAW,IAAI,MAAM,CAAC;AAAA;AAAA,MACtB,YAAY;AAAA,MACZ,WAAW;AAAA,IACb;AAEA,UAAM,YAAmB;AAAA,MACvB,MAAM;AAAA,MACN,WAAW,OAA4B,SAAmB;AACxD,cAAM,UAAU;AAChB,cAAM,SAAS,MAAM,KAAK;AAAA,UACxB,IAAI;AAAA,UACJ,OAAO,WAAW;AAChB,kBAAM,UAAU,IAAI,iDAAmB;AAAA,cACrC,SAAS,IAAI;AAAA,cACb,MAAM,KAAK,UAAU,OAAO;AAAA,cAC5B,aAAa;AAAA,cACb,QAAQ;AAAA,YACV,CAAC;AACD,kBAAM,WAAW,MAAM,OAAO,KAAK,OAAO;AAC1C,mBAAO,KAAK,MAAM,IAAI,YAAY,EAAE,OAAO,SAAS,IAAI,CAAC;AAAA,UAC3D;AAAA,QACF;AACA,eAAO;AAAA,MACT;AAAA,IACF;AAEA,WAAO,CAAC,WAAW,YAAY;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA,EAKA,gBAAgB,MAA4D;AAC1E,WAAO;AAAA,MACL,YAAY,CAAC,KAAK,SAAS;AAAA,IAC7B;AAAA,EACF;AACF;AAMO,IAAM,cAAN,cAA0B,mBAS/B;AAAA,EACA,YAAY;AAAA,IACV,SAAS;AAAA,IACT,kBAAkB,CAAC,aAAa,WAAW;AAAA,IAC3C,YAAY;AAAA,IACZ,qBAAqB,CAAC,WAAW;AAAA,IACjC;AAAA,IACA;AAAA,EACF,GAOI;AAEF,UAAM,aAAgC;AAAA,MACpC,OAAG,iCAAsB;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,GAAG;AAAA,IACL;AAGA,UAAM,SAAS,IAAI;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAGA,UAAM,aAAa,OAAqB;AAAA,MACtC,WAAW;AAAA;AAAA,MACX,WAAW;AAAA;AAAA,MACX,aAAa;AAAA,MACb,mBAAmB;AAAA,MACnB,iBAAiB;AAAA,MACjB,OAAO;AAAA,QACL,QAAQ;AAAA,UACN,WAAW;AAAA;AAAA,UACX,SAAS,CAAC;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,UACL,WAAW;AAAA,UACX,SAAS,CAAC;AAAA,QACZ;AAAA,QACA,OAAO;AAAA,UACL,WAAW;AAAA,UACX,SAAS,CAAC;AAAA,UACV,cAAc;AAAA,QAChB;AAAA,QACA,MAAM;AAAA,UACJ,WAAW;AAAA,UACX,WAAW;AAAA,UACX,iBAAiB;AAAA,QACnB;AAAA,MACF;AAAA,MACA,SAAS;AAAA,QACP,WAAW;AAAA,QACX,OAAO,CAAC;AAAA,MACV;AAAA,MACA,UAAU;AAAA,MACV,WAAW;AAAA;AAAA,IACb;AAGA,UAAM,QAAQ;AAAA,MACZ,MA
AM;AAAA,MACN,QAAQ;AAAA;AAAA,MACR,SAAS,aAAa,CAAC;AAAA;AAAA,MACvB,WAAW;AAAA,MACX,UAAU;AAAA,QACR,OAAO,WAAW;AAAA,QAClB,YAAY,WAAW;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,IACF,CAAC;AAAA,EACH;AACF;","names":["AxAIBedrockModel","AxAIBedrockEmbedModel"]}
package/index.d.cts ADDED
@@ -0,0 +1,131 @@
1
+ import { AxModelConfig, AxBaseAI, AxAIServiceOptions, AxModelInfo } from '@ax-llm/ax';
2
+
3
+ /**
4
+ * AWS Bedrock provider types for AX integration
5
+ * Supports Claude, GPT OSS, and Titan models
6
+ */
7
+
8
+ declare enum AxAIBedrockModel {
9
+ ClaudeSonnet4 = "us.anthropic.claude-sonnet-4-20250514-v1:0",
10
+ Claude37Sonnet = "anthropic.claude-3-7-sonnet-20250219-v1:0",
11
+ Claude35Sonnet = "anthropic.claude-3-5-sonnet-20240620-v1:0",
12
+ GptOss120B = "openai.gpt-oss-120b-1:0",
13
+ GptOss20B = "openai.gpt-oss-20b-1:0"
14
+ }
15
+ declare enum AxAIBedrockEmbedModel {
16
+ TitanEmbedV2 = "amazon.titan-embed-text-v2:0"
17
+ }
18
+ interface AxAIBedrockConfig extends AxModelConfig {
19
+ model: AxAIBedrockModel;
20
+ embedModel?: AxAIBedrockEmbedModel;
21
+ region?: string;
22
+ fallbackRegions?: string[];
23
+ gptRegion?: string;
24
+ gptFallbackRegions?: string[];
25
+ }
26
+ interface BedrockClaudeRequest {
27
+ anthropic_version: string;
28
+ max_tokens: number;
29
+ messages: Array<{
30
+ role: 'user' | 'assistant';
31
+ content: string | Array<{
32
+ type: 'text';
33
+ text: string;
34
+ }>;
35
+ }>;
36
+ system?: string;
37
+ temperature?: number;
38
+ top_p?: number;
39
+ }
40
+ interface BedrockClaudeResponse {
41
+ id: string;
42
+ type: 'message';
43
+ role: 'assistant';
44
+ content: Array<{
45
+ type: 'text';
46
+ text: string;
47
+ }>;
48
+ model: string;
49
+ stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence';
50
+ usage: {
51
+ input_tokens: number;
52
+ output_tokens: number;
53
+ };
54
+ }
55
+ interface BedrockGptRequest {
56
+ messages: Array<{
57
+ role: 'system' | 'user' | 'assistant';
58
+ content: string;
59
+ }>;
60
+ max_tokens: number;
61
+ temperature?: number;
62
+ top_p?: number;
63
+ }
64
+ interface BedrockGptResponse {
65
+ id?: string;
66
+ choices: Array<{
67
+ index: number;
68
+ message: {
69
+ role: 'assistant';
70
+ content: string | Array<{
71
+ type?: string;
72
+ text?: string;
73
+ content?: string;
74
+ }>;
75
+ };
76
+ finish_reason: 'stop' | 'length' | 'content_filter' | null;
77
+ }>;
78
+ usage?: {
79
+ prompt_tokens: number;
80
+ completion_tokens: number;
81
+ total_tokens: number;
82
+ };
83
+ }
84
+ interface BedrockTitanEmbedRequest {
85
+ inputText: string;
86
+ dimensions?: number;
87
+ normalize?: boolean;
88
+ }
89
+ interface BedrockTitanEmbedResponse {
90
+ embedding: number[];
91
+ inputTextTokenCount: number;
92
+ }
93
+ type BedrockChatRequest = BedrockClaudeRequest | BedrockGptRequest;
94
+ type BedrockChatResponse = BedrockClaudeResponse | BedrockGptResponse;
95
+
96
+ /**
97
+ * AWS Bedrock Provider for AX Library
98
+ *
99
+ * Supports Claude, GPT OSS, and Titan Embed models for use with AX's
100
+ * compiler, signatures, flows, and optimization features.
101
+ *
102
+ * Usage:
103
+ * const ai = new AxAIBedrock({
104
+ * region: 'us-east-2',
105
+ * config: { model: AxAIBedrockModel.ClaudeSonnet4 }
106
+ * });
107
+ *
108
+ * const sig = new AxSignature('input -> output');
109
+ * const gen = new AxGen(sig, { ai });
110
+ * const result = await gen.forward({ input: 'test' });
111
+ */
112
+
113
+ declare class AxAIBedrock extends AxBaseAI<AxAIBedrockModel, AxAIBedrockEmbedModel, BedrockChatRequest, BedrockTitanEmbedRequest, BedrockChatResponse, never, // No streaming yet
114
+ BedrockTitanEmbedResponse, string> {
115
+ constructor({ region, fallbackRegions, gptRegion, gptFallbackRegions, config, options, }: Readonly<{
116
+ region?: string;
117
+ fallbackRegions?: string[];
118
+ gptRegion?: string;
119
+ gptFallbackRegions?: string[];
120
+ config: Readonly<Partial<AxAIBedrockConfig>>;
121
+ options?: Readonly<AxAIServiceOptions>;
122
+ }>);
123
+ }
124
+
125
+ /**
126
+ * Bedrock model information (pricing, limits, features)
127
+ */
128
+
129
+ declare const axModelInfoBedrock: AxModelInfo[];
130
+
131
+ export { AxAIBedrock, type AxAIBedrockConfig, AxAIBedrockEmbedModel, AxAIBedrockModel, type BedrockChatRequest, type BedrockChatResponse, type BedrockClaudeRequest, type BedrockClaudeResponse, type BedrockGptRequest, type BedrockGptResponse, type BedrockTitanEmbedRequest, type BedrockTitanEmbedResponse, axModelInfoBedrock };