@genui-a3/providers 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bedrock/index.cjs +232 -0
- package/dist/bedrock/index.cjs.map +1 -0
- package/dist/bedrock/index.d.cts +31 -0
- package/dist/bedrock/index.d.ts +31 -0
- package/dist/bedrock/index.js +230 -0
- package/dist/bedrock/index.js.map +1 -0
- package/dist/openai/index.cjs +249 -0
- package/dist/openai/index.cjs.map +1 -0
- package/dist/openai/index.d.cts +38 -0
- package/dist/openai/index.d.ts +38 -0
- package/dist/openai/index.js +243 -0
- package/dist/openai/index.js.map +1 -0
- package/package.json +54 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
var clientBedrockRuntime = require('@aws-sdk/client-bedrock-runtime');
|
|
4
|
+
var client = require('@ag-ui/client');
|
|
5
|
+
|
|
6
|
+
// bedrock/index.ts
|
|
7
|
+
|
|
8
|
+
// bedrock/messageMerger.ts
|
|
9
|
+
/**
 * Converts provider messages to Bedrock format, collapsing consecutive
 * same-role messages into a single message with multiple content blocks
 * (Bedrock requires alternating user/assistant roles).
 * Messages with empty/absent content are dropped.
 */
function mergeSequentialMessages(messages) {
  if (messages.length === 0) return [];
  const merged = [];
  let pending = null;
  for (const msg of messages) {
    // Empty content cannot form a Bedrock content block.
    if (!msg.content) continue;
    const block = { text: msg.content };
    if (pending && pending.role === msg.role && pending.content) {
      // Same role as the message being accumulated: append another block.
      pending.content.push(block);
      continue;
    }
    // Role changed (or first message): flush and start a new accumulator.
    if (pending) merged.push(pending);
    pending = { role: msg.role, content: [block] };
  }
  if (pending) merged.push(pending);
  return merged;
}
|
|
35
|
+
/**
 * Translates a raw Bedrock Converse event stream into provider stream events:
 * text deltas are yielded immediately; tool-use input is buffered until the
 * block ends, then parsed/validated via `parseToolCall`. Unrecognized frames
 * are checked for Bedrock error payloads and thrown.
 */
async function* processBedrockStream(rawStream, agentId, schema) {
  let currentType = null; // "text" | "toolUse" | null
  let toolJson = "";
  for await (const event of rawStream) {
    if (event.contentBlockStart) {
      // A new content block begins; reset the buffer for tool-use blocks.
      if (event.contentBlockStart.start?.toolUse) {
        currentType = "toolUse";
        toolJson = "";
      } else {
        currentType = "text";
      }
    } else if (event.contentBlockDelta) {
      const delta = event.contentBlockDelta.delta;
      if (!delta) continue;
      const typeUnknown = currentType === null;
      if (delta.text && (typeUnknown || currentType === "text")) {
        // Bedrock may omit contentBlockStart for the first text block,
        // so infer the block type from the delta itself.
        currentType = "text";
        yield { type: client.EventType.TEXT_MESSAGE_CONTENT, messageId: "", delta: delta.text, agentId };
      } else if (delta.toolUse && (typeUnknown || currentType === "toolUse")) {
        currentType = "toolUse";
        toolJson += delta.toolUse.input ?? "";
      }
    } else if (event.contentBlockStop) {
      // Tool input is complete: parse, validate, and emit the result event.
      if (currentType === "toolUse" && toolJson) {
        yield parseToolCall(toolJson, schema, agentId);
        toolJson = "";
      }
      currentType = null;
    } else if (!event.metadata) {
      // Anything other than metadata at this point is an error frame.
      throwIfStreamError(event);
    }
  }
}
|
|
74
|
+
/**
 * Parses the buffered tool-call JSON and validates it against the schema.
 * Returns a TOOL_CALL_RESULT event on success, or a RUN_ERROR event when
 * JSON.parse or schema validation throws (never propagates the exception).
 */
function parseToolCall(toolInputBuffer, schema, agentId) {
  try {
    const validated = schema.parse(JSON.parse(toolInputBuffer));
    return {
      type: client.EventType.TOOL_CALL_RESULT,
      toolCallId: "",
      messageId: "",
      content: JSON.stringify(validated),
      agentId
    };
  } catch (err) {
    return {
      type: client.EventType.RUN_ERROR,
      message: `Tool call parse/validation failed: ${err.message}`,
      agentId
    };
  }
}
|
|
93
|
+
/**
 * Throws a descriptive Error if the stream event carries any of the known
 * Bedrock exception payloads; returns silently otherwise. Checks preserve
 * the original precedence order.
 */
function throwIfStreamError(event) {
  const errorLabels = [
    ["internalServerException", "Bedrock internal error"],
    ["modelStreamErrorException", "Bedrock model stream error"],
    ["throttlingException", "Bedrock throttling"],
    ["validationException", "Bedrock validation error"],
    ["serviceUnavailableException", "Bedrock service unavailable"]
  ];
  for (const [key, label] of errorLabels) {
    const exception = event[key];
    if (exception) {
      throw new Error(`${label}: ${exception.message}`);
    }
  }
}
|
|
110
|
+
|
|
111
|
+
// utils/executeWithFallback.ts
|
|
112
|
+
/**
 * Runs `action` against each model in priority order, returning the first
 * successful result. The error from the last model is rethrown if every
 * model fails; an empty model list raises "All models failed".
 */
async function executeWithFallback(models, action) {
  const lastIndex = models.length - 1;
  for (const [index, model] of models.entries()) {
    try {
      return await action(model);
    } catch (error) {
      // Only the final model's failure is surfaced; earlier ones fall through.
      if (index === lastIndex) {
        throw error;
      }
    }
  }
  // Reached only when `models` is empty.
  throw new Error("All models failed");
}
|
|
126
|
+
|
|
127
|
+
// bedrock/index.ts
|
|
128
|
+
var RESPONSE_FORMAT_INSTRUCTIONS = `
|
|
129
|
+
|
|
130
|
+
# RESPONSE FORMAT \u2014 MANDATORY
|
|
131
|
+
|
|
132
|
+
<<CRITICAL INSTRUCTION>>
|
|
133
|
+
You MUST ALWAYS output plain text FIRST, then call the structuredResponse tool SECOND. NEVER call the tool without writing text first. This is non-negotiable.
|
|
134
|
+
<</CRITICAL INSTRUCTION>>
|
|
135
|
+
|
|
136
|
+
Your response MUST have exactly two parts in this order:
|
|
137
|
+
|
|
138
|
+
PART 1 \u2014 TEXT: Write your full conversational reply as plain text. This text is streamed to the user in real-time. Do not skip this.
|
|
139
|
+
|
|
140
|
+
PART 2 \u2014 TOOL CALL: After the text, call the \`structuredResponse\` tool with the JSON payload. The \`chatbotMessage\` field MUST contain the same text you wrote in Part 1.
|
|
141
|
+
|
|
142
|
+
If you call the tool without writing text first, the response will be broken.`;
|
|
143
|
+
/**
 * Builds the Converse/ConverseStream command input: system prompt, merged
 * messages, and a single `structuredResponse` tool. Streaming requests use
 * toolChoice `auto` (text may precede the tool call); non-streaming requests
 * force a tool call with `any`.
 */
function getCommandInput(params, isStream) {
  const streamingDescription =
    "Submit your structured response data. IMPORTANT: You MUST write your full text reply BEFORE calling this tool. Never call this tool as your first action.";
  const nonStreamingDescription = "A tool to generate a structured response";
  const toolSpec = {
    name: "structuredResponse",
    description: isStream ? streamingDescription : nonStreamingDescription,
    inputSchema: params.inputSchema
  };
  return {
    modelId: params.modelId,
    system: [{ text: params.systemPrompt }],
    messages: params.mergedMessages,
    toolConfig: {
      tools: [{ toolSpec }],
      toolChoice: isStream ? { auto: {} } : { any: {} }
    }
  };
}
|
|
162
|
+
/**
 * Sends a non-streaming Converse request and extracts the structured tool
 * response. Throws if the reply contains no tool-use block shaped like
 * { conversationPayload: object, chatbotMessage: string }.
 */
async function sendWithModel(client, params) {
  const response = await client.send(
    new clientBedrockRuntime.ConverseCommand(getCommandInput(params, false))
  );
  const blocks = response.output?.message?.content ?? [];
  const result = blocks.find((block) => block.toolUse)?.toolUse?.input;
  // Shape check on the tool payload before serializing it back to the caller.
  const hasExpectedShape =
    result &&
    typeof result === "object" &&
    "conversationPayload" in result &&
    "chatbotMessage" in result &&
    typeof result.conversationPayload === "object" &&
    typeof result.chatbotMessage === "string";
  if (!hasExpectedShape) {
    throw new Error("Bedrock returned invalid tool response");
  }
  let usage;
  if (response.usage) {
    const { inputTokens, outputTokens } = response.usage;
    usage = {
      inputTokens,
      outputTokens,
      totalTokens: (inputTokens ?? 0) + (outputTokens ?? 0)
    };
  }
  return { content: JSON.stringify(result), usage };
}
|
|
181
|
+
/**
 * Issues a streaming Converse request and returns the raw event stream.
 * Throws if Bedrock responds without a stream.
 */
async function sendStreamWithModel(client, params) {
  const command = new clientBedrockRuntime.ConverseStreamCommand(getCommandInput(params, true));
  const { stream } = await client.send(command);
  if (!stream) {
    throw new Error("No stream returned from Bedrock");
  }
  return stream;
}
|
|
189
|
+
/**
 * Normalizes a provider request into Bedrock command parts: prepends the
 * mandatory response-format instructions to the system prompt, converts the
 * response schema to a JSON tool schema, and merges the message history.
 */
function prepareRequest(request) {
  const systemPrompt = `${RESPONSE_FORMAT_INSTRUCTIONS}\n\n${request.systemPrompt}`;
  const inputSchema = { json: request.responseSchema.toJSONSchema() };
  // Bedrock conversations must open with a user turn; prepend a stub greeting
  // (mergeSequentialMessages folds it into the first real user message).
  const mergedMessages = mergeSequentialMessages([
    { role: "user", content: "Hi\n" },
    ...request.messages
  ]);
  return { systemPrompt, inputSchema, mergedMessages };
}
|
|
197
|
+
/**
 * Creates an AWS Bedrock provider backed by a shared BedrockRuntimeClient.
 * Both entry points try `config.models` in order, falling back on failure
 * via executeWithFallback.
 *
 * @param config - { region?, models } Bedrock provider configuration
 * @returns A provider with sendRequest / sendRequestStream
 */
function createBedrockProvider(config) {
  const clientOptions = config.region ? { region: config.region } : {};
  const client = new clientBedrockRuntime.BedrockRuntimeClient(clientOptions);
  const models = config.models;
  // Shared shape for the per-model call parameters.
  const buildParams = (modelId, prepared) => ({
    modelId,
    systemPrompt: prepared.systemPrompt,
    mergedMessages: prepared.mergedMessages,
    inputSchema: prepared.inputSchema
  });
  return {
    name: "bedrock",
    async sendRequest(request) {
      const prepared = prepareRequest(request);
      return executeWithFallback(models, (modelId) =>
        sendWithModel(client, buildParams(modelId, prepared))
      );
    },
    async *sendRequestStream(request) {
      const prepared = prepareRequest(request);
      const rawStream = await executeWithFallback(models, (modelId) =>
        sendStreamWithModel(client, buildParams(modelId, prepared))
      );
      yield* processBedrockStream(rawStream, "bedrock", request.responseSchema);
    }
  };
}
|
|
229
|
+
|
|
230
|
+
exports.createBedrockProvider = createBedrockProvider;
|
|
231
|
+
//# sourceMappingURL=index.cjs.map
|
|
232
|
+
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../bedrock/messageMerger.ts","../../bedrock/streamProcessor.ts","../../utils/executeWithFallback.ts","../../bedrock/index.ts"],"names":["EventType","ConverseCommand","ConverseStreamCommand","BedrockRuntimeClient"],"mappings":";;;;;;;;AAQO,SAAS,wBAAwB,QAAA,EAA+C;AACrF,EAAA,IAAI,QAAA,CAAS,MAAA,KAAW,CAAA,EAAG,OAAO,EAAC;AAEnC,EAAA,MAAM,SAA2B,EAAC;AAClC,EAAA,IAAI,cAAA,GAAwC,IAAA;AAE5C,EAAA,KAAA,MAAW,WAAW,QAAA,EAAU;AAC9B,IAAA,IAAI,CAAC,QAAQ,OAAA,EAAS;AAEtB,IAAA,IAAI,CAAC,cAAA,EAAgB;AACnB,MAAA,cAAA,GAAiB;AAAA,QACf,MAAM,OAAA,CAAQ,IAAA;AAAA,QACd,SAAS,CAAC,EAAE,IAAA,EAAM,OAAA,CAAQ,SAAS;AAAA,OACrC;AAAA,IACF,WAAW,cAAA,CAAe,IAAA,KAAS,OAAA,CAAQ,IAAA,IAAQ,eAAe,OAAA,EAAS;AACzE,MAAA,cAAA,CAAe,QAAQ,IAAA,CAAK,EAAE,IAAA,EAAM,OAAA,CAAQ,SAAS,CAAA;AAAA,IACvD,CAAA,MAAO;AACL,MAAA,MAAA,CAAO,KAAK,cAAc,CAAA;AAC1B,MAAA,cAAA,GAAiB;AAAA,QACf,MAAM,OAAA,CAAQ,IAAA;AAAA,QACd,SAAS,CAAC,EAAE,IAAA,EAAM,OAAA,CAAQ,SAAS;AAAA,OACrC;AAAA,IACF;AAAA,EACF;AAEA,EAAA,IAAI,cAAA,EAAgB;AAClB,IAAA,MAAA,CAAO,KAAK,cAAc,CAAA;AAAA,EAC5B;AAEA,EAAA,OAAO,MAAA;AACT;ACjCA,gBAAuB,oBAAA,CACrB,SAAA,EACA,OAAA,EACA,MAAA,EACqC;AACrC,EAAA,IAAI,gBAAA,GAA8C,IAAA;AAClD,EAAA,IAAI,eAAA,GAAkB,EAAA;AAEtB,EAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AAEnC,IAAA,IAAI,MAAM,iBAAA,EAAmB;AAC3B,MAAA,IAAI,KAAA,CAAM,iBAAA,CAAkB,KAAA,EAAO,OAAA,EAAS;AAC1C,QAAA,gBAAA,GAAmB,SAAA;AACnB,QAAA,eAAA,GAAkB,EAAA;AAAA,MACpB,CAAA,MAAO;AACL,QAAA,gBAAA,GAAmB,MAAA;AAAA,MACrB;AACA,MAAA;AAAA,IACF;AAGA,IAAA,IAAI,MAAM,iBAAA,EAAmB;AAC3B,MAAA,MAAM,KAAA,GAAQ,MAAM,iBAAA,CAAkB,KAAA;AACtC,MAAA,IAAI,CAAC,KAAA,EAAO;AAIZ,MAAA,IAAI,KAAA,CAAM,IAAA,KAAS,gBAAA,KAAqB,MAAA,IAAU,qBAAqB,IAAA,CAAA,EAAO;AAC5E,QAAA,gBAAA,GAAmB,MAAA;AACnB,QAAA,MAAM,EAAE,MAAMA,gBAAA,CAAU,oBAAA,EAAsB,WAAW,EAAA,EAAI,KAAA,EAAO,KAAA,CAAM,IAAA,EAAM,OAAA,EAAQ;AAAA,MAC1F,WAAW,KAAA,CAAM,OAAA,KAAY,gBAAA,KAAqB,SAAA,IAAa,qBAAqB,IAAA,CAAA,EAAO;AACzF,QAAA,gBAAA,GAAmB,SAAA;AACnB,QAAA,eAAA,IAAmB,KAAA,CAAM,QAAQ,KAAA,IAAS,EAAA;AAAA,MAC5C;AACA,MAAA;AAAA,IACF;AAGA,IAAA,IAAI,MAAM,gBAAA,EAAkB;AAC1B,MAAA,IAAI,gBAAA
,KAAqB,aAAa,eAAA,EAAiB;AACrD,QAAA,MAAM,aAAA,CAAsB,eAAA,EAAiB,MAAA,EAAQ,OAAO,CAAA;AAC5D,QAAA,eAAA,GAAkB,EAAA;AAAA,MACpB;AACA,MAAA,gBAAA,GAAmB,IAAA;AACnB,MAAA;AAAA,IACF;AAGA,IAAA,IAAI,MAAM,QAAA,EAAU;AAClB,MAAA;AAAA,IACF;AAGA,IAAA,kBAAA,CAAmB,KAAK,CAAA;AAAA,EAC1B;AACF;AAEA,SAAS,aAAA,CACP,eAAA,EACA,MAAA,EACA,OAAA,EACqB;AACrB,EAAA,IAAI;AACF,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,eAAe,CAAA;AACzC,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,MAAM,CAAA;AACrC,IAAA,OAAO;AAAA,MACL,MAAMA,gBAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,OAAO;AAAA,MACL,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,mCAAA,EAAuC,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACrE;AAAA,KACF;AAAA,EACF;AACF;AAEA,SAAS,mBAAmB,KAAA,EAAmC;AAC7D,EAAA,IAAI,MAAM,uBAAA,EAAyB;AACjC,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,wBAAA,EAA2B,KAAA,CAAM,uBAAA,CAAwB,OAAO,CAAA,CAAE,CAAA;AAAA,EACpF;AACA,EAAA,IAAI,MAAM,yBAAA,EAA2B;AACnC,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,4BAAA,EAA+B,KAAA,CAAM,yBAAA,CAA0B,OAAO,CAAA,CAAE,CAAA;AAAA,EAC1F;AACA,EAAA,IAAI,MAAM,mBAAA,EAAqB;AAC7B,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,oBAAA,EAAuB,KAAA,CAAM,mBAAA,CAAoB,OAAO,CAAA,CAAE,CAAA;AAAA,EAC5E;AACA,EAAA,IAAI,MAAM,mBAAA,EAAqB;AAC7B,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,0BAAA,EAA6B,KAAA,CAAM,mBAAA,CAAoB,OAAO,CAAA,CAAE,CAAA;AAAA,EAClF;AACA,EAAA,IAAI,MAAM,2BAAA,EAA6B;AACrC,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,6BAAA,EAAgC,KAAA,CAAM,2BAAA,CAA4B,OAAO,CAAA,CAAE,CAAA;AAAA,EAC7F;AACF;;;ACpFA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACnBA,IAAM,4BAAA,GAA+B;;AAAA;;AAAA;AAAA;AAAA;;AAAA;;AAAA;;AAAA;;AAAA,6EAAA,CAAA;AAoCrC,SAAS
,eAAA,CAAgB,QAA6B,QAAA,EAAmB;AACvE,EAAA,OAAO;AAAA,IACL,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,QAAQ,CAAC,EAAE,IAAA,EAAM,MAAA,CAAO,cAAc,CAAA;AAAA,IACtC,UAAU,MAAA,CAAO,cAAA;AAAA,IACjB,UAAA,EAAY;AAAA,MACV,KAAA,EAAO;AAAA,QACL;AAAA,UACE,QAAA,EAAU;AAAA,YACR,IAAA,EAAM,oBAAA;AAAA,YACN,WAAA,EAAa,WACT,2JAAA,GACA,0CAAA;AAAA,YACJ,aAAa,MAAA,CAAO;AAAA;AACtB;AACF,OACF;AAAA,MACA,UAAA,EAAY,QAAA,GAAW,EAAE,IAAA,EAAM,IAAG,GAAI,EAAE,GAAA,EAAK,EAAC;AAAE;AAClD,GACF;AACF;AAEA,eAAe,aAAA,CAAc,QAA8B,MAAA,EAAwD;AACjH,EAAA,MAAM,UAAU,IAAIC,oCAAA,CAAgB,eAAA,CAAgB,MAAA,EAAQ,KAAK,CAAC,CAAA;AAClE,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,OAAO,CAAA;AAE1C,EAAA,MAAM,aAAA,GAAgB,QAAA,CAAS,MAAA,EAAQ,OAAA,EAAS,WAAW,EAAC;AAC5D,EAAA,MAAM,eAAe,aAAA,CAAc,IAAA,CAAK,CAAC,KAAA,KAAU,MAAM,OAAO,CAAA;AAChE,EAAA,MAAM,MAAA,GAAS,cAAc,OAAA,EAAS,KAAA;AAEtC,EAAA,MAAM,eAAA,GACJ,MAAA,IACA,OAAO,MAAA,KAAW,YAClB,qBAAA,IAAyB,MAAA,IACzB,gBAAA,IAAoB,MAAA,IACpB,OAAO,MAAA,CAAO,mBAAA,KAAwB,QAAA,IACtC,OAAO,OAAO,cAAA,KAAmB,QAAA;AAEnC,EAAA,IAAI,CAAC,eAAA,EAAiB;AACpB,IAAA,MAAM,IAAI,MAAM,wCAAwC,CAAA;AAAA,EAC1D;AAEA,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,MAAM,CAAA;AAAA,IAC9B,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,MACE,WAAA,EAAa,SAAS,KAAA,CAAM,WAAA;AAAA,MAC5B,YAAA,EAAc,SAAS,KAAA,CAAM,YAAA;AAAA,MAC7B,cAAc,QAAA,CAAS,KAAA,CAAM,eAAe,CAAA,KAAM,QAAA,CAAS,MAAM,YAAA,IAAgB,CAAA;AAAA,KACnF,GACA;AAAA,GACN;AACF;AAEA,eAAe,mBAAA,CACb,QACA,MAAA,EAC8C;AAC9C,EAAA,MAAM,UAAU,IAAIC,0CAAA,CAAsB,eAAA,CAAgB,MAAA,EAAQ,IAAI,CAAC,CAAA;AACvE,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,OAAO,CAAA;AAE1C,EAAA,IAAI,CAAC,SAAS,MAAA,EAAQ;AACpB,IAAA,MAAM,IAAI,MAAM,iCAAiC,CAAA;AAAA,EACnD;AAEA,EAAA,OAAO,QAAA,CAAS,MAAA;AAClB;AAEA,SAAS,eAAe,OAAA,EAA0B;AAChD,EAAA,MAAM,YAAA,GAAe,4BAAA,GAA+B,MAAA,GAAS,OAAA,CAAQ,YAAA;AACrE,EAAA,MAAM,UAAA,GAAa,OAAA,CAAQ,cAAA,CAAe,YAAA,EAAa;AACvD,EAAA,MAAM,WAAA,GAAc,EAAE,IAAA,EAAM,UAAA,EAAW;AAGvC,EAAA,MAAM,iBAAA,GAAuC,CAAC,EAAE,IAAA,EAAM,MAAA,EAAQ,SAAS,MAAA,EAAO,EAAG,GAAG,OAAA,CAAQ,QAAQ,CAAA;AACpG,EAAA,MAAM,cAAA,GAAiB,wBAAwB,iBAAiB,CAAA;AAEhE,EAAA,OAAO,EA
AE,YAAA,EAAc,WAAA,EAAa,cAAA,EAAe;AACrD;AAgBO,SAAS,sBAAsB,MAAA,EAAyC;AAC7E,EAAA,MAAM,MAAA,GAAS,IAAIC,yCAAA,CAAqB,MAAA,CAAO,MAAA,GAAS,EAAE,MAAA,EAAQ,MAAA,CAAO,MAAA,EAAO,GAAI,EAAE,CAAA;AACtF,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,SAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,EAAE,YAAA,EAAc,WAAA,EAAa,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAE5E,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,OAAA,KAClC,aAAA,CAAc,MAAA,EAAQ;AAAA,UACpB,OAAA;AAAA,UACA,YAAA;AAAA,UACA,cAAA;AAAA,UACA;AAAA,SACD;AAAA,OACH;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,EAAE,YAAA,EAAc,WAAA,EAAa,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAE5E,MAAA,MAAM,YAAY,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,OAAA,KACnD,mBAAA,CAAoB,MAAA,EAAQ;AAAA,UAC1B,OAAA;AAAA,UACA,YAAA;AAAA,UACA,cAAA;AAAA,UACA;AAAA,SACD;AAAA,OACH;AAEA,MAAA,OAAO,oBAAA,CAA6B,SAAA,EAAW,SAAA,EAAW,OAAA,CAAQ,cAAc,CAAA;AAAA,IAClF;AAAA,GACF;AACF","file":"index.cjs","sourcesContent":["import { Message as BedrockMessage } from '@aws-sdk/client-bedrock-runtime'\nimport type { ProviderMessage } from '@genui-a3/core'\n\n/**\n * Converts provider-agnostic messages to Bedrock format, merging sequential same-role messages.\n * Bedrock requires alternating user/assistant roles — this merges consecutive same-role messages\n * into a single message with multiple content blocks.\n */\nexport function mergeSequentialMessages(messages: ProviderMessage[]): BedrockMessage[] {\n if (messages.length === 0) return []\n\n const result: BedrockMessage[] = []\n let currentMessage: BedrockMessage | null = null\n\n for (const message of messages) {\n if (!message.content) continue\n\n if (!currentMessage) {\n currentMessage = {\n role: message.role,\n content: [{ text: message.content }],\n }\n } else if (currentMessage.role === message.role && currentMessage.content) {\n currentMessage.content.push({ text: message.content })\n } else {\n result.push(currentMessage)\n currentMessage = {\n role: message.role,\n content: [{ text: message.content 
}],\n }\n }\n }\n\n if (currentMessage) {\n result.push(currentMessage)\n }\n\n return result\n}\n","import { ConverseStreamOutput } from '@aws-sdk/client-bedrock-runtime'\nimport { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\n\nexport async function* processBedrockStream<TState extends BaseState = BaseState>(\n rawStream: AsyncIterable<ConverseStreamOutput>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let currentBlockType: 'text' | 'toolUse' | null = null\n let toolInputBuffer = ''\n\n for await (const event of rawStream) {\n // --- Content block start: determine block type ---\n if (event.contentBlockStart) {\n if (event.contentBlockStart.start?.toolUse) {\n currentBlockType = 'toolUse'\n toolInputBuffer = ''\n } else {\n currentBlockType = 'text'\n }\n continue\n }\n\n // --- Content block delta: text or tool input ---\n if (event.contentBlockDelta) {\n const delta = event.contentBlockDelta.delta\n if (!delta) continue\n\n // Infer block type from delta content if no contentBlockStart was received\n // (Bedrock may omit contentBlockStart for the first text block)\n if (delta.text && (currentBlockType === 'text' || currentBlockType === null)) {\n currentBlockType = 'text'\n yield { type: EventType.TEXT_MESSAGE_CONTENT, messageId: '', delta: delta.text, agentId } as StreamEvent<TState>\n } else if (delta.toolUse && (currentBlockType === 'toolUse' || currentBlockType === null)) {\n currentBlockType = 'toolUse'\n toolInputBuffer += delta.toolUse.input ?? 
''\n }\n continue\n }\n\n // --- Content block stop: parse and validate tool call ---\n if (event.contentBlockStop) {\n if (currentBlockType === 'toolUse' && toolInputBuffer) {\n yield parseToolCall<TState>(toolInputBuffer, schema, agentId)\n toolInputBuffer = ''\n }\n currentBlockType = null\n continue\n }\n\n // --- Metadata: log usage ---\n if (event.metadata) {\n continue\n }\n\n // --- Error events: throw to surface to caller ---\n throwIfStreamError(event)\n }\n}\n\nfunction parseToolCall<TState extends BaseState>(\n toolInputBuffer: string,\n schema: ZodType,\n agentId: AgentId,\n): StreamEvent<TState> {\n try {\n const parsed = JSON.parse(toolInputBuffer) as Record<string, unknown>\n const validated = schema.parse(parsed)\n return {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n return {\n type: EventType.RUN_ERROR,\n message: `Tool call parse/validation failed: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\nfunction throwIfStreamError(event: ConverseStreamOutput): void {\n if (event.internalServerException) {\n throw new Error(`Bedrock internal error: ${event.internalServerException.message}`)\n }\n if (event.modelStreamErrorException) {\n throw new Error(`Bedrock model stream error: ${event.modelStreamErrorException.message}`)\n }\n if (event.throttlingException) {\n throw new Error(`Bedrock throttling: ${event.throttlingException.message}`)\n }\n if (event.validationException) {\n throw new Error(`Bedrock validation error: ${event.validationException.message}`)\n }\n if (event.serviceUnavailableException) {\n throw new Error(`Bedrock service unavailable: ${event.serviceUnavailableException.message}`)\n }\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model 
identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import {\n BedrockRuntimeClient,\n ConverseCommand,\n ConverseStreamCommand,\n ConverseStreamOutput,\n ToolInputSchema,\n} from '@aws-sdk/client-bedrock-runtime'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { mergeSequentialMessages } from './messageMerger'\nimport { processBedrockStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\n\nconst RESPONSE_FORMAT_INSTRUCTIONS = `\n\n# RESPONSE FORMAT — MANDATORY\n\n<<CRITICAL INSTRUCTION>>\nYou MUST ALWAYS output plain text FIRST, then call the structuredResponse tool SECOND. NEVER call the tool without writing text first. This is non-negotiable.\n<</CRITICAL INSTRUCTION>>\n\nYour response MUST have exactly two parts in this order:\n\nPART 1 — TEXT: Write your full conversational reply as plain text. This text is streamed to the user in real-time. 
Do not skip this.\n\nPART 2 — TOOL CALL: After the text, call the \\`structuredResponse\\` tool with the JSON payload. The \\`chatbotMessage\\` field MUST contain the same text you wrote in Part 1.\n\nIf you call the tool without writing text first, the response will be broken.`\n\n/**\n * Configuration for creating a Bedrock provider.\n */\nexport interface BedrockProviderConfig {\n /** AWS region for the Bedrock client */\n region?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * Uses Bedrock model ARNs, e.g. 'us.anthropic.claude-sonnet-4-5-20250929-v1:0'\n */\n models: string[]\n}\n\ntype SendWithModelParams = {\n modelId: string\n systemPrompt: string\n mergedMessages: ReturnType<typeof mergeSequentialMessages>\n inputSchema: ToolInputSchema | undefined\n}\n\nfunction getCommandInput(params: SendWithModelParams, isStream: boolean) {\n return {\n modelId: params.modelId,\n system: [{ text: params.systemPrompt }],\n messages: params.mergedMessages,\n toolConfig: {\n tools: [\n {\n toolSpec: {\n name: 'structuredResponse',\n description: isStream\n ? 'Submit your structured response data. IMPORTANT: You MUST write your full text reply BEFORE calling this tool. Never call this tool as your first action.'\n : 'A tool to generate a structured response',\n inputSchema: params.inputSchema,\n },\n },\n ],\n toolChoice: isStream ? { auto: {} } : { any: {} },\n },\n }\n}\n\nasync function sendWithModel(client: BedrockRuntimeClient, params: SendWithModelParams): Promise<ProviderResponse> {\n const command = new ConverseCommand(getCommandInput(params, false))\n const response = await client.send(command)\n\n const contentBlocks = response.output?.message?.content ?? 
[]\n const toolUseBlock = contentBlocks.find((block) => block.toolUse)\n const result = toolUseBlock?.toolUse?.input\n\n const isValidResponse =\n result &&\n typeof result === 'object' &&\n 'conversationPayload' in result &&\n 'chatbotMessage' in result &&\n typeof result.conversationPayload === 'object' &&\n typeof result.chatbotMessage === 'string'\n\n if (!isValidResponse) {\n throw new Error('Bedrock returned invalid tool response')\n }\n\n return {\n content: JSON.stringify(result),\n usage: response.usage\n ? {\n inputTokens: response.usage.inputTokens,\n outputTokens: response.usage.outputTokens,\n totalTokens: (response.usage.inputTokens ?? 0) + (response.usage.outputTokens ?? 0),\n }\n : undefined,\n }\n}\n\nasync function sendStreamWithModel(\n client: BedrockRuntimeClient,\n params: SendWithModelParams,\n): Promise<AsyncIterable<ConverseStreamOutput>> {\n const command = new ConverseStreamCommand(getCommandInput(params, true))\n const response = await client.send(command)\n\n if (!response.stream) {\n throw new Error('No stream returned from Bedrock')\n }\n\n return response.stream\n}\n\nfunction prepareRequest(request: ProviderRequest) {\n const systemPrompt = RESPONSE_FORMAT_INSTRUCTIONS + '\\n\\n' + request.systemPrompt\n const jsonSchema = request.responseSchema.toJSONSchema()\n const inputSchema = { json: jsonSchema } as ToolInputSchema\n\n // Bedrock requires messages to start with a user message — prepend \"Hi\" if needed\n const prependedMessages: ProviderMessage[] = [{ role: 'user', content: 'Hi\\n' }, ...request.messages]\n const mergedMessages = mergeSequentialMessages(prependedMessages)\n\n return { systemPrompt, inputSchema, mergedMessages }\n}\n\n/**\n * Creates an AWS Bedrock provider instance.\n *\n * @param config - Bedrock provider configuration\n * @returns A Provider implementation using AWS Bedrock\n *\n * @example\n * ```typescript\n * const provider = createBedrockProvider({\n * models: 
['us.anthropic.claude-sonnet-4-5-20250929-v1:0'],\n * region: 'us-east-1',\n * })\n * ```\n */\nexport function createBedrockProvider(config: BedrockProviderConfig): Provider {\n const client = new BedrockRuntimeClient(config.region ? { region: config.region } : {})\n const models = config.models\n\n return {\n name: 'bedrock',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const { systemPrompt, inputSchema, mergedMessages } = prepareRequest(request)\n\n return executeWithFallback(models, (modelId) =>\n sendWithModel(client, {\n modelId,\n systemPrompt,\n mergedMessages,\n inputSchema,\n }),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const { systemPrompt, inputSchema, mergedMessages } = prepareRequest(request)\n\n const rawStream = await executeWithFallback(models, (modelId) =>\n sendStreamWithModel(client, {\n modelId,\n systemPrompt,\n mergedMessages,\n inputSchema,\n }),\n )\n\n yield* processBedrockStream<TState>(rawStream, 'bedrock', request.responseSchema)\n },\n }\n}\n"]}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import { Provider } from '@genui-a3/core';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Configuration for creating a Bedrock provider.
|
|
5
|
+
*/
|
|
6
|
+
interface BedrockProviderConfig {
|
|
7
|
+
/** AWS region for the Bedrock client */
|
|
8
|
+
region?: string;
|
|
9
|
+
/**
|
|
10
|
+
* Model identifiers in order of preference (first = primary, rest = fallbacks).
|
|
11
|
+
* Uses Bedrock model ARNs, e.g. 'us.anthropic.claude-sonnet-4-5-20250929-v1:0'
|
|
12
|
+
*/
|
|
13
|
+
models: string[];
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Creates an AWS Bedrock provider instance.
|
|
17
|
+
*
|
|
18
|
+
* @param config - Bedrock provider configuration
|
|
19
|
+
* @returns A Provider implementation using AWS Bedrock
|
|
20
|
+
*
|
|
21
|
+
* @example
|
|
22
|
+
* ```typescript
|
|
23
|
+
* const provider = createBedrockProvider({
|
|
24
|
+
* models: ['us.anthropic.claude-sonnet-4-5-20250929-v1:0'],
|
|
25
|
+
* region: 'us-east-1',
|
|
26
|
+
* })
|
|
27
|
+
* ```
|
|
28
|
+
*/
|
|
29
|
+
declare function createBedrockProvider(config: BedrockProviderConfig): Provider;
|
|
30
|
+
|
|
31
|
+
export { type BedrockProviderConfig, createBedrockProvider };
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import { Provider } from '@genui-a3/core';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Configuration for creating a Bedrock provider.
|
|
5
|
+
*/
|
|
6
|
+
interface BedrockProviderConfig {
|
|
7
|
+
/** AWS region for the Bedrock client */
|
|
8
|
+
region?: string;
|
|
9
|
+
/**
|
|
10
|
+
* Model identifiers in order of preference (first = primary, rest = fallbacks).
|
|
11
|
+
* Uses Bedrock model ARNs, e.g. 'us.anthropic.claude-sonnet-4-5-20250929-v1:0'
|
|
12
|
+
*/
|
|
13
|
+
models: string[];
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Creates an AWS Bedrock provider instance.
|
|
17
|
+
*
|
|
18
|
+
* @param config - Bedrock provider configuration
|
|
19
|
+
* @returns A Provider implementation using AWS Bedrock
|
|
20
|
+
*
|
|
21
|
+
* @example
|
|
22
|
+
* ```typescript
|
|
23
|
+
* const provider = createBedrockProvider({
|
|
24
|
+
* models: ['us.anthropic.claude-sonnet-4-5-20250929-v1:0'],
|
|
25
|
+
* region: 'us-east-1',
|
|
26
|
+
* })
|
|
27
|
+
* ```
|
|
28
|
+
*/
|
|
29
|
+
declare function createBedrockProvider(config: BedrockProviderConfig): Provider;
|
|
30
|
+
|
|
31
|
+
export { type BedrockProviderConfig, createBedrockProvider };
|
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
import { BedrockRuntimeClient, ConverseStreamCommand, ConverseCommand } from '@aws-sdk/client-bedrock-runtime';
|
|
2
|
+
import { EventType } from '@ag-ui/client';
|
|
3
|
+
|
|
4
|
+
// bedrock/index.ts
|
|
5
|
+
|
|
6
|
+
// bedrock/messageMerger.ts
|
|
7
|
+
/**
 * Converts provider messages to Bedrock format, merging consecutive
 * same-role messages into one message with multiple content blocks
 * (Bedrock requires alternating user/assistant roles).
 * Messages with empty/absent content are skipped.
 */
function mergeSequentialMessages(messages) {
  if (messages.length === 0) return [];
  const result = [];
  let currentMessage = null;
  for (const message of messages) {
    // Empty content cannot form a Bedrock content block.
    if (!message.content) continue;
    if (!currentMessage) {
      currentMessage = {
        role: message.role,
        content: [{ text: message.content }]
      };
    } else if (currentMessage.role === message.role && currentMessage.content) {
      // Same role as the accumulating message: append as another block.
      currentMessage.content.push({ text: message.content });
    } else {
      // Role changed: flush the accumulated message and start a new one.
      result.push(currentMessage);
      currentMessage = {
        role: message.role,
        content: [{ text: message.content }]
      };
    }
  }
  if (currentMessage) {
    result.push(currentMessage);
  }
  return result;
}
|
|
33
|
+
// Translates a raw Bedrock Converse event stream into provider stream events.
// Text deltas are re-emitted immediately as TEXT_MESSAGE_CONTENT events;
// tool-use input fragments are buffered and parsed/validated (via
// parseToolCall) once the content block closes. Error events throw.
async function* processBedrockStream(rawStream, agentId, schema) {
  // Which kind of content block we are currently inside; null between blocks.
  // Bedrock may omit contentBlockStart for the first text block, so null is
  // also treated as "infer from the first delta seen".
  let currentBlockType = null;
  // Accumulates the (possibly fragmented) JSON input of an in-flight tool call.
  let toolInputBuffer = "";
  for await (const event of rawStream) {
    // Block start: record the block type announced by Bedrock.
    if (event.contentBlockStart) {
      if (event.contentBlockStart.start?.toolUse) {
        currentBlockType = "toolUse";
        toolInputBuffer = "";
      } else {
        currentBlockType = "text";
      }
      continue;
    }
    // Block delta: stream text out, or buffer tool-call input.
    if (event.contentBlockDelta) {
      const delta = event.contentBlockDelta.delta;
      if (!delta) continue;
      if (delta.text && (currentBlockType === "text" || currentBlockType === null)) {
        currentBlockType = "text";
        yield { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "", delta: delta.text, agentId };
      } else if (delta.toolUse && (currentBlockType === "toolUse" || currentBlockType === null)) {
        currentBlockType = "toolUse";
        toolInputBuffer += delta.toolUse.input ?? "";
      }
      continue;
    }
    // Block stop: a completed tool-use block is parsed and validated now.
    if (event.contentBlockStop) {
      if (currentBlockType === "toolUse" && toolInputBuffer) {
        yield parseToolCall(toolInputBuffer, schema, agentId);
        toolInputBuffer = "";
      }
      currentBlockType = null;
      continue;
    }
    // Metadata (usage etc.) is ignored here.
    if (event.metadata) {
      continue;
    }
    // Anything else may be a stream-level error event; surface it to the caller.
    throwIfStreamError(event);
  }
}
|
|
72
|
+
/**
 * Parses buffered tool-call JSON and validates it against the response
 * schema. Returns a TOOL_CALL_RESULT event carrying the validated payload on
 * success; on any parse or validation failure, returns a RUN_ERROR event
 * instead of throwing.
 */
function parseToolCall(toolInputBuffer, schema, agentId) {
  try {
    const result = schema.parse(JSON.parse(toolInputBuffer));
    return {
      type: EventType.TOOL_CALL_RESULT,
      toolCallId: "",
      messageId: "",
      content: JSON.stringify(result),
      agentId
    };
  } catch (err) {
    return {
      type: EventType.RUN_ERROR,
      message: `Tool call parse/validation failed: ${err.message}`,
      agentId
    };
  }
}
|
|
91
|
+
/**
 * Surfaces Bedrock stream-level error events as thrown exceptions.
 * Events carrying none of the known error fields pass through silently.
 * Checks are performed in a fixed priority order.
 */
function throwIfStreamError(event) {
  const errorFields = [
    ["internalServerException", "Bedrock internal error"],
    ["modelStreamErrorException", "Bedrock model stream error"],
    ["throttlingException", "Bedrock throttling"],
    ["validationException", "Bedrock validation error"],
    ["serviceUnavailableException", "Bedrock service unavailable"]
  ];
  for (const [field, label] of errorFields) {
    const failure = event[field];
    if (failure) {
      throw new Error(`${label}: ${failure.message}`);
    }
  }
}
|
|
108
|
+
|
|
109
|
+
// utils/executeWithFallback.ts
|
|
110
|
+
/**
 * Runs `action` against each model in priority order, falling back to the
 * next model when one fails. The error raised by the final model propagates
 * to the caller; an empty model list throws a generic failure.
 *
 * @param models - Model identifiers, primary first
 * @param action - Async operation attempted per model
 * @returns Result of the first model that succeeds
 */
async function executeWithFallback(models, action) {
  const lastIndex = models.length - 1;
  let index = 0;
  for (const model of models) {
    try {
      return await action(model);
    } catch (error) {
      // Rethrow only when no fallback remains; otherwise try the next model.
      if (index === lastIndex) throw error;
    }
    index += 1;
  }
  throw new Error("All models failed");
}
|
|
124
|
+
|
|
125
|
+
// bedrock/index.ts
|
|
126
|
+
var RESPONSE_FORMAT_INSTRUCTIONS = `
|
|
127
|
+
|
|
128
|
+
# RESPONSE FORMAT \u2014 MANDATORY
|
|
129
|
+
|
|
130
|
+
<<CRITICAL INSTRUCTION>>
|
|
131
|
+
You MUST ALWAYS output plain text FIRST, then call the structuredResponse tool SECOND. NEVER call the tool without writing text first. This is non-negotiable.
|
|
132
|
+
<</CRITICAL INSTRUCTION>>
|
|
133
|
+
|
|
134
|
+
Your response MUST have exactly two parts in this order:
|
|
135
|
+
|
|
136
|
+
PART 1 \u2014 TEXT: Write your full conversational reply as plain text. This text is streamed to the user in real-time. Do not skip this.
|
|
137
|
+
|
|
138
|
+
PART 2 \u2014 TOOL CALL: After the text, call the \`structuredResponse\` tool with the JSON payload. The \`chatbotMessage\` field MUST contain the same text you wrote in Part 1.
|
|
139
|
+
|
|
140
|
+
If you call the tool without writing text first, the response will be broken.`;
|
|
141
|
+
/**
 * Builds the shared command input for Converse / ConverseStream requests.
 * The single structuredResponse tool carries the JSON schema for the reply.
 * Streaming requests use toolChoice `auto` (so the model may emit text before
 * the tool call), while non-streaming requests force a tool call with `any`.
 */
function getCommandInput(params, isStream) {
  const streamDescription = "Submit your structured response data. IMPORTANT: You MUST write your full text reply BEFORE calling this tool. Never call this tool as your first action.";
  const blockingDescription = "A tool to generate a structured response";
  const structuredResponseTool = {
    toolSpec: {
      name: "structuredResponse",
      description: isStream ? streamDescription : blockingDescription,
      inputSchema: params.inputSchema
    }
  };
  return {
    modelId: params.modelId,
    system: [{ text: params.systemPrompt }],
    messages: params.mergedMessages,
    toolConfig: {
      tools: [structuredResponseTool],
      toolChoice: isStream ? { auto: {} } : { any: {} }
    }
  };
}
|
|
160
|
+
/**
 * Executes a non-streaming Converse request and extracts the structured tool
 * response.
 *
 * @param client - Bedrock runtime client
 * @param params - Model id, system prompt, merged messages, and tool schema
 * @returns Provider response: JSON-serialized payload plus token usage when
 *   Bedrock reports it
 * @throws Error when the model returns no tool call or a malformed payload
 */
async function sendWithModel(client, params) {
  const command = new ConverseCommand(getCommandInput(params, false));
  const response = await client.send(command);
  const contentBlocks = response.output?.message?.content ?? [];
  const toolUseBlock = contentBlocks.find((block) => block.toolUse);
  const result = toolUseBlock?.toolUse?.input;
  // Fix: `typeof null === "object"`, so a null conversationPayload previously
  // slipped through this check — reject it explicitly.
  const isValidResponse = result && typeof result === "object" && "conversationPayload" in result && "chatbotMessage" in result && typeof result.conversationPayload === "object" && result.conversationPayload !== null && typeof result.chatbotMessage === "string";
  if (!isValidResponse) {
    throw new Error("Bedrock returned invalid tool response");
  }
  return {
    content: JSON.stringify(result),
    usage: response.usage ? {
      inputTokens: response.usage.inputTokens,
      outputTokens: response.usage.outputTokens,
      totalTokens: (response.usage.inputTokens ?? 0) + (response.usage.outputTokens ?? 0)
    } : void 0
  };
}
|
|
179
|
+
/**
 * Executes a streaming Converse request and returns the raw event stream.
 *
 * @param client - Bedrock runtime client
 * @param params - Model id, system prompt, merged messages, and tool schema
 * @returns The async iterable of Converse stream events
 * @throws Error when Bedrock responds without a stream body
 */
async function sendStreamWithModel(client, params) {
  const input = getCommandInput(params, true);
  const { stream } = await client.send(new ConverseStreamCommand(input));
  if (!stream) {
    throw new Error("No stream returned from Bedrock");
  }
  return stream;
}
|
|
187
|
+
/**
 * Normalizes a provider request into the pieces the Converse commands need:
 * the format-instruction-prefixed system prompt, the tool input schema, and
 * role-merged messages. A leading "Hi\n" user message is prepended because
 * Bedrock requires conversations to start with a user turn.
 */
function prepareRequest(request) {
  const systemPrompt = `${RESPONSE_FORMAT_INSTRUCTIONS}\n\n${request.systemPrompt}`;
  const inputSchema = { json: request.responseSchema.toJSONSchema() };
  const mergedMessages = mergeSequentialMessages([
    { role: "user", content: "Hi\n" },
    ...request.messages
  ]);
  return { systemPrompt, inputSchema, mergedMessages };
}
|
|
195
|
+
/**
 * Creates an AWS Bedrock provider.
 *
 * The returned provider exposes a blocking `sendRequest` and a streaming
 * `sendRequestStream`; both try the configured models in order, falling back
 * on failure via executeWithFallback.
 *
 * @param config - Region (optional; AWS SDK default resolution otherwise)
 *   and models in priority order
 * @returns A provider named "bedrock"
 */
function createBedrockProvider(config) {
  const clientConfig = config.region ? { region: config.region } : {};
  const bedrockClient = new BedrockRuntimeClient(clientConfig);
  const { models } = config;
  // Combines the prepared request pieces with a concrete model id.
  const buildParams = (modelId, prepared) => ({
    modelId,
    systemPrompt: prepared.systemPrompt,
    mergedMessages: prepared.mergedMessages,
    inputSchema: prepared.inputSchema
  });
  return {
    name: "bedrock",
    async sendRequest(request) {
      const prepared = prepareRequest(request);
      return executeWithFallback(
        models,
        (modelId) => sendWithModel(bedrockClient, buildParams(modelId, prepared))
      );
    },
    async *sendRequestStream(request) {
      const prepared = prepareRequest(request);
      const rawStream = await executeWithFallback(
        models,
        (modelId) => sendStreamWithModel(bedrockClient, buildParams(modelId, prepared))
      );
      yield* processBedrockStream(rawStream, "bedrock", request.responseSchema);
    }
  };
}
|
|
227
|
+
|
|
228
|
+
export { createBedrockProvider };
|
|
229
|
+
//# sourceMappingURL=index.js.map
|
|
230
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../bedrock/messageMerger.ts","../../bedrock/streamProcessor.ts","../../utils/executeWithFallback.ts","../../bedrock/index.ts"],"names":[],"mappings":";;;;;;AAQO,SAAS,wBAAwB,QAAA,EAA+C;AACrF,EAAA,IAAI,QAAA,CAAS,MAAA,KAAW,CAAA,EAAG,OAAO,EAAC;AAEnC,EAAA,MAAM,SAA2B,EAAC;AAClC,EAAA,IAAI,cAAA,GAAwC,IAAA;AAE5C,EAAA,KAAA,MAAW,WAAW,QAAA,EAAU;AAC9B,IAAA,IAAI,CAAC,QAAQ,OAAA,EAAS;AAEtB,IAAA,IAAI,CAAC,cAAA,EAAgB;AACnB,MAAA,cAAA,GAAiB;AAAA,QACf,MAAM,OAAA,CAAQ,IAAA;AAAA,QACd,SAAS,CAAC,EAAE,IAAA,EAAM,OAAA,CAAQ,SAAS;AAAA,OACrC;AAAA,IACF,WAAW,cAAA,CAAe,IAAA,KAAS,OAAA,CAAQ,IAAA,IAAQ,eAAe,OAAA,EAAS;AACzE,MAAA,cAAA,CAAe,QAAQ,IAAA,CAAK,EAAE,IAAA,EAAM,OAAA,CAAQ,SAAS,CAAA;AAAA,IACvD,CAAA,MAAO;AACL,MAAA,MAAA,CAAO,KAAK,cAAc,CAAA;AAC1B,MAAA,cAAA,GAAiB;AAAA,QACf,MAAM,OAAA,CAAQ,IAAA;AAAA,QACd,SAAS,CAAC,EAAE,IAAA,EAAM,OAAA,CAAQ,SAAS;AAAA,OACrC;AAAA,IACF;AAAA,EACF;AAEA,EAAA,IAAI,cAAA,EAAgB;AAClB,IAAA,MAAA,CAAO,KAAK,cAAc,CAAA;AAAA,EAC5B;AAEA,EAAA,OAAO,MAAA;AACT;ACjCA,gBAAuB,oBAAA,CACrB,SAAA,EACA,OAAA,EACA,MAAA,EACqC;AACrC,EAAA,IAAI,gBAAA,GAA8C,IAAA;AAClD,EAAA,IAAI,eAAA,GAAkB,EAAA;AAEtB,EAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AAEnC,IAAA,IAAI,MAAM,iBAAA,EAAmB;AAC3B,MAAA,IAAI,KAAA,CAAM,iBAAA,CAAkB,KAAA,EAAO,OAAA,EAAS;AAC1C,QAAA,gBAAA,GAAmB,SAAA;AACnB,QAAA,eAAA,GAAkB,EAAA;AAAA,MACpB,CAAA,MAAO;AACL,QAAA,gBAAA,GAAmB,MAAA;AAAA,MACrB;AACA,MAAA;AAAA,IACF;AAGA,IAAA,IAAI,MAAM,iBAAA,EAAmB;AAC3B,MAAA,MAAM,KAAA,GAAQ,MAAM,iBAAA,CAAkB,KAAA;AACtC,MAAA,IAAI,CAAC,KAAA,EAAO;AAIZ,MAAA,IAAI,KAAA,CAAM,IAAA,KAAS,gBAAA,KAAqB,MAAA,IAAU,qBAAqB,IAAA,CAAA,EAAO;AAC5E,QAAA,gBAAA,GAAmB,MAAA;AACnB,QAAA,MAAM,EAAE,MAAM,SAAA,CAAU,oBAAA,EAAsB,WAAW,EAAA,EAAI,KAAA,EAAO,KAAA,CAAM,IAAA,EAAM,OAAA,EAAQ;AAAA,MAC1F,WAAW,KAAA,CAAM,OAAA,KAAY,gBAAA,KAAqB,SAAA,IAAa,qBAAqB,IAAA,CAAA,EAAO;AACzF,QAAA,gBAAA,GAAmB,SAAA;AACnB,QAAA,eAAA,IAAmB,KAAA,CAAM,QAAQ,KAAA,IAAS,EAAA;AAAA,MAC5C;AACA,MAAA;AAAA,IACF;AAGA,IAAA,IAAI,MAAM,gBAAA,EAAkB;AAC1B,MAAA,IAAI,gBAAA,KAAqB,aAAa,eAAA,EAAiB;AACrD,QAAA,MAAM,aAAA,CAAsB,eAAA,EAAiB,MAAA,EAAQ,OAAO,CAAA
;AAC5D,QAAA,eAAA,GAAkB,EAAA;AAAA,MACpB;AACA,MAAA,gBAAA,GAAmB,IAAA;AACnB,MAAA;AAAA,IACF;AAGA,IAAA,IAAI,MAAM,QAAA,EAAU;AAClB,MAAA;AAAA,IACF;AAGA,IAAA,kBAAA,CAAmB,KAAK,CAAA;AAAA,EAC1B;AACF;AAEA,SAAS,aAAA,CACP,eAAA,EACA,MAAA,EACA,OAAA,EACqB;AACrB,EAAA,IAAI;AACF,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,eAAe,CAAA;AACzC,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,MAAM,CAAA;AACrC,IAAA,OAAO;AAAA,MACL,MAAM,SAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,OAAO;AAAA,MACL,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,mCAAA,EAAuC,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACrE;AAAA,KACF;AAAA,EACF;AACF;AAEA,SAAS,mBAAmB,KAAA,EAAmC;AAC7D,EAAA,IAAI,MAAM,uBAAA,EAAyB;AACjC,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,wBAAA,EAA2B,KAAA,CAAM,uBAAA,CAAwB,OAAO,CAAA,CAAE,CAAA;AAAA,EACpF;AACA,EAAA,IAAI,MAAM,yBAAA,EAA2B;AACnC,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,4BAAA,EAA+B,KAAA,CAAM,yBAAA,CAA0B,OAAO,CAAA,CAAE,CAAA;AAAA,EAC1F;AACA,EAAA,IAAI,MAAM,mBAAA,EAAqB;AAC7B,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,oBAAA,EAAuB,KAAA,CAAM,mBAAA,CAAoB,OAAO,CAAA,CAAE,CAAA;AAAA,EAC5E;AACA,EAAA,IAAI,MAAM,mBAAA,EAAqB;AAC7B,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,0BAAA,EAA6B,KAAA,CAAM,mBAAA,CAAoB,OAAO,CAAA,CAAE,CAAA;AAAA,EAClF;AACA,EAAA,IAAI,MAAM,2BAAA,EAA6B;AACrC,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,6BAAA,EAAgC,KAAA,CAAM,2BAAA,CAA4B,OAAO,CAAA,CAAE,CAAA;AAAA,EAC7F;AACF;;;ACpFA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACnBA,IAAM,4BAAA,GAA+B;;AAAA;;AAAA;AAAA;AAAA;;AAAA;;AAAA;;AAAA;;AAAA,6EAAA,CAAA;AAoCrC,SAAS,eAAA,CAAgB,QAA6B,QAAA,EAAmB;AACvE,EAAA,OAAO;AAAA,IACL,SAAS,MAAA,CAAO,OAAA;AAAA,IACh
B,QAAQ,CAAC,EAAE,IAAA,EAAM,MAAA,CAAO,cAAc,CAAA;AAAA,IACtC,UAAU,MAAA,CAAO,cAAA;AAAA,IACjB,UAAA,EAAY;AAAA,MACV,KAAA,EAAO;AAAA,QACL;AAAA,UACE,QAAA,EAAU;AAAA,YACR,IAAA,EAAM,oBAAA;AAAA,YACN,WAAA,EAAa,WACT,2JAAA,GACA,0CAAA;AAAA,YACJ,aAAa,MAAA,CAAO;AAAA;AACtB;AACF,OACF;AAAA,MACA,UAAA,EAAY,QAAA,GAAW,EAAE,IAAA,EAAM,IAAG,GAAI,EAAE,GAAA,EAAK,EAAC;AAAE;AAClD,GACF;AACF;AAEA,eAAe,aAAA,CAAc,QAA8B,MAAA,EAAwD;AACjH,EAAA,MAAM,UAAU,IAAI,eAAA,CAAgB,eAAA,CAAgB,MAAA,EAAQ,KAAK,CAAC,CAAA;AAClE,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,OAAO,CAAA;AAE1C,EAAA,MAAM,aAAA,GAAgB,QAAA,CAAS,MAAA,EAAQ,OAAA,EAAS,WAAW,EAAC;AAC5D,EAAA,MAAM,eAAe,aAAA,CAAc,IAAA,CAAK,CAAC,KAAA,KAAU,MAAM,OAAO,CAAA;AAChE,EAAA,MAAM,MAAA,GAAS,cAAc,OAAA,EAAS,KAAA;AAEtC,EAAA,MAAM,eAAA,GACJ,MAAA,IACA,OAAO,MAAA,KAAW,YAClB,qBAAA,IAAyB,MAAA,IACzB,gBAAA,IAAoB,MAAA,IACpB,OAAO,MAAA,CAAO,mBAAA,KAAwB,QAAA,IACtC,OAAO,OAAO,cAAA,KAAmB,QAAA;AAEnC,EAAA,IAAI,CAAC,eAAA,EAAiB;AACpB,IAAA,MAAM,IAAI,MAAM,wCAAwC,CAAA;AAAA,EAC1D;AAEA,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,MAAM,CAAA;AAAA,IAC9B,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,MACE,WAAA,EAAa,SAAS,KAAA,CAAM,WAAA;AAAA,MAC5B,YAAA,EAAc,SAAS,KAAA,CAAM,YAAA;AAAA,MAC7B,cAAc,QAAA,CAAS,KAAA,CAAM,eAAe,CAAA,KAAM,QAAA,CAAS,MAAM,YAAA,IAAgB,CAAA;AAAA,KACnF,GACA;AAAA,GACN;AACF;AAEA,eAAe,mBAAA,CACb,QACA,MAAA,EAC8C;AAC9C,EAAA,MAAM,UAAU,IAAI,qBAAA,CAAsB,eAAA,CAAgB,MAAA,EAAQ,IAAI,CAAC,CAAA;AACvE,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,OAAO,CAAA;AAE1C,EAAA,IAAI,CAAC,SAAS,MAAA,EAAQ;AACpB,IAAA,MAAM,IAAI,MAAM,iCAAiC,CAAA;AAAA,EACnD;AAEA,EAAA,OAAO,QAAA,CAAS,MAAA;AAClB;AAEA,SAAS,eAAe,OAAA,EAA0B;AAChD,EAAA,MAAM,YAAA,GAAe,4BAAA,GAA+B,MAAA,GAAS,OAAA,CAAQ,YAAA;AACrE,EAAA,MAAM,UAAA,GAAa,OAAA,CAAQ,cAAA,CAAe,YAAA,EAAa;AACvD,EAAA,MAAM,WAAA,GAAc,EAAE,IAAA,EAAM,UAAA,EAAW;AAGvC,EAAA,MAAM,iBAAA,GAAuC,CAAC,EAAE,IAAA,EAAM,MAAA,EAAQ,SAAS,MAAA,EAAO,EAAG,GAAG,OAAA,CAAQ,QAAQ,CAAA;AACpG,EAAA,MAAM,cAAA,GAAiB,wBAAwB,iBAAiB,CAAA;AAEhE,EAAA,OAAO,EAAE,YAAA,EAAc,WAAA,EAAa,cAAA,EAAe;AACrD;AAgBO,SAAS,sBAAsB,MAAA,EAAyC;AAC7E,EAAA,MAAM,MAA
A,GAAS,IAAI,oBAAA,CAAqB,MAAA,CAAO,MAAA,GAAS,EAAE,MAAA,EAAQ,MAAA,CAAO,MAAA,EAAO,GAAI,EAAE,CAAA;AACtF,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,SAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,EAAE,YAAA,EAAc,WAAA,EAAa,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAE5E,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,OAAA,KAClC,aAAA,CAAc,MAAA,EAAQ;AAAA,UACpB,OAAA;AAAA,UACA,YAAA;AAAA,UACA,cAAA;AAAA,UACA;AAAA,SACD;AAAA,OACH;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,EAAE,YAAA,EAAc,WAAA,EAAa,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAE5E,MAAA,MAAM,YAAY,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,OAAA,KACnD,mBAAA,CAAoB,MAAA,EAAQ;AAAA,UAC1B,OAAA;AAAA,UACA,YAAA;AAAA,UACA,cAAA;AAAA,UACA;AAAA,SACD;AAAA,OACH;AAEA,MAAA,OAAO,oBAAA,CAA6B,SAAA,EAAW,SAAA,EAAW,OAAA,CAAQ,cAAc,CAAA;AAAA,IAClF;AAAA,GACF;AACF","file":"index.js","sourcesContent":["import { Message as BedrockMessage } from '@aws-sdk/client-bedrock-runtime'\nimport type { ProviderMessage } from '@genui-a3/core'\n\n/**\n * Converts provider-agnostic messages to Bedrock format, merging sequential same-role messages.\n * Bedrock requires alternating user/assistant roles — this merges consecutive same-role messages\n * into a single message with multiple content blocks.\n */\nexport function mergeSequentialMessages(messages: ProviderMessage[]): BedrockMessage[] {\n if (messages.length === 0) return []\n\n const result: BedrockMessage[] = []\n let currentMessage: BedrockMessage | null = null\n\n for (const message of messages) {\n if (!message.content) continue\n\n if (!currentMessage) {\n currentMessage = {\n role: message.role,\n content: [{ text: message.content }],\n }\n } else if (currentMessage.role === message.role && currentMessage.content) {\n currentMessage.content.push({ text: message.content })\n } else {\n result.push(currentMessage)\n currentMessage = {\n role: message.role,\n content: [{ text: message.content }],\n }\n }\n }\n\n if (currentMessage) {\n result.push(currentMessage)\n }\n\n return 
result\n}\n","import { ConverseStreamOutput } from '@aws-sdk/client-bedrock-runtime'\nimport { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\n\nexport async function* processBedrockStream<TState extends BaseState = BaseState>(\n rawStream: AsyncIterable<ConverseStreamOutput>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let currentBlockType: 'text' | 'toolUse' | null = null\n let toolInputBuffer = ''\n\n for await (const event of rawStream) {\n // --- Content block start: determine block type ---\n if (event.contentBlockStart) {\n if (event.contentBlockStart.start?.toolUse) {\n currentBlockType = 'toolUse'\n toolInputBuffer = ''\n } else {\n currentBlockType = 'text'\n }\n continue\n }\n\n // --- Content block delta: text or tool input ---\n if (event.contentBlockDelta) {\n const delta = event.contentBlockDelta.delta\n if (!delta) continue\n\n // Infer block type from delta content if no contentBlockStart was received\n // (Bedrock may omit contentBlockStart for the first text block)\n if (delta.text && (currentBlockType === 'text' || currentBlockType === null)) {\n currentBlockType = 'text'\n yield { type: EventType.TEXT_MESSAGE_CONTENT, messageId: '', delta: delta.text, agentId } as StreamEvent<TState>\n } else if (delta.toolUse && (currentBlockType === 'toolUse' || currentBlockType === null)) {\n currentBlockType = 'toolUse'\n toolInputBuffer += delta.toolUse.input ?? 
''\n }\n continue\n }\n\n // --- Content block stop: parse and validate tool call ---\n if (event.contentBlockStop) {\n if (currentBlockType === 'toolUse' && toolInputBuffer) {\n yield parseToolCall<TState>(toolInputBuffer, schema, agentId)\n toolInputBuffer = ''\n }\n currentBlockType = null\n continue\n }\n\n // --- Metadata: log usage ---\n if (event.metadata) {\n continue\n }\n\n // --- Error events: throw to surface to caller ---\n throwIfStreamError(event)\n }\n}\n\nfunction parseToolCall<TState extends BaseState>(\n toolInputBuffer: string,\n schema: ZodType,\n agentId: AgentId,\n): StreamEvent<TState> {\n try {\n const parsed = JSON.parse(toolInputBuffer) as Record<string, unknown>\n const validated = schema.parse(parsed)\n return {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n return {\n type: EventType.RUN_ERROR,\n message: `Tool call parse/validation failed: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\nfunction throwIfStreamError(event: ConverseStreamOutput): void {\n if (event.internalServerException) {\n throw new Error(`Bedrock internal error: ${event.internalServerException.message}`)\n }\n if (event.modelStreamErrorException) {\n throw new Error(`Bedrock model stream error: ${event.modelStreamErrorException.message}`)\n }\n if (event.throttlingException) {\n throw new Error(`Bedrock throttling: ${event.throttlingException.message}`)\n }\n if (event.validationException) {\n throw new Error(`Bedrock validation error: ${event.validationException.message}`)\n }\n if (event.serviceUnavailableException) {\n throw new Error(`Bedrock service unavailable: ${event.serviceUnavailableException.message}`)\n }\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model 
identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import {\n BedrockRuntimeClient,\n ConverseCommand,\n ConverseStreamCommand,\n ConverseStreamOutput,\n ToolInputSchema,\n} from '@aws-sdk/client-bedrock-runtime'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { mergeSequentialMessages } from './messageMerger'\nimport { processBedrockStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\n\nconst RESPONSE_FORMAT_INSTRUCTIONS = `\n\n# RESPONSE FORMAT — MANDATORY\n\n<<CRITICAL INSTRUCTION>>\nYou MUST ALWAYS output plain text FIRST, then call the structuredResponse tool SECOND. NEVER call the tool without writing text first. This is non-negotiable.\n<</CRITICAL INSTRUCTION>>\n\nYour response MUST have exactly two parts in this order:\n\nPART 1 — TEXT: Write your full conversational reply as plain text. This text is streamed to the user in real-time. 
Do not skip this.\n\nPART 2 — TOOL CALL: After the text, call the \\`structuredResponse\\` tool with the JSON payload. The \\`chatbotMessage\\` field MUST contain the same text you wrote in Part 1.\n\nIf you call the tool without writing text first, the response will be broken.`\n\n/**\n * Configuration for creating a Bedrock provider.\n */\nexport interface BedrockProviderConfig {\n /** AWS region for the Bedrock client */\n region?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * Uses Bedrock model ARNs, e.g. 'us.anthropic.claude-sonnet-4-5-20250929-v1:0'\n */\n models: string[]\n}\n\ntype SendWithModelParams = {\n modelId: string\n systemPrompt: string\n mergedMessages: ReturnType<typeof mergeSequentialMessages>\n inputSchema: ToolInputSchema | undefined\n}\n\nfunction getCommandInput(params: SendWithModelParams, isStream: boolean) {\n return {\n modelId: params.modelId,\n system: [{ text: params.systemPrompt }],\n messages: params.mergedMessages,\n toolConfig: {\n tools: [\n {\n toolSpec: {\n name: 'structuredResponse',\n description: isStream\n ? 'Submit your structured response data. IMPORTANT: You MUST write your full text reply BEFORE calling this tool. Never call this tool as your first action.'\n : 'A tool to generate a structured response',\n inputSchema: params.inputSchema,\n },\n },\n ],\n toolChoice: isStream ? { auto: {} } : { any: {} },\n },\n }\n}\n\nasync function sendWithModel(client: BedrockRuntimeClient, params: SendWithModelParams): Promise<ProviderResponse> {\n const command = new ConverseCommand(getCommandInput(params, false))\n const response = await client.send(command)\n\n const contentBlocks = response.output?.message?.content ?? 
[]\n const toolUseBlock = contentBlocks.find((block) => block.toolUse)\n const result = toolUseBlock?.toolUse?.input\n\n const isValidResponse =\n result &&\n typeof result === 'object' &&\n 'conversationPayload' in result &&\n 'chatbotMessage' in result &&\n typeof result.conversationPayload === 'object' &&\n typeof result.chatbotMessage === 'string'\n\n if (!isValidResponse) {\n throw new Error('Bedrock returned invalid tool response')\n }\n\n return {\n content: JSON.stringify(result),\n usage: response.usage\n ? {\n inputTokens: response.usage.inputTokens,\n outputTokens: response.usage.outputTokens,\n totalTokens: (response.usage.inputTokens ?? 0) + (response.usage.outputTokens ?? 0),\n }\n : undefined,\n }\n}\n\nasync function sendStreamWithModel(\n client: BedrockRuntimeClient,\n params: SendWithModelParams,\n): Promise<AsyncIterable<ConverseStreamOutput>> {\n const command = new ConverseStreamCommand(getCommandInput(params, true))\n const response = await client.send(command)\n\n if (!response.stream) {\n throw new Error('No stream returned from Bedrock')\n }\n\n return response.stream\n}\n\nfunction prepareRequest(request: ProviderRequest) {\n const systemPrompt = RESPONSE_FORMAT_INSTRUCTIONS + '\\n\\n' + request.systemPrompt\n const jsonSchema = request.responseSchema.toJSONSchema()\n const inputSchema = { json: jsonSchema } as ToolInputSchema\n\n // Bedrock requires messages to start with a user message — prepend \"Hi\" if needed\n const prependedMessages: ProviderMessage[] = [{ role: 'user', content: 'Hi\\n' }, ...request.messages]\n const mergedMessages = mergeSequentialMessages(prependedMessages)\n\n return { systemPrompt, inputSchema, mergedMessages }\n}\n\n/**\n * Creates an AWS Bedrock provider instance.\n *\n * @param config - Bedrock provider configuration\n * @returns A Provider implementation using AWS Bedrock\n *\n * @example\n * ```typescript\n * const provider = createBedrockProvider({\n * models: 
['us.anthropic.claude-sonnet-4-5-20250929-v1:0'],\n * region: 'us-east-1',\n * })\n * ```\n */\nexport function createBedrockProvider(config: BedrockProviderConfig): Provider {\n const client = new BedrockRuntimeClient(config.region ? { region: config.region } : {})\n const models = config.models\n\n return {\n name: 'bedrock',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const { systemPrompt, inputSchema, mergedMessages } = prepareRequest(request)\n\n return executeWithFallback(models, (modelId) =>\n sendWithModel(client, {\n modelId,\n systemPrompt,\n mergedMessages,\n inputSchema,\n }),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const { systemPrompt, inputSchema, mergedMessages } = prepareRequest(request)\n\n const rawStream = await executeWithFallback(models, (modelId) =>\n sendStreamWithModel(client, {\n modelId,\n systemPrompt,\n mergedMessages,\n inputSchema,\n }),\n )\n\n yield* processBedrockStream<TState>(rawStream, 'bedrock', request.responseSchema)\n },\n }\n}\n"]}
|
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
var OpenAI = require('openai');
|
|
4
|
+
var client = require('@ag-ui/client');
|
|
5
|
+
|
|
6
|
+
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
7
|
+
|
|
8
|
+
var OpenAI__default = /*#__PURE__*/_interopDefault(OpenAI);
|
|
9
|
+
|
|
10
|
+
// openai/index.ts
|
|
11
|
+
// JSON key whose string value is streamed to the user as it arrives.
var CHATBOT_MESSAGE_KEY = '"chatbotMessage":"';
// Streams the chatbotMessage field out of an OpenAI JSON-mode response as it
// is generated (via processChar), then parses and validates the complete
// JSON buffer once the stream ends (via parseResponse).
async function* processOpenAIStream(rawStream, agentId, schema) {
  // Entire raw JSON accumulated across chunks; parsed after the stream ends.
  let fullBuffer = "";
  // Scanner state: 0 = searching for the chatbotMessage key,
  // 1 = inside its string value, 2 = past the closing quote.
  let state = 0 /* SEARCHING */;
  // True when the previous character was a backslash inside the value.
  let escapeNext = false;
  try {
    for await (const chunk of rawStream) {
      const delta = chunk.choices[0]?.delta?.content;
      if (!delta) continue;
      // Feed the scanner one character at a time so the chatbotMessage value
      // can be emitted incrementally even when split across chunks.
      for (const char of delta) {
        fullBuffer += char;
        const result = processChar(char, state, escapeNext, fullBuffer, agentId);
        state = result.state;
        escapeNext = result.escapeNext;
        if (result.event) yield result.event;
      }
      const finishReason = chunk.choices[0]?.finish_reason;
      if (finishReason === "length") {
        // Truncated output can never be valid JSON; surface an error and stop.
        yield {
          type: client.EventType.RUN_ERROR,
          message: "OpenAI response truncated (finish_reason: length)",
          agentId
        };
        return;
      }
    }
    if (!fullBuffer) {
      yield {
        type: client.EventType.RUN_ERROR,
        message: "OpenAI stream completed with empty response",
        agentId
      };
      return;
    }
    yield parseResponse(fullBuffer, schema, agentId);
  } catch (err) {
    // Stream iteration failures are reported as events rather than thrown.
    yield {
      type: client.EventType.RUN_ERROR,
      message: `OpenAI stream error: ${err.message}`,
      agentId
    };
  }
}
|
|
54
|
+
/**
 * Parses the complete response buffer as JSON and validates it against the
 * schema. Returns a TOOL_CALL_RESULT event carrying the validated payload on
 * success, or a RUN_ERROR event (never throws) on parse/validation failure.
 */
function parseResponse(buffer, schema, agentId) {
  try {
    const validated = schema.parse(JSON.parse(buffer));
    return {
      type: client.EventType.TOOL_CALL_RESULT,
      toolCallId: "",
      messageId: "",
      content: JSON.stringify(validated),
      agentId
    };
  } catch (err) {
    return {
      type: client.EventType.RUN_ERROR,
      message: `Response parse/validation failed: ${err.message}`,
      agentId
    };
  }
}
|
|
73
|
+
/**
 * Advances the chatbotMessage scanner by one character.
 * States: 0 = searching for the key, 1 = inside the string value,
 * 2 = past the closing quote. While in state 1, value characters are emitted
 * as TEXT_MESSAGE_CONTENT events (with JSON escapes decoded); backslashes
 * defer emission to the following character via escapeNext.
 */
function processChar(char, state, escapeNext, fullBuffer, agentId) {
  const emit = (delta) => ({
    state: 1 /* IN_CHATBOT_MESSAGE */,
    escapeNext: false,
    event: {
      type: client.EventType.TEXT_MESSAGE_CONTENT,
      messageId: "",
      delta,
      agentId
    }
  });
  if (state === 0 /* SEARCHING */) {
    if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {
      return { state: 1 /* IN_CHATBOT_MESSAGE */, escapeNext: false, event: null };
    }
    return { state, escapeNext, event: null };
  }
  if (state === 1 /* IN_CHATBOT_MESSAGE */) {
    if (escapeNext) return emit(unescapeChar(char));
    if (char === "\\") return { state, escapeNext: true, event: null };
    if (char === '"') return { state: 2 /* PAST_CHATBOT_MESSAGE */, escapeNext: false, event: null };
    return emit(char);
  }
  // State 2: everything after the closing quote is ignored.
  return { state, escapeNext, event: null };
}
|
|
112
|
+
/**
 * Decodes a single JSON string escape: given the character that followed a
 * backslash, returns the character it represents.
 *
 * Fixes: `\t` previously decoded to a space instead of a tab, and `\b`/`\f`
 * were not handled at all (per RFC 8259 string escapes). Unknown escapes —
 * including the `u` of a `\uXXXX` sequence, which cannot be decoded one
 * character at a time — pass through unchanged, matching the prior fallback.
 */
function unescapeChar(char) {
  switch (char) {
    case '"':
      return '"';
    case "\\":
      return "\\";
    case "n":
      return "\n";
    case "t":
      return "\t";
    case "r":
      return "\r";
    case "b":
      return "\b";
    case "f":
      return "\f";
    case "/":
      return "/";
    default:
      return char;
  }
}
|
|
130
|
+
|
|
131
|
+
// utils/executeWithFallback.ts
|
|
132
|
+
/**
 * Tries each model in priority order until one succeeds. The error raised by
 * the final model propagates to the caller; an empty model list throws a
 * generic failure.
 *
 * @param models - Model identifiers, primary first
 * @param action - Async operation attempted per model
 * @returns Result of the first successful model
 */
async function executeWithFallback(models, action) {
  for (let attempt = 0; attempt < models.length; attempt++) {
    const isLastModel = attempt === models.length - 1;
    try {
      return await action(models[attempt]);
    } catch (error) {
      if (isLastModel) throw error;
      // Otherwise swallow and fall through to the next model.
    }
  }
  throw new Error("All models failed");
}
|
|
146
|
+
|
|
147
|
+
// openai/index.ts
|
|
148
|
+
function enforceStrictSchema(schema) {
|
|
149
|
+
const result = { ...schema };
|
|
150
|
+
if (result.type === "object" && result.properties) {
|
|
151
|
+
result.additionalProperties = false;
|
|
152
|
+
result.required = Object.keys(result.properties);
|
|
153
|
+
const props = result.properties;
|
|
154
|
+
const strictProps = {};
|
|
155
|
+
for (const [key, value] of Object.entries(props)) {
|
|
156
|
+
strictProps[key] = enforceStrictSchema(value);
|
|
157
|
+
}
|
|
158
|
+
result.properties = strictProps;
|
|
159
|
+
}
|
|
160
|
+
if (result.items && typeof result.items === "object") {
|
|
161
|
+
result.items = enforceStrictSchema(result.items);
|
|
162
|
+
}
|
|
163
|
+
for (const keyword of ["anyOf", "oneOf", "allOf"]) {
|
|
164
|
+
if (Array.isArray(result[keyword])) {
|
|
165
|
+
result[keyword] = result[keyword].map((s) => enforceStrictSchema(s));
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
return result;
|
|
169
|
+
}
|
|
170
|
+
function toOpenAIMessages(systemPrompt, messages) {
|
|
171
|
+
const openAIMessages = [{ role: "system", content: systemPrompt }];
|
|
172
|
+
for (const msg of messages) {
|
|
173
|
+
openAIMessages.push({ role: msg.role, content: msg.content });
|
|
174
|
+
}
|
|
175
|
+
return openAIMessages;
|
|
176
|
+
}
|
|
177
|
+
function prepareRequest(request) {
|
|
178
|
+
const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema());
|
|
179
|
+
const responseFormat = {
|
|
180
|
+
type: "json_schema",
|
|
181
|
+
json_schema: {
|
|
182
|
+
name: "structuredResponse",
|
|
183
|
+
strict: true,
|
|
184
|
+
schema: jsonSchema
|
|
185
|
+
}
|
|
186
|
+
};
|
|
187
|
+
const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages);
|
|
188
|
+
return { responseFormat, openAIMessages };
|
|
189
|
+
}
|
|
190
|
+
async function sendWithModel(client, model, openAIMessages, responseFormat) {
|
|
191
|
+
const response = await client.chat.completions.create({
|
|
192
|
+
model,
|
|
193
|
+
messages: openAIMessages,
|
|
194
|
+
response_format: responseFormat
|
|
195
|
+
});
|
|
196
|
+
const choice = response.choices[0];
|
|
197
|
+
if (!choice?.message?.content) {
|
|
198
|
+
throw new Error("OpenAI returned empty response");
|
|
199
|
+
}
|
|
200
|
+
if (choice.finish_reason === "length") {
|
|
201
|
+
throw new Error("OpenAI response truncated (finish_reason: length)");
|
|
202
|
+
}
|
|
203
|
+
return {
|
|
204
|
+
content: choice.message.content,
|
|
205
|
+
usage: response.usage ? {
|
|
206
|
+
inputTokens: response.usage.prompt_tokens,
|
|
207
|
+
outputTokens: response.usage.completion_tokens,
|
|
208
|
+
totalTokens: response.usage.total_tokens
|
|
209
|
+
} : void 0
|
|
210
|
+
};
|
|
211
|
+
}
|
|
212
|
+
async function sendStreamWithModel(client, model, openAIMessages, responseFormat) {
|
|
213
|
+
return client.chat.completions.create({
|
|
214
|
+
model,
|
|
215
|
+
messages: openAIMessages,
|
|
216
|
+
response_format: responseFormat,
|
|
217
|
+
stream: true
|
|
218
|
+
});
|
|
219
|
+
}
|
|
220
|
+
function createOpenAIProvider(config) {
|
|
221
|
+
const client = new OpenAI__default.default({
|
|
222
|
+
apiKey: config.apiKey,
|
|
223
|
+
baseURL: config.baseURL,
|
|
224
|
+
organization: config.organization
|
|
225
|
+
});
|
|
226
|
+
const models = config.models;
|
|
227
|
+
return {
|
|
228
|
+
name: "openai",
|
|
229
|
+
async sendRequest(request) {
|
|
230
|
+
const { responseFormat, openAIMessages } = prepareRequest(request);
|
|
231
|
+
return executeWithFallback(
|
|
232
|
+
models,
|
|
233
|
+
(model) => sendWithModel(client, model, openAIMessages, responseFormat)
|
|
234
|
+
);
|
|
235
|
+
},
|
|
236
|
+
async *sendRequestStream(request) {
|
|
237
|
+
const { responseFormat, openAIMessages } = prepareRequest(request);
|
|
238
|
+
const rawStream = await executeWithFallback(
|
|
239
|
+
models,
|
|
240
|
+
(model) => sendStreamWithModel(client, model, openAIMessages, responseFormat)
|
|
241
|
+
);
|
|
242
|
+
yield* processOpenAIStream(rawStream, "openai", request.responseSchema);
|
|
243
|
+
}
|
|
244
|
+
};
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
exports.createOpenAIProvider = createOpenAIProvider;
|
|
248
|
+
//# sourceMappingURL=index.cjs.map
|
|
249
|
+
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../openai/streamProcessor.ts","../../utils/executeWithFallback.ts","../../openai/index.ts"],"names":["EventType","OpenAI"],"mappings":";;;;;;;;;;AAaA,IAAM,mBAAA,GAAsB,oBAAA;AAe5B,gBAAuB,mBAAA,CACrB,SAAA,EACA,OAAA,EACA,MAAA,EACqC;AACrC,EAAA,IAAI,UAAA,GAAa,EAAA;AACjB,EAAA,IAAI,KAAA,GAAqB,CAAA;AACzB,EAAA,IAAI,UAAA,GAAa,KAAA;AAEjB,EAAA,IAAI;AACF,IAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AACnC,MAAA,MAAM,KAAA,GAAQ,KAAA,CAAM,OAAA,CAAQ,CAAC,GAAG,KAAA,EAAO,OAAA;AACvC,MAAA,IAAI,CAAC,KAAA,EAAO;AAEZ,MAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACxB,QAAA,UAAA,IAAc,IAAA;AACd,QAAA,MAAM,SAA6B,WAAA,CAAoB,IAAA,EAAM,KAAA,EAAO,UAAA,EAAY,YAAY,OAAO,CAAA;AACnG,QAAA,KAAA,GAAQ,MAAA,CAAO,KAAA;AACf,QAAA,UAAA,GAAa,MAAA,CAAO,UAAA;AACpB,QAAA,IAAI,MAAA,CAAO,KAAA,EAAO,MAAM,MAAA,CAAO,KAAA;AAAA,MACjC;AAGA,MAAA,MAAM,YAAA,GAAe,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,aAAA;AACvC,MAAA,IAAI,iBAAiB,QAAA,EAAU;AAC7B,QAAA,MAAM;AAAA,UACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,UAChB,OAAA,EAAS,mDAAA;AAAA,UACT;AAAA,SACF;AACA,QAAA;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,CAAC,UAAA,EAAY;AACf,MAAA,MAAM;AAAA,QACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,6CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,aAAA,CAAsB,UAAA,EAAY,MAAA,EAAQ,OAAO,CAAA;AAAA,EACzD,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,qBAAA,EAAyB,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACvD;AAAA,KACF;AAAA,EACF;AACF;AAEA,SAAS,aAAA,CACP,MAAA,EACA,MAAA,EACA,OAAA,EACqB;AACrB,EAAA,IAAI;AACF,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,MAAM,CAAA;AAChC,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,MAAM,CAAA;AACrC,IAAA,OAAO;AAAA,MACL,MAAMA,gBAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,OAAO;AAAA,MACL,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,kCAAA,EAAsC,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACpE;AAAA,KACF;AAAA,EACF;AACF;AAQA,SAAS,WAAA,CACP,IAAA,EACA,KAAA,EACA,UAAA,EACA,YACA,OAAA,EACoB;AACpB,EAAA,QAAQ,KAAA;AAAO,IACb,KAAK,CA
AA;AACH,MAAA,IAAI,UAAA,CAAW,QAAA,CAAS,mBAAmB,CAAA,EAAG;AAC5C,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,2BAAgC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACjF;AACA,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA,IAE1C,KAAK,CAAA;AACH,MAAA,IAAI,UAAA,EAAY;AACd,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA,EAAY,KAAA;AAAA,UACZ,KAAA,EAAO;AAAA,YACL,MAAMA,gBAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,aAAa,IAAI,CAAA;AAAA,YACxB;AAAA;AACF,SACF;AAAA,MACF,CAAA,MAAA,IAAW,SAAS,IAAA,EAAM;AACxB,QAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,IAAA,EAAM,OAAO,IAAA,EAAK;AAAA,MAChD,CAAA,MAAA,IAAW,SAAS,GAAA,EAAK;AACvB,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,6BAAkC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACnF,CAAA,MAAO;AACL,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA;AAAA,UACA,KAAA,EAAO;AAAA,YACL,MAAMA,gBAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,IAAA;AAAA,YACP;AAAA;AACF,SACF;AAAA,MACF;AAAA,IAEF,KAAK,CAAA;AACH,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA;AAE9C;AAGA,SAAS,aAAa,IAAA,EAAsB;AAC1C,EAAA,QAAQ,IAAA;AAAM,IACZ,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,IAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT;AAEE,MAAA,OAAO,IAAA;AAAA;AAEb;;;ACjKA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACDA,SAAS,oBAAoB,MAAA,EAAgC;AAC3D,EAAA,MAAM,MAAA,GAAS,EAAE,GAAG,MAAA,EAAO;AAE3B,EAAA,IAAI,MAAA,CAAO,IAAA,KAAS,QAAA,IAAY,MAAA,CAAO,UAAA,EAAY;AACjD,IAAA,MAAA,CAAO,oBAAA,GAAuB,KAAA;AAC9B,IAAA,MAAA,CAAO,QAAA,GAAW,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,UAAqC,CAAA;AAC1E,IAAA,MAAM,QAAQ,MAAA,CAAO,UA
AA;AACrB,IAAA,MAAM,cAA0C,EAAC;AACjD,IAAA,KAAA,MAAW,CAAC,GAAA,EAAK,KAAK,KAAK,MAAA,CAAO,OAAA,CAAQ,KAAK,CAAA,EAAG;AAChD,MAAA,WAAA,CAAY,GAAG,CAAA,GAAI,mBAAA,CAAoB,KAAK,CAAA;AAAA,IAC9C;AACA,IAAA,MAAA,CAAO,UAAA,GAAa,WAAA;AAAA,EACtB;AAEA,EAAA,IAAI,MAAA,CAAO,KAAA,IAAS,OAAO,MAAA,CAAO,UAAU,QAAA,EAAU;AACpD,IAAA,MAAA,CAAO,KAAA,GAAQ,mBAAA,CAAoB,MAAA,CAAO,KAAmB,CAAA;AAAA,EAC/D;AAGA,EAAA,KAAA,MAAW,OAAA,IAAW,CAAC,OAAA,EAAS,OAAA,EAAS,OAAO,CAAA,EAAY;AAC1D,IAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAC,CAAA,EAAG;AAClC,MAAA,MAAA,CAAO,OAAO,CAAA,GAAK,MAAA,CAAO,OAAO,CAAA,CAAmB,IAAI,CAAC,CAAA,KAAM,mBAAA,CAAoB,CAAC,CAAC,CAAA;AAAA,IACvF;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AAEA,SAAS,gBAAA,CACP,cACA,QAAA,EAC8B;AAC9B,EAAA,MAAM,iBAA+C,CAAC,EAAE,MAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAE/F,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,cAAA,CAAe,IAAA,CAAK,EAAE,IAAA,EAAM,GAAA,CAAI,MAAM,OAAA,EAAS,GAAA,CAAI,SAAS,CAAA;AAAA,EAC9D;AAEA,EAAA,OAAO,cAAA;AACT;AAEA,SAAS,eAAe,OAAA,EAA0B;AAChD,EAAA,MAAM,UAAA,GAAa,mBAAA,CAAoB,OAAA,CAAQ,cAAA,CAAe,cAA4B,CAAA;AAC1F,EAAA,MAAM,cAAA,GAAiB;AAAA,IACrB,IAAA,EAAM,aAAA;AAAA,IACN,WAAA,EAAa;AAAA,MACX,IAAA,EAAM,oBAAA;AAAA,MACN,MAAA,EAAQ,IAAA;AAAA,MACR,MAAA,EAAQ;AAAA;AACV,GACF;AACA,EAAA,MAAM,cAAA,GAAiB,gBAAA,CAAiB,OAAA,CAAQ,YAAA,EAAc,QAAQ,QAAQ,CAAA;AAE9E,EAAA,OAAO,EAAE,gBAAgB,cAAA,EAAe;AAC1C;AAEA,eAAe,aAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cAAA,EAI2B;AAC3B,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,IACpD,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB;AAAA,GAClB,CAAA;AAED,EAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,EAAA,IAAI,CAAC,MAAA,EAAQ,OAAA,EAAS,OAAA,EAAS;AAC7B,IAAA,MAAM,IAAI,MAAM,gCAAgC,CAAA;AAAA,EAClD;AAEA,EAAA,IAAI,MAAA,CAAO,kBAAkB,QAAA,EAAU;AACrC,IAAA,MAAM,IAAI,MAAM,mDAAmD,CAAA;AAAA,EACrE;AAEA,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,OAAO,OAAA,CAAQ,OAAA;AAAA,IACxB,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,MACE,WAAA,EAAa,SAAS,KAAA,CAAM,aAAA;AAAA,MAC5B,YAAA,EAAc,SAAS,KAAA,CAAM,iBAAA;AAAA,MAC7B,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,KAC9B,GACA;AAAA,GACN;AACF;AAEA,eAAe,mBA
AA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cAAA,EAIA;AACA,EAAA,OAAO,MAAA,CAAO,IAAA,CAAK,WAAA,CAAY,MAAA,CAAO;AAAA,IACpC,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB,cAAA;AAAA,IACjB,MAAA,EAAQ;AAAA,GACT,CAAA;AACH;AAmBO,SAAS,qBAAqB,MAAA,EAAwC;AAC3E,EAAA,MAAM,MAAA,GAAS,IAAIC,uBAAA,CAAO;AAAA,IACxB,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,cAAc,MAAA,CAAO;AAAA,GACtB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KAClC,aAAA,CAAc,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OAC7D;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,MAAM,YAAY,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KACnD,mBAAA,CAAoB,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OACnE;AAEA,MAAA,OAAO,mBAAA,CAA4B,SAAA,EAAW,QAAA,EAAU,OAAA,CAAQ,cAAc,CAAA;AAAA,IAChF;AAAA,GACF;AACF","file":"index.cjs","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { Stream } from 'openai/streaming'\nimport type { ChatCompletionChunk } from 'openai/resources/chat/completions'\n\n/** State-machine states for extracting chatbotMessage from structured JSON stream */\nconst enum ParserState {\n SEARCHING = 0,\n IN_CHATBOT_MESSAGE = 1,\n PAST_CHATBOT_MESSAGE = 2,\n}\n\nconst CHATBOT_MESSAGE_KEY = '\"chatbotMessage\":\"'\n\n/**\n * Processes an OpenAI streaming response into AG-UI events.\n *\n * OpenAI structured output returns the entire response as JSON. The chatbotMessage\n * field is embedded within that JSON. 
This processor uses a character-level state\n * machine to extract chatbotMessage text progressively during streaming, yielding\n * TEXT_MESSAGE_CONTENT deltas in real-time.\n *\n * @param rawStream - OpenAI chat completion stream\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processOpenAIStream<TState extends BaseState = BaseState>(\n rawStream: Stream<ChatCompletionChunk>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let fullBuffer = ''\n let state: ParserState = ParserState.SEARCHING\n let escapeNext = false\n\n try {\n for await (const chunk of rawStream) {\n const delta = chunk.choices[0]?.delta?.content\n if (!delta) continue\n\n for (const char of delta) {\n fullBuffer += char\n const result: CharResult<TState> = processChar<TState>(char, state, escapeNext, fullBuffer, agentId)\n state = result.state\n escapeNext = result.escapeNext\n if (result.event) yield result.event\n }\n\n // Check for truncation\n const finishReason = chunk.choices[0]?.finish_reason\n if (finishReason === 'length') {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI response truncated (finish_reason: length)',\n agentId,\n } as StreamEvent<TState>\n return\n }\n }\n\n // Stream complete — parse and validate the full response\n if (!fullBuffer) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI stream completed with empty response',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n yield parseResponse<TState>(fullBuffer, schema, agentId)\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `OpenAI stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\nfunction parseResponse<TState extends BaseState>(\n buffer: string,\n schema: ZodType,\n agentId: AgentId,\n): StreamEvent<TState> {\n try {\n const parsed = JSON.parse(buffer) as 
Record<string, unknown>\n const validated = schema.parse(parsed)\n return {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n return {\n type: EventType.RUN_ERROR,\n message: `Response parse/validation failed: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\ninterface CharResult<TState extends BaseState> {\n state: ParserState\n escapeNext: boolean\n event: StreamEvent<TState> | null\n}\n\nfunction processChar<TState extends BaseState>(\n char: string,\n state: ParserState,\n escapeNext: boolean,\n fullBuffer: string,\n agentId: AgentId,\n): CharResult<TState> {\n switch (state) {\n case ParserState.SEARCHING:\n if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {\n return { state: ParserState.IN_CHATBOT_MESSAGE, escapeNext: false, event: null }\n }\n return { state, escapeNext, event: null }\n\n case ParserState.IN_CHATBOT_MESSAGE:\n if (escapeNext) {\n return {\n state,\n escapeNext: false,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: unescapeChar(char),\n agentId,\n } as StreamEvent<TState>,\n }\n } else if (char === '\\\\') {\n return { state, escapeNext: true, event: null }\n } else if (char === '\"') {\n return { state: ParserState.PAST_CHATBOT_MESSAGE, escapeNext: false, event: null }\n } else {\n return {\n state,\n escapeNext,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: char,\n agentId,\n } as StreamEvent<TState>,\n }\n }\n\n case ParserState.PAST_CHATBOT_MESSAGE:\n return { state, escapeNext, event: null }\n }\n}\n\n/** Converts a JSON escape character to its actual value */\nfunction unescapeChar(char: string): string {\n switch (char) {\n case '\"':\n return '\"'\n case '\\\\':\n return '\\\\'\n case 'n':\n return '\\n'\n case 't':\n return '\\t'\n case 'r':\n return '\\r'\n case '/':\n return '/'\n default:\n // For \\uXXXX and unknown escapes, return 
as-is (the character after the backslash)\n return char\n }\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import OpenAI from 'openai'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processOpenAIStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\nimport type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'\n\n/**\n * Configuration for creating an OpenAI provider.\n */\nexport interface OpenAIProviderConfig {\n /** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. 
['gpt-4o', 'gpt-4o-mini']\n */\n models: string[]\n /** Optional base URL for Azure OpenAI or compatible endpoints */\n baseURL?: string\n /** Optional OpenAI organization ID */\n organization?: string\n}\n\ntype JsonSchema = Record<string, unknown>\n\n/**\n * Recursively enforces OpenAI structured output requirements on a JSON Schema:\n * - Adds `additionalProperties: false` to all object types\n * - Ensures all properties are listed in `required`\n */\nfunction enforceStrictSchema(schema: JsonSchema): JsonSchema {\n const result = { ...schema }\n\n if (result.type === 'object' && result.properties) {\n result.additionalProperties = false\n result.required = Object.keys(result.properties as Record<string, unknown>)\n const props = result.properties as Record<string, JsonSchema>\n const strictProps: Record<string, JsonSchema> = {}\n for (const [key, value] of Object.entries(props)) {\n strictProps[key] = enforceStrictSchema(value)\n }\n result.properties = strictProps\n }\n\n if (result.items && typeof result.items === 'object') {\n result.items = enforceStrictSchema(result.items as JsonSchema)\n }\n\n // Handle anyOf/oneOf/allOf\n for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) {\n if (Array.isArray(result[keyword])) {\n result[keyword] = (result[keyword] as JsonSchema[]).map((s) => enforceStrictSchema(s))\n }\n }\n\n return result\n}\n\nfunction toOpenAIMessages(\n systemPrompt: string,\n messages: ProviderMessage[],\n): ChatCompletionMessageParam[] {\n const openAIMessages: ChatCompletionMessageParam[] = [{ role: 'system', content: systemPrompt }]\n\n for (const msg of messages) {\n openAIMessages.push({ role: msg.role, content: msg.content })\n }\n\n return openAIMessages\n}\n\nfunction prepareRequest(request: ProviderRequest) {\n const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema() as JsonSchema)\n const responseFormat = {\n type: 'json_schema' as const,\n json_schema: {\n name: 'structuredResponse',\n strict: true,\n 
schema: jsonSchema,\n },\n }\n const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages)\n\n return { responseFormat, openAIMessages }\n}\n\nasync function sendWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n): Promise<ProviderResponse> {\n const response = await client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n })\n\n const choice = response.choices[0]\n if (!choice?.message?.content) {\n throw new Error('OpenAI returned empty response')\n }\n\n if (choice.finish_reason === 'length') {\n throw new Error('OpenAI response truncated (finish_reason: length)')\n }\n\n return {\n content: choice.message.content,\n usage: response.usage\n ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens,\n totalTokens: response.usage.total_tokens,\n }\n : undefined,\n }\n}\n\nasync function sendStreamWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n) {\n return client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n stream: true,\n })\n}\n\n/**\n * Creates an OpenAI provider instance.\n *\n * Uses OpenAI's structured output (response_format with JSON Schema) for both\n * blocking and streaming paths, with real-time chatbotMessage text streaming\n * via a custom stream processor.\n *\n * @param config - OpenAI provider configuration\n * @returns A Provider implementation using OpenAI\n *\n * @example\n * ```typescript\n * const provider = createOpenAIProvider({\n * models: ['gpt-4o', 'gpt-4o-mini'],\n * })\n * ```\n */\nexport function createOpenAIProvider(config: OpenAIProviderConfig): 
Provider {\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n organization: config.organization,\n })\n const models = config.models\n\n return {\n name: 'openai',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(client, model, openAIMessages, responseFormat),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n const rawStream = await executeWithFallback(models, (model) =>\n sendStreamWithModel(client, model, openAIMessages, responseFormat),\n )\n\n yield* processOpenAIStream<TState>(rawStream, 'openai', request.responseSchema)\n },\n }\n}\n"]}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { Provider } from '@genui-a3/core';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Configuration for creating an OpenAI provider.
|
|
5
|
+
*/
|
|
6
|
+
interface OpenAIProviderConfig {
|
|
7
|
+
/** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */
|
|
8
|
+
apiKey?: string;
|
|
9
|
+
/**
|
|
10
|
+
* Model identifiers in order of preference (first = primary, rest = fallbacks).
|
|
11
|
+
* e.g. ['gpt-4o', 'gpt-4o-mini']
|
|
12
|
+
*/
|
|
13
|
+
models: string[];
|
|
14
|
+
/** Optional base URL for Azure OpenAI or compatible endpoints */
|
|
15
|
+
baseURL?: string;
|
|
16
|
+
/** Optional OpenAI organization ID */
|
|
17
|
+
organization?: string;
|
|
18
|
+
}
|
|
19
|
+
/**
|
|
20
|
+
* Creates an OpenAI provider instance.
|
|
21
|
+
*
|
|
22
|
+
* Uses OpenAI's structured output (response_format with JSON Schema) for both
|
|
23
|
+
* blocking and streaming paths, with real-time chatbotMessage text streaming
|
|
24
|
+
* via a custom stream processor.
|
|
25
|
+
*
|
|
26
|
+
* @param config - OpenAI provider configuration
|
|
27
|
+
* @returns A Provider implementation using OpenAI
|
|
28
|
+
*
|
|
29
|
+
* @example
|
|
30
|
+
* ```typescript
|
|
31
|
+
* const provider = createOpenAIProvider({
|
|
32
|
+
* models: ['gpt-4o', 'gpt-4o-mini'],
|
|
33
|
+
* })
|
|
34
|
+
* ```
|
|
35
|
+
*/
|
|
36
|
+
declare function createOpenAIProvider(config: OpenAIProviderConfig): Provider;
|
|
37
|
+
|
|
38
|
+
export { type OpenAIProviderConfig, createOpenAIProvider };
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { Provider } from '@genui-a3/core';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Configuration for creating an OpenAI provider.
|
|
5
|
+
*/
|
|
6
|
+
interface OpenAIProviderConfig {
|
|
7
|
+
/** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */
|
|
8
|
+
apiKey?: string;
|
|
9
|
+
/**
|
|
10
|
+
* Model identifiers in order of preference (first = primary, rest = fallbacks).
|
|
11
|
+
* e.g. ['gpt-4o', 'gpt-4o-mini']
|
|
12
|
+
*/
|
|
13
|
+
models: string[];
|
|
14
|
+
/** Optional base URL for Azure OpenAI or compatible endpoints */
|
|
15
|
+
baseURL?: string;
|
|
16
|
+
/** Optional OpenAI organization ID */
|
|
17
|
+
organization?: string;
|
|
18
|
+
}
|
|
19
|
+
/**
|
|
20
|
+
* Creates an OpenAI provider instance.
|
|
21
|
+
*
|
|
22
|
+
* Uses OpenAI's structured output (response_format with JSON Schema) for both
|
|
23
|
+
* blocking and streaming paths, with real-time chatbotMessage text streaming
|
|
24
|
+
* via a custom stream processor.
|
|
25
|
+
*
|
|
26
|
+
* @param config - OpenAI provider configuration
|
|
27
|
+
* @returns A Provider implementation using OpenAI
|
|
28
|
+
*
|
|
29
|
+
* @example
|
|
30
|
+
* ```typescript
|
|
31
|
+
* const provider = createOpenAIProvider({
|
|
32
|
+
* models: ['gpt-4o', 'gpt-4o-mini'],
|
|
33
|
+
* })
|
|
34
|
+
* ```
|
|
35
|
+
*/
|
|
36
|
+
declare function createOpenAIProvider(config: OpenAIProviderConfig): Provider;
|
|
37
|
+
|
|
38
|
+
export { type OpenAIProviderConfig, createOpenAIProvider };
|
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
import { EventType } from '@ag-ui/client';
|
|
3
|
+
|
|
4
|
+
// openai/index.ts
|
|
5
|
+
var CHATBOT_MESSAGE_KEY = '"chatbotMessage":"';
|
|
6
|
+
async function* processOpenAIStream(rawStream, agentId, schema) {
|
|
7
|
+
let fullBuffer = "";
|
|
8
|
+
let state = 0 /* SEARCHING */;
|
|
9
|
+
let escapeNext = false;
|
|
10
|
+
try {
|
|
11
|
+
for await (const chunk of rawStream) {
|
|
12
|
+
const delta = chunk.choices[0]?.delta?.content;
|
|
13
|
+
if (!delta) continue;
|
|
14
|
+
for (const char of delta) {
|
|
15
|
+
fullBuffer += char;
|
|
16
|
+
const result = processChar(char, state, escapeNext, fullBuffer, agentId);
|
|
17
|
+
state = result.state;
|
|
18
|
+
escapeNext = result.escapeNext;
|
|
19
|
+
if (result.event) yield result.event;
|
|
20
|
+
}
|
|
21
|
+
const finishReason = chunk.choices[0]?.finish_reason;
|
|
22
|
+
if (finishReason === "length") {
|
|
23
|
+
yield {
|
|
24
|
+
type: EventType.RUN_ERROR,
|
|
25
|
+
message: "OpenAI response truncated (finish_reason: length)",
|
|
26
|
+
agentId
|
|
27
|
+
};
|
|
28
|
+
return;
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
if (!fullBuffer) {
|
|
32
|
+
yield {
|
|
33
|
+
type: EventType.RUN_ERROR,
|
|
34
|
+
message: "OpenAI stream completed with empty response",
|
|
35
|
+
agentId
|
|
36
|
+
};
|
|
37
|
+
return;
|
|
38
|
+
}
|
|
39
|
+
yield parseResponse(fullBuffer, schema, agentId);
|
|
40
|
+
} catch (err) {
|
|
41
|
+
yield {
|
|
42
|
+
type: EventType.RUN_ERROR,
|
|
43
|
+
message: `OpenAI stream error: ${err.message}`,
|
|
44
|
+
agentId
|
|
45
|
+
};
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
function parseResponse(buffer, schema, agentId) {
|
|
49
|
+
try {
|
|
50
|
+
const parsed = JSON.parse(buffer);
|
|
51
|
+
const validated = schema.parse(parsed);
|
|
52
|
+
return {
|
|
53
|
+
type: EventType.TOOL_CALL_RESULT,
|
|
54
|
+
toolCallId: "",
|
|
55
|
+
messageId: "",
|
|
56
|
+
content: JSON.stringify(validated),
|
|
57
|
+
agentId
|
|
58
|
+
};
|
|
59
|
+
} catch (err) {
|
|
60
|
+
return {
|
|
61
|
+
type: EventType.RUN_ERROR,
|
|
62
|
+
message: `Response parse/validation failed: ${err.message}`,
|
|
63
|
+
agentId
|
|
64
|
+
};
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
function processChar(char, state, escapeNext, fullBuffer, agentId) {
|
|
68
|
+
switch (state) {
|
|
69
|
+
case 0 /* SEARCHING */:
|
|
70
|
+
if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {
|
|
71
|
+
return { state: 1 /* IN_CHATBOT_MESSAGE */, escapeNext: false, event: null };
|
|
72
|
+
}
|
|
73
|
+
return { state, escapeNext, event: null };
|
|
74
|
+
case 1 /* IN_CHATBOT_MESSAGE */:
|
|
75
|
+
if (escapeNext) {
|
|
76
|
+
return {
|
|
77
|
+
state,
|
|
78
|
+
escapeNext: false,
|
|
79
|
+
event: {
|
|
80
|
+
type: EventType.TEXT_MESSAGE_CONTENT,
|
|
81
|
+
messageId: "",
|
|
82
|
+
delta: unescapeChar(char),
|
|
83
|
+
agentId
|
|
84
|
+
}
|
|
85
|
+
};
|
|
86
|
+
} else if (char === "\\") {
|
|
87
|
+
return { state, escapeNext: true, event: null };
|
|
88
|
+
} else if (char === '"') {
|
|
89
|
+
return { state: 2 /* PAST_CHATBOT_MESSAGE */, escapeNext: false, event: null };
|
|
90
|
+
} else {
|
|
91
|
+
return {
|
|
92
|
+
state,
|
|
93
|
+
escapeNext,
|
|
94
|
+
event: {
|
|
95
|
+
type: EventType.TEXT_MESSAGE_CONTENT,
|
|
96
|
+
messageId: "",
|
|
97
|
+
delta: char,
|
|
98
|
+
agentId
|
|
99
|
+
}
|
|
100
|
+
};
|
|
101
|
+
}
|
|
102
|
+
case 2 /* PAST_CHATBOT_MESSAGE */:
|
|
103
|
+
return { state, escapeNext, event: null };
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
function unescapeChar(char) {
|
|
107
|
+
switch (char) {
|
|
108
|
+
case '"':
|
|
109
|
+
return '"';
|
|
110
|
+
case "\\":
|
|
111
|
+
return "\\";
|
|
112
|
+
case "n":
|
|
113
|
+
return "\n";
|
|
114
|
+
case "t":
|
|
115
|
+
return " ";
|
|
116
|
+
case "r":
|
|
117
|
+
return "\r";
|
|
118
|
+
case "/":
|
|
119
|
+
return "/";
|
|
120
|
+
default:
|
|
121
|
+
return char;
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
// utils/executeWithFallback.ts
|
|
126
|
+
async function executeWithFallback(models, action) {
|
|
127
|
+
for (let i = 0; i < models.length; i++) {
|
|
128
|
+
const model = models[i];
|
|
129
|
+
try {
|
|
130
|
+
return await action(model);
|
|
131
|
+
} catch (error) {
|
|
132
|
+
const errorObj = error;
|
|
133
|
+
if (i === models.length - 1) {
|
|
134
|
+
throw errorObj;
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
throw new Error("All models failed");
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
// openai/index.ts
|
|
142
|
+
function enforceStrictSchema(schema) {
|
|
143
|
+
const result = { ...schema };
|
|
144
|
+
if (result.type === "object" && result.properties) {
|
|
145
|
+
result.additionalProperties = false;
|
|
146
|
+
result.required = Object.keys(result.properties);
|
|
147
|
+
const props = result.properties;
|
|
148
|
+
const strictProps = {};
|
|
149
|
+
for (const [key, value] of Object.entries(props)) {
|
|
150
|
+
strictProps[key] = enforceStrictSchema(value);
|
|
151
|
+
}
|
|
152
|
+
result.properties = strictProps;
|
|
153
|
+
}
|
|
154
|
+
if (result.items && typeof result.items === "object") {
|
|
155
|
+
result.items = enforceStrictSchema(result.items);
|
|
156
|
+
}
|
|
157
|
+
for (const keyword of ["anyOf", "oneOf", "allOf"]) {
|
|
158
|
+
if (Array.isArray(result[keyword])) {
|
|
159
|
+
result[keyword] = result[keyword].map((s) => enforceStrictSchema(s));
|
|
160
|
+
}
|
|
161
|
+
}
|
|
162
|
+
return result;
|
|
163
|
+
}
|
|
164
|
+
/**
 * Builds the OpenAI chat message array: a leading system message holding
 * the prompt, followed by each conversation message mapped to a plain
 * `{ role, content }` pair.
 *
 * @param {string} systemPrompt - Text for the leading system message.
 * @param {Array<{role: string, content: string}>} messages - Conversation history.
 * @returns {Array<{role: string, content: string}>} Messages for the chat API.
 */
function toOpenAIMessages(systemPrompt, messages) {
  return [
    { role: "system", content: systemPrompt },
    ...messages.map(({ role, content }) => ({ role, content }))
  ];
}
|
|
171
|
+
/**
 * Translates a provider request into the two pieces an OpenAI chat call
 * needs: a strict json_schema response_format (built by hardening the
 * request's response schema) and the full chat message array.
 *
 * @param {{systemPrompt: string, messages: any[], responseSchema: {toJSONSchema: () => object}}} request
 * @returns {{responseFormat: object, openAIMessages: object[]}}
 */
function prepareRequest(request) {
  const schema = enforceStrictSchema(request.responseSchema.toJSONSchema());
  return {
    responseFormat: {
      type: "json_schema",
      json_schema: {
        name: "structuredResponse",
        strict: true,
        schema
      }
    },
    openAIMessages: toOpenAIMessages(request.systemPrompt, request.messages)
  };
}
|
|
184
|
+
/**
 * Performs a blocking (non-streaming) structured-output chat completion
 * with the given model and normalizes the SDK response.
 *
 * @param {object} client - OpenAI SDK client (chat.completions.create).
 * @param {string} model - Model identifier to use.
 * @param {object[]} openAIMessages - Chat messages including system prompt.
 * @param {object} responseFormat - json_schema response_format payload.
 * @returns {Promise<{content: string, usage: object | undefined}>}
 * @throws {Error} When the completion has no message content, or was
 *         truncated (finish_reason === "length").
 */
async function sendWithModel(client, model, openAIMessages, responseFormat) {
  const completion = await client.chat.completions.create({
    model,
    messages: openAIMessages,
    response_format: responseFormat
  });
  const [first] = completion.choices;
  const text = first?.message?.content;
  if (!text) {
    throw new Error("OpenAI returned empty response");
  }
  if (first.finish_reason === "length") {
    throw new Error("OpenAI response truncated (finish_reason: length)");
  }
  const { usage } = completion;
  return {
    content: text,
    usage: usage ? {
      inputTokens: usage.prompt_tokens,
      outputTokens: usage.completion_tokens,
      totalTokens: usage.total_tokens
    } : void 0
  };
}
|
|
206
|
+
/**
 * Starts a streaming structured-output chat completion for the given
 * model and resolves with the raw SDK stream (consumed elsewhere by the
 * stream processor).
 *
 * @param {object} client - OpenAI SDK client (chat.completions.create).
 * @param {string} model - Model identifier to use.
 * @param {object[]} openAIMessages - Chat messages including system prompt.
 * @param {object} responseFormat - json_schema response_format payload.
 * @returns {Promise<object>} The raw streaming response.
 */
async function sendStreamWithModel(client, model, openAIMessages, responseFormat) {
  const params = {
    model,
    messages: openAIMessages,
    response_format: responseFormat,
    stream: true
  };
  return client.chat.completions.create(params);
}
|
|
214
|
+
/**
 * Creates the OpenAI provider. Builds a shared OpenAI SDK client from the
 * config and returns a provider object with blocking (`sendRequest`) and
 * streaming (`sendRequestStream`) entry points; both try the configured
 * models in priority order via executeWithFallback.
 *
 * @param {{apiKey?: string, baseURL?: string, organization?: string, models: string[]}} config
 * @returns {object} Provider with `name`, `sendRequest`, and `sendRequestStream`.
 */
function createOpenAIProvider(config) {
  const openAIClient = new OpenAI({
    apiKey: config.apiKey,
    baseURL: config.baseURL,
    organization: config.organization
  });
  const { models } = config;
  return {
    name: "openai",
    async sendRequest(request) {
      const { responseFormat, openAIMessages } = prepareRequest(request);
      const attempt = (model) => sendWithModel(openAIClient, model, openAIMessages, responseFormat);
      return executeWithFallback(models, attempt);
    },
    async *sendRequestStream(request) {
      const { responseFormat, openAIMessages } = prepareRequest(request);
      const openStream = (model) => sendStreamWithModel(openAIClient, model, openAIMessages, responseFormat);
      const rawStream = await executeWithFallback(models, openStream);
      yield* processOpenAIStream(rawStream, "openai", request.responseSchema);
    }
  };
}
|
|
240
|
+
|
|
241
|
+
export { createOpenAIProvider };
|
|
242
|
+
//# sourceMappingURL=index.js.map
|
|
243
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../openai/streamProcessor.ts","../../utils/executeWithFallback.ts","../../openai/index.ts"],"names":[],"mappings":";;;;AAaA,IAAM,mBAAA,GAAsB,oBAAA;AAe5B,gBAAuB,mBAAA,CACrB,SAAA,EACA,OAAA,EACA,MAAA,EACqC;AACrC,EAAA,IAAI,UAAA,GAAa,EAAA;AACjB,EAAA,IAAI,KAAA,GAAqB,CAAA;AACzB,EAAA,IAAI,UAAA,GAAa,KAAA;AAEjB,EAAA,IAAI;AACF,IAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AACnC,MAAA,MAAM,KAAA,GAAQ,KAAA,CAAM,OAAA,CAAQ,CAAC,GAAG,KAAA,EAAO,OAAA;AACvC,MAAA,IAAI,CAAC,KAAA,EAAO;AAEZ,MAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACxB,QAAA,UAAA,IAAc,IAAA;AACd,QAAA,MAAM,SAA6B,WAAA,CAAoB,IAAA,EAAM,KAAA,EAAO,UAAA,EAAY,YAAY,OAAO,CAAA;AACnG,QAAA,KAAA,GAAQ,MAAA,CAAO,KAAA;AACf,QAAA,UAAA,GAAa,MAAA,CAAO,UAAA;AACpB,QAAA,IAAI,MAAA,CAAO,KAAA,EAAO,MAAM,MAAA,CAAO,KAAA;AAAA,MACjC;AAGA,MAAA,MAAM,YAAA,GAAe,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,aAAA;AACvC,MAAA,IAAI,iBAAiB,QAAA,EAAU;AAC7B,QAAA,MAAM;AAAA,UACJ,MAAM,SAAA,CAAU,SAAA;AAAA,UAChB,OAAA,EAAS,mDAAA;AAAA,UACT;AAAA,SACF;AACA,QAAA;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,CAAC,UAAA,EAAY;AACf,MAAA,MAAM;AAAA,QACJ,MAAM,SAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,6CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,aAAA,CAAsB,UAAA,EAAY,MAAA,EAAQ,OAAO,CAAA;AAAA,EACzD,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,qBAAA,EAAyB,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACvD;AAAA,KACF;AAAA,EACF;AACF;AAEA,SAAS,aAAA,CACP,MAAA,EACA,MAAA,EACA,OAAA,EACqB;AACrB,EAAA,IAAI;AACF,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,MAAM,CAAA;AAChC,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,MAAM,CAAA;AACrC,IAAA,OAAO;AAAA,MACL,MAAM,SAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,OAAO;AAAA,MACL,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,kCAAA,EAAsC,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACpE;AAAA,KACF;AAAA,EACF;AACF;AAQA,SAAS,WAAA,CACP,IAAA,EACA,KAAA,EACA,UAAA,EACA,YACA,OAAA,EACoB;AACpB,EAAA,QAAQ,KAAA;AAAO,IACb,KAAK,CAAA;AACH,MAAA,IAAI,UAAA,CAAW,QAAA,CAA
S,mBAAmB,CAAA,EAAG;AAC5C,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,2BAAgC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACjF;AACA,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA,IAE1C,KAAK,CAAA;AACH,MAAA,IAAI,UAAA,EAAY;AACd,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA,EAAY,KAAA;AAAA,UACZ,KAAA,EAAO;AAAA,YACL,MAAM,SAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,aAAa,IAAI,CAAA;AAAA,YACxB;AAAA;AACF,SACF;AAAA,MACF,CAAA,MAAA,IAAW,SAAS,IAAA,EAAM;AACxB,QAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,IAAA,EAAM,OAAO,IAAA,EAAK;AAAA,MAChD,CAAA,MAAA,IAAW,SAAS,GAAA,EAAK;AACvB,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,6BAAkC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACnF,CAAA,MAAO;AACL,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA;AAAA,UACA,KAAA,EAAO;AAAA,YACL,MAAM,SAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,IAAA;AAAA,YACP;AAAA;AACF,SACF;AAAA,MACF;AAAA,IAEF,KAAK,CAAA;AACH,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA;AAE9C;AAGA,SAAS,aAAa,IAAA,EAAsB;AAC1C,EAAA,QAAQ,IAAA;AAAM,IACZ,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,IAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT;AAEE,MAAA,OAAO,IAAA;AAAA;AAEb;;;ACjKA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACDA,SAAS,oBAAoB,MAAA,EAAgC;AAC3D,EAAA,MAAM,MAAA,GAAS,EAAE,GAAG,MAAA,EAAO;AAE3B,EAAA,IAAI,MAAA,CAAO,IAAA,KAAS,QAAA,IAAY,MAAA,CAAO,UAAA,EAAY;AACjD,IAAA,MAAA,CAAO,oBAAA,GAAuB,KAAA;AAC9B,IAAA,MAAA,CAAO,QAAA,GAAW,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,UAAqC,CAAA;AAC1E,IAAA,MAAM,QAAQ,MAAA,CAAO,UAAA;AACrB,IAAA,MAAM,cAA0C,EAAC;AACjD,IAAA
,KAAA,MAAW,CAAC,GAAA,EAAK,KAAK,KAAK,MAAA,CAAO,OAAA,CAAQ,KAAK,CAAA,EAAG;AAChD,MAAA,WAAA,CAAY,GAAG,CAAA,GAAI,mBAAA,CAAoB,KAAK,CAAA;AAAA,IAC9C;AACA,IAAA,MAAA,CAAO,UAAA,GAAa,WAAA;AAAA,EACtB;AAEA,EAAA,IAAI,MAAA,CAAO,KAAA,IAAS,OAAO,MAAA,CAAO,UAAU,QAAA,EAAU;AACpD,IAAA,MAAA,CAAO,KAAA,GAAQ,mBAAA,CAAoB,MAAA,CAAO,KAAmB,CAAA;AAAA,EAC/D;AAGA,EAAA,KAAA,MAAW,OAAA,IAAW,CAAC,OAAA,EAAS,OAAA,EAAS,OAAO,CAAA,EAAY;AAC1D,IAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAC,CAAA,EAAG;AAClC,MAAA,MAAA,CAAO,OAAO,CAAA,GAAK,MAAA,CAAO,OAAO,CAAA,CAAmB,IAAI,CAAC,CAAA,KAAM,mBAAA,CAAoB,CAAC,CAAC,CAAA;AAAA,IACvF;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AAEA,SAAS,gBAAA,CACP,cACA,QAAA,EAC8B;AAC9B,EAAA,MAAM,iBAA+C,CAAC,EAAE,MAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAE/F,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,cAAA,CAAe,IAAA,CAAK,EAAE,IAAA,EAAM,GAAA,CAAI,MAAM,OAAA,EAAS,GAAA,CAAI,SAAS,CAAA;AAAA,EAC9D;AAEA,EAAA,OAAO,cAAA;AACT;AAEA,SAAS,eAAe,OAAA,EAA0B;AAChD,EAAA,MAAM,UAAA,GAAa,mBAAA,CAAoB,OAAA,CAAQ,cAAA,CAAe,cAA4B,CAAA;AAC1F,EAAA,MAAM,cAAA,GAAiB;AAAA,IACrB,IAAA,EAAM,aAAA;AAAA,IACN,WAAA,EAAa;AAAA,MACX,IAAA,EAAM,oBAAA;AAAA,MACN,MAAA,EAAQ,IAAA;AAAA,MACR,MAAA,EAAQ;AAAA;AACV,GACF;AACA,EAAA,MAAM,cAAA,GAAiB,gBAAA,CAAiB,OAAA,CAAQ,YAAA,EAAc,QAAQ,QAAQ,CAAA;AAE9E,EAAA,OAAO,EAAE,gBAAgB,cAAA,EAAe;AAC1C;AAEA,eAAe,aAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cAAA,EAI2B;AAC3B,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,IACpD,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB;AAAA,GAClB,CAAA;AAED,EAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,EAAA,IAAI,CAAC,MAAA,EAAQ,OAAA,EAAS,OAAA,EAAS;AAC7B,IAAA,MAAM,IAAI,MAAM,gCAAgC,CAAA;AAAA,EAClD;AAEA,EAAA,IAAI,MAAA,CAAO,kBAAkB,QAAA,EAAU;AACrC,IAAA,MAAM,IAAI,MAAM,mDAAmD,CAAA;AAAA,EACrE;AAEA,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,OAAO,OAAA,CAAQ,OAAA;AAAA,IACxB,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,MACE,WAAA,EAAa,SAAS,KAAA,CAAM,aAAA;AAAA,MAC5B,YAAA,EAAc,SAAS,KAAA,CAAM,iBAAA;AAAA,MAC7B,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,KAC9B,GACA;AAAA,GACN;AACF;AAEA,eAAe,mBAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cA
AA,EAIA;AACA,EAAA,OAAO,MAAA,CAAO,IAAA,CAAK,WAAA,CAAY,MAAA,CAAO;AAAA,IACpC,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB,cAAA;AAAA,IACjB,MAAA,EAAQ;AAAA,GACT,CAAA;AACH;AAmBO,SAAS,qBAAqB,MAAA,EAAwC;AAC3E,EAAA,MAAM,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,IACxB,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,cAAc,MAAA,CAAO;AAAA,GACtB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KAClC,aAAA,CAAc,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OAC7D;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,MAAM,YAAY,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KACnD,mBAAA,CAAoB,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OACnE;AAEA,MAAA,OAAO,mBAAA,CAA4B,SAAA,EAAW,QAAA,EAAU,OAAA,CAAQ,cAAc,CAAA;AAAA,IAChF;AAAA,GACF;AACF","file":"index.js","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { Stream } from 'openai/streaming'\nimport type { ChatCompletionChunk } from 'openai/resources/chat/completions'\n\n/** State-machine states for extracting chatbotMessage from structured JSON stream */\nconst enum ParserState {\n SEARCHING = 0,\n IN_CHATBOT_MESSAGE = 1,\n PAST_CHATBOT_MESSAGE = 2,\n}\n\nconst CHATBOT_MESSAGE_KEY = '\"chatbotMessage\":\"'\n\n/**\n * Processes an OpenAI streaming response into AG-UI events.\n *\n * OpenAI structured output returns the entire response as JSON. The chatbotMessage\n * field is embedded within that JSON. 
This processor uses a character-level state\n * machine to extract chatbotMessage text progressively during streaming, yielding\n * TEXT_MESSAGE_CONTENT deltas in real-time.\n *\n * @param rawStream - OpenAI chat completion stream\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processOpenAIStream<TState extends BaseState = BaseState>(\n rawStream: Stream<ChatCompletionChunk>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let fullBuffer = ''\n let state: ParserState = ParserState.SEARCHING\n let escapeNext = false\n\n try {\n for await (const chunk of rawStream) {\n const delta = chunk.choices[0]?.delta?.content\n if (!delta) continue\n\n for (const char of delta) {\n fullBuffer += char\n const result: CharResult<TState> = processChar<TState>(char, state, escapeNext, fullBuffer, agentId)\n state = result.state\n escapeNext = result.escapeNext\n if (result.event) yield result.event\n }\n\n // Check for truncation\n const finishReason = chunk.choices[0]?.finish_reason\n if (finishReason === 'length') {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI response truncated (finish_reason: length)',\n agentId,\n } as StreamEvent<TState>\n return\n }\n }\n\n // Stream complete — parse and validate the full response\n if (!fullBuffer) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI stream completed with empty response',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n yield parseResponse<TState>(fullBuffer, schema, agentId)\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `OpenAI stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\nfunction parseResponse<TState extends BaseState>(\n buffer: string,\n schema: ZodType,\n agentId: AgentId,\n): StreamEvent<TState> {\n try {\n const parsed = JSON.parse(buffer) as 
Record<string, unknown>\n const validated = schema.parse(parsed)\n return {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n return {\n type: EventType.RUN_ERROR,\n message: `Response parse/validation failed: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\ninterface CharResult<TState extends BaseState> {\n state: ParserState\n escapeNext: boolean\n event: StreamEvent<TState> | null\n}\n\nfunction processChar<TState extends BaseState>(\n char: string,\n state: ParserState,\n escapeNext: boolean,\n fullBuffer: string,\n agentId: AgentId,\n): CharResult<TState> {\n switch (state) {\n case ParserState.SEARCHING:\n if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {\n return { state: ParserState.IN_CHATBOT_MESSAGE, escapeNext: false, event: null }\n }\n return { state, escapeNext, event: null }\n\n case ParserState.IN_CHATBOT_MESSAGE:\n if (escapeNext) {\n return {\n state,\n escapeNext: false,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: unescapeChar(char),\n agentId,\n } as StreamEvent<TState>,\n }\n } else if (char === '\\\\') {\n return { state, escapeNext: true, event: null }\n } else if (char === '\"') {\n return { state: ParserState.PAST_CHATBOT_MESSAGE, escapeNext: false, event: null }\n } else {\n return {\n state,\n escapeNext,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: char,\n agentId,\n } as StreamEvent<TState>,\n }\n }\n\n case ParserState.PAST_CHATBOT_MESSAGE:\n return { state, escapeNext, event: null }\n }\n}\n\n/** Converts a JSON escape character to its actual value */\nfunction unescapeChar(char: string): string {\n switch (char) {\n case '\"':\n return '\"'\n case '\\\\':\n return '\\\\'\n case 'n':\n return '\\n'\n case 't':\n return '\\t'\n case 'r':\n return '\\r'\n case '/':\n return '/'\n default:\n // For \\uXXXX and unknown escapes, return 
as-is (the character after the backslash)\n return char\n }\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import OpenAI from 'openai'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processOpenAIStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\nimport type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'\n\n/**\n * Configuration for creating an OpenAI provider.\n */\nexport interface OpenAIProviderConfig {\n /** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. 
['gpt-4o', 'gpt-4o-mini']\n */\n models: string[]\n /** Optional base URL for Azure OpenAI or compatible endpoints */\n baseURL?: string\n /** Optional OpenAI organization ID */\n organization?: string\n}\n\ntype JsonSchema = Record<string, unknown>\n\n/**\n * Recursively enforces OpenAI structured output requirements on a JSON Schema:\n * - Adds `additionalProperties: false` to all object types\n * - Ensures all properties are listed in `required`\n */\nfunction enforceStrictSchema(schema: JsonSchema): JsonSchema {\n const result = { ...schema }\n\n if (result.type === 'object' && result.properties) {\n result.additionalProperties = false\n result.required = Object.keys(result.properties as Record<string, unknown>)\n const props = result.properties as Record<string, JsonSchema>\n const strictProps: Record<string, JsonSchema> = {}\n for (const [key, value] of Object.entries(props)) {\n strictProps[key] = enforceStrictSchema(value)\n }\n result.properties = strictProps\n }\n\n if (result.items && typeof result.items === 'object') {\n result.items = enforceStrictSchema(result.items as JsonSchema)\n }\n\n // Handle anyOf/oneOf/allOf\n for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) {\n if (Array.isArray(result[keyword])) {\n result[keyword] = (result[keyword] as JsonSchema[]).map((s) => enforceStrictSchema(s))\n }\n }\n\n return result\n}\n\nfunction toOpenAIMessages(\n systemPrompt: string,\n messages: ProviderMessage[],\n): ChatCompletionMessageParam[] {\n const openAIMessages: ChatCompletionMessageParam[] = [{ role: 'system', content: systemPrompt }]\n\n for (const msg of messages) {\n openAIMessages.push({ role: msg.role, content: msg.content })\n }\n\n return openAIMessages\n}\n\nfunction prepareRequest(request: ProviderRequest) {\n const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema() as JsonSchema)\n const responseFormat = {\n type: 'json_schema' as const,\n json_schema: {\n name: 'structuredResponse',\n strict: true,\n 
schema: jsonSchema,\n },\n }\n const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages)\n\n return { responseFormat, openAIMessages }\n}\n\nasync function sendWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n): Promise<ProviderResponse> {\n const response = await client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n })\n\n const choice = response.choices[0]\n if (!choice?.message?.content) {\n throw new Error('OpenAI returned empty response')\n }\n\n if (choice.finish_reason === 'length') {\n throw new Error('OpenAI response truncated (finish_reason: length)')\n }\n\n return {\n content: choice.message.content,\n usage: response.usage\n ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens,\n totalTokens: response.usage.total_tokens,\n }\n : undefined,\n }\n}\n\nasync function sendStreamWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n) {\n return client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n stream: true,\n })\n}\n\n/**\n * Creates an OpenAI provider instance.\n *\n * Uses OpenAI's structured output (response_format with JSON Schema) for both\n * blocking and streaming paths, with real-time chatbotMessage text streaming\n * via a custom stream processor.\n *\n * @param config - OpenAI provider configuration\n * @returns A Provider implementation using OpenAI\n *\n * @example\n * ```typescript\n * const provider = createOpenAIProvider({\n * models: ['gpt-4o', 'gpt-4o-mini'],\n * })\n * ```\n */\nexport function createOpenAIProvider(config: OpenAIProviderConfig): 
Provider {\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n organization: config.organization,\n })\n const models = config.models\n\n return {\n name: 'openai',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(client, model, openAIMessages, responseFormat),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n const rawStream = await executeWithFallback(models, (model) =>\n sendStreamWithModel(client, model, openAIMessages, responseFormat),\n )\n\n yield* processOpenAIStream<TState>(rawStream, 'openai', request.responseSchema)\n },\n }\n}\n"]}
|
package/package.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@genui-a3/providers",
|
|
3
|
+
"version": "0.0.0",
|
|
4
|
+
"description": "Provider implementations for the A3 agentic framework",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
"./bedrock": {
|
|
8
|
+
"types": "./dist/bedrock/index.d.ts",
|
|
9
|
+
"import": "./dist/bedrock/index.js",
|
|
10
|
+
"require": "./dist/bedrock/index.cjs"
|
|
11
|
+
},
|
|
12
|
+
"./openai": {
|
|
13
|
+
"types": "./dist/openai/index.d.ts",
|
|
14
|
+
"import": "./dist/openai/index.js",
|
|
15
|
+
"require": "./dist/openai/index.cjs"
|
|
16
|
+
}
|
|
17
|
+
},
|
|
18
|
+
"files": [
|
|
19
|
+
"dist"
|
|
20
|
+
],
|
|
21
|
+
"scripts": {
|
|
22
|
+
"build": "tsup",
|
|
23
|
+
"dev": "tsup --watch",
|
|
24
|
+
"clean": "rm -rf dist",
|
|
25
|
+
"prepublishOnly": "npm run clean && npm run build"
|
|
26
|
+
},
|
|
27
|
+
"keywords": [
|
|
28
|
+
"a3",
|
|
29
|
+
"providers",
|
|
30
|
+
"bedrock",
|
|
31
|
+
"openai",
|
|
32
|
+
"ai",
|
|
33
|
+
"llm"
|
|
34
|
+
],
|
|
35
|
+
"author": "GenUI <https://genui.com>",
|
|
36
|
+
"license": "ISC",
|
|
37
|
+
"publishConfig": {
|
|
38
|
+
"access": "public"
|
|
39
|
+
},
|
|
40
|
+
"peerDependencies": {
|
|
41
|
+
"@genui-a3/core": ">=0.1.5"
|
|
42
|
+
},
|
|
43
|
+
"dependencies": {
|
|
44
|
+
"@ag-ui/client": "0.0.47",
|
|
45
|
+
"@aws-sdk/client-bedrock-runtime": "3.975.0",
|
|
46
|
+
"openai": "6.27.0",
|
|
47
|
+
"zod": "4.3.6"
|
|
48
|
+
},
|
|
49
|
+
"devDependencies": {
|
|
50
|
+
"@types/node": "20.19.0",
|
|
51
|
+
"tsup": "8.5.1",
|
|
52
|
+
"typescript": "5.9.3"
|
|
53
|
+
}
|
|
54
|
+
}
|