@genui-a3/providers 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,162 @@
1
+ 'use strict';
2
+
3
+ var anthropic = require('@ai-sdk/anthropic');
4
+ var ai = require('ai');
5
+ var client = require('@ag-ui/client');
6
+
7
+ // anthropic/index.ts
8
/**
 * Converts an Anthropic partial-output stream (Vercel AI SDK) into AG-UI events.
 *
 * Each partial object's growing `chatbotMessage` string is diffed against what
 * has already been emitted, yielding TEXT_MESSAGE_CONTENT deltas. Once the
 * stream is exhausted, the final object is awaited, validated with `schema`,
 * and emitted as a TOOL_CALL_RESULT event. Any failure (stream error, null
 * output handled explicitly, schema rejection) surfaces as a RUN_ERROR event
 * instead of throwing out of the generator.
 *
 * @param streamResult - streamText result; its `output` promise resolves to the final object
 * @param reader - already-started async iterator over partial objects
 * @param first - first iteration result (consumed by the caller to trigger the API call)
 * @param agentId - agent identifier attached to every event
 * @param schema - Zod schema used to validate the final object
 */
async function* processAnthropicStream(streamResult, reader, first, agentId, schema) {
  let emittedLength = 0;
  try {
    // The first result was pre-fetched by the caller; treat it and all
    // subsequent results uniformly instead of duplicating the delta logic.
    let step = first;
    while (!step.done) {
      const delta = extractDelta(step.value, emittedLength);
      if (delta) {
        emittedLength += delta.length;
        yield {
          type: client.EventType.TEXT_MESSAGE_CONTENT,
          messageId: "",
          delta,
          agentId
        };
      }
      step = await reader.next();
    }
    const finalObject = await streamResult.output;
    if (finalObject === null) {
      yield {
        type: client.EventType.RUN_ERROR,
        message: "Anthropic stream completed with null output",
        agentId
      };
      return;
    }
    const validated = schema.parse(finalObject);
    yield {
      type: client.EventType.TOOL_CALL_RESULT,
      toolCallId: "",
      messageId: "",
      content: JSON.stringify(validated),
      agentId
    };
  } catch (err) {
    // Thrown values are not guaranteed to be Error instances; avoid
    // interpolating "undefined" into the message for string/object throws.
    const message = err instanceof Error ? err.message : String(err);
    yield {
      type: client.EventType.RUN_ERROR,
      message: `Anthropic stream error: ${message}`,
      agentId
    };
  }
}
64
/**
 * Returns the portion of `partial.chatbotMessage` past `prevLength`, or null
 * when the field is not a string or contains nothing new.
 */
function extractDelta(partial, prevLength) {
  const message = partial.chatbotMessage;
  const hasNewText = typeof message === "string" && message.length > prevLength;
  return hasNewText ? message.slice(prevLength) : null;
}
71
+
72
+ // utils/executeWithFallback.ts
73
/**
 * Runs `action` against each model in priority order and returns the first
 * successful result. If every model fails, the error from the last model is
 * rethrown; an empty model list throws a generic error.
 */
async function executeWithFallback(models, action) {
  const lastIndex = models.length - 1;
  for (const [index, model] of models.entries()) {
    try {
      return await action(model);
    } catch (error) {
      if (index === lastIndex) {
        throw error;
      }
      // Otherwise swallow and fall through to the next model.
    }
  }
  throw new Error("All models failed");
}
87
+
88
+ // anthropic/index.ts
89
/** Projects provider messages down to the `{ role, content }` shape the AI SDK expects. */
function toAIMessages(messages) {
  return messages.map(({ role, content }) => ({ role, content }));
}
95
/**
 * Ensures the conversation ends with a user turn: when the final message is
 * from the assistant, a synthetic "Continue" user message is appended (as a
 * new array); otherwise the input is returned unchanged. Never mutates input.
 */
function prepareMessages(messages) {
  const endsWithAssistant =
    messages.length > 0 && messages[messages.length - 1].role === "assistant";
  return endsWithAssistant
    ? messages.concat({ role: "user", content: "Continue" })
    : messages;
}
103
/**
 * Performs one blocking structured-output request against `model`.
 * Returns the serialized result object plus normalized token usage
 * (missing token counts are treated as zero).
 */
async function sendWithModel(anthropicProvider, model, system, messages, schema) {
  const result = await ai.generateText({
    model: anthropicProvider(model),
    system,
    messages: prepareMessages(messages),
    output: ai.Output.object({ schema })
  });
  const inputTokens = result.usage.inputTokens ?? 0;
  const outputTokens = result.usage.outputTokens ?? 0;
  return {
    content: JSON.stringify(result.output),
    usage: {
      inputTokens,
      outputTokens,
      totalTokens: inputTokens + outputTokens
    }
  };
}
120
/**
 * Starts a streaming structured-output request against `model` and eagerly
 * awaits the first partial so that connection errors are raised here — where
 * the model-fallback wrapper can catch them — rather than during iteration.
 */
async function sendStreamWithModel(anthropicProvider, model, system, messages, schema) {
  const result = ai.streamText({
    model: anthropicProvider(model),
    system,
    messages: prepareMessages(messages),
    output: ai.Output.object({ schema })
  });
  const reader = result.partialOutputStream[Symbol.asyncIterator]();
  const first = await reader.next();
  return { result, reader, first };
}
133
/**
 * Builds an Anthropic-backed provider with model fallback.
 *
 * `config.models` is tried in priority order for both the blocking and the
 * streaming request paths; the first model that succeeds wins.
 */
function createAnthropicProvider(config) {
  const { models } = config;
  const anthropicProvider = anthropic.createAnthropic({
    apiKey: config.apiKey,
    baseURL: config.baseURL
  });
  // Small helper so both request paths share the fallback wiring.
  const runWithFallback = (fn) => executeWithFallback(models, fn);
  return {
    name: "anthropic",
    async sendRequest(request) {
      const messages = toAIMessages(request.messages);
      return runWithFallback(
        (model) => sendWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema)
      );
    },
    async *sendRequestStream(request) {
      const messages = toAIMessages(request.messages);
      const { result, reader, first } = await runWithFallback(
        (model) => sendStreamWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema)
      );
      yield* processAnthropicStream(result, reader, first, "anthropic", request.responseSchema);
    }
  };
}
158
+
159
exports.createAnthropicProvider = createAnthropicProvider;
exports.prepareMessages = prepareMessages;
// Fix: the bundle emitted the sourceMappingURL directive twice; keep one.
//# sourceMappingURL=index.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../anthropic/streamProcessor.ts","../../utils/executeWithFallback.ts","../../anthropic/index.ts"],"names":["EventType","generateText","Output","streamText","createAnthropic"],"mappings":";;;;;;;AAoBA,gBAAuB,sBAAA,CACrB,YAAA,EACA,MAAA,EACA,KAAA,EACA,SACA,MAAA,EACqC;AACrC,EAAA,IAAI,iBAAA,GAAoB,CAAA;AAExB,EAAA,IAAI;AAEF,IAAA,IAAI,CAAC,MAAM,IAAA,EAAM;AACf,MAAA,MAAM,UAAU,KAAA,CAAM,KAAA;AACtB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAMA,gBAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,IAAA,GAAO,MAAM,MAAA,CAAO,IAAA,EAAK;AAC7B,IAAA,OAAO,CAAC,KAAK,IAAA,EAAM;AACjB,MAAA,MAAM,UAAU,IAAA,CAAK,KAAA;AACrB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAMA,gBAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAEA,MAAA,IAAA,GAAO,MAAM,OAAO,IAAA,EAAK;AAAA,IAC3B;AAGA,IAAA,MAAM,WAAA,GAAc,MAAM,YAAA,CAAa,MAAA;AAEvC,IAAA,IAAI,gBAAgB,IAAA,EAAM;AACxB,MAAA,MAAM;AAAA,QACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,6CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,WAAW,CAAA;AAC1C,IAAA,MAAM;AAAA,MACJ,MAAMA,gBAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,wBAAA,EAA4B,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MAC1D;AAAA,KACF;AAAA,EACF;AACF;AAKA,SAAS,YAAA,CAAa,SAAkC,UAAA,EAAmC;AACzF,EAAA,MAAM,iBAAiB,OAAA,CAAQ,cAAA;AAC/B,EAAA,IAAI,OAAO,cAAA,KAAmB,QAAA,IAAY,cAAA,CAAe,UAAU,UAAA,EAAY;AAC7E,IAAA,OAAO,IAAA;AAAA,EACT;AACA,EAAA,OAAO,cAAA,CAAe,MAAM,UAAU,CAAA;AACxC;;;ACnFA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAA
A,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACVA,SAAS,aAAa,QAAA,EAA6C;AACjE,EAAA,OAAO,QAAA,CAAS,GAAA,CAAI,CAAC,GAAA,MAAS;AAAA,IAC5B,MAAM,GAAA,CAAI,IAAA;AAAA,IACV,SAAS,GAAA,CAAI;AAAA,GACf,CAAE,CAAA;AACJ;AAEO,SAAS,gBAAgB,QAAA,EAA0C;AACxE,EAAA,IAAI,QAAA,CAAS,MAAA,KAAW,CAAA,EAAG,OAAO,QAAA;AAClC,EAAA,MAAM,WAAA,GAAc,QAAA,CAAS,QAAA,CAAS,MAAA,GAAS,CAAC,CAAA;AAChD,EAAA,IAAI,WAAA,CAAY,SAAS,WAAA,EAAa;AACpC,IAAA,OAAO,CAAC,GAAG,QAAA,EAAU,EAAE,MAAM,MAAA,EAAQ,OAAA,EAAS,YAAY,CAAA;AAAA,EAC5D;AACA,EAAA,OAAO,QAAA;AACT;AAEA,eAAe,aAAA,CACb,iBAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EAC2B;AAC3B,EAAA,MAAM,gBAAA,GAAmB,gBAAgB,QAAQ,CAAA;AACjD,EAAA,MAAM,MAAA,GAAS,MAAMC,eAAA,CAAa;AAAA,IAChC,KAAA,EAAO,kBAAkB,KAAK,CAAA;AAAA,IAC9B,MAAA;AAAA,IACA,QAAA,EAAU,gBAAA;AAAA,IACV,MAAA,EAAQC,SAAA,CAAO,MAAA,CAAO,EAAE,QAAQ;AAAA,GACjC,CAAA;AAED,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,MAAA,CAAO,MAAM,CAAA;AAAA,IACrC,KAAA,EAAO;AAAA,MACL,WAAA,EAAa,MAAA,CAAO,KAAA,CAAM,WAAA,IAAe,CAAA;AAAA,MACzC,YAAA,EAAc,MAAA,CAAO,KAAA,CAAM,YAAA,IAAgB,CAAA;AAAA,MAC3C,cAAc,MAAA,CAAO,KAAA,CAAM,eAAe,CAAA,KAAM,MAAA,CAAO,MAAM,YAAA,IAAgB,CAAA;AAAA;AAC/E,GACF;AACF;AAEA,eAAe,mBAAA,CACb,iBAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EACA;AACA,EAAA,MAAM,gBAAA,GAAmB,gBAAgB,QAAQ,CAAA;AACjD,EAAA,MAAM,SAASC,aAAA,CAAW;AAAA,IACxB,KAAA,EAAO,kBAAkB,KAAK,CAAA;AAAA,IAC9B,MAAA;AAAA,IACA,QAAA,EAAU,gBAAA;AAAA,IACV,MAAA,EAAQD,SAAA,CAAO,MAAA,CAAO,EAAE,QAAQ;AAAA,GACjC,CAAA;AAGD,EAAA,MAAM,gBAAgB,MAAA,CAAO,mBAAA;AAC7B,EAAA,MAAM,MAAA,GAAS,aAAA,CAAc,MAAA,CAAO,aAAa,CAAA,EAAE;AACnD,EAAA,MAAM,KAAA,GAAQ,MAAM,MAAA,CAAO,IAAA,EAAK;AAEhC,EAAA,OAAO,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAM;AACjC;AAoBO,SAAS,wBAAwB,MAAA,EAA2C;AACjF,EAAA,MAAM,oBAAoBE,yBAAA,CAAgB;AAAA,IACxC,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CA
AO;AAAA,GACjB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,WAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UAClC,aAAA,CAAc,iBAAA,EAAmB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OAChG;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,MAAM,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,KAAU,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UACnE,mBAAA,CAAoB,iBAAA,EAAmB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OACtG;AAEA,MAAA,OAAO,uBAA+B,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAO,WAAA,EAAa,QAAQ,cAAc,CAAA;AAAA,IAClG;AAAA,GACF;AACF","file":"index.cjs","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { StreamTextResult, ToolSet } from 'ai'\n\n/**\n * Processes an Anthropic streaming response (via Vercel AI SDK) into AG-UI events.\n *\n * Uses `partialOutputStream` from `streamText` + `Output.object()` to receive\n * progressively-built partial objects. Tracks `chatbotMessage` growth to yield\n * TEXT_MESSAGE_CONTENT deltas. 
After the stream completes, validates the final\n * object and yields TOOL_CALL_RESULT.\n *\n * @param streamResult - The streamText result containing partialOutputStream and output promise\n * @param reader - Pre-started async iterator for the partial object stream\n * @param first - The first iteration result (already consumed to trigger the API call)\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processAnthropicStream<TState extends BaseState = BaseState>(\n streamResult: StreamTextResult<ToolSet, never>,\n reader: AsyncIterator<unknown>,\n first: IteratorResult<unknown>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let prevMessageLength = 0\n\n try {\n // Process the first partial (already consumed to trigger the API call)\n if (!first.done) {\n const partial = first.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n }\n\n // Process remaining partials\n let next = await reader.next()\n while (!next.done) {\n const partial = next.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n // eslint-disable-next-line no-await-in-loop\n next = await reader.next()\n }\n\n // Stream complete — await and validate the final object\n const finalObject = await streamResult.output\n\n if (finalObject === null) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'Anthropic stream completed with null output',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n const validated = 
schema.parse(finalObject)\n yield {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `Anthropic stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\n/**\n * Extracts the new portion of chatbotMessage from a partial object.\n */\nfunction extractDelta(partial: Record<string, unknown>, prevLength: number): string | null {\n const chatbotMessage = partial.chatbotMessage\n if (typeof chatbotMessage !== 'string' || chatbotMessage.length <= prevLength) {\n return null\n }\n return chatbotMessage.slice(prevLength)\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import { createAnthropic } from '@ai-sdk/anthropic'\nimport { generateText, streamText, Output, ModelMessage } from 'ai'\nimport type {\n 
Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processAnthropicStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\n\n/**\n * Configuration for creating an Anthropic provider.\n */\nexport interface AnthropicProviderConfig {\n /** Anthropic API key. Defaults to ANTHROPIC_API_KEY env var. */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001']\n */\n models: string[]\n /** Optional custom base URL for the Anthropic API */\n baseURL?: string\n}\n\nfunction toAIMessages(messages: ProviderMessage[]): ModelMessage[] {\n return messages.map((msg) => ({\n role: msg.role,\n content: msg.content,\n }))\n}\n\nexport function prepareMessages(messages: ModelMessage[]): ModelMessage[] {\n if (messages.length === 0) return messages\n const lastMessage = messages[messages.length - 1]\n if (lastMessage.role === 'assistant') {\n return [...messages, { role: 'user', content: 'Continue' }]\n }\n return messages\n}\n\nasync function sendWithModel(\n anthropicProvider: ReturnType<typeof createAnthropic>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n): Promise<ProviderResponse> {\n const preparedMessages = prepareMessages(messages)\n const result = await generateText({\n model: anthropicProvider(model),\n system,\n messages: preparedMessages,\n output: Output.object({ schema }),\n })\n\n return {\n content: JSON.stringify(result.output),\n usage: {\n inputTokens: result.usage.inputTokens ?? 0,\n outputTokens: result.usage.outputTokens ?? 0,\n totalTokens: (result.usage.inputTokens ?? 0) + (result.usage.outputTokens ?? 
0),\n },\n }\n}\n\nasync function sendStreamWithModel(\n anthropicProvider: ReturnType<typeof createAnthropic>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n) {\n const preparedMessages = prepareMessages(messages)\n const result = streamText({\n model: anthropicProvider(model),\n system,\n messages: preparedMessages,\n output: Output.object({ schema }),\n })\n\n // Force the API call to start so executeWithFallback can catch connection errors\n const partialStream = result.partialOutputStream\n const reader = partialStream[Symbol.asyncIterator]()\n const first = await reader.next()\n\n return { result, reader, first }\n}\n\n/**\n * Creates an Anthropic provider instance.\n *\n * Uses the Vercel AI SDK (`ai` + `@ai-sdk/anthropic`) for structured output via\n * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`\n * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON\n * parsing, and validation internally.\n *\n * @param config - Anthropic provider configuration\n * @returns A Provider implementation using Anthropic\n *\n * @example\n * ```typescript\n * const provider = createAnthropicProvider({\n * models: ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001'],\n * })\n * ```\n */\nexport function createAnthropicProvider(config: AnthropicProviderConfig): Provider {\n const anthropicProvider = createAnthropic({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n })\n const models = config.models\n\n return {\n name: 'anthropic',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const messages = toAIMessages(request.messages)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> 
{\n const messages = toAIMessages(request.messages)\n\n const { result, reader, first } = await executeWithFallback(models, (model) =>\n sendStreamWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n\n yield* processAnthropicStream<TState>(result, reader, first, 'anthropic', request.responseSchema)\n },\n }\n}\n"]}
@@ -0,0 +1,39 @@
1
import { ModelMessage } from 'ai';
import { Provider } from '@genui-a3/core';

/**
 * Configuration for creating an Anthropic provider.
 */
interface AnthropicProviderConfig {
    /** Anthropic API key. Defaults to ANTHROPIC_API_KEY env var. */
    apiKey?: string;
    /**
     * Model identifiers in order of preference (first = primary, rest = fallbacks).
     * e.g. ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001']
     */
    models: string[];
    /** Optional custom base URL for the Anthropic API */
    baseURL?: string;
}
/**
 * Appends a synthetic `{ role: 'user', content: 'Continue' }` message when the
 * conversation ends with an assistant turn; otherwise returns the input
 * unchanged. Does not mutate the input array.
 */
declare function prepareMessages(messages: ModelMessage[]): ModelMessage[];
/**
 * Creates an Anthropic provider instance.
 *
 * Uses the Vercel AI SDK (`ai` + `@ai-sdk/anthropic`) for structured output via
 * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`
 * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON
 * parsing, and validation internally.
 *
 * @param config - Anthropic provider configuration
 * @returns A Provider implementation using Anthropic
 *
 * @example
 * ```typescript
 * const provider = createAnthropicProvider({
 *   models: ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001'],
 * })
 * ```
 */
declare function createAnthropicProvider(config: AnthropicProviderConfig): Provider;

export { type AnthropicProviderConfig, createAnthropicProvider, prepareMessages };
@@ -0,0 +1,39 @@
1
import { ModelMessage } from 'ai';
import { Provider } from '@genui-a3/core';

/**
 * Configuration for creating an Anthropic provider.
 */
interface AnthropicProviderConfig {
    /** Anthropic API key. Defaults to ANTHROPIC_API_KEY env var. */
    apiKey?: string;
    /**
     * Model identifiers in order of preference (first = primary, rest = fallbacks).
     * e.g. ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001']
     */
    models: string[];
    /** Optional custom base URL for the Anthropic API */
    baseURL?: string;
}
/**
 * Appends a synthetic `{ role: 'user', content: 'Continue' }` message when the
 * conversation ends with an assistant turn; otherwise returns the input
 * unchanged. Does not mutate the input array.
 */
declare function prepareMessages(messages: ModelMessage[]): ModelMessage[];
/**
 * Creates an Anthropic provider instance.
 *
 * Uses the Vercel AI SDK (`ai` + `@ai-sdk/anthropic`) for structured output via
 * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`
 * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON
 * parsing, and validation internally.
 *
 * @param config - Anthropic provider configuration
 * @returns A Provider implementation using Anthropic
 *
 * @example
 * ```typescript
 * const provider = createAnthropicProvider({
 *   models: ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001'],
 * })
 * ```
 */
declare function createAnthropicProvider(config: AnthropicProviderConfig): Provider;

export { type AnthropicProviderConfig, createAnthropicProvider, prepareMessages };
@@ -0,0 +1,159 @@
1
+ import { createAnthropic } from '@ai-sdk/anthropic';
2
+ import { streamText, Output, generateText } from 'ai';
3
+ import { EventType } from '@ag-ui/client';
4
+
5
+ // anthropic/index.ts
6
/**
 * Converts an Anthropic partial-output stream (Vercel AI SDK) into AG-UI events.
 *
 * Each partial object's growing `chatbotMessage` string is diffed against what
 * has already been emitted, yielding TEXT_MESSAGE_CONTENT deltas. Once the
 * stream is exhausted, the final object is awaited, validated with `schema`,
 * and emitted as a TOOL_CALL_RESULT event. Any failure (stream error, null
 * output handled explicitly, schema rejection) surfaces as a RUN_ERROR event
 * instead of throwing out of the generator.
 *
 * @param streamResult - streamText result; its `output` promise resolves to the final object
 * @param reader - already-started async iterator over partial objects
 * @param first - first iteration result (consumed by the caller to trigger the API call)
 * @param agentId - agent identifier attached to every event
 * @param schema - Zod schema used to validate the final object
 */
async function* processAnthropicStream(streamResult, reader, first, agentId, schema) {
  let emittedLength = 0;
  try {
    // The first result was pre-fetched by the caller; treat it and all
    // subsequent results uniformly instead of duplicating the delta logic.
    let step = first;
    while (!step.done) {
      const delta = extractDelta(step.value, emittedLength);
      if (delta) {
        emittedLength += delta.length;
        yield {
          type: EventType.TEXT_MESSAGE_CONTENT,
          messageId: "",
          delta,
          agentId
        };
      }
      step = await reader.next();
    }
    const finalObject = await streamResult.output;
    if (finalObject === null) {
      yield {
        type: EventType.RUN_ERROR,
        message: "Anthropic stream completed with null output",
        agentId
      };
      return;
    }
    const validated = schema.parse(finalObject);
    yield {
      type: EventType.TOOL_CALL_RESULT,
      toolCallId: "",
      messageId: "",
      content: JSON.stringify(validated),
      agentId
    };
  } catch (err) {
    // Thrown values are not guaranteed to be Error instances; avoid
    // interpolating "undefined" into the message for string/object throws.
    const message = err instanceof Error ? err.message : String(err);
    yield {
      type: EventType.RUN_ERROR,
      message: `Anthropic stream error: ${message}`,
      agentId
    };
  }
}
62
/**
 * Returns the portion of `partial.chatbotMessage` past `prevLength`, or null
 * when the field is not a string or contains nothing new.
 */
function extractDelta(partial, prevLength) {
  const message = partial.chatbotMessage;
  const hasNewText = typeof message === "string" && message.length > prevLength;
  return hasNewText ? message.slice(prevLength) : null;
}
69
+
70
+ // utils/executeWithFallback.ts
71
/**
 * Runs `action` against each model in priority order and returns the first
 * successful result. If every model fails, the error from the last model is
 * rethrown; an empty model list throws a generic error.
 */
async function executeWithFallback(models, action) {
  const lastIndex = models.length - 1;
  for (const [index, model] of models.entries()) {
    try {
      return await action(model);
    } catch (error) {
      if (index === lastIndex) {
        throw error;
      }
      // Otherwise swallow and fall through to the next model.
    }
  }
  throw new Error("All models failed");
}
85
+
86
+ // anthropic/index.ts
87
/** Projects provider messages down to the `{ role, content }` shape the AI SDK expects. */
function toAIMessages(messages) {
  return messages.map(({ role, content }) => ({ role, content }));
}
93
/**
 * Ensures the conversation ends with a user turn: when the final message is
 * from the assistant, a synthetic "Continue" user message is appended (as a
 * new array); otherwise the input is returned unchanged. Never mutates input.
 */
function prepareMessages(messages) {
  const endsWithAssistant =
    messages.length > 0 && messages[messages.length - 1].role === "assistant";
  return endsWithAssistant
    ? messages.concat({ role: "user", content: "Continue" })
    : messages;
}
101
/**
 * Performs one blocking structured-output request against `model`.
 * Returns the serialized result object plus normalized token usage
 * (missing token counts are treated as zero).
 */
async function sendWithModel(anthropicProvider, model, system, messages, schema) {
  const result = await generateText({
    model: anthropicProvider(model),
    system,
    messages: prepareMessages(messages),
    output: Output.object({ schema })
  });
  const inputTokens = result.usage.inputTokens ?? 0;
  const outputTokens = result.usage.outputTokens ?? 0;
  return {
    content: JSON.stringify(result.output),
    usage: {
      inputTokens,
      outputTokens,
      totalTokens: inputTokens + outputTokens
    }
  };
}
118
/**
 * Starts a streaming structured-output request against `model` and eagerly
 * awaits the first partial so that connection errors are raised here — where
 * the model-fallback wrapper can catch them — rather than during iteration.
 */
async function sendStreamWithModel(anthropicProvider, model, system, messages, schema) {
  const result = streamText({
    model: anthropicProvider(model),
    system,
    messages: prepareMessages(messages),
    output: Output.object({ schema })
  });
  const reader = result.partialOutputStream[Symbol.asyncIterator]();
  const first = await reader.next();
  return { result, reader, first };
}
131
/**
 * Builds an Anthropic-backed provider with model fallback.
 *
 * `config.models` is tried in priority order for both the blocking and the
 * streaming request paths; the first model that succeeds wins.
 */
function createAnthropicProvider(config) {
  const { models } = config;
  const anthropicProvider = createAnthropic({
    apiKey: config.apiKey,
    baseURL: config.baseURL
  });
  // Small helper so both request paths share the fallback wiring.
  const runWithFallback = (fn) => executeWithFallback(models, fn);
  return {
    name: "anthropic",
    async sendRequest(request) {
      const messages = toAIMessages(request.messages);
      return runWithFallback(
        (model) => sendWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema)
      );
    },
    async *sendRequestStream(request) {
      const messages = toAIMessages(request.messages);
      const { result, reader, first } = await runWithFallback(
        (model) => sendStreamWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema)
      );
      yield* processAnthropicStream(result, reader, first, "anthropic", request.responseSchema);
    }
  };
}
156
+
157
+ export { createAnthropicProvider, prepareMessages };
158
+ //# sourceMappingURL=index.js.map
159
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../anthropic/streamProcessor.ts","../../utils/executeWithFallback.ts","../../anthropic/index.ts"],"names":[],"mappings":";;;;;AAoBA,gBAAuB,sBAAA,CACrB,YAAA,EACA,MAAA,EACA,KAAA,EACA,SACA,MAAA,EACqC;AACrC,EAAA,IAAI,iBAAA,GAAoB,CAAA;AAExB,EAAA,IAAI;AAEF,IAAA,IAAI,CAAC,MAAM,IAAA,EAAM;AACf,MAAA,MAAM,UAAU,KAAA,CAAM,KAAA;AACtB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAM,SAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,IAAA,GAAO,MAAM,MAAA,CAAO,IAAA,EAAK;AAC7B,IAAA,OAAO,CAAC,KAAK,IAAA,EAAM;AACjB,MAAA,MAAM,UAAU,IAAA,CAAK,KAAA;AACrB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAM,SAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAEA,MAAA,IAAA,GAAO,MAAM,OAAO,IAAA,EAAK;AAAA,IAC3B;AAGA,IAAA,MAAM,WAAA,GAAc,MAAM,YAAA,CAAa,MAAA;AAEvC,IAAA,IAAI,gBAAgB,IAAA,EAAM;AACxB,MAAA,MAAM;AAAA,QACJ,MAAM,SAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,6CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,WAAW,CAAA;AAC1C,IAAA,MAAM;AAAA,MACJ,MAAM,SAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,wBAAA,EAA4B,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MAC1D;AAAA,KACF;AAAA,EACF;AACF;AAKA,SAAS,YAAA,CAAa,SAAkC,UAAA,EAAmC;AACzF,EAAA,MAAM,iBAAiB,OAAA,CAAQ,cAAA;AAC/B,EAAA,IAAI,OAAO,cAAA,KAAmB,QAAA,IAAY,cAAA,CAAe,UAAU,UAAA,EAAY;AAC7E,IAAA,OAAO,IAAA;AAAA,EACT;AACA,EAAA,OAAO,cAAA,CAAe,MAAM,UAAU,CAAA;AACxC;;;ACnFA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA
,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACVA,SAAS,aAAa,QAAA,EAA6C;AACjE,EAAA,OAAO,QAAA,CAAS,GAAA,CAAI,CAAC,GAAA,MAAS;AAAA,IAC5B,MAAM,GAAA,CAAI,IAAA;AAAA,IACV,SAAS,GAAA,CAAI;AAAA,GACf,CAAE,CAAA;AACJ;AAEO,SAAS,gBAAgB,QAAA,EAA0C;AACxE,EAAA,IAAI,QAAA,CAAS,MAAA,KAAW,CAAA,EAAG,OAAO,QAAA;AAClC,EAAA,MAAM,WAAA,GAAc,QAAA,CAAS,QAAA,CAAS,MAAA,GAAS,CAAC,CAAA;AAChD,EAAA,IAAI,WAAA,CAAY,SAAS,WAAA,EAAa;AACpC,IAAA,OAAO,CAAC,GAAG,QAAA,EAAU,EAAE,MAAM,MAAA,EAAQ,OAAA,EAAS,YAAY,CAAA;AAAA,EAC5D;AACA,EAAA,OAAO,QAAA;AACT;AAEA,eAAe,aAAA,CACb,iBAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EAC2B;AAC3B,EAAA,MAAM,gBAAA,GAAmB,gBAAgB,QAAQ,CAAA;AACjD,EAAA,MAAM,MAAA,GAAS,MAAM,YAAA,CAAa;AAAA,IAChC,KAAA,EAAO,kBAAkB,KAAK,CAAA;AAAA,IAC9B,MAAA;AAAA,IACA,QAAA,EAAU,gBAAA;AAAA,IACV,MAAA,EAAQ,MAAA,CAAO,MAAA,CAAO,EAAE,QAAQ;AAAA,GACjC,CAAA;AAED,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,MAAA,CAAO,MAAM,CAAA;AAAA,IACrC,KAAA,EAAO;AAAA,MACL,WAAA,EAAa,MAAA,CAAO,KAAA,CAAM,WAAA,IAAe,CAAA;AAAA,MACzC,YAAA,EAAc,MAAA,CAAO,KAAA,CAAM,YAAA,IAAgB,CAAA;AAAA,MAC3C,cAAc,MAAA,CAAO,KAAA,CAAM,eAAe,CAAA,KAAM,MAAA,CAAO,MAAM,YAAA,IAAgB,CAAA;AAAA;AAC/E,GACF;AACF;AAEA,eAAe,mBAAA,CACb,iBAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EACA;AACA,EAAA,MAAM,gBAAA,GAAmB,gBAAgB,QAAQ,CAAA;AACjD,EAAA,MAAM,SAAS,UAAA,CAAW;AAAA,IACxB,KAAA,EAAO,kBAAkB,KAAK,CAAA;AAAA,IAC9B,MAAA;AAAA,IACA,QAAA,EAAU,gBAAA;AAAA,IACV,MAAA,EAAQ,MAAA,CAAO,MAAA,CAAO,EAAE,QAAQ;AAAA,GACjC,CAAA;AAGD,EAAA,MAAM,gBAAgB,MAAA,CAAO,mBAAA;AAC7B,EAAA,MAAM,MAAA,GAAS,aAAA,CAAc,MAAA,CAAO,aAAa,CAAA,EAAE;AACnD,EAAA,MAAM,KAAA,GAAQ,MAAM,MAAA,CAAO,IAAA,EAAK;AAEhC,EAAA,OAAO,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAM;AACjC;AAoBO,SAAS,wBAAwB,MAAA,EAA2C;AACjF,EAAA,MAAM,oBAAoB,eAAA,CAAgB;AAAA,IACxC,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO;AAAA,GACjB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA
,EAAM,WAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UAClC,aAAA,CAAc,iBAAA,EAAmB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OAChG;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,MAAM,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,KAAU,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UACnE,mBAAA,CAAoB,iBAAA,EAAmB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OACtG;AAEA,MAAA,OAAO,uBAA+B,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAO,WAAA,EAAa,QAAQ,cAAc,CAAA;AAAA,IAClG;AAAA,GACF;AACF","file":"index.js","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { StreamTextResult, ToolSet } from 'ai'\n\n/**\n * Processes an Anthropic streaming response (via Vercel AI SDK) into AG-UI events.\n *\n * Uses `partialOutputStream` from `streamText` + `Output.object()` to receive\n * progressively-built partial objects. Tracks `chatbotMessage` growth to yield\n * TEXT_MESSAGE_CONTENT deltas. 
After the stream completes, validates the final\n * object and yields TOOL_CALL_RESULT.\n *\n * @param streamResult - The streamText result containing partialOutputStream and output promise\n * @param reader - Pre-started async iterator for the partial object stream\n * @param first - The first iteration result (already consumed to trigger the API call)\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processAnthropicStream<TState extends BaseState = BaseState>(\n streamResult: StreamTextResult<ToolSet, never>,\n reader: AsyncIterator<unknown>,\n first: IteratorResult<unknown>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let prevMessageLength = 0\n\n try {\n // Process the first partial (already consumed to trigger the API call)\n if (!first.done) {\n const partial = first.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n }\n\n // Process remaining partials\n let next = await reader.next()\n while (!next.done) {\n const partial = next.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n // eslint-disable-next-line no-await-in-loop\n next = await reader.next()\n }\n\n // Stream complete — await and validate the final object\n const finalObject = await streamResult.output\n\n if (finalObject === null) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'Anthropic stream completed with null output',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n const validated = 
schema.parse(finalObject)\n yield {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `Anthropic stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\n/**\n * Extracts the new portion of chatbotMessage from a partial object.\n */\nfunction extractDelta(partial: Record<string, unknown>, prevLength: number): string | null {\n const chatbotMessage = partial.chatbotMessage\n if (typeof chatbotMessage !== 'string' || chatbotMessage.length <= prevLength) {\n return null\n }\n return chatbotMessage.slice(prevLength)\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import { createAnthropic } from '@ai-sdk/anthropic'\nimport { generateText, streamText, Output, ModelMessage } from 'ai'\nimport type {\n 
Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processAnthropicStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\n\n/**\n * Configuration for creating an Anthropic provider.\n */\nexport interface AnthropicProviderConfig {\n /** Anthropic API key. Defaults to ANTHROPIC_API_KEY env var. */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001']\n */\n models: string[]\n /** Optional custom base URL for the Anthropic API */\n baseURL?: string\n}\n\nfunction toAIMessages(messages: ProviderMessage[]): ModelMessage[] {\n return messages.map((msg) => ({\n role: msg.role,\n content: msg.content,\n }))\n}\n\nexport function prepareMessages(messages: ModelMessage[]): ModelMessage[] {\n if (messages.length === 0) return messages\n const lastMessage = messages[messages.length - 1]\n if (lastMessage.role === 'assistant') {\n return [...messages, { role: 'user', content: 'Continue' }]\n }\n return messages\n}\n\nasync function sendWithModel(\n anthropicProvider: ReturnType<typeof createAnthropic>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n): Promise<ProviderResponse> {\n const preparedMessages = prepareMessages(messages)\n const result = await generateText({\n model: anthropicProvider(model),\n system,\n messages: preparedMessages,\n output: Output.object({ schema }),\n })\n\n return {\n content: JSON.stringify(result.output),\n usage: {\n inputTokens: result.usage.inputTokens ?? 0,\n outputTokens: result.usage.outputTokens ?? 0,\n totalTokens: (result.usage.inputTokens ?? 0) + (result.usage.outputTokens ?? 
0),\n },\n }\n}\n\nasync function sendStreamWithModel(\n anthropicProvider: ReturnType<typeof createAnthropic>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n) {\n const preparedMessages = prepareMessages(messages)\n const result = streamText({\n model: anthropicProvider(model),\n system,\n messages: preparedMessages,\n output: Output.object({ schema }),\n })\n\n // Force the API call to start so executeWithFallback can catch connection errors\n const partialStream = result.partialOutputStream\n const reader = partialStream[Symbol.asyncIterator]()\n const first = await reader.next()\n\n return { result, reader, first }\n}\n\n/**\n * Creates an Anthropic provider instance.\n *\n * Uses the Vercel AI SDK (`ai` + `@ai-sdk/anthropic`) for structured output via\n * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`\n * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON\n * parsing, and validation internally.\n *\n * @param config - Anthropic provider configuration\n * @returns A Provider implementation using Anthropic\n *\n * @example\n * ```typescript\n * const provider = createAnthropicProvider({\n * models: ['claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001'],\n * })\n * ```\n */\nexport function createAnthropicProvider(config: AnthropicProviderConfig): Provider {\n const anthropicProvider = createAnthropic({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n })\n const models = config.models\n\n return {\n name: 'anthropic',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const messages = toAIMessages(request.messages)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> 
{\n const messages = toAIMessages(request.messages)\n\n const { result, reader, first } = await executeWithFallback(models, (model) =>\n sendStreamWithModel(anthropicProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n\n yield* processAnthropicStream<TState>(result, reader, first, 'anthropic', request.responseSchema)\n },\n }\n}\n"]}
@@ -1,61 +1,52 @@
1
1
  'use strict';
2
2
 
3
- var OpenAI = require('openai');
3
+ var openai = require('@ai-sdk/openai');
4
+ var ai = require('ai');
4
5
  var client = require('@ag-ui/client');
5
6
 
6
- function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
7
-
8
- var OpenAI__default = /*#__PURE__*/_interopDefault(OpenAI);
9
-
10
7
  // openai/index.ts
11
- var CHATBOT_MESSAGE_KEY = '"chatbotMessage":"';
12
- async function* processOpenAIStream(rawStream, agentId, schema) {
13
- let fullBuffer = "";
14
- let state = 0 /* SEARCHING */;
15
- let escapeNext = false;
8
+ async function* processOpenAIStream(streamResult, reader, first, agentId, schema) {
9
+ let prevMessageLength = 0;
16
10
  try {
17
- for await (const chunk of rawStream) {
18
- const delta = chunk.choices[0]?.delta?.content;
19
- if (!delta) continue;
20
- for (const char of delta) {
21
- fullBuffer += char;
22
- const result = processChar(char, state, escapeNext, fullBuffer, agentId);
23
- state = result.state;
24
- escapeNext = result.escapeNext;
25
- if (result.event) yield result.event;
11
+ if (!first.done) {
12
+ const partial = first.value;
13
+ const delta = extractDelta(partial, prevMessageLength);
14
+ if (delta) {
15
+ prevMessageLength += delta.length;
16
+ yield {
17
+ type: client.EventType.TEXT_MESSAGE_CONTENT,
18
+ messageId: "",
19
+ delta,
20
+ agentId
21
+ };
26
22
  }
27
- const finishReason = chunk.choices[0]?.finish_reason;
28
- if (finishReason === "length") {
23
+ }
24
+ let next = await reader.next();
25
+ while (!next.done) {
26
+ const partial = next.value;
27
+ const delta = extractDelta(partial, prevMessageLength);
28
+ if (delta) {
29
+ prevMessageLength += delta.length;
29
30
  yield {
30
- type: client.EventType.RUN_ERROR,
31
- message: "OpenAI response truncated (finish_reason: length)",
31
+ type: client.EventType.TEXT_MESSAGE_CONTENT,
32
+ messageId: "",
33
+ delta,
32
34
  agentId
33
35
  };
34
- return;
35
36
  }
37
+ next = await reader.next();
36
38
  }
37
- if (!fullBuffer) {
39
+ const finalObject = await streamResult.output;
40
+ if (finalObject === null) {
38
41
  yield {
39
42
  type: client.EventType.RUN_ERROR,
40
- message: "OpenAI stream completed with empty response",
43
+ message: "OpenAI stream completed with null output",
41
44
  agentId
42
45
  };
43
46
  return;
44
47
  }
45
- yield parseResponse(fullBuffer, schema, agentId);
46
- } catch (err) {
48
+ const validated = schema.parse(finalObject);
47
49
  yield {
48
- type: client.EventType.RUN_ERROR,
49
- message: `OpenAI stream error: ${err.message}`,
50
- agentId
51
- };
52
- }
53
- }
54
- function parseResponse(buffer, schema, agentId) {
55
- try {
56
- const parsed = JSON.parse(buffer);
57
- const validated = schema.parse(parsed);
58
- return {
59
50
  type: client.EventType.TOOL_CALL_RESULT,
60
51
  toolCallId: "",
61
52
  messageId: "",
@@ -63,69 +54,19 @@ function parseResponse(buffer, schema, agentId) {
63
54
  agentId
64
55
  };
65
56
  } catch (err) {
66
- return {
57
+ yield {
67
58
  type: client.EventType.RUN_ERROR,
68
- message: `Response parse/validation failed: ${err.message}`,
59
+ message: `OpenAI stream error: ${err.message}`,
69
60
  agentId
70
61
  };
71
62
  }
72
63
  }
73
- function processChar(char, state, escapeNext, fullBuffer, agentId) {
74
- switch (state) {
75
- case 0 /* SEARCHING */:
76
- if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {
77
- return { state: 1 /* IN_CHATBOT_MESSAGE */, escapeNext: false, event: null };
78
- }
79
- return { state, escapeNext, event: null };
80
- case 1 /* IN_CHATBOT_MESSAGE */:
81
- if (escapeNext) {
82
- return {
83
- state,
84
- escapeNext: false,
85
- event: {
86
- type: client.EventType.TEXT_MESSAGE_CONTENT,
87
- messageId: "",
88
- delta: unescapeChar(char),
89
- agentId
90
- }
91
- };
92
- } else if (char === "\\") {
93
- return { state, escapeNext: true, event: null };
94
- } else if (char === '"') {
95
- return { state: 2 /* PAST_CHATBOT_MESSAGE */, escapeNext: false, event: null };
96
- } else {
97
- return {
98
- state,
99
- escapeNext,
100
- event: {
101
- type: client.EventType.TEXT_MESSAGE_CONTENT,
102
- messageId: "",
103
- delta: char,
104
- agentId
105
- }
106
- };
107
- }
108
- case 2 /* PAST_CHATBOT_MESSAGE */:
109
- return { state, escapeNext, event: null };
110
- }
111
- }
112
- function unescapeChar(char) {
113
- switch (char) {
114
- case '"':
115
- return '"';
116
- case "\\":
117
- return "\\";
118
- case "n":
119
- return "\n";
120
- case "t":
121
- return " ";
122
- case "r":
123
- return "\r";
124
- case "/":
125
- return "/";
126
- default:
127
- return char;
64
+ function extractDelta(partial, prevLength) {
65
+ const chatbotMessage = partial.chatbotMessage;
66
+ if (typeof chatbotMessage !== "string" || chatbotMessage.length <= prevLength) {
67
+ return null;
128
68
  }
69
+ return chatbotMessage.slice(prevLength);
129
70
  }
130
71
 
131
72
  // utils/executeWithFallback.ts
@@ -167,58 +108,51 @@ function enforceStrictSchema(schema) {
167
108
  }
168
109
  return result;
169
110
  }
170
- function toOpenAIMessages(systemPrompt, messages) {
171
- const openAIMessages = [{ role: "system", content: systemPrompt }];
172
- for (const msg of messages) {
173
- openAIMessages.push({ role: msg.role, content: msg.content });
174
- }
175
- return openAIMessages;
176
- }
177
- function prepareRequest(request) {
178
- const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema());
179
- const responseFormat = {
180
- type: "json_schema",
181
- json_schema: {
182
- name: "structuredResponse",
183
- strict: true,
184
- schema: jsonSchema
111
+ function toOpenAISchema(zodSchema) {
112
+ const strict = enforceStrictSchema(zodSchema.toJSONSchema());
113
+ return ai.jsonSchema(strict, {
114
+ validate: (value) => {
115
+ const result = zodSchema.safeParse(value);
116
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
185
117
  }
186
- };
187
- const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages);
188
- return { responseFormat, openAIMessages };
118
+ });
119
+ }
120
+ function toAIMessages(messages) {
121
+ return messages.map((msg) => ({
122
+ role: msg.role,
123
+ content: msg.content
124
+ }));
189
125
  }
190
- async function sendWithModel(client, model, openAIMessages, responseFormat) {
191
- const response = await client.chat.completions.create({
192
- model,
193
- messages: openAIMessages,
194
- response_format: responseFormat
126
+ async function sendWithModel(openaiProvider, model, system, messages, schema) {
127
+ const result = await ai.generateText({
128
+ model: openaiProvider(model),
129
+ system,
130
+ messages,
131
+ output: ai.Output.object({ schema: toOpenAISchema(schema) })
195
132
  });
196
- const choice = response.choices[0];
197
- if (!choice?.message?.content) {
198
- throw new Error("OpenAI returned empty response");
199
- }
200
- if (choice.finish_reason === "length") {
201
- throw new Error("OpenAI response truncated (finish_reason: length)");
202
- }
203
133
  return {
204
- content: choice.message.content,
205
- usage: response.usage ? {
206
- inputTokens: response.usage.prompt_tokens,
207
- outputTokens: response.usage.completion_tokens,
208
- totalTokens: response.usage.total_tokens
209
- } : void 0
134
+ content: JSON.stringify(result.output),
135
+ usage: {
136
+ inputTokens: result.usage.inputTokens ?? 0,
137
+ outputTokens: result.usage.outputTokens ?? 0,
138
+ totalTokens: (result.usage.inputTokens ?? 0) + (result.usage.outputTokens ?? 0)
139
+ }
210
140
  };
211
141
  }
212
- async function sendStreamWithModel(client, model, openAIMessages, responseFormat) {
213
- return client.chat.completions.create({
214
- model,
215
- messages: openAIMessages,
216
- response_format: responseFormat,
217
- stream: true
142
+ async function sendStreamWithModel(openaiProvider, model, system, messages, schema) {
143
+ const result = ai.streamText({
144
+ model: openaiProvider(model),
145
+ system,
146
+ messages,
147
+ output: ai.Output.object({ schema: toOpenAISchema(schema) })
218
148
  });
149
+ const partialStream = result.partialOutputStream;
150
+ const reader = partialStream[Symbol.asyncIterator]();
151
+ const first = await reader.next();
152
+ return { result, reader, first };
219
153
  }
220
154
  function createOpenAIProvider(config) {
221
- const client = new OpenAI__default.default({
155
+ const openaiProvider = openai.createOpenAI({
222
156
  apiKey: config.apiKey,
223
157
  baseURL: config.baseURL,
224
158
  organization: config.organization
@@ -227,19 +161,19 @@ function createOpenAIProvider(config) {
227
161
  return {
228
162
  name: "openai",
229
163
  async sendRequest(request) {
230
- const { responseFormat, openAIMessages } = prepareRequest(request);
164
+ const messages = toAIMessages(request.messages);
231
165
  return executeWithFallback(
232
166
  models,
233
- (model) => sendWithModel(client, model, openAIMessages, responseFormat)
167
+ (model) => sendWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema)
234
168
  );
235
169
  },
236
170
  async *sendRequestStream(request) {
237
- const { responseFormat, openAIMessages } = prepareRequest(request);
238
- const rawStream = await executeWithFallback(
171
+ const messages = toAIMessages(request.messages);
172
+ const { result, reader, first } = await executeWithFallback(
239
173
  models,
240
- (model) => sendStreamWithModel(client, model, openAIMessages, responseFormat)
174
+ (model) => sendStreamWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema)
241
175
  );
242
- yield* processOpenAIStream(rawStream, "openai", request.responseSchema);
176
+ yield* processOpenAIStream(result, reader, first, "openai", request.responseSchema);
243
177
  }
244
178
  };
245
179
  }
@@ -1 +1 @@
1
- {"version":3,"sources":["../../openai/streamProcessor.ts","../../utils/executeWithFallback.ts","../../openai/index.ts"],"names":["EventType","OpenAI"],"mappings":";;;;;;;;;;AAaA,IAAM,mBAAA,GAAsB,oBAAA;AAe5B,gBAAuB,mBAAA,CACrB,SAAA,EACA,OAAA,EACA,MAAA,EACqC;AACrC,EAAA,IAAI,UAAA,GAAa,EAAA;AACjB,EAAA,IAAI,KAAA,GAAqB,CAAA;AACzB,EAAA,IAAI,UAAA,GAAa,KAAA;AAEjB,EAAA,IAAI;AACF,IAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AACnC,MAAA,MAAM,KAAA,GAAQ,KAAA,CAAM,OAAA,CAAQ,CAAC,GAAG,KAAA,EAAO,OAAA;AACvC,MAAA,IAAI,CAAC,KAAA,EAAO;AAEZ,MAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACxB,QAAA,UAAA,IAAc,IAAA;AACd,QAAA,MAAM,SAA6B,WAAA,CAAoB,IAAA,EAAM,KAAA,EAAO,UAAA,EAAY,YAAY,OAAO,CAAA;AACnG,QAAA,KAAA,GAAQ,MAAA,CAAO,KAAA;AACf,QAAA,UAAA,GAAa,MAAA,CAAO,UAAA;AACpB,QAAA,IAAI,MAAA,CAAO,KAAA,EAAO,MAAM,MAAA,CAAO,KAAA;AAAA,MACjC;AAGA,MAAA,MAAM,YAAA,GAAe,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,aAAA;AACvC,MAAA,IAAI,iBAAiB,QAAA,EAAU;AAC7B,QAAA,MAAM;AAAA,UACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,UAChB,OAAA,EAAS,mDAAA;AAAA,UACT;AAAA,SACF;AACA,QAAA;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,CAAC,UAAA,EAAY;AACf,MAAA,MAAM;AAAA,QACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,6CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,aAAA,CAAsB,UAAA,EAAY,MAAA,EAAQ,OAAO,CAAA;AAAA,EACzD,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,qBAAA,EAAyB,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACvD;AAAA,KACF;AAAA,EACF;AACF;AAEA,SAAS,aAAA,CACP,MAAA,EACA,MAAA,EACA,OAAA,EACqB;AACrB,EAAA,IAAI;AACF,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,MAAM,CAAA;AAChC,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,MAAM,CAAA;AACrC,IAAA,OAAO;AAAA,MACL,MAAMA,gBAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,OAAO;AAAA,MACL,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,kCAAA,EAAsC,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACpE;AAAA,KACF;AAAA,EACF;AACF;AAQA,SAAS,WAAA,CACP,IAAA,EACA,KAAA,EACA,UAAA,EACA,YACA,OAAA,EACoB;AACpB,EAAA,QAAQ,KAAA;AAAO,IACb,KAAK,
CAAA;AACH,MAAA,IAAI,UAAA,CAAW,QAAA,CAAS,mBAAmB,CAAA,EAAG;AAC5C,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,2BAAgC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACjF;AACA,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA,IAE1C,KAAK,CAAA;AACH,MAAA,IAAI,UAAA,EAAY;AACd,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA,EAAY,KAAA;AAAA,UACZ,KAAA,EAAO;AAAA,YACL,MAAMA,gBAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,aAAa,IAAI,CAAA;AAAA,YACxB;AAAA;AACF,SACF;AAAA,MACF,CAAA,MAAA,IAAW,SAAS,IAAA,EAAM;AACxB,QAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,IAAA,EAAM,OAAO,IAAA,EAAK;AAAA,MAChD,CAAA,MAAA,IAAW,SAAS,GAAA,EAAK;AACvB,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,6BAAkC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACnF,CAAA,MAAO;AACL,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA;AAAA,UACA,KAAA,EAAO;AAAA,YACL,MAAMA,gBAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,IAAA;AAAA,YACP;AAAA;AACF,SACF;AAAA,MACF;AAAA,IAEF,KAAK,CAAA;AACH,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA;AAE9C;AAGA,SAAS,aAAa,IAAA,EAAsB;AAC1C,EAAA,QAAQ,IAAA;AAAM,IACZ,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,IAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT;AAEE,MAAA,OAAO,IAAA;AAAA;AAEb;;;ACjKA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACDA,SAAS,oBAAoB,MAAA,EAAgC;AAC3D,EAAA,MAAM,MAAA,GAAS,EAAE,GAAG,MAAA,EAAO;AAE3B,EAAA,IAAI,MAAA,CAAO,IAAA,KAAS,QAAA,IAAY,MAAA,CAAO,UAAA,EAAY;AACjD,IAAA,MAAA,CAAO,oBAAA,GAAuB,KAAA;AAC9B,IAAA,MAAA,CAAO,QAAA,GAAW,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,UAAqC,CAAA;AAC1E,IAAA,MAAM,QAAQ,MAAA,CAAO,
UAAA;AACrB,IAAA,MAAM,cAA0C,EAAC;AACjD,IAAA,KAAA,MAAW,CAAC,GAAA,EAAK,KAAK,KAAK,MAAA,CAAO,OAAA,CAAQ,KAAK,CAAA,EAAG;AAChD,MAAA,WAAA,CAAY,GAAG,CAAA,GAAI,mBAAA,CAAoB,KAAK,CAAA;AAAA,IAC9C;AACA,IAAA,MAAA,CAAO,UAAA,GAAa,WAAA;AAAA,EACtB;AAEA,EAAA,IAAI,MAAA,CAAO,KAAA,IAAS,OAAO,MAAA,CAAO,UAAU,QAAA,EAAU;AACpD,IAAA,MAAA,CAAO,KAAA,GAAQ,mBAAA,CAAoB,MAAA,CAAO,KAAmB,CAAA;AAAA,EAC/D;AAGA,EAAA,KAAA,MAAW,OAAA,IAAW,CAAC,OAAA,EAAS,OAAA,EAAS,OAAO,CAAA,EAAY;AAC1D,IAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAC,CAAA,EAAG;AAClC,MAAA,MAAA,CAAO,OAAO,CAAA,GAAK,MAAA,CAAO,OAAO,CAAA,CAAmB,IAAI,CAAC,CAAA,KAAM,mBAAA,CAAoB,CAAC,CAAC,CAAA;AAAA,IACvF;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AAEA,SAAS,gBAAA,CACP,cACA,QAAA,EAC8B;AAC9B,EAAA,MAAM,iBAA+C,CAAC,EAAE,MAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAE/F,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,cAAA,CAAe,IAAA,CAAK,EAAE,IAAA,EAAM,GAAA,CAAI,MAAM,OAAA,EAAS,GAAA,CAAI,SAAS,CAAA;AAAA,EAC9D;AAEA,EAAA,OAAO,cAAA;AACT;AAEA,SAAS,eAAe,OAAA,EAA0B;AAChD,EAAA,MAAM,UAAA,GAAa,mBAAA,CAAoB,OAAA,CAAQ,cAAA,CAAe,cAA4B,CAAA;AAC1F,EAAA,MAAM,cAAA,GAAiB;AAAA,IACrB,IAAA,EAAM,aAAA;AAAA,IACN,WAAA,EAAa;AAAA,MACX,IAAA,EAAM,oBAAA;AAAA,MACN,MAAA,EAAQ,IAAA;AAAA,MACR,MAAA,EAAQ;AAAA;AACV,GACF;AACA,EAAA,MAAM,cAAA,GAAiB,gBAAA,CAAiB,OAAA,CAAQ,YAAA,EAAc,QAAQ,QAAQ,CAAA;AAE9E,EAAA,OAAO,EAAE,gBAAgB,cAAA,EAAe;AAC1C;AAEA,eAAe,aAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cAAA,EAI2B;AAC3B,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,IACpD,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB;AAAA,GAClB,CAAA;AAED,EAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,EAAA,IAAI,CAAC,MAAA,EAAQ,OAAA,EAAS,OAAA,EAAS;AAC7B,IAAA,MAAM,IAAI,MAAM,gCAAgC,CAAA;AAAA,EAClD;AAEA,EAAA,IAAI,MAAA,CAAO,kBAAkB,QAAA,EAAU;AACrC,IAAA,MAAM,IAAI,MAAM,mDAAmD,CAAA;AAAA,EACrE;AAEA,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,OAAO,OAAA,CAAQ,OAAA;AAAA,IACxB,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,MACE,WAAA,EAAa,SAAS,KAAA,CAAM,aAAA;AAAA,MAC5B,YAAA,EAAc,SAAS,KAAA,CAAM,iBAAA;AAAA,MAC7B,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,KAC9B,GACA;AAAA,GACN;AACF;AAEA,eAAe,m
BAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cAAA,EAIA;AACA,EAAA,OAAO,MAAA,CAAO,IAAA,CAAK,WAAA,CAAY,MAAA,CAAO;AAAA,IACpC,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB,cAAA;AAAA,IACjB,MAAA,EAAQ;AAAA,GACT,CAAA;AACH;AAmBO,SAAS,qBAAqB,MAAA,EAAwC;AAC3E,EAAA,MAAM,MAAA,GAAS,IAAIC,uBAAA,CAAO;AAAA,IACxB,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,cAAc,MAAA,CAAO;AAAA,GACtB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KAClC,aAAA,CAAc,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OAC7D;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,MAAM,YAAY,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KACnD,mBAAA,CAAoB,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OACnE;AAEA,MAAA,OAAO,mBAAA,CAA4B,SAAA,EAAW,QAAA,EAAU,OAAA,CAAQ,cAAc,CAAA;AAAA,IAChF;AAAA,GACF;AACF","file":"index.cjs","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { Stream } from 'openai/streaming'\nimport type { ChatCompletionChunk } from 'openai/resources/chat/completions'\n\n/** State-machine states for extracting chatbotMessage from structured JSON stream */\nconst enum ParserState {\n SEARCHING = 0,\n IN_CHATBOT_MESSAGE = 1,\n PAST_CHATBOT_MESSAGE = 2,\n}\n\nconst CHATBOT_MESSAGE_KEY = '\"chatbotMessage\":\"'\n\n/**\n * Processes an OpenAI streaming response into AG-UI events.\n *\n * OpenAI structured output returns the entire response as JSON. The chatbotMessage\n * field is embedded within that JSON. 
This processor uses a character-level state\n * machine to extract chatbotMessage text progressively during streaming, yielding\n * TEXT_MESSAGE_CONTENT deltas in real-time.\n *\n * @param rawStream - OpenAI chat completion stream\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processOpenAIStream<TState extends BaseState = BaseState>(\n rawStream: Stream<ChatCompletionChunk>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let fullBuffer = ''\n let state: ParserState = ParserState.SEARCHING\n let escapeNext = false\n\n try {\n for await (const chunk of rawStream) {\n const delta = chunk.choices[0]?.delta?.content\n if (!delta) continue\n\n for (const char of delta) {\n fullBuffer += char\n const result: CharResult<TState> = processChar<TState>(char, state, escapeNext, fullBuffer, agentId)\n state = result.state\n escapeNext = result.escapeNext\n if (result.event) yield result.event\n }\n\n // Check for truncation\n const finishReason = chunk.choices[0]?.finish_reason\n if (finishReason === 'length') {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI response truncated (finish_reason: length)',\n agentId,\n } as StreamEvent<TState>\n return\n }\n }\n\n // Stream complete — parse and validate the full response\n if (!fullBuffer) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI stream completed with empty response',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n yield parseResponse<TState>(fullBuffer, schema, agentId)\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `OpenAI stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\nfunction parseResponse<TState extends BaseState>(\n buffer: string,\n schema: ZodType,\n agentId: AgentId,\n): StreamEvent<TState> {\n try {\n const parsed = JSON.parse(buffer) as 
Record<string, unknown>\n const validated = schema.parse(parsed)\n return {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n return {\n type: EventType.RUN_ERROR,\n message: `Response parse/validation failed: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\ninterface CharResult<TState extends BaseState> {\n state: ParserState\n escapeNext: boolean\n event: StreamEvent<TState> | null\n}\n\nfunction processChar<TState extends BaseState>(\n char: string,\n state: ParserState,\n escapeNext: boolean,\n fullBuffer: string,\n agentId: AgentId,\n): CharResult<TState> {\n switch (state) {\n case ParserState.SEARCHING:\n if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {\n return { state: ParserState.IN_CHATBOT_MESSAGE, escapeNext: false, event: null }\n }\n return { state, escapeNext, event: null }\n\n case ParserState.IN_CHATBOT_MESSAGE:\n if (escapeNext) {\n return {\n state,\n escapeNext: false,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: unescapeChar(char),\n agentId,\n } as StreamEvent<TState>,\n }\n } else if (char === '\\\\') {\n return { state, escapeNext: true, event: null }\n } else if (char === '\"') {\n return { state: ParserState.PAST_CHATBOT_MESSAGE, escapeNext: false, event: null }\n } else {\n return {\n state,\n escapeNext,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: char,\n agentId,\n } as StreamEvent<TState>,\n }\n }\n\n case ParserState.PAST_CHATBOT_MESSAGE:\n return { state, escapeNext, event: null }\n }\n}\n\n/** Converts a JSON escape character to its actual value */\nfunction unescapeChar(char: string): string {\n switch (char) {\n case '\"':\n return '\"'\n case '\\\\':\n return '\\\\'\n case 'n':\n return '\\n'\n case 't':\n return '\\t'\n case 'r':\n return '\\r'\n case '/':\n return '/'\n default:\n // For \\uXXXX and unknown escapes, return 
as-is (the character after the backslash)\n return char\n }\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import OpenAI from 'openai'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processOpenAIStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\nimport type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'\n\n/**\n * Configuration for creating an OpenAI provider.\n */\nexport interface OpenAIProviderConfig {\n /** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. 
['gpt-4o', 'gpt-4o-mini']\n */\n models: string[]\n /** Optional base URL for Azure OpenAI or compatible endpoints */\n baseURL?: string\n /** Optional OpenAI organization ID */\n organization?: string\n}\n\ntype JsonSchema = Record<string, unknown>\n\n/**\n * Recursively enforces OpenAI structured output requirements on a JSON Schema:\n * - Adds `additionalProperties: false` to all object types\n * - Ensures all properties are listed in `required`\n */\nfunction enforceStrictSchema(schema: JsonSchema): JsonSchema {\n const result = { ...schema }\n\n if (result.type === 'object' && result.properties) {\n result.additionalProperties = false\n result.required = Object.keys(result.properties as Record<string, unknown>)\n const props = result.properties as Record<string, JsonSchema>\n const strictProps: Record<string, JsonSchema> = {}\n for (const [key, value] of Object.entries(props)) {\n strictProps[key] = enforceStrictSchema(value)\n }\n result.properties = strictProps\n }\n\n if (result.items && typeof result.items === 'object') {\n result.items = enforceStrictSchema(result.items as JsonSchema)\n }\n\n // Handle anyOf/oneOf/allOf\n for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) {\n if (Array.isArray(result[keyword])) {\n result[keyword] = (result[keyword] as JsonSchema[]).map((s) => enforceStrictSchema(s))\n }\n }\n\n return result\n}\n\nfunction toOpenAIMessages(\n systemPrompt: string,\n messages: ProviderMessage[],\n): ChatCompletionMessageParam[] {\n const openAIMessages: ChatCompletionMessageParam[] = [{ role: 'system', content: systemPrompt }]\n\n for (const msg of messages) {\n openAIMessages.push({ role: msg.role, content: msg.content })\n }\n\n return openAIMessages\n}\n\nfunction prepareRequest(request: ProviderRequest) {\n const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema() as JsonSchema)\n const responseFormat = {\n type: 'json_schema' as const,\n json_schema: {\n name: 'structuredResponse',\n strict: true,\n 
schema: jsonSchema,\n },\n }\n const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages)\n\n return { responseFormat, openAIMessages }\n}\n\nasync function sendWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n): Promise<ProviderResponse> {\n const response = await client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n })\n\n const choice = response.choices[0]\n if (!choice?.message?.content) {\n throw new Error('OpenAI returned empty response')\n }\n\n if (choice.finish_reason === 'length') {\n throw new Error('OpenAI response truncated (finish_reason: length)')\n }\n\n return {\n content: choice.message.content,\n usage: response.usage\n ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens,\n totalTokens: response.usage.total_tokens,\n }\n : undefined,\n }\n}\n\nasync function sendStreamWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n) {\n return client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n stream: true,\n })\n}\n\n/**\n * Creates an OpenAI provider instance.\n *\n * Uses OpenAI's structured output (response_format with JSON Schema) for both\n * blocking and streaming paths, with real-time chatbotMessage text streaming\n * via a custom stream processor.\n *\n * @param config - OpenAI provider configuration\n * @returns A Provider implementation using OpenAI\n *\n * @example\n * ```typescript\n * const provider = createOpenAIProvider({\n * models: ['gpt-4o', 'gpt-4o-mini'],\n * })\n * ```\n */\nexport function createOpenAIProvider(config: OpenAIProviderConfig): 
Provider {\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n organization: config.organization,\n })\n const models = config.models\n\n return {\n name: 'openai',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(client, model, openAIMessages, responseFormat),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n const rawStream = await executeWithFallback(models, (model) =>\n sendStreamWithModel(client, model, openAIMessages, responseFormat),\n )\n\n yield* processOpenAIStream<TState>(rawStream, 'openai', request.responseSchema)\n },\n }\n}\n"]}
1
+ {"version":3,"sources":["../../openai/streamProcessor.ts","../../utils/executeWithFallback.ts","../../openai/index.ts"],"names":["EventType","jsonSchema","generateText","Output","streamText","createOpenAI"],"mappings":";;;;;;;AAoBA,gBAAuB,mBAAA,CACrB,YAAA,EACA,MAAA,EACA,KAAA,EACA,SACA,MAAA,EACqC;AACrC,EAAA,IAAI,iBAAA,GAAoB,CAAA;AAExB,EAAA,IAAI;AAEF,IAAA,IAAI,CAAC,MAAM,IAAA,EAAM;AACf,MAAA,MAAM,UAAU,KAAA,CAAM,KAAA;AACtB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAMA,gBAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,IAAA,GAAO,MAAM,MAAA,CAAO,IAAA,EAAK;AAC7B,IAAA,OAAO,CAAC,KAAK,IAAA,EAAM;AACjB,MAAA,MAAM,UAAU,IAAA,CAAK,KAAA;AACrB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAMA,gBAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAEA,MAAA,IAAA,GAAO,MAAM,OAAO,IAAA,EAAK;AAAA,IAC3B;AAGA,IAAA,MAAM,WAAA,GAAc,MAAM,YAAA,CAAa,MAAA;AAEvC,IAAA,IAAI,gBAAgB,IAAA,EAAM;AACxB,MAAA,MAAM;AAAA,QACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,0CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,WAAW,CAAA;AAC1C,IAAA,MAAM;AAAA,MACJ,MAAMA,gBAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAMA,gBAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,qBAAA,EAAyB,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACvD;AAAA,KACF;AAAA,EACF;AACF;AAKA,SAAS,YAAA,CAAa,SAAkC,UAAA,EAAmC;AACzF,EAAA,MAAM,iBAAiB,OAAA,CAAQ,cAAA;AAC/B,EAAA,IAAI,OAAO,cAAA,KAAmB,QAAA,IAAY,cAAA,CAAe,UAAU,UAAA,EAAY;AAC7E,IAAA,OAAO,IAAA;AAAA,EACT;AACA,EAAA,OAAO,cAAA,CAAe,MAAM,UAAU,CAAA;AACxC;;;ACnFA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ
,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACEA,SAAS,oBAAoB,MAAA,EAAgC;AAC3D,EAAA,MAAM,MAAA,GAAS,EAAE,GAAG,MAAA,EAAO;AAC3B,EAAA,IAAI,MAAA,CAAO,IAAA,KAAS,QAAA,IAAY,MAAA,CAAO,UAAA,EAAY;AACjD,IAAA,MAAA,CAAO,oBAAA,GAAuB,KAAA;AAC9B,IAAA,MAAA,CAAO,QAAA,GAAW,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,UAAqC,CAAA;AAC1E,IAAA,MAAM,QAAQ,MAAA,CAAO,UAAA;AACrB,IAAA,MAAM,cAA0C,EAAC;AACjD,IAAA,KAAA,MAAW,CAAC,GAAA,EAAK,KAAK,KAAK,MAAA,CAAO,OAAA,CAAQ,KAAK,CAAA,EAAG;AAChD,MAAA,WAAA,CAAY,GAAG,CAAA,GAAI,mBAAA,CAAoB,KAAK,CAAA;AAAA,IAC9C;AACA,IAAA,MAAA,CAAO,UAAA,GAAa,WAAA;AAAA,EACtB;AACA,EAAA,IAAI,MAAA,CAAO,KAAA,IAAS,OAAO,MAAA,CAAO,UAAU,QAAA,EAAU;AACpD,IAAA,MAAA,CAAO,KAAA,GAAQ,mBAAA,CAAoB,MAAA,CAAO,KAAmB,CAAA;AAAA,EAC/D;AACA,EAAA,KAAA,MAAW,OAAA,IAAW,CAAC,OAAA,EAAS,OAAA,EAAS,OAAO,CAAA,EAAY;AAC1D,IAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAC,CAAA,EAAG;AAClC,MAAA,MAAA,CAAO,OAAO,CAAA,GAAK,MAAA,CAAO,OAAO,CAAA,CAAmB,IAAI,CAAC,CAAA,KAAM,mBAAA,CAAoB,CAAC,CAAC,CAAA;AAAA,IACvF;AAAA,EACF;AACA,EAAA,OAAO,MAAA;AACT;AAQA,SAAS,eAAe,SAAA,EAA8C;AACpE,EAAA,MAAM,MAAA,GAAS,mBAAA,CAAoB,SAAA,CAAU,YAAA,EAA4B,CAAA;AACzE,EAAA,OAAOC,cAAW,MAAA,EAAiB;AAAA,IACjC,QAAA,EAAU,CAAC,KAAA,KAAmB;AAC5B,MAAA,MAAM,MAAA,GAAS,SAAA,CAAU,SAAA,CAAU,KAAK,CAAA;AACxC,MAAA,OAAO,MAAA,CAAO,OAAA,GACV,EAAE,OAAA,EAAS,MAAe,KAAA,EAAO,MAAA,CAAO,IAAA,EAAK,GAC7C,EAAE,OAAA,EAAS,KAAA,EAAgB,KAAA,EAAO,OAAO,KAAA,EAAM;AAAA,IACrD;AAAA,GACD,CAAA;AACH;AAEA,SAAS,aAAa,QAAA,EAA6C;AACjE,EAAA,OAAO,QAAA,CAAS,GAAA,CAAI,CAAC,GAAA,MAAS;AAAA,IAC5B,MAAM,GAAA,CAAI,IAAA;AAAA,IACV,SAAS,GAAA,CAAI;AAAA,GACf,CAAE,CAAA;AACJ;AAEA,eAAe,aAAA,CACb,cAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EAC2B;AAC3B,EAAA,MAAM,MAAA,GAAS,MAAMC,eAAA,CAAa;AAAA,IAChC,KAAA,EAAO,eAAe,KAAK,CAAA;AAAA,IAC3B,MAAA;AAAA,IACA,QAAA;AAAA,IACA,MAAA,EAAQC,
UAAO,MAAA,CAAO,EAAE,QAAQ,cAAA,CAAe,MAAM,GAAG;AAAA,GACzD,CAAA;AAED,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,MAAA,CAAO,MAAM,CAAA;AAAA,IACrC,KAAA,EAAO;AAAA,MACL,WAAA,EAAa,MAAA,CAAO,KAAA,CAAM,WAAA,IAAe,CAAA;AAAA,MACzC,YAAA,EAAc,MAAA,CAAO,KAAA,CAAM,YAAA,IAAgB,CAAA;AAAA,MAC3C,cAAc,MAAA,CAAO,KAAA,CAAM,eAAe,CAAA,KAAM,MAAA,CAAO,MAAM,YAAA,IAAgB,CAAA;AAAA;AAC/E,GACF;AACF;AAEA,eAAe,mBAAA,CACb,cAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EACA;AACA,EAAA,MAAM,SAASC,aAAA,CAAW;AAAA,IACxB,KAAA,EAAO,eAAe,KAAK,CAAA;AAAA,IAC3B,MAAA;AAAA,IACA,QAAA;AAAA,IACA,MAAA,EAAQD,UAAO,MAAA,CAAO,EAAE,QAAQ,cAAA,CAAe,MAAM,GAAG;AAAA,GACzD,CAAA;AAGD,EAAA,MAAM,gBAAgB,MAAA,CAAO,mBAAA;AAC7B,EAAA,MAAM,MAAA,GAAS,aAAA,CAAc,MAAA,CAAO,aAAa,CAAA,EAAE;AACnD,EAAA,MAAM,KAAA,GAAQ,MAAM,MAAA,CAAO,IAAA,EAAK;AAEhC,EAAA,OAAO,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAM;AACjC;AAoBO,SAAS,qBAAqB,MAAA,EAAwC;AAC3E,EAAA,MAAM,iBAAiBE,mBAAA,CAAa;AAAA,IAClC,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,cAAc,MAAA,CAAO;AAAA,GACtB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UAClC,aAAA,CAAc,cAAA,EAAgB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OAC7F;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,MAAM,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,KAAU,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UACnE,mBAAA,CAAoB,cAAA,EAAgB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OACnG;AAEA,MAAA,OAAO,oBAA4B,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAO,QAAA,EAAU,QAAQ,cAAc,CAAA;AAAA,IAC5F;AAAA,GACF;AACF","file":"index.cjs","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { StreamTextResult, ToolSet } from 'ai'\n\n/**\n * Processes an OpenAI streaming response (via Vercel AI SDK) into 
AG-UI events.\n *\n * Uses `partialOutputStream` from `streamText` + `Output.object()` to receive\n * progressively-built partial objects. Tracks `chatbotMessage` growth to yield\n * TEXT_MESSAGE_CONTENT deltas. After the stream completes, validates the final\n * object and yields TOOL_CALL_RESULT.\n *\n * @param streamResult - The streamText result containing partialOutputStream and output promise\n * @param reader - Pre-started async iterator for the partial object stream\n * @param first - The first iteration result (already consumed to trigger the API call)\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processOpenAIStream<TState extends BaseState = BaseState>(\n streamResult: StreamTextResult<ToolSet, never>,\n reader: AsyncIterator<unknown>,\n first: IteratorResult<unknown>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let prevMessageLength = 0\n\n try {\n // Process the first partial (already consumed to trigger the API call)\n if (!first.done) {\n const partial = first.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n }\n\n // Process remaining partials\n let next = await reader.next()\n while (!next.done) {\n const partial = next.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n // eslint-disable-next-line no-await-in-loop\n next = await reader.next()\n }\n\n // Stream complete — await and validate the final object\n const finalObject = await 
streamResult.output\n\n if (finalObject === null) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI stream completed with null output',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n const validated = schema.parse(finalObject)\n yield {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `OpenAI stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\n/**\n * Extracts the new portion of chatbotMessage from a partial object.\n */\nfunction extractDelta(partial: Record<string, unknown>, prevLength: number): string | null {\n const chatbotMessage = partial.chatbotMessage\n if (typeof chatbotMessage !== 'string' || chatbotMessage.length <= prevLength) {\n return null\n }\n return chatbotMessage.slice(prevLength)\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw 
errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import { createOpenAI } from '@ai-sdk/openai'\nimport { generateText, streamText, Output, ModelMessage, jsonSchema } from 'ai'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processOpenAIStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\n\n/**\n * Configuration for creating an OpenAI provider.\n */\nexport interface OpenAIProviderConfig {\n /** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. ['gpt-4o', 'gpt-4o-mini']\n */\n models: string[]\n /** Optional base URL for Azure OpenAI or compatible endpoints */\n baseURL?: string\n /** Optional OpenAI organization ID */\n organization?: string\n}\n\ntype JsonSchema = Record<string, unknown>\n\n/**\n * Enforces strict JSON schema constraints required by OpenAI's structured output API.\n * OpenAI requires all object properties — including optional ones — to be in the `required` array.\n * This function recursively adds `required` and `additionalProperties: false` to all objects.\n *\n * @param schema - JSON schema to enforce\n * @returns Enforced JSON schema\n */\nfunction enforceStrictSchema(schema: JsonSchema): JsonSchema {\n const result = { ...schema }\n if (result.type === 'object' && result.properties) {\n result.additionalProperties = false\n result.required = Object.keys(result.properties as Record<string, unknown>)\n const props = result.properties as Record<string, JsonSchema>\n const strictProps: Record<string, JsonSchema> = {}\n for (const [key, value] of Object.entries(props)) {\n strictProps[key] = enforceStrictSchema(value)\n }\n result.properties = strictProps\n }\n if (result.items && typeof result.items === 'object') {\n result.items = 
enforceStrictSchema(result.items as JsonSchema)\n }\n for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) {\n if (Array.isArray(result[keyword])) {\n result[keyword] = (result[keyword] as JsonSchema[]).map((s) => enforceStrictSchema(s))\n }\n }\n return result\n}\n\n/**\n * Converts a Zod schema to an OpenAI-compatible strict JSON schema wrapped for the Vercel AI SDK.\n *\n * @param zodSchema - Zod schema to convert\n * @returns JSON schema wrapped for Vercel AI SDK\n */\nfunction toOpenAISchema(zodSchema: ProviderRequest['responseSchema']) {\n const strict = enforceStrictSchema(zodSchema.toJSONSchema() as JsonSchema)\n return jsonSchema(strict as never, {\n validate: (value: unknown) => {\n const result = zodSchema.safeParse(value)\n return result.success\n ? { success: true as const, value: result.data }\n : { success: false as const, error: result.error }\n },\n })\n}\n\nfunction toAIMessages(messages: ProviderMessage[]): ModelMessage[] {\n return messages.map((msg) => ({\n role: msg.role,\n content: msg.content,\n }))\n}\n\nasync function sendWithModel(\n openaiProvider: ReturnType<typeof createOpenAI>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n): Promise<ProviderResponse> {\n const result = await generateText({\n model: openaiProvider(model),\n system,\n messages,\n output: Output.object({ schema: toOpenAISchema(schema) }),\n })\n\n return {\n content: JSON.stringify(result.output),\n usage: {\n inputTokens: result.usage.inputTokens ?? 0,\n outputTokens: result.usage.outputTokens ?? 0,\n totalTokens: (result.usage.inputTokens ?? 0) + (result.usage.outputTokens ?? 
0),\n },\n }\n}\n\nasync function sendStreamWithModel(\n openaiProvider: ReturnType<typeof createOpenAI>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n) {\n const result = streamText({\n model: openaiProvider(model),\n system,\n messages,\n output: Output.object({ schema: toOpenAISchema(schema) }),\n })\n\n // Force the API call to start so executeWithFallback can catch connection errors\n const partialStream = result.partialOutputStream\n const reader = partialStream[Symbol.asyncIterator]()\n const first = await reader.next()\n\n return { result, reader, first }\n}\n\n/**\n * Creates an OpenAI provider instance.\n *\n * Uses the Vercel AI SDK (`ai` + `@ai-sdk/openai`) for structured output via\n * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`\n * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON\n * parsing, and validation internally.\n *\n * @param config - OpenAI provider configuration\n * @returns A Provider implementation using OpenAI\n *\n * @example\n * ```typescript\n * const provider = createOpenAIProvider({\n * models: ['gpt-4o', 'gpt-4o-mini'],\n * })\n * ```\n */\nexport function createOpenAIProvider(config: OpenAIProviderConfig): Provider {\n const openaiProvider = createOpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n organization: config.organization,\n })\n const models = config.models\n\n return {\n name: 'openai',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const messages = toAIMessages(request.messages)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const messages = toAIMessages(request.messages)\n\n const { result, reader, first 
} = await executeWithFallback(models, (model) =>\n sendStreamWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n\n yield* processOpenAIStream<TState>(result, reader, first, 'openai', request.responseSchema)\n },\n }\n}\n"]}
@@ -19,9 +19,10 @@ interface OpenAIProviderConfig {
19
19
  /**
20
20
  * Creates an OpenAI provider instance.
21
21
  *
22
- * Uses OpenAI's structured output (response_format with JSON Schema) for both
23
- * blocking and streaming paths, with real-time chatbotMessage text streaming
24
- * via a custom stream processor.
22
+ * Uses the Vercel AI SDK (`ai` + `@ai-sdk/openai`) for structured output via
23
+ * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`
24
+ * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON
25
+ * parsing, and validation internally.
25
26
  *
26
27
  * @param config - OpenAI provider configuration
27
28
  * @returns A Provider implementation using OpenAI
@@ -19,9 +19,10 @@ interface OpenAIProviderConfig {
19
19
  /**
20
20
  * Creates an OpenAI provider instance.
21
21
  *
22
- * Uses OpenAI's structured output (response_format with JSON Schema) for both
23
- * blocking and streaming paths, with real-time chatbotMessage text streaming
24
- * via a custom stream processor.
22
+ * Uses the Vercel AI SDK (`ai` + `@ai-sdk/openai`) for structured output via
23
+ * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`
24
+ * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON
25
+ * parsing, and validation internally.
25
26
  *
26
27
  * @param config - OpenAI provider configuration
27
28
  * @returns A Provider implementation using OpenAI
@@ -1,55 +1,50 @@
1
- import OpenAI from 'openai';
1
+ import { createOpenAI } from '@ai-sdk/openai';
2
+ import { streamText, Output, jsonSchema, generateText } from 'ai';
2
3
  import { EventType } from '@ag-ui/client';
3
4
 
4
5
  // openai/index.ts
5
- var CHATBOT_MESSAGE_KEY = '"chatbotMessage":"';
6
- async function* processOpenAIStream(rawStream, agentId, schema) {
7
- let fullBuffer = "";
8
- let state = 0 /* SEARCHING */;
9
- let escapeNext = false;
6
+ async function* processOpenAIStream(streamResult, reader, first, agentId, schema) {
7
+ let prevMessageLength = 0;
10
8
  try {
11
- for await (const chunk of rawStream) {
12
- const delta = chunk.choices[0]?.delta?.content;
13
- if (!delta) continue;
14
- for (const char of delta) {
15
- fullBuffer += char;
16
- const result = processChar(char, state, escapeNext, fullBuffer, agentId);
17
- state = result.state;
18
- escapeNext = result.escapeNext;
19
- if (result.event) yield result.event;
9
+ if (!first.done) {
10
+ const partial = first.value;
11
+ const delta = extractDelta(partial, prevMessageLength);
12
+ if (delta) {
13
+ prevMessageLength += delta.length;
14
+ yield {
15
+ type: EventType.TEXT_MESSAGE_CONTENT,
16
+ messageId: "",
17
+ delta,
18
+ agentId
19
+ };
20
20
  }
21
- const finishReason = chunk.choices[0]?.finish_reason;
22
- if (finishReason === "length") {
21
+ }
22
+ let next = await reader.next();
23
+ while (!next.done) {
24
+ const partial = next.value;
25
+ const delta = extractDelta(partial, prevMessageLength);
26
+ if (delta) {
27
+ prevMessageLength += delta.length;
23
28
  yield {
24
- type: EventType.RUN_ERROR,
25
- message: "OpenAI response truncated (finish_reason: length)",
29
+ type: EventType.TEXT_MESSAGE_CONTENT,
30
+ messageId: "",
31
+ delta,
26
32
  agentId
27
33
  };
28
- return;
29
34
  }
35
+ next = await reader.next();
30
36
  }
31
- if (!fullBuffer) {
37
+ const finalObject = await streamResult.output;
38
+ if (finalObject === null) {
32
39
  yield {
33
40
  type: EventType.RUN_ERROR,
34
- message: "OpenAI stream completed with empty response",
41
+ message: "OpenAI stream completed with null output",
35
42
  agentId
36
43
  };
37
44
  return;
38
45
  }
39
- yield parseResponse(fullBuffer, schema, agentId);
40
- } catch (err) {
46
+ const validated = schema.parse(finalObject);
41
47
  yield {
42
- type: EventType.RUN_ERROR,
43
- message: `OpenAI stream error: ${err.message}`,
44
- agentId
45
- };
46
- }
47
- }
48
- function parseResponse(buffer, schema, agentId) {
49
- try {
50
- const parsed = JSON.parse(buffer);
51
- const validated = schema.parse(parsed);
52
- return {
53
48
  type: EventType.TOOL_CALL_RESULT,
54
49
  toolCallId: "",
55
50
  messageId: "",
@@ -57,69 +52,19 @@ function parseResponse(buffer, schema, agentId) {
57
52
  agentId
58
53
  };
59
54
  } catch (err) {
60
- return {
55
+ yield {
61
56
  type: EventType.RUN_ERROR,
62
- message: `Response parse/validation failed: ${err.message}`,
57
+ message: `OpenAI stream error: ${err.message}`,
63
58
  agentId
64
59
  };
65
60
  }
66
61
  }
67
- function processChar(char, state, escapeNext, fullBuffer, agentId) {
68
- switch (state) {
69
- case 0 /* SEARCHING */:
70
- if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {
71
- return { state: 1 /* IN_CHATBOT_MESSAGE */, escapeNext: false, event: null };
72
- }
73
- return { state, escapeNext, event: null };
74
- case 1 /* IN_CHATBOT_MESSAGE */:
75
- if (escapeNext) {
76
- return {
77
- state,
78
- escapeNext: false,
79
- event: {
80
- type: EventType.TEXT_MESSAGE_CONTENT,
81
- messageId: "",
82
- delta: unescapeChar(char),
83
- agentId
84
- }
85
- };
86
- } else if (char === "\\") {
87
- return { state, escapeNext: true, event: null };
88
- } else if (char === '"') {
89
- return { state: 2 /* PAST_CHATBOT_MESSAGE */, escapeNext: false, event: null };
90
- } else {
91
- return {
92
- state,
93
- escapeNext,
94
- event: {
95
- type: EventType.TEXT_MESSAGE_CONTENT,
96
- messageId: "",
97
- delta: char,
98
- agentId
99
- }
100
- };
101
- }
102
- case 2 /* PAST_CHATBOT_MESSAGE */:
103
- return { state, escapeNext, event: null };
104
- }
105
- }
106
- function unescapeChar(char) {
107
- switch (char) {
108
- case '"':
109
- return '"';
110
- case "\\":
111
- return "\\";
112
- case "n":
113
- return "\n";
114
- case "t":
115
- return " ";
116
- case "r":
117
- return "\r";
118
- case "/":
119
- return "/";
120
- default:
121
- return char;
62
+ function extractDelta(partial, prevLength) {
63
+ const chatbotMessage = partial.chatbotMessage;
64
+ if (typeof chatbotMessage !== "string" || chatbotMessage.length <= prevLength) {
65
+ return null;
122
66
  }
67
+ return chatbotMessage.slice(prevLength);
123
68
  }
124
69
 
125
70
  // utils/executeWithFallback.ts
@@ -161,58 +106,51 @@ function enforceStrictSchema(schema) {
161
106
  }
162
107
  return result;
163
108
  }
164
- function toOpenAIMessages(systemPrompt, messages) {
165
- const openAIMessages = [{ role: "system", content: systemPrompt }];
166
- for (const msg of messages) {
167
- openAIMessages.push({ role: msg.role, content: msg.content });
168
- }
169
- return openAIMessages;
170
- }
171
- function prepareRequest(request) {
172
- const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema());
173
- const responseFormat = {
174
- type: "json_schema",
175
- json_schema: {
176
- name: "structuredResponse",
177
- strict: true,
178
- schema: jsonSchema
109
+ function toOpenAISchema(zodSchema) {
110
+ const strict = enforceStrictSchema(zodSchema.toJSONSchema());
111
+ return jsonSchema(strict, {
112
+ validate: (value) => {
113
+ const result = zodSchema.safeParse(value);
114
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
179
115
  }
180
- };
181
- const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages);
182
- return { responseFormat, openAIMessages };
116
+ });
117
+ }
118
+ function toAIMessages(messages) {
119
+ return messages.map((msg) => ({
120
+ role: msg.role,
121
+ content: msg.content
122
+ }));
183
123
  }
184
- async function sendWithModel(client, model, openAIMessages, responseFormat) {
185
- const response = await client.chat.completions.create({
186
- model,
187
- messages: openAIMessages,
188
- response_format: responseFormat
124
+ async function sendWithModel(openaiProvider, model, system, messages, schema) {
125
+ const result = await generateText({
126
+ model: openaiProvider(model),
127
+ system,
128
+ messages,
129
+ output: Output.object({ schema: toOpenAISchema(schema) })
189
130
  });
190
- const choice = response.choices[0];
191
- if (!choice?.message?.content) {
192
- throw new Error("OpenAI returned empty response");
193
- }
194
- if (choice.finish_reason === "length") {
195
- throw new Error("OpenAI response truncated (finish_reason: length)");
196
- }
197
131
  return {
198
- content: choice.message.content,
199
- usage: response.usage ? {
200
- inputTokens: response.usage.prompt_tokens,
201
- outputTokens: response.usage.completion_tokens,
202
- totalTokens: response.usage.total_tokens
203
- } : void 0
132
+ content: JSON.stringify(result.output),
133
+ usage: {
134
+ inputTokens: result.usage.inputTokens ?? 0,
135
+ outputTokens: result.usage.outputTokens ?? 0,
136
+ totalTokens: (result.usage.inputTokens ?? 0) + (result.usage.outputTokens ?? 0)
137
+ }
204
138
  };
205
139
  }
206
- async function sendStreamWithModel(client, model, openAIMessages, responseFormat) {
207
- return client.chat.completions.create({
208
- model,
209
- messages: openAIMessages,
210
- response_format: responseFormat,
211
- stream: true
140
+ async function sendStreamWithModel(openaiProvider, model, system, messages, schema) {
141
+ const result = streamText({
142
+ model: openaiProvider(model),
143
+ system,
144
+ messages,
145
+ output: Output.object({ schema: toOpenAISchema(schema) })
212
146
  });
147
+ const partialStream = result.partialOutputStream;
148
+ const reader = partialStream[Symbol.asyncIterator]();
149
+ const first = await reader.next();
150
+ return { result, reader, first };
213
151
  }
214
152
  function createOpenAIProvider(config) {
215
- const client = new OpenAI({
153
+ const openaiProvider = createOpenAI({
216
154
  apiKey: config.apiKey,
217
155
  baseURL: config.baseURL,
218
156
  organization: config.organization
@@ -221,19 +159,19 @@ function createOpenAIProvider(config) {
221
159
  return {
222
160
  name: "openai",
223
161
  async sendRequest(request) {
224
- const { responseFormat, openAIMessages } = prepareRequest(request);
162
+ const messages = toAIMessages(request.messages);
225
163
  return executeWithFallback(
226
164
  models,
227
- (model) => sendWithModel(client, model, openAIMessages, responseFormat)
165
+ (model) => sendWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema)
228
166
  );
229
167
  },
230
168
  async *sendRequestStream(request) {
231
- const { responseFormat, openAIMessages } = prepareRequest(request);
232
- const rawStream = await executeWithFallback(
169
+ const messages = toAIMessages(request.messages);
170
+ const { result, reader, first } = await executeWithFallback(
233
171
  models,
234
- (model) => sendStreamWithModel(client, model, openAIMessages, responseFormat)
172
+ (model) => sendStreamWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema)
235
173
  );
236
- yield* processOpenAIStream(rawStream, "openai", request.responseSchema);
174
+ yield* processOpenAIStream(result, reader, first, "openai", request.responseSchema);
237
175
  }
238
176
  };
239
177
  }
@@ -1 +1 @@
1
- {"version":3,"sources":["../../openai/streamProcessor.ts","../../utils/executeWithFallback.ts","../../openai/index.ts"],"names":[],"mappings":";;;;AAaA,IAAM,mBAAA,GAAsB,oBAAA;AAe5B,gBAAuB,mBAAA,CACrB,SAAA,EACA,OAAA,EACA,MAAA,EACqC;AACrC,EAAA,IAAI,UAAA,GAAa,EAAA;AACjB,EAAA,IAAI,KAAA,GAAqB,CAAA;AACzB,EAAA,IAAI,UAAA,GAAa,KAAA;AAEjB,EAAA,IAAI;AACF,IAAA,WAAA,MAAiB,SAAS,SAAA,EAAW;AACnC,MAAA,MAAM,KAAA,GAAQ,KAAA,CAAM,OAAA,CAAQ,CAAC,GAAG,KAAA,EAAO,OAAA;AACvC,MAAA,IAAI,CAAC,KAAA,EAAO;AAEZ,MAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACxB,QAAA,UAAA,IAAc,IAAA;AACd,QAAA,MAAM,SAA6B,WAAA,CAAoB,IAAA,EAAM,KAAA,EAAO,UAAA,EAAY,YAAY,OAAO,CAAA;AACnG,QAAA,KAAA,GAAQ,MAAA,CAAO,KAAA;AACf,QAAA,UAAA,GAAa,MAAA,CAAO,UAAA;AACpB,QAAA,IAAI,MAAA,CAAO,KAAA,EAAO,MAAM,MAAA,CAAO,KAAA;AAAA,MACjC;AAGA,MAAA,MAAM,YAAA,GAAe,KAAA,CAAM,OAAA,CAAQ,CAAC,CAAA,EAAG,aAAA;AACvC,MAAA,IAAI,iBAAiB,QAAA,EAAU;AAC7B,QAAA,MAAM;AAAA,UACJ,MAAM,SAAA,CAAU,SAAA;AAAA,UAChB,OAAA,EAAS,mDAAA;AAAA,UACT;AAAA,SACF;AACA,QAAA;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,CAAC,UAAA,EAAY;AACf,MAAA,MAAM;AAAA,QACJ,MAAM,SAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,6CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,aAAA,CAAsB,UAAA,EAAY,MAAA,EAAQ,OAAO,CAAA;AAAA,EACzD,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,qBAAA,EAAyB,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACvD;AAAA,KACF;AAAA,EACF;AACF;AAEA,SAAS,aAAA,CACP,MAAA,EACA,MAAA,EACA,OAAA,EACqB;AACrB,EAAA,IAAI;AACF,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,MAAM,CAAA;AAChC,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,MAAM,CAAA;AACrC,IAAA,OAAO;AAAA,MACL,MAAM,SAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,OAAO;AAAA,MACL,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,kCAAA,EAAsC,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACpE;AAAA,KACF;AAAA,EACF;AACF;AAQA,SAAS,WAAA,CACP,IAAA,EACA,KAAA,EACA,UAAA,EACA,YACA,OAAA,EACoB;AACpB,EAAA,QAAQ,KAAA;AAAO,IACb,KAAK,CAAA;AACH,MAAA,IAAI,UAAA,CAAW,QAAA,C
AAS,mBAAmB,CAAA,EAAG;AAC5C,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,2BAAgC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACjF;AACA,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA,IAE1C,KAAK,CAAA;AACH,MAAA,IAAI,UAAA,EAAY;AACd,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA,EAAY,KAAA;AAAA,UACZ,KAAA,EAAO;AAAA,YACL,MAAM,SAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,aAAa,IAAI,CAAA;AAAA,YACxB;AAAA;AACF,SACF;AAAA,MACF,CAAA,MAAA,IAAW,SAAS,IAAA,EAAM;AACxB,QAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,IAAA,EAAM,OAAO,IAAA,EAAK;AAAA,MAChD,CAAA,MAAA,IAAW,SAAS,GAAA,EAAK;AACvB,QAAA,OAAO,EAAE,KAAA,EAAO,CAAA,6BAAkC,UAAA,EAAY,KAAA,EAAO,OAAO,IAAA,EAAK;AAAA,MACnF,CAAA,MAAO;AACL,QAAA,OAAO;AAAA,UACL,KAAA;AAAA,UACA,UAAA;AAAA,UACA,KAAA,EAAO;AAAA,YACL,MAAM,SAAA,CAAU,oBAAA;AAAA,YAChB,SAAA,EAAW,EAAA;AAAA,YACX,KAAA,EAAO,IAAA;AAAA,YACP;AAAA;AACF,SACF;AAAA,MACF;AAAA,IAEF,KAAK,CAAA;AACH,MAAA,OAAO,EAAE,KAAA,EAAO,UAAA,EAAY,KAAA,EAAO,IAAA,EAAK;AAAA;AAE9C;AAGA,SAAS,aAAa,IAAA,EAAsB;AAC1C,EAAA,QAAQ,IAAA;AAAM,IACZ,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,IAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,IAAA;AAAA,IACT,KAAK,GAAA;AACH,MAAA,OAAO,GAAA;AAAA,IACT;AAEE,MAAA,OAAO,IAAA;AAAA;AAEb;;;ACjKA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACDA,SAAS,oBAAoB,MAAA,EAAgC;AAC3D,EAAA,MAAM,MAAA,GAAS,EAAE,GAAG,MAAA,EAAO;AAE3B,EAAA,IAAI,MAAA,CAAO,IAAA,KAAS,QAAA,IAAY,MAAA,CAAO,UAAA,EAAY;AACjD,IAAA,MAAA,CAAO,oBAAA,GAAuB,KAAA;AAC9B,IAAA,MAAA,CAAO,QAAA,GAAW,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,UAAqC,CAAA;AAC1E,IAAA,MAAM,QAAQ,MAAA,CAAO,UAAA;AACrB,IAAA,MAAM,cAA0C,EAAC;AACjD,IA
AA,KAAA,MAAW,CAAC,GAAA,EAAK,KAAK,KAAK,MAAA,CAAO,OAAA,CAAQ,KAAK,CAAA,EAAG;AAChD,MAAA,WAAA,CAAY,GAAG,CAAA,GAAI,mBAAA,CAAoB,KAAK,CAAA;AAAA,IAC9C;AACA,IAAA,MAAA,CAAO,UAAA,GAAa,WAAA;AAAA,EACtB;AAEA,EAAA,IAAI,MAAA,CAAO,KAAA,IAAS,OAAO,MAAA,CAAO,UAAU,QAAA,EAAU;AACpD,IAAA,MAAA,CAAO,KAAA,GAAQ,mBAAA,CAAoB,MAAA,CAAO,KAAmB,CAAA;AAAA,EAC/D;AAGA,EAAA,KAAA,MAAW,OAAA,IAAW,CAAC,OAAA,EAAS,OAAA,EAAS,OAAO,CAAA,EAAY;AAC1D,IAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAC,CAAA,EAAG;AAClC,MAAA,MAAA,CAAO,OAAO,CAAA,GAAK,MAAA,CAAO,OAAO,CAAA,CAAmB,IAAI,CAAC,CAAA,KAAM,mBAAA,CAAoB,CAAC,CAAC,CAAA;AAAA,IACvF;AAAA,EACF;AAEA,EAAA,OAAO,MAAA;AACT;AAEA,SAAS,gBAAA,CACP,cACA,QAAA,EAC8B;AAC9B,EAAA,MAAM,iBAA+C,CAAC,EAAE,MAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAE/F,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,cAAA,CAAe,IAAA,CAAK,EAAE,IAAA,EAAM,GAAA,CAAI,MAAM,OAAA,EAAS,GAAA,CAAI,SAAS,CAAA;AAAA,EAC9D;AAEA,EAAA,OAAO,cAAA;AACT;AAEA,SAAS,eAAe,OAAA,EAA0B;AAChD,EAAA,MAAM,UAAA,GAAa,mBAAA,CAAoB,OAAA,CAAQ,cAAA,CAAe,cAA4B,CAAA;AAC1F,EAAA,MAAM,cAAA,GAAiB;AAAA,IACrB,IAAA,EAAM,aAAA;AAAA,IACN,WAAA,EAAa;AAAA,MACX,IAAA,EAAM,oBAAA;AAAA,MACN,MAAA,EAAQ,IAAA;AAAA,MACR,MAAA,EAAQ;AAAA;AACV,GACF;AACA,EAAA,MAAM,cAAA,GAAiB,gBAAA,CAAiB,OAAA,CAAQ,YAAA,EAAc,QAAQ,QAAQ,CAAA;AAE9E,EAAA,OAAO,EAAE,gBAAgB,cAAA,EAAe;AAC1C;AAEA,eAAe,aAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,cAAA,EAI2B;AAC3B,EAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,IAAA,CAAK,YAAY,MAAA,CAAO;AAAA,IACpD,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB;AAAA,GAClB,CAAA;AAED,EAAA,MAAM,MAAA,GAAS,QAAA,CAAS,OAAA,CAAQ,CAAC,CAAA;AACjC,EAAA,IAAI,CAAC,MAAA,EAAQ,OAAA,EAAS,OAAA,EAAS;AAC7B,IAAA,MAAM,IAAI,MAAM,gCAAgC,CAAA;AAAA,EAClD;AAEA,EAAA,IAAI,MAAA,CAAO,kBAAkB,QAAA,EAAU;AACrC,IAAA,MAAM,IAAI,MAAM,mDAAmD,CAAA;AAAA,EACrE;AAEA,EAAA,OAAO;AAAA,IACL,OAAA,EAAS,OAAO,OAAA,CAAQ,OAAA;AAAA,IACxB,KAAA,EAAO,SAAS,KAAA,GACZ;AAAA,MACE,WAAA,EAAa,SAAS,KAAA,CAAM,aAAA;AAAA,MAC5B,YAAA,EAAc,SAAS,KAAA,CAAM,iBAAA;AAAA,MAC7B,WAAA,EAAa,SAAS,KAAA,CAAM;AAAA,KAC9B,GACA;AAAA,GACN;AACF;AAEA,eAAe,mBAAA,CACb,MAAA,EACA,KAAA,EACA,cAAA,EACA,
cAAA,EAIA;AACA,EAAA,OAAO,MAAA,CAAO,IAAA,CAAK,WAAA,CAAY,MAAA,CAAO;AAAA,IACpC,KAAA;AAAA,IACA,QAAA,EAAU,cAAA;AAAA,IACV,eAAA,EAAiB,cAAA;AAAA,IACjB,MAAA,EAAQ;AAAA,GACT,CAAA;AACH;AAmBO,SAAS,qBAAqB,MAAA,EAAwC;AAC3E,EAAA,MAAM,MAAA,GAAS,IAAI,MAAA,CAAO;AAAA,IACxB,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,cAAc,MAAA,CAAO;AAAA,GACtB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KAClC,aAAA,CAAc,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OAC7D;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,EAAE,cAAA,EAAgB,cAAA,EAAe,GAAI,eAAe,OAAO,CAAA;AAEjE,MAAA,MAAM,YAAY,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,KAAA,KACnD,mBAAA,CAAoB,MAAA,EAAQ,KAAA,EAAO,gBAAgB,cAAc;AAAA,OACnE;AAEA,MAAA,OAAO,mBAAA,CAA4B,SAAA,EAAW,QAAA,EAAU,OAAA,CAAQ,cAAc,CAAA;AAAA,IAChF;AAAA,GACF;AACF","file":"index.js","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { Stream } from 'openai/streaming'\nimport type { ChatCompletionChunk } from 'openai/resources/chat/completions'\n\n/** State-machine states for extracting chatbotMessage from structured JSON stream */\nconst enum ParserState {\n SEARCHING = 0,\n IN_CHATBOT_MESSAGE = 1,\n PAST_CHATBOT_MESSAGE = 2,\n}\n\nconst CHATBOT_MESSAGE_KEY = '\"chatbotMessage\":\"'\n\n/**\n * Processes an OpenAI streaming response into AG-UI events.\n *\n * OpenAI structured output returns the entire response as JSON. The chatbotMessage\n * field is embedded within that JSON. 
This processor uses a character-level state\n * machine to extract chatbotMessage text progressively during streaming, yielding\n * TEXT_MESSAGE_CONTENT deltas in real-time.\n *\n * @param rawStream - OpenAI chat completion stream\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processOpenAIStream<TState extends BaseState = BaseState>(\n rawStream: Stream<ChatCompletionChunk>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let fullBuffer = ''\n let state: ParserState = ParserState.SEARCHING\n let escapeNext = false\n\n try {\n for await (const chunk of rawStream) {\n const delta = chunk.choices[0]?.delta?.content\n if (!delta) continue\n\n for (const char of delta) {\n fullBuffer += char\n const result: CharResult<TState> = processChar<TState>(char, state, escapeNext, fullBuffer, agentId)\n state = result.state\n escapeNext = result.escapeNext\n if (result.event) yield result.event\n }\n\n // Check for truncation\n const finishReason = chunk.choices[0]?.finish_reason\n if (finishReason === 'length') {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI response truncated (finish_reason: length)',\n agentId,\n } as StreamEvent<TState>\n return\n }\n }\n\n // Stream complete — parse and validate the full response\n if (!fullBuffer) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI stream completed with empty response',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n yield parseResponse<TState>(fullBuffer, schema, agentId)\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `OpenAI stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\nfunction parseResponse<TState extends BaseState>(\n buffer: string,\n schema: ZodType,\n agentId: AgentId,\n): StreamEvent<TState> {\n try {\n const parsed = JSON.parse(buffer) as 
Record<string, unknown>\n const validated = schema.parse(parsed)\n return {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n return {\n type: EventType.RUN_ERROR,\n message: `Response parse/validation failed: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\ninterface CharResult<TState extends BaseState> {\n state: ParserState\n escapeNext: boolean\n event: StreamEvent<TState> | null\n}\n\nfunction processChar<TState extends BaseState>(\n char: string,\n state: ParserState,\n escapeNext: boolean,\n fullBuffer: string,\n agentId: AgentId,\n): CharResult<TState> {\n switch (state) {\n case ParserState.SEARCHING:\n if (fullBuffer.endsWith(CHATBOT_MESSAGE_KEY)) {\n return { state: ParserState.IN_CHATBOT_MESSAGE, escapeNext: false, event: null }\n }\n return { state, escapeNext, event: null }\n\n case ParserState.IN_CHATBOT_MESSAGE:\n if (escapeNext) {\n return {\n state,\n escapeNext: false,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: unescapeChar(char),\n agentId,\n } as StreamEvent<TState>,\n }\n } else if (char === '\\\\') {\n return { state, escapeNext: true, event: null }\n } else if (char === '\"') {\n return { state: ParserState.PAST_CHATBOT_MESSAGE, escapeNext: false, event: null }\n } else {\n return {\n state,\n escapeNext,\n event: {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta: char,\n agentId,\n } as StreamEvent<TState>,\n }\n }\n\n case ParserState.PAST_CHATBOT_MESSAGE:\n return { state, escapeNext, event: null }\n }\n}\n\n/** Converts a JSON escape character to its actual value */\nfunction unescapeChar(char: string): string {\n switch (char) {\n case '\"':\n return '\"'\n case '\\\\':\n return '\\\\'\n case 'n':\n return '\\n'\n case 't':\n return '\\t'\n case 'r':\n return '\\r'\n case '/':\n return '/'\n default:\n // For \\uXXXX and unknown escapes, return 
as-is (the character after the backslash)\n return char\n }\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import OpenAI from 'openai'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processOpenAIStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\nimport type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'\n\n/**\n * Configuration for creating an OpenAI provider.\n */\nexport interface OpenAIProviderConfig {\n /** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. 
['gpt-4o', 'gpt-4o-mini']\n */\n models: string[]\n /** Optional base URL for Azure OpenAI or compatible endpoints */\n baseURL?: string\n /** Optional OpenAI organization ID */\n organization?: string\n}\n\ntype JsonSchema = Record<string, unknown>\n\n/**\n * Recursively enforces OpenAI structured output requirements on a JSON Schema:\n * - Adds `additionalProperties: false` to all object types\n * - Ensures all properties are listed in `required`\n */\nfunction enforceStrictSchema(schema: JsonSchema): JsonSchema {\n const result = { ...schema }\n\n if (result.type === 'object' && result.properties) {\n result.additionalProperties = false\n result.required = Object.keys(result.properties as Record<string, unknown>)\n const props = result.properties as Record<string, JsonSchema>\n const strictProps: Record<string, JsonSchema> = {}\n for (const [key, value] of Object.entries(props)) {\n strictProps[key] = enforceStrictSchema(value)\n }\n result.properties = strictProps\n }\n\n if (result.items && typeof result.items === 'object') {\n result.items = enforceStrictSchema(result.items as JsonSchema)\n }\n\n // Handle anyOf/oneOf/allOf\n for (const keyword of ['anyOf', 'oneOf', 'allOf'] as const) {\n if (Array.isArray(result[keyword])) {\n result[keyword] = (result[keyword] as JsonSchema[]).map((s) => enforceStrictSchema(s))\n }\n }\n\n return result\n}\n\nfunction toOpenAIMessages(\n systemPrompt: string,\n messages: ProviderMessage[],\n): ChatCompletionMessageParam[] {\n const openAIMessages: ChatCompletionMessageParam[] = [{ role: 'system', content: systemPrompt }]\n\n for (const msg of messages) {\n openAIMessages.push({ role: msg.role, content: msg.content })\n }\n\n return openAIMessages\n}\n\nfunction prepareRequest(request: ProviderRequest) {\n const jsonSchema = enforceStrictSchema(request.responseSchema.toJSONSchema() as JsonSchema)\n const responseFormat = {\n type: 'json_schema' as const,\n json_schema: {\n name: 'structuredResponse',\n strict: true,\n 
schema: jsonSchema,\n },\n }\n const openAIMessages = toOpenAIMessages(request.systemPrompt, request.messages)\n\n return { responseFormat, openAIMessages }\n}\n\nasync function sendWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n): Promise<ProviderResponse> {\n const response = await client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n })\n\n const choice = response.choices[0]\n if (!choice?.message?.content) {\n throw new Error('OpenAI returned empty response')\n }\n\n if (choice.finish_reason === 'length') {\n throw new Error('OpenAI response truncated (finish_reason: length)')\n }\n\n return {\n content: choice.message.content,\n usage: response.usage\n ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens,\n totalTokens: response.usage.total_tokens,\n }\n : undefined,\n }\n}\n\nasync function sendStreamWithModel(\n client: OpenAI,\n model: string,\n openAIMessages: ChatCompletionMessageParam[],\n responseFormat: {\n type: 'json_schema'\n json_schema: { name: string; strict: boolean; schema: JsonSchema }\n },\n) {\n return client.chat.completions.create({\n model,\n messages: openAIMessages,\n response_format: responseFormat,\n stream: true,\n })\n}\n\n/**\n * Creates an OpenAI provider instance.\n *\n * Uses OpenAI's structured output (response_format with JSON Schema) for both\n * blocking and streaming paths, with real-time chatbotMessage text streaming\n * via a custom stream processor.\n *\n * @param config - OpenAI provider configuration\n * @returns A Provider implementation using OpenAI\n *\n * @example\n * ```typescript\n * const provider = createOpenAIProvider({\n * models: ['gpt-4o', 'gpt-4o-mini'],\n * })\n * ```\n */\nexport function createOpenAIProvider(config: OpenAIProviderConfig): 
Provider {\n const client = new OpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n organization: config.organization,\n })\n const models = config.models\n\n return {\n name: 'openai',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(client, model, openAIMessages, responseFormat),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const { responseFormat, openAIMessages } = prepareRequest(request)\n\n const rawStream = await executeWithFallback(models, (model) =>\n sendStreamWithModel(client, model, openAIMessages, responseFormat),\n )\n\n yield* processOpenAIStream<TState>(rawStream, 'openai', request.responseSchema)\n },\n }\n}\n"]}
1
+ {"version":3,"sources":["../../openai/streamProcessor.ts","../../utils/executeWithFallback.ts","../../openai/index.ts"],"names":[],"mappings":";;;;;AAoBA,gBAAuB,mBAAA,CACrB,YAAA,EACA,MAAA,EACA,KAAA,EACA,SACA,MAAA,EACqC;AACrC,EAAA,IAAI,iBAAA,GAAoB,CAAA;AAExB,EAAA,IAAI;AAEF,IAAA,IAAI,CAAC,MAAM,IAAA,EAAM;AACf,MAAA,MAAM,UAAU,KAAA,CAAM,KAAA;AACtB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAM,SAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAAA,IACF;AAGA,IAAA,IAAI,IAAA,GAAO,MAAM,MAAA,CAAO,IAAA,EAAK;AAC7B,IAAA,OAAO,CAAC,KAAK,IAAA,EAAM;AACjB,MAAA,MAAM,UAAU,IAAA,CAAK,KAAA;AACrB,MAAA,MAAM,KAAA,GAAQ,YAAA,CAAa,OAAA,EAAS,iBAAiB,CAAA;AACrD,MAAA,IAAI,KAAA,EAAO;AACT,QAAA,iBAAA,IAAqB,KAAA,CAAM,MAAA;AAC3B,QAAA,MAAM;AAAA,UACJ,MAAM,SAAA,CAAU,oBAAA;AAAA,UAChB,SAAA,EAAW,EAAA;AAAA,UACX,KAAA;AAAA,UACA;AAAA,SACF;AAAA,MACF;AAEA,MAAA,IAAA,GAAO,MAAM,OAAO,IAAA,EAAK;AAAA,IAC3B;AAGA,IAAA,MAAM,WAAA,GAAc,MAAM,YAAA,CAAa,MAAA;AAEvC,IAAA,IAAI,gBAAgB,IAAA,EAAM;AACxB,MAAA,MAAM;AAAA,QACJ,MAAM,SAAA,CAAU,SAAA;AAAA,QAChB,OAAA,EAAS,0CAAA;AAAA,QACT;AAAA,OACF;AACA,MAAA;AAAA,IACF;AAEA,IAAA,MAAM,SAAA,GAAY,MAAA,CAAO,KAAA,CAAM,WAAW,CAAA;AAC1C,IAAA,MAAM;AAAA,MACJ,MAAM,SAAA,CAAU,gBAAA;AAAA,MAChB,UAAA,EAAY,EAAA;AAAA,MACZ,SAAA,EAAW,EAAA;AAAA,MACX,OAAA,EAAS,IAAA,CAAK,SAAA,CAAU,SAAS,CAAA;AAAA,MACjC;AAAA,KACF;AAAA,EACF,SAAS,GAAA,EAAK;AACZ,IAAA,MAAM;AAAA,MACJ,MAAM,SAAA,CAAU,SAAA;AAAA,MAChB,OAAA,EAAS,CAAA,qBAAA,EAAyB,GAAA,CAAc,OAAO,CAAA,CAAA;AAAA,MACvD;AAAA,KACF;AAAA,EACF;AACF;AAKA,SAAS,YAAA,CAAa,SAAkC,UAAA,EAAmC;AACzF,EAAA,MAAM,iBAAiB,OAAA,CAAQ,cAAA;AAC/B,EAAA,IAAI,OAAO,cAAA,KAAmB,QAAA,IAAY,cAAA,CAAe,UAAU,UAAA,EAAY;AAC7E,IAAA,OAAO,IAAA;AAAA,EACT;AACA,EAAA,OAAO,cAAA,CAAe,MAAM,UAAU,CAAA;AACxC;;;ACnFA,eAAsB,mBAAA,CAAuB,QAAkB,MAAA,EAAmD;AAGhH,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,MAAA,CAAO,QAAQ,CAAA,EAAA,EAAK;AACtC,IAAA,MAAM,KAAA,GAAQ,OAAO,CAAC,CAAA;AAEtB,IAAA,IAAI;AAEF,MAAA,OAAO,
MAAM,OAAO,KAAK,CAAA;AAAA,IAC3B,SAAS,KAAA,EAAO;AACd,MAAA,MAAM,QAAA,GAAW,KAAA;AAGjB,MAAA,IAAI,CAAA,KAAM,MAAA,CAAO,MAAA,GAAS,CAAA,EAAG;AAC3B,QAAA,MAAM,QAAA;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,EAAA,MAAM,IAAI,MAAM,mBAAmB,CAAA;AACrC;;;ACEA,SAAS,oBAAoB,MAAA,EAAgC;AAC3D,EAAA,MAAM,MAAA,GAAS,EAAE,GAAG,MAAA,EAAO;AAC3B,EAAA,IAAI,MAAA,CAAO,IAAA,KAAS,QAAA,IAAY,MAAA,CAAO,UAAA,EAAY;AACjD,IAAA,MAAA,CAAO,oBAAA,GAAuB,KAAA;AAC9B,IAAA,MAAA,CAAO,QAAA,GAAW,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,UAAqC,CAAA;AAC1E,IAAA,MAAM,QAAQ,MAAA,CAAO,UAAA;AACrB,IAAA,MAAM,cAA0C,EAAC;AACjD,IAAA,KAAA,MAAW,CAAC,GAAA,EAAK,KAAK,KAAK,MAAA,CAAO,OAAA,CAAQ,KAAK,CAAA,EAAG;AAChD,MAAA,WAAA,CAAY,GAAG,CAAA,GAAI,mBAAA,CAAoB,KAAK,CAAA;AAAA,IAC9C;AACA,IAAA,MAAA,CAAO,UAAA,GAAa,WAAA;AAAA,EACtB;AACA,EAAA,IAAI,MAAA,CAAO,KAAA,IAAS,OAAO,MAAA,CAAO,UAAU,QAAA,EAAU;AACpD,IAAA,MAAA,CAAO,KAAA,GAAQ,mBAAA,CAAoB,MAAA,CAAO,KAAmB,CAAA;AAAA,EAC/D;AACA,EAAA,KAAA,MAAW,OAAA,IAAW,CAAC,OAAA,EAAS,OAAA,EAAS,OAAO,CAAA,EAAY;AAC1D,IAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAC,CAAA,EAAG;AAClC,MAAA,MAAA,CAAO,OAAO,CAAA,GAAK,MAAA,CAAO,OAAO,CAAA,CAAmB,IAAI,CAAC,CAAA,KAAM,mBAAA,CAAoB,CAAC,CAAC,CAAA;AAAA,IACvF;AAAA,EACF;AACA,EAAA,OAAO,MAAA;AACT;AAQA,SAAS,eAAe,SAAA,EAA8C;AACpE,EAAA,MAAM,MAAA,GAAS,mBAAA,CAAoB,SAAA,CAAU,YAAA,EAA4B,CAAA;AACzE,EAAA,OAAO,WAAW,MAAA,EAAiB;AAAA,IACjC,QAAA,EAAU,CAAC,KAAA,KAAmB;AAC5B,MAAA,MAAM,MAAA,GAAS,SAAA,CAAU,SAAA,CAAU,KAAK,CAAA;AACxC,MAAA,OAAO,MAAA,CAAO,OAAA,GACV,EAAE,OAAA,EAAS,MAAe,KAAA,EAAO,MAAA,CAAO,IAAA,EAAK,GAC7C,EAAE,OAAA,EAAS,KAAA,EAAgB,KAAA,EAAO,OAAO,KAAA,EAAM;AAAA,IACrD;AAAA,GACD,CAAA;AACH;AAEA,SAAS,aAAa,QAAA,EAA6C;AACjE,EAAA,OAAO,QAAA,CAAS,GAAA,CAAI,CAAC,GAAA,MAAS;AAAA,IAC5B,MAAM,GAAA,CAAI,IAAA;AAAA,IACV,SAAS,GAAA,CAAI;AAAA,GACf,CAAE,CAAA;AACJ;AAEA,eAAe,aAAA,CACb,cAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EAC2B;AAC3B,EAAA,MAAM,MAAA,GAAS,MAAM,YAAA,CAAa;AAAA,IAChC,KAAA,EAAO,eAAe,KAAK,CAAA;AAAA,IAC3B,MAAA;AAAA,IACA,QAAA;AAAA,IACA,MAAA,EAAQ,OAAO,MAAA,CAAO,EAAE,QAAQ,cAAA,CAAe,MAAM,GAAG;AAAA,GACzD,CAAA;AAED,EAAA,OAAO;AAAA,IACL,OAAA,
EAAS,IAAA,CAAK,SAAA,CAAU,MAAA,CAAO,MAAM,CAAA;AAAA,IACrC,KAAA,EAAO;AAAA,MACL,WAAA,EAAa,MAAA,CAAO,KAAA,CAAM,WAAA,IAAe,CAAA;AAAA,MACzC,YAAA,EAAc,MAAA,CAAO,KAAA,CAAM,YAAA,IAAgB,CAAA;AAAA,MAC3C,cAAc,MAAA,CAAO,KAAA,CAAM,eAAe,CAAA,KAAM,MAAA,CAAO,MAAM,YAAA,IAAgB,CAAA;AAAA;AAC/E,GACF;AACF;AAEA,eAAe,mBAAA,CACb,cAAA,EACA,KAAA,EACA,MAAA,EACA,UACA,MAAA,EACA;AACA,EAAA,MAAM,SAAS,UAAA,CAAW;AAAA,IACxB,KAAA,EAAO,eAAe,KAAK,CAAA;AAAA,IAC3B,MAAA;AAAA,IACA,QAAA;AAAA,IACA,MAAA,EAAQ,OAAO,MAAA,CAAO,EAAE,QAAQ,cAAA,CAAe,MAAM,GAAG;AAAA,GACzD,CAAA;AAGD,EAAA,MAAM,gBAAgB,MAAA,CAAO,mBAAA;AAC7B,EAAA,MAAM,MAAA,GAAS,aAAA,CAAc,MAAA,CAAO,aAAa,CAAA,EAAE;AACnD,EAAA,MAAM,KAAA,GAAQ,MAAM,MAAA,CAAO,IAAA,EAAK;AAEhC,EAAA,OAAO,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAM;AACjC;AAoBO,SAAS,qBAAqB,MAAA,EAAwC;AAC3E,EAAA,MAAM,iBAAiB,YAAA,CAAa;AAAA,IAClC,QAAQ,MAAA,CAAO,MAAA;AAAA,IACf,SAAS,MAAA,CAAO,OAAA;AAAA,IAChB,cAAc,MAAA,CAAO;AAAA,GACtB,CAAA;AACD,EAAA,MAAM,SAAS,MAAA,CAAO,MAAA;AAEtB,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IAEN,MAAM,YAAY,OAAA,EAAqD;AACrE,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,OAAO,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UAClC,aAAA,CAAc,cAAA,EAAgB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OAC7F;AAAA,IACF,CAAA;AAAA,IAEA,OAAO,kBACL,OAAA,EACqC;AACrC,MAAA,MAAM,QAAA,GAAW,YAAA,CAAa,OAAA,CAAQ,QAAQ,CAAA;AAE9C,MAAA,MAAM,EAAE,MAAA,EAAQ,MAAA,EAAQ,KAAA,KAAU,MAAM,mBAAA;AAAA,QAAoB,MAAA;AAAA,QAAQ,CAAC,UACnE,mBAAA,CAAoB,cAAA,EAAgB,OAAO,OAAA,CAAQ,YAAA,EAAc,QAAA,EAAU,OAAA,CAAQ,cAAc;AAAA,OACnG;AAEA,MAAA,OAAO,oBAA4B,MAAA,EAAQ,MAAA,EAAQ,KAAA,EAAO,QAAA,EAAU,QAAQ,cAAc,CAAA;AAAA,IAC5F;AAAA,GACF;AACF","file":"index.js","sourcesContent":["import { EventType } from '@ag-ui/client'\nimport { ZodType } from 'zod'\nimport type { AgentId, StreamEvent, BaseState } from '@genui-a3/core'\nimport type { StreamTextResult, ToolSet } from 'ai'\n\n/**\n * Processes an OpenAI streaming response (via Vercel AI SDK) into AG-UI events.\n *\n * Uses `partialOutputStream` from `streamText` + `Output.object()` to receive\n 
* progressively-built partial objects. Tracks `chatbotMessage` growth to yield\n * TEXT_MESSAGE_CONTENT deltas. After the stream completes, validates the final\n * object and yields TOOL_CALL_RESULT.\n *\n * @param streamResult - The streamText result containing partialOutputStream and output promise\n * @param reader - Pre-started async iterator for the partial object stream\n * @param first - The first iteration result (already consumed to trigger the API call)\n * @param agentId - Agent identifier for event tagging\n * @param schema - Zod schema for final response validation\n * @returns Async generator of AG-UI stream events\n */\nexport async function* processOpenAIStream<TState extends BaseState = BaseState>(\n streamResult: StreamTextResult<ToolSet, never>,\n reader: AsyncIterator<unknown>,\n first: IteratorResult<unknown>,\n agentId: AgentId,\n schema: ZodType,\n): AsyncGenerator<StreamEvent<TState>> {\n let prevMessageLength = 0\n\n try {\n // Process the first partial (already consumed to trigger the API call)\n if (!first.done) {\n const partial = first.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n }\n\n // Process remaining partials\n let next = await reader.next()\n while (!next.done) {\n const partial = next.value as Record<string, unknown>\n const delta = extractDelta(partial, prevMessageLength)\n if (delta) {\n prevMessageLength += delta.length\n yield {\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId: '',\n delta,\n agentId,\n } as StreamEvent<TState>\n }\n // eslint-disable-next-line no-await-in-loop\n next = await reader.next()\n }\n\n // Stream complete — await and validate the final object\n const finalObject = await streamResult.output\n\n if (finalObject === null) {\n yield {\n type: EventType.RUN_ERROR,\n message: 'OpenAI 
stream completed with null output',\n agentId,\n } as StreamEvent<TState>\n return\n }\n\n const validated = schema.parse(finalObject)\n yield {\n type: EventType.TOOL_CALL_RESULT,\n toolCallId: '',\n messageId: '',\n content: JSON.stringify(validated),\n agentId,\n } as StreamEvent<TState>\n } catch (err) {\n yield {\n type: EventType.RUN_ERROR,\n message: `OpenAI stream error: ${(err as Error).message}`,\n agentId,\n } as StreamEvent<TState>\n }\n}\n\n/**\n * Extracts the new portion of chatbotMessage from a partial object.\n */\nfunction extractDelta(partial: Record<string, unknown>, prevLength: number): string | null {\n const chatbotMessage = partial.chatbotMessage\n if (typeof chatbotMessage !== 'string' || chatbotMessage.length <= prevLength) {\n return null\n }\n return chatbotMessage.slice(prevLength)\n}\n","/**\n * Executes an action with model fallback support.\n * Tries each model in order; if one fails, falls back to the next.\n * Throws the last error if all models fail.\n *\n * @param models - Model identifiers in priority order\n * @param action - Async action to attempt with each model\n * @returns The result from the first successful model\n * @throws The error from the last model if all fail\n *\n * @example\n * ```typescript\n * const result = await executeWithFallback(\n * ['model-primary', 'model-fallback'],\n * (model) => provider.call(model, params),\n * )\n * ```\n */\nexport async function executeWithFallback<T>(models: string[], action: (model: string) => Promise<T>): Promise<T> {\n const errors: Array<{ model: string; error: Error }> = []\n\n for (let i = 0; i < models.length; i++) {\n const model = models[i]\n\n try {\n // eslint-disable-next-line no-await-in-loop\n return await action(model)\n } catch (error) {\n const errorObj = error as Error\n errors.push({ model, error: errorObj })\n\n if (i === models.length - 1) {\n throw errorObj\n }\n }\n }\n\n throw new Error('All models failed')\n}\n","import { createOpenAI } from 
'@ai-sdk/openai'\nimport { generateText, streamText, Output, ModelMessage, jsonSchema } from 'ai'\nimport type {\n Provider,\n ProviderRequest,\n ProviderResponse,\n ProviderMessage,\n BaseState,\n StreamEvent,\n} from '@genui-a3/core'\nimport { processOpenAIStream } from './streamProcessor'\nimport { executeWithFallback } from '../utils/executeWithFallback'\n\n/**\n * Configuration for creating an OpenAI provider.\n */\nexport interface OpenAIProviderConfig {\n /** OpenAI API key. Defaults to OPENAI_API_KEY env var (OpenAI SDK default). */\n apiKey?: string\n /**\n * Model identifiers in order of preference (first = primary, rest = fallbacks).\n * e.g. ['gpt-4o', 'gpt-4o-mini']\n */\n models: string[]\n /** Optional base URL for Azure OpenAI or compatible endpoints */\n baseURL?: string\n /** Optional OpenAI organization ID */\n organization?: string\n}\n\ntype JsonSchema = Record<string, unknown>\n\n/**\n * Enforces strict JSON schema constraints required by OpenAI's structured output API.\n * OpenAI requires all object properties — including optional ones — to be in the `required` array.\n * This function recursively adds `required` and `additionalProperties: false` to all objects.\n *\n * @param schema - JSON schema to enforce\n * @returns Enforced JSON schema\n */\nfunction enforceStrictSchema(schema: JsonSchema): JsonSchema {\n const result = { ...schema }\n if (result.type === 'object' && result.properties) {\n result.additionalProperties = false\n result.required = Object.keys(result.properties as Record<string, unknown>)\n const props = result.properties as Record<string, JsonSchema>\n const strictProps: Record<string, JsonSchema> = {}\n for (const [key, value] of Object.entries(props)) {\n strictProps[key] = enforceStrictSchema(value)\n }\n result.properties = strictProps\n }\n if (result.items && typeof result.items === 'object') {\n result.items = enforceStrictSchema(result.items as JsonSchema)\n }\n for (const keyword of ['anyOf', 'oneOf', 'allOf'] as 
const) {\n if (Array.isArray(result[keyword])) {\n result[keyword] = (result[keyword] as JsonSchema[]).map((s) => enforceStrictSchema(s))\n }\n }\n return result\n}\n\n/**\n * Converts a Zod schema to an OpenAI-compatible strict JSON schema wrapped for the Vercel AI SDK.\n *\n * @param zodSchema - Zod schema to convert\n * @returns JSON schema wrapped for Vercel AI SDK\n */\nfunction toOpenAISchema(zodSchema: ProviderRequest['responseSchema']) {\n const strict = enforceStrictSchema(zodSchema.toJSONSchema() as JsonSchema)\n return jsonSchema(strict as never, {\n validate: (value: unknown) => {\n const result = zodSchema.safeParse(value)\n return result.success\n ? { success: true as const, value: result.data }\n : { success: false as const, error: result.error }\n },\n })\n}\n\nfunction toAIMessages(messages: ProviderMessage[]): ModelMessage[] {\n return messages.map((msg) => ({\n role: msg.role,\n content: msg.content,\n }))\n}\n\nasync function sendWithModel(\n openaiProvider: ReturnType<typeof createOpenAI>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n): Promise<ProviderResponse> {\n const result = await generateText({\n model: openaiProvider(model),\n system,\n messages,\n output: Output.object({ schema: toOpenAISchema(schema) }),\n })\n\n return {\n content: JSON.stringify(result.output),\n usage: {\n inputTokens: result.usage.inputTokens ?? 0,\n outputTokens: result.usage.outputTokens ?? 0,\n totalTokens: (result.usage.inputTokens ?? 0) + (result.usage.outputTokens ?? 
0),\n },\n }\n}\n\nasync function sendStreamWithModel(\n openaiProvider: ReturnType<typeof createOpenAI>,\n model: string,\n system: string,\n messages: ModelMessage[],\n schema: ProviderRequest['responseSchema'],\n) {\n const result = streamText({\n model: openaiProvider(model),\n system,\n messages,\n output: Output.object({ schema: toOpenAISchema(schema) }),\n })\n\n // Force the API call to start so executeWithFallback can catch connection errors\n const partialStream = result.partialOutputStream\n const reader = partialStream[Symbol.asyncIterator]()\n const first = await reader.next()\n\n return { result, reader, first }\n}\n\n/**\n * Creates an OpenAI provider instance.\n *\n * Uses the Vercel AI SDK (`ai` + `@ai-sdk/openai`) for structured output via\n * `generateText` + `Output.object()` (blocking) and `streamText` + `Output.object()`\n * (streaming). The AI SDK handles Zod-to-JSON-schema conversion, partial JSON\n * parsing, and validation internally.\n *\n * @param config - OpenAI provider configuration\n * @returns A Provider implementation using OpenAI\n *\n * @example\n * ```typescript\n * const provider = createOpenAIProvider({\n * models: ['gpt-4o', 'gpt-4o-mini'],\n * })\n * ```\n */\nexport function createOpenAIProvider(config: OpenAIProviderConfig): Provider {\n const openaiProvider = createOpenAI({\n apiKey: config.apiKey,\n baseURL: config.baseURL,\n organization: config.organization,\n })\n const models = config.models\n\n return {\n name: 'openai',\n\n async sendRequest(request: ProviderRequest): Promise<ProviderResponse> {\n const messages = toAIMessages(request.messages)\n\n return executeWithFallback(models, (model) =>\n sendWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n },\n\n async *sendRequestStream<TState extends BaseState = BaseState>(\n request: ProviderRequest,\n ): AsyncGenerator<StreamEvent<TState>> {\n const messages = toAIMessages(request.messages)\n\n const { result, reader, first 
} = await executeWithFallback(models, (model) =>\n sendStreamWithModel(openaiProvider, model, request.systemPrompt, messages, request.responseSchema),\n )\n\n yield* processOpenAIStream<TState>(result, reader, first, 'openai', request.responseSchema)\n },\n }\n}\n"]}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@genui-a3/providers",
3
- "version": "0.0.1",
3
+ "version": "0.0.3",
4
4
  "description": "Provider implementations for the A3 agentic framework",
5
5
  "type": "module",
6
6
  "exports": {
@@ -13,6 +13,11 @@
13
13
  "types": "./dist/openai/index.d.ts",
14
14
  "import": "./dist/openai/index.js",
15
15
  "require": "./dist/openai/index.cjs"
16
+ },
17
+ "./anthropic": {
18
+ "types": "./dist/anthropic/index.d.ts",
19
+ "import": "./dist/anthropic/index.js",
20
+ "require": "./dist/anthropic/index.cjs"
16
21
  }
17
22
  },
18
23
  "files": [
@@ -29,6 +34,7 @@
29
34
  "providers",
30
35
  "bedrock",
31
36
  "openai",
37
+ "anthropic",
32
38
  "ai",
33
39
  "llm"
34
40
  ],
@@ -42,8 +48,10 @@
42
48
  },
43
49
  "dependencies": {
44
50
  "@ag-ui/client": "0.0.47",
51
+ "@ai-sdk/anthropic": "^3.0.58",
45
52
  "@aws-sdk/client-bedrock-runtime": "3.975.0",
46
- "openai": "6.27.0",
53
+ "ai": "^6.0.116",
54
+ "@ai-sdk/openai": "^3.0.41",
47
55
  "zod": "4.3.6"
48
56
  },
49
57
  "devDependencies": {