workers-ai-provider 3.1.10 → 3.1.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +29 -0
- package/dist/index.d.mts +32 -0
- package/dist/index.mjs +60 -18
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/src/streaming.ts +45 -19
- package/src/workersai-chat-language-model.ts +44 -3
- package/src/workersai-chat-settings.ts +23 -0
package/README.md
CHANGED
|
@@ -112,6 +112,35 @@ for await (const chunk of result.textStream) {
|
|
|
112
112
|
}
|
|
113
113
|
```
|
|
114
114
|
|
|
115
|
+
## Reasoning Controls
|
|
116
|
+
|
|
117
|
+
Reasoning-capable Workers AI models (GLM-4.7-flash, Kimi K2.5/K2.6, GPT-OSS, QwQ) accept `reasoning_effort` and `chat_template_kwargs` on their inputs. Either set them at model creation time as settings, or per-call via `providerOptions["workers-ai"]` (per-call wins):
|
|
118
|
+
|
|
119
|
+
```ts
|
|
120
|
+
// Settings-level (applies to every request on this model instance)
|
|
121
|
+
const model = workersai("@cf/zai-org/glm-4.7-flash", {
|
|
122
|
+
reasoning_effort: "low", // "low" | "medium" | "high" | null
|
|
123
|
+
chat_template_kwargs: { enable_thinking: false },
|
|
124
|
+
});
|
|
125
|
+
|
|
126
|
+
await generateText({ model, prompt: "Summarize in one sentence." });
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
```ts
|
|
130
|
+
// Per-call (overrides any settings-level value)
|
|
131
|
+
const model = workersai("@cf/zai-org/glm-4.7-flash");
|
|
132
|
+
|
|
133
|
+
await generateText({
|
|
134
|
+
model,
|
|
135
|
+
prompt: "Summarize in one sentence.",
|
|
136
|
+
providerOptions: {
|
|
137
|
+
"workers-ai": { reasoning_effort: "low" },
|
|
138
|
+
},
|
|
139
|
+
});
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
`reasoning_effort: null` is meaningful — it's the explicit "disable reasoning" signal for models that support it. Both fields land on the `inputs` object of `binding.run()` (and the JSON body of the REST request), matching the shape expected by Workers AI. See the [model catalog](https://developers.cloudflare.com/workers-ai/models/) for per-model reasoning capabilities.
|
|
143
|
+
|
|
115
144
|
## Vision (Image Inputs)
|
|
116
145
|
|
|
117
146
|
Send images to vision-capable models like Kimi K2.5:
|
package/dist/index.d.mts
CHANGED
|
@@ -122,6 +122,25 @@ type WorkersAIChatSettings = {
|
|
|
122
122
|
* Routes requests with the same key to the same backend replica.
|
|
123
123
|
*/
|
|
124
124
|
sessionAffinity?: string;
|
|
125
|
+
/**
|
|
126
|
+
* Controls the reasoning budget for reasoning-capable Workers AI models
|
|
127
|
+
* (e.g. `@cf/zai-org/glm-4.7-flash`, `@cf/moonshotai/kimi-k2.5`,
|
|
128
|
+
* `@cf/openai/gpt-oss-120b`).
|
|
129
|
+
*
|
|
130
|
+
* `null` is a valid value and disables reasoning for models that support it.
|
|
131
|
+
* Forwarded on the `inputs` object of `binding.run(model, inputs)`.
|
|
132
|
+
*/
|
|
133
|
+
reasoning_effort?: "low" | "medium" | "high" | null;
|
|
134
|
+
/**
|
|
135
|
+
* Chat-template overrides for reasoning-capable models that expose
|
|
136
|
+
* thinking toggles (e.g. GLM, Kimi).
|
|
137
|
+
*
|
|
138
|
+
* Forwarded on the `inputs` object of `binding.run(model, inputs)`.
|
|
139
|
+
*/
|
|
140
|
+
chat_template_kwargs?: {
|
|
141
|
+
/** Whether to enable reasoning. Enabled by default on reasoning models. */
enable_thinking?: boolean;
/** If false, preserves reasoning context between turns. */
|
|
142
|
+
clear_thinking?: boolean;
|
|
143
|
+
};
|
|
125
144
|
/**
|
|
126
145
|
* Passthrough settings that are provided directly to the run function.
|
|
127
146
|
* Use this for any provider-specific options not covered by the typed fields.
|
|
@@ -154,10 +173,23 @@ declare class WorkersAIChatLanguageModel implements LanguageModelV3 {
|
|
|
154
173
|
* accept this format at runtime.
|
|
155
174
|
*
|
|
156
175
|
* The binding path additionally normalises null content to empty strings.
|
|
176
|
+
*
|
|
177
|
+
* Reasoning controls (`reasoning_effort`, `chat_template_kwargs`) are
|
|
178
|
+
* forwarded here from settings. These belong on the INPUTS object, not on
|
|
179
|
+
* the 3rd-arg options / REST query string — see
|
|
180
|
+
* https://github.com/cloudflare/ai/issues/501. Per-call values from
|
|
181
|
+
* `providerOptions["workers-ai"]` override settings.
|
|
182
|
+
*
|
|
183
|
+
* `reasoning_effort: null` is a valid value ("disable reasoning"), so we
|
|
184
|
+
* check `!== undefined` rather than truthiness.
|
|
157
185
|
*/
|
|
158
186
|
private buildRunInputs;
|
|
159
187
|
/**
|
|
160
188
|
* Get passthrough options for binding.run() from settings.
|
|
189
|
+
*
|
|
190
|
+
* `reasoning_effort` and `chat_template_kwargs` are explicitly excluded
|
|
191
|
+
* here — they belong on the `inputs` object (see `buildRunInputs`), not on
|
|
192
|
+
* the `options` (3rd) arg of binding.run() or the REST query string.
|
|
161
193
|
*/
|
|
162
194
|
private getRunOptions;
|
|
163
195
|
doGenerate(options: Parameters<LanguageModelV3["doGenerate"]>[0]): Promise<Awaited<ReturnType<LanguageModelV3["doGenerate"]>>>;
|
package/dist/index.mjs
CHANGED
|
@@ -304,6 +304,8 @@ function getMappedStream(response) {
|
|
|
304
304
|
let receivedDone = false;
|
|
305
305
|
let receivedAnyData = false;
|
|
306
306
|
const activeToolCalls = /* @__PURE__ */ new Map();
|
|
307
|
+
const closedToolCalls = /* @__PURE__ */ new Set();
|
|
308
|
+
let lastActiveToolIndex = null;
|
|
307
309
|
return rawStream.pipeThrough(new SSEDecoder()).pipeThrough(new TransformStream({
|
|
308
310
|
transform(data, controller) {
|
|
309
311
|
if (!data || data === "[DONE]") {
|
|
@@ -412,17 +414,9 @@ function getMappedStream(response) {
|
|
|
412
414
|
}
|
|
413
415
|
},
|
|
414
416
|
flush(controller) {
|
|
415
|
-
for (const [
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
id: tc.id
|
|
419
|
-
});
|
|
420
|
-
controller.enqueue({
|
|
421
|
-
type: "tool-call",
|
|
422
|
-
toolCallId: tc.id,
|
|
423
|
-
toolName: tc.toolName,
|
|
424
|
-
input: tc.args
|
|
425
|
-
});
|
|
417
|
+
for (const [idx] of activeToolCalls) {
|
|
418
|
+
if (closedToolCalls.has(idx)) continue;
|
|
419
|
+
closeToolCall(idx, controller);
|
|
426
420
|
}
|
|
427
421
|
if (reasoningId) controller.enqueue({
|
|
428
422
|
type: "reasoning-end",
|
|
@@ -447,26 +441,50 @@ function getMappedStream(response) {
|
|
|
447
441
|
}
|
|
448
442
|
}));
|
|
449
443
|
/**
|
|
444
|
+
* Emit tool-input-end + tool-call for a tool call that is complete.
|
|
445
|
+
*/
|
|
446
|
+
function closeToolCall(index, controller) {
|
|
447
|
+
const tc = activeToolCalls.get(index);
|
|
448
|
+
if (!tc || closedToolCalls.has(index)) return;
|
|
449
|
+
closedToolCalls.add(index);
|
|
450
|
+
controller.enqueue({
|
|
451
|
+
type: "tool-input-end",
|
|
452
|
+
id: tc.id
|
|
453
|
+
});
|
|
454
|
+
controller.enqueue({
|
|
455
|
+
type: "tool-call",
|
|
456
|
+
toolCallId: tc.id,
|
|
457
|
+
toolName: tc.toolName,
|
|
458
|
+
input: tc.args
|
|
459
|
+
});
|
|
460
|
+
}
|
|
461
|
+
/**
|
|
450
462
|
* Emit incremental tool call events from streaming chunks.
|
|
451
463
|
*
|
|
452
464
|
* Workers AI streams tool calls as:
|
|
453
465
|
* Chunk A: { id, type, index, function: { name } } — start
|
|
454
466
|
* Chunk B: { index, function: { arguments: "partial..." } } — args delta
|
|
455
467
|
* Chunk C: { index, function: { arguments: "rest..." } } — args delta
|
|
456
|
-
* Chunk D: { id: null, type: null, function: { name: null } } — finalize
|
|
468
|
+
* Chunk D: { id: null, type: null, function: { name: null } } — finalize
|
|
457
469
|
*
|
|
458
470
|
* We emit tool-input-start on first sight, tool-input-delta for each
|
|
459
|
-
* argument chunk, and tool-input-end
|
|
471
|
+
* argument chunk, and tool-input-end eagerly — either when a new tool
|
|
472
|
+
* index starts (closing the previous one) or on a null finalization
|
|
473
|
+
* chunk. Any remaining open calls are closed in flush().
|
|
460
474
|
*/
|
|
461
475
|
function emitToolCallDeltas(toolCalls, controller) {
|
|
462
476
|
for (const tc of toolCalls) {
|
|
463
|
-
if (isNullFinalizationChunk(tc))
|
|
477
|
+
if (isNullFinalizationChunk(tc)) {
|
|
478
|
+
if (lastActiveToolIndex != null) closeToolCall(lastActiveToolIndex, controller);
|
|
479
|
+
continue;
|
|
480
|
+
}
|
|
464
481
|
const tcIndex = tc.index ?? 0;
|
|
465
482
|
const fn = tc.function;
|
|
466
483
|
const tcName = fn?.name ?? tc.name ?? null;
|
|
467
484
|
const tcArgs = fn?.arguments ?? tc.arguments ?? null;
|
|
468
485
|
const tcId = tc.id;
|
|
469
486
|
if (!activeToolCalls.has(tcIndex)) {
|
|
487
|
+
if (lastActiveToolIndex != null && lastActiveToolIndex !== tcIndex) closeToolCall(lastActiveToolIndex, controller);
|
|
470
488
|
const id = tcId || generateId();
|
|
471
489
|
const toolName = tcName || "";
|
|
472
490
|
activeToolCalls.set(tcIndex, {
|
|
@@ -474,6 +492,7 @@ function getMappedStream(response) {
|
|
|
474
492
|
toolName,
|
|
475
493
|
args: ""
|
|
476
494
|
});
|
|
495
|
+
lastActiveToolIndex = tcIndex;
|
|
477
496
|
controller.enqueue({
|
|
478
497
|
type: "tool-input-start",
|
|
479
498
|
id,
|
|
@@ -490,6 +509,7 @@ function getMappedStream(response) {
|
|
|
490
509
|
}
|
|
491
510
|
} else {
|
|
492
511
|
const active = activeToolCalls.get(tcIndex);
|
|
512
|
+
lastActiveToolIndex = tcIndex;
|
|
493
513
|
if (tcArgs != null && tcArgs !== "") {
|
|
494
514
|
const delta = typeof tcArgs === "string" ? tcArgs : JSON.stringify(tcArgs);
|
|
495
515
|
active.args += delta;
|
|
@@ -969,8 +989,21 @@ var WorkersAIChatLanguageModel = class {
|
|
|
969
989
|
* accept this format at runtime.
|
|
970
990
|
*
|
|
971
991
|
* The binding path additionally normalises null content to empty strings.
|
|
992
|
+
*
|
|
993
|
+
* Reasoning controls (`reasoning_effort`, `chat_template_kwargs`) are
|
|
994
|
+
* forwarded here from settings. These belong on the INPUTS object, not on
|
|
995
|
+
* the 3rd-arg options / REST query string — see
|
|
996
|
+
* https://github.com/cloudflare/ai/issues/501. Per-call values from
|
|
997
|
+
* `providerOptions["workers-ai"]` override settings.
|
|
998
|
+
*
|
|
999
|
+
* `reasoning_effort: null` is a valid value ("disable reasoning"), so we
|
|
1000
|
+
* check `!== undefined` rather than truthiness.
|
|
972
1001
|
*/
|
|
973
1002
|
buildRunInputs(args, messages, options) {
|
|
1003
|
+
const rawPerCall = options?.providerOptions?.["workers-ai"];
|
|
1004
|
+
const perCall = rawPerCall !== null && typeof rawPerCall === "object" && !Array.isArray(rawPerCall) ? rawPerCall : {};
|
|
1005
|
+
const reasoningEffort = "reasoning_effort" in perCall ? perCall.reasoning_effort : this.settings.reasoning_effort;
|
|
1006
|
+
const chatTemplateKwargs = "chat_template_kwargs" in perCall ? perCall.chat_template_kwargs : this.settings.chat_template_kwargs;
|
|
974
1007
|
return {
|
|
975
1008
|
max_tokens: args.max_tokens,
|
|
976
1009
|
messages: this.config.isBinding ? normalizeMessagesForBinding(messages) : messages,
|
|
@@ -979,14 +1012,20 @@ var WorkersAIChatLanguageModel = class {
|
|
|
979
1012
|
...args.tool_choice ? { tool_choice: args.tool_choice } : {},
|
|
980
1013
|
top_p: args.top_p,
|
|
981
1014
|
...args.response_format ? { response_format: args.response_format } : {},
|
|
982
|
-
...options?.stream ? { stream: true } : {}
|
|
1015
|
+
...options?.stream ? { stream: true } : {},
|
|
1016
|
+
...reasoningEffort !== void 0 ? { reasoning_effort: reasoningEffort } : {},
|
|
1017
|
+
...chatTemplateKwargs !== void 0 ? { chat_template_kwargs: chatTemplateKwargs } : {}
|
|
983
1018
|
};
|
|
984
1019
|
}
|
|
985
1020
|
/**
|
|
986
1021
|
* Get passthrough options for binding.run() from settings.
|
|
1022
|
+
*
|
|
1023
|
+
* `reasoning_effort` and `chat_template_kwargs` are explicitly excluded
|
|
1024
|
+
* here — they belong on the `inputs` object (see `buildRunInputs`), not on
|
|
1025
|
+
* the `options` (3rd) arg of binding.run() or the REST query string.
|
|
987
1026
|
*/
|
|
988
1027
|
getRunOptions() {
|
|
989
|
-
const { gateway, safePrompt: _safePrompt, sessionAffinity, extraHeaders, ...passthroughOptions } = this.settings;
|
|
1028
|
+
const { gateway, safePrompt: _safePrompt, sessionAffinity, extraHeaders, reasoning_effort: _reasoningEffort, chat_template_kwargs: _chatTemplateKwargs, ...passthroughOptions } = this.settings;
|
|
990
1029
|
const mergedHeaders = {
|
|
991
1030
|
...extraHeaders && typeof extraHeaders === "object" ? extraHeaders : {},
|
|
992
1031
|
...sessionAffinity ? { "x-session-affinity": sessionAffinity } : {}
|
|
@@ -1000,7 +1039,7 @@ var WorkersAIChatLanguageModel = class {
|
|
|
1000
1039
|
async doGenerate(options) {
|
|
1001
1040
|
const { args, warnings } = this.getArgs(options);
|
|
1002
1041
|
const { messages } = convertToWorkersAIChatMessages(options.prompt);
|
|
1003
|
-
const inputs = this.buildRunInputs(args, messages);
|
|
1042
|
+
const inputs = this.buildRunInputs(args, messages, { providerOptions: options.providerOptions });
|
|
1004
1043
|
const runOptions = this.getRunOptions();
|
|
1005
1044
|
const output = await this.config.binding.run(args.model, inputs, {
|
|
1006
1045
|
...runOptions,
|
|
@@ -1030,7 +1069,10 @@ var WorkersAIChatLanguageModel = class {
|
|
|
1030
1069
|
async doStream(options) {
|
|
1031
1070
|
const { args, warnings } = this.getArgs(options);
|
|
1032
1071
|
const { messages } = convertToWorkersAIChatMessages(options.prompt);
|
|
1033
|
-
const inputs = this.buildRunInputs(args, messages, {
|
|
1072
|
+
const inputs = this.buildRunInputs(args, messages, {
|
|
1073
|
+
stream: true,
|
|
1074
|
+
providerOptions: options.providerOptions
|
|
1075
|
+
});
|
|
1034
1076
|
const runOptions = this.getRunOptions();
|
|
1035
1077
|
const response = await this.config.binding.run(args.model, inputs, {
|
|
1036
1078
|
...runOptions,
|
package/dist/index.mjs.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.mjs","names":["toUint8Array","uint8ArrayToBase64","toUint8Array"],"sources":["../src/convert-to-workersai-chat-messages.ts","../src/map-workersai-usage.ts","../src/map-workersai-finish-reason.ts","../src/streaming.ts","../src/utils.ts","../src/aisearch-chat-language-model.ts","../src/workersai-embedding-model.ts","../src/workersai-chat-language-model.ts","../src/workersai-image-model.ts","../src/workersai-transcription-model.ts","../src/workersai-speech-model.ts","../src/workersai-reranking-model.ts","../src/autorag-chat-language-model.ts","../src/index.ts"],"sourcesContent":["import type { LanguageModelV3DataContent, LanguageModelV3Prompt } from \"@ai-sdk/provider\";\nimport type { WorkersAIContentPart, WorkersAIChatPrompt } from \"./workersai-chat-prompt\";\n\n/**\n * Normalise any LanguageModelV3DataContent value to a Uint8Array.\n *\n * Handles:\n * - Uint8Array → returned as-is\n * - string → decoded from base64 (with or without data-URL prefix)\n * - URL → not supported (Workers AI needs raw bytes, not a reference)\n */\nfunction toUint8Array(data: LanguageModelV3DataContent): Uint8Array | null {\n\tif (data instanceof Uint8Array) {\n\t\treturn data;\n\t}\n\n\tif (typeof data === \"string\") {\n\t\tlet base64 = data;\n\t\tif (base64.startsWith(\"data:\")) {\n\t\t\tconst commaIndex = base64.indexOf(\",\");\n\t\t\tif (commaIndex >= 0) {\n\t\t\t\tbase64 = base64.slice(commaIndex + 1);\n\t\t\t}\n\t\t}\n\t\tconst binaryString = atob(base64);\n\t\tconst bytes = new Uint8Array(binaryString.length);\n\t\tfor (let i = 0; i < binaryString.length; i++) {\n\t\t\tbytes[i] = binaryString.charCodeAt(i);\n\t\t}\n\t\treturn bytes;\n\t}\n\n\tif (data instanceof URL) {\n\t\tthrow new Error(\n\t\t\t\"URL image sources are not supported by Workers AI. 
\" +\n\t\t\t\t\"Provide image data as a Uint8Array or base64 string instead.\",\n\t\t);\n\t}\n\n\treturn null;\n}\n\nfunction uint8ArrayToBase64(bytes: Uint8Array): string {\n\tlet binary = \"\";\n\tconst chunkSize = 8192;\n\tfor (let i = 0; i < bytes.length; i += chunkSize) {\n\t\tconst chunk = bytes.subarray(i, Math.min(i + chunkSize, bytes.length));\n\t\tbinary += String.fromCharCode(...chunk);\n\t}\n\treturn btoa(binary);\n}\n\nexport function convertToWorkersAIChatMessages(prompt: LanguageModelV3Prompt): {\n\tmessages: WorkersAIChatPrompt;\n} {\n\tconst messages: WorkersAIChatPrompt = [];\n\n\tfor (const { role, content } of prompt) {\n\t\tswitch (role) {\n\t\t\tcase \"system\": {\n\t\t\t\tmessages.push({ content, role: \"system\" });\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase \"user\": {\n\t\t\t\tconst textParts: string[] = [];\n\t\t\t\tconst imageParts: { image: Uint8Array; mediaType: string | undefined }[] = [];\n\n\t\t\t\tfor (const part of content) {\n\t\t\t\t\tswitch (part.type) {\n\t\t\t\t\t\tcase \"text\": {\n\t\t\t\t\t\t\ttextParts.push(part.text);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcase \"file\": {\n\t\t\t\t\t\t\tconst imageBytes = toUint8Array(part.data);\n\t\t\t\t\t\t\tif (imageBytes) {\n\t\t\t\t\t\t\t\timageParts.push({\n\t\t\t\t\t\t\t\t\timage: imageBytes,\n\t\t\t\t\t\t\t\t\tmediaType: part.mediaType,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (imageParts.length > 0) {\n\t\t\t\t\tconst contentArray: WorkersAIContentPart[] = [];\n\t\t\t\t\tif (textParts.length > 0) {\n\t\t\t\t\t\tcontentArray.push({ type: \"text\", text: textParts.join(\"\\n\") });\n\t\t\t\t\t}\n\t\t\t\t\tfor (const img of imageParts) {\n\t\t\t\t\t\tconst base64 = uint8ArrayToBase64(img.image);\n\t\t\t\t\t\tconst mediaType = img.mediaType || \"image/png\";\n\t\t\t\t\t\tcontentArray.push({\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: { url: `data:${mediaType};base64,${base64}` 
},\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\tmessages.push({ content: contentArray, role: \"user\" });\n\t\t\t\t} else {\n\t\t\t\t\tmessages.push({ content: textParts.join(\"\\n\"), role: \"user\" });\n\t\t\t\t}\n\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase \"assistant\": {\n\t\t\t\tlet text = \"\";\n\t\t\t\tlet reasoning = \"\";\n\t\t\t\tconst toolCalls: Array<{\n\t\t\t\t\tid: string;\n\t\t\t\t\ttype: \"function\";\n\t\t\t\t\tfunction: { name: string; arguments: string };\n\t\t\t\t}> = [];\n\n\t\t\t\tfor (const part of content) {\n\t\t\t\t\tswitch (part.type) {\n\t\t\t\t\t\tcase \"text\": {\n\t\t\t\t\t\t\ttext += part.text;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"reasoning\": {\n\t\t\t\t\t\t\t// Reasoning is accumulated separately and sent as the `reasoning`\n\t\t\t\t\t\t\t// field on the message object. This is the field name vLLM expects\n\t\t\t\t\t\t\t// on input for reasoning models (kimi-k2.5, glm-4.7-flash).\n\t\t\t\t\t\t\t// Concatenating it into `content` corrupts the conversation history\n\t\t\t\t\t\t\t// and causes models to produce empty or garbled responses on the\n\t\t\t\t\t\t\t// next turn.\n\t\t\t\t\t\t\treasoning += part.text;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"file\": {\n\t\t\t\t\t\t\t// File parts in assistant messages - no action needed\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"tool-call\": {\n\t\t\t\t\t\t\ttoolCalls.push({\n\t\t\t\t\t\t\t\tfunction: {\n\t\t\t\t\t\t\t\t\targuments: JSON.stringify(part.input),\n\t\t\t\t\t\t\t\t\tname: part.toolName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tid: part.toolCallId,\n\t\t\t\t\t\t\t\ttype: \"function\",\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"tool-result\": {\n\t\t\t\t\t\t\t// Tool results in assistant messages - no action needed\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tdefault: {\n\t\t\t\t\t\t\tconst exhaustiveCheck = part satisfies never;\n\t\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t\t`Unsupported part 
type: ${(exhaustiveCheck as { type: string }).type}`,\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmessages.push({\n\t\t\t\t\tcontent: text,\n\t\t\t\t\trole: \"assistant\",\n\t\t\t\t\t...(reasoning ? { reasoning } : {}),\n\t\t\t\t\ttool_calls:\n\t\t\t\t\t\ttoolCalls.length > 0\n\t\t\t\t\t\t\t? toolCalls.map(({ function: { name, arguments: args }, id }) => ({\n\t\t\t\t\t\t\t\t\tfunction: { arguments: args, name },\n\t\t\t\t\t\t\t\t\tid,\n\t\t\t\t\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\t\t\t\t}))\n\t\t\t\t\t\t\t: undefined,\n\t\t\t\t});\n\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase \"tool\": {\n\t\t\t\tfor (const toolResponse of content) {\n\t\t\t\t\tif (toolResponse.type === \"tool-result\") {\n\t\t\t\t\t\tconst output = toolResponse.output;\n\t\t\t\t\t\tlet content: string;\n\t\t\t\t\t\tswitch (output.type) {\n\t\t\t\t\t\t\tcase \"text\":\n\t\t\t\t\t\t\tcase \"error-text\":\n\t\t\t\t\t\t\t\tcontent = output.value;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase \"json\":\n\t\t\t\t\t\t\tcase \"error-json\":\n\t\t\t\t\t\t\t\tcontent = JSON.stringify(output.value);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase \"execution-denied\":\n\t\t\t\t\t\t\t\tcontent = output.reason\n\t\t\t\t\t\t\t\t\t? 
`Tool execution denied: ${output.reason}`\n\t\t\t\t\t\t\t\t\t: \"Tool execution was denied.\";\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase \"content\":\n\t\t\t\t\t\t\t\tcontent = output.value\n\t\t\t\t\t\t\t\t\t.filter(\n\t\t\t\t\t\t\t\t\t\t(p): p is { type: \"text\"; text: string } =>\n\t\t\t\t\t\t\t\t\t\t\tp.type === \"text\",\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t.map((p) => p.text)\n\t\t\t\t\t\t\t\t\t.join(\"\\n\");\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tcontent = \"\";\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmessages.push({\n\t\t\t\t\t\t\tcontent,\n\t\t\t\t\t\t\tname: toolResponse.toolName,\n\t\t\t\t\t\t\ttool_call_id: toolResponse.toolCallId,\n\t\t\t\t\t\t\trole: \"tool\",\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\t// Skip tool-approval-response parts as they're not supported by Workers AI\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tdefault: {\n\t\t\t\tconst exhaustiveCheck = role satisfies never;\n\t\t\t\tthrow new Error(`Unsupported role: ${exhaustiveCheck}`);\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { messages };\n}\n","import type { LanguageModelV3Usage } from \"@ai-sdk/provider\";\n\n/**\n * Map Workers AI usage data to the AI SDK V3 usage format.\n * Accepts any object that may have a `usage` property with token counts.\n */\nexport function mapWorkersAIUsage(\n\toutput: Record<string, unknown> | AiTextGenerationOutput | AiTextToImageOutput,\n): LanguageModelV3Usage {\n\tconst usage = (\n\t\toutput as {\n\t\t\tusage?: { prompt_tokens?: number; completion_tokens?: number };\n\t\t}\n\t).usage ?? {\n\t\tcompletion_tokens: 0,\n\t\tprompt_tokens: 0,\n\t};\n\n\tconst promptTokens = usage.prompt_tokens ?? 0;\n\tconst completionTokens = usage.completion_tokens ?? 
0;\n\n\treturn {\n\t\toutputTokens: {\n\t\t\ttotal: completionTokens,\n\t\t\ttext: undefined,\n\t\t\treasoning: undefined,\n\t\t},\n\t\tinputTokens: {\n\t\t\ttotal: promptTokens,\n\t\t\tnoCache: undefined,\n\t\t\tcacheRead: undefined,\n\t\t\tcacheWrite: undefined,\n\t\t},\n\t\traw: { total: promptTokens + completionTokens },\n\t};\n}\n","import type { LanguageModelV3FinishReason } from \"@ai-sdk/provider\";\n\n/**\n * Map a Workers AI finish reason to the AI SDK unified finish reason.\n *\n * Accepts either:\n * - A raw finish reason string (e.g., \"stop\", \"tool_calls\")\n * - A full response object with finish_reason in various locations\n */\nexport function mapWorkersAIFinishReason(\n\tfinishReasonOrResponse: string | null | undefined | Record<string, unknown>,\n): LanguageModelV3FinishReason {\n\tlet finishReason: string | null | undefined;\n\n\tif (\n\t\ttypeof finishReasonOrResponse === \"string\" ||\n\t\tfinishReasonOrResponse === null ||\n\t\tfinishReasonOrResponse === undefined\n\t) {\n\t\tfinishReason = finishReasonOrResponse;\n\t} else if (typeof finishReasonOrResponse === \"object\" && finishReasonOrResponse !== null) {\n\t\tconst response = finishReasonOrResponse;\n\n\t\t// OpenAI format: { choices: [{ finish_reason: \"stop\" }] }\n\t\tconst choices = response.choices as Array<{ finish_reason?: string }> | undefined;\n\t\tif (Array.isArray(choices) && choices.length > 0) {\n\t\t\tfinishReason = choices[0].finish_reason;\n\t\t} else if (\"finish_reason\" in response) {\n\t\t\tfinishReason = response.finish_reason as string;\n\t\t} else {\n\t\t\tfinishReason = undefined;\n\t\t}\n\t} else {\n\t\t// Numbers, booleans, etc. -- default to stop\n\t\tfinishReason = undefined;\n\t}\n\n\tconst raw = finishReason ?? 
\"stop\";\n\n\tswitch (finishReason) {\n\t\tcase \"stop\":\n\t\t\treturn { unified: \"stop\", raw };\n\t\tcase \"length\":\n\t\tcase \"model_length\":\n\t\t\treturn { unified: \"length\", raw };\n\t\tcase \"tool_calls\":\n\t\t\treturn { unified: \"tool-calls\", raw };\n\t\tcase \"error\":\n\t\t\treturn { unified: \"error\", raw };\n\t\tcase \"other\":\n\t\tcase \"unknown\":\n\t\t\treturn { unified: \"other\", raw };\n\t\tdefault:\n\t\t\treturn { unified: \"stop\", raw };\n\t}\n}\n","import type {\n\tLanguageModelV3FinishReason,\n\tLanguageModelV3StreamPart,\n\tLanguageModelV3Usage,\n} from \"@ai-sdk/provider\";\nimport { generateId } from \"ai\";\nimport { mapWorkersAIFinishReason } from \"./map-workersai-finish-reason\";\nimport { mapWorkersAIUsage } from \"./map-workersai-usage\";\n\n/**\n * Prepend a stream-start event to an existing LanguageModelV3 stream.\n * Uses pipeThrough for proper backpressure and error propagation.\n */\nexport function prependStreamStart(\n\tsource: ReadableStream<LanguageModelV3StreamPart>,\n\twarnings: LanguageModelV3StreamPart extends { type: \"stream-start\" } ? 
never : unknown,\n): ReadableStream<LanguageModelV3StreamPart> {\n\tlet sentStart = false;\n\treturn source.pipeThrough(\n\t\tnew TransformStream<LanguageModelV3StreamPart, LanguageModelV3StreamPart>({\n\t\t\ttransform(chunk, controller) {\n\t\t\t\tif (!sentStart) {\n\t\t\t\t\tsentStart = true;\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"stream-start\",\n\t\t\t\t\t\twarnings: warnings as [],\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tcontroller.enqueue(chunk);\n\t\t\t},\n\t\t\tflush(controller) {\n\t\t\t\tif (!sentStart) {\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"stream-start\",\n\t\t\t\t\t\twarnings: warnings as [],\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t},\n\t\t}),\n\t);\n}\n\n/**\n * Check if a streaming tool call chunk is a null-finalization sentinel.\n */\nfunction isNullFinalizationChunk(tc: Record<string, unknown>): boolean {\n\tconst fn = tc.function as Record<string, unknown> | undefined;\n\tconst name = fn?.name ?? tc.name ?? null;\n\tconst args = fn?.arguments ?? tc.arguments ?? null;\n\tconst id = tc.id ?? null;\n\treturn !id && !name && (!args || args === \"\");\n}\n\n/**\n * Maps a Workers AI SSE stream into AI SDK V3 LanguageModelV3StreamPart events.\n *\n * Uses a TransformStream pipeline for proper backpressure — chunks are emitted\n * one at a time as the downstream consumer pulls, not buffered eagerly.\n *\n * Handles two distinct formats:\n * 1. Native format: { response: \"chunk\", tool_calls: [...] }\n * 2. OpenAI format: { choices: [{ delta: { content: \"chunk\" } }] }\n */\nexport function getMappedStream(\n\tresponse: Response | ReadableStream<Uint8Array>,\n): ReadableStream<LanguageModelV3StreamPart> {\n\tconst rawStream =\n\t\tresponse instanceof ReadableStream\n\t\t\t? 
response\n\t\t\t: (response.body as ReadableStream<Uint8Array>);\n\n\tif (!rawStream) {\n\t\tthrow new Error(\"No readable stream available for SSE parsing.\");\n\t}\n\n\t// State shared across the transform\n\tlet usage: LanguageModelV3Usage = {\n\t\toutputTokens: { total: 0, text: undefined, reasoning: undefined },\n\t\tinputTokens: {\n\t\t\ttotal: 0,\n\t\t\tnoCache: undefined,\n\t\t\tcacheRead: undefined,\n\t\t\tcacheWrite: undefined,\n\t\t},\n\t\traw: { totalTokens: 0 },\n\t};\n\tlet textId: string | null = null;\n\tlet reasoningId: string | null = null;\n\tlet finishReason: LanguageModelV3FinishReason | null = null;\n\tlet receivedDone = false;\n\tlet receivedAnyData = false;\n\n\t// Track tool call streaming state per index.\n\t// When we see the first chunk for a tool call index, we emit tool-input-start.\n\t// Subsequent argument deltas emit tool-input-delta.\n\t// All open tool calls are closed with tool-input-end in flush().\n\tconst activeToolCalls = new Map<number, { id: string; toolName: string; args: string }>();\n\n\t// Step 1: Decode bytes into SSE lines\n\tconst sseStream = rawStream.pipeThrough(new SSEDecoder());\n\n\t// Step 2: Transform SSE events into LanguageModelV3StreamPart\n\treturn sseStream.pipeThrough(\n\t\tnew TransformStream<string, LanguageModelV3StreamPart>({\n\t\t\ttransform(data, controller) {\n\t\t\t\tif (!data || data === \"[DONE]\") {\n\t\t\t\t\tif (data === \"[DONE]\") receivedDone = true;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\treceivedAnyData = true;\n\t\t\t\tlet chunk: Record<string, unknown>;\n\t\t\t\ttry {\n\t\t\t\t\tchunk = JSON.parse(data);\n\t\t\t\t} catch {\n\t\t\t\t\tconsole.warn(\"[workers-ai-provider] failed to parse SSE event:\", data);\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tusage = mapWorkersAIUsage(chunk as Parameters<typeof mapWorkersAIUsage>[0]);\n\t\t\t\t}\n\n\t\t\t\t// Extract finish_reason\n\t\t\t\tconst choices = chunk.choices as\n\t\t\t\t\t| 
Array<{\n\t\t\t\t\t\t\tfinish_reason?: string;\n\t\t\t\t\t\t\tdelta?: Record<string, unknown>;\n\t\t\t\t\t }>\n\t\t\t\t\t| undefined;\n\t\t\t\tconst choiceFinishReason = choices?.[0]?.finish_reason;\n\t\t\t\tconst directFinishReason = chunk.finish_reason as string | undefined;\n\n\t\t\t\tif (choiceFinishReason != null) {\n\t\t\t\t\tfinishReason = mapWorkersAIFinishReason(choiceFinishReason);\n\t\t\t\t} else if (directFinishReason != null) {\n\t\t\t\t\tfinishReason = mapWorkersAIFinishReason(directFinishReason);\n\t\t\t\t}\n\n\t\t\t\t// --- Native format: top-level `response` field ---\n\t\t\t\tconst nativeResponse = chunk.response;\n\t\t\t\tif (nativeResponse != null && nativeResponse !== \"\") {\n\t\t\t\t\tconst responseText = String(nativeResponse);\n\t\t\t\t\tif (responseText.length > 0) {\n\t\t\t\t\t\t// Close active reasoning block before text starts\n\t\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (!textId) {\n\t\t\t\t\t\t\ttextId = generateId();\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"text-start\", id: textId });\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"text-delta\",\n\t\t\t\t\t\t\tid: textId,\n\t\t\t\t\t\t\tdelta: responseText,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// --- Native format: top-level `tool_calls` ---\n\t\t\t\tif (Array.isArray(chunk.tool_calls)) {\n\t\t\t\t\t// Close active reasoning block before tool calls start\n\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t}\n\t\t\t\t\temitToolCallDeltas(chunk.tool_calls as Record<string, unknown>[], controller);\n\t\t\t\t}\n\n\t\t\t\t// --- OpenAI format: choices[0].delta ---\n\t\t\t\tif (choices?.[0]?.delta) {\n\t\t\t\t\tconst delta = choices[0].delta;\n\n\t\t\t\t\tconst reasoningDelta = (delta.reasoning_content ?? 
delta.reasoning) as\n\t\t\t\t\t\t| string\n\t\t\t\t\t\t| undefined;\n\t\t\t\t\tif (reasoningDelta && reasoningDelta.length > 0) {\n\t\t\t\t\t\tif (!reasoningId) {\n\t\t\t\t\t\t\treasoningId = generateId();\n\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\ttype: \"reasoning-start\",\n\t\t\t\t\t\t\t\tid: reasoningId,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"reasoning-delta\",\n\t\t\t\t\t\t\tid: reasoningId,\n\t\t\t\t\t\t\tdelta: reasoningDelta,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\n\t\t\t\t\tconst textDelta = delta.content as string | undefined;\n\t\t\t\t\tif (textDelta && textDelta.length > 0) {\n\t\t\t\t\t\t// Close active reasoning block before text starts\n\t\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (!textId) {\n\t\t\t\t\t\t\ttextId = generateId();\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"text-start\", id: textId });\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"text-delta\",\n\t\t\t\t\t\t\tid: textId,\n\t\t\t\t\t\t\tdelta: textDelta,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\n\t\t\t\t\tconst deltaToolCalls = delta.tool_calls as\n\t\t\t\t\t\t| Record<string, unknown>[]\n\t\t\t\t\t\t| undefined;\n\t\t\t\t\tif (Array.isArray(deltaToolCalls)) {\n\t\t\t\t\t\t// Close active reasoning block before tool calls start\n\t\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t\temitToolCallDeltas(deltaToolCalls, controller);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tflush(controller) {\n\t\t\t\t// Close all open tool call inputs and emit complete tool-call events\n\t\t\t\tfor (const [, tc] of activeToolCalls) {\n\t\t\t\t\tcontroller.enqueue({ type: \"tool-input-end\", id: tc.id });\n\t\t\t\t\t// Emit the complete tool-call event — the AI SDK expects 
both\n\t\t\t\t\t// incremental tool-input-* events AND a final tool-call event,\n\t\t\t\t\t// matching how @ai-sdk/openai-compatible works.\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"tool-call\",\n\t\t\t\t\t\ttoolCallId: tc.id,\n\t\t\t\t\t\ttoolName: tc.toolName,\n\t\t\t\t\t\tinput: tc.args,\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Close open text/reasoning blocks\n\t\t\t\tif (reasoningId) {\n\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t}\n\t\t\t\tif (textId) {\n\t\t\t\t\tcontroller.enqueue({ type: \"text-end\", id: textId });\n\t\t\t\t}\n\n\t\t\t\t// Detect premature termination\n\t\t\t\tconst effectiveFinishReason =\n\t\t\t\t\t!receivedDone && receivedAnyData && !finishReason\n\t\t\t\t\t\t? ({\n\t\t\t\t\t\t\t\tunified: \"error\",\n\t\t\t\t\t\t\t\traw: \"stream-truncated\",\n\t\t\t\t\t\t\t} as LanguageModelV3FinishReason)\n\t\t\t\t\t\t: (finishReason ?? { unified: \"stop\", raw: \"stop\" });\n\n\t\t\t\tcontroller.enqueue({\n\t\t\t\t\tfinishReason: effectiveFinishReason,\n\t\t\t\t\ttype: \"finish\",\n\t\t\t\t\tusage,\n\t\t\t\t});\n\t\t\t},\n\t\t}),\n\t);\n\n\t/**\n\t * Emit incremental tool call events from streaming chunks.\n\t *\n\t * Workers AI streams tool calls as:\n\t * Chunk A: { id, type, index, function: { name } } — start\n\t * Chunk B: { index, function: { arguments: \"partial...\" } } — args delta\n\t * Chunk C: { index, function: { arguments: \"rest...\" } } — args delta\n\t * Chunk D: { id: null, type: null, function: { name: null } } — finalize (skip)\n\t *\n\t * We emit tool-input-start on first sight, tool-input-delta for each\n\t * argument chunk, and tool-input-end in flush().\n\t */\n\tfunction emitToolCallDeltas(\n\t\ttoolCalls: Record<string, unknown>[],\n\t\tcontroller: TransformStreamDefaultController<LanguageModelV3StreamPart>,\n\t) {\n\t\tfor (const tc of toolCalls) {\n\t\t\tif (isNullFinalizationChunk(tc)) continue;\n\n\t\t\tconst tcIndex = (tc.index as number) ?? 
0;\n\t\t\tconst fn = tc.function as Record<string, unknown> | undefined;\n\t\t\tconst tcName = (fn?.name ?? tc.name ?? null) as string | null;\n\t\t\tconst tcArgs = (fn?.arguments ?? tc.arguments ?? null) as string | null;\n\t\t\tconst tcId = tc.id as string | null;\n\n\t\t\tif (!activeToolCalls.has(tcIndex)) {\n\t\t\t\t// First chunk for this tool call — emit tool-input-start\n\t\t\t\tconst id = tcId || generateId();\n\t\t\t\tconst toolName = tcName || \"\";\n\t\t\t\tactiveToolCalls.set(tcIndex, { id, toolName, args: \"\" });\n\n\t\t\t\tcontroller.enqueue({\n\t\t\t\t\ttype: \"tool-input-start\",\n\t\t\t\t\tid,\n\t\t\t\t\ttoolName,\n\t\t\t\t});\n\n\t\t\t\t// If arguments arrived in the same chunk as the start, emit them\n\t\t\t\tif (tcArgs != null && tcArgs !== \"\") {\n\t\t\t\t\tconst delta = typeof tcArgs === \"string\" ? tcArgs : JSON.stringify(tcArgs);\n\t\t\t\t\tactiveToolCalls.get(tcIndex)!.args += delta;\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"tool-input-delta\",\n\t\t\t\t\t\tid,\n\t\t\t\t\t\tdelta,\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Subsequent chunks — emit argument deltas\n\t\t\t\tconst active = activeToolCalls.get(tcIndex)!;\n\t\t\t\tif (tcArgs != null && tcArgs !== \"\") {\n\t\t\t\t\tconst delta = typeof tcArgs === \"string\" ? 
tcArgs : JSON.stringify(tcArgs);\n\t\t\t\t\tactive.args += delta;\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"tool-input-delta\",\n\t\t\t\t\t\tid: active.id,\n\t\t\t\t\t\tdelta,\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * TransformStream that decodes a raw byte stream into SSE `data:` payloads.\n * Each output chunk is the string content after \"data: \" (one per SSE event).\n * Handles line buffering for partial chunks.\n */\nclass SSEDecoder extends TransformStream<Uint8Array, string> {\n\tconstructor() {\n\t\tlet buffer = \"\";\n\t\tconst decoder = new TextDecoder();\n\n\t\tsuper({\n\t\t\ttransform(chunk, controller) {\n\t\t\t\tbuffer += decoder.decode(chunk, { stream: true });\n\t\t\t\tconst lines = buffer.split(\"\\n\");\n\t\t\t\tbuffer = lines.pop() || \"\";\n\n\t\t\t\tfor (const line of lines) {\n\t\t\t\t\tconst trimmed = line.trim();\n\t\t\t\t\tif (!trimmed) continue;\n\t\t\t\t\tif (trimmed.startsWith(\"data: \")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(6));\n\t\t\t\t\t} else if (trimmed.startsWith(\"data:\")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(5));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tflush(controller) {\n\t\t\t\tif (buffer.trim()) {\n\t\t\t\t\tconst trimmed = buffer.trim();\n\t\t\t\t\tif (trimmed.startsWith(\"data: \")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(6));\n\t\t\t\t\t} else if (trimmed.startsWith(\"data:\")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(5));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t});\n\t}\n}\n","import type { LanguageModelV3, LanguageModelV3ToolCall } from \"@ai-sdk/provider\";\nimport { generateId } from \"ai\";\nimport type { WorkersAIChatPrompt } from \"./workersai-chat-prompt\";\n\n// ---------------------------------------------------------------------------\n// Workers AI quirk workarounds\n// ---------------------------------------------------------------------------\n\n/**\n * Normalize messages before passing to the Workers AI binding.\n *\n * The binding has 
strict schema validation that differs from the OpenAI API:\n * - `content` must not be null\n */\nexport function normalizeMessagesForBinding(messages: WorkersAIChatPrompt): WorkersAIChatPrompt {\n\treturn messages.map((msg) => {\n\t\tconst normalized = { ...msg };\n\n\t\t// content: null → content: \"\"\n\t\tif (normalized.content === null || normalized.content === undefined) {\n\t\t\t(normalized as { content: string }).content = \"\";\n\t\t}\n\n\t\treturn normalized;\n\t});\n}\n\n// ---------------------------------------------------------------------------\n// REST API client\n// ---------------------------------------------------------------------------\n\n/**\n * General AI run interface with overloads to handle distinct return types.\n */\nexport interface AiRun {\n\t<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"],\n\t\toptions: AiOptions & { returnRawResponse: true },\n\t): Promise<Response>;\n\n\t<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"] & { stream: true },\n\t\toptions?: AiOptions,\n\t): Promise<ReadableStream<Uint8Array>>;\n\n\t<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"],\n\t\toptions?: AiOptions,\n\t): Promise<AiModels[Name][\"postProcessedOutputs\"]>;\n}\n\n/**\n * Parameters for configuring the Cloudflare-based AI runner.\n */\nexport interface CreateRunConfig {\n\t/** Your Cloudflare account identifier. */\n\taccountId: string;\n\t/** Cloudflare API token/key with appropriate permissions. */\n\tapiKey: string;\n\t/** Custom fetch implementation for intercepting requests. */\n\tfetch?: typeof globalThis.fetch;\n}\n\n/**\n * Creates a run method that emulates the Cloudflare Workers AI binding,\n * but uses the Cloudflare REST API under the hood.\n */\nexport function createRun(config: CreateRunConfig): AiRun {\n\tconst { accountId, apiKey } = config;\n\tconst fetchFn = config.fetch ?? 
globalThis.fetch;\n\n\treturn async function run<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"],\n\t\toptions?: AiOptions & Record<string, unknown>,\n\t): Promise<Response | ReadableStream<Uint8Array> | AiModels[Name][\"postProcessedOutputs\"]> {\n\t\tconst {\n\t\t\tgateway,\n\t\t\tprefix: _prefix,\n\t\t\textraHeaders,\n\t\t\treturnRawResponse,\n\t\t\tsignal, // AbortSignal — not serializable as a query parameter\n\t\t\t...passthroughOptions\n\t\t} = options || {};\n\n\t\tconst urlParams = new URLSearchParams();\n\t\tfor (const [key, value] of Object.entries(passthroughOptions)) {\n\t\t\tif (value === undefined || value === null) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Value for option '${key}' is not able to be coerced into a string.`,\n\t\t\t\t);\n\t\t\t}\n\t\t\ttry {\n\t\t\t\tconst valueStr = String(value);\n\t\t\t\tif (!valueStr) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\turlParams.append(key, valueStr);\n\t\t\t} catch {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Value for option '${key}' is not able to be coerced into a string.`,\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\tconst queryString = urlParams.toString();\n\n\t\tconst modelPath = String(model).startsWith(\"run/\") ? model : `run/${model}`;\n\n\t\t// Build URL: use AI Gateway if gateway option is provided, otherwise direct API\n\t\tconst url = gateway?.id\n\t\t\t? `https://gateway.ai.cloudflare.com/v1/${accountId}/${gateway.id}/workers-ai/${modelPath}${\n\t\t\t\t\tqueryString ? `?${queryString}` : \"\"\n\t\t\t\t}`\n\t\t\t: `https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/${modelPath}${\n\t\t\t\t\tqueryString ? `?${queryString}` : \"\"\n\t\t\t\t}`;\n\n\t\tconst headers: Record<string, string> = {\n\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t...(extraHeaders && typeof extraHeaders === \"object\"\n\t\t\t\t? 
(extraHeaders as Record<string, string>)\n\t\t\t\t: {}),\n\t\t};\n\n\t\tif (gateway) {\n\t\t\tif (gateway.skipCache) {\n\t\t\t\theaders[\"cf-aig-skip-cache\"] = \"true\";\n\t\t\t}\n\t\t\tif (typeof gateway.cacheTtl === \"number\") {\n\t\t\t\theaders[\"cf-aig-cache-ttl\"] = String(gateway.cacheTtl);\n\t\t\t}\n\t\t\tif (gateway.cacheKey) {\n\t\t\t\theaders[\"cf-aig-cache-key\"] = gateway.cacheKey;\n\t\t\t}\n\t\t\tif (gateway.metadata) {\n\t\t\t\theaders[\"cf-aig-metadata\"] = JSON.stringify(gateway.metadata);\n\t\t\t}\n\t\t}\n\n\t\tconst body = JSON.stringify(inputs);\n\n\t\tconst response = await fetchFn(url, {\n\t\t\tbody,\n\t\t\theaders,\n\t\t\tmethod: \"POST\",\n\t\t\tsignal: signal as AbortSignal | undefined,\n\t\t});\n\n\t\t// Check for HTTP errors before processing\n\t\tif (!response.ok && !returnRawResponse) {\n\t\t\tlet errorBody: string;\n\t\t\ttry {\n\t\t\t\terrorBody = await response.text();\n\t\t\t} catch {\n\t\t\t\terrorBody = \"<unable to read response body>\";\n\t\t\t}\n\t\t\tthrow new Error(\n\t\t\t\t`Workers AI API error (${response.status} ${response.statusText}): ${errorBody}`,\n\t\t\t);\n\t\t}\n\n\t\tif (returnRawResponse) {\n\t\t\treturn response;\n\t\t}\n\n\t\tif ((inputs as AiTextGenerationInput).stream === true) {\n\t\t\tconst contentType = response.headers.get(\"content-type\") || \"\";\n\t\t\tif (contentType.includes(\"event-stream\") && response.body) {\n\t\t\t\treturn response.body;\n\t\t\t}\n\t\t\tif (response.body && !contentType.includes(\"json\")) {\n\t\t\t\t// Unknown content type — assume it's a stream\n\t\t\t\treturn response.body;\n\t\t\t}\n\n\t\t\t// Some models (e.g. 
GPT-OSS) don't support streaming via the /ai/run/\n\t\t\t// endpoint and return a JSON response with empty result instead of SSE.\n\t\t\t// Retry without streaming so doStream's graceful degradation path can\n\t\t\t// wrap the complete response as a synthetic stream.\n\t\t\t// Use the same URL (gateway or direct) as the original request.\n\t\t\tconst retryResponse = await fetchFn(url, {\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\t...(inputs as Record<string, unknown>),\n\t\t\t\t\tstream: false,\n\t\t\t\t}),\n\t\t\t\theaders,\n\t\t\t\tmethod: \"POST\",\n\t\t\t\tsignal: signal as AbortSignal | undefined,\n\t\t\t});\n\n\t\t\tif (!retryResponse.ok) {\n\t\t\t\tlet errorBody: string;\n\t\t\t\ttry {\n\t\t\t\t\terrorBody = await retryResponse.text();\n\t\t\t\t} catch {\n\t\t\t\t\terrorBody = \"<unable to read response body>\";\n\t\t\t\t}\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Workers AI API error (${retryResponse.status} ${retryResponse.statusText}): ${errorBody}`,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst retryData = await retryResponse.json<{\n\t\t\t\tresult: AiModels[Name][\"postProcessedOutputs\"];\n\t\t\t}>();\n\t\t\treturn retryData.result;\n\t\t}\n\n\t\tconst data = await response.json<{\n\t\t\tresult: AiModels[Name][\"postProcessedOutputs\"];\n\t\t}>();\n\t\treturn data.result;\n\t};\n}\n\n/**\n * Make a binary REST API call to Workers AI.\n *\n * Some models (e.g. `@cf/deepgram/nova-3`) require raw audio bytes\n * with an appropriate `Content-Type` header instead of JSON.\n *\n * @param config Credentials config\n * @param model Workers AI model name\n * @param audioBytes Raw audio bytes\n * @param contentType MIME type (e.g. 
\"audio/wav\")\n * @param signal Optional AbortSignal\n * @returns The parsed JSON response body\n */\nexport async function createRunBinary(\n\tconfig: CreateRunConfig,\n\tmodel: string,\n\taudioBytes: Uint8Array,\n\tcontentType: string,\n\tsignal?: AbortSignal,\n): Promise<Record<string, unknown>> {\n\tconst url = `https://api.cloudflare.com/client/v4/accounts/${config.accountId}/ai/run/${model}`;\n\n\tconst response = await fetch(url, {\n\t\tmethod: \"POST\",\n\t\theaders: {\n\t\t\tAuthorization: `Bearer ${config.apiKey}`,\n\t\t\t\"Content-Type\": contentType,\n\t\t},\n\t\tbody: audioBytes,\n\t\tsignal,\n\t});\n\n\tif (!response.ok) {\n\t\tlet errorBody: string;\n\t\ttry {\n\t\t\terrorBody = await response.text();\n\t\t} catch {\n\t\t\terrorBody = \"<unable to read response body>\";\n\t\t}\n\t\tthrow new Error(\n\t\t\t`Workers AI API error (${response.status} ${response.statusText}): ${errorBody}`,\n\t\t);\n\t}\n\n\tconst data = await response.json<{ result?: Record<string, unknown> }>();\n\treturn (data.result ?? data) as Record<string, unknown>;\n}\n\n// ---------------------------------------------------------------------------\n// Tool preparation\n// ---------------------------------------------------------------------------\n\nexport function prepareToolsAndToolChoice(\n\ttools: Parameters<LanguageModelV3[\"doGenerate\"]>[0][\"tools\"],\n\ttoolChoice: Parameters<LanguageModelV3[\"doGenerate\"]>[0][\"toolChoice\"],\n) {\n\tif (tools == null) {\n\t\treturn { tool_choice: undefined, tools: undefined };\n\t}\n\n\tconst mappedTools = tools.map((tool) => ({\n\t\tfunction: {\n\t\t\tdescription: tool.type === \"function\" ? tool.description : undefined,\n\t\t\tname: tool.name,\n\t\t\tparameters: tool.type === \"function\" ? 
tool.inputSchema : undefined,\n\t\t},\n\t\ttype: \"function\",\n\t}));\n\n\tif (toolChoice == null) {\n\t\treturn { tool_choice: undefined, tools: mappedTools };\n\t}\n\n\tconst type = toolChoice.type;\n\n\tswitch (type) {\n\t\tcase \"auto\":\n\t\t\treturn { tool_choice: type, tools: mappedTools };\n\t\tcase \"none\":\n\t\t\treturn { tool_choice: type, tools: mappedTools };\n\t\tcase \"required\":\n\t\t\treturn { tool_choice: \"required\", tools: mappedTools };\n\n\t\t// Workers AI does not support tool mode directly,\n\t\t// so we filter the tools and force the tool choice through 'required'\n\t\tcase \"tool\":\n\t\t\treturn {\n\t\t\t\ttool_choice: \"required\",\n\t\t\t\ttools: mappedTools.filter((tool) => tool.function.name === toolChoice.toolName),\n\t\t\t};\n\t\tdefault: {\n\t\t\tconst exhaustiveCheck = type satisfies never;\n\t\t\tthrow new Error(`Unsupported tool choice type: ${exhaustiveCheck}`);\n\t\t}\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Message helpers\n// ---------------------------------------------------------------------------\n\n// ---------------------------------------------------------------------------\n// Tool call processing\n// ---------------------------------------------------------------------------\n\n/** Workers AI flat tool call format (non-streaming, native) */\ninterface FlatToolCall {\n\tname: string;\n\targuments: unknown;\n\tid?: string;\n}\n\n/** Workers AI OpenAI-compatible tool call format */\ninterface OpenAIToolCall {\n\tid: string;\n\ttype: \"function\";\n\tfunction: {\n\t\tname: string;\n\t\targuments: unknown;\n\t};\n}\n\n/** Partial tool call from streaming (has index for merging) */\ninterface PartialToolCall {\n\tindex?: number;\n\tid?: string;\n\ttype?: string;\n\tfunction?: {\n\t\tname?: string;\n\t\targuments?: string;\n\t};\n\t// Flat format fields\n\tname?: string;\n\targuments?: string;\n}\n\nfunction mergePartialToolCalls(partialCalls: PartialToolCall[]) 
{\n\tconst mergedCallsByIndex: Record<\n\t\tnumber,\n\t\t{ function: { arguments: string; name: string }; id: string; type: string }\n\t> = {};\n\n\tfor (const partialCall of partialCalls) {\n\t\tconst index = partialCall.index ?? 0;\n\n\t\tif (!mergedCallsByIndex[index]) {\n\t\t\tmergedCallsByIndex[index] = {\n\t\t\t\tfunction: {\n\t\t\t\t\targuments: \"\",\n\t\t\t\t\tname: partialCall.function?.name || \"\",\n\t\t\t\t},\n\t\t\t\tid: partialCall.id || \"\",\n\t\t\t\ttype: partialCall.type || \"\",\n\t\t\t};\n\t\t} else {\n\t\t\tif (partialCall.id) {\n\t\t\t\tmergedCallsByIndex[index].id = partialCall.id;\n\t\t\t}\n\t\t\tif (partialCall.type) {\n\t\t\t\tmergedCallsByIndex[index].type = partialCall.type;\n\t\t\t}\n\t\t\tif (partialCall.function?.name) {\n\t\t\t\tmergedCallsByIndex[index].function.name = partialCall.function.name;\n\t\t\t}\n\t\t}\n\n\t\t// Append arguments if available (they arrive in order during streaming)\n\t\tif (partialCall.function?.arguments) {\n\t\t\tmergedCallsByIndex[index].function.arguments += partialCall.function.arguments;\n\t\t}\n\t}\n\n\treturn Object.values(mergedCallsByIndex);\n}\n\nfunction processToolCall(toolCall: FlatToolCall | OpenAIToolCall): LanguageModelV3ToolCall {\n\t// OpenAI format: has function.name (the key discriminator)\n\tconst fn =\n\t\t\"function\" in toolCall && typeof toolCall.function === \"object\" && toolCall.function\n\t\t\t? (toolCall.function as { name?: string; arguments?: unknown })\n\t\t\t: null;\n\n\tif (fn?.name) {\n\t\treturn {\n\t\t\tinput:\n\t\t\t\ttypeof fn.arguments === \"string\"\n\t\t\t\t\t? fn.arguments\n\t\t\t\t\t: JSON.stringify(fn.arguments || {}),\n\t\t\ttoolCallId: toolCall.id || generateId(),\n\t\t\ttype: \"tool-call\",\n\t\t\ttoolName: fn.name,\n\t\t};\n\t}\n\n\t// Flat format (native Workers AI non-streaming): has top-level name\n\tconst flat = toolCall as FlatToolCall;\n\treturn {\n\t\tinput:\n\t\t\ttypeof flat.arguments === \"string\"\n\t\t\t\t? 
flat.arguments\n\t\t\t\t: JSON.stringify(flat.arguments || {}),\n\t\ttoolCallId: flat.id || generateId(),\n\t\ttype: \"tool-call\",\n\t\ttoolName: flat.name,\n\t};\n}\n\nexport function processToolCalls(output: Record<string, unknown>): LanguageModelV3ToolCall[] {\n\tif (output.tool_calls && Array.isArray(output.tool_calls)) {\n\t\treturn output.tool_calls.map((toolCall: FlatToolCall | OpenAIToolCall) =>\n\t\t\tprocessToolCall(toolCall),\n\t\t);\n\t}\n\n\tconst choices = output.choices as\n\t\t| Array<{ message?: { tool_calls?: Array<FlatToolCall | OpenAIToolCall> } }>\n\t\t| undefined;\n\tif (choices?.[0]?.message?.tool_calls && Array.isArray(choices[0].message.tool_calls)) {\n\t\treturn choices[0].message.tool_calls.map((toolCall) => processToolCall(toolCall));\n\t}\n\n\treturn [];\n}\n\nexport function processPartialToolCalls(partialToolCalls: PartialToolCall[]) {\n\tconst mergedToolCalls = mergePartialToolCalls(partialToolCalls);\n\treturn processToolCalls({ tool_calls: mergedToolCalls });\n}\n\n// ---------------------------------------------------------------------------\n// Text extraction\n// ---------------------------------------------------------------------------\n\n/**\n * Extract text from a Workers AI response, handling multiple response formats:\n * - OpenAI format: { choices: [{ message: { content: \"...\" } }] }\n * - Native format: { response: \"...\" }\n * - Structured output quirk: { response: { ... } } (object instead of string)\n * - Structured output quirk: { response: \"{ ... 
}\" } (JSON string)\n */\nexport function processText(output: Record<string, unknown>): string | undefined {\n\t// OpenAI format\n\tconst choices = output.choices as Array<{ message?: { content?: string | null } }> | undefined;\n\tconst choiceContent = choices?.[0]?.message?.content;\n\tif (choiceContent != null && String(choiceContent).length > 0) {\n\t\treturn String(choiceContent);\n\t}\n\n\tif (\"response\" in output) {\n\t\tconst response = output.response;\n\t\t// Object response (structured output quirk #2)\n\t\tif (typeof response === \"object\" && response !== null) {\n\t\t\treturn JSON.stringify(response);\n\t\t}\n\t\t// Numeric response (quirk #9)\n\t\tif (typeof response === \"number\") {\n\t\t\treturn String(response);\n\t\t}\n\t\t// Null response (e.g., tool-call-only responses)\n\t\tif (response === null || response === undefined) {\n\t\t\treturn undefined;\n\t\t}\n\t\treturn String(response);\n\t}\n\treturn undefined;\n}\n","import type { LanguageModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\n\nimport type { AISearchChatSettings } from \"./aisearch-chat-settings\";\nimport { convertToWorkersAIChatMessages } from \"./convert-to-workersai-chat-messages\";\nimport { mapWorkersAIUsage } from \"./map-workersai-usage\";\nimport { getMappedStream, prependStreamStart } from \"./streaming\";\nimport { processToolCalls } from \"./utils\";\nimport type { TextGenerationModels } from \"./workersai-models\";\n\ntype AISearchChatConfig = {\n\tprovider: string;\n\tbinding: AutoRAG;\n\tgateway?: GatewayOptions;\n};\n\nexport class AISearchChatLanguageModel implements LanguageModelV3 {\n\treadonly specificationVersion = \"v3\";\n\treadonly defaultObjectGenerationMode = \"json\";\n\n\treadonly supportedUrls: Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>> = {};\n\n\treadonly modelId: TextGenerationModels;\n\treadonly settings: AISearchChatSettings;\n\n\tprivate readonly config: AISearchChatConfig;\n\n\tconstructor(\n\t\tmodelId: 
TextGenerationModels,\n\t\tsettings: AISearchChatSettings,\n\t\tconfig: AISearchChatConfig,\n\t) {\n\t\tthis.modelId = modelId;\n\t\tthis.settings = settings;\n\t\tthis.config = config;\n\t}\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tprivate getWarnings({\n\t\ttools,\n\t\tfrequencyPenalty,\n\t\tpresencePenalty,\n\t\tresponseFormat,\n\t}: Parameters<LanguageModelV3[\"doGenerate\"]>[0]): SharedV3Warning[] {\n\t\tconst warnings: SharedV3Warning[] = [];\n\n\t\tif (tools != null && tools.length > 0) {\n\t\t\tconsole.warn(\n\t\t\t\t\"[workers-ai-provider] Tools are not supported by AI Search. They will be ignored.\",\n\t\t\t);\n\t\t\twarnings.push({ feature: \"tools\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (frequencyPenalty != null) {\n\t\t\twarnings.push({ feature: \"frequencyPenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (presencePenalty != null) {\n\t\t\twarnings.push({ feature: \"presencePenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (responseFormat?.type === \"json\") {\n\t\t\twarnings.push({ feature: \"responseFormat\", type: \"unsupported\" });\n\t\t}\n\n\t\treturn warnings;\n\t}\n\n\t/**\n\t * Build the search query from messages.\n\t * Flattens the conversation into a single string for aiSearch.\n\t */\n\tprivate buildQuery(prompt: Parameters<LanguageModelV3[\"doGenerate\"]>[0][\"prompt\"]): string {\n\t\tconst { messages } = convertToWorkersAIChatMessages(prompt);\n\t\treturn messages.map(({ content, role }) => `${role}: ${content}`).join(\"\\n\\n\");\n\t}\n\n\tasync doGenerate(\n\t\toptions: Parameters<LanguageModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doGenerate\"]>>> {\n\t\tconst warnings = this.getWarnings(options);\n\t\tconst query = this.buildQuery(options.prompt);\n\n\t\tconst output = await this.config.binding.aiSearch({ query });\n\n\t\treturn {\n\t\t\tfinishReason: { unified: \"stop\", raw: \"stop\" },\n\t\t\tcontent: [\n\t\t\t\t...output.data.map(({ file_id, 
filename, score }) => ({\n\t\t\t\t\ttype: \"source\" as const,\n\t\t\t\t\tsourceType: \"url\" as const,\n\t\t\t\t\tid: file_id,\n\t\t\t\t\turl: filename,\n\t\t\t\t\tproviderMetadata: {\n\t\t\t\t\t\tattributes: { score },\n\t\t\t\t\t},\n\t\t\t\t})),\n\t\t\t\t{\n\t\t\t\t\ttype: \"text\" as const,\n\t\t\t\t\ttext: output.response,\n\t\t\t\t},\n\t\t\t\t...processToolCalls(output as unknown as Record<string, unknown>),\n\t\t\t],\n\t\t\tusage: mapWorkersAIUsage(output as unknown as Record<string, unknown>),\n\t\t\twarnings,\n\t\t};\n\t}\n\n\tasync doStream(\n\t\toptions: Parameters<LanguageModelV3[\"doStream\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doStream\"]>>> {\n\t\tconst warnings = this.getWarnings(options);\n\t\tconst query = this.buildQuery(options.prompt);\n\n\t\tconst response = await this.config.binding.aiSearch({\n\t\t\tquery,\n\t\t\tstream: true,\n\t\t});\n\n\t\treturn {\n\t\t\tstream: prependStreamStart(\n\t\t\t\tgetMappedStream(response as unknown as Response | ReadableStream<Uint8Array>),\n\t\t\t\twarnings,\n\t\t\t),\n\t\t};\n\t}\n}\n","import type {\n\tEmbeddingModelV3,\n\tEmbeddingModelV3CallOptions,\n\tEmbeddingModelV3Result,\n} from \"@ai-sdk/provider\";\nimport { TooManyEmbeddingValuesForCallError } from \"@ai-sdk/provider\";\nimport type { EmbeddingModels } from \"./workersai-models\";\n\nexport type WorkersAIEmbeddingConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\nexport type WorkersAIEmbeddingSettings = {\n\tgateway?: GatewayOptions;\n\tmaxEmbeddingsPerCall?: number;\n\tsupportsParallelCalls?: boolean;\n\n\t/**\n\t * Passthrough settings that are provided directly to the run function.\n\t */\n\t[key: string]: unknown;\n};\n\nexport class WorkersAIEmbeddingModel implements EmbeddingModelV3 {\n\treadonly specificationVersion = \"v3\";\n\treadonly modelId: EmbeddingModels;\n\tprivate readonly config: WorkersAIEmbeddingConfig;\n\tprivate readonly settings: WorkersAIEmbeddingSettings;\n\n\tget 
provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tget maxEmbeddingsPerCall(): number {\n\t\t// https://developers.cloudflare.com/workers-ai/platform/limits/#text-embeddings\n\t\treturn this.settings.maxEmbeddingsPerCall ?? 3000;\n\t}\n\n\tget supportsParallelCalls(): boolean {\n\t\treturn this.settings.supportsParallelCalls ?? true;\n\t}\n\n\tconstructor(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings: WorkersAIEmbeddingSettings,\n\t\tconfig: WorkersAIEmbeddingConfig,\n\t) {\n\t\tthis.modelId = modelId;\n\t\tthis.settings = settings;\n\t\tthis.config = config;\n\t}\n\n\tasync doEmbed({\n\t\tvalues,\n\t\tabortSignal,\n\t}: EmbeddingModelV3CallOptions): Promise<EmbeddingModelV3Result> {\n\t\tif (values.length > this.maxEmbeddingsPerCall) {\n\t\t\tthrow new TooManyEmbeddingValuesForCallError({\n\t\t\t\tmaxEmbeddingsPerCall: this.maxEmbeddingsPerCall,\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\tprovider: this.provider,\n\t\t\t\tvalues,\n\t\t\t});\n\t\t}\n\n\t\tconst {\n\t\t\tgateway,\n\t\t\tmaxEmbeddingsPerCall: _maxEmbeddingsPerCall,\n\t\t\tsupportsParallelCalls: _supportsParallelCalls,\n\t\t\t...passthroughOptions\n\t\t} = this.settings;\n\n\t\tconst response = await this.config.binding.run(\n\t\t\tthis.modelId as keyof AiModels,\n\t\t\t{\n\t\t\t\ttext: values,\n\t\t\t},\n\t\t\t{\n\t\t\t\tgateway: this.config.gateway ?? 
gateway,\n\t\t\t\tsignal: abortSignal,\n\t\t\t\t...passthroughOptions,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\treturn {\n\t\t\tembeddings: (response as { data: number[][] }).data,\n\t\t\twarnings: [],\n\t\t};\n\t}\n}\n","import type { LanguageModelV3, SharedV3Warning, LanguageModelV3StreamPart } from \"@ai-sdk/provider\";\nimport { generateId } from \"ai\";\nimport { convertToWorkersAIChatMessages } from \"./convert-to-workersai-chat-messages\";\nimport { mapWorkersAIFinishReason } from \"./map-workersai-finish-reason\";\nimport { mapWorkersAIUsage } from \"./map-workersai-usage\";\nimport { getMappedStream, prependStreamStart } from \"./streaming\";\nimport {\n\tnormalizeMessagesForBinding,\n\tprepareToolsAndToolChoice,\n\tprocessText,\n\tprocessToolCalls,\n} from \"./utils\";\nimport type { WorkersAIChatSettings } from \"./workersai-chat-settings\";\nimport type { TextGenerationModels } from \"./workersai-models\";\n\ntype WorkersAIChatConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n\t/** True when using a real Workers AI binding (not the REST shim). 
*/\n\tisBinding: boolean;\n};\n\nexport class WorkersAIChatLanguageModel implements LanguageModelV3 {\n\treadonly specificationVersion = \"v3\";\n\treadonly defaultObjectGenerationMode = \"json\";\n\n\treadonly supportedUrls: Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>> = {};\n\n\treadonly modelId: TextGenerationModels;\n\treadonly settings: WorkersAIChatSettings;\n\n\tprivate readonly config: WorkersAIChatConfig;\n\n\tconstructor(\n\t\tmodelId: TextGenerationModels,\n\t\tsettings: WorkersAIChatSettings,\n\t\tconfig: WorkersAIChatConfig,\n\t) {\n\t\tthis.modelId = modelId;\n\t\tthis.settings = settings;\n\t\tthis.config = config;\n\t}\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tprivate getArgs({\n\t\tresponseFormat,\n\t\ttools,\n\t\ttoolChoice,\n\t\tmaxOutputTokens,\n\t\ttemperature,\n\t\ttopP,\n\t\tfrequencyPenalty,\n\t\tpresencePenalty,\n\t\tseed,\n\t}: Parameters<LanguageModelV3[\"doGenerate\"]>[0]) {\n\t\tconst type = responseFormat?.type ?? 
\"text\";\n\n\t\tconst warnings: SharedV3Warning[] = [];\n\n\t\tif (frequencyPenalty != null) {\n\t\t\twarnings.push({ feature: \"frequencyPenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (presencePenalty != null) {\n\t\t\twarnings.push({ feature: \"presencePenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tconst baseArgs = {\n\t\t\tmax_tokens: maxOutputTokens,\n\t\t\tmodel: this.modelId,\n\t\t\trandom_seed: seed,\n\t\t\tsafe_prompt: this.settings.safePrompt,\n\t\t\ttemperature,\n\t\t\ttop_p: topP,\n\t\t};\n\n\t\tswitch (type) {\n\t\t\tcase \"text\": {\n\t\t\t\treturn {\n\t\t\t\t\targs: {\n\t\t\t\t\t\t...baseArgs,\n\t\t\t\t\t\tresponse_format: undefined as\n\t\t\t\t\t\t\t| { type: string; json_schema?: unknown }\n\t\t\t\t\t\t\t| undefined,\n\t\t\t\t\t\t...prepareToolsAndToolChoice(tools, toolChoice),\n\t\t\t\t\t},\n\t\t\t\t\twarnings,\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tcase \"json\": {\n\t\t\t\treturn {\n\t\t\t\t\targs: {\n\t\t\t\t\t\t...baseArgs,\n\t\t\t\t\t\tresponse_format: {\n\t\t\t\t\t\t\ttype: \"json_schema\",\n\t\t\t\t\t\t\tjson_schema:\n\t\t\t\t\t\t\t\tresponseFormat?.type === \"json\" ? responseFormat.schema : undefined,\n\t\t\t\t\t\t},\n\t\t\t\t\t\ttools: undefined,\n\t\t\t\t\t\ttool_choice: undefined,\n\t\t\t\t\t},\n\t\t\t\t\twarnings,\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tdefault: {\n\t\t\t\tconst exhaustiveCheck = type satisfies never;\n\t\t\t\tthrow new Error(`Unsupported type: ${exhaustiveCheck}`);\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Build the inputs object for `binding.run()`, shared by doGenerate and doStream.\n\t *\n\t * Images are embedded inline in messages as OpenAI-compatible content\n\t * arrays with `image_url` parts. 
Both the REST API and the binding\n\t * accept this format at runtime.\n\t *\n\t * The binding path additionally normalises null content to empty strings.\n\t */\n\tprivate buildRunInputs(\n\t\targs: ReturnType<typeof this.getArgs>[\"args\"],\n\t\tmessages: ReturnType<typeof convertToWorkersAIChatMessages>[\"messages\"],\n\t\toptions?: { stream?: boolean },\n\t) {\n\t\treturn {\n\t\t\tmax_tokens: args.max_tokens,\n\t\t\tmessages: this.config.isBinding ? normalizeMessagesForBinding(messages) : messages,\n\t\t\ttemperature: args.temperature,\n\t\t\ttools: args.tools,\n\t\t\t...(args.tool_choice ? { tool_choice: args.tool_choice } : {}),\n\t\t\ttop_p: args.top_p,\n\t\t\t...(args.response_format ? { response_format: args.response_format } : {}),\n\t\t\t...(options?.stream ? { stream: true } : {}),\n\t\t};\n\t}\n\n\t/**\n\t * Get passthrough options for binding.run() from settings.\n\t */\n\tprivate getRunOptions() {\n\t\tconst {\n\t\t\tgateway,\n\t\t\tsafePrompt: _safePrompt,\n\t\t\tsessionAffinity,\n\t\t\textraHeaders,\n\t\t\t...passthroughOptions\n\t\t} = this.settings;\n\n\t\tconst mergedHeaders = {\n\t\t\t...(extraHeaders && typeof extraHeaders === \"object\"\n\t\t\t\t? (extraHeaders as Record<string, string>)\n\t\t\t\t: {}),\n\t\t\t...(sessionAffinity ? { \"x-session-affinity\": sessionAffinity } : {}),\n\t\t};\n\n\t\treturn {\n\t\t\tgateway: this.config.gateway ?? gateway,\n\t\t\t...(Object.keys(mergedHeaders).length > 0 ? 
{ extraHeaders: mergedHeaders } : {}),\n\t\t\t...passthroughOptions,\n\t\t};\n\t}\n\n\tasync doGenerate(\n\t\toptions: Parameters<LanguageModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doGenerate\"]>>> {\n\t\tconst { args, warnings } = this.getArgs(options);\n\t\tconst { messages } = convertToWorkersAIChatMessages(options.prompt);\n\n\t\tconst inputs = this.buildRunInputs(args, messages);\n\t\tconst runOptions = this.getRunOptions();\n\n\t\tconst output = await this.config.binding.run(\n\t\t\targs.model as keyof AiModels,\n\t\t\tinputs as AiModels[keyof AiModels][\"inputs\"],\n\t\t\t{\n\t\t\t\t...runOptions,\n\t\t\t\tsignal: options.abortSignal,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\tif (output instanceof ReadableStream) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Unexpected streaming response from non-streaming request. Check that `stream: true` was not passed.\",\n\t\t\t);\n\t\t}\n\n\t\tconst outputRecord = output as Record<string, unknown>;\n\t\tconst choices = outputRecord.choices as\n\t\t\t| Array<{\n\t\t\t\t\tmessage?: { reasoning_content?: string; reasoning?: string };\n\t\t\t }>\n\t\t\t| undefined;\n\t\tconst reasoningContent =\n\t\t\tchoices?.[0]?.message?.reasoning_content ?? choices?.[0]?.message?.reasoning;\n\n\t\treturn {\n\t\t\tfinishReason: mapWorkersAIFinishReason(outputRecord),\n\t\t\tcontent: [\n\t\t\t\t...(reasoningContent\n\t\t\t\t\t? [{ type: \"reasoning\" as const, text: reasoningContent }]\n\t\t\t\t\t: []),\n\t\t\t\t{\n\t\t\t\t\ttype: \"text\",\n\t\t\t\t\ttext: processText(outputRecord) ?? 
\"\",\n\t\t\t\t},\n\t\t\t\t...processToolCalls(outputRecord),\n\t\t\t],\n\t\t\tusage: mapWorkersAIUsage(output as Record<string, unknown>),\n\t\t\twarnings,\n\t\t};\n\t}\n\n\tasync doStream(\n\t\toptions: Parameters<LanguageModelV3[\"doStream\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doStream\"]>>> {\n\t\tconst { args, warnings } = this.getArgs(options);\n\t\tconst { messages } = convertToWorkersAIChatMessages(options.prompt);\n\n\t\tconst inputs = this.buildRunInputs(args, messages, { stream: true });\n\t\tconst runOptions = this.getRunOptions();\n\n\t\tconst response = await this.config.binding.run(\n\t\t\targs.model as keyof AiModels,\n\t\t\tinputs as AiModels[keyof AiModels][\"inputs\"],\n\t\t\t{\n\t\t\t\t...runOptions,\n\t\t\t\tsignal: options.abortSignal,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\t// If the binding returned a stream, pipe it through the SSE mapper\n\t\tif (response instanceof ReadableStream) {\n\t\t\treturn {\n\t\t\t\tstream: prependStreamStart(getMappedStream(response), warnings),\n\t\t\t};\n\t\t}\n\n\t\t// Graceful degradation: some models return a non-streaming response even\n\t\t// when stream:true is requested. Wrap the complete response as a stream.\n\t\tconst outputRecord = response as Record<string, unknown>;\n\t\tconst choices = outputRecord.choices as\n\t\t\t| Array<{\n\t\t\t\t\tmessage?: { reasoning_content?: string; reasoning?: string };\n\t\t\t }>\n\t\t\t| undefined;\n\t\tconst reasoningContent =\n\t\t\tchoices?.[0]?.message?.reasoning_content ?? 
choices?.[0]?.message?.reasoning;\n\n\t\tlet textId: string | null = null;\n\t\tlet reasoningId: string | null = null;\n\n\t\treturn {\n\t\t\tstream: new ReadableStream<LanguageModelV3StreamPart>({\n\t\t\t\tstart(controller) {\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"stream-start\",\n\t\t\t\t\t\twarnings: warnings as SharedV3Warning[],\n\t\t\t\t\t});\n\n\t\t\t\t\tif (reasoningContent) {\n\t\t\t\t\t\treasoningId = generateId();\n\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-start\", id: reasoningId });\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"reasoning-delta\",\n\t\t\t\t\t\t\tid: reasoningId,\n\t\t\t\t\t\t\tdelta: reasoningContent,\n\t\t\t\t\t\t});\n\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t}\n\n\t\t\t\t\tconst text = processText(outputRecord);\n\t\t\t\t\tif (text) {\n\t\t\t\t\t\ttextId = generateId();\n\t\t\t\t\t\tcontroller.enqueue({ type: \"text-start\", id: textId });\n\t\t\t\t\t\tcontroller.enqueue({ type: \"text-delta\", id: textId, delta: text });\n\t\t\t\t\t\tcontroller.enqueue({ type: \"text-end\", id: textId });\n\t\t\t\t\t}\n\n\t\t\t\t\tfor (const toolCall of processToolCalls(outputRecord)) {\n\t\t\t\t\t\tcontroller.enqueue(toolCall);\n\t\t\t\t\t}\n\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"finish\",\n\t\t\t\t\t\tfinishReason: mapWorkersAIFinishReason(outputRecord),\n\t\t\t\t\t\tusage: mapWorkersAIUsage(response as Record<string, unknown>),\n\t\t\t\t\t});\n\t\t\t\t\tcontroller.close();\n\t\t\t\t},\n\t\t\t}),\n\t\t};\n\t}\n}\n","import type { ImageModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAIImageSettings } from \"./workersai-image-settings\";\nimport type { ImageGenerationModels } from \"./workersai-models\";\n\nexport type WorkersAIImageConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\nexport class WorkersAIImageModel implements ImageModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget 
maxImagesPerCall(): number {\n\t\treturn this.settings.maxImagesPerCall ?? 1;\n\t}\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: ImageGenerationModels,\n\t\treadonly settings: WorkersAIImageSettings,\n\t\treadonly config: WorkersAIImageConfig,\n\t) {}\n\n\tasync doGenerate({\n\t\tprompt,\n\t\tn,\n\t\tsize,\n\t\taspectRatio,\n\t\tseed,\n\t\tabortSignal,\n\t}: Parameters<ImageModelV3[\"doGenerate\"]>[0]): Promise<\n\t\tAwaited<ReturnType<ImageModelV3[\"doGenerate\"]>>\n\t> {\n\t\tconst { width, height } = getDimensionsFromSizeString(size);\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\tif (aspectRatio != null) {\n\t\t\twarnings.push({\n\t\t\t\tdetails: \"This model does not support aspect ratio. Use `size` instead.\",\n\t\t\t\tfeature: \"aspectRatio\",\n\t\t\t\ttype: \"unsupported\",\n\t\t\t});\n\t\t}\n\n\t\tconst generateImage = async () => {\n\t\t\tconst output = (await this.config.binding.run(\n\t\t\t\tthis.modelId as keyof AiModels,\n\t\t\t\t{\n\t\t\t\t\theight,\n\t\t\t\t\tprompt: prompt ?? \"\",\n\t\t\t\t\tseed,\n\t\t\t\t\twidth,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tgateway: this.config.gateway,\n\t\t\t\t\tsignal: abortSignal,\n\t\t\t\t} as AiOptions,\n\t\t\t)) as unknown;\n\n\t\t\treturn toUint8Array(output);\n\t\t};\n\n\t\tconst images: Uint8Array[] = await Promise.all(\n\t\t\tArray.from({ length: n }, () => generateImage()),\n\t\t);\n\n\t\treturn {\n\t\t\timages,\n\t\t\tresponse: {\n\t\t\t\theaders: {},\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\ttimestamp: new Date(),\n\t\t\t},\n\t\t\twarnings,\n\t\t};\n\t}\n}\n\nfunction getDimensionsFromSizeString(size: string | undefined) {\n\tconst [width, height] = size?.split(\"x\") ?? 
[undefined, undefined];\n\n\treturn {\n\t\theight: parseInteger(height),\n\t\twidth: parseInteger(width),\n\t};\n}\n\nfunction parseInteger(value?: string) {\n\tif (value === \"\" || !value) return undefined;\n\tconst number = Number(value);\n\treturn Number.isInteger(number) ? number : undefined;\n}\n\n/**\n * Convert various output types from binding.run() to Uint8Array.\n * Workers AI image models return different types depending on the runtime:\n * - ReadableStream<Uint8Array> (most common in workerd)\n * - Uint8Array / ArrayBuffer (direct binary)\n * - Response (needs .arrayBuffer())\n * - { image: string } with base64 data\n */\nasync function toUint8Array(output: unknown): Promise<Uint8Array> {\n\tif (output instanceof Uint8Array) {\n\t\treturn output;\n\t}\n\tif (output instanceof ArrayBuffer) {\n\t\treturn new Uint8Array(output);\n\t}\n\tif (output instanceof ReadableStream) {\n\t\tconst reader = (output as ReadableStream<Uint8Array>).getReader();\n\t\tconst chunks: Uint8Array[] = [];\n\t\tlet totalLength = 0;\n\t\twhile (true) {\n\t\t\tconst { done, value } = await reader.read();\n\t\t\tif (done) break;\n\t\t\tchunks.push(value);\n\t\t\ttotalLength += value.length;\n\t\t}\n\t\tconst result = new Uint8Array(totalLength);\n\t\tlet offset = 0;\n\t\tfor (const chunk of chunks) {\n\t\t\tresult.set(chunk, offset);\n\t\t\toffset += chunk.length;\n\t\t}\n\t\treturn result;\n\t}\n\t// Response object (e.g., from REST shim)\n\tif (output instanceof Response) {\n\t\treturn new Uint8Array(await output.arrayBuffer());\n\t}\n\t// Object with binary-like properties\n\tif (typeof output === \"object\" && output !== null) {\n\t\tconst obj = output as Record<string, unknown>;\n\t\t// { image: base64string }\n\t\tif (typeof obj.image === \"string\") {\n\t\t\treturn Uint8Array.from(atob(obj.image), (c) => c.charCodeAt(0));\n\t\t}\n\t\t// { data: Uint8Array }\n\t\tif (obj.data instanceof Uint8Array) {\n\t\t\treturn obj.data;\n\t\t}\n\t\t// { data: ArrayBuffer }\n\t\tif 
(obj.data instanceof ArrayBuffer) {\n\t\t\treturn new Uint8Array(obj.data);\n\t\t}\n\t\t// Try to get a body if it looks response-like\n\t\tif (typeof obj.arrayBuffer === \"function\") {\n\t\t\treturn new Uint8Array(await (obj as unknown as Response).arrayBuffer());\n\t\t}\n\t}\n\tthrow new Error(\n\t\t`Unexpected output type from image model. Got ${Object.prototype.toString.call(output)} with keys: ${\n\t\t\ttypeof output === \"object\" && output !== null\n\t\t\t\t? JSON.stringify(Object.keys(output))\n\t\t\t\t: \"N/A\"\n\t\t}`,\n\t);\n}\n","import type { TranscriptionModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAITranscriptionSettings } from \"./workersai-transcription-settings\";\nimport type { TranscriptionModels } from \"./workersai-models\";\nimport { createRunBinary, type CreateRunConfig } from \"./utils\";\n\nexport type WorkersAITranscriptionConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n\t/**\n\t * Whether the binding is a real `env.AI` binding (true) or a REST shim (false).\n\t * Nova-3 uses different upload paths depending on this.\n\t */\n\tisBinding: boolean;\n\t/**\n\t * REST credentials, only set when `isBinding` is false.\n\t * Needed for Nova-3 which requires binary upload, bypassing the JSON-based REST shim.\n\t */\n\tcredentials?: CreateRunConfig;\n};\n\n/**\n * Workers AI transcription model implementing the AI SDK's `TranscriptionModelV3` interface.\n *\n * Supports:\n * - Whisper models (`@cf/openai/whisper`, `whisper-tiny-en`, `whisper-large-v3-turbo`)\n * - Deepgram Nova-3 (`@cf/deepgram/nova-3`) — uses a different input/output format\n */\nexport class WorkersAITranscriptionModel implements TranscriptionModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: TranscriptionModels,\n\t\treadonly settings: WorkersAITranscriptionSettings,\n\t\treadonly config: 
WorkersAITranscriptionConfig,\n\t) {}\n\n\tasync doGenerate(\n\t\toptions: Parameters<TranscriptionModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<TranscriptionModelV3[\"doGenerate\"]>>> {\n\t\tconst { audio, mediaType, abortSignal } = options;\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\t// The AI SDK always converts audio to Uint8Array via\n\t\t// convertDataContentToUint8Array before calling doGenerate.\n\t\tconst audioBytes =\n\t\t\ttypeof audio === \"string\"\n\t\t\t\t? Uint8Array.from(atob(audio), (c) => c.charCodeAt(0))\n\t\t\t\t: audio;\n\n\t\tconst isNova3 = this.modelId === \"@cf/deepgram/nova-3\";\n\n\t\tlet rawResult: unknown;\n\n\t\tif (isNova3) {\n\t\t\trawResult = await this.runNova3(audioBytes, mediaType, abortSignal);\n\t\t} else {\n\t\t\trawResult = await this.runWhisper(audioBytes, abortSignal);\n\t\t}\n\n\t\tconst result = rawResult as Record<string, unknown>;\n\n\t\t// Normalize response into AI SDK format\n\t\tif (isNova3) {\n\t\t\treturn this.normalizeNova3Response(result, warnings);\n\t\t}\n\t\treturn this.normalizeWhisperResponse(result, warnings);\n\t}\n\n\t// ---------------------------------------------------------------------------\n\t// Whisper models\n\t// ---------------------------------------------------------------------------\n\n\tprivate async runWhisper(audioBytes: Uint8Array, abortSignal?: AbortSignal): Promise<unknown> {\n\t\t// whisper-large-v3-turbo requires base64 audio (both binding and REST).\n\t\t// Other Whisper models accept number[].\n\t\tconst modelStr = this.modelId as string;\n\t\tconst audio =\n\t\t\tmodelStr === \"@cf/openai/whisper-large-v3-turbo\"\n\t\t\t\t? 
uint8ArrayToBase64(audioBytes)\n\t\t\t\t: Array.from(audioBytes);\n\n\t\tconst inputs: Record<string, unknown> = { audio };\n\n\t\tif (this.settings.language) {\n\t\t\tinputs.language = this.settings.language;\n\t\t}\n\t\tif (this.settings.prompt) {\n\t\t\tinputs.initial_prompt = this.settings.prompt;\n\t\t}\n\n\t\treturn this.config.binding.run(\n\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\tinputs as Parameters<Ai[\"run\"]>[1],\n\t\t\t{ gateway: this.config.gateway, signal: abortSignal } as AiOptions,\n\t\t);\n\t}\n\n\tprivate normalizeWhisperResponse(\n\t\traw: Record<string, unknown>,\n\t\twarnings: Array<SharedV3Warning>,\n\t): Awaited<ReturnType<TranscriptionModelV3[\"doGenerate\"]>> {\n\t\tconst text = (raw.text as string) ?? \"\";\n\n\t\t// Build segments from Whisper's various formats\n\t\tconst segments: Array<{ text: string; startSecond: number; endSecond: number }> = [];\n\n\t\t// whisper-large-v3-turbo returns segments[]\n\t\tif (raw.segments && Array.isArray(raw.segments)) {\n\t\t\tfor (const seg of raw.segments) {\n\t\t\t\tsegments.push({\n\t\t\t\t\ttext: ((seg as Record<string, unknown>).text as string) ?? \"\",\n\t\t\t\t\tstartSecond: ((seg as Record<string, unknown>).start as number) ?? 0,\n\t\t\t\t\tendSecond: ((seg as Record<string, unknown>).end as number) ?? 0,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t\t// basic whisper returns words[]\n\t\telse if (raw.words && Array.isArray(raw.words)) {\n\t\t\tfor (const w of raw.words) {\n\t\t\t\tsegments.push({\n\t\t\t\t\ttext: ((w as Record<string, unknown>).word as string) ?? \"\",\n\t\t\t\t\tstartSecond: ((w as Record<string, unknown>).start as number) ?? 0,\n\t\t\t\t\tendSecond: ((w as Record<string, unknown>).end as number) ?? 0,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\t// Language and duration from transcription_info (v3-turbo)\n\t\tconst info = raw.transcription_info as Record<string, unknown> | undefined;\n\n\t\treturn {\n\t\t\ttext,\n\t\t\tsegments,\n\t\t\tlanguage: (info?.language as string) ?? 
undefined,\n\t\t\tdurationInSeconds: (info?.duration as number) ?? undefined,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n\n\t// ---------------------------------------------------------------------------\n\t// Deepgram Nova-3\n\t// ---------------------------------------------------------------------------\n\n\tprivate async runNova3(\n\t\taudioBytes: Uint8Array,\n\t\tmediaType: string,\n\t\tabortSignal?: AbortSignal,\n\t): Promise<unknown> {\n\t\tif (this.config.isBinding) {\n\t\t\t// Binding path: Nova-3 accepts { audio: { body: base64, contentType } }\n\t\t\treturn this.config.binding.run(\n\t\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\t\t{\n\t\t\t\t\taudio: { body: uint8ArrayToBase64(audioBytes), contentType: mediaType },\n\t\t\t\t} as Parameters<Ai[\"run\"]>[1],\n\t\t\t\t{ gateway: this.config.gateway, signal: abortSignal } as AiOptions,\n\t\t\t);\n\t\t}\n\n\t\t// REST path: Nova-3 requires raw binary with a Content-Type header,\n\t\t// not JSON. The createRun shim always sends JSON, so we bypass it\n\t\t// and use createRunBinary which sends the audio bytes directly.\n\t\tif (!this.config.credentials) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Nova-3 transcription via REST requires credentials in the config. 
\" +\n\t\t\t\t\t\"This is a bug — credentials should have been set by createWorkersAI.\",\n\t\t\t);\n\t\t}\n\t\treturn createRunBinary(\n\t\t\tthis.config.credentials,\n\t\t\tthis.modelId,\n\t\t\taudioBytes,\n\t\t\tmediaType,\n\t\t\tabortSignal,\n\t\t);\n\t}\n\n\tprivate normalizeNova3Response(\n\t\traw: Record<string, unknown>,\n\t\twarnings: Array<SharedV3Warning>,\n\t): Awaited<ReturnType<TranscriptionModelV3[\"doGenerate\"]>> {\n\t\t// Nova-3 format: { results: { channels: [{ alternatives: [{ transcript, words }] }] } }\n\t\tconst results = raw.results as Record<string, unknown> | undefined;\n\t\tconst channels = results?.channels as\n\t\t\t| Array<{\n\t\t\t\t\talternatives?: Array<{\n\t\t\t\t\t\ttranscript?: string;\n\t\t\t\t\t\tconfidence?: number;\n\t\t\t\t\t\twords?: Array<{ word: string; start: number; end: number }>;\n\t\t\t\t\t}>;\n\t\t\t }>\n\t\t\t| undefined;\n\t\tconst alt = channels?.[0]?.alternatives?.[0];\n\n\t\tconst text = alt?.transcript ?? \"\";\n\t\tconst segments: Array<{ text: string; startSecond: number; endSecond: number }> = [];\n\n\t\tif (alt?.words && Array.isArray(alt.words)) {\n\t\t\tfor (const w of alt.words) {\n\t\t\t\tsegments.push({\n\t\t\t\t\ttext: w.word ?? \"\",\n\t\t\t\t\tstartSecond: w.start ?? 0,\n\t\t\t\t\tendSecond: w.end ?? 
0,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\ttext,\n\t\t\tsegments,\n\t\t\tlanguage: undefined,\n\t\t\tdurationInSeconds: undefined,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\nfunction uint8ArrayToBase64(bytes: Uint8Array): string {\n\tlet binary = \"\";\n\tfor (let i = 0; i < bytes.length; i++) {\n\t\tbinary += String.fromCharCode(bytes[i]!);\n\t}\n\treturn btoa(binary);\n}\n","import type { SpeechModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAISpeechSettings } from \"./workersai-speech-settings\";\nimport type { SpeechModels } from \"./workersai-models\";\n\nexport type WorkersAISpeechConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\n/**\n * Workers AI speech (text-to-speech) model implementing the AI SDK's `SpeechModelV3` interface.\n *\n * Currently supports Deepgram Aura-1 (`@cf/deepgram/aura-1`).\n * The model accepts `{ text, voice?, speed? 
}` and returns raw audio bytes.\n */\nexport class WorkersAISpeechModel implements SpeechModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: SpeechModels,\n\t\treadonly settings: WorkersAISpeechSettings,\n\t\treadonly config: WorkersAISpeechConfig,\n\t) {}\n\n\tasync doGenerate(\n\t\toptions: Parameters<SpeechModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<SpeechModelV3[\"doGenerate\"]>>> {\n\t\tconst { text, voice, speed, abortSignal } = options;\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\tif (options.instructions) {\n\t\t\twarnings.push({\n\t\t\t\tdetails: \"Workers AI TTS models do not support instructions.\",\n\t\t\t\tfeature: \"instructions\",\n\t\t\t\ttype: \"unsupported\",\n\t\t\t});\n\t\t}\n\n\t\tif (options.outputFormat) {\n\t\t\twarnings.push({\n\t\t\t\tdetails:\n\t\t\t\t\t\"Workers AI TTS models do not support output format selection. Audio is returned as MP3.\",\n\t\t\t\tfeature: \"outputFormat\",\n\t\t\t\ttype: \"unsupported\",\n\t\t\t});\n\t\t}\n\n\t\t// Build inputs for Workers AI TTS\n\t\tconst inputs: Record<string, unknown> = { text };\n\t\tif (voice) inputs.voice = voice;\n\t\tif (speed != null) inputs.speed = speed;\n\n\t\tconst result = await this.config.binding.run(\n\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\tinputs as Parameters<Ai[\"run\"]>[1],\n\t\t\t{\n\t\t\t\tgateway: this.config.gateway,\n\t\t\t\tsignal: abortSignal,\n\t\t\t\t// returnRawResponse prevents the createRun REST shim from trying\n\t\t\t\t// to JSON.parse binary audio. 
Real env.AI bindings don't recognize\n\t\t\t\t// this option — it has no effect, and the binding returns the normal\n\t\t\t\t// binary result (Uint8Array/ReadableStream) which toUint8Array handles.\n\t\t\t\treturnRawResponse: true,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\t// Workers AI TTS returns binary audio in various formats:\n\t\t// - Binding: Uint8Array, ArrayBuffer, ReadableStream, or { audio: base64 }\n\t\t// - REST (returnRawResponse): Response object\n\t\tconst audio = await toUint8Array(result);\n\n\t\treturn {\n\t\t\taudio,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Convert various output types from binding.run() to Uint8Array.\n * Workers AI TTS models return different types depending on the runtime:\n * - Response (from REST shim with returnRawResponse)\n * - ReadableStream<Uint8Array> (most common in workerd)\n * - Uint8Array / ArrayBuffer (direct binary)\n * - { audio: string } with base64 data\n */\nasync function toUint8Array(output: unknown): Promise<Uint8Array> {\n\t// Response object (from REST shim with returnRawResponse: true)\n\tif (output instanceof Response) {\n\t\treturn new Uint8Array(await output.arrayBuffer());\n\t}\n\tif (output instanceof Uint8Array) {\n\t\treturn output;\n\t}\n\tif (output instanceof ArrayBuffer) {\n\t\treturn new Uint8Array(output);\n\t}\n\tif (output instanceof ReadableStream) {\n\t\tconst reader = (output as ReadableStream<Uint8Array>).getReader();\n\t\tconst chunks: Uint8Array[] = [];\n\t\tlet totalLength = 0;\n\t\twhile (true) {\n\t\t\tconst { done, value } = await reader.read();\n\t\t\tif (done) break;\n\t\t\tchunks.push(value);\n\t\t\ttotalLength += value.length;\n\t\t}\n\t\tconst result = new Uint8Array(totalLength);\n\t\tlet 
offset = 0;\n\t\tfor (const chunk of chunks) {\n\t\t\tresult.set(chunk, offset);\n\t\t\toffset += chunk.length;\n\t\t}\n\t\treturn result;\n\t}\n\t// Object with audio property (e.g. { audio: base64string })\n\tif (typeof output === \"object\" && output !== null) {\n\t\tconst obj = output as Record<string, unknown>;\n\t\tif (typeof obj.audio === \"string\") {\n\t\t\treturn Uint8Array.from(atob(obj.audio), (c) => c.charCodeAt(0));\n\t\t}\n\t}\n\tthrow new Error(\n\t\t`Unexpected output type from TTS model. Got ${Object.prototype.toString.call(output)}`,\n\t);\n}\n","import type { RerankingModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAIRerankingSettings } from \"./workersai-reranking-settings\";\nimport type { RerankingModels } from \"./workersai-models\";\n\nexport type WorkersAIRerankingConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\n/**\n * Workers AI reranking model implementing the AI SDK's `RerankingModelV3` interface.\n *\n * Supports BGE reranker models (`@cf/baai/bge-reranker-base`, `bge-reranker-v2-m3`).\n *\n * Workers AI reranking API:\n * - Input: `{ query, contexts: [{ text }], top_k? 
}`\n * - Output: `{ response: [{ id, score }] }`\n */\nexport class WorkersAIRerankingModel implements RerankingModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: RerankingModels,\n\t\treadonly settings: WorkersAIRerankingSettings,\n\t\treadonly config: WorkersAIRerankingConfig,\n\t) {}\n\n\tasync doRerank(\n\t\toptions: Parameters<RerankingModelV3[\"doRerank\"]>[0],\n\t): Promise<Awaited<ReturnType<RerankingModelV3[\"doRerank\"]>>> {\n\t\tconst { documents, query, topN, abortSignal } = options;\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\t// Convert AI SDK documents to Workers AI contexts format\n\t\tconst contexts = documentsToContexts(documents, warnings);\n\n\t\t// Build Workers AI inputs\n\t\tconst inputs: Record<string, unknown> = {\n\t\t\tquery,\n\t\t\tcontexts,\n\t\t};\n\t\tif (topN != null) {\n\t\t\tinputs.top_k = topN;\n\t\t}\n\n\t\tconst result = (await this.config.binding.run(\n\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\tinputs as Parameters<Ai[\"run\"]>[1],\n\t\t\t{ gateway: this.config.gateway, signal: abortSignal } as AiOptions,\n\t\t)) as Record<string, unknown>;\n\n\t\t// Workers AI returns { response: [{ id, score }] }\n\t\tconst response = result.response as Array<{ id?: number; score?: number }> | undefined;\n\n\t\tconst ranking = (response ?? [])\n\t\t\t.map((item) => ({\n\t\t\t\tindex: item.id ?? 0,\n\t\t\t\trelevanceScore: item.score ?? 
0,\n\t\t\t}))\n\t\t\t.sort((a, b) => b.relevanceScore - a.relevanceScore);\n\n\t\treturn {\n\t\t\tranking,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Convert AI SDK document format to Workers AI contexts format.\n *\n * AI SDK supports two document types:\n * - `{ type: 'text', values: string[] }` — direct text strings\n * - `{ type: 'object', values: JSONObject[] }` — JSON objects (stringified for Workers AI)\n */\nfunction documentsToContexts(\n\tdocuments: Parameters<RerankingModelV3[\"doRerank\"]>[0][\"documents\"],\n\twarnings: Array<SharedV3Warning>,\n): Array<{ text: string }> {\n\tif (documents.type === \"text\") {\n\t\treturn documents.values.map((text) => ({ text }));\n\t}\n\n\t// Object documents: stringify each object for the reranker\n\twarnings.push({\n\t\tmessage: \"Workers AI reranker expects text contexts. JSON objects have been stringified.\",\n\t\ttype: \"other\",\n\t});\n\n\treturn documents.values.map((obj) => ({ text: JSON.stringify(obj) }));\n}\n","import { AISearchChatLanguageModel } from \"./aisearch-chat-language-model\";\n\n/**\n * @deprecated Use `AISearchChatLanguageModel` instead. 
AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport class AutoRAGChatLanguageModel extends AISearchChatLanguageModel {}\n","import { AISearchChatLanguageModel } from \"./aisearch-chat-language-model\";\nimport type { AISearchChatSettings } from \"./aisearch-chat-settings\";\nimport { createRun } from \"./utils\";\nimport {\n\tWorkersAIEmbeddingModel,\n\ttype WorkersAIEmbeddingSettings,\n} from \"./workersai-embedding-model\";\nimport { WorkersAIChatLanguageModel } from \"./workersai-chat-language-model\";\nimport type { WorkersAIChatSettings } from \"./workersai-chat-settings\";\nimport { WorkersAIImageModel } from \"./workersai-image-model\";\nimport type { WorkersAIImageSettings } from \"./workersai-image-settings\";\nimport { WorkersAITranscriptionModel } from \"./workersai-transcription-model\";\nimport type { WorkersAITranscriptionSettings } from \"./workersai-transcription-settings\";\nimport { WorkersAISpeechModel } from \"./workersai-speech-model\";\nimport type { WorkersAISpeechSettings } from \"./workersai-speech-settings\";\nimport { WorkersAIRerankingModel } from \"./workersai-reranking-model\";\nimport type { WorkersAIRerankingSettings } from \"./workersai-reranking-settings\";\nimport type {\n\tEmbeddingModels,\n\tImageGenerationModels,\n\tTextGenerationModels,\n\tTranscriptionModels,\n\tSpeechModels,\n\tRerankingModels,\n} from \"./workersai-models\";\n\n// Re-export deprecated AutoRAG aliases\nexport { AutoRAGChatLanguageModel } from \"./autorag-chat-language-model\";\nexport type { AutoRAGChatSettings } from \"./autorag-chat-settings\";\n\n// Export new AI Search types\nexport { AISearchChatLanguageModel } from \"./aisearch-chat-language-model\";\nexport type { AISearchChatSettings } from \"./aisearch-chat-settings\";\n\n// Export transcription and speech types\nexport { WorkersAITranscriptionModel } from \"./workersai-transcription-model\";\nexport type { WorkersAITranscriptionSettings } from 
\"./workersai-transcription-settings\";\nexport { WorkersAISpeechModel } from \"./workersai-speech-model\";\nexport type { WorkersAISpeechSettings } from \"./workersai-speech-settings\";\nexport { WorkersAIRerankingModel } from \"./workersai-reranking-model\";\nexport type { WorkersAIRerankingSettings } from \"./workersai-reranking-settings\";\n\n// ---------------------------------------------------------------------------\n// Workers AI\n// ---------------------------------------------------------------------------\n\nexport type WorkersAISettings = (\n\t| {\n\t\t\t/**\n\t\t\t * Provide a Cloudflare AI binding.\n\t\t\t */\n\t\t\tbinding: Ai;\n\n\t\t\t/**\n\t\t\t * Credentials must be absent when a binding is given.\n\t\t\t */\n\t\t\taccountId?: never;\n\t\t\tapiKey?: never;\n\t }\n\t| {\n\t\t\t/**\n\t\t\t * Provide Cloudflare API credentials directly. Must be used if a binding is not specified.\n\t\t\t */\n\t\t\taccountId: string;\n\t\t\tapiKey: string;\n\t\t\t/**\n\t\t\t * Both binding must be absent if credentials are used directly.\n\t\t\t */\n\t\t\tbinding?: never;\n\n\t\t\t/**\n\t\t\t * Custom fetch implementation. You can use it as a middleware to\n\t\t\t * intercept requests, or to provide a custom fetch implementation\n\t\t\t * for e.g. testing. 
Only available in credentials mode.\n\t\t\t */\n\t\t\tfetch?: typeof globalThis.fetch;\n\t }\n) & {\n\t/**\n\t * Optionally specify a gateway.\n\t */\n\tgateway?: GatewayOptions;\n};\n\nexport interface WorkersAI {\n\t(modelId: TextGenerationModels, settings?: WorkersAIChatSettings): WorkersAIChatLanguageModel;\n\t/**\n\t * Creates a model for text generation.\n\t **/\n\tchat(\n\t\tmodelId: TextGenerationModels,\n\t\tsettings?: WorkersAIChatSettings,\n\t): WorkersAIChatLanguageModel;\n\n\tembedding(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings?: WorkersAIEmbeddingSettings,\n\t): WorkersAIEmbeddingModel;\n\n\ttextEmbedding(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings?: WorkersAIEmbeddingSettings,\n\t): WorkersAIEmbeddingModel;\n\n\ttextEmbeddingModel(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings?: WorkersAIEmbeddingSettings,\n\t): WorkersAIEmbeddingModel;\n\n\t/**\n\t * Creates a model for image generation.\n\t **/\n\timage(modelId: ImageGenerationModels, settings?: WorkersAIImageSettings): WorkersAIImageModel;\n\timageModel(\n\t\tmodelId: ImageGenerationModels,\n\t\tsettings?: WorkersAIImageSettings,\n\t): WorkersAIImageModel;\n\n\t/**\n\t * Creates a model for speech-to-text transcription.\n\t **/\n\ttranscription(\n\t\tmodelId: TranscriptionModels,\n\t\tsettings?: WorkersAITranscriptionSettings,\n\t): WorkersAITranscriptionModel;\n\ttranscriptionModel(\n\t\tmodelId: TranscriptionModels,\n\t\tsettings?: WorkersAITranscriptionSettings,\n\t): WorkersAITranscriptionModel;\n\n\t/**\n\t * Creates a model for text-to-speech synthesis.\n\t **/\n\tspeech(modelId: SpeechModels, settings?: WorkersAISpeechSettings): WorkersAISpeechModel;\n\tspeechModel(modelId: SpeechModels, settings?: WorkersAISpeechSettings): WorkersAISpeechModel;\n\n\t/**\n\t * Creates a model for document reranking.\n\t **/\n\treranking(\n\t\tmodelId: RerankingModels,\n\t\tsettings?: WorkersAIRerankingSettings,\n\t): WorkersAIRerankingModel;\n\trerankingModel(\n\t\tmodelId: 
RerankingModels,\n\t\tsettings?: WorkersAIRerankingSettings,\n\t): WorkersAIRerankingModel;\n}\n\n/**\n * Create a Workers AI provider instance.\n */\nexport function createWorkersAI(options: WorkersAISettings): WorkersAI {\n\tif (!options.binding && !(\"accountId\" in options && \"apiKey\" in options)) {\n\t\tthrow new Error(\n\t\t\t\"Invalid Workers AI configuration: you must provide either a binding (e.g. { binding: env.AI }) \" +\n\t\t\t\t\"or credentials ({ accountId, apiKey }).\",\n\t\t);\n\t}\n\n\tlet binding: Ai;\n\tconst isBinding = !!options.binding;\n\n\tif (options.binding) {\n\t\tbinding = options.binding;\n\t} else {\n\t\tconst { accountId, apiKey } = options;\n\t\tbinding = {\n\t\t\trun: createRun({ accountId, apiKey, fetch: options.fetch }),\n\t\t} as Ai;\n\t}\n\n\tconst createChatModel = (modelId: TextGenerationModels, settings: WorkersAIChatSettings = {}) =>\n\t\tnew WorkersAIChatLanguageModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.chat\",\n\t\t\tisBinding,\n\t\t});\n\n\tconst createImageModel = (\n\t\tmodelId: ImageGenerationModels,\n\t\tsettings: WorkersAIImageSettings = {},\n\t) =>\n\t\tnew WorkersAIImageModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.image\",\n\t\t});\n\tconst createEmbeddingModel = (\n\t\tmodelId: EmbeddingModels,\n\t\tsettings: WorkersAIEmbeddingSettings = {},\n\t) =>\n\t\tnew WorkersAIEmbeddingModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.embedding\",\n\t\t});\n\n\tconst createTranscriptionModel = (\n\t\tmodelId: TranscriptionModels,\n\t\tsettings: WorkersAITranscriptionSettings = {},\n\t) =>\n\t\tnew WorkersAITranscriptionModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.transcription\",\n\t\t\tisBinding,\n\t\t\tcredentials:\n\t\t\t\t!isBinding && \"accountId\" in options\n\t\t\t\t\t? 
{ accountId: options.accountId, apiKey: options.apiKey }\n\t\t\t\t\t: undefined,\n\t\t});\n\n\tconst createSpeechModel = (modelId: SpeechModels, settings: WorkersAISpeechSettings = {}) =>\n\t\tnew WorkersAISpeechModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.speech\",\n\t\t});\n\n\tconst createRerankingModel = (\n\t\tmodelId: RerankingModels,\n\t\tsettings: WorkersAIRerankingSettings = {},\n\t) =>\n\t\tnew WorkersAIRerankingModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.reranking\",\n\t\t});\n\n\tconst provider = (modelId: TextGenerationModels, settings?: WorkersAIChatSettings) => {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\"The WorkersAI model function cannot be called with the new keyword.\");\n\t\t}\n\t\treturn createChatModel(modelId, settings);\n\t};\n\n\tprovider.chat = createChatModel;\n\tprovider.embedding = createEmbeddingModel;\n\tprovider.textEmbedding = createEmbeddingModel;\n\tprovider.textEmbeddingModel = createEmbeddingModel;\n\tprovider.image = createImageModel;\n\tprovider.imageModel = createImageModel;\n\tprovider.transcription = createTranscriptionModel;\n\tprovider.transcriptionModel = createTranscriptionModel;\n\tprovider.speech = createSpeechModel;\n\tprovider.speechModel = createSpeechModel;\n\tprovider.reranking = createRerankingModel;\n\tprovider.rerankingModel = createRerankingModel;\n\n\treturn provider;\n}\n\n// ---------------------------------------------------------------------------\n// AI Search (formerly AutoRAG)\n// ---------------------------------------------------------------------------\n\nexport type AISearchSettings = {\n\tbinding: AutoRAG;\n};\n\nexport interface AISearchProvider {\n\t(settings?: AISearchChatSettings): AISearchChatLanguageModel;\n\t/**\n\t * Creates a model for text generation.\n\t **/\n\tchat(settings?: AISearchChatSettings): AISearchChatLanguageModel;\n}\n\n/**\n * Create an AI Search 
provider instance.\n *\n * AI Search (formerly AutoRAG) is Cloudflare's managed search service.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport function createAISearch(\n\toptions: AISearchSettings,\n\t/** @internal */\n\tproviderName = \"aisearch.chat\",\n): AISearchProvider {\n\tconst binding = options.binding;\n\n\tconst createChatModel = (settings: AISearchChatSettings = {}) =>\n\t\tnew AISearchChatLanguageModel(\"@cf/meta/llama-3.3-70b-instruct-fp8-fast\", settings, {\n\t\t\tbinding,\n\t\t\tprovider: providerName,\n\t\t});\n\n\tconst provider = (settings?: AISearchChatSettings) => {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\"The AISearch model function cannot be called with the new keyword.\");\n\t\t}\n\t\treturn createChatModel(settings);\n\t};\n\n\tprovider.chat = createChatModel;\n\n\treturn provider;\n}\n\n// ---------------------------------------------------------------------------\n// Deprecated AutoRAG aliases\n// ---------------------------------------------------------------------------\n\n/**\n * @deprecated Use `AISearchSettings` instead. AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport type AutoRAGSettings = AISearchSettings;\n\n/**\n * @deprecated Use `AISearchProvider` instead. AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport type AutoRAGProvider = AISearchProvider;\n\nlet autoRAGWarned = false;\n\n/**\n * @deprecated Use `createAISearch` instead. AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport function createAutoRAG(options: AISearchSettings): AISearchProvider {\n\tif (!autoRAGWarned) {\n\t\tautoRAGWarned = true;\n\t\tconsole.warn(\n\t\t\t\"[workers-ai-provider] createAutoRAG is deprecated. Use createAISearch instead. \" +\n\t\t\t\t\"AutoRAG has been renamed to AI Search. 
\" +\n\t\t\t\t\"See https://developers.cloudflare.com/ai-search/\",\n\t\t);\n\t}\n\treturn createAISearch(options, \"autorag.chat\");\n}\n"],"mappings":";;;;;;;;;;;AAWA,SAASA,eAAa,MAAqD;AAC1E,KAAI,gBAAgB,WACnB,QAAO;AAGR,KAAI,OAAO,SAAS,UAAU;EAC7B,IAAI,SAAS;AACb,MAAI,OAAO,WAAW,QAAQ,EAAE;GAC/B,MAAM,aAAa,OAAO,QAAQ,IAAI;AACtC,OAAI,cAAc,EACjB,UAAS,OAAO,MAAM,aAAa,EAAE;;EAGvC,MAAM,eAAe,KAAK,OAAO;EACjC,MAAM,QAAQ,IAAI,WAAW,aAAa,OAAO;AACjD,OAAK,IAAI,IAAI,GAAG,IAAI,aAAa,QAAQ,IACxC,OAAM,KAAK,aAAa,WAAW,EAAE;AAEtC,SAAO;;AAGR,KAAI,gBAAgB,IACnB,OAAM,IAAI,MACT,kHAEA;AAGF,QAAO;;AAGR,SAASC,qBAAmB,OAA2B;CACtD,IAAI,SAAS;CACb,MAAM,YAAY;AAClB,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;EACjD,MAAM,QAAQ,MAAM,SAAS,GAAG,KAAK,IAAI,IAAI,WAAW,MAAM,OAAO,CAAC;AACtE,YAAU,OAAO,aAAa,GAAG,MAAM;;AAExC,QAAO,KAAK,OAAO;;AAGpB,SAAgB,+BAA+B,QAE7C;CACD,MAAM,WAAgC,EAAE;AAExC,MAAK,MAAM,EAAE,MAAM,aAAa,OAC/B,SAAQ,MAAR;EACC,KAAK;AACJ,YAAS,KAAK;IAAE;IAAS,MAAM;IAAU,CAAC;AAC1C;EAGD,KAAK,QAAQ;GACZ,MAAM,YAAsB,EAAE;GAC9B,MAAM,aAAqE,EAAE;AAE7E,QAAK,MAAM,QAAQ,QAClB,SAAQ,KAAK,MAAb;IACC,KAAK;AACJ,eAAU,KAAK,KAAK,KAAK;AACzB;IAED,KAAK,QAAQ;KACZ,MAAM,aAAaD,eAAa,KAAK,KAAK;AAC1C,SAAI,WACH,YAAW,KAAK;MACf,OAAO;MACP,WAAW,KAAK;MAChB,CAAC;AAEH;;;AAKH,OAAI,WAAW,SAAS,GAAG;IAC1B,MAAM,eAAuC,EAAE;AAC/C,QAAI,UAAU,SAAS,EACtB,cAAa,KAAK;KAAE,MAAM;KAAQ,MAAM,UAAU,KAAK,KAAK;KAAE,CAAC;AAEhE,SAAK,MAAM,OAAO,YAAY;KAC7B,MAAM,SAASC,qBAAmB,IAAI,MAAM;KAC5C,MAAM,YAAY,IAAI,aAAa;AACnC,kBAAa,KAAK;MACjB,MAAM;MACN,WAAW,EAAE,KAAK,QAAQ,UAAU,UAAU,UAAU;MACxD,CAAC;;AAEH,aAAS,KAAK;KAAE,SAAS;KAAc,MAAM;KAAQ,CAAC;SAEtD,UAAS,KAAK;IAAE,SAAS,UAAU,KAAK,KAAK;IAAE,MAAM;IAAQ,CAAC;AAG/D;;EAGD,KAAK,aAAa;GACjB,IAAI,OAAO;GACX,IAAI,YAAY;GAChB,MAAM,YAID,EAAE;AAEP,QAAK,MAAM,QAAQ,QAClB,SAAQ,KAAK,MAAb;IACC,KAAK;AACJ,aAAQ,KAAK;AACb;IAGD,KAAK;AAOJ,kBAAa,KAAK;AAClB;IAGD,KAAK,OAEJ;IAGD,KAAK;AACJ,eAAU,KAAK;MACd,UAAU;OACT,WAAW,KAAK,UAAU,KAAK,MAAM;OACrC,MAAM,KAAK;OACX;MACD,IAAI,KAAK;MACT,MAAM;MACN,CAAC;AACF;IAGD,KAAK,cAEJ;IAGD,QAEC,OAAM,IAAI,MACT,0BAFuB,KAEyC,OAChE;;AAKJ,YAAS,KAAK;IACb,SAAS
;IACT,MAAM;IACN,GAAI,YAAY,EAAE,WAAW,GAAG,EAAE;IAClC,YACC,UAAU,SAAS,IAChB,UAAU,KAAK,EAAE,UAAU,EAAE,MAAM,WAAW,QAAQ,UAAU;KAChE,UAAU;MAAE,WAAW;MAAM;MAAM;KACnC;KACA,MAAM;KACN,EAAE,GACF,KAAA;IACJ,CAAC;AAEF;;EAGD,KAAK;AACJ,QAAK,MAAM,gBAAgB,QAC1B,KAAI,aAAa,SAAS,eAAe;IACxC,MAAM,SAAS,aAAa;IAC5B,IAAI;AACJ,YAAQ,OAAO,MAAf;KACC,KAAK;KACL,KAAK;AACJ,gBAAU,OAAO;AACjB;KACD,KAAK;KACL,KAAK;AACJ,gBAAU,KAAK,UAAU,OAAO,MAAM;AACtC;KACD,KAAK;AACJ,gBAAU,OAAO,SACd,0BAA0B,OAAO,WACjC;AACH;KACD,KAAK;AACJ,gBAAU,OAAO,MACf,QACC,MACA,EAAE,SAAS,OACZ,CACA,KAAK,MAAM,EAAE,KAAK,CAClB,KAAK,KAAK;AACZ;KACD;AACC,gBAAU;AACV;;AAEF,aAAS,KAAK;KACb;KACA,MAAM,aAAa;KACnB,cAAc,aAAa;KAC3B,MAAM;KACN,CAAC;;AAIJ;EAGD,QAEC,OAAM,IAAI,MAAM,qBADQ,OAC+B;;AAK1D,QAAO,EAAE,UAAU;;;;;;;;ACpOpB,SAAgB,kBACf,QACuB;CACvB,MAAM,QACL,OAGC,SAAS;EACV,mBAAmB;EACnB,eAAe;EACf;CAED,MAAM,eAAe,MAAM,iBAAiB;CAC5C,MAAM,mBAAmB,MAAM,qBAAqB;AAEpD,QAAO;EACN,cAAc;GACb,OAAO;GACP,MAAM,KAAA;GACN,WAAW,KAAA;GACX;EACD,aAAa;GACZ,OAAO;GACP,SAAS,KAAA;GACT,WAAW,KAAA;GACX,YAAY,KAAA;GACZ;EACD,KAAK,EAAE,OAAO,eAAe,kBAAkB;EAC/C;;;;;;;;;;;ACzBF,SAAgB,yBACf,wBAC8B;CAC9B,IAAI;AAEJ,KACC,OAAO,2BAA2B,YAClC,2BAA2B,QAC3B,2BAA2B,KAAA,EAE3B,gBAAe;UACL,OAAO,2BAA2B,YAAY,2BAA2B,MAAM;EACzF,MAAM,WAAW;EAGjB,MAAM,UAAU,SAAS;AACzB,MAAI,MAAM,QAAQ,QAAQ,IAAI,QAAQ,SAAS,EAC9C,gBAAe,QAAQ,GAAG;WAChB,mBAAmB,SAC7B,gBAAe,SAAS;MAExB,gBAAe,KAAA;OAIhB,gBAAe,KAAA;CAGhB,MAAM,MAAM,gBAAgB;AAE5B,SAAQ,cAAR;EACC,KAAK,OACJ,QAAO;GAAE,SAAS;GAAQ;GAAK;EAChC,KAAK;EACL,KAAK,eACJ,QAAO;GAAE,SAAS;GAAU;GAAK;EAClC,KAAK,aACJ,QAAO;GAAE,SAAS;GAAc;GAAK;EACtC,KAAK,QACJ,QAAO;GAAE,SAAS;GAAS;GAAK;EACjC,KAAK;EACL,KAAK,UACJ,QAAO;GAAE,SAAS;GAAS;GAAK;EACjC,QACC,QAAO;GAAE,SAAS;GAAQ;GAAK;;;;;;;;;ACxClC,SAAgB,mBACf,QACA,UAC4C;CAC5C,IAAI,YAAY;AAChB,QAAO,OAAO,YACb,IAAI,gBAAsE;EACzE,UAAU,OAAO,YAAY;AAC5B,OAAI,CAAC,WAAW;AACf,gBAAY;AACZ,eAAW,QAAQ;KAClB,MAAM;KACI;KACV,CAAC;;AAEH,cAAW,QAAQ,MAAM;;EAE1B,MAAM,YAAY;AACjB,OAAI,CAAC,UACJ,YAAW,QAAQ;IAClB,MAAM;IACI;IACV,CAAC;;EAGJ,CAAC,CACF;;;;;AAMF,SAAS,wBAAwB,IAAsC;CACtE,MAAM,KAAK,GAAG;CACd,MAAM,
OAAO,IAAI,QAAQ,GAAG,QAAQ;CACpC,MAAM,OAAO,IAAI,aAAa,GAAG,aAAa;AAE9C,QAAO,EADI,GAAG,MAAM,SACN,CAAC,SAAS,CAAC,QAAQ,SAAS;;;;;;;;;;;;AAa3C,SAAgB,gBACf,UAC4C;CAC5C,MAAM,YACL,oBAAoB,iBACjB,WACC,SAAS;AAEd,KAAI,CAAC,UACJ,OAAM,IAAI,MAAM,gDAAgD;CAIjE,IAAI,QAA8B;EACjC,cAAc;GAAE,OAAO;GAAG,MAAM,KAAA;GAAW,WAAW,KAAA;GAAW;EACjE,aAAa;GACZ,OAAO;GACP,SAAS,KAAA;GACT,WAAW,KAAA;GACX,YAAY,KAAA;GACZ;EACD,KAAK,EAAE,aAAa,GAAG;EACvB;CACD,IAAI,SAAwB;CAC5B,IAAI,cAA6B;CACjC,IAAI,eAAmD;CACvD,IAAI,eAAe;CACnB,IAAI,kBAAkB;CAMtB,MAAM,kCAAkB,IAAI,KAA6D;AAMzF,QAHkB,UAAU,YAAY,IAAI,YAAY,CAAC,CAGxC,YAChB,IAAI,gBAAmD;EACtD,UAAU,MAAM,YAAY;AAC3B,OAAI,CAAC,QAAQ,SAAS,UAAU;AAC/B,QAAI,SAAS,SAAU,gBAAe;AACtC;;AAGD,qBAAkB;GAClB,IAAI;AACJ,OAAI;AACH,YAAQ,KAAK,MAAM,KAAK;WACjB;AACP,YAAQ,KAAK,oDAAoD,KAAK;AACtE;;AAGD,OAAI,MAAM,MACT,SAAQ,kBAAkB,MAAiD;GAI5E,MAAM,UAAU,MAAM;GAMtB,MAAM,qBAAqB,UAAU,IAAI;GACzC,MAAM,qBAAqB,MAAM;AAEjC,OAAI,sBAAsB,KACzB,gBAAe,yBAAyB,mBAAmB;YACjD,sBAAsB,KAChC,gBAAe,yBAAyB,mBAAmB;GAI5D,MAAM,iBAAiB,MAAM;AAC7B,OAAI,kBAAkB,QAAQ,mBAAmB,IAAI;IACpD,MAAM,eAAe,OAAO,eAAe;AAC3C,QAAI,aAAa,SAAS,GAAG;AAE5B,SAAI,aAAa;AAChB,iBAAW,QAAQ;OAAE,MAAM;OAAiB,IAAI;OAAa,CAAC;AAC9D,oBAAc;;AAEf,SAAI,CAAC,QAAQ;AACZ,eAAS,YAAY;AACrB,iBAAW,QAAQ;OAAE,MAAM;OAAc,IAAI;OAAQ,CAAC;;AAEvD,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI;MACJ,OAAO;MACP,CAAC;;;AAKJ,OAAI,MAAM,QAAQ,MAAM,WAAW,EAAE;AAEpC,QAAI,aAAa;AAChB,gBAAW,QAAQ;MAAE,MAAM;MAAiB,IAAI;MAAa,CAAC;AAC9D,mBAAc;;AAEf,uBAAmB,MAAM,YAAyC,WAAW;;AAI9E,OAAI,UAAU,IAAI,OAAO;IACxB,MAAM,QAAQ,QAAQ,GAAG;IAEzB,MAAM,iBAAkB,MAAM,qBAAqB,MAAM;AAGzD,QAAI,kBAAkB,eAAe,SAAS,GAAG;AAChD,SAAI,CAAC,aAAa;AACjB,oBAAc,YAAY;AAC1B,iBAAW,QAAQ;OAClB,MAAM;OACN,IAAI;OACJ,CAAC;;AAEH,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI;MACJ,OAAO;MACP,CAAC;;IAGH,MAAM,YAAY,MAAM;AACxB,QAAI,aAAa,UAAU,SAAS,GAAG;AAEtC,SAAI,aAAa;AAChB,iBAAW,QAAQ;OAAE,MAAM;OAAiB,IAAI;OAAa,CAAC;AAC9D,oBAAc;;AAEf,SAAI,CAAC,QAAQ;AACZ,eAAS,YAAY;AACrB,iBAAW,QAAQ;OAAE,MAAM;OAAc,IAAI;OAAQ,CAAC;;AAEvD,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI;MACJ,OAAO;MACP,CAAC;;IAGH,MAAM,iBAAiB,MAAM;AAG7B,QAAI,M
AAM,QAAQ,eAAe,EAAE;AAElC,SAAI,aAAa;AAChB,iBAAW,QAAQ;OAAE,MAAM;OAAiB,IAAI;OAAa,CAAC;AAC9D,oBAAc;;AAEf,wBAAmB,gBAAgB,WAAW;;;;EAKjD,MAAM,YAAY;AAEjB,QAAK,MAAM,GAAG,OAAO,iBAAiB;AACrC,eAAW,QAAQ;KAAE,MAAM;KAAkB,IAAI,GAAG;KAAI,CAAC;AAIzD,eAAW,QAAQ;KAClB,MAAM;KACN,YAAY,GAAG;KACf,UAAU,GAAG;KACb,OAAO,GAAG;KACV,CAAC;;AAIH,OAAI,YACH,YAAW,QAAQ;IAAE,MAAM;IAAiB,IAAI;IAAa,CAAC;AAE/D,OAAI,OACH,YAAW,QAAQ;IAAE,MAAM;IAAY,IAAI;IAAQ,CAAC;GAIrD,MAAM,wBACL,CAAC,gBAAgB,mBAAmB,CAAC,eACjC;IACD,SAAS;IACT,KAAK;IACL,GACC,gBAAgB;IAAE,SAAS;IAAQ,KAAK;IAAQ;AAErD,cAAW,QAAQ;IAClB,cAAc;IACd,MAAM;IACN;IACA,CAAC;;EAEH,CAAC,CACF;;;;;;;;;;;;;CAcD,SAAS,mBACR,WACA,YACC;AACD,OAAK,MAAM,MAAM,WAAW;AAC3B,OAAI,wBAAwB,GAAG,CAAE;GAEjC,MAAM,UAAW,GAAG,SAAoB;GACxC,MAAM,KAAK,GAAG;GACd,MAAM,SAAU,IAAI,QAAQ,GAAG,QAAQ;GACvC,MAAM,SAAU,IAAI,aAAa,GAAG,aAAa;GACjD,MAAM,OAAO,GAAG;AAEhB,OAAI,CAAC,gBAAgB,IAAI,QAAQ,EAAE;IAElC,MAAM,KAAK,QAAQ,YAAY;IAC/B,MAAM,WAAW,UAAU;AAC3B,oBAAgB,IAAI,SAAS;KAAE;KAAI;KAAU,MAAM;KAAI,CAAC;AAExD,eAAW,QAAQ;KAClB,MAAM;KACN;KACA;KACA,CAAC;AAGF,QAAI,UAAU,QAAQ,WAAW,IAAI;KACpC,MAAM,QAAQ,OAAO,WAAW,WAAW,SAAS,KAAK,UAAU,OAAO;AAC1E,qBAAgB,IAAI,QAAQ,CAAE,QAAQ;AACtC,gBAAW,QAAQ;MAClB,MAAM;MACN;MACA;MACA,CAAC;;UAEG;IAEN,MAAM,SAAS,gBAAgB,IAAI,QAAQ;AAC3C,QAAI,UAAU,QAAQ,WAAW,IAAI;KACpC,MAAM,QAAQ,OAAO,WAAW,WAAW,SAAS,KAAK,UAAU,OAAO;AAC1E,YAAO,QAAQ;AACf,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI,OAAO;MACX;MACA,CAAC;;;;;;;;;;;AAYP,IAAM,aAAN,cAAyB,gBAAoC;CAC5D,cAAc;EACb,IAAI,SAAS;EACb,MAAM,UAAU,IAAI,aAAa;AAEjC,QAAM;GACL,UAAU,OAAO,YAAY;AAC5B,cAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,MAAM,CAAC;IACjD,MAAM,QAAQ,OAAO,MAAM,KAAK;AAChC,aAAS,MAAM,KAAK,IAAI;AAExB,SAAK,MAAM,QAAQ,OAAO;KACzB,MAAM,UAAU,KAAK,MAAM;AAC3B,SAAI,CAAC,QAAS;AACd,SAAI,QAAQ,WAAW,SAAS,CAC/B,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;cAC1B,QAAQ,WAAW,QAAQ,CACrC,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;;;GAKvC,MAAM,YAAY;AACjB,QAAI,OAAO,MAAM,EAAE;KAClB,MAAM,UAAU,OAAO,MAAM;AAC7B,SAAI,QAAQ,WAAW,SAAS,CAC/B,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;cAC1B,QAAQ,WAAW,QAAQ,CACrC,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;;;GAIvC,CAAC;;;;;;;;;;;ACjWJ,S
AAgB,4BAA4B,UAAoD;AAC/F,QAAO,SAAS,KAAK,QAAQ;EAC5B,MAAM,aAAa,EAAE,GAAG,KAAK;AAG7B,MAAI,WAAW,YAAY,QAAQ,WAAW,YAAY,KAAA,EACxD,YAAmC,UAAU;AAG/C,SAAO;GACN;;;;;;AA8CH,SAAgB,UAAU,QAAgC;CACzD,MAAM,EAAE,WAAW,WAAW;CAC9B,MAAM,UAAU,OAAO,SAAS,WAAW;AAE3C,QAAO,eAAe,IACrB,OACA,QACA,SAC0F;EAC1F,MAAM,EACL,SACA,QAAQ,SACR,cACA,mBACA,QACA,GAAG,uBACA,WAAW,EAAE;EAEjB,MAAM,YAAY,IAAI,iBAAiB;AACvC,OAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,mBAAmB,EAAE;AAC9D,OAAI,UAAU,KAAA,KAAa,UAAU,KACpC,OAAM,IAAI,MACT,qBAAqB,IAAI,4CACzB;AAEF,OAAI;IACH,MAAM,WAAW,OAAO,MAAM;AAC9B,QAAI,CAAC,SACJ;AAED,cAAU,OAAO,KAAK,SAAS;WACxB;AACP,UAAM,IAAI,MACT,qBAAqB,IAAI,4CACzB;;;EAIH,MAAM,cAAc,UAAU,UAAU;EAExC,MAAM,YAAY,OAAO,MAAM,CAAC,WAAW,OAAO,GAAG,QAAQ,OAAO;EAGpE,MAAM,MAAM,SAAS,KAClB,wCAAwC,UAAU,GAAG,QAAQ,GAAG,cAAc,YAC9E,cAAc,IAAI,gBAAgB,OAElC,iDAAiD,UAAU,MAAM,YACjE,cAAc,IAAI,gBAAgB;EAGrC,MAAM,UAAkC;GACvC,eAAe,UAAU;GACzB,gBAAgB;GAChB,GAAI,gBAAgB,OAAO,iBAAiB,WACxC,eACD,EAAE;GACL;AAED,MAAI,SAAS;AACZ,OAAI,QAAQ,UACX,SAAQ,uBAAuB;AAEhC,OAAI,OAAO,QAAQ,aAAa,SAC/B,SAAQ,sBAAsB,OAAO,QAAQ,SAAS;AAEvD,OAAI,QAAQ,SACX,SAAQ,sBAAsB,QAAQ;AAEvC,OAAI,QAAQ,SACX,SAAQ,qBAAqB,KAAK,UAAU,QAAQ,SAAS;;EAM/D,MAAM,WAAW,MAAM,QAAQ,KAAK;GACnC,MAHY,KAAK,UAAU,OAAO;GAIlC;GACA,QAAQ;GACA;GACR,CAAC;AAGF,MAAI,CAAC,SAAS,MAAM,CAAC,mBAAmB;GACvC,IAAI;AACJ,OAAI;AACH,gBAAY,MAAM,SAAS,MAAM;WAC1B;AACP,gBAAY;;AAEb,SAAM,IAAI,MACT,yBAAyB,SAAS,OAAO,GAAG,SAAS,WAAW,KAAK,YACrE;;AAGF,MAAI,kBACH,QAAO;AAGR,MAAK,OAAiC,WAAW,MAAM;GACtD,MAAM,cAAc,SAAS,QAAQ,IAAI,eAAe,IAAI;AAC5D,OAAI,YAAY,SAAS,eAAe,IAAI,SAAS,KACpD,QAAO,SAAS;AAEjB,OAAI,SAAS,QAAQ,CAAC,YAAY,SAAS,OAAO,CAEjD,QAAO,SAAS;GAQjB,MAAM,gBAAgB,MAAM,QAAQ,KAAK;IACxC,MAAM,KAAK,UAAU;KACpB,GAAI;KACJ,QAAQ;KACR,CAAC;IACF;IACA,QAAQ;IACA;IACR,CAAC;AAEF,OAAI,CAAC,cAAc,IAAI;IACtB,IAAI;AACJ,QAAI;AACH,iBAAY,MAAM,cAAc,MAAM;YAC/B;AACP,iBAAY;;AAEb,UAAM,IAAI,MACT,yBAAyB,cAAc,OAAO,GAAG,cAAc,WAAW,KAAK,YAC/E;;AAMF,WAHkB,MAAM,cAAc,MAElC,EACa;;AAMlB,UAHa,MAAM,SAAS,MAExB,EACQ;;;;;;;;;;;;;;;;AAiBd,eAAsB,gBACrB,QACA,OACA,YACA,aACA,QACmC;CACnC,MAAM,MAAM,iDAAiD,OAAO,UAA
U,UAAU;CAExF,MAAM,WAAW,MAAM,MAAM,KAAK;EACjC,QAAQ;EACR,SAAS;GACR,eAAe,UAAU,OAAO;GAChC,gBAAgB;GAChB;EACD,MAAM;EACN;EACA,CAAC;AAEF,KAAI,CAAC,SAAS,IAAI;EACjB,IAAI;AACJ,MAAI;AACH,eAAY,MAAM,SAAS,MAAM;UAC1B;AACP,eAAY;;AAEb,QAAM,IAAI,MACT,yBAAyB,SAAS,OAAO,GAAG,SAAS,WAAW,KAAK,YACrE;;CAGF,MAAM,OAAO,MAAM,SAAS,MAA4C;AACxE,QAAQ,KAAK,UAAU;;AAOxB,SAAgB,0BACf,OACA,YACC;AACD,KAAI,SAAS,KACZ,QAAO;EAAE,aAAa,KAAA;EAAW,OAAO,KAAA;EAAW;CAGpD,MAAM,cAAc,MAAM,KAAK,UAAU;EACxC,UAAU;GACT,aAAa,KAAK,SAAS,aAAa,KAAK,cAAc,KAAA;GAC3D,MAAM,KAAK;GACX,YAAY,KAAK,SAAS,aAAa,KAAK,cAAc,KAAA;GAC1D;EACD,MAAM;EACN,EAAE;AAEH,KAAI,cAAc,KACjB,QAAO;EAAE,aAAa,KAAA;EAAW,OAAO;EAAa;CAGtD,MAAM,OAAO,WAAW;AAExB,SAAQ,MAAR;EACC,KAAK,OACJ,QAAO;GAAE,aAAa;GAAM,OAAO;GAAa;EACjD,KAAK,OACJ,QAAO;GAAE,aAAa;GAAM,OAAO;GAAa;EACjD,KAAK,WACJ,QAAO;GAAE,aAAa;GAAY,OAAO;GAAa;EAIvD,KAAK,OACJ,QAAO;GACN,aAAa;GACb,OAAO,YAAY,QAAQ,SAAS,KAAK,SAAS,SAAS,WAAW,SAAS;GAC/E;EACF,QAEC,OAAM,IAAI,MAAM,iCADQ,OAC2C;;;AAmFtE,SAAS,gBAAgB,UAAkE;CAE1F,MAAM,KACL,cAAc,YAAY,OAAO,SAAS,aAAa,YAAY,SAAS,WACxE,SAAS,WACV;AAEJ,KAAI,IAAI,KACP,QAAO;EACN,OACC,OAAO,GAAG,cAAc,WACrB,GAAG,YACH,KAAK,UAAU,GAAG,aAAa,EAAE,CAAC;EACtC,YAAY,SAAS,MAAM,YAAY;EACvC,MAAM;EACN,UAAU,GAAG;EACb;CAIF,MAAM,OAAO;AACb,QAAO;EACN,OACC,OAAO,KAAK,cAAc,WACvB,KAAK,YACL,KAAK,UAAU,KAAK,aAAa,EAAE,CAAC;EACxC,YAAY,KAAK,MAAM,YAAY;EACnC,MAAM;EACN,UAAU,KAAK;EACf;;AAGF,SAAgB,iBAAiB,QAA4D;AAC5F,KAAI,OAAO,cAAc,MAAM,QAAQ,OAAO,WAAW,CACxD,QAAO,OAAO,WAAW,KAAK,aAC7B,gBAAgB,SAAS,CACzB;CAGF,MAAM,UAAU,OAAO;AAGvB,KAAI,UAAU,IAAI,SAAS,cAAc,MAAM,QAAQ,QAAQ,GAAG,QAAQ,WAAW,CACpF,QAAO,QAAQ,GAAG,QAAQ,WAAW,KAAK,aAAa,gBAAgB,SAAS,CAAC;AAGlF,QAAO,EAAE;;;;;;;;;AAmBV,SAAgB,YAAY,QAAqD;CAGhF,MAAM,gBADU,OAAO,UACS,IAAI,SAAS;AAC7C,KAAI,iBAAiB,QAAQ,OAAO,cAAc,CAAC,SAAS,EAC3D,QAAO,OAAO,cAAc;AAG7B,KAAI,cAAc,QAAQ;EACzB,MAAM,WAAW,OAAO;AAExB,MAAI,OAAO,aAAa,YAAY,aAAa,KAChD,QAAO,KAAK,UAAU,SAAS;AAGhC,MAAI,OAAO,aAAa,SACvB,QAAO,OAAO,SAAS;AAGxB,MAAI,aAAa,QAAQ,aAAa,KAAA,EACrC;AAED,SAAO,OAAO,SAAS;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACndzB,IAAa,4BAAb,MAAkE
;CAWjE,YACC,SACA,UACA,QACC;wBAdO,wBAAuB,KAAK;wBAC5B,+BAA8B,OAAO;wBAErC,iBAAkF,EAAE,CAAC;wBAErF,WAAA,KAAA,EAA8B;wBAC9B,YAAA,KAAA,EAA+B;wBAEvB,UAAA,KAAA,EAA2B;AAO3C,OAAK,UAAU;AACf,OAAK,WAAW;AAChB,OAAK,SAAS;;CAGf,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YAAoB,EACnB,OACA,kBACA,iBACA,kBACmE;EACnE,MAAM,WAA8B,EAAE;AAEtC,MAAI,SAAS,QAAQ,MAAM,SAAS,GAAG;AACtC,WAAQ,KACP,oFACA;AACD,YAAS,KAAK;IAAE,SAAS;IAAS,MAAM;IAAe,CAAC;;AAGzD,MAAI,oBAAoB,KACvB,UAAS,KAAK;GAAE,SAAS;GAAoB,MAAM;GAAe,CAAC;AAGpE,MAAI,mBAAmB,KACtB,UAAS,KAAK;GAAE,SAAS;GAAmB,MAAM;GAAe,CAAC;AAGnE,MAAI,gBAAgB,SAAS,OAC5B,UAAS,KAAK;GAAE,SAAS;GAAkB,MAAM;GAAe,CAAC;AAGlE,SAAO;;;;;;CAOR,WAAmB,QAAwE;EAC1F,MAAM,EAAE,aAAa,+BAA+B,OAAO;AAC3D,SAAO,SAAS,KAAK,EAAE,SAAS,WAAW,GAAG,KAAK,IAAI,UAAU,CAAC,KAAK,OAAO;;CAG/E,MAAM,WACL,SAC8D;EAC9D,MAAM,WAAW,KAAK,YAAY,QAAQ;EAC1C,MAAM,QAAQ,KAAK,WAAW,QAAQ,OAAO;EAE7C,MAAM,SAAS,MAAM,KAAK,OAAO,QAAQ,SAAS,EAAE,OAAO,CAAC;AAE5D,SAAO;GACN,cAAc;IAAE,SAAS;IAAQ,KAAK;IAAQ;GAC9C,SAAS;IACR,GAAG,OAAO,KAAK,KAAK,EAAE,SAAS,UAAU,aAAa;KACrD,MAAM;KACN,YAAY;KACZ,IAAI;KACJ,KAAK;KACL,kBAAkB,EACjB,YAAY,EAAE,OAAO,EACrB;KACD,EAAE;IACH;KACC,MAAM;KACN,MAAM,OAAO;KACb;IACD,GAAG,iBAAiB,OAA6C;IACjE;GACD,OAAO,kBAAkB,OAA6C;GACtE;GACA;;CAGF,MAAM,SACL,SAC4D;EAC5D,MAAM,WAAW,KAAK,YAAY,QAAQ;EAC1C,MAAM,QAAQ,KAAK,WAAW,QAAQ,OAAO;AAO7C,SAAO,EACN,QAAQ,mBACP,gBAPe,MAAM,KAAK,OAAO,QAAQ,SAAS;GACnD;GACA,QAAQ;GACR,CAAC,CAI6E,EAC7E,SACA,EACD;;;;;ACrGH,IAAa,0BAAb,MAAiE;CAMhE,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,IAAI,uBAA+B;AAElC,SAAO,KAAK,SAAS,wBAAwB;;CAG9C,IAAI,wBAAiC;AACpC,SAAO,KAAK,SAAS,yBAAyB;;CAG/C,YACC,SACA,UACA,QACC;wBAtBO,wBAAuB,KAAK;wBAC5B,WAAA,KAAA,EAAyB;wBACjB,UAAA,KAAA,EAAiC;wBACjC,YAAA,KAAA,EAAqC;AAoBrD,OAAK,UAAU;AACf,OAAK,WAAW;AAChB,OAAK,SAAS;;CAGf,MAAM,QAAQ,EACb,QACA,eACgE;AAChE,MAAI,OAAO,SAAS,KAAK,qBACxB,OAAM,IAAI,mCAAmC;GAC5C,sBAAsB,KAAK;GAC3B,SAAS,KAAK;GACd,UAAU,KAAK;GACf;GACA,CAAC;EAGH,MAAM,EACL,SACA,sBAAsB,uBACtB,uBAAuB,wBACvB,GAAG,uBACA,KAAK;AAcT,SAAO;GACN,aAbgB,MAAM,KAAK,OAAO,QAAQ,IAC1C,KAAK,SACL,EACC,MAAM,QACN,EACD;IACC,SAAS,KA
AK,OAAO,WAAW;IAChC,QAAQ;IACR,GAAG;IACH,CACD,EAG+C;GAC/C,UAAU,EAAE;GACZ;;;;;AClEH,IAAa,6BAAb,MAAmE;CAWlE,YACC,SACA,UACA,QACC;wBAdO,wBAAuB,KAAK;wBAC5B,+BAA8B,OAAO;wBAErC,iBAAkF,EAAE,CAAC;wBAErF,WAAA,KAAA,EAA8B;wBAC9B,YAAA,KAAA,EAAgC;wBAExB,UAAA,KAAA,EAA4B;AAO5C,OAAK,UAAU;AACf,OAAK,WAAW;AAChB,OAAK,SAAS;;CAGf,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,QAAgB,EACf,gBACA,OACA,YACA,iBACA,aACA,MACA,kBACA,iBACA,QACgD;EAChD,MAAM,OAAO,gBAAgB,QAAQ;EAErC,MAAM,WAA8B,EAAE;AAEtC,MAAI,oBAAoB,KACvB,UAAS,KAAK;GAAE,SAAS;GAAoB,MAAM;GAAe,CAAC;AAGpE,MAAI,mBAAmB,KACtB,UAAS,KAAK;GAAE,SAAS;GAAmB,MAAM;GAAe,CAAC;EAGnE,MAAM,WAAW;GAChB,YAAY;GACZ,OAAO,KAAK;GACZ,aAAa;GACb,aAAa,KAAK,SAAS;GAC3B;GACA,OAAO;GACP;AAED,UAAQ,MAAR;GACC,KAAK,OACJ,QAAO;IACN,MAAM;KACL,GAAG;KACH,iBAAiB,KAAA;KAGjB,GAAG,0BAA0B,OAAO,WAAW;KAC/C;IACD;IACA;GAGF,KAAK,OACJ,QAAO;IACN,MAAM;KACL,GAAG;KACH,iBAAiB;MAChB,MAAM;MACN,aACC,gBAAgB,SAAS,SAAS,eAAe,SAAS,KAAA;MAC3D;KACD,OAAO,KAAA;KACP,aAAa,KAAA;KACb;IACD;IACA;GAGF,QAEC,OAAM,IAAI,MAAM,qBADQ,OAC+B;;;;;;;;;;;;CAc1D,eACC,MACA,UACA,SACC;AACD,SAAO;GACN,YAAY,KAAK;GACjB,UAAU,KAAK,OAAO,YAAY,4BAA4B,SAAS,GAAG;GAC1E,aAAa,KAAK;GAClB,OAAO,KAAK;GACZ,GAAI,KAAK,cAAc,EAAE,aAAa,KAAK,aAAa,GAAG,EAAE;GAC7D,OAAO,KAAK;GACZ,GAAI,KAAK,kBAAkB,EAAE,iBAAiB,KAAK,iBAAiB,GAAG,EAAE;GACzE,GAAI,SAAS,SAAS,EAAE,QAAQ,MAAM,GAAG,EAAE;GAC3C;;;;;CAMF,gBAAwB;EACvB,MAAM,EACL,SACA,YAAY,aACZ,iBACA,cACA,GAAG,uBACA,KAAK;EAET,MAAM,gBAAgB;GACrB,GAAI,gBAAgB,OAAO,iBAAiB,WACxC,eACD,EAAE;GACL,GAAI,kBAAkB,EAAE,sBAAsB,iBAAiB,GAAG,EAAE;GACpE;AAED,SAAO;GACN,SAAS,KAAK,OAAO,WAAW;GAChC,GAAI,OAAO,KAAK,cAAc,CAAC,SAAS,IAAI,EAAE,cAAc,eAAe,GAAG,EAAE;GAChF,GAAG;GACH;;CAGF,MAAM,WACL,SAC8D;EAC9D,MAAM,EAAE,MAAM,aAAa,KAAK,QAAQ,QAAQ;EAChD,MAAM,EAAE,aAAa,+BAA+B,QAAQ,OAAO;EAEnE,MAAM,SAAS,KAAK,eAAe,MAAM,SAAS;EAClD,MAAM,aAAa,KAAK,eAAe;EAEvC,MAAM,SAAS,MAAM,KAAK,OAAO,QAAQ,IACxC,KAAK,OACL,QACA;GACC,GAAG;GACH,QAAQ,QAAQ;GAChB,CACD;AAED,MAAI,kBAAkB,eACrB,OAAM,IAAI,MACT,sGACA;EAGF,MAAM,eAAe;EACrB,MAAM,UAAU,aAAa;EAK7B,MAAM,mBACL,UAAU,IAAI,SAAS,qBAAqB,UAAU,IAAI,SAAS;AAEp
E,SAAO;GACN,cAAc,yBAAyB,aAAa;GACpD,SAAS;IACR,GAAI,mBACD,CAAC;KAAE,MAAM;KAAsB,MAAM;KAAkB,CAAC,GACxD,EAAE;IACL;KACC,MAAM;KACN,MAAM,YAAY,aAAa,IAAI;KACnC;IACD,GAAG,iBAAiB,aAAa;IACjC;GACD,OAAO,kBAAkB,OAAkC;GAC3D;GACA;;CAGF,MAAM,SACL,SAC4D;EAC5D,MAAM,EAAE,MAAM,aAAa,KAAK,QAAQ,QAAQ;EAChD,MAAM,EAAE,aAAa,+BAA+B,QAAQ,OAAO;EAEnE,MAAM,SAAS,KAAK,eAAe,MAAM,UAAU,EAAE,QAAQ,MAAM,CAAC;EACpE,MAAM,aAAa,KAAK,eAAe;EAEvC,MAAM,WAAW,MAAM,KAAK,OAAO,QAAQ,IAC1C,KAAK,OACL,QACA;GACC,GAAG;GACH,QAAQ,QAAQ;GAChB,CACD;AAGD,MAAI,oBAAoB,eACvB,QAAO,EACN,QAAQ,mBAAmB,gBAAgB,SAAS,EAAE,SAAS,EAC/D;EAKF,MAAM,eAAe;EACrB,MAAM,UAAU,aAAa;EAK7B,MAAM,mBACL,UAAU,IAAI,SAAS,qBAAqB,UAAU,IAAI,SAAS;EAEpE,IAAI,SAAwB;EAC5B,IAAI,cAA6B;AAEjC,SAAO,EACN,QAAQ,IAAI,eAA0C,EACrD,MAAM,YAAY;AACjB,cAAW,QAAQ;IAClB,MAAM;IACI;IACV,CAAC;AAEF,OAAI,kBAAkB;AACrB,kBAAc,YAAY;AAC1B,eAAW,QAAQ;KAAE,MAAM;KAAmB,IAAI;KAAa,CAAC;AAChE,eAAW,QAAQ;KAClB,MAAM;KACN,IAAI;KACJ,OAAO;KACP,CAAC;AACF,eAAW,QAAQ;KAAE,MAAM;KAAiB,IAAI;KAAa,CAAC;;GAG/D,MAAM,OAAO,YAAY,aAAa;AACtC,OAAI,MAAM;AACT,aAAS,YAAY;AACrB,eAAW,QAAQ;KAAE,MAAM;KAAc,IAAI;KAAQ,CAAC;AACtD,eAAW,QAAQ;KAAE,MAAM;KAAc,IAAI;KAAQ,OAAO;KAAM,CAAC;AACnE,eAAW,QAAQ;KAAE,MAAM;KAAY,IAAI;KAAQ,CAAC;;AAGrD,QAAK,MAAM,YAAY,iBAAiB,aAAa,CACpD,YAAW,QAAQ,SAAS;AAG7B,cAAW,QAAQ;IAClB,MAAM;IACN,cAAc,yBAAyB,aAAa;IACpD,OAAO,kBAAkB,SAAoC;IAC7D,CAAC;AACF,cAAW,OAAO;KAEnB,CAAC,EACF;;;;;AC/RH,IAAa,sBAAb,MAAyD;CAGxD,IAAI,mBAA2B;AAC9B,SAAO,KAAK,SAAS,oBAAoB;;CAG1C,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBAbD,wBAAuB,KAAK;;CAgBrC,MAAM,WAAW,EAChB,QACA,GACA,MACA,aACA,MACA,eAGC;EACD,MAAM,EAAE,OAAO,WAAW,4BAA4B,KAAK;EAE3D,MAAM,WAAmC,EAAE;AAE3C,MAAI,eAAe,KAClB,UAAS,KAAK;GACb,SAAS;GACT,SAAS;GACT,MAAM;GACN,CAAC;EAGH,MAAM,gBAAgB,YAAY;AAejC,UAAOC,eAdS,MAAM,KAAK,OAAO,QAAQ,IACzC,KAAK,SACL;IACC;IACA,QAAQ,UAAU;IAClB;IACA;IACA,EACD;IACC,SAAS,KAAK,OAAO;IACrB,QAAQ;IACR,CACD,CAE0B;;AAO5B,SAAO;GACN,QAL4B,MAAM,QAAQ,IAC1C,MAAM,KAAK,EAAE,QAAQ,GAAG,QAAQ,eAAe,CAAC,CAChD;GAIA,UAAU;IACT,SAAS,EAAE;IACX,
SAAS,KAAK;IACd,2BAAW,IAAI,MAAM;IACrB;GACD;GACA;;;AAIH,SAAS,4BAA4B,MAA0B;CAC9D,MAAM,CAAC,OAAO,UAAU,MAAM,MAAM,IAAI,IAAI,CAAC,KAAA,GAAW,KAAA,EAAU;AAElE,QAAO;EACN,QAAQ,aAAa,OAAO;EAC5B,OAAO,aAAa,MAAM;EAC1B;;AAGF,SAAS,aAAa,OAAgB;AACrC,KAAI,UAAU,MAAM,CAAC,MAAO,QAAO,KAAA;CACnC,MAAM,SAAS,OAAO,MAAM;AAC5B,QAAO,OAAO,UAAU,OAAO,GAAG,SAAS,KAAA;;;;;;;;;;AAW5C,eAAeA,eAAa,QAAsC;AACjE,KAAI,kBAAkB,WACrB,QAAO;AAER,KAAI,kBAAkB,YACrB,QAAO,IAAI,WAAW,OAAO;AAE9B,KAAI,kBAAkB,gBAAgB;EACrC,MAAM,SAAU,OAAsC,WAAW;EACjE,MAAM,SAAuB,EAAE;EAC/B,IAAI,cAAc;AAClB,SAAO,MAAM;GACZ,MAAM,EAAE,MAAM,UAAU,MAAM,OAAO,MAAM;AAC3C,OAAI,KAAM;AACV,UAAO,KAAK,MAAM;AAClB,kBAAe,MAAM;;EAEtB,MAAM,SAAS,IAAI,WAAW,YAAY;EAC1C,IAAI,SAAS;AACb,OAAK,MAAM,SAAS,QAAQ;AAC3B,UAAO,IAAI,OAAO,OAAO;AACzB,aAAU,MAAM;;AAEjB,SAAO;;AAGR,KAAI,kBAAkB,SACrB,QAAO,IAAI,WAAW,MAAM,OAAO,aAAa,CAAC;AAGlD,KAAI,OAAO,WAAW,YAAY,WAAW,MAAM;EAClD,MAAM,MAAM;AAEZ,MAAI,OAAO,IAAI,UAAU,SACxB,QAAO,WAAW,KAAK,KAAK,IAAI,MAAM,GAAG,MAAM,EAAE,WAAW,EAAE,CAAC;AAGhE,MAAI,IAAI,gBAAgB,WACvB,QAAO,IAAI;AAGZ,MAAI,IAAI,gBAAgB,YACvB,QAAO,IAAI,WAAW,IAAI,KAAK;AAGhC,MAAI,OAAO,IAAI,gBAAgB,WAC9B,QAAO,IAAI,WAAW,MAAO,IAA4B,aAAa,CAAC;;AAGzE,OAAM,IAAI,MACT,gDAAgD,OAAO,UAAU,SAAS,KAAK,OAAO,CAAC,cACtF,OAAO,WAAW,YAAY,WAAW,OACtC,KAAK,UAAU,OAAO,KAAK,OAAO,CAAC,GACnC,QAEJ;;;;;;;;;;;ACrIF,IAAa,8BAAb,MAAyE;CAGxE,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBATD,wBAAuB,KAAK;;CAYrC,MAAM,WACL,SACmE;EACnE,MAAM,EAAE,OAAO,WAAW,gBAAgB;EAE1C,MAAM,WAAmC,EAAE;EAI3C,MAAM,aACL,OAAO,UAAU,WACd,WAAW,KAAK,KAAK,MAAM,GAAG,MAAM,EAAE,WAAW,EAAE,CAAC,GACpD;EAEJ,MAAM,UAAU,KAAK,YAAY;EAEjC,IAAI;AAEJ,MAAI,QACH,aAAY,MAAM,KAAK,SAAS,YAAY,WAAW,YAAY;MAEnE,aAAY,MAAM,KAAK,WAAW,YAAY,YAAY;EAG3D,MAAM,SAAS;AAGf,MAAI,QACH,QAAO,KAAK,uBAAuB,QAAQ,SAAS;AAErD,SAAO,KAAK,yBAAyB,QAAQ,SAAS;;CAOvD,MAAc,WAAW,YAAwB,aAA6C;EAS7F,MAAM,SAAkC,EAAE,OANzB,KAAK,YAER,sCACV,mBAAmB,WAAW,GAC9B,MAAM,KAAK,WAAW,EAEuB;AAEjD,MAAI,KAAK,SAAS,SACjB,QAAO,WAAW,KAAK,SAAS;AAEjC,MAAI,KAAK,SAAS,OACjB,QAAO,iBAAiB,KAAK,S
AAS;AAGvC,SAAO,KAAK,OAAO,QAAQ,IAC1B,KAAK,SACL,QACA;GAAE,SAAS,KAAK,OAAO;GAAS,QAAQ;GAAa,CACrD;;CAGF,yBACC,KACA,UAC0D;EAC1D,MAAM,OAAQ,IAAI,QAAmB;EAGrC,MAAM,WAA4E,EAAE;AAGpF,MAAI,IAAI,YAAY,MAAM,QAAQ,IAAI,SAAS,CAC9C,MAAK,MAAM,OAAO,IAAI,SACrB,UAAS,KAAK;GACb,MAAQ,IAAgC,QAAmB;GAC3D,aAAe,IAAgC,SAAoB;GACnE,WAAa,IAAgC,OAAkB;GAC/D,CAAC;WAIK,IAAI,SAAS,MAAM,QAAQ,IAAI,MAAM,CAC7C,MAAK,MAAM,KAAK,IAAI,MACnB,UAAS,KAAK;GACb,MAAQ,EAA8B,QAAmB;GACzD,aAAe,EAA8B,SAAoB;GACjE,WAAa,EAA8B,OAAkB;GAC7D,CAAC;EAKJ,MAAM,OAAO,IAAI;AAEjB,SAAO;GACN;GACA;GACA,UAAW,MAAM,YAAuB,KAAA;GACxC,mBAAoB,MAAM,YAAuB,KAAA;GACjD;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;CAOF,MAAc,SACb,YACA,WACA,aACmB;AACnB,MAAI,KAAK,OAAO,UAEf,QAAO,KAAK,OAAO,QAAQ,IAC1B,KAAK,SACL,EACC,OAAO;GAAE,MAAM,mBAAmB,WAAW;GAAE,aAAa;GAAW,EACvE,EACD;GAAE,SAAS,KAAK,OAAO;GAAS,QAAQ;GAAa,CACrD;AAMF,MAAI,CAAC,KAAK,OAAO,YAChB,OAAM,IAAI,MACT,yIAEA;AAEF,SAAO,gBACN,KAAK,OAAO,aACZ,KAAK,SACL,YACA,WACA,YACA;;CAGF,uBACC,KACA,UAC0D;EAY1D,MAAM,OAVU,IAAI,SACM,YASH,IAAI,eAAe;EAE1C,MAAM,OAAO,KAAK,cAAc;EAChC,MAAM,WAA4E,EAAE;AAEpF,MAAI,KAAK,SAAS,MAAM,QAAQ,IAAI,MAAM,CACzC,MAAK,MAAM,KAAK,IAAI,MACnB,UAAS,KAAK;GACb,MAAM,EAAE,QAAQ;GAChB,aAAa,EAAE,SAAS;GACxB,WAAW,EAAE,OAAO;GACpB,CAAC;AAIJ,SAAO;GACN;GACA;GACA,UAAU,KAAA;GACV,mBAAmB,KAAA;GACnB;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;;AAQH,SAAS,mBAAmB,OAA2B;CACtD,IAAI,SAAS;AACb,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,QAAQ,IACjC,WAAU,OAAO,aAAa,MAAM,GAAI;AAEzC,QAAO,KAAK,OAAO;;;;;;;;;;AClOpB,IAAa,uBAAb,MAA2D;CAG1D,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBATD,wBAAuB,KAAK;;CAYrC,MAAM,WACL,SAC4D;EAC5D,MAAM,EAAE,MAAM,OAAO,OAAO,gBAAgB;EAE5C,MAAM,WAAmC,EAAE;AAE3C,MAAI,QAAQ,aACX,UAAS,KAAK;GACb,SAAS;GACT,SAAS;GACT,MAAM;GACN,CAAC;AAGH,MAAI,QAAQ,aACX,UAAS,KAAK;GACb,SACC;GACD,SAAS;GACT,MAAM;GACN,CAAC;EAIH,MAAM,SAAkC,EAAE,MAAM;AAChD,MAAI,MAAO,QAAO,QAAQ;AAC1B,MAAI,SAAS,KAAM,QAAO,QAAQ;AAqBlC,SAAO;GACN,OAHa,MAAM,aAjBL,MAAM,KAAK,O
AAO,QAAQ,IACxC,KAAK,SACL,QACA;IACC,SAAS,KAAK,OAAO;IACrB,QAAQ;IAKR,mBAAmB;IACnB,CACD,CAKuC;GAIvC;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;;;;;;;;;;AAgBH,eAAe,aAAa,QAAsC;AAEjE,KAAI,kBAAkB,SACrB,QAAO,IAAI,WAAW,MAAM,OAAO,aAAa,CAAC;AAElD,KAAI,kBAAkB,WACrB,QAAO;AAER,KAAI,kBAAkB,YACrB,QAAO,IAAI,WAAW,OAAO;AAE9B,KAAI,kBAAkB,gBAAgB;EACrC,MAAM,SAAU,OAAsC,WAAW;EACjE,MAAM,SAAuB,EAAE;EAC/B,IAAI,cAAc;AAClB,SAAO,MAAM;GACZ,MAAM,EAAE,MAAM,UAAU,MAAM,OAAO,MAAM;AAC3C,OAAI,KAAM;AACV,UAAO,KAAK,MAAM;AAClB,kBAAe,MAAM;;EAEtB,MAAM,SAAS,IAAI,WAAW,YAAY;EAC1C,IAAI,SAAS;AACb,OAAK,MAAM,SAAS,QAAQ;AAC3B,UAAO,IAAI,OAAO,OAAO;AACzB,aAAU,MAAM;;AAEjB,SAAO;;AAGR,KAAI,OAAO,WAAW,YAAY,WAAW,MAAM;EAClD,MAAM,MAAM;AACZ,MAAI,OAAO,IAAI,UAAU,SACxB,QAAO,WAAW,KAAK,KAAK,IAAI,MAAM,GAAG,MAAM,EAAE,WAAW,EAAE,CAAC;;AAGjE,OAAM,IAAI,MACT,8CAA8C,OAAO,UAAU,SAAS,KAAK,OAAO,GACpF;;;;;;;;;;;;;ACxHF,IAAa,0BAAb,MAAiE;CAGhE,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBATD,wBAAuB,KAAK;;CAYrC,MAAM,SACL,SAC6D;EAC7D,MAAM,EAAE,WAAW,OAAO,MAAM,gBAAgB;EAEhD,MAAM,WAAmC,EAAE;EAM3C,MAAM,SAAkC;GACvC;GACA,UALgB,oBAAoB,WAAW,SAAS;GAMxD;AACD,MAAI,QAAQ,KACX,QAAO,QAAQ;AAmBhB,SAAO;GACN,WAjBe,MAAM,KAAK,OAAO,QAAQ,IACzC,KAAK,SACL,QACA;IAAE,SAAS,KAAK,OAAO;IAAS,QAAQ;IAAa,CACrD,EAGuB,YAEK,EAAE,EAC7B,KAAK,UAAU;IACf,OAAO,KAAK,MAAM;IAClB,gBAAgB,KAAK,SAAS;IAC9B,EAAE,CACF,MAAM,GAAG,MAAM,EAAE,iBAAiB,EAAE,eAAe;GAIpD;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;;;;;;;;;AAeH,SAAS,oBACR,WACA,UAC0B;AAC1B,KAAI,UAAU,SAAS,OACtB,QAAO,UAAU,OAAO,KAAK,UAAU,EAAE,MAAM,EAAE;AAIlD,UAAS,KAAK;EACb,SAAS;EACT,MAAM;EACN,CAAC;AAEF,QAAO,UAAU,OAAO,KAAK,SAAS,EAAE,MAAM,KAAK,UAAU,IAAI,EAAE,EAAE;;;;;;;;AClGtE,IAAa,2BAAb,cAA8C,0BAA0B;;;;;;ACkJxE,SAAgB,gBAAgB,SAAuC;AACtE,KAAI,CAAC,QAAQ,WAAW,EAAE,eAAe,WAAW,YAAY,SAC/D,OAAM,IAAI,MACT,yIAEA;CAGF,IAAI;CACJ,MAAM,YAAY,CAAC,CAAC,QAAQ;AAE5B,KAAI,QAAQ,QACX,WAAU,QAAQ;MACZ;EACN,MAAM,EAAE,WAAW,WAAW;AAC9B,YAAU,EACT,KAAK,UAAU;GAAE;GAAW;GAA
Q,OAAO,QAAQ;GAAO,CAAC,EAC3D;;CAGF,MAAM,mBAAmB,SAA+B,WAAkC,EAAE,KAC3F,IAAI,2BAA2B,SAAS,UAAU;EACjD;EACA,SAAS,QAAQ;EACjB,UAAU;EACV;EACA,CAAC;CAEH,MAAM,oBACL,SACA,WAAmC,EAAE,KAErC,IAAI,oBAAoB,SAAS,UAAU;EAC1C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CACH,MAAM,wBACL,SACA,WAAuC,EAAE,KAEzC,IAAI,wBAAwB,SAAS,UAAU;EAC9C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CAEH,MAAM,4BACL,SACA,WAA2C,EAAE,KAE7C,IAAI,4BAA4B,SAAS,UAAU;EAClD;EACA,SAAS,QAAQ;EACjB,UAAU;EACV;EACA,aACC,CAAC,aAAa,eAAe,UAC1B;GAAE,WAAW,QAAQ;GAAW,QAAQ,QAAQ;GAAQ,GACxD,KAAA;EACJ,CAAC;CAEH,MAAM,qBAAqB,SAAuB,WAAoC,EAAE,KACvF,IAAI,qBAAqB,SAAS,UAAU;EAC3C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CAEH,MAAM,wBACL,SACA,WAAuC,EAAE,KAEzC,IAAI,wBAAwB,SAAS,UAAU;EAC9C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CAEH,MAAM,YAAY,SAA+B,aAAqC;AACrF,MAAI,IAAI,OACP,OAAM,IAAI,MAAM,sEAAsE;AAEvF,SAAO,gBAAgB,SAAS,SAAS;;AAG1C,UAAS,OAAO;AAChB,UAAS,YAAY;AACrB,UAAS,gBAAgB;AACzB,UAAS,qBAAqB;AAC9B,UAAS,QAAQ;AACjB,UAAS,aAAa;AACtB,UAAS,gBAAgB;AACzB,UAAS,qBAAqB;AAC9B,UAAS,SAAS;AAClB,UAAS,cAAc;AACvB,UAAS,YAAY;AACrB,UAAS,iBAAiB;AAE1B,QAAO;;;;;;;;AAyBR,SAAgB,eACf,SAEA,eAAe,iBACI;CACnB,MAAM,UAAU,QAAQ;CAExB,MAAM,mBAAmB,WAAiC,EAAE,KAC3D,IAAI,0BAA0B,4CAA4C,UAAU;EACnF;EACA,UAAU;EACV,CAAC;CAEH,MAAM,YAAY,aAAoC;AACrD,MAAI,IAAI,OACP,OAAM,IAAI,MAAM,qEAAqE;AAEtF,SAAO,gBAAgB,SAAS;;AAGjC,UAAS,OAAO;AAEhB,QAAO;;AAmBR,IAAI,gBAAgB;;;;;AAMpB,SAAgB,cAAc,SAA6C;AAC1E,KAAI,CAAC,eAAe;AACnB,kBAAgB;AAChB,UAAQ,KACP,yKAGA;;AAEF,QAAO,eAAe,SAAS,eAAe"}
|
|
1
|
+
{"version":3,"file":"index.mjs","names":["toUint8Array","uint8ArrayToBase64","toUint8Array"],"sources":["../src/convert-to-workersai-chat-messages.ts","../src/map-workersai-usage.ts","../src/map-workersai-finish-reason.ts","../src/streaming.ts","../src/utils.ts","../src/aisearch-chat-language-model.ts","../src/workersai-embedding-model.ts","../src/workersai-chat-language-model.ts","../src/workersai-image-model.ts","../src/workersai-transcription-model.ts","../src/workersai-speech-model.ts","../src/workersai-reranking-model.ts","../src/autorag-chat-language-model.ts","../src/index.ts"],"sourcesContent":["import type { LanguageModelV3DataContent, LanguageModelV3Prompt } from \"@ai-sdk/provider\";\nimport type { WorkersAIContentPart, WorkersAIChatPrompt } from \"./workersai-chat-prompt\";\n\n/**\n * Normalise any LanguageModelV3DataContent value to a Uint8Array.\n *\n * Handles:\n * - Uint8Array → returned as-is\n * - string → decoded from base64 (with or without data-URL prefix)\n * - URL → not supported (Workers AI needs raw bytes, not a reference)\n */\nfunction toUint8Array(data: LanguageModelV3DataContent): Uint8Array | null {\n\tif (data instanceof Uint8Array) {\n\t\treturn data;\n\t}\n\n\tif (typeof data === \"string\") {\n\t\tlet base64 = data;\n\t\tif (base64.startsWith(\"data:\")) {\n\t\t\tconst commaIndex = base64.indexOf(\",\");\n\t\t\tif (commaIndex >= 0) {\n\t\t\t\tbase64 = base64.slice(commaIndex + 1);\n\t\t\t}\n\t\t}\n\t\tconst binaryString = atob(base64);\n\t\tconst bytes = new Uint8Array(binaryString.length);\n\t\tfor (let i = 0; i < binaryString.length; i++) {\n\t\t\tbytes[i] = binaryString.charCodeAt(i);\n\t\t}\n\t\treturn bytes;\n\t}\n\n\tif (data instanceof URL) {\n\t\tthrow new Error(\n\t\t\t\"URL image sources are not supported by Workers AI. 
\" +\n\t\t\t\t\"Provide image data as a Uint8Array or base64 string instead.\",\n\t\t);\n\t}\n\n\treturn null;\n}\n\nfunction uint8ArrayToBase64(bytes: Uint8Array): string {\n\tlet binary = \"\";\n\tconst chunkSize = 8192;\n\tfor (let i = 0; i < bytes.length; i += chunkSize) {\n\t\tconst chunk = bytes.subarray(i, Math.min(i + chunkSize, bytes.length));\n\t\tbinary += String.fromCharCode(...chunk);\n\t}\n\treturn btoa(binary);\n}\n\nexport function convertToWorkersAIChatMessages(prompt: LanguageModelV3Prompt): {\n\tmessages: WorkersAIChatPrompt;\n} {\n\tconst messages: WorkersAIChatPrompt = [];\n\n\tfor (const { role, content } of prompt) {\n\t\tswitch (role) {\n\t\t\tcase \"system\": {\n\t\t\t\tmessages.push({ content, role: \"system\" });\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase \"user\": {\n\t\t\t\tconst textParts: string[] = [];\n\t\t\t\tconst imageParts: { image: Uint8Array; mediaType: string | undefined }[] = [];\n\n\t\t\t\tfor (const part of content) {\n\t\t\t\t\tswitch (part.type) {\n\t\t\t\t\t\tcase \"text\": {\n\t\t\t\t\t\t\ttextParts.push(part.text);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcase \"file\": {\n\t\t\t\t\t\t\tconst imageBytes = toUint8Array(part.data);\n\t\t\t\t\t\t\tif (imageBytes) {\n\t\t\t\t\t\t\t\timageParts.push({\n\t\t\t\t\t\t\t\t\timage: imageBytes,\n\t\t\t\t\t\t\t\t\tmediaType: part.mediaType,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (imageParts.length > 0) {\n\t\t\t\t\tconst contentArray: WorkersAIContentPart[] = [];\n\t\t\t\t\tif (textParts.length > 0) {\n\t\t\t\t\t\tcontentArray.push({ type: \"text\", text: textParts.join(\"\\n\") });\n\t\t\t\t\t}\n\t\t\t\t\tfor (const img of imageParts) {\n\t\t\t\t\t\tconst base64 = uint8ArrayToBase64(img.image);\n\t\t\t\t\t\tconst mediaType = img.mediaType || \"image/png\";\n\t\t\t\t\t\tcontentArray.push({\n\t\t\t\t\t\t\ttype: \"image_url\",\n\t\t\t\t\t\t\timage_url: { url: `data:${mediaType};base64,${base64}` 
},\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\tmessages.push({ content: contentArray, role: \"user\" });\n\t\t\t\t} else {\n\t\t\t\t\tmessages.push({ content: textParts.join(\"\\n\"), role: \"user\" });\n\t\t\t\t}\n\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase \"assistant\": {\n\t\t\t\tlet text = \"\";\n\t\t\t\tlet reasoning = \"\";\n\t\t\t\tconst toolCalls: Array<{\n\t\t\t\t\tid: string;\n\t\t\t\t\ttype: \"function\";\n\t\t\t\t\tfunction: { name: string; arguments: string };\n\t\t\t\t}> = [];\n\n\t\t\t\tfor (const part of content) {\n\t\t\t\t\tswitch (part.type) {\n\t\t\t\t\t\tcase \"text\": {\n\t\t\t\t\t\t\ttext += part.text;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"reasoning\": {\n\t\t\t\t\t\t\t// Reasoning is accumulated separately and sent as the `reasoning`\n\t\t\t\t\t\t\t// field on the message object. This is the field name vLLM expects\n\t\t\t\t\t\t\t// on input for reasoning models (kimi-k2.5, glm-4.7-flash).\n\t\t\t\t\t\t\t// Concatenating it into `content` corrupts the conversation history\n\t\t\t\t\t\t\t// and causes models to produce empty or garbled responses on the\n\t\t\t\t\t\t\t// next turn.\n\t\t\t\t\t\t\treasoning += part.text;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"file\": {\n\t\t\t\t\t\t\t// File parts in assistant messages - no action needed\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"tool-call\": {\n\t\t\t\t\t\t\ttoolCalls.push({\n\t\t\t\t\t\t\t\tfunction: {\n\t\t\t\t\t\t\t\t\targuments: JSON.stringify(part.input),\n\t\t\t\t\t\t\t\t\tname: part.toolName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tid: part.toolCallId,\n\t\t\t\t\t\t\t\ttype: \"function\",\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcase \"tool-result\": {\n\t\t\t\t\t\t\t// Tool results in assistant messages - no action needed\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tdefault: {\n\t\t\t\t\t\t\tconst exhaustiveCheck = part satisfies never;\n\t\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t\t`Unsupported part 
type: ${(exhaustiveCheck as { type: string }).type}`,\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmessages.push({\n\t\t\t\t\tcontent: text,\n\t\t\t\t\trole: \"assistant\",\n\t\t\t\t\t...(reasoning ? { reasoning } : {}),\n\t\t\t\t\ttool_calls:\n\t\t\t\t\t\ttoolCalls.length > 0\n\t\t\t\t\t\t\t? toolCalls.map(({ function: { name, arguments: args }, id }) => ({\n\t\t\t\t\t\t\t\t\tfunction: { arguments: args, name },\n\t\t\t\t\t\t\t\t\tid,\n\t\t\t\t\t\t\t\t\ttype: \"function\" as const,\n\t\t\t\t\t\t\t\t}))\n\t\t\t\t\t\t\t: undefined,\n\t\t\t\t});\n\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase \"tool\": {\n\t\t\t\tfor (const toolResponse of content) {\n\t\t\t\t\tif (toolResponse.type === \"tool-result\") {\n\t\t\t\t\t\tconst output = toolResponse.output;\n\t\t\t\t\t\tlet content: string;\n\t\t\t\t\t\tswitch (output.type) {\n\t\t\t\t\t\t\tcase \"text\":\n\t\t\t\t\t\t\tcase \"error-text\":\n\t\t\t\t\t\t\t\tcontent = output.value;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase \"json\":\n\t\t\t\t\t\t\tcase \"error-json\":\n\t\t\t\t\t\t\t\tcontent = JSON.stringify(output.value);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase \"execution-denied\":\n\t\t\t\t\t\t\t\tcontent = output.reason\n\t\t\t\t\t\t\t\t\t? 
`Tool execution denied: ${output.reason}`\n\t\t\t\t\t\t\t\t\t: \"Tool execution was denied.\";\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tcase \"content\":\n\t\t\t\t\t\t\t\tcontent = output.value\n\t\t\t\t\t\t\t\t\t.filter(\n\t\t\t\t\t\t\t\t\t\t(p): p is { type: \"text\"; text: string } =>\n\t\t\t\t\t\t\t\t\t\t\tp.type === \"text\",\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t.map((p) => p.text)\n\t\t\t\t\t\t\t\t\t.join(\"\\n\");\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tcontent = \"\";\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmessages.push({\n\t\t\t\t\t\t\tcontent,\n\t\t\t\t\t\t\tname: toolResponse.toolName,\n\t\t\t\t\t\t\ttool_call_id: toolResponse.toolCallId,\n\t\t\t\t\t\t\trole: \"tool\",\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\t// Skip tool-approval-response parts as they're not supported by Workers AI\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tdefault: {\n\t\t\t\tconst exhaustiveCheck = role satisfies never;\n\t\t\t\tthrow new Error(`Unsupported role: ${exhaustiveCheck}`);\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { messages };\n}\n","import type { LanguageModelV3Usage } from \"@ai-sdk/provider\";\n\n/**\n * Map Workers AI usage data to the AI SDK V3 usage format.\n * Accepts any object that may have a `usage` property with token counts.\n */\nexport function mapWorkersAIUsage(\n\toutput: Record<string, unknown> | AiTextGenerationOutput | AiTextToImageOutput,\n): LanguageModelV3Usage {\n\tconst usage = (\n\t\toutput as {\n\t\t\tusage?: { prompt_tokens?: number; completion_tokens?: number };\n\t\t}\n\t).usage ?? {\n\t\tcompletion_tokens: 0,\n\t\tprompt_tokens: 0,\n\t};\n\n\tconst promptTokens = usage.prompt_tokens ?? 0;\n\tconst completionTokens = usage.completion_tokens ?? 
0;\n\n\treturn {\n\t\toutputTokens: {\n\t\t\ttotal: completionTokens,\n\t\t\ttext: undefined,\n\t\t\treasoning: undefined,\n\t\t},\n\t\tinputTokens: {\n\t\t\ttotal: promptTokens,\n\t\t\tnoCache: undefined,\n\t\t\tcacheRead: undefined,\n\t\t\tcacheWrite: undefined,\n\t\t},\n\t\traw: { total: promptTokens + completionTokens },\n\t};\n}\n","import type { LanguageModelV3FinishReason } from \"@ai-sdk/provider\";\n\n/**\n * Map a Workers AI finish reason to the AI SDK unified finish reason.\n *\n * Accepts either:\n * - A raw finish reason string (e.g., \"stop\", \"tool_calls\")\n * - A full response object with finish_reason in various locations\n */\nexport function mapWorkersAIFinishReason(\n\tfinishReasonOrResponse: string | null | undefined | Record<string, unknown>,\n): LanguageModelV3FinishReason {\n\tlet finishReason: string | null | undefined;\n\n\tif (\n\t\ttypeof finishReasonOrResponse === \"string\" ||\n\t\tfinishReasonOrResponse === null ||\n\t\tfinishReasonOrResponse === undefined\n\t) {\n\t\tfinishReason = finishReasonOrResponse;\n\t} else if (typeof finishReasonOrResponse === \"object\" && finishReasonOrResponse !== null) {\n\t\tconst response = finishReasonOrResponse;\n\n\t\t// OpenAI format: { choices: [{ finish_reason: \"stop\" }] }\n\t\tconst choices = response.choices as Array<{ finish_reason?: string }> | undefined;\n\t\tif (Array.isArray(choices) && choices.length > 0) {\n\t\t\tfinishReason = choices[0].finish_reason;\n\t\t} else if (\"finish_reason\" in response) {\n\t\t\tfinishReason = response.finish_reason as string;\n\t\t} else {\n\t\t\tfinishReason = undefined;\n\t\t}\n\t} else {\n\t\t// Numbers, booleans, etc. -- default to stop\n\t\tfinishReason = undefined;\n\t}\n\n\tconst raw = finishReason ?? 
\"stop\";\n\n\tswitch (finishReason) {\n\t\tcase \"stop\":\n\t\t\treturn { unified: \"stop\", raw };\n\t\tcase \"length\":\n\t\tcase \"model_length\":\n\t\t\treturn { unified: \"length\", raw };\n\t\tcase \"tool_calls\":\n\t\t\treturn { unified: \"tool-calls\", raw };\n\t\tcase \"error\":\n\t\t\treturn { unified: \"error\", raw };\n\t\tcase \"other\":\n\t\tcase \"unknown\":\n\t\t\treturn { unified: \"other\", raw };\n\t\tdefault:\n\t\t\treturn { unified: \"stop\", raw };\n\t}\n}\n","import type {\n\tLanguageModelV3FinishReason,\n\tLanguageModelV3StreamPart,\n\tLanguageModelV3Usage,\n} from \"@ai-sdk/provider\";\nimport { generateId } from \"ai\";\nimport { mapWorkersAIFinishReason } from \"./map-workersai-finish-reason\";\nimport { mapWorkersAIUsage } from \"./map-workersai-usage\";\n\n/**\n * Prepend a stream-start event to an existing LanguageModelV3 stream.\n * Uses pipeThrough for proper backpressure and error propagation.\n */\nexport function prependStreamStart(\n\tsource: ReadableStream<LanguageModelV3StreamPart>,\n\twarnings: LanguageModelV3StreamPart extends { type: \"stream-start\" } ? 
never : unknown,\n): ReadableStream<LanguageModelV3StreamPart> {\n\tlet sentStart = false;\n\treturn source.pipeThrough(\n\t\tnew TransformStream<LanguageModelV3StreamPart, LanguageModelV3StreamPart>({\n\t\t\ttransform(chunk, controller) {\n\t\t\t\tif (!sentStart) {\n\t\t\t\t\tsentStart = true;\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"stream-start\",\n\t\t\t\t\t\twarnings: warnings as [],\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tcontroller.enqueue(chunk);\n\t\t\t},\n\t\t\tflush(controller) {\n\t\t\t\tif (!sentStart) {\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"stream-start\",\n\t\t\t\t\t\twarnings: warnings as [],\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t},\n\t\t}),\n\t);\n}\n\n/**\n * Check if a streaming tool call chunk is a null-finalization sentinel.\n */\nfunction isNullFinalizationChunk(tc: Record<string, unknown>): boolean {\n\tconst fn = tc.function as Record<string, unknown> | undefined;\n\tconst name = fn?.name ?? tc.name ?? null;\n\tconst args = fn?.arguments ?? tc.arguments ?? null;\n\tconst id = tc.id ?? null;\n\treturn !id && !name && (!args || args === \"\");\n}\n\n/**\n * Maps a Workers AI SSE stream into AI SDK V3 LanguageModelV3StreamPart events.\n *\n * Uses a TransformStream pipeline for proper backpressure — chunks are emitted\n * one at a time as the downstream consumer pulls, not buffered eagerly.\n *\n * Handles two distinct formats:\n * 1. Native format: { response: \"chunk\", tool_calls: [...] }\n * 2. OpenAI format: { choices: [{ delta: { content: \"chunk\" } }] }\n */\nexport function getMappedStream(\n\tresponse: Response | ReadableStream<Uint8Array>,\n): ReadableStream<LanguageModelV3StreamPart> {\n\tconst rawStream =\n\t\tresponse instanceof ReadableStream\n\t\t\t? 
response\n\t\t\t: (response.body as ReadableStream<Uint8Array>);\n\n\tif (!rawStream) {\n\t\tthrow new Error(\"No readable stream available for SSE parsing.\");\n\t}\n\n\t// State shared across the transform\n\tlet usage: LanguageModelV3Usage = {\n\t\toutputTokens: { total: 0, text: undefined, reasoning: undefined },\n\t\tinputTokens: {\n\t\t\ttotal: 0,\n\t\t\tnoCache: undefined,\n\t\t\tcacheRead: undefined,\n\t\t\tcacheWrite: undefined,\n\t\t},\n\t\traw: { totalTokens: 0 },\n\t};\n\tlet textId: string | null = null;\n\tlet reasoningId: string | null = null;\n\tlet finishReason: LanguageModelV3FinishReason | null = null;\n\tlet receivedDone = false;\n\tlet receivedAnyData = false;\n\n\t// Track tool call streaming state per index.\n\t// When we see the first chunk for a tool call index, we emit tool-input-start.\n\t// Subsequent argument deltas emit tool-input-delta.\n\t// tool-input-end is emitted eagerly when a new tool index starts or a null\n\t// finalization chunk arrives; any remaining open calls are closed in flush().\n\tconst activeToolCalls = new Map<number, { id: string; toolName: string; args: string }>();\n\tconst closedToolCalls = new Set<number>();\n\tlet lastActiveToolIndex: number | null = null;\n\n\t// Step 1: Decode bytes into SSE lines\n\tconst sseStream = rawStream.pipeThrough(new SSEDecoder());\n\n\t// Step 2: Transform SSE events into LanguageModelV3StreamPart\n\treturn sseStream.pipeThrough(\n\t\tnew TransformStream<string, LanguageModelV3StreamPart>({\n\t\t\ttransform(data, controller) {\n\t\t\t\tif (!data || data === \"[DONE]\") {\n\t\t\t\t\tif (data === \"[DONE]\") receivedDone = true;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\treceivedAnyData = true;\n\t\t\t\tlet chunk: Record<string, unknown>;\n\t\t\t\ttry {\n\t\t\t\t\tchunk = JSON.parse(data);\n\t\t\t\t} catch {\n\t\t\t\t\tconsole.warn(\"[workers-ai-provider] failed to parse SSE event:\", data);\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tif (chunk.usage) {\n\t\t\t\t\tusage = 
mapWorkersAIUsage(chunk as Parameters<typeof mapWorkersAIUsage>[0]);\n\t\t\t\t}\n\n\t\t\t\t// Extract finish_reason\n\t\t\t\tconst choices = chunk.choices as\n\t\t\t\t\t| Array<{\n\t\t\t\t\t\t\tfinish_reason?: string;\n\t\t\t\t\t\t\tdelta?: Record<string, unknown>;\n\t\t\t\t\t }>\n\t\t\t\t\t| undefined;\n\t\t\t\tconst choiceFinishReason = choices?.[0]?.finish_reason;\n\t\t\t\tconst directFinishReason = chunk.finish_reason as string | undefined;\n\n\t\t\t\tif (choiceFinishReason != null) {\n\t\t\t\t\tfinishReason = mapWorkersAIFinishReason(choiceFinishReason);\n\t\t\t\t} else if (directFinishReason != null) {\n\t\t\t\t\tfinishReason = mapWorkersAIFinishReason(directFinishReason);\n\t\t\t\t}\n\n\t\t\t\t// --- Native format: top-level `response` field ---\n\t\t\t\tconst nativeResponse = chunk.response;\n\t\t\t\tif (nativeResponse != null && nativeResponse !== \"\") {\n\t\t\t\t\tconst responseText = String(nativeResponse);\n\t\t\t\t\tif (responseText.length > 0) {\n\t\t\t\t\t\t// Close active reasoning block before text starts\n\t\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (!textId) {\n\t\t\t\t\t\t\ttextId = generateId();\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"text-start\", id: textId });\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"text-delta\",\n\t\t\t\t\t\t\tid: textId,\n\t\t\t\t\t\t\tdelta: responseText,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// --- Native format: top-level `tool_calls` ---\n\t\t\t\tif (Array.isArray(chunk.tool_calls)) {\n\t\t\t\t\t// Close active reasoning block before tool calls start\n\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t}\n\t\t\t\t\temitToolCallDeltas(chunk.tool_calls as Record<string, unknown>[], controller);\n\t\t\t\t}\n\n\t\t\t\t// --- OpenAI format: 
choices[0].delta ---\n\t\t\t\tif (choices?.[0]?.delta) {\n\t\t\t\t\tconst delta = choices[0].delta;\n\n\t\t\t\t\tconst reasoningDelta = (delta.reasoning_content ?? delta.reasoning) as\n\t\t\t\t\t\t| string\n\t\t\t\t\t\t| undefined;\n\t\t\t\t\tif (reasoningDelta && reasoningDelta.length > 0) {\n\t\t\t\t\t\tif (!reasoningId) {\n\t\t\t\t\t\t\treasoningId = generateId();\n\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\ttype: \"reasoning-start\",\n\t\t\t\t\t\t\t\tid: reasoningId,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"reasoning-delta\",\n\t\t\t\t\t\t\tid: reasoningId,\n\t\t\t\t\t\t\tdelta: reasoningDelta,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\n\t\t\t\t\tconst textDelta = delta.content as string | undefined;\n\t\t\t\t\tif (textDelta && textDelta.length > 0) {\n\t\t\t\t\t\t// Close active reasoning block before text starts\n\t\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (!textId) {\n\t\t\t\t\t\t\ttextId = generateId();\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"text-start\", id: textId });\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"text-delta\",\n\t\t\t\t\t\t\tid: textId,\n\t\t\t\t\t\t\tdelta: textDelta,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\n\t\t\t\t\tconst deltaToolCalls = delta.tool_calls as\n\t\t\t\t\t\t| Record<string, unknown>[]\n\t\t\t\t\t\t| undefined;\n\t\t\t\t\tif (Array.isArray(deltaToolCalls)) {\n\t\t\t\t\t\t// Close active reasoning block before tool calls start\n\t\t\t\t\t\tif (reasoningId) {\n\t\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t\t\treasoningId = null;\n\t\t\t\t\t\t}\n\t\t\t\t\t\temitToolCallDeltas(deltaToolCalls, controller);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tflush(controller) {\n\t\t\t\t// Close any tool calls that weren't already closed during streaming\n\t\t\t\tfor (const [idx] of 
activeToolCalls) {\n\t\t\t\t\tif (closedToolCalls.has(idx)) continue;\n\t\t\t\t\tcloseToolCall(idx, controller);\n\t\t\t\t}\n\n\t\t\t\t// Close open text/reasoning blocks\n\t\t\t\tif (reasoningId) {\n\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t}\n\t\t\t\tif (textId) {\n\t\t\t\t\tcontroller.enqueue({ type: \"text-end\", id: textId });\n\t\t\t\t}\n\n\t\t\t\t// Detect premature termination\n\t\t\t\tconst effectiveFinishReason =\n\t\t\t\t\t!receivedDone && receivedAnyData && !finishReason\n\t\t\t\t\t\t? ({\n\t\t\t\t\t\t\t\tunified: \"error\",\n\t\t\t\t\t\t\t\traw: \"stream-truncated\",\n\t\t\t\t\t\t\t} as LanguageModelV3FinishReason)\n\t\t\t\t\t\t: (finishReason ?? { unified: \"stop\", raw: \"stop\" });\n\n\t\t\t\tcontroller.enqueue({\n\t\t\t\t\tfinishReason: effectiveFinishReason,\n\t\t\t\t\ttype: \"finish\",\n\t\t\t\t\tusage,\n\t\t\t\t});\n\t\t\t},\n\t\t}),\n\t);\n\n\t/**\n\t * Emit tool-input-end + tool-call for a tool call that is complete.\n\t */\n\tfunction closeToolCall(\n\t\tindex: number,\n\t\tcontroller: TransformStreamDefaultController<LanguageModelV3StreamPart>,\n\t) {\n\t\tconst tc = activeToolCalls.get(index);\n\t\tif (!tc || closedToolCalls.has(index)) return;\n\t\tclosedToolCalls.add(index);\n\t\tcontroller.enqueue({ type: \"tool-input-end\", id: tc.id });\n\t\tcontroller.enqueue({\n\t\t\ttype: \"tool-call\",\n\t\t\ttoolCallId: tc.id,\n\t\t\ttoolName: tc.toolName,\n\t\t\tinput: tc.args,\n\t\t});\n\t}\n\n\t/**\n\t * Emit incremental tool call events from streaming chunks.\n\t *\n\t * Workers AI streams tool calls as:\n\t * Chunk A: { id, type, index, function: { name } } — start\n\t * Chunk B: { index, function: { arguments: \"partial...\" } } — args delta\n\t * Chunk C: { index, function: { arguments: \"rest...\" } } — args delta\n\t * Chunk D: { id: null, type: null, function: { name: null } } — finalize\n\t *\n\t * We emit tool-input-start on first sight, tool-input-delta for each\n\t * argument chunk, and 
tool-input-end eagerly — either when a new tool\n\t * index starts (closing the previous one) or on a null finalization\n\t * chunk. Any remaining open calls are closed in flush().\n\t */\n\tfunction emitToolCallDeltas(\n\t\ttoolCalls: Record<string, unknown>[],\n\t\tcontroller: TransformStreamDefaultController<LanguageModelV3StreamPart>,\n\t) {\n\t\tfor (const tc of toolCalls) {\n\t\t\tif (isNullFinalizationChunk(tc)) {\n\t\t\t\t// Null finalization sentinel — close the last active tool call\n\t\t\t\tif (lastActiveToolIndex != null) {\n\t\t\t\t\tcloseToolCall(lastActiveToolIndex, controller);\n\t\t\t\t}\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tconst tcIndex = (tc.index as number) ?? 0;\n\t\t\tconst fn = tc.function as Record<string, unknown> | undefined;\n\t\t\tconst tcName = (fn?.name ?? tc.name ?? null) as string | null;\n\t\t\tconst tcArgs = (fn?.arguments ?? tc.arguments ?? null) as string | null;\n\t\t\tconst tcId = tc.id as string | null;\n\n\t\t\tif (!activeToolCalls.has(tcIndex)) {\n\t\t\t\t// A new tool call is starting — close the previous one first\n\t\t\t\tif (lastActiveToolIndex != null && lastActiveToolIndex !== tcIndex) {\n\t\t\t\t\tcloseToolCall(lastActiveToolIndex, controller);\n\t\t\t\t}\n\n\t\t\t\tconst id = tcId || generateId();\n\t\t\t\tconst toolName = tcName || \"\";\n\t\t\t\tactiveToolCalls.set(tcIndex, { id, toolName, args: \"\" });\n\t\t\t\tlastActiveToolIndex = tcIndex;\n\n\t\t\t\tcontroller.enqueue({\n\t\t\t\t\ttype: \"tool-input-start\",\n\t\t\t\t\tid,\n\t\t\t\t\ttoolName,\n\t\t\t\t});\n\n\t\t\t\tif (tcArgs != null && tcArgs !== \"\") {\n\t\t\t\t\tconst delta = typeof tcArgs === \"string\" ? 
tcArgs : JSON.stringify(tcArgs);\n\t\t\t\t\tactiveToolCalls.get(tcIndex)!.args += delta;\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"tool-input-delta\",\n\t\t\t\t\t\tid,\n\t\t\t\t\t\tdelta,\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconst active = activeToolCalls.get(tcIndex)!;\n\t\t\t\tlastActiveToolIndex = tcIndex;\n\t\t\t\tif (tcArgs != null && tcArgs !== \"\") {\n\t\t\t\t\tconst delta = typeof tcArgs === \"string\" ? tcArgs : JSON.stringify(tcArgs);\n\t\t\t\t\tactive.args += delta;\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"tool-input-delta\",\n\t\t\t\t\t\tid: active.id,\n\t\t\t\t\t\tdelta,\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * TransformStream that decodes a raw byte stream into SSE `data:` payloads.\n * Each output chunk is the string content after \"data: \" (one per SSE event).\n * Handles line buffering for partial chunks.\n */\nclass SSEDecoder extends TransformStream<Uint8Array, string> {\n\tconstructor() {\n\t\tlet buffer = \"\";\n\t\tconst decoder = new TextDecoder();\n\n\t\tsuper({\n\t\t\ttransform(chunk, controller) {\n\t\t\t\tbuffer += decoder.decode(chunk, { stream: true });\n\t\t\t\tconst lines = buffer.split(\"\\n\");\n\t\t\t\tbuffer = lines.pop() || \"\";\n\n\t\t\t\tfor (const line of lines) {\n\t\t\t\t\tconst trimmed = line.trim();\n\t\t\t\t\tif (!trimmed) continue;\n\t\t\t\t\tif (trimmed.startsWith(\"data: \")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(6));\n\t\t\t\t\t} else if (trimmed.startsWith(\"data:\")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(5));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tflush(controller) {\n\t\t\t\tif (buffer.trim()) {\n\t\t\t\t\tconst trimmed = buffer.trim();\n\t\t\t\t\tif (trimmed.startsWith(\"data: \")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(6));\n\t\t\t\t\t} else if (trimmed.startsWith(\"data:\")) {\n\t\t\t\t\t\tcontroller.enqueue(trimmed.slice(5));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t});\n\t}\n}\n","import type { LanguageModelV3, 
LanguageModelV3ToolCall } from \"@ai-sdk/provider\";\nimport { generateId } from \"ai\";\nimport type { WorkersAIChatPrompt } from \"./workersai-chat-prompt\";\n\n// ---------------------------------------------------------------------------\n// Workers AI quirk workarounds\n// ---------------------------------------------------------------------------\n\n/**\n * Normalize messages before passing to the Workers AI binding.\n *\n * The binding has strict schema validation that differs from the OpenAI API:\n * - `content` must not be null\n */\nexport function normalizeMessagesForBinding(messages: WorkersAIChatPrompt): WorkersAIChatPrompt {\n\treturn messages.map((msg) => {\n\t\tconst normalized = { ...msg };\n\n\t\t// content: null → content: \"\"\n\t\tif (normalized.content === null || normalized.content === undefined) {\n\t\t\t(normalized as { content: string }).content = \"\";\n\t\t}\n\n\t\treturn normalized;\n\t});\n}\n\n// ---------------------------------------------------------------------------\n// REST API client\n// ---------------------------------------------------------------------------\n\n/**\n * General AI run interface with overloads to handle distinct return types.\n */\nexport interface AiRun {\n\t<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"],\n\t\toptions: AiOptions & { returnRawResponse: true },\n\t): Promise<Response>;\n\n\t<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"] & { stream: true },\n\t\toptions?: AiOptions,\n\t): Promise<ReadableStream<Uint8Array>>;\n\n\t<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"],\n\t\toptions?: AiOptions,\n\t): Promise<AiModels[Name][\"postProcessedOutputs\"]>;\n}\n\n/**\n * Parameters for configuring the Cloudflare-based AI runner.\n */\nexport interface CreateRunConfig {\n\t/** Your Cloudflare account identifier. 
*/\n\taccountId: string;\n\t/** Cloudflare API token/key with appropriate permissions. */\n\tapiKey: string;\n\t/** Custom fetch implementation for intercepting requests. */\n\tfetch?: typeof globalThis.fetch;\n}\n\n/**\n * Creates a run method that emulates the Cloudflare Workers AI binding,\n * but uses the Cloudflare REST API under the hood.\n */\nexport function createRun(config: CreateRunConfig): AiRun {\n\tconst { accountId, apiKey } = config;\n\tconst fetchFn = config.fetch ?? globalThis.fetch;\n\n\treturn async function run<Name extends keyof AiModels>(\n\t\tmodel: Name,\n\t\tinputs: AiModels[Name][\"inputs\"],\n\t\toptions?: AiOptions & Record<string, unknown>,\n\t): Promise<Response | ReadableStream<Uint8Array> | AiModels[Name][\"postProcessedOutputs\"]> {\n\t\tconst {\n\t\t\tgateway,\n\t\t\tprefix: _prefix,\n\t\t\textraHeaders,\n\t\t\treturnRawResponse,\n\t\t\tsignal, // AbortSignal — not serializable as a query parameter\n\t\t\t...passthroughOptions\n\t\t} = options || {};\n\n\t\tconst urlParams = new URLSearchParams();\n\t\tfor (const [key, value] of Object.entries(passthroughOptions)) {\n\t\t\tif (value === undefined || value === null) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Value for option '${key}' is not able to be coerced into a string.`,\n\t\t\t\t);\n\t\t\t}\n\t\t\ttry {\n\t\t\t\tconst valueStr = String(value);\n\t\t\t\tif (!valueStr) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\turlParams.append(key, valueStr);\n\t\t\t} catch {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Value for option '${key}' is not able to be coerced into a string.`,\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\tconst queryString = urlParams.toString();\n\n\t\tconst modelPath = String(model).startsWith(\"run/\") ? model : `run/${model}`;\n\n\t\t// Build URL: use AI Gateway if gateway option is provided, otherwise direct API\n\t\tconst url = gateway?.id\n\t\t\t? `https://gateway.ai.cloudflare.com/v1/${accountId}/${gateway.id}/workers-ai/${modelPath}${\n\t\t\t\t\tqueryString ? 
`?${queryString}` : \"\"\n\t\t\t\t}`\n\t\t\t: `https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/${modelPath}${\n\t\t\t\t\tqueryString ? `?${queryString}` : \"\"\n\t\t\t\t}`;\n\n\t\tconst headers: Record<string, string> = {\n\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t...(extraHeaders && typeof extraHeaders === \"object\"\n\t\t\t\t? (extraHeaders as Record<string, string>)\n\t\t\t\t: {}),\n\t\t};\n\n\t\tif (gateway) {\n\t\t\tif (gateway.skipCache) {\n\t\t\t\theaders[\"cf-aig-skip-cache\"] = \"true\";\n\t\t\t}\n\t\t\tif (typeof gateway.cacheTtl === \"number\") {\n\t\t\t\theaders[\"cf-aig-cache-ttl\"] = String(gateway.cacheTtl);\n\t\t\t}\n\t\t\tif (gateway.cacheKey) {\n\t\t\t\theaders[\"cf-aig-cache-key\"] = gateway.cacheKey;\n\t\t\t}\n\t\t\tif (gateway.metadata) {\n\t\t\t\theaders[\"cf-aig-metadata\"] = JSON.stringify(gateway.metadata);\n\t\t\t}\n\t\t}\n\n\t\tconst body = JSON.stringify(inputs);\n\n\t\tconst response = await fetchFn(url, {\n\t\t\tbody,\n\t\t\theaders,\n\t\t\tmethod: \"POST\",\n\t\t\tsignal: signal as AbortSignal | undefined,\n\t\t});\n\n\t\t// Check for HTTP errors before processing\n\t\tif (!response.ok && !returnRawResponse) {\n\t\t\tlet errorBody: string;\n\t\t\ttry {\n\t\t\t\terrorBody = await response.text();\n\t\t\t} catch {\n\t\t\t\terrorBody = \"<unable to read response body>\";\n\t\t\t}\n\t\t\tthrow new Error(\n\t\t\t\t`Workers AI API error (${response.status} ${response.statusText}): ${errorBody}`,\n\t\t\t);\n\t\t}\n\n\t\tif (returnRawResponse) {\n\t\t\treturn response;\n\t\t}\n\n\t\tif ((inputs as AiTextGenerationInput).stream === true) {\n\t\t\tconst contentType = response.headers.get(\"content-type\") || \"\";\n\t\t\tif (contentType.includes(\"event-stream\") && response.body) {\n\t\t\t\treturn response.body;\n\t\t\t}\n\t\t\tif (response.body && !contentType.includes(\"json\")) {\n\t\t\t\t// Unknown content type — assume it's a stream\n\t\t\t\treturn 
response.body;\n\t\t\t}\n\n\t\t\t// Some models (e.g. GPT-OSS) don't support streaming via the /ai/run/\n\t\t\t// endpoint and return a JSON response with empty result instead of SSE.\n\t\t\t// Retry without streaming so doStream's graceful degradation path can\n\t\t\t// wrap the complete response as a synthetic stream.\n\t\t\t// Use the same URL (gateway or direct) as the original request.\n\t\t\tconst retryResponse = await fetchFn(url, {\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\t...(inputs as Record<string, unknown>),\n\t\t\t\t\tstream: false,\n\t\t\t\t}),\n\t\t\t\theaders,\n\t\t\t\tmethod: \"POST\",\n\t\t\t\tsignal: signal as AbortSignal | undefined,\n\t\t\t});\n\n\t\t\tif (!retryResponse.ok) {\n\t\t\t\tlet errorBody: string;\n\t\t\t\ttry {\n\t\t\t\t\terrorBody = await retryResponse.text();\n\t\t\t\t} catch {\n\t\t\t\t\terrorBody = \"<unable to read response body>\";\n\t\t\t\t}\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Workers AI API error (${retryResponse.status} ${retryResponse.statusText}): ${errorBody}`,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst retryData = await retryResponse.json<{\n\t\t\t\tresult: AiModels[Name][\"postProcessedOutputs\"];\n\t\t\t}>();\n\t\t\treturn retryData.result;\n\t\t}\n\n\t\tconst data = await response.json<{\n\t\t\tresult: AiModels[Name][\"postProcessedOutputs\"];\n\t\t}>();\n\t\treturn data.result;\n\t};\n}\n\n/**\n * Make a binary REST API call to Workers AI.\n *\n * Some models (e.g. `@cf/deepgram/nova-3`) require raw audio bytes\n * with an appropriate `Content-Type` header instead of JSON.\n *\n * @param config Credentials config\n * @param model Workers AI model name\n * @param audioBytes Raw audio bytes\n * @param contentType MIME type (e.g. 
\"audio/wav\")\n * @param signal Optional AbortSignal\n * @returns The parsed JSON response body\n */\nexport async function createRunBinary(\n\tconfig: CreateRunConfig,\n\tmodel: string,\n\taudioBytes: Uint8Array,\n\tcontentType: string,\n\tsignal?: AbortSignal,\n): Promise<Record<string, unknown>> {\n\tconst url = `https://api.cloudflare.com/client/v4/accounts/${config.accountId}/ai/run/${model}`;\n\n\tconst response = await fetch(url, {\n\t\tmethod: \"POST\",\n\t\theaders: {\n\t\t\tAuthorization: `Bearer ${config.apiKey}`,\n\t\t\t\"Content-Type\": contentType,\n\t\t},\n\t\tbody: audioBytes,\n\t\tsignal,\n\t});\n\n\tif (!response.ok) {\n\t\tlet errorBody: string;\n\t\ttry {\n\t\t\terrorBody = await response.text();\n\t\t} catch {\n\t\t\terrorBody = \"<unable to read response body>\";\n\t\t}\n\t\tthrow new Error(\n\t\t\t`Workers AI API error (${response.status} ${response.statusText}): ${errorBody}`,\n\t\t);\n\t}\n\n\tconst data = await response.json<{ result?: Record<string, unknown> }>();\n\treturn (data.result ?? data) as Record<string, unknown>;\n}\n\n// ---------------------------------------------------------------------------\n// Tool preparation\n// ---------------------------------------------------------------------------\n\nexport function prepareToolsAndToolChoice(\n\ttools: Parameters<LanguageModelV3[\"doGenerate\"]>[0][\"tools\"],\n\ttoolChoice: Parameters<LanguageModelV3[\"doGenerate\"]>[0][\"toolChoice\"],\n) {\n\tif (tools == null) {\n\t\treturn { tool_choice: undefined, tools: undefined };\n\t}\n\n\tconst mappedTools = tools.map((tool) => ({\n\t\tfunction: {\n\t\t\tdescription: tool.type === \"function\" ? tool.description : undefined,\n\t\t\tname: tool.name,\n\t\t\tparameters: tool.type === \"function\" ? 
tool.inputSchema : undefined,\n\t\t},\n\t\ttype: \"function\",\n\t}));\n\n\tif (toolChoice == null) {\n\t\treturn { tool_choice: undefined, tools: mappedTools };\n\t}\n\n\tconst type = toolChoice.type;\n\n\tswitch (type) {\n\t\tcase \"auto\":\n\t\t\treturn { tool_choice: type, tools: mappedTools };\n\t\tcase \"none\":\n\t\t\treturn { tool_choice: type, tools: mappedTools };\n\t\tcase \"required\":\n\t\t\treturn { tool_choice: \"required\", tools: mappedTools };\n\n\t\t// Workers AI does not support tool mode directly,\n\t\t// so we filter the tools and force the tool choice through 'required'\n\t\tcase \"tool\":\n\t\t\treturn {\n\t\t\t\ttool_choice: \"required\",\n\t\t\t\ttools: mappedTools.filter((tool) => tool.function.name === toolChoice.toolName),\n\t\t\t};\n\t\tdefault: {\n\t\t\tconst exhaustiveCheck = type satisfies never;\n\t\t\tthrow new Error(`Unsupported tool choice type: ${exhaustiveCheck}`);\n\t\t}\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Message helpers\n// ---------------------------------------------------------------------------\n\n// ---------------------------------------------------------------------------\n// Tool call processing\n// ---------------------------------------------------------------------------\n\n/** Workers AI flat tool call format (non-streaming, native) */\ninterface FlatToolCall {\n\tname: string;\n\targuments: unknown;\n\tid?: string;\n}\n\n/** Workers AI OpenAI-compatible tool call format */\ninterface OpenAIToolCall {\n\tid: string;\n\ttype: \"function\";\n\tfunction: {\n\t\tname: string;\n\t\targuments: unknown;\n\t};\n}\n\n/** Partial tool call from streaming (has index for merging) */\ninterface PartialToolCall {\n\tindex?: number;\n\tid?: string;\n\ttype?: string;\n\tfunction?: {\n\t\tname?: string;\n\t\targuments?: string;\n\t};\n\t// Flat format fields\n\tname?: string;\n\targuments?: string;\n}\n\nfunction mergePartialToolCalls(partialCalls: PartialToolCall[]) 
{\n\tconst mergedCallsByIndex: Record<\n\t\tnumber,\n\t\t{ function: { arguments: string; name: string }; id: string; type: string }\n\t> = {};\n\n\tfor (const partialCall of partialCalls) {\n\t\tconst index = partialCall.index ?? 0;\n\n\t\tif (!mergedCallsByIndex[index]) {\n\t\t\tmergedCallsByIndex[index] = {\n\t\t\t\tfunction: {\n\t\t\t\t\targuments: \"\",\n\t\t\t\t\tname: partialCall.function?.name || \"\",\n\t\t\t\t},\n\t\t\t\tid: partialCall.id || \"\",\n\t\t\t\ttype: partialCall.type || \"\",\n\t\t\t};\n\t\t} else {\n\t\t\tif (partialCall.id) {\n\t\t\t\tmergedCallsByIndex[index].id = partialCall.id;\n\t\t\t}\n\t\t\tif (partialCall.type) {\n\t\t\t\tmergedCallsByIndex[index].type = partialCall.type;\n\t\t\t}\n\t\t\tif (partialCall.function?.name) {\n\t\t\t\tmergedCallsByIndex[index].function.name = partialCall.function.name;\n\t\t\t}\n\t\t}\n\n\t\t// Append arguments if available (they arrive in order during streaming)\n\t\tif (partialCall.function?.arguments) {\n\t\t\tmergedCallsByIndex[index].function.arguments += partialCall.function.arguments;\n\t\t}\n\t}\n\n\treturn Object.values(mergedCallsByIndex);\n}\n\nfunction processToolCall(toolCall: FlatToolCall | OpenAIToolCall): LanguageModelV3ToolCall {\n\t// OpenAI format: has function.name (the key discriminator)\n\tconst fn =\n\t\t\"function\" in toolCall && typeof toolCall.function === \"object\" && toolCall.function\n\t\t\t? (toolCall.function as { name?: string; arguments?: unknown })\n\t\t\t: null;\n\n\tif (fn?.name) {\n\t\treturn {\n\t\t\tinput:\n\t\t\t\ttypeof fn.arguments === \"string\"\n\t\t\t\t\t? fn.arguments\n\t\t\t\t\t: JSON.stringify(fn.arguments || {}),\n\t\t\ttoolCallId: toolCall.id || generateId(),\n\t\t\ttype: \"tool-call\",\n\t\t\ttoolName: fn.name,\n\t\t};\n\t}\n\n\t// Flat format (native Workers AI non-streaming): has top-level name\n\tconst flat = toolCall as FlatToolCall;\n\treturn {\n\t\tinput:\n\t\t\ttypeof flat.arguments === \"string\"\n\t\t\t\t? 
flat.arguments\n\t\t\t\t: JSON.stringify(flat.arguments || {}),\n\t\ttoolCallId: flat.id || generateId(),\n\t\ttype: \"tool-call\",\n\t\ttoolName: flat.name,\n\t};\n}\n\nexport function processToolCalls(output: Record<string, unknown>): LanguageModelV3ToolCall[] {\n\tif (output.tool_calls && Array.isArray(output.tool_calls)) {\n\t\treturn output.tool_calls.map((toolCall: FlatToolCall | OpenAIToolCall) =>\n\t\t\tprocessToolCall(toolCall),\n\t\t);\n\t}\n\n\tconst choices = output.choices as\n\t\t| Array<{ message?: { tool_calls?: Array<FlatToolCall | OpenAIToolCall> } }>\n\t\t| undefined;\n\tif (choices?.[0]?.message?.tool_calls && Array.isArray(choices[0].message.tool_calls)) {\n\t\treturn choices[0].message.tool_calls.map((toolCall) => processToolCall(toolCall));\n\t}\n\n\treturn [];\n}\n\nexport function processPartialToolCalls(partialToolCalls: PartialToolCall[]) {\n\tconst mergedToolCalls = mergePartialToolCalls(partialToolCalls);\n\treturn processToolCalls({ tool_calls: mergedToolCalls });\n}\n\n// ---------------------------------------------------------------------------\n// Text extraction\n// ---------------------------------------------------------------------------\n\n/**\n * Extract text from a Workers AI response, handling multiple response formats:\n * - OpenAI format: { choices: [{ message: { content: \"...\" } }] }\n * - Native format: { response: \"...\" }\n * - Structured output quirk: { response: { ... } } (object instead of string)\n * - Structured output quirk: { response: \"{ ... 
}\" } (JSON string)\n */\nexport function processText(output: Record<string, unknown>): string | undefined {\n\t// OpenAI format\n\tconst choices = output.choices as Array<{ message?: { content?: string | null } }> | undefined;\n\tconst choiceContent = choices?.[0]?.message?.content;\n\tif (choiceContent != null && String(choiceContent).length > 0) {\n\t\treturn String(choiceContent);\n\t}\n\n\tif (\"response\" in output) {\n\t\tconst response = output.response;\n\t\t// Object response (structured output quirk #2)\n\t\tif (typeof response === \"object\" && response !== null) {\n\t\t\treturn JSON.stringify(response);\n\t\t}\n\t\t// Numeric response (quirk #9)\n\t\tif (typeof response === \"number\") {\n\t\t\treturn String(response);\n\t\t}\n\t\t// Null response (e.g., tool-call-only responses)\n\t\tif (response === null || response === undefined) {\n\t\t\treturn undefined;\n\t\t}\n\t\treturn String(response);\n\t}\n\treturn undefined;\n}\n","import type { LanguageModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\n\nimport type { AISearchChatSettings } from \"./aisearch-chat-settings\";\nimport { convertToWorkersAIChatMessages } from \"./convert-to-workersai-chat-messages\";\nimport { mapWorkersAIUsage } from \"./map-workersai-usage\";\nimport { getMappedStream, prependStreamStart } from \"./streaming\";\nimport { processToolCalls } from \"./utils\";\nimport type { TextGenerationModels } from \"./workersai-models\";\n\ntype AISearchChatConfig = {\n\tprovider: string;\n\tbinding: AutoRAG;\n\tgateway?: GatewayOptions;\n};\n\nexport class AISearchChatLanguageModel implements LanguageModelV3 {\n\treadonly specificationVersion = \"v3\";\n\treadonly defaultObjectGenerationMode = \"json\";\n\n\treadonly supportedUrls: Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>> = {};\n\n\treadonly modelId: TextGenerationModels;\n\treadonly settings: AISearchChatSettings;\n\n\tprivate readonly config: AISearchChatConfig;\n\n\tconstructor(\n\t\tmodelId: 
TextGenerationModels,\n\t\tsettings: AISearchChatSettings,\n\t\tconfig: AISearchChatConfig,\n\t) {\n\t\tthis.modelId = modelId;\n\t\tthis.settings = settings;\n\t\tthis.config = config;\n\t}\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tprivate getWarnings({\n\t\ttools,\n\t\tfrequencyPenalty,\n\t\tpresencePenalty,\n\t\tresponseFormat,\n\t}: Parameters<LanguageModelV3[\"doGenerate\"]>[0]): SharedV3Warning[] {\n\t\tconst warnings: SharedV3Warning[] = [];\n\n\t\tif (tools != null && tools.length > 0) {\n\t\t\tconsole.warn(\n\t\t\t\t\"[workers-ai-provider] Tools are not supported by AI Search. They will be ignored.\",\n\t\t\t);\n\t\t\twarnings.push({ feature: \"tools\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (frequencyPenalty != null) {\n\t\t\twarnings.push({ feature: \"frequencyPenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (presencePenalty != null) {\n\t\t\twarnings.push({ feature: \"presencePenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (responseFormat?.type === \"json\") {\n\t\t\twarnings.push({ feature: \"responseFormat\", type: \"unsupported\" });\n\t\t}\n\n\t\treturn warnings;\n\t}\n\n\t/**\n\t * Build the search query from messages.\n\t * Flattens the conversation into a single string for aiSearch.\n\t */\n\tprivate buildQuery(prompt: Parameters<LanguageModelV3[\"doGenerate\"]>[0][\"prompt\"]): string {\n\t\tconst { messages } = convertToWorkersAIChatMessages(prompt);\n\t\treturn messages.map(({ content, role }) => `${role}: ${content}`).join(\"\\n\\n\");\n\t}\n\n\tasync doGenerate(\n\t\toptions: Parameters<LanguageModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doGenerate\"]>>> {\n\t\tconst warnings = this.getWarnings(options);\n\t\tconst query = this.buildQuery(options.prompt);\n\n\t\tconst output = await this.config.binding.aiSearch({ query });\n\n\t\treturn {\n\t\t\tfinishReason: { unified: \"stop\", raw: \"stop\" },\n\t\t\tcontent: [\n\t\t\t\t...output.data.map(({ file_id, 
filename, score }) => ({\n\t\t\t\t\ttype: \"source\" as const,\n\t\t\t\t\tsourceType: \"url\" as const,\n\t\t\t\t\tid: file_id,\n\t\t\t\t\turl: filename,\n\t\t\t\t\tproviderMetadata: {\n\t\t\t\t\t\tattributes: { score },\n\t\t\t\t\t},\n\t\t\t\t})),\n\t\t\t\t{\n\t\t\t\t\ttype: \"text\" as const,\n\t\t\t\t\ttext: output.response,\n\t\t\t\t},\n\t\t\t\t...processToolCalls(output as unknown as Record<string, unknown>),\n\t\t\t],\n\t\t\tusage: mapWorkersAIUsage(output as unknown as Record<string, unknown>),\n\t\t\twarnings,\n\t\t};\n\t}\n\n\tasync doStream(\n\t\toptions: Parameters<LanguageModelV3[\"doStream\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doStream\"]>>> {\n\t\tconst warnings = this.getWarnings(options);\n\t\tconst query = this.buildQuery(options.prompt);\n\n\t\tconst response = await this.config.binding.aiSearch({\n\t\t\tquery,\n\t\t\tstream: true,\n\t\t});\n\n\t\treturn {\n\t\t\tstream: prependStreamStart(\n\t\t\t\tgetMappedStream(response as unknown as Response | ReadableStream<Uint8Array>),\n\t\t\t\twarnings,\n\t\t\t),\n\t\t};\n\t}\n}\n","import type {\n\tEmbeddingModelV3,\n\tEmbeddingModelV3CallOptions,\n\tEmbeddingModelV3Result,\n} from \"@ai-sdk/provider\";\nimport { TooManyEmbeddingValuesForCallError } from \"@ai-sdk/provider\";\nimport type { EmbeddingModels } from \"./workersai-models\";\n\nexport type WorkersAIEmbeddingConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\nexport type WorkersAIEmbeddingSettings = {\n\tgateway?: GatewayOptions;\n\tmaxEmbeddingsPerCall?: number;\n\tsupportsParallelCalls?: boolean;\n\n\t/**\n\t * Passthrough settings that are provided directly to the run function.\n\t */\n\t[key: string]: unknown;\n};\n\nexport class WorkersAIEmbeddingModel implements EmbeddingModelV3 {\n\treadonly specificationVersion = \"v3\";\n\treadonly modelId: EmbeddingModels;\n\tprivate readonly config: WorkersAIEmbeddingConfig;\n\tprivate readonly settings: WorkersAIEmbeddingSettings;\n\n\tget 
provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tget maxEmbeddingsPerCall(): number {\n\t\t// https://developers.cloudflare.com/workers-ai/platform/limits/#text-embeddings\n\t\treturn this.settings.maxEmbeddingsPerCall ?? 3000;\n\t}\n\n\tget supportsParallelCalls(): boolean {\n\t\treturn this.settings.supportsParallelCalls ?? true;\n\t}\n\n\tconstructor(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings: WorkersAIEmbeddingSettings,\n\t\tconfig: WorkersAIEmbeddingConfig,\n\t) {\n\t\tthis.modelId = modelId;\n\t\tthis.settings = settings;\n\t\tthis.config = config;\n\t}\n\n\tasync doEmbed({\n\t\tvalues,\n\t\tabortSignal,\n\t}: EmbeddingModelV3CallOptions): Promise<EmbeddingModelV3Result> {\n\t\tif (values.length > this.maxEmbeddingsPerCall) {\n\t\t\tthrow new TooManyEmbeddingValuesForCallError({\n\t\t\t\tmaxEmbeddingsPerCall: this.maxEmbeddingsPerCall,\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\tprovider: this.provider,\n\t\t\t\tvalues,\n\t\t\t});\n\t\t}\n\n\t\tconst {\n\t\t\tgateway,\n\t\t\tmaxEmbeddingsPerCall: _maxEmbeddingsPerCall,\n\t\t\tsupportsParallelCalls: _supportsParallelCalls,\n\t\t\t...passthroughOptions\n\t\t} = this.settings;\n\n\t\tconst response = await this.config.binding.run(\n\t\t\tthis.modelId as keyof AiModels,\n\t\t\t{\n\t\t\t\ttext: values,\n\t\t\t},\n\t\t\t{\n\t\t\t\tgateway: this.config.gateway ?? 
gateway,\n\t\t\t\tsignal: abortSignal,\n\t\t\t\t...passthroughOptions,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\treturn {\n\t\t\tembeddings: (response as { data: number[][] }).data,\n\t\t\twarnings: [],\n\t\t};\n\t}\n}\n","import type { LanguageModelV3, SharedV3Warning, LanguageModelV3StreamPart } from \"@ai-sdk/provider\";\nimport { generateId } from \"ai\";\nimport { convertToWorkersAIChatMessages } from \"./convert-to-workersai-chat-messages\";\nimport { mapWorkersAIFinishReason } from \"./map-workersai-finish-reason\";\nimport { mapWorkersAIUsage } from \"./map-workersai-usage\";\nimport { getMappedStream, prependStreamStart } from \"./streaming\";\nimport {\n\tnormalizeMessagesForBinding,\n\tprepareToolsAndToolChoice,\n\tprocessText,\n\tprocessToolCalls,\n} from \"./utils\";\nimport type { WorkersAIChatSettings } from \"./workersai-chat-settings\";\nimport type { TextGenerationModels } from \"./workersai-models\";\n\ntype WorkersAIChatConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n\t/** True when using a real Workers AI binding (not the REST shim). 
*/\n\tisBinding: boolean;\n};\n\nexport class WorkersAIChatLanguageModel implements LanguageModelV3 {\n\treadonly specificationVersion = \"v3\";\n\treadonly defaultObjectGenerationMode = \"json\";\n\n\treadonly supportedUrls: Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>> = {};\n\n\treadonly modelId: TextGenerationModels;\n\treadonly settings: WorkersAIChatSettings;\n\n\tprivate readonly config: WorkersAIChatConfig;\n\n\tconstructor(\n\t\tmodelId: TextGenerationModels,\n\t\tsettings: WorkersAIChatSettings,\n\t\tconfig: WorkersAIChatConfig,\n\t) {\n\t\tthis.modelId = modelId;\n\t\tthis.settings = settings;\n\t\tthis.config = config;\n\t}\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tprivate getArgs({\n\t\tresponseFormat,\n\t\ttools,\n\t\ttoolChoice,\n\t\tmaxOutputTokens,\n\t\ttemperature,\n\t\ttopP,\n\t\tfrequencyPenalty,\n\t\tpresencePenalty,\n\t\tseed,\n\t}: Parameters<LanguageModelV3[\"doGenerate\"]>[0]) {\n\t\tconst type = responseFormat?.type ?? 
\"text\";\n\n\t\tconst warnings: SharedV3Warning[] = [];\n\n\t\tif (frequencyPenalty != null) {\n\t\t\twarnings.push({ feature: \"frequencyPenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tif (presencePenalty != null) {\n\t\t\twarnings.push({ feature: \"presencePenalty\", type: \"unsupported\" });\n\t\t}\n\n\t\tconst baseArgs = {\n\t\t\tmax_tokens: maxOutputTokens,\n\t\t\tmodel: this.modelId,\n\t\t\trandom_seed: seed,\n\t\t\tsafe_prompt: this.settings.safePrompt,\n\t\t\ttemperature,\n\t\t\ttop_p: topP,\n\t\t};\n\n\t\tswitch (type) {\n\t\t\tcase \"text\": {\n\t\t\t\treturn {\n\t\t\t\t\targs: {\n\t\t\t\t\t\t...baseArgs,\n\t\t\t\t\t\tresponse_format: undefined as\n\t\t\t\t\t\t\t| { type: string; json_schema?: unknown }\n\t\t\t\t\t\t\t| undefined,\n\t\t\t\t\t\t...prepareToolsAndToolChoice(tools, toolChoice),\n\t\t\t\t\t},\n\t\t\t\t\twarnings,\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tcase \"json\": {\n\t\t\t\treturn {\n\t\t\t\t\targs: {\n\t\t\t\t\t\t...baseArgs,\n\t\t\t\t\t\tresponse_format: {\n\t\t\t\t\t\t\ttype: \"json_schema\",\n\t\t\t\t\t\t\tjson_schema:\n\t\t\t\t\t\t\t\tresponseFormat?.type === \"json\" ? responseFormat.schema : undefined,\n\t\t\t\t\t\t},\n\t\t\t\t\t\ttools: undefined,\n\t\t\t\t\t\ttool_choice: undefined,\n\t\t\t\t\t},\n\t\t\t\t\twarnings,\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tdefault: {\n\t\t\t\tconst exhaustiveCheck = type satisfies never;\n\t\t\t\tthrow new Error(`Unsupported type: ${exhaustiveCheck}`);\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Build the inputs object for `binding.run()`, shared by doGenerate and doStream.\n\t *\n\t * Images are embedded inline in messages as OpenAI-compatible content\n\t * arrays with `image_url` parts. Both the REST API and the binding\n\t * accept this format at runtime.\n\t *\n\t * The binding path additionally normalises null content to empty strings.\n\t *\n\t * Reasoning controls (`reasoning_effort`, `chat_template_kwargs`) are\n\t * forwarded here from settings. 
These belong on the INPUTS object, not on\n\t * the 3rd-arg options / REST query string — see\n\t * https://github.com/cloudflare/ai/issues/501. Per-call values from\n\t * `providerOptions[\"workers-ai\"]` override settings.\n\t *\n\t * `reasoning_effort: null` is a valid value (\"disable reasoning\"), so we\n\t * check `!== undefined` rather than truthiness.\n\t */\n\tprivate buildRunInputs(\n\t\targs: ReturnType<typeof this.getArgs>[\"args\"],\n\t\tmessages: ReturnType<typeof convertToWorkersAIChatMessages>[\"messages\"],\n\t\toptions?: { stream?: boolean; providerOptions?: Record<string, unknown> },\n\t) {\n\t\t// The AI SDK types this as `Record<string, JSONObject>` but we defensively\n\t\t// accept anything and only treat it as a lookup if it's a plain object.\n\t\t// `\"key\" in x` throws for primitives, so we can't skip the typeof guard.\n\t\tconst rawPerCall = options?.providerOptions?.[\"workers-ai\"];\n\t\tconst perCall: Record<string, unknown> =\n\t\t\trawPerCall !== null && typeof rawPerCall === \"object\" && !Array.isArray(rawPerCall)\n\t\t\t\t? (rawPerCall as Record<string, unknown>)\n\t\t\t\t: {};\n\t\tconst reasoningEffort =\n\t\t\t\"reasoning_effort\" in perCall\n\t\t\t\t? perCall.reasoning_effort\n\t\t\t\t: this.settings.reasoning_effort;\n\t\tconst chatTemplateKwargs =\n\t\t\t\"chat_template_kwargs\" in perCall\n\t\t\t\t? perCall.chat_template_kwargs\n\t\t\t\t: this.settings.chat_template_kwargs;\n\n\t\treturn {\n\t\t\tmax_tokens: args.max_tokens,\n\t\t\tmessages: this.config.isBinding ? normalizeMessagesForBinding(messages) : messages,\n\t\t\ttemperature: args.temperature,\n\t\t\ttools: args.tools,\n\t\t\t...(args.tool_choice ? { tool_choice: args.tool_choice } : {}),\n\t\t\ttop_p: args.top_p,\n\t\t\t...(args.response_format ? { response_format: args.response_format } : {}),\n\t\t\t...(options?.stream ? { stream: true } : {}),\n\t\t\t...(reasoningEffort !== undefined ? 
{ reasoning_effort: reasoningEffort } : {}),\n\t\t\t...(chatTemplateKwargs !== undefined\n\t\t\t\t? { chat_template_kwargs: chatTemplateKwargs }\n\t\t\t\t: {}),\n\t\t};\n\t}\n\n\t/**\n\t * Get passthrough options for binding.run() from settings.\n\t *\n\t * `reasoning_effort` and `chat_template_kwargs` are explicitly excluded\n\t * here — they belong on the `inputs` object (see `buildRunInputs`), not on\n\t * the `options` (3rd) arg of binding.run() or the REST query string.\n\t */\n\tprivate getRunOptions() {\n\t\tconst {\n\t\t\tgateway,\n\t\t\tsafePrompt: _safePrompt,\n\t\t\tsessionAffinity,\n\t\t\textraHeaders,\n\t\t\treasoning_effort: _reasoningEffort,\n\t\t\tchat_template_kwargs: _chatTemplateKwargs,\n\t\t\t...passthroughOptions\n\t\t} = this.settings;\n\n\t\tconst mergedHeaders = {\n\t\t\t...(extraHeaders && typeof extraHeaders === \"object\"\n\t\t\t\t? (extraHeaders as Record<string, string>)\n\t\t\t\t: {}),\n\t\t\t...(sessionAffinity ? { \"x-session-affinity\": sessionAffinity } : {}),\n\t\t};\n\n\t\treturn {\n\t\t\tgateway: this.config.gateway ?? gateway,\n\t\t\t...(Object.keys(mergedHeaders).length > 0 ? 
{ extraHeaders: mergedHeaders } : {}),\n\t\t\t...passthroughOptions,\n\t\t};\n\t}\n\n\tasync doGenerate(\n\t\toptions: Parameters<LanguageModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doGenerate\"]>>> {\n\t\tconst { args, warnings } = this.getArgs(options);\n\t\tconst { messages } = convertToWorkersAIChatMessages(options.prompt);\n\n\t\tconst inputs = this.buildRunInputs(args, messages, {\n\t\t\tproviderOptions: options.providerOptions,\n\t\t});\n\t\tconst runOptions = this.getRunOptions();\n\n\t\tconst output = await this.config.binding.run(\n\t\t\targs.model as keyof AiModels,\n\t\t\tinputs as AiModels[keyof AiModels][\"inputs\"],\n\t\t\t{\n\t\t\t\t...runOptions,\n\t\t\t\tsignal: options.abortSignal,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\tif (output instanceof ReadableStream) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Unexpected streaming response from non-streaming request. Check that `stream: true` was not passed.\",\n\t\t\t);\n\t\t}\n\n\t\tconst outputRecord = output as Record<string, unknown>;\n\t\tconst choices = outputRecord.choices as\n\t\t\t| Array<{\n\t\t\t\t\tmessage?: { reasoning_content?: string; reasoning?: string };\n\t\t\t }>\n\t\t\t| undefined;\n\t\tconst reasoningContent =\n\t\t\tchoices?.[0]?.message?.reasoning_content ?? choices?.[0]?.message?.reasoning;\n\n\t\treturn {\n\t\t\tfinishReason: mapWorkersAIFinishReason(outputRecord),\n\t\t\tcontent: [\n\t\t\t\t...(reasoningContent\n\t\t\t\t\t? [{ type: \"reasoning\" as const, text: reasoningContent }]\n\t\t\t\t\t: []),\n\t\t\t\t{\n\t\t\t\t\ttype: \"text\",\n\t\t\t\t\ttext: processText(outputRecord) ?? 
\"\",\n\t\t\t\t},\n\t\t\t\t...processToolCalls(outputRecord),\n\t\t\t],\n\t\t\tusage: mapWorkersAIUsage(output as Record<string, unknown>),\n\t\t\twarnings,\n\t\t};\n\t}\n\n\tasync doStream(\n\t\toptions: Parameters<LanguageModelV3[\"doStream\"]>[0],\n\t): Promise<Awaited<ReturnType<LanguageModelV3[\"doStream\"]>>> {\n\t\tconst { args, warnings } = this.getArgs(options);\n\t\tconst { messages } = convertToWorkersAIChatMessages(options.prompt);\n\n\t\tconst inputs = this.buildRunInputs(args, messages, {\n\t\t\tstream: true,\n\t\t\tproviderOptions: options.providerOptions,\n\t\t});\n\t\tconst runOptions = this.getRunOptions();\n\n\t\tconst response = await this.config.binding.run(\n\t\t\targs.model as keyof AiModels,\n\t\t\tinputs as AiModels[keyof AiModels][\"inputs\"],\n\t\t\t{\n\t\t\t\t...runOptions,\n\t\t\t\tsignal: options.abortSignal,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\t// If the binding returned a stream, pipe it through the SSE mapper\n\t\tif (response instanceof ReadableStream) {\n\t\t\treturn {\n\t\t\t\tstream: prependStreamStart(getMappedStream(response), warnings),\n\t\t\t};\n\t\t}\n\n\t\t// Graceful degradation: some models return a non-streaming response even\n\t\t// when stream:true is requested. Wrap the complete response as a stream.\n\t\tconst outputRecord = response as Record<string, unknown>;\n\t\tconst choices = outputRecord.choices as\n\t\t\t| Array<{\n\t\t\t\t\tmessage?: { reasoning_content?: string; reasoning?: string };\n\t\t\t }>\n\t\t\t| undefined;\n\t\tconst reasoningContent =\n\t\t\tchoices?.[0]?.message?.reasoning_content ?? 
choices?.[0]?.message?.reasoning;\n\n\t\tlet textId: string | null = null;\n\t\tlet reasoningId: string | null = null;\n\n\t\treturn {\n\t\t\tstream: new ReadableStream<LanguageModelV3StreamPart>({\n\t\t\t\tstart(controller) {\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"stream-start\",\n\t\t\t\t\t\twarnings: warnings as SharedV3Warning[],\n\t\t\t\t\t});\n\n\t\t\t\t\tif (reasoningContent) {\n\t\t\t\t\t\treasoningId = generateId();\n\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-start\", id: reasoningId });\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: \"reasoning-delta\",\n\t\t\t\t\t\t\tid: reasoningId,\n\t\t\t\t\t\t\tdelta: reasoningContent,\n\t\t\t\t\t\t});\n\t\t\t\t\t\tcontroller.enqueue({ type: \"reasoning-end\", id: reasoningId });\n\t\t\t\t\t}\n\n\t\t\t\t\tconst text = processText(outputRecord);\n\t\t\t\t\tif (text) {\n\t\t\t\t\t\ttextId = generateId();\n\t\t\t\t\t\tcontroller.enqueue({ type: \"text-start\", id: textId });\n\t\t\t\t\t\tcontroller.enqueue({ type: \"text-delta\", id: textId, delta: text });\n\t\t\t\t\t\tcontroller.enqueue({ type: \"text-end\", id: textId });\n\t\t\t\t\t}\n\n\t\t\t\t\tfor (const toolCall of processToolCalls(outputRecord)) {\n\t\t\t\t\t\tcontroller.enqueue(toolCall);\n\t\t\t\t\t}\n\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: \"finish\",\n\t\t\t\t\t\tfinishReason: mapWorkersAIFinishReason(outputRecord),\n\t\t\t\t\t\tusage: mapWorkersAIUsage(response as Record<string, unknown>),\n\t\t\t\t\t});\n\t\t\t\t\tcontroller.close();\n\t\t\t\t},\n\t\t\t}),\n\t\t};\n\t}\n}\n","import type { ImageModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAIImageSettings } from \"./workersai-image-settings\";\nimport type { ImageGenerationModels } from \"./workersai-models\";\n\nexport type WorkersAIImageConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\nexport class WorkersAIImageModel implements ImageModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget 
maxImagesPerCall(): number {\n\t\treturn this.settings.maxImagesPerCall ?? 1;\n\t}\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: ImageGenerationModels,\n\t\treadonly settings: WorkersAIImageSettings,\n\t\treadonly config: WorkersAIImageConfig,\n\t) {}\n\n\tasync doGenerate({\n\t\tprompt,\n\t\tn,\n\t\tsize,\n\t\taspectRatio,\n\t\tseed,\n\t\tabortSignal,\n\t}: Parameters<ImageModelV3[\"doGenerate\"]>[0]): Promise<\n\t\tAwaited<ReturnType<ImageModelV3[\"doGenerate\"]>>\n\t> {\n\t\tconst { width, height } = getDimensionsFromSizeString(size);\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\tif (aspectRatio != null) {\n\t\t\twarnings.push({\n\t\t\t\tdetails: \"This model does not support aspect ratio. Use `size` instead.\",\n\t\t\t\tfeature: \"aspectRatio\",\n\t\t\t\ttype: \"unsupported\",\n\t\t\t});\n\t\t}\n\n\t\tconst generateImage = async () => {\n\t\t\tconst output = (await this.config.binding.run(\n\t\t\t\tthis.modelId as keyof AiModels,\n\t\t\t\t{\n\t\t\t\t\theight,\n\t\t\t\t\tprompt: prompt ?? \"\",\n\t\t\t\t\tseed,\n\t\t\t\t\twidth,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tgateway: this.config.gateway,\n\t\t\t\t\tsignal: abortSignal,\n\t\t\t\t} as AiOptions,\n\t\t\t)) as unknown;\n\n\t\t\treturn toUint8Array(output);\n\t\t};\n\n\t\tconst images: Uint8Array[] = await Promise.all(\n\t\t\tArray.from({ length: n }, () => generateImage()),\n\t\t);\n\n\t\treturn {\n\t\t\timages,\n\t\t\tresponse: {\n\t\t\t\theaders: {},\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\ttimestamp: new Date(),\n\t\t\t},\n\t\t\twarnings,\n\t\t};\n\t}\n}\n\nfunction getDimensionsFromSizeString(size: string | undefined) {\n\tconst [width, height] = size?.split(\"x\") ?? 
[undefined, undefined];\n\n\treturn {\n\t\theight: parseInteger(height),\n\t\twidth: parseInteger(width),\n\t};\n}\n\nfunction parseInteger(value?: string) {\n\tif (value === \"\" || !value) return undefined;\n\tconst number = Number(value);\n\treturn Number.isInteger(number) ? number : undefined;\n}\n\n/**\n * Convert various output types from binding.run() to Uint8Array.\n * Workers AI image models return different types depending on the runtime:\n * - ReadableStream<Uint8Array> (most common in workerd)\n * - Uint8Array / ArrayBuffer (direct binary)\n * - Response (needs .arrayBuffer())\n * - { image: string } with base64 data\n */\nasync function toUint8Array(output: unknown): Promise<Uint8Array> {\n\tif (output instanceof Uint8Array) {\n\t\treturn output;\n\t}\n\tif (output instanceof ArrayBuffer) {\n\t\treturn new Uint8Array(output);\n\t}\n\tif (output instanceof ReadableStream) {\n\t\tconst reader = (output as ReadableStream<Uint8Array>).getReader();\n\t\tconst chunks: Uint8Array[] = [];\n\t\tlet totalLength = 0;\n\t\twhile (true) {\n\t\t\tconst { done, value } = await reader.read();\n\t\t\tif (done) break;\n\t\t\tchunks.push(value);\n\t\t\ttotalLength += value.length;\n\t\t}\n\t\tconst result = new Uint8Array(totalLength);\n\t\tlet offset = 0;\n\t\tfor (const chunk of chunks) {\n\t\t\tresult.set(chunk, offset);\n\t\t\toffset += chunk.length;\n\t\t}\n\t\treturn result;\n\t}\n\t// Response object (e.g., from REST shim)\n\tif (output instanceof Response) {\n\t\treturn new Uint8Array(await output.arrayBuffer());\n\t}\n\t// Object with binary-like properties\n\tif (typeof output === \"object\" && output !== null) {\n\t\tconst obj = output as Record<string, unknown>;\n\t\t// { image: base64string }\n\t\tif (typeof obj.image === \"string\") {\n\t\t\treturn Uint8Array.from(atob(obj.image), (c) => c.charCodeAt(0));\n\t\t}\n\t\t// { data: Uint8Array }\n\t\tif (obj.data instanceof Uint8Array) {\n\t\t\treturn obj.data;\n\t\t}\n\t\t// { data: ArrayBuffer }\n\t\tif 
(obj.data instanceof ArrayBuffer) {\n\t\t\treturn new Uint8Array(obj.data);\n\t\t}\n\t\t// Try to get a body if it looks response-like\n\t\tif (typeof obj.arrayBuffer === \"function\") {\n\t\t\treturn new Uint8Array(await (obj as unknown as Response).arrayBuffer());\n\t\t}\n\t}\n\tthrow new Error(\n\t\t`Unexpected output type from image model. Got ${Object.prototype.toString.call(output)} with keys: ${\n\t\t\ttypeof output === \"object\" && output !== null\n\t\t\t\t? JSON.stringify(Object.keys(output))\n\t\t\t\t: \"N/A\"\n\t\t}`,\n\t);\n}\n","import type { TranscriptionModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAITranscriptionSettings } from \"./workersai-transcription-settings\";\nimport type { TranscriptionModels } from \"./workersai-models\";\nimport { createRunBinary, type CreateRunConfig } from \"./utils\";\n\nexport type WorkersAITranscriptionConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n\t/**\n\t * Whether the binding is a real `env.AI` binding (true) or a REST shim (false).\n\t * Nova-3 uses different upload paths depending on this.\n\t */\n\tisBinding: boolean;\n\t/**\n\t * REST credentials, only set when `isBinding` is false.\n\t * Needed for Nova-3 which requires binary upload, bypassing the JSON-based REST shim.\n\t */\n\tcredentials?: CreateRunConfig;\n};\n\n/**\n * Workers AI transcription model implementing the AI SDK's `TranscriptionModelV3` interface.\n *\n * Supports:\n * - Whisper models (`@cf/openai/whisper`, `whisper-tiny-en`, `whisper-large-v3-turbo`)\n * - Deepgram Nova-3 (`@cf/deepgram/nova-3`) — uses a different input/output format\n */\nexport class WorkersAITranscriptionModel implements TranscriptionModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: TranscriptionModels,\n\t\treadonly settings: WorkersAITranscriptionSettings,\n\t\treadonly config: 
WorkersAITranscriptionConfig,\n\t) {}\n\n\tasync doGenerate(\n\t\toptions: Parameters<TranscriptionModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<TranscriptionModelV3[\"doGenerate\"]>>> {\n\t\tconst { audio, mediaType, abortSignal } = options;\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\t// The AI SDK always converts audio to Uint8Array via\n\t\t// convertDataContentToUint8Array before calling doGenerate.\n\t\tconst audioBytes =\n\t\t\ttypeof audio === \"string\"\n\t\t\t\t? Uint8Array.from(atob(audio), (c) => c.charCodeAt(0))\n\t\t\t\t: audio;\n\n\t\tconst isNova3 = this.modelId === \"@cf/deepgram/nova-3\";\n\n\t\tlet rawResult: unknown;\n\n\t\tif (isNova3) {\n\t\t\trawResult = await this.runNova3(audioBytes, mediaType, abortSignal);\n\t\t} else {\n\t\t\trawResult = await this.runWhisper(audioBytes, abortSignal);\n\t\t}\n\n\t\tconst result = rawResult as Record<string, unknown>;\n\n\t\t// Normalize response into AI SDK format\n\t\tif (isNova3) {\n\t\t\treturn this.normalizeNova3Response(result, warnings);\n\t\t}\n\t\treturn this.normalizeWhisperResponse(result, warnings);\n\t}\n\n\t// ---------------------------------------------------------------------------\n\t// Whisper models\n\t// ---------------------------------------------------------------------------\n\n\tprivate async runWhisper(audioBytes: Uint8Array, abortSignal?: AbortSignal): Promise<unknown> {\n\t\t// whisper-large-v3-turbo requires base64 audio (both binding and REST).\n\t\t// Other Whisper models accept number[].\n\t\tconst modelStr = this.modelId as string;\n\t\tconst audio =\n\t\t\tmodelStr === \"@cf/openai/whisper-large-v3-turbo\"\n\t\t\t\t? 
uint8ArrayToBase64(audioBytes)\n\t\t\t\t: Array.from(audioBytes);\n\n\t\tconst inputs: Record<string, unknown> = { audio };\n\n\t\tif (this.settings.language) {\n\t\t\tinputs.language = this.settings.language;\n\t\t}\n\t\tif (this.settings.prompt) {\n\t\t\tinputs.initial_prompt = this.settings.prompt;\n\t\t}\n\n\t\treturn this.config.binding.run(\n\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\tinputs as Parameters<Ai[\"run\"]>[1],\n\t\t\t{ gateway: this.config.gateway, signal: abortSignal } as AiOptions,\n\t\t);\n\t}\n\n\tprivate normalizeWhisperResponse(\n\t\traw: Record<string, unknown>,\n\t\twarnings: Array<SharedV3Warning>,\n\t): Awaited<ReturnType<TranscriptionModelV3[\"doGenerate\"]>> {\n\t\tconst text = (raw.text as string) ?? \"\";\n\n\t\t// Build segments from Whisper's various formats\n\t\tconst segments: Array<{ text: string; startSecond: number; endSecond: number }> = [];\n\n\t\t// whisper-large-v3-turbo returns segments[]\n\t\tif (raw.segments && Array.isArray(raw.segments)) {\n\t\t\tfor (const seg of raw.segments) {\n\t\t\t\tsegments.push({\n\t\t\t\t\ttext: ((seg as Record<string, unknown>).text as string) ?? \"\",\n\t\t\t\t\tstartSecond: ((seg as Record<string, unknown>).start as number) ?? 0,\n\t\t\t\t\tendSecond: ((seg as Record<string, unknown>).end as number) ?? 0,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t\t// basic whisper returns words[]\n\t\telse if (raw.words && Array.isArray(raw.words)) {\n\t\t\tfor (const w of raw.words) {\n\t\t\t\tsegments.push({\n\t\t\t\t\ttext: ((w as Record<string, unknown>).word as string) ?? \"\",\n\t\t\t\t\tstartSecond: ((w as Record<string, unknown>).start as number) ?? 0,\n\t\t\t\t\tendSecond: ((w as Record<string, unknown>).end as number) ?? 0,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\t// Language and duration from transcription_info (v3-turbo)\n\t\tconst info = raw.transcription_info as Record<string, unknown> | undefined;\n\n\t\treturn {\n\t\t\ttext,\n\t\t\tsegments,\n\t\t\tlanguage: (info?.language as string) ?? 
undefined,\n\t\t\tdurationInSeconds: (info?.duration as number) ?? undefined,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n\n\t// ---------------------------------------------------------------------------\n\t// Deepgram Nova-3\n\t// ---------------------------------------------------------------------------\n\n\tprivate async runNova3(\n\t\taudioBytes: Uint8Array,\n\t\tmediaType: string,\n\t\tabortSignal?: AbortSignal,\n\t): Promise<unknown> {\n\t\tif (this.config.isBinding) {\n\t\t\t// Binding path: Nova-3 accepts { audio: { body: base64, contentType } }\n\t\t\treturn this.config.binding.run(\n\t\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\t\t{\n\t\t\t\t\taudio: { body: uint8ArrayToBase64(audioBytes), contentType: mediaType },\n\t\t\t\t} as Parameters<Ai[\"run\"]>[1],\n\t\t\t\t{ gateway: this.config.gateway, signal: abortSignal } as AiOptions,\n\t\t\t);\n\t\t}\n\n\t\t// REST path: Nova-3 requires raw binary with a Content-Type header,\n\t\t// not JSON. The createRun shim always sends JSON, so we bypass it\n\t\t// and use createRunBinary which sends the audio bytes directly.\n\t\tif (!this.config.credentials) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Nova-3 transcription via REST requires credentials in the config. 
\" +\n\t\t\t\t\t\"This is a bug — credentials should have been set by createWorkersAI.\",\n\t\t\t);\n\t\t}\n\t\treturn createRunBinary(\n\t\t\tthis.config.credentials,\n\t\t\tthis.modelId,\n\t\t\taudioBytes,\n\t\t\tmediaType,\n\t\t\tabortSignal,\n\t\t);\n\t}\n\n\tprivate normalizeNova3Response(\n\t\traw: Record<string, unknown>,\n\t\twarnings: Array<SharedV3Warning>,\n\t): Awaited<ReturnType<TranscriptionModelV3[\"doGenerate\"]>> {\n\t\t// Nova-3 format: { results: { channels: [{ alternatives: [{ transcript, words }] }] } }\n\t\tconst results = raw.results as Record<string, unknown> | undefined;\n\t\tconst channels = results?.channels as\n\t\t\t| Array<{\n\t\t\t\t\talternatives?: Array<{\n\t\t\t\t\t\ttranscript?: string;\n\t\t\t\t\t\tconfidence?: number;\n\t\t\t\t\t\twords?: Array<{ word: string; start: number; end: number }>;\n\t\t\t\t\t}>;\n\t\t\t }>\n\t\t\t| undefined;\n\t\tconst alt = channels?.[0]?.alternatives?.[0];\n\n\t\tconst text = alt?.transcript ?? \"\";\n\t\tconst segments: Array<{ text: string; startSecond: number; endSecond: number }> = [];\n\n\t\tif (alt?.words && Array.isArray(alt.words)) {\n\t\t\tfor (const w of alt.words) {\n\t\t\t\tsegments.push({\n\t\t\t\t\ttext: w.word ?? \"\",\n\t\t\t\t\tstartSecond: w.start ?? 0,\n\t\t\t\t\tendSecond: w.end ?? 
0,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\ttext,\n\t\t\tsegments,\n\t\t\tlanguage: undefined,\n\t\t\tdurationInSeconds: undefined,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\nfunction uint8ArrayToBase64(bytes: Uint8Array): string {\n\tlet binary = \"\";\n\tfor (let i = 0; i < bytes.length; i++) {\n\t\tbinary += String.fromCharCode(bytes[i]!);\n\t}\n\treturn btoa(binary);\n}\n","import type { SpeechModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAISpeechSettings } from \"./workersai-speech-settings\";\nimport type { SpeechModels } from \"./workersai-models\";\n\nexport type WorkersAISpeechConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\n/**\n * Workers AI speech (text-to-speech) model implementing the AI SDK's `SpeechModelV3` interface.\n *\n * Currently supports Deepgram Aura-1 (`@cf/deepgram/aura-1`).\n * The model accepts `{ text, voice?, speed? 
}` and returns raw audio bytes.\n */\nexport class WorkersAISpeechModel implements SpeechModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: SpeechModels,\n\t\treadonly settings: WorkersAISpeechSettings,\n\t\treadonly config: WorkersAISpeechConfig,\n\t) {}\n\n\tasync doGenerate(\n\t\toptions: Parameters<SpeechModelV3[\"doGenerate\"]>[0],\n\t): Promise<Awaited<ReturnType<SpeechModelV3[\"doGenerate\"]>>> {\n\t\tconst { text, voice, speed, abortSignal } = options;\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\tif (options.instructions) {\n\t\t\twarnings.push({\n\t\t\t\tdetails: \"Workers AI TTS models do not support instructions.\",\n\t\t\t\tfeature: \"instructions\",\n\t\t\t\ttype: \"unsupported\",\n\t\t\t});\n\t\t}\n\n\t\tif (options.outputFormat) {\n\t\t\twarnings.push({\n\t\t\t\tdetails:\n\t\t\t\t\t\"Workers AI TTS models do not support output format selection. Audio is returned as MP3.\",\n\t\t\t\tfeature: \"outputFormat\",\n\t\t\t\ttype: \"unsupported\",\n\t\t\t});\n\t\t}\n\n\t\t// Build inputs for Workers AI TTS\n\t\tconst inputs: Record<string, unknown> = { text };\n\t\tif (voice) inputs.voice = voice;\n\t\tif (speed != null) inputs.speed = speed;\n\n\t\tconst result = await this.config.binding.run(\n\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\tinputs as Parameters<Ai[\"run\"]>[1],\n\t\t\t{\n\t\t\t\tgateway: this.config.gateway,\n\t\t\t\tsignal: abortSignal,\n\t\t\t\t// returnRawResponse prevents the createRun REST shim from trying\n\t\t\t\t// to JSON.parse binary audio. 
Real env.AI bindings don't recognize\n\t\t\t\t// this option — it has no effect, and the binding returns the normal\n\t\t\t\t// binary result (Uint8Array/ReadableStream) which toUint8Array handles.\n\t\t\t\treturnRawResponse: true,\n\t\t\t} as AiOptions,\n\t\t);\n\n\t\t// Workers AI TTS returns binary audio in various formats:\n\t\t// - Binding: Uint8Array, ArrayBuffer, ReadableStream, or { audio: base64 }\n\t\t// - REST (returnRawResponse): Response object\n\t\tconst audio = await toUint8Array(result);\n\n\t\treturn {\n\t\t\taudio,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Convert various output types from binding.run() to Uint8Array.\n * Workers AI TTS models return different types depending on the runtime:\n * - Response (from REST shim with returnRawResponse)\n * - ReadableStream<Uint8Array> (most common in workerd)\n * - Uint8Array / ArrayBuffer (direct binary)\n * - { audio: string } with base64 data\n */\nasync function toUint8Array(output: unknown): Promise<Uint8Array> {\n\t// Response object (from REST shim with returnRawResponse: true)\n\tif (output instanceof Response) {\n\t\treturn new Uint8Array(await output.arrayBuffer());\n\t}\n\tif (output instanceof Uint8Array) {\n\t\treturn output;\n\t}\n\tif (output instanceof ArrayBuffer) {\n\t\treturn new Uint8Array(output);\n\t}\n\tif (output instanceof ReadableStream) {\n\t\tconst reader = (output as ReadableStream<Uint8Array>).getReader();\n\t\tconst chunks: Uint8Array[] = [];\n\t\tlet totalLength = 0;\n\t\twhile (true) {\n\t\t\tconst { done, value } = await reader.read();\n\t\t\tif (done) break;\n\t\t\tchunks.push(value);\n\t\t\ttotalLength += value.length;\n\t\t}\n\t\tconst result = new Uint8Array(totalLength);\n\t\tlet 
offset = 0;\n\t\tfor (const chunk of chunks) {\n\t\t\tresult.set(chunk, offset);\n\t\t\toffset += chunk.length;\n\t\t}\n\t\treturn result;\n\t}\n\t// Object with audio property (e.g. { audio: base64string })\n\tif (typeof output === \"object\" && output !== null) {\n\t\tconst obj = output as Record<string, unknown>;\n\t\tif (typeof obj.audio === \"string\") {\n\t\t\treturn Uint8Array.from(atob(obj.audio), (c) => c.charCodeAt(0));\n\t\t}\n\t}\n\tthrow new Error(\n\t\t`Unexpected output type from TTS model. Got ${Object.prototype.toString.call(output)}`,\n\t);\n}\n","import type { RerankingModelV3, SharedV3Warning } from \"@ai-sdk/provider\";\nimport type { WorkersAIRerankingSettings } from \"./workersai-reranking-settings\";\nimport type { RerankingModels } from \"./workersai-models\";\n\nexport type WorkersAIRerankingConfig = {\n\tprovider: string;\n\tbinding: Ai;\n\tgateway?: GatewayOptions;\n};\n\n/**\n * Workers AI reranking model implementing the AI SDK's `RerankingModelV3` interface.\n *\n * Supports BGE reranker models (`@cf/baai/bge-reranker-base`, `bge-reranker-v2-m3`).\n *\n * Workers AI reranking API:\n * - Input: `{ query, contexts: [{ text }], top_k? 
}`\n * - Output: `{ response: [{ id, score }] }`\n */\nexport class WorkersAIRerankingModel implements RerankingModelV3 {\n\treadonly specificationVersion = \"v3\";\n\n\tget provider(): string {\n\t\treturn this.config.provider;\n\t}\n\n\tconstructor(\n\t\treadonly modelId: RerankingModels,\n\t\treadonly settings: WorkersAIRerankingSettings,\n\t\treadonly config: WorkersAIRerankingConfig,\n\t) {}\n\n\tasync doRerank(\n\t\toptions: Parameters<RerankingModelV3[\"doRerank\"]>[0],\n\t): Promise<Awaited<ReturnType<RerankingModelV3[\"doRerank\"]>>> {\n\t\tconst { documents, query, topN, abortSignal } = options;\n\n\t\tconst warnings: Array<SharedV3Warning> = [];\n\n\t\t// Convert AI SDK documents to Workers AI contexts format\n\t\tconst contexts = documentsToContexts(documents, warnings);\n\n\t\t// Build Workers AI inputs\n\t\tconst inputs: Record<string, unknown> = {\n\t\t\tquery,\n\t\t\tcontexts,\n\t\t};\n\t\tif (topN != null) {\n\t\t\tinputs.top_k = topN;\n\t\t}\n\n\t\tconst result = (await this.config.binding.run(\n\t\t\tthis.modelId as Parameters<Ai[\"run\"]>[0],\n\t\t\tinputs as Parameters<Ai[\"run\"]>[1],\n\t\t\t{ gateway: this.config.gateway, signal: abortSignal } as AiOptions,\n\t\t)) as Record<string, unknown>;\n\n\t\t// Workers AI returns { response: [{ id, score }] }\n\t\tconst response = result.response as Array<{ id?: number; score?: number }> | undefined;\n\n\t\tconst ranking = (response ?? [])\n\t\t\t.map((item) => ({\n\t\t\t\tindex: item.id ?? 0,\n\t\t\t\trelevanceScore: item.score ?? 
0,\n\t\t\t}))\n\t\t\t.sort((a, b) => b.relevanceScore - a.relevanceScore);\n\n\t\treturn {\n\t\t\tranking,\n\t\t\twarnings,\n\t\t\tresponse: {\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\theaders: {},\n\t\t\t},\n\t\t};\n\t}\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Convert AI SDK document format to Workers AI contexts format.\n *\n * AI SDK supports two document types:\n * - `{ type: 'text', values: string[] }` — direct text strings\n * - `{ type: 'object', values: JSONObject[] }` — JSON objects (stringified for Workers AI)\n */\nfunction documentsToContexts(\n\tdocuments: Parameters<RerankingModelV3[\"doRerank\"]>[0][\"documents\"],\n\twarnings: Array<SharedV3Warning>,\n): Array<{ text: string }> {\n\tif (documents.type === \"text\") {\n\t\treturn documents.values.map((text) => ({ text }));\n\t}\n\n\t// Object documents: stringify each object for the reranker\n\twarnings.push({\n\t\tmessage: \"Workers AI reranker expects text contexts. JSON objects have been stringified.\",\n\t\ttype: \"other\",\n\t});\n\n\treturn documents.values.map((obj) => ({ text: JSON.stringify(obj) }));\n}\n","import { AISearchChatLanguageModel } from \"./aisearch-chat-language-model\";\n\n/**\n * @deprecated Use `AISearchChatLanguageModel` instead. 
AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport class AutoRAGChatLanguageModel extends AISearchChatLanguageModel {}\n","import { AISearchChatLanguageModel } from \"./aisearch-chat-language-model\";\nimport type { AISearchChatSettings } from \"./aisearch-chat-settings\";\nimport { createRun } from \"./utils\";\nimport {\n\tWorkersAIEmbeddingModel,\n\ttype WorkersAIEmbeddingSettings,\n} from \"./workersai-embedding-model\";\nimport { WorkersAIChatLanguageModel } from \"./workersai-chat-language-model\";\nimport type { WorkersAIChatSettings } from \"./workersai-chat-settings\";\nimport { WorkersAIImageModel } from \"./workersai-image-model\";\nimport type { WorkersAIImageSettings } from \"./workersai-image-settings\";\nimport { WorkersAITranscriptionModel } from \"./workersai-transcription-model\";\nimport type { WorkersAITranscriptionSettings } from \"./workersai-transcription-settings\";\nimport { WorkersAISpeechModel } from \"./workersai-speech-model\";\nimport type { WorkersAISpeechSettings } from \"./workersai-speech-settings\";\nimport { WorkersAIRerankingModel } from \"./workersai-reranking-model\";\nimport type { WorkersAIRerankingSettings } from \"./workersai-reranking-settings\";\nimport type {\n\tEmbeddingModels,\n\tImageGenerationModels,\n\tTextGenerationModels,\n\tTranscriptionModels,\n\tSpeechModels,\n\tRerankingModels,\n} from \"./workersai-models\";\n\n// Re-export deprecated AutoRAG aliases\nexport { AutoRAGChatLanguageModel } from \"./autorag-chat-language-model\";\nexport type { AutoRAGChatSettings } from \"./autorag-chat-settings\";\n\n// Export new AI Search types\nexport { AISearchChatLanguageModel } from \"./aisearch-chat-language-model\";\nexport type { AISearchChatSettings } from \"./aisearch-chat-settings\";\n\n// Export transcription and speech types\nexport { WorkersAITranscriptionModel } from \"./workersai-transcription-model\";\nexport type { WorkersAITranscriptionSettings } from 
\"./workersai-transcription-settings\";\nexport { WorkersAISpeechModel } from \"./workersai-speech-model\";\nexport type { WorkersAISpeechSettings } from \"./workersai-speech-settings\";\nexport { WorkersAIRerankingModel } from \"./workersai-reranking-model\";\nexport type { WorkersAIRerankingSettings } from \"./workersai-reranking-settings\";\n\n// ---------------------------------------------------------------------------\n// Workers AI\n// ---------------------------------------------------------------------------\n\nexport type WorkersAISettings = (\n\t| {\n\t\t\t/**\n\t\t\t * Provide a Cloudflare AI binding.\n\t\t\t */\n\t\t\tbinding: Ai;\n\n\t\t\t/**\n\t\t\t * Credentials must be absent when a binding is given.\n\t\t\t */\n\t\t\taccountId?: never;\n\t\t\tapiKey?: never;\n\t }\n\t| {\n\t\t\t/**\n\t\t\t * Provide Cloudflare API credentials directly. Must be used if a binding is not specified.\n\t\t\t */\n\t\t\taccountId: string;\n\t\t\tapiKey: string;\n\t\t\t/**\n\t\t\t * Both binding must be absent if credentials are used directly.\n\t\t\t */\n\t\t\tbinding?: never;\n\n\t\t\t/**\n\t\t\t * Custom fetch implementation. You can use it as a middleware to\n\t\t\t * intercept requests, or to provide a custom fetch implementation\n\t\t\t * for e.g. testing. 
Only available in credentials mode.\n\t\t\t */\n\t\t\tfetch?: typeof globalThis.fetch;\n\t }\n) & {\n\t/**\n\t * Optionally specify a gateway.\n\t */\n\tgateway?: GatewayOptions;\n};\n\nexport interface WorkersAI {\n\t(modelId: TextGenerationModels, settings?: WorkersAIChatSettings): WorkersAIChatLanguageModel;\n\t/**\n\t * Creates a model for text generation.\n\t **/\n\tchat(\n\t\tmodelId: TextGenerationModels,\n\t\tsettings?: WorkersAIChatSettings,\n\t): WorkersAIChatLanguageModel;\n\n\tembedding(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings?: WorkersAIEmbeddingSettings,\n\t): WorkersAIEmbeddingModel;\n\n\ttextEmbedding(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings?: WorkersAIEmbeddingSettings,\n\t): WorkersAIEmbeddingModel;\n\n\ttextEmbeddingModel(\n\t\tmodelId: EmbeddingModels,\n\t\tsettings?: WorkersAIEmbeddingSettings,\n\t): WorkersAIEmbeddingModel;\n\n\t/**\n\t * Creates a model for image generation.\n\t **/\n\timage(modelId: ImageGenerationModels, settings?: WorkersAIImageSettings): WorkersAIImageModel;\n\timageModel(\n\t\tmodelId: ImageGenerationModels,\n\t\tsettings?: WorkersAIImageSettings,\n\t): WorkersAIImageModel;\n\n\t/**\n\t * Creates a model for speech-to-text transcription.\n\t **/\n\ttranscription(\n\t\tmodelId: TranscriptionModels,\n\t\tsettings?: WorkersAITranscriptionSettings,\n\t): WorkersAITranscriptionModel;\n\ttranscriptionModel(\n\t\tmodelId: TranscriptionModels,\n\t\tsettings?: WorkersAITranscriptionSettings,\n\t): WorkersAITranscriptionModel;\n\n\t/**\n\t * Creates a model for text-to-speech synthesis.\n\t **/\n\tspeech(modelId: SpeechModels, settings?: WorkersAISpeechSettings): WorkersAISpeechModel;\n\tspeechModel(modelId: SpeechModels, settings?: WorkersAISpeechSettings): WorkersAISpeechModel;\n\n\t/**\n\t * Creates a model for document reranking.\n\t **/\n\treranking(\n\t\tmodelId: RerankingModels,\n\t\tsettings?: WorkersAIRerankingSettings,\n\t): WorkersAIRerankingModel;\n\trerankingModel(\n\t\tmodelId: 
RerankingModels,\n\t\tsettings?: WorkersAIRerankingSettings,\n\t): WorkersAIRerankingModel;\n}\n\n/**\n * Create a Workers AI provider instance.\n */\nexport function createWorkersAI(options: WorkersAISettings): WorkersAI {\n\tif (!options.binding && !(\"accountId\" in options && \"apiKey\" in options)) {\n\t\tthrow new Error(\n\t\t\t\"Invalid Workers AI configuration: you must provide either a binding (e.g. { binding: env.AI }) \" +\n\t\t\t\t\"or credentials ({ accountId, apiKey }).\",\n\t\t);\n\t}\n\n\tlet binding: Ai;\n\tconst isBinding = !!options.binding;\n\n\tif (options.binding) {\n\t\tbinding = options.binding;\n\t} else {\n\t\tconst { accountId, apiKey } = options;\n\t\tbinding = {\n\t\t\trun: createRun({ accountId, apiKey, fetch: options.fetch }),\n\t\t} as Ai;\n\t}\n\n\tconst createChatModel = (modelId: TextGenerationModels, settings: WorkersAIChatSettings = {}) =>\n\t\tnew WorkersAIChatLanguageModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.chat\",\n\t\t\tisBinding,\n\t\t});\n\n\tconst createImageModel = (\n\t\tmodelId: ImageGenerationModels,\n\t\tsettings: WorkersAIImageSettings = {},\n\t) =>\n\t\tnew WorkersAIImageModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.image\",\n\t\t});\n\tconst createEmbeddingModel = (\n\t\tmodelId: EmbeddingModels,\n\t\tsettings: WorkersAIEmbeddingSettings = {},\n\t) =>\n\t\tnew WorkersAIEmbeddingModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.embedding\",\n\t\t});\n\n\tconst createTranscriptionModel = (\n\t\tmodelId: TranscriptionModels,\n\t\tsettings: WorkersAITranscriptionSettings = {},\n\t) =>\n\t\tnew WorkersAITranscriptionModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.transcription\",\n\t\t\tisBinding,\n\t\t\tcredentials:\n\t\t\t\t!isBinding && \"accountId\" in options\n\t\t\t\t\t? 
{ accountId: options.accountId, apiKey: options.apiKey }\n\t\t\t\t\t: undefined,\n\t\t});\n\n\tconst createSpeechModel = (modelId: SpeechModels, settings: WorkersAISpeechSettings = {}) =>\n\t\tnew WorkersAISpeechModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.speech\",\n\t\t});\n\n\tconst createRerankingModel = (\n\t\tmodelId: RerankingModels,\n\t\tsettings: WorkersAIRerankingSettings = {},\n\t) =>\n\t\tnew WorkersAIRerankingModel(modelId, settings, {\n\t\t\tbinding,\n\t\t\tgateway: options.gateway,\n\t\t\tprovider: \"workersai.reranking\",\n\t\t});\n\n\tconst provider = (modelId: TextGenerationModels, settings?: WorkersAIChatSettings) => {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\"The WorkersAI model function cannot be called with the new keyword.\");\n\t\t}\n\t\treturn createChatModel(modelId, settings);\n\t};\n\n\tprovider.chat = createChatModel;\n\tprovider.embedding = createEmbeddingModel;\n\tprovider.textEmbedding = createEmbeddingModel;\n\tprovider.textEmbeddingModel = createEmbeddingModel;\n\tprovider.image = createImageModel;\n\tprovider.imageModel = createImageModel;\n\tprovider.transcription = createTranscriptionModel;\n\tprovider.transcriptionModel = createTranscriptionModel;\n\tprovider.speech = createSpeechModel;\n\tprovider.speechModel = createSpeechModel;\n\tprovider.reranking = createRerankingModel;\n\tprovider.rerankingModel = createRerankingModel;\n\n\treturn provider;\n}\n\n// ---------------------------------------------------------------------------\n// AI Search (formerly AutoRAG)\n// ---------------------------------------------------------------------------\n\nexport type AISearchSettings = {\n\tbinding: AutoRAG;\n};\n\nexport interface AISearchProvider {\n\t(settings?: AISearchChatSettings): AISearchChatLanguageModel;\n\t/**\n\t * Creates a model for text generation.\n\t **/\n\tchat(settings?: AISearchChatSettings): AISearchChatLanguageModel;\n}\n\n/**\n * Create an AI Search 
provider instance.\n *\n * AI Search (formerly AutoRAG) is Cloudflare's managed search service.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport function createAISearch(\n\toptions: AISearchSettings,\n\t/** @internal */\n\tproviderName = \"aisearch.chat\",\n): AISearchProvider {\n\tconst binding = options.binding;\n\n\tconst createChatModel = (settings: AISearchChatSettings = {}) =>\n\t\tnew AISearchChatLanguageModel(\"@cf/meta/llama-3.3-70b-instruct-fp8-fast\", settings, {\n\t\t\tbinding,\n\t\t\tprovider: providerName,\n\t\t});\n\n\tconst provider = (settings?: AISearchChatSettings) => {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\"The AISearch model function cannot be called with the new keyword.\");\n\t\t}\n\t\treturn createChatModel(settings);\n\t};\n\n\tprovider.chat = createChatModel;\n\n\treturn provider;\n}\n\n// ---------------------------------------------------------------------------\n// Deprecated AutoRAG aliases\n// ---------------------------------------------------------------------------\n\n/**\n * @deprecated Use `AISearchSettings` instead. AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport type AutoRAGSettings = AISearchSettings;\n\n/**\n * @deprecated Use `AISearchProvider` instead. AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport type AutoRAGProvider = AISearchProvider;\n\nlet autoRAGWarned = false;\n\n/**\n * @deprecated Use `createAISearch` instead. AutoRAG has been renamed to AI Search.\n * @see https://developers.cloudflare.com/ai-search/\n */\nexport function createAutoRAG(options: AISearchSettings): AISearchProvider {\n\tif (!autoRAGWarned) {\n\t\tautoRAGWarned = true;\n\t\tconsole.warn(\n\t\t\t\"[workers-ai-provider] createAutoRAG is deprecated. Use createAISearch instead. \" +\n\t\t\t\t\"AutoRAG has been renamed to AI Search. 
\" +\n\t\t\t\t\"See https://developers.cloudflare.com/ai-search/\",\n\t\t);\n\t}\n\treturn createAISearch(options, \"autorag.chat\");\n}\n"],"mappings":";;;;;;;;;;;AAWA,SAASA,eAAa,MAAqD;AAC1E,KAAI,gBAAgB,WACnB,QAAO;AAGR,KAAI,OAAO,SAAS,UAAU;EAC7B,IAAI,SAAS;AACb,MAAI,OAAO,WAAW,QAAQ,EAAE;GAC/B,MAAM,aAAa,OAAO,QAAQ,IAAI;AACtC,OAAI,cAAc,EACjB,UAAS,OAAO,MAAM,aAAa,EAAE;;EAGvC,MAAM,eAAe,KAAK,OAAO;EACjC,MAAM,QAAQ,IAAI,WAAW,aAAa,OAAO;AACjD,OAAK,IAAI,IAAI,GAAG,IAAI,aAAa,QAAQ,IACxC,OAAM,KAAK,aAAa,WAAW,EAAE;AAEtC,SAAO;;AAGR,KAAI,gBAAgB,IACnB,OAAM,IAAI,MACT,kHAEA;AAGF,QAAO;;AAGR,SAASC,qBAAmB,OAA2B;CACtD,IAAI,SAAS;CACb,MAAM,YAAY;AAClB,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;EACjD,MAAM,QAAQ,MAAM,SAAS,GAAG,KAAK,IAAI,IAAI,WAAW,MAAM,OAAO,CAAC;AACtE,YAAU,OAAO,aAAa,GAAG,MAAM;;AAExC,QAAO,KAAK,OAAO;;AAGpB,SAAgB,+BAA+B,QAE7C;CACD,MAAM,WAAgC,EAAE;AAExC,MAAK,MAAM,EAAE,MAAM,aAAa,OAC/B,SAAQ,MAAR;EACC,KAAK;AACJ,YAAS,KAAK;IAAE;IAAS,MAAM;IAAU,CAAC;AAC1C;EAGD,KAAK,QAAQ;GACZ,MAAM,YAAsB,EAAE;GAC9B,MAAM,aAAqE,EAAE;AAE7E,QAAK,MAAM,QAAQ,QAClB,SAAQ,KAAK,MAAb;IACC,KAAK;AACJ,eAAU,KAAK,KAAK,KAAK;AACzB;IAED,KAAK,QAAQ;KACZ,MAAM,aAAaD,eAAa,KAAK,KAAK;AAC1C,SAAI,WACH,YAAW,KAAK;MACf,OAAO;MACP,WAAW,KAAK;MAChB,CAAC;AAEH;;;AAKH,OAAI,WAAW,SAAS,GAAG;IAC1B,MAAM,eAAuC,EAAE;AAC/C,QAAI,UAAU,SAAS,EACtB,cAAa,KAAK;KAAE,MAAM;KAAQ,MAAM,UAAU,KAAK,KAAK;KAAE,CAAC;AAEhE,SAAK,MAAM,OAAO,YAAY;KAC7B,MAAM,SAASC,qBAAmB,IAAI,MAAM;KAC5C,MAAM,YAAY,IAAI,aAAa;AACnC,kBAAa,KAAK;MACjB,MAAM;MACN,WAAW,EAAE,KAAK,QAAQ,UAAU,UAAU,UAAU;MACxD,CAAC;;AAEH,aAAS,KAAK;KAAE,SAAS;KAAc,MAAM;KAAQ,CAAC;SAEtD,UAAS,KAAK;IAAE,SAAS,UAAU,KAAK,KAAK;IAAE,MAAM;IAAQ,CAAC;AAG/D;;EAGD,KAAK,aAAa;GACjB,IAAI,OAAO;GACX,IAAI,YAAY;GAChB,MAAM,YAID,EAAE;AAEP,QAAK,MAAM,QAAQ,QAClB,SAAQ,KAAK,MAAb;IACC,KAAK;AACJ,aAAQ,KAAK;AACb;IAGD,KAAK;AAOJ,kBAAa,KAAK;AAClB;IAGD,KAAK,OAEJ;IAGD,KAAK;AACJ,eAAU,KAAK;MACd,UAAU;OACT,WAAW,KAAK,UAAU,KAAK,MAAM;OACrC,MAAM,KAAK;OACX;MACD,IAAI,KAAK;MACT,MAAM;MACN,CAAC;AACF;IAGD,KAAK,cAEJ;IAGD,QAEC,OAAM,IAAI,MACT,0BAFuB,KAEyC,OAChE;;AAKJ,YAAS,KAAK;IACb,SAAS
;IACT,MAAM;IACN,GAAI,YAAY,EAAE,WAAW,GAAG,EAAE;IAClC,YACC,UAAU,SAAS,IAChB,UAAU,KAAK,EAAE,UAAU,EAAE,MAAM,WAAW,QAAQ,UAAU;KAChE,UAAU;MAAE,WAAW;MAAM;MAAM;KACnC;KACA,MAAM;KACN,EAAE,GACF,KAAA;IACJ,CAAC;AAEF;;EAGD,KAAK;AACJ,QAAK,MAAM,gBAAgB,QAC1B,KAAI,aAAa,SAAS,eAAe;IACxC,MAAM,SAAS,aAAa;IAC5B,IAAI;AACJ,YAAQ,OAAO,MAAf;KACC,KAAK;KACL,KAAK;AACJ,gBAAU,OAAO;AACjB;KACD,KAAK;KACL,KAAK;AACJ,gBAAU,KAAK,UAAU,OAAO,MAAM;AACtC;KACD,KAAK;AACJ,gBAAU,OAAO,SACd,0BAA0B,OAAO,WACjC;AACH;KACD,KAAK;AACJ,gBAAU,OAAO,MACf,QACC,MACA,EAAE,SAAS,OACZ,CACA,KAAK,MAAM,EAAE,KAAK,CAClB,KAAK,KAAK;AACZ;KACD;AACC,gBAAU;AACV;;AAEF,aAAS,KAAK;KACb;KACA,MAAM,aAAa;KACnB,cAAc,aAAa;KAC3B,MAAM;KACN,CAAC;;AAIJ;EAGD,QAEC,OAAM,IAAI,MAAM,qBADQ,OAC+B;;AAK1D,QAAO,EAAE,UAAU;;;;;;;;ACpOpB,SAAgB,kBACf,QACuB;CACvB,MAAM,QACL,OAGC,SAAS;EACV,mBAAmB;EACnB,eAAe;EACf;CAED,MAAM,eAAe,MAAM,iBAAiB;CAC5C,MAAM,mBAAmB,MAAM,qBAAqB;AAEpD,QAAO;EACN,cAAc;GACb,OAAO;GACP,MAAM,KAAA;GACN,WAAW,KAAA;GACX;EACD,aAAa;GACZ,OAAO;GACP,SAAS,KAAA;GACT,WAAW,KAAA;GACX,YAAY,KAAA;GACZ;EACD,KAAK,EAAE,OAAO,eAAe,kBAAkB;EAC/C;;;;;;;;;;;ACzBF,SAAgB,yBACf,wBAC8B;CAC9B,IAAI;AAEJ,KACC,OAAO,2BAA2B,YAClC,2BAA2B,QAC3B,2BAA2B,KAAA,EAE3B,gBAAe;UACL,OAAO,2BAA2B,YAAY,2BAA2B,MAAM;EACzF,MAAM,WAAW;EAGjB,MAAM,UAAU,SAAS;AACzB,MAAI,MAAM,QAAQ,QAAQ,IAAI,QAAQ,SAAS,EAC9C,gBAAe,QAAQ,GAAG;WAChB,mBAAmB,SAC7B,gBAAe,SAAS;MAExB,gBAAe,KAAA;OAIhB,gBAAe,KAAA;CAGhB,MAAM,MAAM,gBAAgB;AAE5B,SAAQ,cAAR;EACC,KAAK,OACJ,QAAO;GAAE,SAAS;GAAQ;GAAK;EAChC,KAAK;EACL,KAAK,eACJ,QAAO;GAAE,SAAS;GAAU;GAAK;EAClC,KAAK,aACJ,QAAO;GAAE,SAAS;GAAc;GAAK;EACtC,KAAK,QACJ,QAAO;GAAE,SAAS;GAAS;GAAK;EACjC,KAAK;EACL,KAAK,UACJ,QAAO;GAAE,SAAS;GAAS;GAAK;EACjC,QACC,QAAO;GAAE,SAAS;GAAQ;GAAK;;;;;;;;;ACxClC,SAAgB,mBACf,QACA,UAC4C;CAC5C,IAAI,YAAY;AAChB,QAAO,OAAO,YACb,IAAI,gBAAsE;EACzE,UAAU,OAAO,YAAY;AAC5B,OAAI,CAAC,WAAW;AACf,gBAAY;AACZ,eAAW,QAAQ;KAClB,MAAM;KACI;KACV,CAAC;;AAEH,cAAW,QAAQ,MAAM;;EAE1B,MAAM,YAAY;AACjB,OAAI,CAAC,UACJ,YAAW,QAAQ;IAClB,MAAM;IACI;IACV,CAAC;;EAGJ,CAAC,CACF;;;;;AAMF,SAAS,wBAAwB,IAAsC;CACtE,MAAM,KAAK,GAAG;CACd,MAAM,
OAAO,IAAI,QAAQ,GAAG,QAAQ;CACpC,MAAM,OAAO,IAAI,aAAa,GAAG,aAAa;AAE9C,QAAO,EADI,GAAG,MAAM,SACN,CAAC,SAAS,CAAC,QAAQ,SAAS;;;;;;;;;;;;AAa3C,SAAgB,gBACf,UAC4C;CAC5C,MAAM,YACL,oBAAoB,iBACjB,WACC,SAAS;AAEd,KAAI,CAAC,UACJ,OAAM,IAAI,MAAM,gDAAgD;CAIjE,IAAI,QAA8B;EACjC,cAAc;GAAE,OAAO;GAAG,MAAM,KAAA;GAAW,WAAW,KAAA;GAAW;EACjE,aAAa;GACZ,OAAO;GACP,SAAS,KAAA;GACT,WAAW,KAAA;GACX,YAAY,KAAA;GACZ;EACD,KAAK,EAAE,aAAa,GAAG;EACvB;CACD,IAAI,SAAwB;CAC5B,IAAI,cAA6B;CACjC,IAAI,eAAmD;CACvD,IAAI,eAAe;CACnB,IAAI,kBAAkB;CAOtB,MAAM,kCAAkB,IAAI,KAA6D;CACzF,MAAM,kCAAkB,IAAI,KAAa;CACzC,IAAI,sBAAqC;AAMzC,QAHkB,UAAU,YAAY,IAAI,YAAY,CAAC,CAGxC,YAChB,IAAI,gBAAmD;EACtD,UAAU,MAAM,YAAY;AAC3B,OAAI,CAAC,QAAQ,SAAS,UAAU;AAC/B,QAAI,SAAS,SAAU,gBAAe;AACtC;;AAGD,qBAAkB;GAClB,IAAI;AACJ,OAAI;AACH,YAAQ,KAAK,MAAM,KAAK;WACjB;AACP,YAAQ,KAAK,oDAAoD,KAAK;AACtE;;AAGD,OAAI,MAAM,MACT,SAAQ,kBAAkB,MAAiD;GAI5E,MAAM,UAAU,MAAM;GAMtB,MAAM,qBAAqB,UAAU,IAAI;GACzC,MAAM,qBAAqB,MAAM;AAEjC,OAAI,sBAAsB,KACzB,gBAAe,yBAAyB,mBAAmB;YACjD,sBAAsB,KAChC,gBAAe,yBAAyB,mBAAmB;GAI5D,MAAM,iBAAiB,MAAM;AAC7B,OAAI,kBAAkB,QAAQ,mBAAmB,IAAI;IACpD,MAAM,eAAe,OAAO,eAAe;AAC3C,QAAI,aAAa,SAAS,GAAG;AAE5B,SAAI,aAAa;AAChB,iBAAW,QAAQ;OAAE,MAAM;OAAiB,IAAI;OAAa,CAAC;AAC9D,oBAAc;;AAEf,SAAI,CAAC,QAAQ;AACZ,eAAS,YAAY;AACrB,iBAAW,QAAQ;OAAE,MAAM;OAAc,IAAI;OAAQ,CAAC;;AAEvD,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI;MACJ,OAAO;MACP,CAAC;;;AAKJ,OAAI,MAAM,QAAQ,MAAM,WAAW,EAAE;AAEpC,QAAI,aAAa;AAChB,gBAAW,QAAQ;MAAE,MAAM;MAAiB,IAAI;MAAa,CAAC;AAC9D,mBAAc;;AAEf,uBAAmB,MAAM,YAAyC,WAAW;;AAI9E,OAAI,UAAU,IAAI,OAAO;IACxB,MAAM,QAAQ,QAAQ,GAAG;IAEzB,MAAM,iBAAkB,MAAM,qBAAqB,MAAM;AAGzD,QAAI,kBAAkB,eAAe,SAAS,GAAG;AAChD,SAAI,CAAC,aAAa;AACjB,oBAAc,YAAY;AAC1B,iBAAW,QAAQ;OAClB,MAAM;OACN,IAAI;OACJ,CAAC;;AAEH,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI;MACJ,OAAO;MACP,CAAC;;IAGH,MAAM,YAAY,MAAM;AACxB,QAAI,aAAa,UAAU,SAAS,GAAG;AAEtC,SAAI,aAAa;AAChB,iBAAW,QAAQ;OAAE,MAAM;OAAiB,IAAI;OAAa,CAAC;AAC9D,oBAAc;;AAEf,SAAI,CAAC,QAAQ;AACZ,eAAS,YAAY;AACrB,iBAAW,QAAQ;OAAE,MAAM;OAAc,IAAI;OAAQ,CAAC;;AAEvD,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI;MACJ,OAAO
;MACP,CAAC;;IAGH,MAAM,iBAAiB,MAAM;AAG7B,QAAI,MAAM,QAAQ,eAAe,EAAE;AAElC,SAAI,aAAa;AAChB,iBAAW,QAAQ;OAAE,MAAM;OAAiB,IAAI;OAAa,CAAC;AAC9D,oBAAc;;AAEf,wBAAmB,gBAAgB,WAAW;;;;EAKjD,MAAM,YAAY;AAEjB,QAAK,MAAM,CAAC,QAAQ,iBAAiB;AACpC,QAAI,gBAAgB,IAAI,IAAI,CAAE;AAC9B,kBAAc,KAAK,WAAW;;AAI/B,OAAI,YACH,YAAW,QAAQ;IAAE,MAAM;IAAiB,IAAI;IAAa,CAAC;AAE/D,OAAI,OACH,YAAW,QAAQ;IAAE,MAAM;IAAY,IAAI;IAAQ,CAAC;GAIrD,MAAM,wBACL,CAAC,gBAAgB,mBAAmB,CAAC,eACjC;IACD,SAAS;IACT,KAAK;IACL,GACC,gBAAgB;IAAE,SAAS;IAAQ,KAAK;IAAQ;AAErD,cAAW,QAAQ;IAClB,cAAc;IACd,MAAM;IACN;IACA,CAAC;;EAEH,CAAC,CACF;;;;CAKD,SAAS,cACR,OACA,YACC;EACD,MAAM,KAAK,gBAAgB,IAAI,MAAM;AACrC,MAAI,CAAC,MAAM,gBAAgB,IAAI,MAAM,CAAE;AACvC,kBAAgB,IAAI,MAAM;AAC1B,aAAW,QAAQ;GAAE,MAAM;GAAkB,IAAI,GAAG;GAAI,CAAC;AACzD,aAAW,QAAQ;GAClB,MAAM;GACN,YAAY,GAAG;GACf,UAAU,GAAG;GACb,OAAO,GAAG;GACV,CAAC;;;;;;;;;;;;;;;;CAiBH,SAAS,mBACR,WACA,YACC;AACD,OAAK,MAAM,MAAM,WAAW;AAC3B,OAAI,wBAAwB,GAAG,EAAE;AAEhC,QAAI,uBAAuB,KAC1B,eAAc,qBAAqB,WAAW;AAE/C;;GAGD,MAAM,UAAW,GAAG,SAAoB;GACxC,MAAM,KAAK,GAAG;GACd,MAAM,SAAU,IAAI,QAAQ,GAAG,QAAQ;GACvC,MAAM,SAAU,IAAI,aAAa,GAAG,aAAa;GACjD,MAAM,OAAO,GAAG;AAEhB,OAAI,CAAC,gBAAgB,IAAI,QAAQ,EAAE;AAElC,QAAI,uBAAuB,QAAQ,wBAAwB,QAC1D,eAAc,qBAAqB,WAAW;IAG/C,MAAM,KAAK,QAAQ,YAAY;IAC/B,MAAM,WAAW,UAAU;AAC3B,oBAAgB,IAAI,SAAS;KAAE;KAAI;KAAU,MAAM;KAAI,CAAC;AACxD,0BAAsB;AAEtB,eAAW,QAAQ;KAClB,MAAM;KACN;KACA;KACA,CAAC;AAEF,QAAI,UAAU,QAAQ,WAAW,IAAI;KACpC,MAAM,QAAQ,OAAO,WAAW,WAAW,SAAS,KAAK,UAAU,OAAO;AAC1E,qBAAgB,IAAI,QAAQ,CAAE,QAAQ;AACtC,gBAAW,QAAQ;MAClB,MAAM;MACN;MACA;MACA,CAAC;;UAEG;IACN,MAAM,SAAS,gBAAgB,IAAI,QAAQ;AAC3C,0BAAsB;AACtB,QAAI,UAAU,QAAQ,WAAW,IAAI;KACpC,MAAM,QAAQ,OAAO,WAAW,WAAW,SAAS,KAAK,UAAU,OAAO;AAC1E,YAAO,QAAQ;AACf,gBAAW,QAAQ;MAClB,MAAM;MACN,IAAI,OAAO;MACX;MACA,CAAC;;;;;;;;;;;AAYP,IAAM,aAAN,cAAyB,gBAAoC;CAC5D,cAAc;EACb,IAAI,SAAS;EACb,MAAM,UAAU,IAAI,aAAa;AAEjC,QAAM;GACL,UAAU,OAAO,YAAY;AAC5B,cAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,MAAM,CAAC;IACjD,MAAM,QAAQ,OAAO,MAAM,KAAK;AAChC,aAAS,MAAM,KAAK,IAAI;AAExB,SAAK,MAAM,QAAQ,OAAO;KACzB,MAAM,UAAU,KAAK,MAAM;
AAC3B,SAAI,CAAC,QAAS;AACd,SAAI,QAAQ,WAAW,SAAS,CAC/B,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;cAC1B,QAAQ,WAAW,QAAQ,CACrC,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;;;GAKvC,MAAM,YAAY;AACjB,QAAI,OAAO,MAAM,EAAE;KAClB,MAAM,UAAU,OAAO,MAAM;AAC7B,SAAI,QAAQ,WAAW,SAAS,CAC/B,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;cAC1B,QAAQ,WAAW,QAAQ,CACrC,YAAW,QAAQ,QAAQ,MAAM,EAAE,CAAC;;;GAIvC,CAAC;;;;;;;;;;;AC3XJ,SAAgB,4BAA4B,UAAoD;AAC/F,QAAO,SAAS,KAAK,QAAQ;EAC5B,MAAM,aAAa,EAAE,GAAG,KAAK;AAG7B,MAAI,WAAW,YAAY,QAAQ,WAAW,YAAY,KAAA,EACxD,YAAmC,UAAU;AAG/C,SAAO;GACN;;;;;;AA8CH,SAAgB,UAAU,QAAgC;CACzD,MAAM,EAAE,WAAW,WAAW;CAC9B,MAAM,UAAU,OAAO,SAAS,WAAW;AAE3C,QAAO,eAAe,IACrB,OACA,QACA,SAC0F;EAC1F,MAAM,EACL,SACA,QAAQ,SACR,cACA,mBACA,QACA,GAAG,uBACA,WAAW,EAAE;EAEjB,MAAM,YAAY,IAAI,iBAAiB;AACvC,OAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,mBAAmB,EAAE;AAC9D,OAAI,UAAU,KAAA,KAAa,UAAU,KACpC,OAAM,IAAI,MACT,qBAAqB,IAAI,4CACzB;AAEF,OAAI;IACH,MAAM,WAAW,OAAO,MAAM;AAC9B,QAAI,CAAC,SACJ;AAED,cAAU,OAAO,KAAK,SAAS;WACxB;AACP,UAAM,IAAI,MACT,qBAAqB,IAAI,4CACzB;;;EAIH,MAAM,cAAc,UAAU,UAAU;EAExC,MAAM,YAAY,OAAO,MAAM,CAAC,WAAW,OAAO,GAAG,QAAQ,OAAO;EAGpE,MAAM,MAAM,SAAS,KAClB,wCAAwC,UAAU,GAAG,QAAQ,GAAG,cAAc,YAC9E,cAAc,IAAI,gBAAgB,OAElC,iDAAiD,UAAU,MAAM,YACjE,cAAc,IAAI,gBAAgB;EAGrC,MAAM,UAAkC;GACvC,eAAe,UAAU;GACzB,gBAAgB;GAChB,GAAI,gBAAgB,OAAO,iBAAiB,WACxC,eACD,EAAE;GACL;AAED,MAAI,SAAS;AACZ,OAAI,QAAQ,UACX,SAAQ,uBAAuB;AAEhC,OAAI,OAAO,QAAQ,aAAa,SAC/B,SAAQ,sBAAsB,OAAO,QAAQ,SAAS;AAEvD,OAAI,QAAQ,SACX,SAAQ,sBAAsB,QAAQ;AAEvC,OAAI,QAAQ,SACX,SAAQ,qBAAqB,KAAK,UAAU,QAAQ,SAAS;;EAM/D,MAAM,WAAW,MAAM,QAAQ,KAAK;GACnC,MAHY,KAAK,UAAU,OAAO;GAIlC;GACA,QAAQ;GACA;GACR,CAAC;AAGF,MAAI,CAAC,SAAS,MAAM,CAAC,mBAAmB;GACvC,IAAI;AACJ,OAAI;AACH,gBAAY,MAAM,SAAS,MAAM;WAC1B;AACP,gBAAY;;AAEb,SAAM,IAAI,MACT,yBAAyB,SAAS,OAAO,GAAG,SAAS,WAAW,KAAK,YACrE;;AAGF,MAAI,kBACH,QAAO;AAGR,MAAK,OAAiC,WAAW,MAAM;GACtD,MAAM,cAAc,SAAS,QAAQ,IAAI,eAAe,IAAI;AAC5D,OAAI,YAAY,SAAS,eAAe,IAAI,SAAS,KACpD,QAAO,SAAS;AAEjB,OAAI,SAAS,QAAQ,CAAC,YAAY,SAAS,OAAO,CAEjD,QAAO,SAAS;GAQjB,MAAM,gBAAgB,MAAM,QAAQ,KAAK;IACxC,MAAM,KAAK,UAAU;KACpB,GAAI;KA
CJ,QAAQ;KACR,CAAC;IACF;IACA,QAAQ;IACA;IACR,CAAC;AAEF,OAAI,CAAC,cAAc,IAAI;IACtB,IAAI;AACJ,QAAI;AACH,iBAAY,MAAM,cAAc,MAAM;YAC/B;AACP,iBAAY;;AAEb,UAAM,IAAI,MACT,yBAAyB,cAAc,OAAO,GAAG,cAAc,WAAW,KAAK,YAC/E;;AAMF,WAHkB,MAAM,cAAc,MAElC,EACa;;AAMlB,UAHa,MAAM,SAAS,MAExB,EACQ;;;;;;;;;;;;;;;;AAiBd,eAAsB,gBACrB,QACA,OACA,YACA,aACA,QACmC;CACnC,MAAM,MAAM,iDAAiD,OAAO,UAAU,UAAU;CAExF,MAAM,WAAW,MAAM,MAAM,KAAK;EACjC,QAAQ;EACR,SAAS;GACR,eAAe,UAAU,OAAO;GAChC,gBAAgB;GAChB;EACD,MAAM;EACN;EACA,CAAC;AAEF,KAAI,CAAC,SAAS,IAAI;EACjB,IAAI;AACJ,MAAI;AACH,eAAY,MAAM,SAAS,MAAM;UAC1B;AACP,eAAY;;AAEb,QAAM,IAAI,MACT,yBAAyB,SAAS,OAAO,GAAG,SAAS,WAAW,KAAK,YACrE;;CAGF,MAAM,OAAO,MAAM,SAAS,MAA4C;AACxE,QAAQ,KAAK,UAAU;;AAOxB,SAAgB,0BACf,OACA,YACC;AACD,KAAI,SAAS,KACZ,QAAO;EAAE,aAAa,KAAA;EAAW,OAAO,KAAA;EAAW;CAGpD,MAAM,cAAc,MAAM,KAAK,UAAU;EACxC,UAAU;GACT,aAAa,KAAK,SAAS,aAAa,KAAK,cAAc,KAAA;GAC3D,MAAM,KAAK;GACX,YAAY,KAAK,SAAS,aAAa,KAAK,cAAc,KAAA;GAC1D;EACD,MAAM;EACN,EAAE;AAEH,KAAI,cAAc,KACjB,QAAO;EAAE,aAAa,KAAA;EAAW,OAAO;EAAa;CAGtD,MAAM,OAAO,WAAW;AAExB,SAAQ,MAAR;EACC,KAAK,OACJ,QAAO;GAAE,aAAa;GAAM,OAAO;GAAa;EACjD,KAAK,OACJ,QAAO;GAAE,aAAa;GAAM,OAAO;GAAa;EACjD,KAAK,WACJ,QAAO;GAAE,aAAa;GAAY,OAAO;GAAa;EAIvD,KAAK,OACJ,QAAO;GACN,aAAa;GACb,OAAO,YAAY,QAAQ,SAAS,KAAK,SAAS,SAAS,WAAW,SAAS;GAC/E;EACF,QAEC,OAAM,IAAI,MAAM,iCADQ,OAC2C;;;AAmFtE,SAAS,gBAAgB,UAAkE;CAE1F,MAAM,KACL,cAAc,YAAY,OAAO,SAAS,aAAa,YAAY,SAAS,WACxE,SAAS,WACV;AAEJ,KAAI,IAAI,KACP,QAAO;EACN,OACC,OAAO,GAAG,cAAc,WACrB,GAAG,YACH,KAAK,UAAU,GAAG,aAAa,EAAE,CAAC;EACtC,YAAY,SAAS,MAAM,YAAY;EACvC,MAAM;EACN,UAAU,GAAG;EACb;CAIF,MAAM,OAAO;AACb,QAAO;EACN,OACC,OAAO,KAAK,cAAc,WACvB,KAAK,YACL,KAAK,UAAU,KAAK,aAAa,EAAE,CAAC;EACxC,YAAY,KAAK,MAAM,YAAY;EACnC,MAAM;EACN,UAAU,KAAK;EACf;;AAGF,SAAgB,iBAAiB,QAA4D;AAC5F,KAAI,OAAO,cAAc,MAAM,QAAQ,OAAO,WAAW,CACxD,QAAO,OAAO,WAAW,KAAK,aAC7B,gBAAgB,SAAS,CACzB;CAGF,MAAM,UAAU,OAAO;AAGvB,KAAI,UAAU,IAAI,SAAS,cAAc,MAAM,QAAQ,QAAQ,GAAG,QAAQ,WAAW,CACpF,QAAO,QAAQ,GAAG,QAAQ,WAAW,KAAK,aAAa,gBAAgB,SAAS,CAAC;AAGlF,QAAO,EAAE;;;;;;;;;AAmBV,SAAgB,YAAY,QAAqD;CAGhF,
MAAM,gBADU,OAAO,UACS,IAAI,SAAS;AAC7C,KAAI,iBAAiB,QAAQ,OAAO,cAAc,CAAC,SAAS,EAC3D,QAAO,OAAO,cAAc;AAG7B,KAAI,cAAc,QAAQ;EACzB,MAAM,WAAW,OAAO;AAExB,MAAI,OAAO,aAAa,YAAY,aAAa,KAChD,QAAO,KAAK,UAAU,SAAS;AAGhC,MAAI,OAAO,aAAa,SACvB,QAAO,OAAO,SAAS;AAGxB,MAAI,aAAa,QAAQ,aAAa,KAAA,EACrC;AAED,SAAO,OAAO,SAAS;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACndzB,IAAa,4BAAb,MAAkE;CAWjE,YACC,SACA,UACA,QACC;wBAdO,wBAAuB,KAAK;wBAC5B,+BAA8B,OAAO;wBAErC,iBAAkF,EAAE,CAAC;wBAErF,WAAA,KAAA,EAA8B;wBAC9B,YAAA,KAAA,EAA+B;wBAEvB,UAAA,KAAA,EAA2B;AAO3C,OAAK,UAAU;AACf,OAAK,WAAW;AAChB,OAAK,SAAS;;CAGf,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YAAoB,EACnB,OACA,kBACA,iBACA,kBACmE;EACnE,MAAM,WAA8B,EAAE;AAEtC,MAAI,SAAS,QAAQ,MAAM,SAAS,GAAG;AACtC,WAAQ,KACP,oFACA;AACD,YAAS,KAAK;IAAE,SAAS;IAAS,MAAM;IAAe,CAAC;;AAGzD,MAAI,oBAAoB,KACvB,UAAS,KAAK;GAAE,SAAS;GAAoB,MAAM;GAAe,CAAC;AAGpE,MAAI,mBAAmB,KACtB,UAAS,KAAK;GAAE,SAAS;GAAmB,MAAM;GAAe,CAAC;AAGnE,MAAI,gBAAgB,SAAS,OAC5B,UAAS,KAAK;GAAE,SAAS;GAAkB,MAAM;GAAe,CAAC;AAGlE,SAAO;;;;;;CAOR,WAAmB,QAAwE;EAC1F,MAAM,EAAE,aAAa,+BAA+B,OAAO;AAC3D,SAAO,SAAS,KAAK,EAAE,SAAS,WAAW,GAAG,KAAK,IAAI,UAAU,CAAC,KAAK,OAAO;;CAG/E,MAAM,WACL,SAC8D;EAC9D,MAAM,WAAW,KAAK,YAAY,QAAQ;EAC1C,MAAM,QAAQ,KAAK,WAAW,QAAQ,OAAO;EAE7C,MAAM,SAAS,MAAM,KAAK,OAAO,QAAQ,SAAS,EAAE,OAAO,CAAC;AAE5D,SAAO;GACN,cAAc;IAAE,SAAS;IAAQ,KAAK;IAAQ;GAC9C,SAAS;IACR,GAAG,OAAO,KAAK,KAAK,EAAE,SAAS,UAAU,aAAa;KACrD,MAAM;KACN,YAAY;KACZ,IAAI;KACJ,KAAK;KACL,kBAAkB,EACjB,YAAY,EAAE,OAAO,EACrB;KACD,EAAE;IACH;KACC,MAAM;KACN,MAAM,OAAO;KACb;IACD,GAAG,iBAAiB,OAA6C;IACjE;GACD,OAAO,kBAAkB,OAA6C;GACtE;GACA;;CAGF,MAAM,SACL,SAC4D;EAC5D,MAAM,WAAW,KAAK,YAAY,QAAQ;EAC1C,MAAM,QAAQ,KAAK,WAAW,QAAQ,OAAO;AAO7C,SAAO,EACN,QAAQ,mBACP,gBAPe,MAAM,KAAK,OAAO,QAAQ,SAAS;GACnD;GACA,QAAQ;GACR,CAAC,CAI6E,EAC7E,SACA,EACD;;;;;ACrGH,IAAa,0BAAb,MAAiE;CAMhE,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,IAAI,uBAA+B;AAElC,SAAO,KAAK,SAAS,wBAAwB;;CAG9C,IAAI,wBAAiC;AACpC,SAAO,KAAK,SAAS,yBAAyB;;CAG/C,YACC,SACA,UACA,QACC;wBAtBO,wBAAuB,KAAK;wBAC5B,WAAA,KAAA,EAAyB;wBACjB,UAAA,KAAA,EAAiC;wBACjC,YAAA,
KAAA,EAAqC;AAoBrD,OAAK,UAAU;AACf,OAAK,WAAW;AAChB,OAAK,SAAS;;CAGf,MAAM,QAAQ,EACb,QACA,eACgE;AAChE,MAAI,OAAO,SAAS,KAAK,qBACxB,OAAM,IAAI,mCAAmC;GAC5C,sBAAsB,KAAK;GAC3B,SAAS,KAAK;GACd,UAAU,KAAK;GACf;GACA,CAAC;EAGH,MAAM,EACL,SACA,sBAAsB,uBACtB,uBAAuB,wBACvB,GAAG,uBACA,KAAK;AAcT,SAAO;GACN,aAbgB,MAAM,KAAK,OAAO,QAAQ,IAC1C,KAAK,SACL,EACC,MAAM,QACN,EACD;IACC,SAAS,KAAK,OAAO,WAAW;IAChC,QAAQ;IACR,GAAG;IACH,CACD,EAG+C;GAC/C,UAAU,EAAE;GACZ;;;;;AClEH,IAAa,6BAAb,MAAmE;CAWlE,YACC,SACA,UACA,QACC;wBAdO,wBAAuB,KAAK;wBAC5B,+BAA8B,OAAO;wBAErC,iBAAkF,EAAE,CAAC;wBAErF,WAAA,KAAA,EAA8B;wBAC9B,YAAA,KAAA,EAAgC;wBAExB,UAAA,KAAA,EAA4B;AAO5C,OAAK,UAAU;AACf,OAAK,WAAW;AAChB,OAAK,SAAS;;CAGf,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,QAAgB,EACf,gBACA,OACA,YACA,iBACA,aACA,MACA,kBACA,iBACA,QACgD;EAChD,MAAM,OAAO,gBAAgB,QAAQ;EAErC,MAAM,WAA8B,EAAE;AAEtC,MAAI,oBAAoB,KACvB,UAAS,KAAK;GAAE,SAAS;GAAoB,MAAM;GAAe,CAAC;AAGpE,MAAI,mBAAmB,KACtB,UAAS,KAAK;GAAE,SAAS;GAAmB,MAAM;GAAe,CAAC;EAGnE,MAAM,WAAW;GAChB,YAAY;GACZ,OAAO,KAAK;GACZ,aAAa;GACb,aAAa,KAAK,SAAS;GAC3B;GACA,OAAO;GACP;AAED,UAAQ,MAAR;GACC,KAAK,OACJ,QAAO;IACN,MAAM;KACL,GAAG;KACH,iBAAiB,KAAA;KAGjB,GAAG,0BAA0B,OAAO,WAAW;KAC/C;IACD;IACA;GAGF,KAAK,OACJ,QAAO;IACN,MAAM;KACL,GAAG;KACH,iBAAiB;MAChB,MAAM;MACN,aACC,gBAAgB,SAAS,SAAS,eAAe,SAAS,KAAA;MAC3D;KACD,OAAO,KAAA;KACP,aAAa,KAAA;KACb;IACD;IACA;GAGF,QAEC,OAAM,IAAI,MAAM,qBADQ,OAC+B;;;;;;;;;;;;;;;;;;;;;CAuB1D,eACC,MACA,UACA,SACC;EAID,MAAM,aAAa,SAAS,kBAAkB;EAC9C,MAAM,UACL,eAAe,QAAQ,OAAO,eAAe,YAAY,CAAC,MAAM,QAAQ,WAAW,GAC/E,aACD,EAAE;EACN,MAAM,kBACL,sBAAsB,UACnB,QAAQ,mBACR,KAAK,SAAS;EAClB,MAAM,qBACL,0BAA0B,UACvB,QAAQ,uBACR,KAAK,SAAS;AAElB,SAAO;GACN,YAAY,KAAK;GACjB,UAAU,KAAK,OAAO,YAAY,4BAA4B,SAAS,GAAG;GAC1E,aAAa,KAAK;GAClB,OAAO,KAAK;GACZ,GAAI,KAAK,cAAc,EAAE,aAAa,KAAK,aAAa,GAAG,EAAE;GAC7D,OAAO,KAAK;GACZ,GAAI,KAAK,kBAAkB,EAAE,iBAAiB,KAAK,iBAAiB,GAAG,EAAE;GACzE,GAAI,SAAS,SAAS,EAAE,QAAQ,MAAM,GAAG,EAAE;GAC3C,GAAI,oBAAoB,KAAA,IAAY,EAAE,kBAAkB,iBAAiB,GAAG,EAAE;GAC9E,GAAI,uBAAuB,KAAA,IACxB,EAAE,sBAAsB,oBAAoB,GAC5C,EAAE;GACL;;;;;;;;;CAU
F,gBAAwB;EACvB,MAAM,EACL,SACA,YAAY,aACZ,iBACA,cACA,kBAAkB,kBAClB,sBAAsB,qBACtB,GAAG,uBACA,KAAK;EAET,MAAM,gBAAgB;GACrB,GAAI,gBAAgB,OAAO,iBAAiB,WACxC,eACD,EAAE;GACL,GAAI,kBAAkB,EAAE,sBAAsB,iBAAiB,GAAG,EAAE;GACpE;AAED,SAAO;GACN,SAAS,KAAK,OAAO,WAAW;GAChC,GAAI,OAAO,KAAK,cAAc,CAAC,SAAS,IAAI,EAAE,cAAc,eAAe,GAAG,EAAE;GAChF,GAAG;GACH;;CAGF,MAAM,WACL,SAC8D;EAC9D,MAAM,EAAE,MAAM,aAAa,KAAK,QAAQ,QAAQ;EAChD,MAAM,EAAE,aAAa,+BAA+B,QAAQ,OAAO;EAEnE,MAAM,SAAS,KAAK,eAAe,MAAM,UAAU,EAClD,iBAAiB,QAAQ,iBACzB,CAAC;EACF,MAAM,aAAa,KAAK,eAAe;EAEvC,MAAM,SAAS,MAAM,KAAK,OAAO,QAAQ,IACxC,KAAK,OACL,QACA;GACC,GAAG;GACH,QAAQ,QAAQ;GAChB,CACD;AAED,MAAI,kBAAkB,eACrB,OAAM,IAAI,MACT,sGACA;EAGF,MAAM,eAAe;EACrB,MAAM,UAAU,aAAa;EAK7B,MAAM,mBACL,UAAU,IAAI,SAAS,qBAAqB,UAAU,IAAI,SAAS;AAEpE,SAAO;GACN,cAAc,yBAAyB,aAAa;GACpD,SAAS;IACR,GAAI,mBACD,CAAC;KAAE,MAAM;KAAsB,MAAM;KAAkB,CAAC,GACxD,EAAE;IACL;KACC,MAAM;KACN,MAAM,YAAY,aAAa,IAAI;KACnC;IACD,GAAG,iBAAiB,aAAa;IACjC;GACD,OAAO,kBAAkB,OAAkC;GAC3D;GACA;;CAGF,MAAM,SACL,SAC4D;EAC5D,MAAM,EAAE,MAAM,aAAa,KAAK,QAAQ,QAAQ;EAChD,MAAM,EAAE,aAAa,+BAA+B,QAAQ,OAAO;EAEnE,MAAM,SAAS,KAAK,eAAe,MAAM,UAAU;GAClD,QAAQ;GACR,iBAAiB,QAAQ;GACzB,CAAC;EACF,MAAM,aAAa,KAAK,eAAe;EAEvC,MAAM,WAAW,MAAM,KAAK,OAAO,QAAQ,IAC1C,KAAK,OACL,QACA;GACC,GAAG;GACH,QAAQ,QAAQ;GAChB,CACD;AAGD,MAAI,oBAAoB,eACvB,QAAO,EACN,QAAQ,mBAAmB,gBAAgB,SAAS,EAAE,SAAS,EAC/D;EAKF,MAAM,eAAe;EACrB,MAAM,UAAU,aAAa;EAK7B,MAAM,mBACL,UAAU,IAAI,SAAS,qBAAqB,UAAU,IAAI,SAAS;EAEpE,IAAI,SAAwB;EAC5B,IAAI,cAA6B;AAEjC,SAAO,EACN,QAAQ,IAAI,eAA0C,EACrD,MAAM,YAAY;AACjB,cAAW,QAAQ;IAClB,MAAM;IACI;IACV,CAAC;AAEF,OAAI,kBAAkB;AACrB,kBAAc,YAAY;AAC1B,eAAW,QAAQ;KAAE,MAAM;KAAmB,IAAI;KAAa,CAAC;AAChE,eAAW,QAAQ;KAClB,MAAM;KACN,IAAI;KACJ,OAAO;KACP,CAAC;AACF,eAAW,QAAQ;KAAE,MAAM;KAAiB,IAAI;KAAa,CAAC;;GAG/D,MAAM,OAAO,YAAY,aAAa;AACtC,OAAI,MAAM;AACT,aAAS,YAAY;AACrB,eAAW,QAAQ;KAAE,MAAM;KAAc,IAAI;KAAQ,CAAC;AACtD,eAAW,QAAQ;KAAE,MAAM;KAAc,IAAI;KAAQ,OAAO;KAAM,CAAC;AACnE,eAAW,QAAQ;KAAE,MAAM;KAAY,IAAI;KAAQ,CAAC;;AAGrD,QAAK,MAAM,YAAY,iBAAiB,aAAa,CACpD,YAAW,QAAQ,SAAS;AAG7B,cAAW,Q
AAQ;IAClB,MAAM;IACN,cAAc,yBAAyB,aAAa;IACpD,OAAO,kBAAkB,SAAoC;IAC7D,CAAC;AACF,cAAW,OAAO;KAEnB,CAAC,EACF;;;;;ACxUH,IAAa,sBAAb,MAAyD;CAGxD,IAAI,mBAA2B;AAC9B,SAAO,KAAK,SAAS,oBAAoB;;CAG1C,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBAbD,wBAAuB,KAAK;;CAgBrC,MAAM,WAAW,EAChB,QACA,GACA,MACA,aACA,MACA,eAGC;EACD,MAAM,EAAE,OAAO,WAAW,4BAA4B,KAAK;EAE3D,MAAM,WAAmC,EAAE;AAE3C,MAAI,eAAe,KAClB,UAAS,KAAK;GACb,SAAS;GACT,SAAS;GACT,MAAM;GACN,CAAC;EAGH,MAAM,gBAAgB,YAAY;AAejC,UAAOC,eAdS,MAAM,KAAK,OAAO,QAAQ,IACzC,KAAK,SACL;IACC;IACA,QAAQ,UAAU;IAClB;IACA;IACA,EACD;IACC,SAAS,KAAK,OAAO;IACrB,QAAQ;IACR,CACD,CAE0B;;AAO5B,SAAO;GACN,QAL4B,MAAM,QAAQ,IAC1C,MAAM,KAAK,EAAE,QAAQ,GAAG,QAAQ,eAAe,CAAC,CAChD;GAIA,UAAU;IACT,SAAS,EAAE;IACX,SAAS,KAAK;IACd,2BAAW,IAAI,MAAM;IACrB;GACD;GACA;;;AAIH,SAAS,4BAA4B,MAA0B;CAC9D,MAAM,CAAC,OAAO,UAAU,MAAM,MAAM,IAAI,IAAI,CAAC,KAAA,GAAW,KAAA,EAAU;AAElE,QAAO;EACN,QAAQ,aAAa,OAAO;EAC5B,OAAO,aAAa,MAAM;EAC1B;;AAGF,SAAS,aAAa,OAAgB;AACrC,KAAI,UAAU,MAAM,CAAC,MAAO,QAAO,KAAA;CACnC,MAAM,SAAS,OAAO,MAAM;AAC5B,QAAO,OAAO,UAAU,OAAO,GAAG,SAAS,KAAA;;;;;;;;;;AAW5C,eAAeA,eAAa,QAAsC;AACjE,KAAI,kBAAkB,WACrB,QAAO;AAER,KAAI,kBAAkB,YACrB,QAAO,IAAI,WAAW,OAAO;AAE9B,KAAI,kBAAkB,gBAAgB;EACrC,MAAM,SAAU,OAAsC,WAAW;EACjE,MAAM,SAAuB,EAAE;EAC/B,IAAI,cAAc;AAClB,SAAO,MAAM;GACZ,MAAM,EAAE,MAAM,UAAU,MAAM,OAAO,MAAM;AAC3C,OAAI,KAAM;AACV,UAAO,KAAK,MAAM;AAClB,kBAAe,MAAM;;EAEtB,MAAM,SAAS,IAAI,WAAW,YAAY;EAC1C,IAAI,SAAS;AACb,OAAK,MAAM,SAAS,QAAQ;AAC3B,UAAO,IAAI,OAAO,OAAO;AACzB,aAAU,MAAM;;AAEjB,SAAO;;AAGR,KAAI,kBAAkB,SACrB,QAAO,IAAI,WAAW,MAAM,OAAO,aAAa,CAAC;AAGlD,KAAI,OAAO,WAAW,YAAY,WAAW,MAAM;EAClD,MAAM,MAAM;AAEZ,MAAI,OAAO,IAAI,UAAU,SACxB,QAAO,WAAW,KAAK,KAAK,IAAI,MAAM,GAAG,MAAM,EAAE,WAAW,EAAE,CAAC;AAGhE,MAAI,IAAI,gBAAgB,WACvB,QAAO,IAAI;AAGZ,MAAI,IAAI,gBAAgB,YACvB,QAAO,IAAI,WAAW,IAAI,KAAK;AAGhC,MAAI,OAAO,IAAI,gBAAgB,WAC9B,QAAO,IAAI,WAAW,MAAO,IAA4B,aAAa,CAAC;;AAGzE,OAAM,IAAI,MACT,gDAAgD,OAAO,UAAU,SAAS,KAAK,OAAO,CAAC,cACtF,OAAO,WAAW,YAAY,WAAW,OACtC,KAAK,UAAU,OAAO,K
AAK,OAAO,CAAC,GACnC,QAEJ;;;;;;;;;;;ACrIF,IAAa,8BAAb,MAAyE;CAGxE,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBATD,wBAAuB,KAAK;;CAYrC,MAAM,WACL,SACmE;EACnE,MAAM,EAAE,OAAO,WAAW,gBAAgB;EAE1C,MAAM,WAAmC,EAAE;EAI3C,MAAM,aACL,OAAO,UAAU,WACd,WAAW,KAAK,KAAK,MAAM,GAAG,MAAM,EAAE,WAAW,EAAE,CAAC,GACpD;EAEJ,MAAM,UAAU,KAAK,YAAY;EAEjC,IAAI;AAEJ,MAAI,QACH,aAAY,MAAM,KAAK,SAAS,YAAY,WAAW,YAAY;MAEnE,aAAY,MAAM,KAAK,WAAW,YAAY,YAAY;EAG3D,MAAM,SAAS;AAGf,MAAI,QACH,QAAO,KAAK,uBAAuB,QAAQ,SAAS;AAErD,SAAO,KAAK,yBAAyB,QAAQ,SAAS;;CAOvD,MAAc,WAAW,YAAwB,aAA6C;EAS7F,MAAM,SAAkC,EAAE,OANzB,KAAK,YAER,sCACV,mBAAmB,WAAW,GAC9B,MAAM,KAAK,WAAW,EAEuB;AAEjD,MAAI,KAAK,SAAS,SACjB,QAAO,WAAW,KAAK,SAAS;AAEjC,MAAI,KAAK,SAAS,OACjB,QAAO,iBAAiB,KAAK,SAAS;AAGvC,SAAO,KAAK,OAAO,QAAQ,IAC1B,KAAK,SACL,QACA;GAAE,SAAS,KAAK,OAAO;GAAS,QAAQ;GAAa,CACrD;;CAGF,yBACC,KACA,UAC0D;EAC1D,MAAM,OAAQ,IAAI,QAAmB;EAGrC,MAAM,WAA4E,EAAE;AAGpF,MAAI,IAAI,YAAY,MAAM,QAAQ,IAAI,SAAS,CAC9C,MAAK,MAAM,OAAO,IAAI,SACrB,UAAS,KAAK;GACb,MAAQ,IAAgC,QAAmB;GAC3D,aAAe,IAAgC,SAAoB;GACnE,WAAa,IAAgC,OAAkB;GAC/D,CAAC;WAIK,IAAI,SAAS,MAAM,QAAQ,IAAI,MAAM,CAC7C,MAAK,MAAM,KAAK,IAAI,MACnB,UAAS,KAAK;GACb,MAAQ,EAA8B,QAAmB;GACzD,aAAe,EAA8B,SAAoB;GACjE,WAAa,EAA8B,OAAkB;GAC7D,CAAC;EAKJ,MAAM,OAAO,IAAI;AAEjB,SAAO;GACN;GACA;GACA,UAAW,MAAM,YAAuB,KAAA;GACxC,mBAAoB,MAAM,YAAuB,KAAA;GACjD;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;CAOF,MAAc,SACb,YACA,WACA,aACmB;AACnB,MAAI,KAAK,OAAO,UAEf,QAAO,KAAK,OAAO,QAAQ,IAC1B,KAAK,SACL,EACC,OAAO;GAAE,MAAM,mBAAmB,WAAW;GAAE,aAAa;GAAW,EACvE,EACD;GAAE,SAAS,KAAK,OAAO;GAAS,QAAQ;GAAa,CACrD;AAMF,MAAI,CAAC,KAAK,OAAO,YAChB,OAAM,IAAI,MACT,yIAEA;AAEF,SAAO,gBACN,KAAK,OAAO,aACZ,KAAK,SACL,YACA,WACA,YACA;;CAGF,uBACC,KACA,UAC0D;EAY1D,MAAM,OAVU,IAAI,SACM,YASH,IAAI,eAAe;EAE1C,MAAM,OAAO,KAAK,cAAc;EAChC,MAAM,WAA4E,EAAE;AAEpF,MAAI,KAAK,SAAS,MAAM,QAAQ,IAAI,MAAM,CACzC,MAAK,MAAM,KAAK,IAAI,MACnB,UAAS,KAAK;GACb,MAAM,EAAE,QAAQ;GAChB,aAAa,EAAE,SAAS;GACxB,WAAW,EAAE,OAAO;GACpB,CAAC;AAIJ,SA
AO;GACN;GACA;GACA,UAAU,KAAA;GACV,mBAAmB,KAAA;GACnB;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;;AAQH,SAAS,mBAAmB,OAA2B;CACtD,IAAI,SAAS;AACb,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,QAAQ,IACjC,WAAU,OAAO,aAAa,MAAM,GAAI;AAEzC,QAAO,KAAK,OAAO;;;;;;;;;;AClOpB,IAAa,uBAAb,MAA2D;CAG1D,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBATD,wBAAuB,KAAK;;CAYrC,MAAM,WACL,SAC4D;EAC5D,MAAM,EAAE,MAAM,OAAO,OAAO,gBAAgB;EAE5C,MAAM,WAAmC,EAAE;AAE3C,MAAI,QAAQ,aACX,UAAS,KAAK;GACb,SAAS;GACT,SAAS;GACT,MAAM;GACN,CAAC;AAGH,MAAI,QAAQ,aACX,UAAS,KAAK;GACb,SACC;GACD,SAAS;GACT,MAAM;GACN,CAAC;EAIH,MAAM,SAAkC,EAAE,MAAM;AAChD,MAAI,MAAO,QAAO,QAAQ;AAC1B,MAAI,SAAS,KAAM,QAAO,QAAQ;AAqBlC,SAAO;GACN,OAHa,MAAM,aAjBL,MAAM,KAAK,OAAO,QAAQ,IACxC,KAAK,SACL,QACA;IACC,SAAS,KAAK,OAAO;IACrB,QAAQ;IAKR,mBAAmB;IACnB,CACD,CAKuC;GAIvC;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;;;;;;;;;;AAgBH,eAAe,aAAa,QAAsC;AAEjE,KAAI,kBAAkB,SACrB,QAAO,IAAI,WAAW,MAAM,OAAO,aAAa,CAAC;AAElD,KAAI,kBAAkB,WACrB,QAAO;AAER,KAAI,kBAAkB,YACrB,QAAO,IAAI,WAAW,OAAO;AAE9B,KAAI,kBAAkB,gBAAgB;EACrC,MAAM,SAAU,OAAsC,WAAW;EACjE,MAAM,SAAuB,EAAE;EAC/B,IAAI,cAAc;AAClB,SAAO,MAAM;GACZ,MAAM,EAAE,MAAM,UAAU,MAAM,OAAO,MAAM;AAC3C,OAAI,KAAM;AACV,UAAO,KAAK,MAAM;AAClB,kBAAe,MAAM;;EAEtB,MAAM,SAAS,IAAI,WAAW,YAAY;EAC1C,IAAI,SAAS;AACb,OAAK,MAAM,SAAS,QAAQ;AAC3B,UAAO,IAAI,OAAO,OAAO;AACzB,aAAU,MAAM;;AAEjB,SAAO;;AAGR,KAAI,OAAO,WAAW,YAAY,WAAW,MAAM;EAClD,MAAM,MAAM;AACZ,MAAI,OAAO,IAAI,UAAU,SACxB,QAAO,WAAW,KAAK,KAAK,IAAI,MAAM,GAAG,MAAM,EAAE,WAAW,EAAE,CAAC;;AAGjE,OAAM,IAAI,MACT,8CAA8C,OAAO,UAAU,SAAS,KAAK,OAAO,GACpF;;;;;;;;;;;;;ACxHF,IAAa,0BAAb,MAAiE;CAGhE,IAAI,WAAmB;AACtB,SAAO,KAAK,OAAO;;CAGpB,YACC,SACA,UACA,QACC;AAHQ,OAAA,UAAA;AACA,OAAA,WAAA;AACA,OAAA,SAAA;wBATD,wBAAuB,KAAK;;CAYrC,MAAM,SACL,SAC6D;EAC7D,MAAM,EAAE,WAAW,OAAO,MAAM,gBAAgB;EAEhD,MAAM,WAAmC,EAAE;EAM3C,MAAM,SAAkC;GACvC;GACA,UALgB,oBAAoB,WAAW,SAAS;GAMxD;AACD,MAAI,QAAQ,KACX,QAAO,QAAQ;AAmBhB,SAAO;GACN,WAjBe,MAAM,KAAK,OAAO,Q
AAQ,IACzC,KAAK,SACL,QACA;IAAE,SAAS,KAAK,OAAO;IAAS,QAAQ;IAAa,CACrD,EAGuB,YAEK,EAAE,EAC7B,KAAK,UAAU;IACf,OAAO,KAAK,MAAM;IAClB,gBAAgB,KAAK,SAAS;IAC9B,EAAE,CACF,MAAM,GAAG,MAAM,EAAE,iBAAiB,EAAE,eAAe;GAIpD;GACA,UAAU;IACT,2BAAW,IAAI,MAAM;IACrB,SAAS,KAAK;IACd,SAAS,EAAE;IACX;GACD;;;;;;;;;;AAeH,SAAS,oBACR,WACA,UAC0B;AAC1B,KAAI,UAAU,SAAS,OACtB,QAAO,UAAU,OAAO,KAAK,UAAU,EAAE,MAAM,EAAE;AAIlD,UAAS,KAAK;EACb,SAAS;EACT,MAAM;EACN,CAAC;AAEF,QAAO,UAAU,OAAO,KAAK,SAAS,EAAE,MAAM,KAAK,UAAU,IAAI,EAAE,EAAE;;;;;;;;AClGtE,IAAa,2BAAb,cAA8C,0BAA0B;;;;;;ACkJxE,SAAgB,gBAAgB,SAAuC;AACtE,KAAI,CAAC,QAAQ,WAAW,EAAE,eAAe,WAAW,YAAY,SAC/D,OAAM,IAAI,MACT,yIAEA;CAGF,IAAI;CACJ,MAAM,YAAY,CAAC,CAAC,QAAQ;AAE5B,KAAI,QAAQ,QACX,WAAU,QAAQ;MACZ;EACN,MAAM,EAAE,WAAW,WAAW;AAC9B,YAAU,EACT,KAAK,UAAU;GAAE;GAAW;GAAQ,OAAO,QAAQ;GAAO,CAAC,EAC3D;;CAGF,MAAM,mBAAmB,SAA+B,WAAkC,EAAE,KAC3F,IAAI,2BAA2B,SAAS,UAAU;EACjD;EACA,SAAS,QAAQ;EACjB,UAAU;EACV;EACA,CAAC;CAEH,MAAM,oBACL,SACA,WAAmC,EAAE,KAErC,IAAI,oBAAoB,SAAS,UAAU;EAC1C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CACH,MAAM,wBACL,SACA,WAAuC,EAAE,KAEzC,IAAI,wBAAwB,SAAS,UAAU;EAC9C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CAEH,MAAM,4BACL,SACA,WAA2C,EAAE,KAE7C,IAAI,4BAA4B,SAAS,UAAU;EAClD;EACA,SAAS,QAAQ;EACjB,UAAU;EACV;EACA,aACC,CAAC,aAAa,eAAe,UAC1B;GAAE,WAAW,QAAQ;GAAW,QAAQ,QAAQ;GAAQ,GACxD,KAAA;EACJ,CAAC;CAEH,MAAM,qBAAqB,SAAuB,WAAoC,EAAE,KACvF,IAAI,qBAAqB,SAAS,UAAU;EAC3C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CAEH,MAAM,wBACL,SACA,WAAuC,EAAE,KAEzC,IAAI,wBAAwB,SAAS,UAAU;EAC9C;EACA,SAAS,QAAQ;EACjB,UAAU;EACV,CAAC;CAEH,MAAM,YAAY,SAA+B,aAAqC;AACrF,MAAI,IAAI,OACP,OAAM,IAAI,MAAM,sEAAsE;AAEvF,SAAO,gBAAgB,SAAS,SAAS;;AAG1C,UAAS,OAAO;AAChB,UAAS,YAAY;AACrB,UAAS,gBAAgB;AACzB,UAAS,qBAAqB;AAC9B,UAAS,QAAQ;AACjB,UAAS,aAAa;AACtB,UAAS,gBAAgB;AACzB,UAAS,qBAAqB;AAC9B,UAAS,SAAS;AAClB,UAAS,cAAc;AACvB,UAAS,YAAY;AACrB,UAAS,iBAAiB;AAE1B,QAAO;;;;;;;;AAyBR,SAAgB,eACf,SAEA,eAAe,iBACI;CACnB,MAAM,UAAU,QAAQ;CAExB,MAAM,mBAAmB,WAAiC,EAAE,KAC3D,IAAI,0BAA0B,4CAA4C,UAAU;EACnF;EACA,UAAU;EACV,CAAC;CAEH,MAAM,YAAY,aAAoC;AACrD,MAAI,IAAI,OACP,
OAAM,IAAI,MAAM,qEAAqE;AAEtF,SAAO,gBAAgB,SAAS;;AAGjC,UAAS,OAAO;AAEhB,QAAO;;AAmBR,IAAI,gBAAgB;;;;;AAMpB,SAAgB,cAAc,SAA6C;AAC1E,KAAI,CAAC,eAAe;AACnB,kBAAgB;AAChB,UAAQ,KACP,yKAGA;;AAEF,QAAO,eAAe,SAAS,eAAe"}
|
package/package.json
CHANGED
package/src/streaming.ts
CHANGED
|
@@ -93,8 +93,11 @@ export function getMappedStream(
|
|
|
93
93
|
// Track tool call streaming state per index.
|
|
94
94
|
// When we see the first chunk for a tool call index, we emit tool-input-start.
|
|
95
95
|
// Subsequent argument deltas emit tool-input-delta.
|
|
96
|
-
//
|
|
96
|
+
// tool-input-end is emitted eagerly when a new tool index starts or a null
|
|
97
|
+
// finalization chunk arrives; any remaining open calls are closed in flush().
|
|
97
98
|
const activeToolCalls = new Map<number, { id: string; toolName: string; args: string }>();
|
|
99
|
+
const closedToolCalls = new Set<number>();
|
|
100
|
+
let lastActiveToolIndex: number | null = null;
|
|
98
101
|
|
|
99
102
|
// Step 1: Decode bytes into SSE lines
|
|
100
103
|
const sseStream = rawStream.pipeThrough(new SSEDecoder());
|
|
@@ -224,18 +227,10 @@ export function getMappedStream(
|
|
|
224
227
|
},
|
|
225
228
|
|
|
226
229
|
flush(controller) {
|
|
227
|
-
// Close
|
|
228
|
-
for (const [
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
// incremental tool-input-* events AND a final tool-call event,
|
|
232
|
-
// matching how @ai-sdk/openai-compatible works.
|
|
233
|
-
controller.enqueue({
|
|
234
|
-
type: "tool-call",
|
|
235
|
-
toolCallId: tc.id,
|
|
236
|
-
toolName: tc.toolName,
|
|
237
|
-
input: tc.args,
|
|
238
|
-
});
|
|
230
|
+
// Close any tool calls that weren't already closed during streaming
|
|
231
|
+
for (const [idx] of activeToolCalls) {
|
|
232
|
+
if (closedToolCalls.has(idx)) continue;
|
|
233
|
+
closeToolCall(idx, controller);
|
|
239
234
|
}
|
|
240
235
|
|
|
241
236
|
// Close open text/reasoning blocks
|
|
@@ -264,6 +259,25 @@ export function getMappedStream(
|
|
|
264
259
|
}),
|
|
265
260
|
);
|
|
266
261
|
|
|
262
|
+
/**
|
|
263
|
+
* Emit tool-input-end + tool-call for a tool call that is complete.
|
|
264
|
+
*/
|
|
265
|
+
function closeToolCall(
|
|
266
|
+
index: number,
|
|
267
|
+
controller: TransformStreamDefaultController<LanguageModelV3StreamPart>,
|
|
268
|
+
) {
|
|
269
|
+
const tc = activeToolCalls.get(index);
|
|
270
|
+
if (!tc || closedToolCalls.has(index)) return;
|
|
271
|
+
closedToolCalls.add(index);
|
|
272
|
+
controller.enqueue({ type: "tool-input-end", id: tc.id });
|
|
273
|
+
controller.enqueue({
|
|
274
|
+
type: "tool-call",
|
|
275
|
+
toolCallId: tc.id,
|
|
276
|
+
toolName: tc.toolName,
|
|
277
|
+
input: tc.args,
|
|
278
|
+
});
|
|
279
|
+
}
|
|
280
|
+
|
|
267
281
|
/**
|
|
268
282
|
* Emit incremental tool call events from streaming chunks.
|
|
269
283
|
*
|
|
@@ -271,17 +285,25 @@ export function getMappedStream(
|
|
|
271
285
|
* Chunk A: { id, type, index, function: { name } } — start
|
|
272
286
|
* Chunk B: { index, function: { arguments: "partial..." } } — args delta
|
|
273
287
|
* Chunk C: { index, function: { arguments: "rest..." } } — args delta
|
|
274
|
-
* Chunk D: { id: null, type: null, function: { name: null } } — finalize
|
|
288
|
+
* Chunk D: { id: null, type: null, function: { name: null } } — finalize
|
|
275
289
|
*
|
|
276
290
|
* We emit tool-input-start on first sight, tool-input-delta for each
|
|
277
|
-
* argument chunk, and tool-input-end
|
|
291
|
+
* argument chunk, and tool-input-end eagerly — either when a new tool
|
|
292
|
+
* index starts (closing the previous one) or on a null finalization
|
|
293
|
+
* chunk. Any remaining open calls are closed in flush().
|
|
278
294
|
*/
|
|
279
295
|
function emitToolCallDeltas(
|
|
280
296
|
toolCalls: Record<string, unknown>[],
|
|
281
297
|
controller: TransformStreamDefaultController<LanguageModelV3StreamPart>,
|
|
282
298
|
) {
|
|
283
299
|
for (const tc of toolCalls) {
|
|
284
|
-
if (isNullFinalizationChunk(tc))
|
|
300
|
+
if (isNullFinalizationChunk(tc)) {
|
|
301
|
+
// Null finalization sentinel — close the last active tool call
|
|
302
|
+
if (lastActiveToolIndex != null) {
|
|
303
|
+
closeToolCall(lastActiveToolIndex, controller);
|
|
304
|
+
}
|
|
305
|
+
continue;
|
|
306
|
+
}
|
|
285
307
|
|
|
286
308
|
const tcIndex = (tc.index as number) ?? 0;
|
|
287
309
|
const fn = tc.function as Record<string, unknown> | undefined;
|
|
@@ -290,10 +312,15 @@ export function getMappedStream(
|
|
|
290
312
|
const tcId = tc.id as string | null;
|
|
291
313
|
|
|
292
314
|
if (!activeToolCalls.has(tcIndex)) {
|
|
293
|
-
//
|
|
315
|
+
// A new tool call is starting — close the previous one first
|
|
316
|
+
if (lastActiveToolIndex != null && lastActiveToolIndex !== tcIndex) {
|
|
317
|
+
closeToolCall(lastActiveToolIndex, controller);
|
|
318
|
+
}
|
|
319
|
+
|
|
294
320
|
const id = tcId || generateId();
|
|
295
321
|
const toolName = tcName || "";
|
|
296
322
|
activeToolCalls.set(tcIndex, { id, toolName, args: "" });
|
|
323
|
+
lastActiveToolIndex = tcIndex;
|
|
297
324
|
|
|
298
325
|
controller.enqueue({
|
|
299
326
|
type: "tool-input-start",
|
|
@@ -301,7 +328,6 @@ export function getMappedStream(
|
|
|
301
328
|
toolName,
|
|
302
329
|
});
|
|
303
330
|
|
|
304
|
-
// If arguments arrived in the same chunk as the start, emit them
|
|
305
331
|
if (tcArgs != null && tcArgs !== "") {
|
|
306
332
|
const delta = typeof tcArgs === "string" ? tcArgs : JSON.stringify(tcArgs);
|
|
307
333
|
activeToolCalls.get(tcIndex)!.args += delta;
|
|
@@ -312,8 +338,8 @@ export function getMappedStream(
|
|
|
312
338
|
});
|
|
313
339
|
}
|
|
314
340
|
} else {
|
|
315
|
-
// Subsequent chunks — emit argument deltas
|
|
316
341
|
const active = activeToolCalls.get(tcIndex)!;
|
|
342
|
+
lastActiveToolIndex = tcIndex;
|
|
317
343
|
if (tcArgs != null && tcArgs !== "") {
|
|
318
344
|
const delta = typeof tcArgs === "string" ? tcArgs : JSON.stringify(tcArgs);
|
|
319
345
|
active.args += delta;
|
|
@@ -123,12 +123,38 @@ export class WorkersAIChatLanguageModel implements LanguageModelV3 {
|
|
|
123
123
|
* accept this format at runtime.
|
|
124
124
|
*
|
|
125
125
|
* The binding path additionally normalises null content to empty strings.
|
|
126
|
+
*
|
|
127
|
+
* Reasoning controls (`reasoning_effort`, `chat_template_kwargs`) are
|
|
128
|
+
* forwarded here from settings. These belong on the INPUTS object, not on
|
|
129
|
+
* the 3rd-arg options / REST query string — see
|
|
130
|
+
* https://github.com/cloudflare/ai/issues/501. Per-call values from
|
|
131
|
+
* `providerOptions["workers-ai"]` override settings.
|
|
132
|
+
*
|
|
133
|
+
* `reasoning_effort: null` is a valid value ("disable reasoning"), so we
|
|
134
|
+
* check `!== undefined` rather than truthiness.
|
|
126
135
|
*/
|
|
127
136
|
private buildRunInputs(
|
|
128
137
|
args: ReturnType<typeof this.getArgs>["args"],
|
|
129
138
|
messages: ReturnType<typeof convertToWorkersAIChatMessages>["messages"],
|
|
130
|
-
options?: { stream?: boolean },
|
|
139
|
+
options?: { stream?: boolean; providerOptions?: Record<string, unknown> },
|
|
131
140
|
) {
|
|
141
|
+
// The AI SDK types this as `Record<string, JSONObject>` but we defensively
|
|
142
|
+
// accept anything and only treat it as a lookup if it's a plain object.
|
|
143
|
+
// `"key" in x` throws for primitives, so we can't skip the typeof guard.
|
|
144
|
+
const rawPerCall = options?.providerOptions?.["workers-ai"];
|
|
145
|
+
const perCall: Record<string, unknown> =
|
|
146
|
+
rawPerCall !== null && typeof rawPerCall === "object" && !Array.isArray(rawPerCall)
|
|
147
|
+
? (rawPerCall as Record<string, unknown>)
|
|
148
|
+
: {};
|
|
149
|
+
const reasoningEffort =
|
|
150
|
+
"reasoning_effort" in perCall
|
|
151
|
+
? perCall.reasoning_effort
|
|
152
|
+
: this.settings.reasoning_effort;
|
|
153
|
+
const chatTemplateKwargs =
|
|
154
|
+
"chat_template_kwargs" in perCall
|
|
155
|
+
? perCall.chat_template_kwargs
|
|
156
|
+
: this.settings.chat_template_kwargs;
|
|
157
|
+
|
|
132
158
|
return {
|
|
133
159
|
max_tokens: args.max_tokens,
|
|
134
160
|
messages: this.config.isBinding ? normalizeMessagesForBinding(messages) : messages,
|
|
@@ -138,11 +164,19 @@ export class WorkersAIChatLanguageModel implements LanguageModelV3 {
|
|
|
138
164
|
top_p: args.top_p,
|
|
139
165
|
...(args.response_format ? { response_format: args.response_format } : {}),
|
|
140
166
|
...(options?.stream ? { stream: true } : {}),
|
|
167
|
+
...(reasoningEffort !== undefined ? { reasoning_effort: reasoningEffort } : {}),
|
|
168
|
+
...(chatTemplateKwargs !== undefined
|
|
169
|
+
? { chat_template_kwargs: chatTemplateKwargs }
|
|
170
|
+
: {}),
|
|
141
171
|
};
|
|
142
172
|
}
|
|
143
173
|
|
|
144
174
|
/**
|
|
145
175
|
* Get passthrough options for binding.run() from settings.
|
|
176
|
+
*
|
|
177
|
+
* `reasoning_effort` and `chat_template_kwargs` are explicitly excluded
|
|
178
|
+
* here — they belong on the `inputs` object (see `buildRunInputs`), not on
|
|
179
|
+
* the `options` (3rd) arg of binding.run() or the REST query string.
|
|
146
180
|
*/
|
|
147
181
|
private getRunOptions() {
|
|
148
182
|
const {
|
|
@@ -150,6 +184,8 @@ export class WorkersAIChatLanguageModel implements LanguageModelV3 {
|
|
|
150
184
|
safePrompt: _safePrompt,
|
|
151
185
|
sessionAffinity,
|
|
152
186
|
extraHeaders,
|
|
187
|
+
reasoning_effort: _reasoningEffort,
|
|
188
|
+
chat_template_kwargs: _chatTemplateKwargs,
|
|
153
189
|
...passthroughOptions
|
|
154
190
|
} = this.settings;
|
|
155
191
|
|
|
@@ -173,7 +209,9 @@ export class WorkersAIChatLanguageModel implements LanguageModelV3 {
|
|
|
173
209
|
const { args, warnings } = this.getArgs(options);
|
|
174
210
|
const { messages } = convertToWorkersAIChatMessages(options.prompt);
|
|
175
211
|
|
|
176
|
-
const inputs = this.buildRunInputs(args, messages
|
|
212
|
+
const inputs = this.buildRunInputs(args, messages, {
|
|
213
|
+
providerOptions: options.providerOptions,
|
|
214
|
+
});
|
|
177
215
|
const runOptions = this.getRunOptions();
|
|
178
216
|
|
|
179
217
|
const output = await this.config.binding.run(
|
|
@@ -223,7 +261,10 @@ export class WorkersAIChatLanguageModel implements LanguageModelV3 {
|
|
|
223
261
|
const { args, warnings } = this.getArgs(options);
|
|
224
262
|
const { messages } = convertToWorkersAIChatMessages(options.prompt);
|
|
225
263
|
|
|
226
|
-
const inputs = this.buildRunInputs(args, messages, {
|
|
264
|
+
const inputs = this.buildRunInputs(args, messages, {
|
|
265
|
+
stream: true,
|
|
266
|
+
providerOptions: options.providerOptions,
|
|
267
|
+
});
|
|
227
268
|
const runOptions = this.getRunOptions();
|
|
228
269
|
|
|
229
270
|
const response = await this.config.binding.run(
|
|
@@ -16,6 +16,29 @@ export type WorkersAIChatSettings = {
|
|
|
16
16
|
*/
|
|
17
17
|
sessionAffinity?: string;
|
|
18
18
|
|
|
19
|
+
/**
|
|
20
|
+
* Controls the reasoning budget for reasoning-capable Workers AI models
|
|
21
|
+
* (e.g. `@cf/zai-org/glm-4.7-flash`, `@cf/moonshotai/kimi-k2.5`,
|
|
22
|
+
* `@cf/openai/gpt-oss-120b`).
|
|
23
|
+
*
|
|
24
|
+
* `null` is a valid value and disables reasoning for models that support it.
|
|
25
|
+
* Forwarded on the `inputs` object of `binding.run(model, inputs)`.
|
|
26
|
+
*/
|
|
27
|
+
reasoning_effort?: "low" | "medium" | "high" | null;
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* Chat-template overrides for reasoning-capable models that expose
|
|
31
|
+
* thinking toggles (e.g. GLM, Kimi).
|
|
32
|
+
*
|
|
33
|
+
* Forwarded on the `inputs` object of `binding.run(model, inputs)`.
|
|
34
|
+
*/
|
|
35
|
+
chat_template_kwargs?: {
|
|
36
|
+
/** Whether to enable reasoning. Enabled by default on reasoning models. */
|
|
37
|
+
enable_thinking?: boolean;
|
|
38
|
+
/** If false, preserves reasoning context between turns. */
|
|
39
|
+
clear_thinking?: boolean;
|
|
40
|
+
};
|
|
41
|
+
|
|
19
42
|
/**
|
|
20
43
|
* Passthrough settings that are provided directly to the run function.
|
|
21
44
|
* Use this for any provider-specific options not covered by the typed fields.
|