workers-ai-provider 2.0.2 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -2,7 +2,7 @@
   "name": "workers-ai-provider",
   "description": "Workers AI Provider for the vercel AI SDK",
   "type": "module",
-  "version": "2.0.2",
+  "version": "3.0.1",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "repository": {
@@ -12,7 +12,7 @@
   "bugs": {
     "url": "https://github.com/cloudflare/ai/issues"
   },
-  "authors": "Sunil Pai <spai@cloudflare.com>",
+  "authors": "Cloudflare Inc.",
   "license": "MIT",
   "files": [
     "dist",
@@ -30,13 +30,18 @@
     "chat",
     "serverless"
   ],
-  "dependencies": {
-    "@ai-sdk/provider": "^2.0.0",
-    "@ai-sdk/provider-utils": "^3.0.19"
+  "peerDependencies": {
+    "@ai-sdk/provider": "^3.0.0",
+    "@ai-sdk/provider-utils": "^4.0.0",
+    "ai": "^6.0.0",
+    "zod": "^3.25.0 || ^4.0.0"
   },
   "devDependencies": {
+    "@ai-sdk/provider": "^3.0.0",
+    "@ai-sdk/provider-utils": "^4.0.0",
     "@cloudflare/workers-types": "^4.20251221.0",
-    "zod": "^3.25.76"
+    "ai": "^6.0.0",
+    "zod": "^4.1.8"
   },
   "scripts": {
     "build": "rm -rf dist && tsup src/index.ts --dts --sourcemap --format esm --target es2020",
package/src/autorag-chat-language-model.ts CHANGED
@@ -1,8 +1,4 @@
-import type {
-  LanguageModelV2,
-  LanguageModelV2CallWarning,
-  LanguageModelV2StreamPart,
-} from "@ai-sdk/provider";
+import type { LanguageModelV3, SharedV3Warning, LanguageModelV3StreamPart } from "@ai-sdk/provider";
 
 import type { AutoRAGChatSettings } from "./autorag-chat-settings";
 import { convertToWorkersAIChatMessages } from "./convert-to-workersai-chat-messages";
@@ -17,8 +13,8 @@ type AutoRAGChatConfig = {
   gateway?: GatewayOptions;
 };
 
-export class AutoRAGChatLanguageModel implements LanguageModelV2 {
-  readonly specificationVersion = "v2";
+export class AutoRAGChatLanguageModel implements LanguageModelV3 {
+  readonly specificationVersion = "v3";
   readonly defaultObjectGenerationMode = "json";
 
   readonly supportedUrls: Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>> = {
@@ -51,20 +47,20 @@ export class AutoRAGChatLanguageModel implements LanguageModelV2 {
     toolChoice,
     frequencyPenalty,
     presencePenalty,
-  }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
-    const warnings: LanguageModelV2CallWarning[] = [];
+  }: Parameters<LanguageModelV3["doGenerate"]>[0]) {
+    const warnings: SharedV3Warning[] = [];
 
     if (frequencyPenalty != null) {
       warnings.push({
-        setting: "frequencyPenalty",
-        type: "unsupported-setting",
+        feature: "frequencyPenalty",
+        type: "unsupported",
       });
     }
 
     if (presencePenalty != null) {
       warnings.push({
-        setting: "presencePenalty",
-        type: "unsupported-setting",
+        feature: "presencePenalty",
+        type: "unsupported",
       });
     }
 
@@ -79,7 +75,10 @@ export class AutoRAGChatLanguageModel implements LanguageModelV2 {
     switch (type) {
       case "text": {
         return {
-          args: { ...baseArgs, ...prepareToolsAndToolChoice(tools, toolChoice) },
+          args: {
+            ...baseArgs,
+            ...prepareToolsAndToolChoice(tools, toolChoice),
+          },
           warnings,
         };
       }
@@ -106,8 +105,8 @@ export class AutoRAGChatLanguageModel implements LanguageModelV2 {
   }
 
   async doGenerate(
-    options: Parameters<LanguageModelV2["doGenerate"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
+    options: Parameters<LanguageModelV3["doGenerate"]>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV3["doGenerate"]>>> {
     const { warnings } = this.getArgs(options);
     const { messages } = convertToWorkersAIChatMessages(options.prompt);
 
@@ -116,7 +115,7 @@ export class AutoRAGChatLanguageModel implements LanguageModelV2 {
     });
 
     return {
-      finishReason: "stop",
+      finishReason: { unified: "stop", raw: "stop" },
 
       content: [
         ...output.data.map(({ file_id, filename, score }) => ({
@@ -140,14 +139,14 @@ export class AutoRAGChatLanguageModel implements LanguageModelV2 {
   }
 
   async doStream(
-    options: Parameters<LanguageModelV2["doStream"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
+    options: Parameters<LanguageModelV3["doStream"]>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV3["doStream"]>>> {
     const { args, warnings } = this.getArgs(options);
     const { messages } = convertToWorkersAIChatMessages(options.prompt);
 
     const query = messages.map(({ content, role }) => `${role}: ${content}`).join("\n\n");
 
-    // Get the underlying streaming response (assume this returns a ReadableStream<LanguageModelV2StreamPart>)
+    // Get the underlying streaming response (assume this returns a ReadableStream<LanguageModelV3StreamPart>)
     const response = await this.config.binding.aiSearch({
       query,
       stream: true,
@@ -155,12 +154,12 @@ export class AutoRAGChatLanguageModel implements LanguageModelV2 {
 
     // Create a new stream that first emits the stream-start part with warnings,
     // then pipes through the rest of the response stream
-    const stream = new ReadableStream<LanguageModelV2StreamPart>({
+    const stream = new ReadableStream<LanguageModelV3StreamPart>({
       start(controller) {
         // Emit the stream-start part with warnings
         controller.enqueue({
           type: "stream-start",
-          warnings: warnings as LanguageModelV2CallWarning[],
+          warnings: warnings as SharedV3Warning[],
         });
 
         // Pipe the rest of the response stream
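Across these hunks the V2 `unsupported-setting` warning becomes the V3 `unsupported` warning, with `setting` renamed to `feature`. A sketch of the two shapes as they appear in this diff (an optional `details` string also appears later, in the image model):

```ts
import type { SharedV3Warning } from "@ai-sdk/provider";

// V2 (removed): { type: "unsupported-setting", setting: "frequencyPenalty" }
// V3 (added):
const warning: SharedV3Warning = {
  type: "unsupported",
  feature: "frequencyPenalty",
};
```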
package/src/convert-to-workersai-chat-messages.ts CHANGED
@@ -1,19 +1,19 @@
-import type { LanguageModelV2Prompt, SharedV2ProviderMetadata } from "@ai-sdk/provider";
+import type { LanguageModelV3Prompt, SharedV3ProviderOptions } from "@ai-sdk/provider";
 import type { WorkersAIChatPrompt } from "./workersai-chat-prompt";
 
-export function convertToWorkersAIChatMessages(prompt: LanguageModelV2Prompt): {
+export function convertToWorkersAIChatMessages(prompt: LanguageModelV3Prompt): {
   messages: WorkersAIChatPrompt;
   images: {
-    mimeType: string | undefined;
+    mediaType: string | undefined;
     image: Uint8Array;
-    providerOptions: SharedV2ProviderMetadata | undefined;
+    providerOptions: SharedV3ProviderOptions | undefined;
   }[];
 } {
   const messages: WorkersAIChatPrompt = [];
   const images: {
-    mimeType: string | undefined;
+    mediaType: string | undefined;
     image: Uint8Array;
-    providerOptions: SharedV2ProviderMetadata | undefined;
+    providerOptions: SharedV3ProviderOptions | undefined;
   }[] = [];
 
   for (const { role, content } of prompt) {
@@ -38,7 +38,7 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV2Prompt): {
           // For Llama 3.2 Vision model, which needs array of integers
           images.push({
             image: part.data,
-            mimeType: part.mediaType,
+            mediaType: part.mediaType,
             providerOptions: part.providerOptions,
           });
         }
@@ -74,6 +74,11 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV2Prompt): {
             break;
           }
 
+          case "file": {
+            // Handle file parts in assistant messages (V3)
+            break;
+          }
+
           case "tool-call": {
             text = JSON.stringify({
               name: part.toolName,
@@ -90,9 +95,17 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV2Prompt): {
             });
             break;
           }
+
+          case "tool-result": {
+            // Handle tool results in assistant messages (V3)
+            break;
+          }
+
           default: {
-            const exhaustiveCheck = part;
-            throw new Error(`Unsupported part type: ${exhaustiveCheck.type}`);
+            const exhaustiveCheck = part satisfies never;
+            throw new Error(
+              `Unsupported part type: ${(exhaustiveCheck as { type: string }).type}`,
+            );
           }
         }
       }
@@ -115,12 +128,15 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV2Prompt): {
 
       case "tool": {
         for (const [index, toolResponse] of content.entries()) {
-          messages.push({
-            content: JSON.stringify(toolResponse.output),
-            name: toolResponse.toolName,
-            tool_call_id: `functions.${toolResponse.toolName}:${index}`,
-            role: "tool",
-          });
+          if (toolResponse.type === "tool-result") {
+            messages.push({
+              content: JSON.stringify(toolResponse.output),
+              name: toolResponse.toolName,
+              tool_call_id: `functions.${toolResponse.toolName}:${index}`,
+              role: "tool",
+            });
+          }
+          // Skip tool-approval-response parts as they're not supported by Workers AI
         }
         break;
       }
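The rewritten `default` branch uses `satisfies never` so that an unhandled part type becomes a compile-time error rather than only a runtime throw. A standalone sketch of the pattern (the `Part` union here is illustrative, not the SDK's):

```ts
type Part = { type: "text" } | { type: "tool-call" };

function handle(part: Part): void {
  switch (part.type) {
    case "text":
      return;
    case "tool-call":
      return;
    default: {
      // If a new Part variant is added and not handled above, `part` is no
      // longer narrowed to `never`, and this line fails to compile.
      const exhaustiveCheck = part satisfies never;
      throw new Error(`Unsupported part type: ${(exhaustiveCheck as { type: string }).type}`);
    }
  }
}
```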
package/src/map-workersai-finish-reason.ts CHANGED
@@ -1,6 +1,6 @@
-import type { LanguageModelV2FinishReason } from "@ai-sdk/provider";
+import type { LanguageModelV3FinishReason } from "@ai-sdk/provider";
 
-export function mapWorkersAIFinishReason(finishReasonOrResponse: any): LanguageModelV2FinishReason {
+export function mapWorkersAIFinishReason(finishReasonOrResponse: any): LanguageModelV3FinishReason {
   let finishReason: string | null | undefined;
 
   // If it's a string/null/undefined, use it directly (original behavior)
@@ -26,22 +26,23 @@ export function mapWorkersAIFinishReason(finishReasonOrResponse: any): LanguageM
     }
   }
 
+  const raw = finishReason ?? "stop";
+
   switch (finishReason) {
     case "stop":
-      return "stop";
+      return { unified: "stop", raw };
     case "length":
     case "model_length":
-      return "length";
+      return { unified: "length", raw };
     case "tool_calls":
-      return "tool-calls";
+      return { unified: "tool-calls", raw };
     case "error":
-      return "error";
+      return { unified: "error", raw };
     case "other":
-      return "other";
     case "unknown":
-      return "unknown";
+      return { unified: "other", raw };
    default:
      // Default to `stop` for backwards compatibility
-      return "stop";
+      return { unified: "stop", raw };
  }
}
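The mapper now returns a `{ unified, raw }` pair instead of a bare string, preserving the provider's original reason alongside the normalized one. Illustrative inputs and outputs, derived from the mapping above:

```ts
import { mapWorkersAIFinishReason } from "./map-workersai-finish-reason";

mapWorkersAIFinishReason("stop");
// => { unified: "stop", raw: "stop" }
mapWorkersAIFinishReason("model_length");
// => { unified: "length", raw: "model_length" }
mapWorkersAIFinishReason("unknown");
// => { unified: "other", raw: "unknown" }
mapWorkersAIFinishReason(undefined);
// => { unified: "stop", raw: "stop" }  (backwards-compatible default)
```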
package/src/map-workersai-usage.ts CHANGED
@@ -1,4 +1,8 @@
-export function mapWorkersAIUsage(output: AiTextGenerationOutput | AiTextToImageOutput) {
+import type { LanguageModelV3Usage } from "@ai-sdk/provider";
+
+export function mapWorkersAIUsage(
+  output: AiTextGenerationOutput | AiTextToImageOutput,
+): LanguageModelV3Usage {
   const usage = (
     output as {
       usage: { prompt_tokens: number; completion_tokens: number };
@@ -9,8 +13,17 @@ export function mapWorkersAIUsage(output: AiTextGenerationOutput | AiTextToImage
   };
 
   return {
-    outputTokens: usage.completion_tokens,
-    inputTokens: usage.prompt_tokens,
-    totalTokens: usage.prompt_tokens + usage.completion_tokens,
+    outputTokens: {
+      total: usage.completion_tokens,
+      text: undefined,
+      reasoning: undefined,
+    },
+    inputTokens: {
+      total: usage.prompt_tokens,
+      noCache: undefined,
+      cacheRead: undefined,
+      cacheWrite: undefined,
+    },
+    raw: { total: usage.prompt_tokens + usage.completion_tokens },
   };
 }
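V3 replaces the flat token counts with nested `inputTokens`/`outputTokens` objects, leaving the fields Workers AI does not report as `undefined`. For a response reporting `prompt_tokens: 10, completion_tokens: 25`, the mapper now produces (a sketch of the shape above):

```ts
import { mapWorkersAIUsage } from "./map-workersai-usage";

const usage = mapWorkersAIUsage({
  usage: { prompt_tokens: 10, completion_tokens: 25 },
} as unknown as AiTextGenerationOutput);
// => {
//   inputTokens: { total: 10, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
//   outputTokens: { total: 25, text: undefined, reasoning: undefined },
//   raw: { total: 35 },
// }
```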
package/src/streaming.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LanguageModelV2StreamPart } from "@ai-sdk/provider";
+import type { LanguageModelV3StreamPart, LanguageModelV3Usage } from "@ai-sdk/provider";
 import { generateId } from "ai";
 import { events } from "fetch-event-stream";
 import { mapWorkersAIUsage } from "./map-workersai-usage";
@@ -6,14 +6,25 @@ import { processPartialToolCalls } from "./utils";
 
 export function getMappedStream(response: Response) {
   const chunkEvent = events(response);
-  let usage = { outputTokens: 0, inputTokens: 0, totalTokens: 0 };
+  let usage: LanguageModelV3Usage = {
+    outputTokens: { total: 0, text: undefined, reasoning: undefined },
+    inputTokens: {
+      total: 0,
+      noCache: undefined,
+      cacheRead: undefined,
+      cacheWrite: undefined,
+    },
+    raw: {
+      totalTokens: 0,
+    },
+  };
   const partialToolCalls: any[] = [];
 
   // Track start/delta/end IDs per v5 streaming protocol
   let textId: string | null = null;
   let reasoningId: string | null = null;
 
-  return new ReadableStream<LanguageModelV2StreamPart>({
+  return new ReadableStream<LanguageModelV3StreamPart>({
     async start(controller) {
       for await (const event of chunkEvent) {
         if (!event.data) {
@@ -90,7 +101,7 @@ export function getMappedStream(response: Response) {
       }
 
       controller.enqueue({
-        finishReason: "stop",
+        finishReason: { unified: "stop", raw: "stop" },
         type: "finish",
         usage: usage,
       });
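A minimal sketch of consuming the mapped stream, assuming `aiResponse` is a `Response` from a streaming Workers AI call; only the `finish` part is inspected here:

```ts
import { getMappedStream } from "./streaming";

const stream = getMappedStream(aiResponse); // ReadableStream<LanguageModelV3StreamPart>
const reader = stream.getReader();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  if (value.type === "finish") {
    // V3 finish parts carry the { unified, raw } pair and nested usage shown above.
    console.log(value.finishReason.unified, value.usage.outputTokens?.total);
  }
}
```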
package/src/utils.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LanguageModelV2, LanguageModelV2ToolCall } from "@ai-sdk/provider";
+import type { LanguageModelV3, LanguageModelV3ToolCall } from "@ai-sdk/provider";
 import { generateId } from "ai";
 
 /**
@@ -67,7 +67,6 @@ export function createRun(config: CreateRunConfig): AiRun {
     inputs: AiModels[Name]["inputs"],
     options?: AiOptions & Record<string, StringLike>,
   ): Promise<Response | ReadableStream<Uint8Array> | AiModels[Name]["postProcessedOutputs"]> {
-    // biome-ignore lint/correctness/noUnusedVariables: they need to be destructured
     const { gateway, prefix, extraHeaders, returnRawResponse, ...passthroughOptions } =
       options || {};
 
@@ -128,8 +127,8 @@ export function createRun(config: CreateRunConfig): AiRun {
 }
 
 export function prepareToolsAndToolChoice(
-  tools: Parameters<LanguageModelV2["doGenerate"]>[0]["tools"],
-  toolChoice: Parameters<LanguageModelV2["doGenerate"]>[0]["toolChoice"],
+  tools: Parameters<LanguageModelV3["doGenerate"]>[0]["tools"],
+  toolChoice: Parameters<LanguageModelV3["doGenerate"]>[0]["toolChoice"],
 ) {
   if (tools == null) {
     return { tool_choice: undefined, tools: undefined };
@@ -213,7 +212,7 @@ function mergePartialToolCalls(partialCalls: any[]) {
   return Object.values(mergedCallsByIndex);
 }
 
-function processToolCall(toolCall: any): LanguageModelV2ToolCall {
+function processToolCall(toolCall: any): LanguageModelV3ToolCall {
   // Check for OpenAI format tool calls first
   if (toolCall.function && toolCall.id) {
     return {
@@ -237,7 +236,7 @@ function processToolCall(toolCall: any): LanguageModelV2ToolCall {
   };
 }
 
-export function processToolCalls(output: any): LanguageModelV2ToolCall[] {
+export function processToolCalls(output: any): LanguageModelV3ToolCall[] {
   if (output.tool_calls && Array.isArray(output.tool_calls)) {
     return output.tool_calls.map((toolCall: any) => {
       const processedToolCall = processToolCall(toolCall);
package/src/workersai-embedding-model.ts CHANGED
@@ -1,4 +1,9 @@
-import { type EmbeddingModelV2, TooManyEmbeddingValuesForCallError } from "@ai-sdk/provider";
+import type {
+  EmbeddingModelV3,
+  EmbeddingModelV3CallOptions,
+  EmbeddingModelV3Result,
+} from "@ai-sdk/provider";
+import { TooManyEmbeddingValuesForCallError } from "@ai-sdk/provider";
 import type { StringLike } from "./utils";
 import type { EmbeddingModels } from "./workersai-models";
 
@@ -19,12 +24,12 @@ export type WorkersAIEmbeddingSettings = {
   [key: string]: StringLike;
 };
 
-export class WorkersAIEmbeddingModel implements EmbeddingModelV2<string> {
+export class WorkersAIEmbeddingModel implements EmbeddingModelV3 {
   /**
-   * Semantic version of the {@link EmbeddingModelV1} specification implemented
+   * Semantic version of the {@link EmbeddingModelV3} specification implemented
    * by this class. It never changes.
    */
-  readonly specificationVersion = "v2";
+  readonly specificationVersion = "v3";
   readonly modelId: EmbeddingModels;
   private readonly config: WorkersAIEmbeddingConfig;
   private readonly settings: WorkersAIEmbeddingSettings;
@@ -56,11 +61,7 @@ export class WorkersAIEmbeddingModel implements EmbeddingModelV2<string> {
     this.config = config;
   }
 
-  async doEmbed({
-    values,
-  }: Parameters<EmbeddingModelV2<string>["doEmbed"]>[0]): Promise<
-    Awaited<ReturnType<EmbeddingModelV2<string>["doEmbed"]>>
-  > {
+  async doEmbed({ values }: EmbeddingModelV3CallOptions): Promise<EmbeddingModelV3Result> {
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
         maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
@@ -85,6 +86,7 @@ export class WorkersAIEmbeddingModel implements EmbeddingModelV2<string> {
 
     return {
       embeddings: response.data,
+      warnings: [],
     };
   }
 }
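The V3 embedding contract names its option and result types directly (`EmbeddingModelV3CallOptions`, `EmbeddingModelV3Result`) and requires a `warnings` array in the result. A sketch of the spec-level call, with construction of the model elided:

```ts
// `model` is a WorkersAIEmbeddingModel instance (constructor arguments elided).
const result = await model.doEmbed({ values: ["first text", "second text"] });
// result.embeddings: number[][]  — one vector per input value
// result.warnings:   []          — now required by EmbeddingModelV3Result
```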
package/src/workersai-chat-language-model.ts CHANGED
@@ -1,8 +1,4 @@
-import type {
-  LanguageModelV2,
-  LanguageModelV2CallWarning,
-  LanguageModelV2StreamPart,
-} from "@ai-sdk/provider";
+import type { LanguageModelV3, SharedV3Warning, LanguageModelV3StreamPart } from "@ai-sdk/provider";
 import { generateId } from "ai";
 import { convertToWorkersAIChatMessages } from "./convert-to-workersai-chat-messages";
 import { mapWorkersAIFinishReason } from "./map-workersai-finish-reason";
@@ -23,8 +19,8 @@ type WorkersAIChatConfig = {
   gateway?: GatewayOptions;
 };
 
-export class WorkersAIChatLanguageModel implements LanguageModelV2 {
-  readonly specificationVersion = "v2";
+export class WorkersAIChatLanguageModel implements LanguageModelV3 {
+  readonly specificationVersion = "v3";
   readonly defaultObjectGenerationMode = "json";
 
   readonly supportedUrls: Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>> = {
@@ -60,22 +56,22 @@ export class WorkersAIChatLanguageModel implements LanguageModelV2 {
     frequencyPenalty,
     presencePenalty,
     seed,
-  }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
+  }: Parameters<LanguageModelV3["doGenerate"]>[0]) {
     const type = responseFormat?.type ?? "text";
 
-    const warnings: LanguageModelV2CallWarning[] = [];
+    const warnings: SharedV3Warning[] = [];
 
     if (frequencyPenalty != null) {
       warnings.push({
-        setting: "frequencyPenalty",
-        type: "unsupported-setting",
+        feature: "frequencyPenalty",
+        type: "unsupported",
       });
     }
 
     if (presencePenalty != null) {
       warnings.push({
-        setting: "presencePenalty",
-        type: "unsupported-setting",
+        feature: "presencePenalty",
+        type: "unsupported",
       });
     }
 
@@ -125,11 +121,10 @@ export class WorkersAIChatLanguageModel implements LanguageModelV2 {
   }
 
   async doGenerate(
-    options: Parameters<LanguageModelV2["doGenerate"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
+    options: Parameters<LanguageModelV3["doGenerate"]>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV3["doGenerate"]>>> {
     const { args, warnings } = this.getArgs(options);
 
-    // biome-ignore lint/correctness/noUnusedVariables: this needs to be destructured
     const { gateway, safePrompt, ...passthroughOptions } = this.settings;
 
     // Extract image from messages if present
@@ -198,8 +193,8 @@ export class WorkersAIChatLanguageModel implements LanguageModelV2 {
   }
 
   async doStream(
-    options: Parameters<LanguageModelV2["doStream"]>[0],
-  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
+    options: Parameters<LanguageModelV3["doStream"]>[0],
+  ): Promise<Awaited<ReturnType<LanguageModelV3["doStream"]>>> {
     const { args, warnings } = this.getArgs(options);
 
     // Extract image from messages if present
@@ -221,12 +216,12 @@ export class WorkersAIChatLanguageModel implements LanguageModelV2 {
 
     return {
       // rawCall: { rawPrompt: messages, rawSettings: args },
-      stream: new ReadableStream<LanguageModelV2StreamPart>({
+      stream: new ReadableStream<LanguageModelV3StreamPart>({
         async start(controller) {
           // Emit the stream-start part with warnings
          controller.enqueue({
            type: "stream-start",
-            warnings: warnings as LanguageModelV2CallWarning[],
+            warnings: warnings as SharedV3Warning[],
          });
 
          for (const contentPart of response.content) {
@@ -315,12 +310,12 @@ export class WorkersAIChatLanguageModel implements LanguageModelV2 {
 
     // Create a new stream that first emits the stream-start part with warnings,
     // then pipes through the rest of the response stream
-    const stream = new ReadableStream<LanguageModelV2StreamPart>({
+    const stream = new ReadableStream<LanguageModelV3StreamPart>({
       start(controller) {
         // Emit the stream-start part with warnings
         controller.enqueue({
           type: "stream-start",
-          warnings: warnings as LanguageModelV2CallWarning[],
+          warnings: warnings as SharedV3Warning[],
         });
 
         // Pipe the rest of the response stream
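For callers, none of these spec-level renames should surface: the AI SDK's high-level functions consume the V3 parts. A hedged sketch, assuming `streamText` keeps its current surface in the `ai@6` peer range and a Workers AI binding is in scope:

```ts
import { streamText } from "ai";
import { createWorkersAI } from "workers-ai-provider";

const workersai = createWorkersAI({ binding: env.AI }); // assumes a Workers `env.AI` binding
const result = streamText({
  model: workersai("@cf/meta/llama-3.1-8b-instruct"), // model id is illustrative
  prompt: "Explain retrieval-augmented generation in two sentences.",
});
for await (const delta of result.textStream) {
  // Deltas arrive via the stream parts emitted by doStream above.
  console.log(delta);
}
```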
package/src/workersai-image-model.ts CHANGED
@@ -1,10 +1,10 @@
-import type { ImageModelV2, ImageModelV2CallWarning } from "@ai-sdk/provider";
+import type { ImageModelV3, SharedV3Warning } from "@ai-sdk/provider";
 import type { WorkersAIImageConfig } from "./workersai-image-config";
 import type { WorkersAIImageSettings } from "./workersai-image-settings";
 import type { ImageGenerationModels } from "./workersai-models";
 
-export class WorkersAIImageModel implements ImageModelV2 {
-  readonly specificationVersion = "v2";
+export class WorkersAIImageModel implements ImageModelV3 {
+  readonly specificationVersion = "v3";
 
   get maxImagesPerCall(): number {
     return this.settings.maxImagesPerCall ?? 1;
@@ -25,20 +25,20 @@ export class WorkersAIImageModel implements ImageModelV2 {
     size,
     aspectRatio,
     seed,
-    // headers,
-    // abortSignal,
-  }: Parameters<ImageModelV2["doGenerate"]>[0]): Promise<
-    Awaited<ReturnType<ImageModelV2["doGenerate"]>>
+  }: // headers,
+  // abortSignal,
+  Parameters<ImageModelV3["doGenerate"]>[0]): Promise<
+    Awaited<ReturnType<ImageModelV3["doGenerate"]>>
   > {
     const { width, height } = getDimensionsFromSizeString(size);
 
-    const warnings: Array<ImageModelV2CallWarning> = [];
+    const warnings: Array<SharedV3Warning> = [];
 
     if (aspectRatio != null) {
       warnings.push({
         details: "This model does not support aspect ratio. Use `size` instead.",
-        setting: "aspectRatio",
-        type: "unsupported-setting",
+        feature: "aspectRatio",
+        type: "unsupported",
       });
     }
 
@@ -47,7 +47,7 @@ export class WorkersAIImageModel implements ImageModelV2 {
       this.modelId,
       {
         height,
-        prompt,
+        prompt: prompt!,
         seed,
         width,
       },
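Two behavioral notes fall out of this last file: `aspectRatio` still only produces an `unsupported` warning (use `size` instead), and `prompt` is now asserted non-null before being passed to the binding. A hedged usage sketch, assuming the provider's `image()` factory and `experimental_generateImage` from `ai` keep their current surfaces; the model id is illustrative:

```ts
import { experimental_generateImage as generateImage } from "ai";
import { createWorkersAI } from "workers-ai-provider";

const workersai = createWorkersAI({ binding: env.AI }); // assumes a Workers `env.AI` binding
const { image } = await generateImage({
  model: workersai.image("@cf/black-forest-labs/flux-1-schnell"),
  prompt: "a lighthouse at dusk",
  size: "1024x1024", // prefer size; aspectRatio only yields an "unsupported" warning
});
```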