@ai-sdk/openai 2.1.0-beta.11 → 2.1.0-beta.13

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -1,4 +1,4 @@
-import { LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+import { LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
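
The headline change in this release: the transcription and speech model types move from the V2 to the V3 provider specification (the language, embedding, and image models were already on V3). A minimal sketch of what this looks like from the outside, assuming `OPENAI_API_KEY` is set in the environment:

```ts
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI(); // reads OPENAI_API_KEY from the environment

// After this upgrade, transcription and speech models report the v3 spec:
console.log(openai.transcription('whisper-1').specificationVersion); // 'v3'
console.log(openai.speech('tts-1').specificationVersion); // 'v3'
```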
@@ -129,7 +129,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3<string>['doEmbed']>>>;
 }

-type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
 declare const hasDefaultResponseFormat: Set<string>;

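The image model id union gains `gpt-image-1-mini`. Since the union still ends in `(string & {})`, any string was already accepted; the addition only improves type hints. A usage sketch with the `ai` package's experimental image API (prompt text is illustrative):

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1-mini'), // now suggested by the type
  prompt: 'A watercolor lighthouse at dawn',
});
```
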
@@ -161,7 +161,7 @@ declare const openAITranscriptionProviderOptions: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;

-type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
+type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
     providerOptions?: {
         openai?: OpenAITranscriptionProviderOptions;
     };
@@ -171,14 +171,14 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
         currentDate?: () => Date;
     };
 }
-declare class OpenAITranscriptionModel implements TranscriptionModelV2 {
+declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
     readonly modelId: OpenAITranscriptionModelId;
     private readonly config;
-    readonly specificationVersion = "v2";
+    readonly specificationVersion = "v3";
     get provider(): string;
     constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
     private getArgs;
-    doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV2['doGenerate']>>>;
+    doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
 }

 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
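
Because the V2-to-V3 bump happens at the provider spec boundary, call sites that go through the `ai` package should be unaffected. A transcription sketch, assuming an `ai` version that accepts V3 transcription models:

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.mp3'), // hypothetical local file
});
```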
@@ -193,17 +193,17 @@ interface OpenAISpeechModelConfig extends OpenAIConfig {
         currentDate?: () => Date;
     };
 }
-declare class OpenAISpeechModel implements SpeechModelV2 {
+declare class OpenAISpeechModel implements SpeechModelV3 {
     readonly modelId: OpenAISpeechModelId;
     private readonly config;
-    readonly specificationVersion = "v2";
+    readonly specificationVersion = "v3";
     get provider(): string;
     constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
     private getArgs;
-    doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
+    doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
 }

-type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});

 declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
     readonly specificationVersion = "v3";
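
`OpenAIResponsesModelId` is re-sorted alphabetically and picks up `gpt-5-pro` and `gpt-5-pro-2025-10-06`; the union is otherwise unchanged. A sketch:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('gpt-5-pro'), // newly listed model id
  prompt: 'One-sentence summary of the v3 provider spec.',
});
```

The remaining hunks cover the bundled JavaScript output, where the same changes (plus a new tool-output case) show up in compiled form.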
@@ -78,6 +78,7 @@ function convertToOpenAIChatMessages({
   prompt,
   systemMessageMode = "system"
 }) {
+  var _a;
   const messages = [];
   const warnings = [];
   for (const { role, content } of prompt) {
@@ -116,7 +117,7 @@ function convertToOpenAIChatMessages({
         messages.push({
           role: "user",
           content: content.map((part, index) => {
-            var _a, _b, _c;
+            var _a2, _b, _c;
             switch (part.type) {
               case "text": {
                 return { type: "text", text: part.text };
@@ -129,7 +130,7 @@ function convertToOpenAIChatMessages({
                   image_url: {
                     url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`,
                     // OpenAI specific extension: image detail
-                    detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                    detail: (_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b.imageDetail
                   }
                 };
               } else if (part.mediaType.startsWith("audio/")) {
@@ -226,6 +227,9 @@ function convertToOpenAIChatMessages({
           case "error-text":
             contentValue = output.value;
             break;
+          case "execution-denied":
+            contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
+            break;
           case "content":
           case "json":
           case "error-json":
@@ -1608,9 +1612,13 @@ var import_v48 = require("zod/v4");
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
   "dall-e-2": 10,
-  "gpt-image-1": 10
+  "gpt-image-1": 10,
+  "gpt-image-1-mini": 10
 };
-var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
+  "gpt-image-1",
+  "gpt-image-1-mini"
+]);

 // src/image/openai-image-model.ts
 var OpenAIImageModel = class {
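
`modelMaxImagesPerCall` caps how many images a single API request may ask for, and the `ai` package uses it to split larger `n` values across calls. `gpt-image-1-mini` also joins `hasDefaultResponseFormat`, presumably because the gpt-image-1 family always returns base64 and takes no `response_format` parameter. A batching sketch, assuming that splitting behavior:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { images } = await generateImage({
  model: openai.image('gpt-image-1-mini'),
  prompt: 'Isometric pixel-art server racks',
  n: 12, // above the 10-per-call cap, so expect two underlying API calls
});
```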
@@ -1791,7 +1799,7 @@ var OpenAITranscriptionModel = class {
   constructor(modelId, config) {
     this.modelId = modelId;
     this.config = config;
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
   }
   get provider() {
     return this.config.provider;
@@ -1931,7 +1939,7 @@ var OpenAISpeechModel = class {
   constructor(modelId, config) {
     this.modelId = modelId;
     this.config = config;
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
   }
   get provider() {
     return this.config.provider;
@@ -2072,7 +2080,7 @@ async function convertToOpenAIResponsesInput({
   store,
   hasLocalShellTool = false
 }) {
-  var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
   const input = [];
   const warnings = [];
   for (const { role, content } of prompt) {
@@ -2275,6 +2283,9 @@ async function convertToOpenAIResponsesInput({
           case "error-text":
             contentValue = output.value;
             break;
+          case "execution-denied":
+            contentValue = (_j = output.reason) != null ? _j : "Tool execution denied.";
+            break;
           case "content":
           case "json":
           case "error-json":