@ai-sdk/openai 1.3.17 → 1.3.19

@@ -218,8 +218,9 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
     doEmbed({ values, headers, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
 }
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+declare const hasDefaultResponseFormat: Set<string>;
 interface OpenAIImageSettings {
     /**
 Override the maximum number of images per call (default is dependent on the
@@ -384,4 +385,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, modelMaxImagesPerCall };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall };
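
For context on the widened OpenAIImageModelId union above: the (string & {}) member keeps the union open-ended, so the listed ids drive editor autocomplete while any other model id string still type-checks. A minimal TypeScript sketch (a local redefinition, for illustration only):

// Sketch only: mirrors the union shape declared in the typings above.
type ImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});

const known: ImageModelId = 'gpt-image-1';        // autocompleted literal
const custom: ImageModelId = 'some-future-model'; // still assignable
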
@@ -27,6 +27,7 @@ __export(internal_exports, {
   OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
   OpenAISpeechModel: () => OpenAISpeechModel,
   OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
   modelMaxImagesPerCall: () => modelMaxImagesPerCall
 });
 module.exports = __toCommonJS(internal_exports);
@@ -1536,8 +1537,10 @@ var import_zod5 = require("zod");
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {
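
The per-model image cap gains an entry for 'gpt-image-1' (10 images per call). A hedged sketch of how a caller might use this table to split a larger request into batches, assuming the constants stay importable from the package's /internal entry point as the export lists in this diff suggest:

// Hypothetical helper, not part of the package: computes per-call batch sizes.
import { modelMaxImagesPerCall } from '@ai-sdk/openai/internal';

function batchSizes(modelId: string, totalImages: number): number[] {
  const maxPerCall = modelMaxImagesPerCall[modelId] ?? 1; // default to 1 if unknown
  const batches: number[] = [];
  for (let remaining = totalImages; remaining > 0; remaining -= maxPerCall) {
    batches.push(Math.min(maxPerCall, remaining));
  }
  return batches;
}

// e.g. batchSizes('gpt-image-1', 25) -> [10, 10, 5]
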
@@ -1589,7 +1592,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
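
This hunk is the behavioral core of the release: models listed in hasDefaultResponseFormat (currently only 'gpt-image-1', whose API responses are base64-encoded by default) no longer get response_format: "b64_json" added to the request body. A standalone TypeScript sketch of the same conditional-spread pattern:

// Sketch of the pattern above, outside the provider class.
const hasDefaultResponseFormat = new Set(['gpt-image-1']);

function imageRequestBody(modelId: string, prompt: string, n: number) {
  return {
    model: modelId,
    prompt,
    n,
    // Add nothing for models that already default to base64 output;
    // otherwise request b64_json explicitly.
    ...(hasDefaultResponseFormat.has(modelId)
      ? {}
      : { response_format: 'b64_json' }),
  };
}

// imageRequestBody('dall-e-3', 'a red fox', 1)    -> includes response_format
// imageRequestBody('gpt-image-1', 'a red fox', 1) -> omits it
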
@@ -2719,6 +2722,7 @@ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
   OpenAIResponsesLanguageModel,
   OpenAISpeechModel,
   OpenAITranscriptionModel,
+  hasDefaultResponseFormat,
   modelMaxImagesPerCall
 });
 //# sourceMappingURL=index.js.map
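
Taken together, the change lets 'gpt-image-1' be used through the provider's existing image model factory. A hedged end-to-end sketch, assuming a version of the ai core package that exposes experimental_generateImage alongside this provider release:

// Usage sketch, not taken from the package's own docs.
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

async function main() {
  const { image } = await generateImage({
    model: openai.image('gpt-image-1'), // now an autocompleted model id
    prompt: 'A watercolor lighthouse at dawn',
    n: 1,
    size: '1024x1024',
  });

  // gpt-image-1 responses are base64 by default, so the provider no longer
  // sends a response_format override (see the request-body hunk above).
  console.log(image.base64.slice(0, 32));
}

main().catch(console.error);
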