@ai-sdk/openai 2.0.104 → 2.0.106

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -119,6 +119,24 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
119
119
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'gpt-image-2' | (string & {});
120
120
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
121
121
  declare const hasDefaultResponseFormat: Set<string>;
122
+ declare const openaiImageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
123
+ quality?: "auto" | "low" | "medium" | "high" | "standard" | "hd" | undefined;
124
+ background?: "auto" | "transparent" | "opaque" | undefined;
125
+ outputFormat?: "png" | "jpeg" | "webp" | undefined;
126
+ outputCompression?: number | undefined;
127
+ user?: string | undefined;
128
+ }>;
129
+ type OpenAIImageModelOptions = InferValidator<typeof openaiImageModelOptions>;
130
+ declare const openaiImageModelGenerationOptions: _ai_sdk_provider_utils.LazyValidator<{
131
+ quality?: "auto" | "low" | "medium" | "high" | "standard" | "hd" | undefined;
132
+ background?: "auto" | "transparent" | "opaque" | undefined;
133
+ outputFormat?: "png" | "jpeg" | "webp" | undefined;
134
+ outputCompression?: number | undefined;
135
+ user?: string | undefined;
136
+ style?: "vivid" | "natural" | undefined;
137
+ moderation?: "auto" | "low" | undefined;
138
+ }>;
139
+ type OpenAIImageModelGenerationOptions = InferValidator<typeof openaiImageModelGenerationOptions>;
122
140
 
123
141
  interface OpenAIImageModelConfig extends OpenAIConfig {
124
142
  _internal?: {
@@ -401,7 +419,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithO
401
419
  }>;
402
420
 
403
421
  declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
404
- background?: "auto" | "opaque" | "transparent" | undefined;
422
+ background?: "auto" | "transparent" | "opaque" | undefined;
405
423
  inputFidelity?: "low" | "high" | undefined;
406
424
  inputImageMask?: {
407
425
  fileId?: string | undefined;
@@ -560,4 +578,4 @@ declare const webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactor
560
578
  };
561
579
  }>;
562
580
 
563
- export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiSpeechProviderOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };
581
+ export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelGenerationOptions, type OpenAIImageModelId, type OpenAIImageModelOptions, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiImageModelGenerationOptions, openaiImageModelOptions, openaiSpeechProviderOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };
@@ -119,6 +119,24 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
119
119
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'gpt-image-2' | (string & {});
120
120
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
121
121
  declare const hasDefaultResponseFormat: Set<string>;
122
+ declare const openaiImageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
123
+ quality?: "auto" | "low" | "medium" | "high" | "standard" | "hd" | undefined;
124
+ background?: "auto" | "transparent" | "opaque" | undefined;
125
+ outputFormat?: "png" | "jpeg" | "webp" | undefined;
126
+ outputCompression?: number | undefined;
127
+ user?: string | undefined;
128
+ }>;
129
+ type OpenAIImageModelOptions = InferValidator<typeof openaiImageModelOptions>;
130
+ declare const openaiImageModelGenerationOptions: _ai_sdk_provider_utils.LazyValidator<{
131
+ quality?: "auto" | "low" | "medium" | "high" | "standard" | "hd" | undefined;
132
+ background?: "auto" | "transparent" | "opaque" | undefined;
133
+ outputFormat?: "png" | "jpeg" | "webp" | undefined;
134
+ outputCompression?: number | undefined;
135
+ user?: string | undefined;
136
+ style?: "vivid" | "natural" | undefined;
137
+ moderation?: "auto" | "low" | undefined;
138
+ }>;
139
+ type OpenAIImageModelGenerationOptions = InferValidator<typeof openaiImageModelGenerationOptions>;
122
140
 
123
141
  interface OpenAIImageModelConfig extends OpenAIConfig {
124
142
  _internal?: {
@@ -401,7 +419,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithO
401
419
  }>;
402
420
 
403
421
  declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
404
- background?: "auto" | "opaque" | "transparent" | undefined;
422
+ background?: "auto" | "transparent" | "opaque" | undefined;
405
423
  inputFidelity?: "low" | "high" | undefined;
406
424
  inputImageMask?: {
407
425
  fileId?: string | undefined;
@@ -560,4 +578,4 @@ declare const webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactor
560
578
  };
561
579
  }>;
562
580
 
563
- export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiSpeechProviderOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };
581
+ export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelGenerationOptions, type OpenAIImageModelId, type OpenAIImageModelOptions, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiImageModelGenerationOptions, openaiImageModelOptions, openaiSpeechProviderOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };