@ai-sdk/openai 2.0.0-canary.11 → 2.0.0-canary.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
- import { LanguageModelV2, EmbeddingModelV2, ImageModelV1, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod';

@@ -93,11 +93,11 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
  }

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- interface OpenAICompletionSettings {
+ declare const openaiCompletionProviderOptions: z.ZodObject<{
      /**
      Echo back the prompt in addition to the completion.
      */
-     echo?: boolean;
+     echo: z.ZodOptional<z.ZodBoolean>;
      /**
      Modify the likelihood of specified tokens appearing in the completion.

@@ -111,18 +111,29 @@ interface OpenAICompletionSettings {

      As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
      token from being generated.
-     */
-     logitBias?: Record<number, number>;
+     */
+     logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
      /**
      The suffix that comes after a completion of inserted text.
-     */
-     suffix?: string;
+     */
+     suffix: z.ZodOptional<z.ZodString>;
      /**
      A unique identifier representing your end-user, which can help OpenAI to
      monitor and detect abuse. Learn more.
-     */
-     user?: string;
- }
+     */
+     user: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+     user?: string | undefined;
+     logitBias?: Record<string, number> | undefined;
+     echo?: boolean | undefined;
+     suffix?: string | undefined;
+ }, {
+     user?: string | undefined;
+     logitBias?: Record<string, number> | undefined;
+     echo?: boolean | undefined;
+     suffix?: string | undefined;
+ }>;
+ type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

  type OpenAICompletionConfig = {
      provider: string;
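
In this hunk the `OpenAICompletionSettings` interface becomes a Zod schema with an inferred `OpenAICompletionProviderOptions` type, and `logitBias` keys switch from `Record<number, number>` to string keys. A minimal sketch of validating options against the shape shown above; the `@ai-sdk/openai/internal` import path is an assumption, since this diff does not show the package's entry points:

```ts
import {
  openaiCompletionProviderOptions,
  type OpenAICompletionProviderOptions,
} from '@ai-sdk/openai/internal'; // import path is an assumption

// Every field is optional, and the schema uses "strip" mode, so unknown keys
// are silently dropped rather than rejected.
const options: OpenAICompletionProviderOptions = {
  echo: true,
  logitBias: { '50256': -100 }, // string keys now; the old interface used number keys
  user: 'user-1234',
};

const parsed = openaiCompletionProviderOptions.parse(options);
console.log(parsed.suffix); // undefined: optional and unset
```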
@@ -137,9 +148,9 @@ type OpenAICompletionConfig = {
  declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
      readonly specificationVersion = "v2";
      readonly modelId: OpenAICompletionModelId;
-     readonly settings: OpenAICompletionSettings;
      private readonly config;
-     constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
+     private get providerOptionsName();
+     constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
      get provider(): string;
      getSupportedUrls(): Promise<Record<string, RegExp[]>>;
      private getArgs;
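
With `settings` removed from the class and its constructor, the values above now travel per call as provider options; the new private `providerOptionsName` getter suggests they are looked up under a provider-named key. A hedged before/after sketch of the construction site (the import path is an assumption, and `config` is left abstract):

```ts
import { OpenAICompletionLanguageModel } from '@ai-sdk/openai/internal'; // path is an assumption

// Stand-in for OpenAICompletionConfig, derived from the constructor so this
// sketch does not have to guess the full config shape.
declare const config: ConstructorParameters<typeof OpenAICompletionLanguageModel>[1];

// canary.11 (removed): new OpenAICompletionLanguageModel(modelId, settings, config)
// canary.12: no settings parameter; per-call provider options replace it.
const model = new OpenAICompletionLanguageModel('gpt-3.5-turbo-instruct', config);
```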
@@ -203,6 +214,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {

  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+ declare const hasDefaultResponseFormat: Set<string>;
  interface OpenAIImageSettings {
      /**
      Override the maximum number of images per call (default is dependent on the
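
`hasDefaultResponseFormat` is only declared as a `Set<string>` here, so its membership is not visible in this diff; the name suggests image model IDs whose API responses arrive in a fixed format, making an explicit `response_format` parameter unnecessary. A purely hypothetical consumption sketch:

```ts
declare const hasDefaultResponseFormat: Set<string>; // as declared above
declare const modelId: string;

// Hypothetical: skip response_format for models that dictate their own.
const imageParams = hasDefaultResponseFormat.has(modelId)
  ? {}
  : { response_format: 'b64_json' };
```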
@@ -216,7 +228,7 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
          currentDate?: () => Date;
      };
  }
- declare class OpenAIImageModel implements ImageModelV1 {
+ declare class OpenAIImageModel implements ImageModelV2 {
      readonly modelId: OpenAIImageModelId;
      private readonly settings;
      private readonly config;
@@ -224,7 +236,7 @@ declare class OpenAIImageModel implements ImageModelV1 {
      get maxImagesPerCall(): number;
      get provider(): string;
      constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
-     doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+     doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
  }

  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
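
The destructured `doGenerate` parameters are identical on both sides, so at this level the `ImageModelV1` → `ImageModelV2` change reads as a specification-version bump rather than a call-shape change. A call sketch against the signature above; the argument values (and the import path) are illustrative assumptions:

```ts
import { OpenAIImageModel } from '@ai-sdk/openai/internal'; // path is an assumption

declare const imageModel: OpenAIImageModel; // constructed elsewhere

// Only the parameter names come from the diff; the values are made up.
const result = await imageModel.doGenerate({
  prompt: 'a watercolor lighthouse at dusk',
  n: 1,
  size: '1024x1024',
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
  headers: undefined,
  abortSignal: undefined,
});
```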
@@ -366,4 +378,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, modelMaxImagesPerCall, openaiEmbeddingProviderOptions, openaiProviderOptions };
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
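
Net change to the export list: `OpenAICompletionSettings` drops out, while `OpenAICompletionProviderOptions`, `openaiCompletionProviderOptions`, and `hasDefaultResponseFormat` come in. Downstream imports of the removed type would migrate roughly like this (import path is an assumption):

```ts
// canary.11 (no longer exported):
// import { type OpenAICompletionSettings } from '@ai-sdk/openai/internal';

// canary.12:
import {
  hasDefaultResponseFormat,
  openaiCompletionProviderOptions,
  type OpenAICompletionProviderOptions,
} from '@ai-sdk/openai/internal';
```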