@ai-sdk/openai 4.0.0-beta.5 → 4.0.0-beta.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +14 -0
  2. package/dist/index.d.mts +15 -15
  3. package/dist/index.d.ts +15 -15
  4. package/dist/index.js +12 -12
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +9 -9
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +26 -26
  9. package/dist/internal/index.d.ts +26 -26
  10. package/dist/internal/index.js +10 -10
  11. package/dist/internal/index.js.map +1 -1
  12. package/dist/internal/index.mjs +7 -7
  13. package/dist/internal/index.mjs.map +1 -1
  14. package/package.json +3 -3
  15. package/src/chat/convert-openai-chat-usage.ts +2 -2
  16. package/src/chat/convert-to-openai-chat-messages.ts +5 -5
  17. package/src/chat/map-openai-finish-reason.ts +2 -2
  18. package/src/chat/openai-chat-language-model.ts +22 -22
  19. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  20. package/src/completion/convert-openai-completion-usage.ts +2 -2
  21. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  22. package/src/completion/map-openai-finish-reason.ts +2 -2
  23. package/src/completion/openai-completion-language-model.ts +20 -20
  24. package/src/embedding/openai-embedding-model.ts +5 -5
  25. package/src/image/openai-image-model.ts +9 -9
  26. package/src/openai-provider.ts +21 -21
  27. package/src/responses/convert-openai-responses-usage.ts +2 -2
  28. package/src/responses/convert-to-openai-responses-input.ts +7 -7
  29. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  30. package/src/responses/openai-responses-language-model.ts +29 -29
  31. package/src/responses/openai-responses-prepare-tools.ts +6 -6
  32. package/src/speech/openai-speech-model.ts +7 -7
  33. package/src/transcription/openai-transcription-model.ts +8 -8
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/openai",
3
- "version": "4.0.0-beta.5",
3
+ "version": "4.0.0-beta.7",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -36,8 +36,8 @@
36
36
  }
37
37
  },
38
38
  "dependencies": {
39
- "@ai-sdk/provider": "4.0.0-beta.0",
40
- "@ai-sdk/provider-utils": "5.0.0-beta.1"
39
+ "@ai-sdk/provider": "4.0.0-beta.1",
40
+ "@ai-sdk/provider-utils": "5.0.0-beta.2"
41
41
  },
42
42
  "devDependencies": {
43
43
  "@types/node": "20.17.24",
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
1
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAIChatUsage = {
4
4
  prompt_tokens?: number | null;
@@ -16,7 +16,7 @@ export type OpenAIChatUsage = {
16
16
 
17
17
  export function convertOpenAIChatUsage(
18
18
  usage: OpenAIChatUsage | undefined | null,
19
- ): LanguageModelV3Usage {
19
+ ): LanguageModelV4Usage {
20
20
  if (usage == null) {
21
21
  return {
22
22
  inputTokens: {
@@ -1,6 +1,6 @@
1
1
  import {
2
- SharedV3Warning,
3
- LanguageModelV3Prompt,
2
+ SharedV4Warning,
3
+ LanguageModelV4Prompt,
4
4
  UnsupportedFunctionalityError,
5
5
  } from '@ai-sdk/provider';
6
6
  import { OpenAIChatPrompt } from './openai-chat-prompt';
@@ -10,14 +10,14 @@ export function convertToOpenAIChatMessages({
10
10
  prompt,
11
11
  systemMessageMode = 'system',
12
12
  }: {
13
- prompt: LanguageModelV3Prompt;
13
+ prompt: LanguageModelV4Prompt;
14
14
  systemMessageMode?: 'system' | 'developer' | 'remove';
15
15
  }): {
16
16
  messages: OpenAIChatPrompt;
17
- warnings: Array<SharedV3Warning>;
17
+ warnings: Array<SharedV4Warning>;
18
18
  } {
19
19
  const messages: OpenAIChatPrompt = [];
20
- const warnings: Array<SharedV3Warning> = [];
20
+ const warnings: Array<SharedV4Warning> = [];
21
21
 
22
22
  for (const { role, content } of prompt) {
23
23
  switch (role) {
@@ -1,8 +1,8 @@
1
- import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
1
+ import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
2
2
 
3
3
  export function mapOpenAIFinishReason(
4
4
  finishReason: string | null | undefined,
5
- ): LanguageModelV3FinishReason['unified'] {
5
+ ): LanguageModelV4FinishReason['unified'] {
6
6
  switch (finishReason) {
7
7
  case 'stop':
8
8
  return 'stop';
@@ -1,14 +1,14 @@
1
1
  import {
2
2
  InvalidResponseDataError,
3
- LanguageModelV3,
4
- LanguageModelV3CallOptions,
5
- LanguageModelV3Content,
6
- LanguageModelV3FinishReason,
7
- LanguageModelV3GenerateResult,
8
- LanguageModelV3StreamPart,
9
- LanguageModelV3StreamResult,
10
- SharedV3ProviderMetadata,
11
- SharedV3Warning,
3
+ LanguageModelV4,
4
+ LanguageModelV4CallOptions,
5
+ LanguageModelV4Content,
6
+ LanguageModelV4FinishReason,
7
+ LanguageModelV4GenerateResult,
8
+ LanguageModelV4StreamPart,
9
+ LanguageModelV4StreamResult,
10
+ SharedV4ProviderMetadata,
11
+ SharedV4Warning,
12
12
  } from '@ai-sdk/provider';
13
13
  import {
14
14
  FetchFunction,
@@ -48,8 +48,8 @@ type OpenAIChatConfig = {
48
48
  fetch?: FetchFunction;
49
49
  };
50
50
 
51
- export class OpenAIChatLanguageModel implements LanguageModelV3 {
52
- readonly specificationVersion = 'v3';
51
+ export class OpenAIChatLanguageModel implements LanguageModelV4 {
52
+ readonly specificationVersion = 'v4';
53
53
 
54
54
  readonly modelId: OpenAIChatModelId;
55
55
 
@@ -82,8 +82,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
82
82
  tools,
83
83
  toolChoice,
84
84
  providerOptions,
85
- }: LanguageModelV3CallOptions) {
86
- const warnings: SharedV3Warning[] = [];
85
+ }: LanguageModelV4CallOptions) {
86
+ const warnings: SharedV4Warning[] = [];
87
87
 
88
88
  // Parse provider options
89
89
  const openaiOptions =
@@ -314,8 +314,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
314
314
  }
315
315
 
316
316
  async doGenerate(
317
- options: LanguageModelV3CallOptions,
318
- ): Promise<LanguageModelV3GenerateResult> {
317
+ options: LanguageModelV4CallOptions,
318
+ ): Promise<LanguageModelV4GenerateResult> {
319
319
  const { args: body, warnings } = await this.getArgs(options);
320
320
 
321
321
  const {
@@ -338,7 +338,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
338
338
  });
339
339
 
340
340
  const choice = response.choices[0];
341
- const content: Array<LanguageModelV3Content> = [];
341
+ const content: Array<LanguageModelV4Content> = [];
342
342
 
343
343
  // text content:
344
344
  const text = choice.message.content;
@@ -370,7 +370,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
370
370
  // provider metadata:
371
371
  const completionTokenDetails = response.usage?.completion_tokens_details;
372
372
  const promptTokenDetails = response.usage?.prompt_tokens_details;
373
- const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
373
+ const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
374
374
  if (completionTokenDetails?.accepted_prediction_tokens != null) {
375
375
  providerMetadata.openai.acceptedPredictionTokens =
376
376
  completionTokenDetails?.accepted_prediction_tokens;
@@ -402,8 +402,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
402
402
  }
403
403
 
404
404
  async doStream(
405
- options: LanguageModelV3CallOptions,
406
- ): Promise<LanguageModelV3StreamResult> {
405
+ options: LanguageModelV4CallOptions,
406
+ ): Promise<LanguageModelV4StreamResult> {
407
407
  const { args, warnings } = await this.getArgs(options);
408
408
 
409
409
  const body = {
@@ -439,7 +439,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
439
439
  hasFinished: boolean;
440
440
  }> = [];
441
441
 
442
- let finishReason: LanguageModelV3FinishReason = {
442
+ let finishReason: LanguageModelV4FinishReason = {
443
443
  unified: 'other',
444
444
  raw: undefined,
445
445
  };
@@ -447,13 +447,13 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
447
447
  let metadataExtracted = false;
448
448
  let isActiveText = false;
449
449
 
450
- const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
450
+ const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
451
451
 
452
452
  return {
453
453
  stream: response.pipeThrough(
454
454
  new TransformStream<
455
455
  ParseResult<OpenAIChatChunk>,
456
- LanguageModelV3StreamPart
456
+ LanguageModelV4StreamPart
457
457
  >({
458
458
  start(controller) {
459
459
  controller.enqueue({ type: 'stream-start', warnings });
@@ -1,6 +1,6 @@
1
1
  import {
2
- LanguageModelV3CallOptions,
3
- SharedV3Warning,
2
+ LanguageModelV4CallOptions,
3
+ SharedV4Warning,
4
4
  UnsupportedFunctionalityError,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
@@ -12,17 +12,17 @@ export function prepareChatTools({
12
12
  tools,
13
13
  toolChoice,
14
14
  }: {
15
- tools: LanguageModelV3CallOptions['tools'];
16
- toolChoice?: LanguageModelV3CallOptions['toolChoice'];
15
+ tools: LanguageModelV4CallOptions['tools'];
16
+ toolChoice?: LanguageModelV4CallOptions['toolChoice'];
17
17
  }): {
18
18
  tools?: OpenAIChatFunctionTool[];
19
19
  toolChoice?: OpenAIChatToolChoice;
20
- toolWarnings: Array<SharedV3Warning>;
20
+ toolWarnings: Array<SharedV4Warning>;
21
21
  } {
22
22
  // when the tools array is empty, change it to undefined to prevent errors:
23
23
  tools = tools?.length ? tools : undefined;
24
24
 
25
- const toolWarnings: SharedV3Warning[] = [];
25
+ const toolWarnings: SharedV4Warning[] = [];
26
26
 
27
27
  if (tools == null) {
28
28
  return { tools: undefined, toolChoice: undefined, toolWarnings };
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
1
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAICompletionUsage = {
4
4
  prompt_tokens?: number | null;
@@ -8,7 +8,7 @@ export type OpenAICompletionUsage = {
8
8
 
9
9
  export function convertOpenAICompletionUsage(
10
10
  usage: OpenAICompletionUsage | undefined | null,
11
- ): LanguageModelV3Usage {
11
+ ): LanguageModelV4Usage {
12
12
  if (usage == null) {
13
13
  return {
14
14
  inputTokens: {
@@ -1,6 +1,6 @@
1
1
  import {
2
2
  InvalidPromptError,
3
- LanguageModelV3Prompt,
3
+ LanguageModelV4Prompt,
4
4
  UnsupportedFunctionalityError,
5
5
  } from '@ai-sdk/provider';
6
6
 
@@ -9,7 +9,7 @@ export function convertToOpenAICompletionPrompt({
9
9
  user = 'user',
10
10
  assistant = 'assistant',
11
11
  }: {
12
- prompt: LanguageModelV3Prompt;
12
+ prompt: LanguageModelV4Prompt;
13
13
  user?: string;
14
14
  assistant?: string;
15
15
  }): {
@@ -1,8 +1,8 @@
1
- import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
1
+ import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
2
2
 
3
3
  export function mapOpenAIFinishReason(
4
4
  finishReason: string | null | undefined,
5
- ): LanguageModelV3FinishReason['unified'] {
5
+ ): LanguageModelV4FinishReason['unified'] {
6
6
  switch (finishReason) {
7
7
  case 'stop':
8
8
  return 'stop';
@@ -1,12 +1,12 @@
1
1
  import {
2
- LanguageModelV3,
3
- LanguageModelV3CallOptions,
4
- LanguageModelV3FinishReason,
5
- LanguageModelV3GenerateResult,
6
- LanguageModelV3StreamPart,
7
- LanguageModelV3StreamResult,
8
- SharedV3ProviderMetadata,
9
- SharedV3Warning,
2
+ LanguageModelV4,
3
+ LanguageModelV4CallOptions,
4
+ LanguageModelV4FinishReason,
5
+ LanguageModelV4GenerateResult,
6
+ LanguageModelV4StreamPart,
7
+ LanguageModelV4StreamResult,
8
+ SharedV4ProviderMetadata,
9
+ SharedV4Warning,
10
10
  } from '@ai-sdk/provider';
11
11
  import {
12
12
  combineHeaders,
@@ -42,8 +42,8 @@ type OpenAICompletionConfig = {
42
42
  fetch?: FetchFunction;
43
43
  };
44
44
 
45
- export class OpenAICompletionLanguageModel implements LanguageModelV3 {
46
- readonly specificationVersion = 'v3';
45
+ export class OpenAICompletionLanguageModel implements LanguageModelV4 {
46
+ readonly specificationVersion = 'v4';
47
47
 
48
48
  readonly modelId: OpenAICompletionModelId;
49
49
 
@@ -83,8 +83,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
83
83
  toolChoice,
84
84
  seed,
85
85
  providerOptions,
86
- }: LanguageModelV3CallOptions) {
87
- const warnings: SharedV3Warning[] = [];
86
+ }: LanguageModelV4CallOptions) {
87
+ const warnings: SharedV4Warning[] = [];
88
88
 
89
89
  // Parse provider options
90
90
  const openaiOptions = {
@@ -161,8 +161,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
161
161
  }
162
162
 
163
163
  async doGenerate(
164
- options: LanguageModelV3CallOptions,
165
- ): Promise<LanguageModelV3GenerateResult> {
164
+ options: LanguageModelV4CallOptions,
165
+ ): Promise<LanguageModelV4GenerateResult> {
166
166
  const { args, warnings } = await this.getArgs(options);
167
167
 
168
168
  const {
@@ -186,7 +186,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
186
186
 
187
187
  const choice = response.choices[0];
188
188
 
189
- const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
189
+ const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
190
190
 
191
191
  if (choice.logprobs != null) {
192
192
  providerMetadata.openai.logprobs = choice.logprobs;
@@ -211,8 +211,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
211
211
  }
212
212
 
213
213
  async doStream(
214
- options: LanguageModelV3CallOptions,
215
- ): Promise<LanguageModelV3StreamResult> {
214
+ options: LanguageModelV4CallOptions,
215
+ ): Promise<LanguageModelV4StreamResult> {
216
216
  const { args, warnings } = await this.getArgs(options);
217
217
 
218
218
  const body = {
@@ -239,11 +239,11 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
239
239
  fetch: this.config.fetch,
240
240
  });
241
241
 
242
- let finishReason: LanguageModelV3FinishReason = {
242
+ let finishReason: LanguageModelV4FinishReason = {
243
243
  unified: 'other',
244
244
  raw: undefined,
245
245
  };
246
- const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
246
+ const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
247
247
  let usage: OpenAICompletionUsage | undefined = undefined;
248
248
  let isFirstChunk = true;
249
249
 
@@ -251,7 +251,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
251
251
  stream: response.pipeThrough(
252
252
  new TransformStream<
253
253
  ParseResult<OpenAICompletionChunk>,
254
- LanguageModelV3StreamPart
254
+ LanguageModelV4StreamPart
255
255
  >({
256
256
  start(controller) {
257
257
  controller.enqueue({ type: 'stream-start', warnings });
@@ -1,5 +1,5 @@
1
1
  import {
2
- EmbeddingModelV3,
2
+ EmbeddingModelV4,
3
3
  TooManyEmbeddingValuesForCallError,
4
4
  } from '@ai-sdk/provider';
5
5
  import {
@@ -16,8 +16,8 @@ import {
16
16
  } from './openai-embedding-options';
17
17
  import { openaiTextEmbeddingResponseSchema } from './openai-embedding-api';
18
18
 
19
- export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
20
- readonly specificationVersion = 'v3';
19
+ export class OpenAIEmbeddingModel implements EmbeddingModelV4 {
20
+ readonly specificationVersion = 'v4';
21
21
  readonly modelId: OpenAIEmbeddingModelId;
22
22
  readonly maxEmbeddingsPerCall = 2048;
23
23
  readonly supportsParallelCalls = true;
@@ -38,8 +38,8 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
38
38
  headers,
39
39
  abortSignal,
40
40
  providerOptions,
41
- }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
42
- Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
41
+ }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<
42
+ Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
43
43
  > {
44
44
  if (values.length > this.maxEmbeddingsPerCall) {
45
45
  throw new TooManyEmbeddingValuesForCallError({
@@ -1,7 +1,7 @@
1
1
  import {
2
- ImageModelV3,
3
- ImageModelV3File,
4
- SharedV3Warning,
2
+ ImageModelV4,
3
+ ImageModelV4File,
4
+ SharedV4Warning,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  combineHeaders,
@@ -27,8 +27,8 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
27
27
  };
28
28
  }
29
29
 
30
- export class OpenAIImageModel implements ImageModelV3 {
31
- readonly specificationVersion = 'v3';
30
+ export class OpenAIImageModel implements ImageModelV4 {
31
+ readonly specificationVersion = 'v4';
32
32
 
33
33
  get maxImagesPerCall(): number {
34
34
  return modelMaxImagesPerCall[this.modelId] ?? 1;
@@ -54,10 +54,10 @@ export class OpenAIImageModel implements ImageModelV3 {
54
54
  providerOptions,
55
55
  headers,
56
56
  abortSignal,
57
- }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
58
- Awaited<ReturnType<ImageModelV3['doGenerate']>>
57
+ }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<
58
+ Awaited<ReturnType<ImageModelV4['doGenerate']>>
59
59
  > {
60
- const warnings: Array<SharedV3Warning> = [];
60
+ const warnings: Array<SharedV4Warning> = [];
61
61
 
62
62
  if (aspectRatio != null) {
63
63
  warnings.push({
@@ -332,7 +332,7 @@ type OpenAIImageEditInput = {
332
332
  };
333
333
 
334
334
  async function fileToBlob(
335
- file: ImageModelV3File | undefined,
335
+ file: ImageModelV4File | undefined,
336
336
  ): Promise<Blob | undefined> {
337
337
  if (!file) return undefined;
338
338
 
@@ -1,10 +1,10 @@
1
1
  import {
2
- EmbeddingModelV3,
3
- ImageModelV3,
4
- LanguageModelV3,
5
- ProviderV3,
6
- SpeechModelV3,
7
- TranscriptionModelV3,
2
+ EmbeddingModelV4,
3
+ ImageModelV4,
4
+ LanguageModelV4,
5
+ ProviderV4,
6
+ SpeechModelV4,
7
+ TranscriptionModelV4,
8
8
  } from '@ai-sdk/provider';
9
9
  import {
10
10
  FetchFunction,
@@ -30,68 +30,68 @@ import { OpenAITranscriptionModel } from './transcription/openai-transcription-m
30
30
  import { OpenAITranscriptionModelId } from './transcription/openai-transcription-options';
31
31
  import { VERSION } from './version';
32
32
 
33
- export interface OpenAIProvider extends ProviderV3 {
34
- (modelId: OpenAIResponsesModelId): LanguageModelV3;
33
+ export interface OpenAIProvider extends ProviderV4 {
34
+ (modelId: OpenAIResponsesModelId): LanguageModelV4;
35
35
 
36
36
  /**
37
37
  * Creates an OpenAI model for text generation.
38
38
  */
39
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
39
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV4;
40
40
 
41
41
  /**
42
42
  * Creates an OpenAI chat model for text generation.
43
43
  */
44
- chat(modelId: OpenAIChatModelId): LanguageModelV3;
44
+ chat(modelId: OpenAIChatModelId): LanguageModelV4;
45
45
 
46
46
  /**
47
47
  * Creates an OpenAI responses API model for text generation.
48
48
  */
49
- responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
49
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV4;
50
50
 
51
51
  /**
52
52
  * Creates an OpenAI completion model for text generation.
53
53
  */
54
- completion(modelId: OpenAICompletionModelId): LanguageModelV3;
54
+ completion(modelId: OpenAICompletionModelId): LanguageModelV4;
55
55
 
56
56
  /**
57
57
  * Creates a model for text embeddings.
58
58
  */
59
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
59
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
60
60
 
61
61
  /**
62
62
  * Creates a model for text embeddings.
63
63
  */
64
- embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
64
+ embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
65
65
 
66
66
  /**
67
67
  * @deprecated Use `embedding` instead.
68
68
  */
69
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
69
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
70
70
 
71
71
  /**
72
72
  * @deprecated Use `embeddingModel` instead.
73
73
  */
74
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
74
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
75
75
 
76
76
  /**
77
77
  * Creates a model for image generation.
78
78
  */
79
- image(modelId: OpenAIImageModelId): ImageModelV3;
79
+ image(modelId: OpenAIImageModelId): ImageModelV4;
80
80
 
81
81
  /**
82
82
  * Creates a model for image generation.
83
83
  */
84
- imageModel(modelId: OpenAIImageModelId): ImageModelV3;
84
+ imageModel(modelId: OpenAIImageModelId): ImageModelV4;
85
85
 
86
86
  /**
87
87
  * Creates a model for transcription.
88
88
  */
89
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
89
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV4;
90
90
 
91
91
  /**
92
92
  * Creates a model for speech generation.
93
93
  */
94
- speech(modelId: OpenAISpeechModelId): SpeechModelV3;
94
+ speech(modelId: OpenAISpeechModelId): SpeechModelV4;
95
95
 
96
96
  /**
97
97
  * OpenAI-specific tools.
@@ -240,7 +240,7 @@ export function createOpenAI(
240
240
  return createLanguageModel(modelId);
241
241
  };
242
242
 
243
- provider.specificationVersion = 'v3' as const;
243
+ provider.specificationVersion = 'v4' as const;
244
244
  provider.languageModel = createLanguageModel;
245
245
  provider.chat = createChatModel;
246
246
  provider.completion = createCompletionModel;
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
1
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAIResponsesUsage = {
4
4
  input_tokens: number;
@@ -13,7 +13,7 @@ export type OpenAIResponsesUsage = {
13
13
 
14
14
  export function convertOpenAIResponsesUsage(
15
15
  usage: OpenAIResponsesUsage | undefined | null,
16
- ): LanguageModelV3Usage {
16
+ ): LanguageModelV4Usage {
17
17
  if (usage == null) {
18
18
  return {
19
19
  inputTokens: {
@@ -1,7 +1,7 @@
1
1
  import {
2
- LanguageModelV3Prompt,
3
- LanguageModelV3ToolApprovalResponsePart,
4
- SharedV3Warning,
2
+ LanguageModelV4Prompt,
3
+ LanguageModelV4ToolApprovalResponsePart,
4
+ SharedV4Warning,
5
5
  UnsupportedFunctionalityError,
6
6
  } from '@ai-sdk/provider';
7
7
  import {
@@ -50,7 +50,7 @@ export async function convertToOpenAIResponsesInput({
50
50
  hasApplyPatchTool = false,
51
51
  customProviderToolNames,
52
52
  }: {
53
- prompt: LanguageModelV3Prompt;
53
+ prompt: LanguageModelV4Prompt;
54
54
  toolNameMapping: ToolNameMapping;
55
55
  systemMessageMode: 'system' | 'developer' | 'remove';
56
56
  providerOptionsName: string;
@@ -63,10 +63,10 @@ export async function convertToOpenAIResponsesInput({
63
63
  customProviderToolNames?: Set<string>;
64
64
  }): Promise<{
65
65
  input: OpenAIResponsesInput;
66
- warnings: Array<SharedV3Warning>;
66
+ warnings: Array<SharedV4Warning>;
67
67
  }> {
68
68
  let input: OpenAIResponsesInput = [];
69
- const warnings: Array<SharedV3Warning> = [];
69
+ const warnings: Array<SharedV4Warning> = [];
70
70
  const processedApprovalIds = new Set<string>();
71
71
 
72
72
  for (const { role, content } of prompt) {
@@ -488,7 +488,7 @@ export async function convertToOpenAIResponsesInput({
488
488
  for (const part of content) {
489
489
  if (part.type === 'tool-approval-response') {
490
490
  const approvalResponse =
491
- part as LanguageModelV3ToolApprovalResponsePart;
491
+ part as LanguageModelV4ToolApprovalResponsePart;
492
492
 
493
493
  if (processedApprovalIds.has(approvalResponse.approvalId)) {
494
494
  continue;
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
1
+ import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
2
2
 
3
3
  export function mapOpenAIResponseFinishReason({
4
4
  finishReason,
@@ -7,7 +7,7 @@ export function mapOpenAIResponseFinishReason({
7
7
  finishReason: string | null | undefined;
8
8
  // flag that checks if there have been client-side tool calls (not executed by openai)
9
9
  hasFunctionCall: boolean;
10
- }): LanguageModelV3FinishReason['unified'] {
10
+ }): LanguageModelV4FinishReason['unified'] {
11
11
  switch (finishReason) {
12
12
  case undefined:
13
13
  case null: