@ai-sdk/openai 4.0.0-beta.40 → 4.0.0-beta.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.ts +6 -6
  3. package/dist/index.js +176 -118
  4. package/dist/index.js.map +1 -1
  5. package/dist/internal/index.d.ts +2 -2
  6. package/dist/internal/index.js +162 -107
  7. package/dist/internal/index.js.map +1 -1
  8. package/docs/03-openai.mdx +18 -13
  9. package/package.json +4 -4
  10. package/src/chat/convert-openai-chat-usage.ts +1 -1
  11. package/src/chat/convert-to-openai-chat-messages.ts +85 -72
  12. package/src/chat/map-openai-finish-reason.ts +1 -1
  13. package/src/chat/openai-chat-api.ts +6 -2
  14. package/src/chat/openai-chat-language-model.ts +6 -6
  15. package/src/chat/openai-chat-options.ts +5 -1
  16. package/src/chat/openai-chat-prepare-tools.ts +3 -3
  17. package/src/completion/convert-openai-completion-usage.ts +1 -1
  18. package/src/completion/convert-to-openai-completion-prompt.ts +1 -2
  19. package/src/completion/map-openai-finish-reason.ts +1 -1
  20. package/src/completion/openai-completion-api.ts +5 -2
  21. package/src/completion/openai-completion-language-model.ts +6 -7
  22. package/src/completion/openai-completion-options.ts +5 -1
  23. package/src/embedding/openai-embedding-model.ts +3 -3
  24. package/src/embedding/openai-embedding-options.ts +5 -1
  25. package/src/files/openai-files-options.ts +5 -1
  26. package/src/files/openai-files.ts +5 -7
  27. package/src/image/openai-image-model.ts +3 -4
  28. package/src/openai-config.ts +1 -1
  29. package/src/openai-provider.ts +9 -9
  30. package/src/responses/convert-openai-responses-usage.ts +1 -1
  31. package/src/responses/convert-to-openai-responses-input.ts +72 -60
  32. package/src/responses/map-openai-responses-finish-reason.ts +1 -1
  33. package/src/responses/openai-responses-api.ts +6 -2
  34. package/src/responses/openai-responses-language-model.ts +35 -35
  35. package/src/responses/openai-responses-options.ts +5 -1
  36. package/src/responses/openai-responses-prepare-tools.ts +5 -5
  37. package/src/responses/openai-responses-provider-metadata.ts +2 -2
  38. package/src/skills/openai-skills.ts +4 -8
  39. package/src/speech/openai-speech-model.ts +4 -5
  40. package/src/speech/openai-speech-options.ts +5 -1
  41. package/src/tool/file-search.ts +1 -1
  42. package/src/tool/mcp.ts +1 -1
  43. package/src/tool/tool-search.ts +2 -2
  44. package/src/transcription/openai-transcription-model.ts +4 -5
  45. package/src/transcription/openai-transcription-options.ts +5 -1
@@ -1194,8 +1194,9 @@ const result = await generateText({
1194
1194
  text: 'Please describe the image.',
1195
1195
  },
1196
1196
  {
1197
- type: 'image',
1198
- image: readFileSync('./data/image.png'),
1197
+ type: 'file',
1198
+ mediaType: 'image',
1199
+ data: readFileSync('./data/image.png'),
1199
1200
  },
1200
1201
  ],
1201
1202
  },
@@ -1210,8 +1211,9 @@ You can also pass a file-id from the OpenAI Files API.
1210
1211
 
1211
1212
  ```ts
1212
1213
  {
1213
- type: 'image',
1214
- image: 'file-8EFBcWHsQxZV7YGezBC1fq'
1214
+ type: 'file',
1215
+ mediaType: 'image',
1216
+ data: 'file-8EFBcWHsQxZV7YGezBC1fq'
1215
1217
  }
1216
1218
  ```
1217
1219
 
@@ -1219,8 +1221,9 @@ You can also pass the URL of an image.
1219
1221
 
1220
1222
  ```ts
1221
1223
  {
1222
- type: 'image',
1223
- image: 'https://sample.edu/image.png',
1224
+ type: 'file',
1225
+ mediaType: 'image',
1226
+ data: 'https://sample.edu/image.png',
1224
1227
  }
1225
1228
  ```
1226
1229
 
@@ -1931,8 +1934,9 @@ const result = await generateText({
1931
1934
  text: 'Please describe the image.',
1932
1935
  },
1933
1936
  {
1934
- type: 'image',
1935
- image: readFileSync('./data/image.png'),
1937
+ type: 'file',
1938
+ mediaType: 'image',
1939
+ data: readFileSync('./data/image.png'),
1936
1940
  },
1937
1941
  ],
1938
1942
  },
@@ -1947,8 +1951,9 @@ You can also pass the URL of an image.
1947
1951
 
1948
1952
  ```ts
1949
1953
  {
1950
- type: 'image',
1951
- image: 'https://sample.edu/image.png',
1954
+ type: 'file',
1955
+ mediaType: 'image',
1956
+ data: 'https://sample.edu/image.png',
1952
1957
  }
1953
1958
  ```
1954
1959
 
@@ -2065,9 +2070,9 @@ const result = await generateText({
2065
2070
  content: [
2066
2071
  { type: 'text', text: 'Describe the image in detail.' },
2067
2072
  {
2068
- type: 'image',
2069
- image:
2070
- 'https://github.com/vercel/ai/blob/main/examples/ai-functions/data/comic-cat.png?raw=true',
2073
+ type: 'file',
2074
+ mediaType: 'image',
2075
+ data: 'https://github.com/vercel/ai/blob/main/examples/ai-functions/data/comic-cat.png?raw=true',
2071
2076
 
2072
2077
  // OpenAI specific options - image detail:
2073
2078
  providerOptions: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/openai",
3
- "version": "4.0.0-beta.40",
3
+ "version": "4.0.0-beta.41",
4
4
  "type": "module",
5
5
  "license": "Apache-2.0",
6
6
  "sideEffects": false,
@@ -35,15 +35,15 @@
35
35
  }
36
36
  },
37
37
  "dependencies": {
38
- "@ai-sdk/provider": "4.0.0-beta.13",
39
- "@ai-sdk/provider-utils": "5.0.0-beta.28"
38
+ "@ai-sdk/provider": "4.0.0-beta.14",
39
+ "@ai-sdk/provider-utils": "5.0.0-beta.29"
40
40
  },
41
41
  "devDependencies": {
42
42
  "@types/node": "20.17.24",
43
43
  "tsup": "^8",
44
44
  "typescript": "5.8.3",
45
45
  "zod": "3.25.76",
46
- "@ai-sdk/test-server": "2.0.0-beta.2",
46
+ "@ai-sdk/test-server": "2.0.0-beta.3",
47
47
  "@vercel/ai-tsconfig": "0.0.0"
48
48
  },
49
49
  "peerDependencies": {
@@ -1,4 +1,4 @@
1
- import { LanguageModelV4Usage } from '@ai-sdk/provider';
1
+ import type { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAIChatUsage = {
4
4
  prompt_tokens?: number | null;
@@ -1,12 +1,13 @@
1
1
  import {
2
- SharedV4Warning,
3
- LanguageModelV4Prompt,
4
2
  UnsupportedFunctionalityError,
3
+ type SharedV4Warning,
4
+ type LanguageModelV4Prompt,
5
5
  } from '@ai-sdk/provider';
6
- import { OpenAIChatPrompt } from './openai-chat-prompt';
6
+ import type { OpenAIChatPrompt } from './openai-chat-prompt';
7
7
  import {
8
8
  convertToBase64,
9
- isProviderReference,
9
+ getTopLevelMediaType,
10
+ resolveFullMediaType,
10
11
  resolveProviderReference,
11
12
  } from '@ai-sdk/provider-utils';
12
13
 
@@ -70,87 +71,99 @@ export function convertToOpenAIChatMessages({
70
71
  return { type: 'text', text: part.text };
71
72
  }
72
73
  case 'file': {
73
- if (isProviderReference(part.data)) {
74
- return {
75
- type: 'file',
76
- file: {
77
- file_id: resolveProviderReference({
78
- reference: part.data,
79
- provider: 'openai',
80
- }),
81
- },
82
- };
83
- }
84
-
85
- if (part.mediaType.startsWith('image/')) {
86
- const mediaType =
87
- part.mediaType === 'image/*'
88
- ? 'image/jpeg'
89
- : part.mediaType;
90
-
91
- return {
92
- type: 'image_url',
93
- image_url: {
94
- url:
95
- part.data instanceof URL
96
- ? part.data.toString()
97
- : `data:${mediaType};base64,${convertToBase64(part.data)}`,
98
-
99
- detail: part.providerOptions?.openai?.imageDetail,
100
- },
101
- };
102
- } else if (part.mediaType.startsWith('audio/')) {
103
- if (part.data instanceof URL) {
74
+ switch (part.data.type) {
75
+ case 'reference': {
76
+ return {
77
+ type: 'file',
78
+ file: {
79
+ file_id: resolveProviderReference({
80
+ reference: part.data.reference,
81
+ provider: 'openai',
82
+ }),
83
+ },
84
+ };
85
+ }
86
+ case 'text': {
104
87
  throw new UnsupportedFunctionalityError({
105
- functionality: 'audio file parts with URLs',
88
+ functionality: 'text file parts',
106
89
  });
107
90
  }
91
+ case 'url':
92
+ case 'data': {
93
+ const topLevel = getTopLevelMediaType(part.mediaType);
108
94
 
109
- switch (part.mediaType) {
110
- case 'audio/wav': {
95
+ if (topLevel === 'image') {
111
96
  return {
112
- type: 'input_audio',
113
- input_audio: {
114
- data: convertToBase64(part.data),
115
- format: 'wav',
97
+ type: 'image_url',
98
+ image_url: {
99
+ url:
100
+ part.data.type === 'url'
101
+ ? part.data.url.toString()
102
+ : convertToBase64(part.data.data),
103
+
104
+ detail: part.providerOptions?.openai?.imageDetail,
116
105
  },
117
106
  };
107
+ } else if (topLevel === 'audio') {
108
+ if (part.data.type === 'url') {
109
+ throw new UnsupportedFunctionalityError({
110
+ functionality: 'audio file parts with URLs',
111
+ });
112
+ }
113
+
114
+ const fullMediaType = resolveFullMediaType({ part });
115
+
116
+ switch (fullMediaType) {
117
+ case 'audio/wav': {
118
+ return {
119
+ type: 'input_audio',
120
+ input_audio: {
121
+ data: convertToBase64(part.data.data),
122
+ format: 'wav',
123
+ },
124
+ };
125
+ }
126
+ case 'audio/mp3':
127
+ case 'audio/mpeg': {
128
+ return {
129
+ type: 'input_audio',
130
+ input_audio: {
131
+ data: convertToBase64(part.data.data),
132
+ format: 'mp3',
133
+ },
134
+ };
135
+ }
136
+
137
+ default: {
138
+ throw new UnsupportedFunctionalityError({
139
+ functionality: `audio content parts with media type ${fullMediaType}`,
140
+ });
141
+ }
142
+ }
118
143
  }
119
- case 'audio/mp3':
120
- case 'audio/mpeg': {
144
+ {
145
+ const fullMediaType = resolveFullMediaType({ part });
146
+ if (fullMediaType !== 'application/pdf') {
147
+ throw new UnsupportedFunctionalityError({
148
+ functionality: `file part media type ${fullMediaType}`,
149
+ });
150
+ }
151
+
152
+ if (part.data.type === 'url') {
153
+ throw new UnsupportedFunctionalityError({
154
+ functionality: 'PDF file parts with URLs',
155
+ });
156
+ }
157
+
121
158
  return {
122
- type: 'input_audio',
123
- input_audio: {
124
- data: convertToBase64(part.data),
125
- format: 'mp3',
159
+ type: 'file',
160
+ file: {
161
+ filename: part.filename ?? `part-${index}.pdf`,
162
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data.data)}`,
126
163
  },
127
164
  };
128
165
  }
129
-
130
- default: {
131
- throw new UnsupportedFunctionalityError({
132
- functionality: `audio content parts with media type ${part.mediaType}`,
133
- });
134
- }
135
166
  }
136
- } else if (part.mediaType === 'application/pdf') {
137
- if (part.data instanceof URL) {
138
- throw new UnsupportedFunctionalityError({
139
- functionality: 'PDF file parts with URLs',
140
- });
141
- }
142
-
143
- return {
144
- type: 'file',
145
- file: {
146
- filename: part.filename ?? `part-${index}.pdf`,
147
- file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,
148
- },
149
- };
150
- } else {
151
- throw new UnsupportedFunctionalityError({
152
- functionality: `file part media type ${part.mediaType}`,
153
- });
154
167
  }
155
168
  }
156
169
  }
@@ -1,4 +1,4 @@
1
- import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
1
+ import type { LanguageModelV4FinishReason } from '@ai-sdk/provider';
2
2
 
3
3
  export function mapOpenAIFinishReason(
4
4
  finishReason: string | null | undefined,
@@ -1,5 +1,9 @@
1
- import { JSONSchema7 } from '@ai-sdk/provider';
2
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import type { JSONSchema7 } from '@ai-sdk/provider';
2
+ import {
3
+ lazySchema,
4
+ zodSchema,
5
+ type InferSchema,
6
+ } from '@ai-sdk/provider-utils';
3
7
  import { z } from 'zod/v4';
4
8
  import { openaiErrorDataSchema } from '../openai-error';
5
9
 
@@ -1,4 +1,4 @@
1
- import {
1
+ import type {
2
2
  LanguageModelV4,
3
3
  LanguageModelV4CallOptions,
4
4
  LanguageModelV4Content,
@@ -10,8 +10,6 @@ import {
10
10
  SharedV4Warning,
11
11
  } from '@ai-sdk/provider';
12
12
  import {
13
- FetchFunction,
14
- ParseResult,
15
13
  StreamingToolCallTracker,
16
14
  combineHeaders,
17
15
  createEventSourceResponseHandler,
@@ -23,24 +21,26 @@ import {
23
21
  serializeModelOptions,
24
22
  WORKFLOW_DESERIALIZE,
25
23
  WORKFLOW_SERIALIZE,
24
+ type FetchFunction,
25
+ type ParseResult,
26
26
  } from '@ai-sdk/provider-utils';
27
27
  import { openaiFailedResponseHandler } from '../openai-error';
28
28
  import { getOpenAILanguageModelCapabilities } from '../openai-language-model-capabilities';
29
29
  import {
30
- OpenAIChatUsage,
31
30
  convertOpenAIChatUsage,
31
+ type OpenAIChatUsage,
32
32
  } from './convert-openai-chat-usage';
33
33
  import { convertToOpenAIChatMessages } from './convert-to-openai-chat-messages';
34
34
  import { getResponseMetadata } from './get-response-metadata';
35
35
  import { mapOpenAIFinishReason } from './map-openai-finish-reason';
36
36
  import {
37
- OpenAIChatChunk,
38
37
  openaiChatChunkSchema,
39
38
  openaiChatResponseSchema,
39
+ type OpenAIChatChunk,
40
40
  } from './openai-chat-api';
41
41
  import {
42
- OpenAIChatModelId,
43
42
  openaiLanguageModelChatOptions,
43
+ type OpenAIChatModelId,
44
44
  } from './openai-chat-options';
45
45
  import { prepareChatTools } from './openai-chat-prepare-tools';
46
46
 
@@ -1,4 +1,8 @@
1
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import {
2
+ lazySchema,
3
+ zodSchema,
4
+ type InferSchema,
5
+ } from '@ai-sdk/provider-utils';
2
6
  import { z } from 'zod/v4';
3
7
 
4
8
  // https://platform.openai.com/docs/models
@@ -1,9 +1,9 @@
1
1
  import {
2
- LanguageModelV4CallOptions,
3
- SharedV4Warning,
4
2
  UnsupportedFunctionalityError,
3
+ type LanguageModelV4CallOptions,
4
+ type SharedV4Warning,
5
5
  } from '@ai-sdk/provider';
6
- import {
6
+ import type {
7
7
  OpenAIChatToolChoice,
8
8
  OpenAIChatFunctionTool,
9
9
  } from './openai-chat-api';
@@ -1,4 +1,4 @@
1
- import { LanguageModelV4Usage } from '@ai-sdk/provider';
1
+ import type { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAICompletionUsage = {
4
4
  prompt_tokens?: number | null;
@@ -1,9 +1,8 @@
1
1
  import {
2
2
  InvalidPromptError,
3
- LanguageModelV4Prompt,
4
3
  UnsupportedFunctionalityError,
4
+ type LanguageModelV4Prompt,
5
5
  } from '@ai-sdk/provider';
6
-
7
6
  export function convertToOpenAICompletionPrompt({
8
7
  prompt,
9
8
  user = 'user',
@@ -1,4 +1,4 @@
1
- import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
1
+ import type { LanguageModelV4FinishReason } from '@ai-sdk/provider';
2
2
 
3
3
  export function mapOpenAIFinishReason(
4
4
  finishReason: string | null | undefined,
@@ -1,7 +1,10 @@
1
1
  import { z } from 'zod/v4';
2
2
  import { openaiErrorDataSchema } from '../openai-error';
3
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
4
-
3
+ import {
4
+ lazySchema,
5
+ zodSchema,
6
+ type InferSchema,
7
+ } from '@ai-sdk/provider-utils';
5
8
  // limited version of the schema, focussed on what is needed for the implementation
6
9
  // this approach limits breakages when the API changes and increases efficiency
7
10
  export const openaiCompletionResponseSchema = lazySchema(() =>
@@ -1,4 +1,4 @@
1
- import {
1
+ import type {
2
2
  LanguageModelV4,
3
3
  LanguageModelV4CallOptions,
4
4
  LanguageModelV4FinishReason,
@@ -12,32 +12,31 @@ import {
12
12
  combineHeaders,
13
13
  createEventSourceResponseHandler,
14
14
  createJsonResponseHandler,
15
- FetchFunction,
16
15
  parseProviderOptions,
17
- ParseResult,
18
16
  postJsonToApi,
19
17
  serializeModelOptions,
20
18
  WORKFLOW_DESERIALIZE,
21
19
  WORKFLOW_SERIALIZE,
20
+ type FetchFunction,
21
+ type ParseResult,
22
22
  } from '@ai-sdk/provider-utils';
23
23
  import { openaiFailedResponseHandler } from '../openai-error';
24
24
  import {
25
25
  convertOpenAICompletionUsage,
26
- OpenAICompletionUsage,
26
+ type OpenAICompletionUsage,
27
27
  } from './convert-openai-completion-usage';
28
28
  import { convertToOpenAICompletionPrompt } from './convert-to-openai-completion-prompt';
29
29
  import { getResponseMetadata } from './get-response-metadata';
30
30
  import { mapOpenAIFinishReason } from './map-openai-finish-reason';
31
31
  import {
32
- OpenAICompletionChunk,
33
32
  openaiCompletionChunkSchema,
34
33
  openaiCompletionResponseSchema,
34
+ type OpenAICompletionChunk,
35
35
  } from './openai-completion-api';
36
36
  import {
37
- OpenAICompletionModelId,
38
37
  openaiLanguageModelCompletionOptions,
38
+ type OpenAICompletionModelId,
39
39
  } from './openai-completion-options';
40
-
41
40
  type OpenAICompletionConfig = {
42
41
  provider: string;
43
42
  headers?: () => Record<string, string | undefined>;
@@ -1,4 +1,8 @@
1
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import {
2
+ lazySchema,
3
+ zodSchema,
4
+ type InferSchema,
5
+ } from '@ai-sdk/provider-utils';
2
6
  import { z } from 'zod/v4';
3
7
 
4
8
  // https://platform.openai.com/docs/models
@@ -1,6 +1,6 @@
1
1
  import {
2
- EmbeddingModelV4,
3
2
  TooManyEmbeddingValuesForCallError,
3
+ type EmbeddingModelV4,
4
4
  } from '@ai-sdk/provider';
5
5
  import {
6
6
  combineHeaders,
@@ -11,11 +11,11 @@ import {
11
11
  WORKFLOW_DESERIALIZE,
12
12
  WORKFLOW_SERIALIZE,
13
13
  } from '@ai-sdk/provider-utils';
14
- import { OpenAIConfig } from '../openai-config';
14
+ import type { OpenAIConfig } from '../openai-config';
15
15
  import { openaiFailedResponseHandler } from '../openai-error';
16
16
  import {
17
- OpenAIEmbeddingModelId,
18
17
  openaiEmbeddingModelOptions,
18
+ type OpenAIEmbeddingModelId,
19
19
  } from './openai-embedding-options';
20
20
  import { openaiTextEmbeddingResponseSchema } from './openai-embedding-api';
21
21
 
@@ -1,4 +1,8 @@
1
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import {
2
+ lazySchema,
3
+ zodSchema,
4
+ type InferSchema,
5
+ } from '@ai-sdk/provider-utils';
2
6
  import { z } from 'zod/v4';
3
7
 
4
8
  export type OpenAIEmbeddingModelId =
@@ -1,4 +1,8 @@
1
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import {
2
+ lazySchema,
3
+ zodSchema,
4
+ type InferSchema,
5
+ } from '@ai-sdk/provider-utils';
2
6
  import { z } from 'zod/v4';
3
7
 
4
8
  export const openaiFilesOptionsSchema = lazySchema(() =>
@@ -1,23 +1,22 @@
1
- import {
1
+ import type {
2
2
  FilesV4,
3
3
  FilesV4UploadFileCallOptions,
4
4
  FilesV4UploadFileResult,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  combineHeaders,
8
- convertBase64ToUint8Array,
8
+ convertInlineFileDataToUint8Array,
9
9
  createJsonResponseHandler,
10
- FetchFunction,
11
10
  parseProviderOptions,
12
11
  postFormDataToApi,
12
+ type FetchFunction,
13
13
  } from '@ai-sdk/provider-utils';
14
14
  import { openaiFailedResponseHandler } from '../openai-error';
15
15
  import { openaiFilesResponseSchema } from './openai-files-api';
16
16
  import {
17
17
  openaiFilesOptionsSchema,
18
- OpenAIFilesOptions,
18
+ type OpenAIFilesOptions,
19
19
  } from './openai-files-options';
20
-
21
20
  interface OpenAIFilesConfig {
22
21
  provider: string;
23
22
  baseURL: string;
@@ -46,8 +45,7 @@ export class OpenAIFiles implements FilesV4 {
46
45
  schema: openaiFilesOptionsSchema,
47
46
  })) as OpenAIFilesOptions | undefined;
48
47
 
49
- const fileBytes =
50
- data instanceof Uint8Array ? data : convertBase64ToUint8Array(data);
48
+ const fileBytes = convertInlineFileDataToUint8Array(data);
51
49
 
52
50
  const blob = new Blob([fileBytes], {
53
51
  type: mediaType,
@@ -1,4 +1,4 @@
1
- import {
1
+ import type {
2
2
  ImageModelV4,
3
3
  ImageModelV4File,
4
4
  SharedV4Warning,
@@ -15,15 +15,14 @@ import {
15
15
  WORKFLOW_DESERIALIZE,
16
16
  WORKFLOW_SERIALIZE,
17
17
  } from '@ai-sdk/provider-utils';
18
- import { OpenAIConfig } from '../openai-config';
18
+ import type { OpenAIConfig } from '../openai-config';
19
19
  import { openaiFailedResponseHandler } from '../openai-error';
20
20
  import { openaiImageResponseSchema } from './openai-image-api';
21
21
  import {
22
- OpenAIImageModelId,
23
22
  hasDefaultResponseFormat,
24
23
  modelMaxImagesPerCall,
24
+ type OpenAIImageModelId,
25
25
  } from './openai-image-options';
26
-
27
26
  interface OpenAIImageModelConfig extends OpenAIConfig {
28
27
  _internal?: {
29
28
  currentDate?: () => Date;
@@ -1,4 +1,4 @@
1
- import { FetchFunction } from '@ai-sdk/provider-utils';
1
+ import type { FetchFunction } from '@ai-sdk/provider-utils';
2
2
 
3
3
  export type OpenAIConfig = {
4
4
  provider: string;
@@ -1,4 +1,4 @@
1
- import {
1
+ import type {
2
2
  EmbeddingModelV4,
3
3
  FilesV4,
4
4
  ImageModelV4,
@@ -9,28 +9,28 @@ import {
9
9
  TranscriptionModelV4,
10
10
  } from '@ai-sdk/provider';
11
11
  import {
12
- FetchFunction,
13
12
  loadApiKey,
14
13
  loadOptionalSetting,
15
14
  withoutTrailingSlash,
16
15
  withUserAgentSuffix,
16
+ type FetchFunction,
17
17
  } from '@ai-sdk/provider-utils';
18
18
  import { OpenAIChatLanguageModel } from './chat/openai-chat-language-model';
19
- import { OpenAIChatModelId } from './chat/openai-chat-options';
19
+ import type { OpenAIChatModelId } from './chat/openai-chat-options';
20
20
  import { OpenAICompletionLanguageModel } from './completion/openai-completion-language-model';
21
- import { OpenAICompletionModelId } from './completion/openai-completion-options';
21
+ import type { OpenAICompletionModelId } from './completion/openai-completion-options';
22
22
  import { OpenAIEmbeddingModel } from './embedding/openai-embedding-model';
23
23
  import { OpenAIFiles } from './files/openai-files';
24
- import { OpenAIEmbeddingModelId } from './embedding/openai-embedding-options';
24
+ import type { OpenAIEmbeddingModelId } from './embedding/openai-embedding-options';
25
25
  import { OpenAIImageModel } from './image/openai-image-model';
26
- import { OpenAIImageModelId } from './image/openai-image-options';
26
+ import type { OpenAIImageModelId } from './image/openai-image-options';
27
27
  import { openaiTools } from './openai-tools';
28
28
  import { OpenAIResponsesLanguageModel } from './responses/openai-responses-language-model';
29
- import { OpenAIResponsesModelId } from './responses/openai-responses-options';
29
+ import type { OpenAIResponsesModelId } from './responses/openai-responses-options';
30
30
  import { OpenAISpeechModel } from './speech/openai-speech-model';
31
- import { OpenAISpeechModelId } from './speech/openai-speech-options';
31
+ import type { OpenAISpeechModelId } from './speech/openai-speech-options';
32
32
  import { OpenAITranscriptionModel } from './transcription/openai-transcription-model';
33
- import { OpenAITranscriptionModelId } from './transcription/openai-transcription-options';
33
+ import type { OpenAITranscriptionModelId } from './transcription/openai-transcription-options';
34
34
  import { OpenAISkills } from './skills/openai-skills';
35
35
  import { VERSION } from './version';
36
36
 
@@ -1,4 +1,4 @@
1
- import { LanguageModelV4Usage } from '@ai-sdk/provider';
1
+ import type { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAIResponsesUsage = {
4
4
  input_tokens: number;