@ai-sdk/openai 3.0.14 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/package.json +6 -5
  5. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  6. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  7. package/src/chat/convert-openai-chat-usage.ts +57 -0
  8. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  9. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  10. package/src/chat/get-response-metadata.ts +15 -0
  11. package/src/chat/map-openai-finish-reason.ts +19 -0
  12. package/src/chat/openai-chat-api.ts +198 -0
  13. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  14. package/src/chat/openai-chat-language-model.ts +700 -0
  15. package/src/chat/openai-chat-options.ts +186 -0
  16. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  17. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  18. package/src/chat/openai-chat-prompt.ts +70 -0
  19. package/src/completion/convert-openai-completion-usage.ts +46 -0
  20. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  21. package/src/completion/get-response-metadata.ts +15 -0
  22. package/src/completion/map-openai-finish-reason.ts +19 -0
  23. package/src/completion/openai-completion-api.ts +81 -0
  24. package/src/completion/openai-completion-language-model.test.ts +752 -0
  25. package/src/completion/openai-completion-language-model.ts +336 -0
  26. package/src/completion/openai-completion-options.ts +58 -0
  27. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  28. package/src/embedding/openai-embedding-api.ts +13 -0
  29. package/src/embedding/openai-embedding-model.test.ts +146 -0
  30. package/src/embedding/openai-embedding-model.ts +95 -0
  31. package/src/embedding/openai-embedding-options.ts +30 -0
  32. package/src/image/openai-image-api.ts +35 -0
  33. package/src/image/openai-image-model.test.ts +722 -0
  34. package/src/image/openai-image-model.ts +305 -0
  35. package/src/image/openai-image-options.ts +28 -0
  36. package/src/index.ts +9 -0
  37. package/src/internal/index.ts +19 -0
  38. package/src/openai-config.ts +18 -0
  39. package/src/openai-error.test.ts +34 -0
  40. package/src/openai-error.ts +22 -0
  41. package/src/openai-language-model-capabilities.test.ts +93 -0
  42. package/src/openai-language-model-capabilities.ts +54 -0
  43. package/src/openai-provider.test.ts +98 -0
  44. package/src/openai-provider.ts +270 -0
  45. package/src/openai-tools.ts +114 -0
  46. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  47. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  48. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  49. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  50. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  51. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  52. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  53. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  54. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  55. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  56. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  57. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  58. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  59. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  60. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  61. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  62. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  63. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  64. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  65. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  66. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  67. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  68. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  71. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  72. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  73. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  74. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  75. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  76. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  77. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  78. package/src/responses/convert-openai-responses-usage.ts +53 -0
  79. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  80. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  81. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  82. package/src/responses/openai-responses-api.test.ts +89 -0
  83. package/src/responses/openai-responses-api.ts +1086 -0
  84. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  85. package/src/responses/openai-responses-language-model.ts +1932 -0
  86. package/src/responses/openai-responses-options.ts +312 -0
  87. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  88. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  89. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  90. package/src/speech/openai-speech-api.ts +38 -0
  91. package/src/speech/openai-speech-model.test.ts +202 -0
  92. package/src/speech/openai-speech-model.ts +137 -0
  93. package/src/speech/openai-speech-options.ts +22 -0
  94. package/src/tool/apply-patch.ts +141 -0
  95. package/src/tool/code-interpreter.ts +104 -0
  96. package/src/tool/file-search.ts +145 -0
  97. package/src/tool/image-generation.ts +126 -0
  98. package/src/tool/local-shell.test-d.ts +20 -0
  99. package/src/tool/local-shell.ts +72 -0
  100. package/src/tool/mcp.ts +125 -0
  101. package/src/tool/shell.ts +85 -0
  102. package/src/tool/web-search-preview.ts +139 -0
  103. package/src/tool/web-search.test-d.ts +13 -0
  104. package/src/tool/web-search.ts +179 -0
  105. package/src/transcription/openai-transcription-api.ts +37 -0
  106. package/src/transcription/openai-transcription-model.test.ts +507 -0
  107. package/src/transcription/openai-transcription-model.ts +232 -0
  108. package/src/transcription/openai-transcription-options.ts +50 -0
  109. package/src/transcription/transcription-test.mp3 +0 -0
  110. package/src/version.ts +6 -0
package/src/responses/openai-responses-options.ts
@@ -0,0 +1,312 @@
+import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+/**
+ * `top_logprobs` request body argument can be set to an integer between
+ * 0 and 20 specifying the number of most likely tokens to return at each
+ * token position, each with an associated log probability.
+ *
+ * @see https://platform.openai.com/docs/api-reference/responses/create#responses_create-top_logprobs
+ */
+export const TOP_LOGPROBS_MAX = 20;
+
+export const openaiResponsesReasoningModelIds = [
+  'o1',
+  'o1-2024-12-17',
+  'o3',
+  'o3-2025-04-16',
+  'o3-deep-research',
+  'o3-deep-research-2025-06-26',
+  'o3-mini',
+  'o3-mini-2025-01-31',
+  'o4-mini',
+  'o4-mini-2025-04-16',
+  'o4-mini-deep-research',
+  'o4-mini-deep-research-2025-06-26',
+  'codex-mini-latest',
+  'computer-use-preview',
+  'gpt-5',
+  'gpt-5-2025-08-07',
+  'gpt-5-codex',
+  'gpt-5-mini',
+  'gpt-5-mini-2025-08-07',
+  'gpt-5-nano',
+  'gpt-5-nano-2025-08-07',
+  'gpt-5-pro',
+  'gpt-5-pro-2025-10-06',
+  'gpt-5.1',
+  'gpt-5.1-chat-latest',
+  'gpt-5.1-codex-mini',
+  'gpt-5.1-codex',
+  'gpt-5.1-codex-max',
+  'gpt-5.2',
+  'gpt-5.2-chat-latest',
+  'gpt-5.2-pro',
+] as const;
+
+export const openaiResponsesModelIds = [
+  'gpt-4.1',
+  'gpt-4.1-2025-04-14',
+  'gpt-4.1-mini',
+  'gpt-4.1-mini-2025-04-14',
+  'gpt-4.1-nano',
+  'gpt-4.1-nano-2025-04-14',
+  'gpt-4o',
+  'gpt-4o-2024-05-13',
+  'gpt-4o-2024-08-06',
+  'gpt-4o-2024-11-20',
+  'gpt-4o-audio-preview',
+  'gpt-4o-audio-preview-2024-10-01',
+  'gpt-4o-audio-preview-2024-12-17',
+  'gpt-4o-search-preview',
+  'gpt-4o-search-preview-2025-03-11',
+  'gpt-4o-mini-search-preview',
+  'gpt-4o-mini-search-preview-2025-03-11',
+  'gpt-4o-mini',
+  'gpt-4o-mini-2024-07-18',
+  'gpt-4-turbo',
+  'gpt-4-turbo-2024-04-09',
+  'gpt-4-turbo-preview',
+  'gpt-4-0125-preview',
+  'gpt-4-1106-preview',
+  'gpt-4',
+  'gpt-4-0613',
+  'gpt-4.5-preview',
+  'gpt-4.5-preview-2025-02-27',
+  'gpt-3.5-turbo-0125',
+  'gpt-3.5-turbo',
+  'gpt-3.5-turbo-1106',
+  'chatgpt-4o-latest',
+  'gpt-5-chat-latest',
+  ...openaiResponsesReasoningModelIds,
+] as const;
+
+export type OpenAIResponsesModelId =
+  | 'chatgpt-4o-latest'
+  | 'gpt-3.5-turbo-0125'
+  | 'gpt-3.5-turbo-1106'
+  | 'gpt-3.5-turbo'
+  | 'gpt-4-0613'
+  | 'gpt-4-turbo-2024-04-09'
+  | 'gpt-4-turbo'
+  | 'gpt-4.1-2025-04-14'
+  | 'gpt-4.1-mini-2025-04-14'
+  | 'gpt-4.1-mini'
+  | 'gpt-4.1-nano-2025-04-14'
+  | 'gpt-4.1-nano'
+  | 'gpt-4.1'
+  | 'gpt-4'
+  | 'gpt-4o-2024-05-13'
+  | 'gpt-4o-2024-08-06'
+  | 'gpt-4o-2024-11-20'
+  | 'gpt-4o-mini-2024-07-18'
+  | 'gpt-4o-mini'
+  | 'gpt-4o'
+  | 'gpt-5.1'
+  | 'gpt-5.1-chat-latest'
+  | 'gpt-5.1-codex-mini'
+  | 'gpt-5.1-codex'
+  | 'gpt-5.1-codex-max'
+  | 'gpt-5.2'
+  | 'gpt-5.2-chat-latest'
+  | 'gpt-5.2-pro'
+  | 'gpt-5-2025-08-07'
+  | 'gpt-5-chat-latest'
+  | 'gpt-5-codex'
+  | 'gpt-5-mini-2025-08-07'
+  | 'gpt-5-mini'
+  | 'gpt-5-nano-2025-08-07'
+  | 'gpt-5-nano'
+  | 'gpt-5-pro-2025-10-06'
+  | 'gpt-5-pro'
+  | 'gpt-5'
+  | 'o1-2024-12-17'
+  | 'o1'
+  | 'o3-2025-04-16'
+  | 'o3-mini-2025-01-31'
+  | 'o3-mini'
+  | 'o3'
+  | (string & {});
+
+// TODO AI SDK 6: use optional here instead of nullish
+export const openaiResponsesProviderOptionsSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      /**
+       * The ID of the OpenAI Conversation to continue.
+       * You must create a conversation first via the OpenAI API.
+       * Cannot be used in conjunction with `previousResponseId`.
+       * Defaults to `undefined`.
+       * @see https://platform.openai.com/docs/api-reference/conversations/create
+       */
+      conversation: z.string().nullish(),
+
+      /**
+       * The set of extra fields to include in the response (advanced, usually not needed).
+       * Example values: 'reasoning.encrypted_content', 'file_search_call.results', 'message.output_text.logprobs'.
+       */
+      include: z
+        .array(
+          z.enum([
+            'reasoning.encrypted_content', // handled internally by default, only needed for unknown reasoning models
+            'file_search_call.results',
+            'message.output_text.logprobs',
+          ]),
+        )
+        .nullish(),
+
+      /**
+       * Instructions for the model.
+       * They can be used to change the system or developer message when continuing a conversation using the `previousResponseId` option.
+       * Defaults to `undefined`.
+       */
+      instructions: z.string().nullish(),
+
+      /**
+       * Return the log probabilities of the tokens. Including logprobs will increase
+       * the response size and can slow down response times. However, it can
+       * be useful to better understand how the model is behaving.
+       *
+       * Setting to true will return the log probabilities of the tokens that
+       * were generated.
+       *
+       * Setting to a number will return the log probabilities of the top n
+       * tokens that were generated.
+       *
+       * @see https://platform.openai.com/docs/api-reference/responses/create
+       * @see https://cookbook.openai.com/examples/using_logprobs
+       */
+      logprobs: z
+        .union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)])
+        .optional(),
+
+      /**
+       * The maximum number of total calls to built-in tools that can be processed in a response.
+       * This maximum number applies across all built-in tool calls, not per individual tool.
+       * Any further attempts to call a tool by the model will be ignored.
+       */
+      maxToolCalls: z.number().nullish(),
+
+      /**
+       * Additional metadata to store with the generation.
+       */
+      metadata: z.any().nullish(),
+
+      /**
+       * Whether to use parallel tool calls. Defaults to `true`.
+       */
+      parallelToolCalls: z.boolean().nullish(),
+
+      /**
+       * The ID of the previous response. You can use it to continue a conversation.
+       * Defaults to `undefined`.
+       */
+      previousResponseId: z.string().nullish(),
+
+      /**
+       * Sets a cache key to tie this prompt to cached prefixes for better caching performance.
+       */
+      promptCacheKey: z.string().nullish(),
+
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z.enum(['in_memory', '24h']).nullish(),
+
+      /**
+       * Reasoning effort for reasoning models. Defaults to `medium`. If you use
+       * `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored.
+       * Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
+       *
+       * The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
+       * models. Also, the 'xhigh' type for `reasoningEffort` is only available for
+       * OpenAI's GPT-5.1-Codex-Max model. Setting `reasoningEffort` to 'none' or 'xhigh' with unsupported models will result in
+       * an error.
+       */
+      reasoningEffort: z.string().nullish(),
+
+      /**
+       * Controls reasoning summary output from the model.
+       * Set to "auto" to automatically receive the richest level available,
+       * or "detailed" for comprehensive summaries.
+       */
+      reasoningSummary: z.string().nullish(),
+
+      /**
+       * The identifier for safety monitoring and tracking.
+       */
+      safetyIdentifier: z.string().nullish(),
+
+      /**
+       * Service tier for the request.
+       * Set to 'flex' for 50% cheaper processing at the cost of increased latency (available for o3, o4-mini, and gpt-5 models).
+       * Set to 'priority' for faster processing with Enterprise access (available for gpt-4, gpt-5, gpt-5-mini, o3, o4-mini; gpt-5-nano is not supported).
+       *
+       * Defaults to 'auto'.
+       */
+      serviceTier: z.enum(['auto', 'flex', 'priority', 'default']).nullish(),
+
+      /**
+       * Whether to store the generation. Defaults to `true`.
+       */
+      store: z.boolean().nullish(),
+
+      /**
+       * Whether to use strict JSON schema validation.
+       * Defaults to `true`.
+       */
+      strictJsonSchema: z.boolean().nullish(),
+
+      /**
+       * Controls the verbosity of the model's responses. Lower values ('low') will result
+       * in more concise responses, while higher values ('high') will result in more verbose responses.
+       * Valid values: 'low', 'medium', 'high'.
+       */
+      textVerbosity: z.enum(['low', 'medium', 'high']).nullish(),
+
+      /**
+       * Controls output truncation. 'auto' (default) performs truncation automatically;
+       * 'disabled' turns truncation off.
+       */
+      truncation: z.enum(['auto', 'disabled']).nullish(),
+
+      /**
+       * A unique identifier representing your end-user, which can help OpenAI to
+       * monitor and detect abuse.
+       * Defaults to `undefined`.
+       * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+       */
+      user: z.string().nullish(),
+
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: z.enum(['system', 'developer', 'remove']).optional(),
+
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: z.boolean().optional(),
+    }),
+  ),
+);
+
+export type OpenAIResponsesProviderOptions = InferSchema<
+  typeof openaiResponsesProviderOptionsSchema
+>;
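
For orientation, a minimal usage sketch for the schema above. It assumes the standard AI SDK call shape (`generateText` from the `ai` package, `openai.responses(...)` as the model factory) and that `OpenAIResponsesProviderOptions` is re-exported from the package root; the prompt, response ID, and option values are illustrative, not part of this diff.

import { generateText } from 'ai';
import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

const options = {
  // true returns logprobs for each generated token; a number (1..20,
  // bounded by TOP_LOGPROBS_MAX) returns the top-n alternatives per position.
  logprobs: 5,
  // 'none' and 'xhigh' are restricted to specific GPT-5.1 models (see above).
  reasoningEffort: 'low',
  // Continue from an earlier response; cannot be combined with `conversation`.
  previousResponseId: 'resp_123', // hypothetical ID
} satisfies OpenAIResponsesProviderOptions;

const { text } = await generateText({
  model: openai.responses('gpt-5-mini'),
  prompt: 'Explain prompt caching in two sentences.',
  providerOptions: { openai: options },
});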
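
The `systemMessageMode` and `forceReasoning` options target models the allowlists above do not cover. A sketch under assumptions: `createOpenAI` pointed at a made-up OpenAI-compatible gateway URL, with a made-up model ID and env var.

import { generateText } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

// Assumption: an OpenAI-compatible proxy; URL, key, and model ID are placeholders.
const gateway = createOpenAI({
  baseURL: 'https://llm-gateway.example.com/v1',
  apiKey: process.env.GATEWAY_API_KEY,
});

const { text } = await generateText({
  model: gateway.responses('stealth-reasoner'), // not in openaiResponsesReasoningModelIds
  prompt: 'Plan the migration steps.',
  providerOptions: {
    openai: {
      // Apply reasoning-model parameter compatibility rules despite the
      // unrecognized model ID; per the docs above this also defaults
      // systemMessageMode to 'developer', made explicit here.
      forceReasoning: true,
      systemMessageMode: 'developer',
    },
  },
});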