@ai-sdk/openai 2.0.0-beta.1 → 2.0.0-beta.10

This diff compares the publicly released contents of the two package versions as published to their registry, and is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -26,12 +26,12 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);
 
  // src/openai-provider.ts
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
 
  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_v45 = require("zod/v4");
 
  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
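Every `require("zod")` in the bundle now resolves to the `zod/v4` subpath instead, which is why all the `import_zod*` bindings are renamed to `import_v4*` throughout this diff. A minimal sketch of the same migration in user code, assuming zod 3.25+ (the first release that ships the `zod/v4` subpath export):

```ts
// Before (zod v3 API):
// import { z } from "zod";

// After (zod v4 API via the subpath export, available from zod 3.25 onward):
import { z } from "zod/v4";

// One practical consequence visible throughout this diff: v4's z.record()
// requires an explicit key schema, so single-argument record() calls
// gain a first argument such as z.string().
const metadata = z.record(z.string(), z.string());
```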
@@ -242,15 +242,15 @@ function mapOpenAIFinishReason(finishReason) {
  }
 
  // src/openai-chat-options.ts
- var import_zod = require("zod");
- var openaiProviderOptions = import_zod.z.object({
+ var import_v4 = require("zod/v4");
+ var openaiProviderOptions = import_v4.z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -260,63 +260,69 @@ var openaiProviderOptions = import_zod.z.object({
  * Setting to a number will return the log probabilities of the top n
  * tokens that were generated.
  */
- logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
- parallelToolCalls: import_zod.z.boolean().optional(),
+ parallelToolCalls: import_v4.z.boolean().optional(),
  /**
  * A unique identifier representing your end-user, which can help OpenAI to
  * monitor and detect abuse.
  */
- user: import_zod.z.string().optional(),
+ user: import_v4.z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
- maxCompletionTokens: import_zod.z.number().optional(),
+ maxCompletionTokens: import_v4.z.number().optional(),
  /**
  * Whether to enable persistence in responses API.
  */
- store: import_zod.z.boolean().optional(),
+ store: import_v4.z.boolean().optional(),
  /**
  * Metadata to associate with the request.
  */
- metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: import_zod.z.record(import_zod.z.any()).optional(),
+ prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
  /**
  * Whether to use structured outputs.
  *
  * @default true
  */
- structuredOutputs: import_zod.z.boolean().optional(),
+ structuredOutputs: import_v4.z.boolean().optional(),
  /**
  * Service tier for the request. Set to 'flex' for 50% cheaper processing
  * at the cost of increased latency. Only available for o3 and o4-mini models.
  *
  * @default 'auto'
  */
- serviceTier: import_zod.z.enum(["auto", "flex"]).optional()
+ serviceTier: import_v4.z.enum(["auto", "flex"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default false
+ */
+ strictJsonSchema: import_v4.z.boolean().optional()
  });
 
  // src/openai-error.ts
- var import_zod2 = require("zod");
+ var import_v42 = require("zod/v4");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
+ var openaiErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod2.z.string().nullish(),
- param: import_zod2.z.any().nullish(),
- code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
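Besides the zod/v4 swap, this hunk adds the new `strictJsonSchema` provider option (default `false`) and tightens `metadata` to OpenAI's documented limits: keys of up to 64 characters mapping to values of up to 512 characters. A usage sketch, assuming the AI SDK v5 `generateText` call surface; the option names come directly from the schema above:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai("gpt-4o"),
  prompt: "Summarize the quarterly report.",
  providerOptions: {
    openai: {
      // Opt back into strict JSON schema validation (was implicitly true
      // in beta.1, now defaults to false).
      strictJsonSchema: true,
      // Keys are limited to 64 chars, values to 512 chars.
      metadata: { team: "billing", requestKind: "summary" },
    },
  },
});
```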
@@ -329,77 +335,78 @@ var import_provider2 = require("@ai-sdk/provider");
 
  // src/tool/file-search.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
- var fileSearchArgsSchema = import_zod3.z.object({
+ var import_v43 = require("zod/v4");
+ var fileSearchArgsSchema = import_v43.z.object({
  /**
  * List of vector store IDs to search through. If not provided, searches all available vector stores.
  */
- vectorStoreIds: import_zod3.z.array(import_zod3.z.string()).optional(),
+ vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
  /**
  * Maximum number of search results to return. Defaults to 10.
  */
- maxResults: import_zod3.z.number().optional(),
+ maxResults: import_v43.z.number().optional(),
  /**
  * Type of search to perform. Defaults to 'auto'.
  */
- searchType: import_zod3.z.enum(["auto", "keyword", "semantic"]).optional()
+ searchType: import_v43.z.enum(["auto", "keyword", "semantic"]).optional()
  });
  var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
  id: "openai.file_search",
  name: "file_search",
- inputSchema: import_zod3.z.object({
- query: import_zod3.z.string()
+ inputSchema: import_v43.z.object({
+ query: import_v43.z.string()
  })
  });
 
  // src/tool/web-search-preview.ts
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
- var webSearchPreviewArgsSchema = import_zod4.z.object({
+ var import_v44 = require("zod/v4");
+ var webSearchPreviewArgsSchema = import_v44.z.object({
  /**
  * Search context size to use for the web search.
  * - high: Most comprehensive context, highest cost, slower response
  * - medium: Balanced context, cost, and latency (default)
  * - low: Least context, lowest cost, fastest response
  */
- searchContextSize: import_zod4.z.enum(["low", "medium", "high"]).optional(),
+ searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
  /**
  * User location information to provide geographically relevant search results.
  */
- userLocation: import_zod4.z.object({
+ userLocation: import_v44.z.object({
  /**
  * Type of location (always 'approximate')
  */
- type: import_zod4.z.literal("approximate"),
+ type: import_v44.z.literal("approximate"),
  /**
  * Two-letter ISO country code (e.g., 'US', 'GB')
  */
- country: import_zod4.z.string().optional(),
+ country: import_v44.z.string().optional(),
  /**
  * City name (free text, e.g., 'Minneapolis')
  */
- city: import_zod4.z.string().optional(),
+ city: import_v44.z.string().optional(),
  /**
  * Region name (free text, e.g., 'Minnesota')
  */
- region: import_zod4.z.string().optional(),
+ region: import_v44.z.string().optional(),
  /**
  * IANA timezone (e.g., 'America/Chicago')
  */
- timezone: import_zod4.z.string().optional()
+ timezone: import_v44.z.string().optional()
  }).optional()
  });
  var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
  id: "openai.web_search_preview",
  name: "web_search_preview",
- inputSchema: import_zod4.z.object({})
+ inputSchema: import_v44.z.object({})
  });
 
  // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
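The file-search and web-search-preview tool factories keep their ids (`openai.file_search`, `openai.web_search_preview`) and now validate their arguments with the zod/v4 schemas above. A sketch of passing the file search tool, assuming the provider exposes the factories under `openai.tools` as in other AI SDK provider packages:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "What does the employee handbook say about travel?",
  tools: {
    // All three args are optional per fileSearchArgsSchema above.
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ["vs_example123"], // placeholder vector store id
      maxResults: 10,
      searchType: "semantic",
    }),
  },
});
```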
@@ -416,7 +423,7 @@ function prepareTools({
  name: tool.name,
  description: tool.description,
  parameters: tool.inputSchema,
- strict: structuredOutputs ? true : void 0
+ strict: structuredOutputs ? strictJsonSchema : void 0
  }
  });
  break;
@@ -508,7 +515,7 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d;
  const warnings = [];
  const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "openai",
@@ -536,6 +543,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -551,18 +559,15 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
- // TODO convert into provider option
- structuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_c = responseFormat.name) != null ? _c : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" }
- ) : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
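Note the behavioral change buried in this rewrite: the `strict` flag sent with `json_schema` response formats used to be hard-coded `true` and now follows `strictJsonSchema`, which defaults to `false`. A sketch of the chat request body this code assembles when a JSON response format with a schema is active (placeholder schema contents):

```ts
// Shape of the request body produced above for responseFormat.type === "json"
// with structuredOutputs enabled and a schema present.
const requestBody = {
  model: "gpt-4o",
  response_format: {
    type: "json_schema",
    json_schema: {
      schema: { type: "object", properties: {} }, // responseFormat.schema
      strict: false, // openaiOptions.strictJsonSchema ?? false (previously: true)
      name: "response", // responseFormat.name ?? "response"
      description: undefined, // responseFormat.description
    },
  },
};
```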
@@ -661,7 +666,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -943,97 +949,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = import_zod5.z.object({
- prompt_tokens: import_zod5.z.number().nullish(),
- completion_tokens: import_zod5.z.number().nullish(),
- total_tokens: import_zod5.z.number().nullish(),
- prompt_tokens_details: import_zod5.z.object({
- cached_tokens: import_zod5.z.number().nullish()
+ var openaiTokenUsageSchema = import_v45.z.object({
+ prompt_tokens: import_v45.z.number().nullish(),
+ completion_tokens: import_v45.z.number().nullish(),
+ total_tokens: import_v45.z.number().nullish(),
+ prompt_tokens_details: import_v45.z.object({
+ cached_tokens: import_v45.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod5.z.object({
- reasoning_tokens: import_zod5.z.number().nullish(),
- accepted_prediction_tokens: import_zod5.z.number().nullish(),
- rejected_prediction_tokens: import_zod5.z.number().nullish()
+ completion_tokens_details: import_v45.z.object({
+ reasoning_tokens: import_v45.z.number().nullish(),
+ accepted_prediction_tokens: import_v45.z.number().nullish(),
+ rejected_prediction_tokens: import_v45.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- created: import_zod5.z.number().nullish(),
- model: import_zod5.z.string().nullish(),
- choices: import_zod5.z.array(
- import_zod5.z.object({
- message: import_zod5.z.object({
- role: import_zod5.z.literal("assistant").nullish(),
- content: import_zod5.z.string().nullish(),
- tool_calls: import_zod5.z.array(
- import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- type: import_zod5.z.literal("function"),
- function: import_zod5.z.object({
- name: import_zod5.z.string(),
- arguments: import_zod5.z.string()
+ var openaiChatResponseSchema = import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ message: import_v45.z.object({
+ role: import_v45.z.literal("assistant").nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function"),
+ function: import_v45.z.object({
+ name: import_v45.z.string(),
+ arguments: import_v45.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod5.z.number(),
- logprobs: import_zod5.z.object({
- content: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number(),
- top_logprobs: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number()
+ index: import_v45.z.number(),
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod5.z.string().nullish()
+ finish_reason: import_v45.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod5.z.union([
- import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- created: import_zod5.z.number().nullish(),
- model: import_zod5.z.string().nullish(),
- choices: import_zod5.z.array(
- import_zod5.z.object({
- delta: import_zod5.z.object({
- role: import_zod5.z.enum(["assistant"]).nullish(),
- content: import_zod5.z.string().nullish(),
- tool_calls: import_zod5.z.array(
- import_zod5.z.object({
- index: import_zod5.z.number(),
- id: import_zod5.z.string().nullish(),
- type: import_zod5.z.literal("function").nullish(),
- function: import_zod5.z.object({
- name: import_zod5.z.string().nullish(),
- arguments: import_zod5.z.string().nullish()
+ var openaiChatChunkSchema = import_v45.z.union([
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ delta: import_v45.z.object({
+ role: import_v45.z.enum(["assistant"]).nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ index: import_v45.z.number(),
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function").nullish(),
+ function: import_v45.z.object({
+ name: import_v45.z.string().nullish(),
+ arguments: import_v45.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod5.z.object({
- content: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number(),
- top_logprobs: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number()
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod5.z.string().nullish(),
- index: import_zod5.z.number()
+ finish_reason: import_v45.z.string().nullish(),
+ index: import_v45.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1088,7 +1094,7 @@ var reasoningModels = {
 
  // src/openai-completion-language-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
+ var import_v47 = require("zod/v4");
 
  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1166,12 +1172,12 @@ ${user}:`]
  }
 
  // src/openai-completion-options.ts
- var import_zod6 = require("zod");
- var openaiCompletionProviderOptions = import_zod6.z.object({
+ var import_v46 = require("zod/v4");
+ var openaiCompletionProviderOptions = import_v46.z.object({
  /**
  Echo back the prompt in addition to the completion.
  */
- echo: import_zod6.z.boolean().optional(),
+ echo: import_v46.z.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.
 
@@ -1186,16 +1192,16 @@ var openaiCompletionProviderOptions = import_zod6.z.object({
  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
  */
- logitBias: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.number()).optional(),
+ logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
  */
- suffix: import_zod6.z.string().optional(),
+ suffix: import_v46.z.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: import_zod6.z.string().optional(),
+ user: import_v46.z.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
@@ -1205,7 +1211,7 @@ var openaiCompletionProviderOptions = import_zod6.z.object({
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
  */
- logprobs: import_zod6.z.union([import_zod6.z.boolean(), import_zod6.z.number()]).optional()
+ logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
  });
 
  // src/openai-completion-language-model.ts
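The completion provider options migrate to zod/v4 as well; note that `logitBias` here keys by plain string token ids (the chat variant coerces keys to numbers). A sketch using the `<|endoftext|>` example from the doc comment above, assuming the AI SDK v5 completion model surface:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai.completion("gpt-3.5-turbo-instruct"),
  prompt: "Write a limerick about build tooling.",
  providerOptions: {
    openai: {
      // Prevent the <|endoftext|> token (id 50256) from being generated.
      logitBias: { "50256": -100 },
      echo: false,
    },
  },
});
```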
@@ -1437,42 +1443,42 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod7.z.object({
- prompt_tokens: import_zod7.z.number(),
- completion_tokens: import_zod7.z.number(),
- total_tokens: import_zod7.z.number()
+ var usageSchema = import_v47.z.object({
+ prompt_tokens: import_v47.z.number(),
+ completion_tokens: import_v47.z.number(),
+ total_tokens: import_v47.z.number()
  });
- var openaiCompletionResponseSchema = import_zod7.z.object({
- id: import_zod7.z.string().nullish(),
- created: import_zod7.z.number().nullish(),
- model: import_zod7.z.string().nullish(),
- choices: import_zod7.z.array(
- import_zod7.z.object({
- text: import_zod7.z.string(),
- finish_reason: import_zod7.z.string(),
- logprobs: import_zod7.z.object({
- tokens: import_zod7.z.array(import_zod7.z.string()),
- token_logprobs: import_zod7.z.array(import_zod7.z.number()),
- top_logprobs: import_zod7.z.array(import_zod7.z.record(import_zod7.z.string(), import_zod7.z.number())).nullish()
+ var openaiCompletionResponseSchema = import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
  usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = import_zod7.z.union([
- import_zod7.z.object({
- id: import_zod7.z.string().nullish(),
- created: import_zod7.z.number().nullish(),
- model: import_zod7.z.string().nullish(),
- choices: import_zod7.z.array(
- import_zod7.z.object({
- text: import_zod7.z.string(),
- finish_reason: import_zod7.z.string().nullish(),
- index: import_zod7.z.number(),
- logprobs: import_zod7.z.object({
- tokens: import_zod7.z.array(import_zod7.z.string()),
- token_logprobs: import_zod7.z.array(import_zod7.z.number()),
- top_logprobs: import_zod7.z.array(import_zod7.z.record(import_zod7.z.string(), import_zod7.z.number())).nullish()
+ var openaiCompletionChunkSchema = import_v47.z.union([
+ import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string().nullish(),
+ index: import_v47.z.number(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
@@ -1484,21 +1490,21 @@ var openaiCompletionChunkSchema = import_zod7.z.union([
  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod9 = require("zod");
+ var import_v49 = require("zod/v4");
 
  // src/openai-embedding-options.ts
- var import_zod8 = require("zod");
- var openaiEmbeddingProviderOptions = import_zod8.z.object({
+ var import_v48 = require("zod/v4");
+ var openaiEmbeddingProviderOptions = import_v48.z.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
  */
- dimensions: import_zod8.z.number().optional(),
+ dimensions: import_v48.z.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: import_zod8.z.string().optional()
+ user: import_v48.z.string().optional()
  });
 
  // src/openai-embedding-model.ts
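The embedding options move to zod/v4 unchanged in shape. For context, a sketch of how `dimensions` flows through, assuming the AI SDK v5 `embed` helper:

```ts
import { openai } from "@ai-sdk/openai";
import { embed } from "ai";

const { embedding } = await embed({
  model: openai.textEmbeddingModel("text-embedding-3-small"),
  value: "sunny day at the beach",
  providerOptions: {
    openai: {
      // Only supported on text-embedding-3 and later models.
      dimensions: 512,
    },
  },
});
```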
@@ -1564,14 +1570,14 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod9.z.object({
- data: import_zod9.z.array(import_zod9.z.object({ embedding: import_zod9.z.array(import_zod9.z.number()) })),
- usage: import_zod9.z.object({ prompt_tokens: import_zod9.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_v49.z.object({
+ data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
+ usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
  });
 
  // src/openai-image-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod10 = require("zod");
+ var import_v410 = require("zod/v4");
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1659,9 +1665,9 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod10.z.object({
- data: import_zod10.z.array(
- import_zod10.z.object({ b64_json: import_zod10.z.string(), revised_prompt: import_zod10.z.string().optional() })
+ var openaiImageResponseSchema = import_v410.z.object({
+ data: import_v410.z.array(
+ import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
  )
  });
 
@@ -1673,33 +1679,33 @@ var openaiTools = {
 
  // src/openai-transcription-model.ts
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
- var import_zod12 = require("zod");
+ var import_v412 = require("zod/v4");
 
  // src/openai-transcription-options.ts
- var import_zod11 = require("zod");
- var openAITranscriptionProviderOptions = import_zod11.z.object({
+ var import_v411 = require("zod/v4");
+ var openAITranscriptionProviderOptions = import_v411.z.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: import_zod11.z.array(import_zod11.z.string()).optional(),
+ include: import_v411.z.array(import_v411.z.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: import_zod11.z.string().optional(),
+ language: import_v411.z.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: import_zod11.z.string().optional(),
+ prompt: import_v411.z.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: import_zod11.z.number().min(0).max(1).default(0).optional(),
+ temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: import_zod11.z.array(import_zod11.z.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
  });
 
  // src/openai-transcription-model.ts
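Transcription options likewise only change their zod import. A sketch of supplying them, assuming the AI SDK's `experimental_transcribe` helper:

```ts
import { openai } from "@ai-sdk/openai";
import { experimental_transcribe as transcribe } from "ai";
import { readFile } from "node:fs/promises";

const { text } = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("meeting.mp3"),
  providerOptions: {
    openai: {
      language: "en", // ISO-639-1
      temperature: 0,
      timestampGranularities: ["word"],
    },
  },
});
```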
@@ -1847,29 +1853,33 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod12.z.object({
- text: import_zod12.z.string(),
- language: import_zod12.z.string().nullish(),
- duration: import_zod12.z.number().nullish(),
- words: import_zod12.z.array(
- import_zod12.z.object({
- word: import_zod12.z.string(),
- start: import_zod12.z.number(),
- end: import_zod12.z.number()
+ var openaiTranscriptionResponseSchema = import_v412.z.object({
+ text: import_v412.z.string(),
+ language: import_v412.z.string().nullish(),
+ duration: import_v412.z.number().nullish(),
+ words: import_v412.z.array(
+ import_v412.z.object({
+ word: import_v412.z.string(),
+ start: import_v412.z.number(),
+ end: import_v412.z.number()
  })
  ).nullish()
  });
 
  // src/responses/openai-responses-language-model.ts
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
- var import_zod13 = require("zod");
+ var import_provider8 = require("@ai-sdk/provider");
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_v414 = require("zod/v4");
 
  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
- function convertToOpenAIResponsesMessages({
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_v413 = require("zod/v4");
+ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
+ var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1904,7 +1914,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1916,7 +1926,7 @@ function convertToOpenAIResponsesMessages({
  type: "input_image",
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
  // OpenAI specific extension: image detail
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
@@ -1926,7 +1936,7 @@ function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1941,12 +1951,14 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "assistant": {
+ const reasoningMessages = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
@@ -1958,7 +1970,8 @@ function convertToOpenAIResponsesMessages({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.input)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
  });
  break;
  }
@@ -1969,6 +1982,43 @@ function convertToOpenAIResponsesMessages({
  });
  break;
  }
+ case "reasoning": {
+ const providerOptions = await (0, import_provider_utils10.parseProviderOptions)({
+ provider: "openai",
+ providerOptions: part.providerOptions,
+ schema: openaiResponsesReasoningProviderOptionsSchema
+ });
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+ if (reasoningId != null) {
+ const existingReasoningMessage = reasoningMessages[reasoningId];
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({ type: "summary_text", text: part.text });
+ } else if (existingReasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (existingReasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ messages.push(reasoningMessages[reasoningId]);
+ } else {
+ existingReasoningMessage.summary.push(...summaryParts);
+ }
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ break;
+ }
  }
  }
  break;
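This is the core of the new reasoning round-trip: assistant `reasoning` parts whose provider options carry an OpenAI `itemId` are reassembled into a single `reasoning` input item, merging consecutive summary texts; parts without an `itemId` are skipped with a warning. A sketch of the message part shape this branch consumes (placeholder ids):

```ts
// Two reasoning parts sharing one itemId fold into a single reasoning
// input item with two summary_text entries.
const assistantParts = [
  {
    type: "reasoning",
    text: "First summary section...",
    providerOptions: {
      openai: {
        itemId: "rs_abc123", // placeholder reasoning item id
        reasoningEncryptedContent: "gAAAA...", // present when requested via include
      },
    },
  },
  {
    type: "reasoning",
    text: "Second summary section...",
    providerOptions: { openai: { itemId: "rs_abc123" } },
  },
];
```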
@@ -2004,6 +2054,10 @@ function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
+ var openaiResponsesReasoningProviderOptionsSchema = import_v413.z.object({
+ itemId: import_v413.z.string().nullish(),
+ reasoningEncryptedContent: import_v413.z.string().nullish()
+ });
 
  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
@@ -2028,7 +2082,7 @@ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
  tools,
  toolChoice,
- strict
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -2044,11 +2098,21 @@ function prepareResponsesTools({
  name: tool.name,
  description: tool.description,
  parameters: tool.inputSchema,
- strict: strict ? true : void 0
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_results: args.maxResults,
+ search_type: args.searchType
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools2.push({
  type: "web_search_preview",
@@ -2078,7 +2142,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools2,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
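`prepareResponsesTools` now maps `openai.file_search` to the Responses API wire format and accepts it as a forced tool choice. A sketch of both sides of that mapping (snake_case fields per the Responses API):

```ts
// What gets pushed onto the request for a file_search tool:
const wireTool = {
  type: "file_search",
  vector_store_ids: ["vs_example123"],
  max_results: 10,
  search_type: "semantic",
};

// Forcing the tool from the AI SDK side; prepareResponsesTools
// translates this into { type: "file_search" }.
const toolChoice = { type: "tool", toolName: "file_search" } as const;
```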
@@ -2142,17 +2206,17 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = await (0, import_provider_utils10.parseProviderOptions)({
+ const openaiOptions = await (0, import_provider_utils11.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2163,7 +2227,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrict,
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -2178,6 +2242,7 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
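A new `include` provider option is forwarded verbatim to the Responses API. A sketch, assuming Responses API include values such as `reasoning.encrypted_content`, which pairs with the encrypted-content round-tripping added elsewhere in this diff:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("o3"),
  prompt: "Plan the database migration.",
  providerOptions: {
    openai: {
      reasoningSummary: "auto",
      // Forwarded as-is to the Responses API `include` parameter.
      include: ["reasoning.encrypted_content"],
    },
  },
});
```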
@@ -2210,6 +2275,21 @@ var OpenAIResponsesLanguageModel = class {
  details: "topP is not supported for reasoning models"
  });
  }
+ } else {
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningEffort",
+ details: "reasoningEffort is not supported for non-reasoning models"
+ });
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningSummary",
+ details: "reasoningSummary is not supported for non-reasoning models"
+ });
+ }
  }
  if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
  warnings.push({
@@ -2226,7 +2306,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
- strict: isStrict
+ strictJsonSchema
  });
  return {
  args: {
@@ -2238,101 +2318,137 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args: body, warnings } = await this.getArgs(options);
+ const url = this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ });
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils10.postJsonToApi)({
- url: this.config.url({
- path: "/responses",
- modelId: this.modelId
- }),
- headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
+ } = await (0, import_provider_utils11.postJsonToApi)({
+ url,
+ headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils10.createJsonResponseHandler)(
- import_zod13.z.object({
- id: import_zod13.z.string(),
- created_at: import_zod13.z.number(),
- model: import_zod13.z.string(),
- output: import_zod13.z.array(
- import_zod13.z.discriminatedUnion("type", [
- import_zod13.z.object({
- type: import_zod13.z.literal("message"),
- role: import_zod13.z.literal("assistant"),
- content: import_zod13.z.array(
- import_zod13.z.object({
- type: import_zod13.z.literal("output_text"),
- text: import_zod13.z.string(),
- annotations: import_zod13.z.array(
- import_zod13.z.object({
- type: import_zod13.z.literal("url_citation"),
- start_index: import_zod13.z.number(),
- end_index: import_zod13.z.number(),
- url: import_zod13.z.string(),
- title: import_zod13.z.string()
+ successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
+ import_v414.z.object({
+ id: import_v414.z.string(),
+ created_at: import_v414.z.number(),
+ error: import_v414.z.object({
+ code: import_v414.z.string(),
+ message: import_v414.z.string()
+ }).nullish(),
+ model: import_v414.z.string(),
+ output: import_v414.z.array(
+ import_v414.z.discriminatedUnion("type", [
+ import_v414.z.object({
+ type: import_v414.z.literal("message"),
+ role: import_v414.z.literal("assistant"),
+ id: import_v414.z.string(),
+ content: import_v414.z.array(
+ import_v414.z.object({
+ type: import_v414.z.literal("output_text"),
+ text: import_v414.z.string(),
+ annotations: import_v414.z.array(
+ import_v414.z.object({
+ type: import_v414.z.literal("url_citation"),
+ start_index: import_v414.z.number(),
+ end_index: import_v414.z.number(),
+ url: import_v414.z.string(),
+ title: import_v414.z.string()
  })
  )
  })
  )
  }),
- import_zod13.z.object({
- type: import_zod13.z.literal("function_call"),
- call_id: import_zod13.z.string(),
- name: import_zod13.z.string(),
- arguments: import_zod13.z.string()
+ import_v414.z.object({
+ type: import_v414.z.literal("function_call"),
+ call_id: import_v414.z.string(),
+ name: import_v414.z.string(),
+ arguments: import_v414.z.string(),
+ id: import_v414.z.string()
  }),
- import_zod13.z.object({
- type: import_zod13.z.literal("web_search_call"),
- id: import_zod13.z.string(),
- status: import_zod13.z.string().optional()
+ import_v414.z.object({
+ type: import_v414.z.literal("web_search_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.string().optional()
  }),
- import_zod13.z.object({
- type: import_zod13.z.literal("computer_call"),
- id: import_zod13.z.string(),
- status: import_zod13.z.string().optional()
+ import_v414.z.object({
+ type: import_v414.z.literal("computer_call"),
+ id: import_v414.z.string(),
+ status: import_v414.z.string().optional()
  }),
- import_zod13.z.object({
- type: import_zod13.z.literal("reasoning"),
- summary: import_zod13.z.array(
- import_zod13.z.object({
- type: import_zod13.z.literal("summary_text"),
- text: import_zod13.z.string()
+ import_v414.z.object({
+ type: import_v414.z.literal("reasoning"),
+ id: import_v414.z.string(),
+ encrypted_content: import_v414.z.string().nullish(),
+ summary: import_v414.z.array(
+ import_v414.z.object({
+ type: import_v414.z.literal("summary_text"),
+ text: import_v414.z.string()
  })
  )
  })
  ])
  ),
- incomplete_details: import_zod13.z.object({ reason: import_zod13.z.string() }).nullable(),
+ incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullable(),
  usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error) {
+ throw new import_provider8.APICallError({
+ message: response.error.message,
+ url,
+ requestBodyValues: body,
+ statusCode: 400,
+ responseHeaders,
+ responseBody: rawResponse,
+ isRetryable: false
+ });
+ }
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
  case "reasoning": {
- content.push({
- type: "reasoning",
- text: part.summary.map((summary) => summary.text).join()
- });
+ if (part.summary.length === 0) {
+ part.summary.push({ type: "summary_text", text: "" });
+ }
+ for (const summary of part.summary) {
+ content.push({
+ type: "reasoning",
+ text: summary.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id,
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
  break;
  }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils10.generateId)(),
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils11.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
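`doGenerate` now parses an optional top-level `error` object from the Responses payload and surfaces it as a non-retryable `APICallError` instead of silently returning partial output. A sketch of handling it at the call site; `APICallError` is exported by `@ai-sdk/provider`:

```ts
import { APICallError } from "@ai-sdk/provider";
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

try {
  await generateText({
    model: openai.responses("gpt-4o"),
    prompt: "Hello!",
  });
} catch (error) {
  if (APICallError.isInstance(error)) {
    // message comes from response.error.message; isRetryable is false.
    console.error("OpenAI Responses API error:", error.message);
  } else {
    throw error;
  }
}
```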
@@ -2345,7 +2461,12 @@ var OpenAIResponsesLanguageModel = class {
2345
2461
  type: "tool-call",
2346
2462
  toolCallId: part.call_id,
2347
2463
  toolName: part.name,
2348
- input: part.arguments
2464
+ input: part.arguments,
2465
+ providerMetadata: {
2466
+ openai: {
2467
+ itemId: part.id
2468
+ }
2469
+ }
2349
2470
  });
2350
2471
  break;
2351
2472
  }
@@ -2391,15 +2512,15 @@ var OpenAIResponsesLanguageModel = class {
2391
2512
  return {
2392
2513
  content,
2393
2514
  finishReason: mapOpenAIResponseFinishReason({
2394
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
2515
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
2395
2516
  hasToolCalls: content.some((part) => part.type === "tool-call")
2396
2517
  }),
2397
2518
  usage: {
2398
2519
  inputTokens: response.usage.input_tokens,
2399
2520
  outputTokens: response.usage.output_tokens,
2400
2521
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2401
- reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
2402
- cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
2522
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
2523
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
2403
2524
  },
2404
2525
  request: { body },
2405
2526
  response: {
@@ -2419,18 +2540,18 @@ var OpenAIResponsesLanguageModel = class {
2419
2540
  }
2420
2541
  async doStream(options) {
2421
2542
  const { args: body, warnings } = await this.getArgs(options);
2422
- const { responseHeaders, value: response } = await (0, import_provider_utils10.postJsonToApi)({
2543
+ const { responseHeaders, value: response } = await (0, import_provider_utils11.postJsonToApi)({
2423
2544
  url: this.config.url({
2424
2545
  path: "/responses",
2425
2546
  modelId: this.modelId
2426
2547
  }),
2427
- headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2548
+ headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
2428
2549
  body: {
2429
2550
  ...body,
2430
2551
  stream: true
2431
2552
  },
2432
2553
  failedResponseHandler: openaiFailedResponseHandler,
2433
- successfulResponseHandler: (0, import_provider_utils10.createEventSourceResponseHandler)(
2554
+ successfulResponseHandler: (0, import_provider_utils11.createEventSourceResponseHandler)(
2434
2555
  openaiResponsesChunkSchema
2435
2556
  ),
2436
2557
  abortSignal: options.abortSignal,
@@ -2446,6 +2567,7 @@ var OpenAIResponsesLanguageModel = class {
2446
2567
  let responseId = null;
2447
2568
  const ongoingToolCalls = {};
2448
2569
  let hasToolCalls = false;
2570
+ const activeReasoning = {};
2449
2571
  return {
2450
2572
  stream: response.pipeThrough(
2451
2573
  new TransformStream({
@@ -2453,7 +2575,7 @@ var OpenAIResponsesLanguageModel = class {
2453
2575
  controller.enqueue({ type: "stream-start", warnings });
2454
2576
  },
2455
2577
  transform(chunk, controller) {
2456
- var _a, _b, _c, _d, _e, _f, _g, _h;
2578
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2457
2579
  if (options.includeRawChunks) {
2458
2580
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2459
2581
  }
@@ -2497,12 +2619,27 @@ var OpenAIResponsesLanguageModel = class {
2497
2619
  } else if (value.item.type === "message") {
2498
2620
  controller.enqueue({
2499
2621
  type: "text-start",
2500
- id: value.item.id
2622
+ id: value.item.id,
2623
+ providerMetadata: {
2624
+ openai: {
2625
+ itemId: value.item.id
2626
+ }
2627
+ }
2501
2628
  });
2502
- } else if (value.item.type === "reasoning") {
2629
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2630
+ activeReasoning[value.item.id] = {
2631
+ encryptedContent: value.item.encrypted_content,
2632
+ summaryParts: [0]
2633
+ };
2503
2634
  controller.enqueue({
2504
2635
  type: "reasoning-start",
2505
- id: value.item.id
2636
+ id: `${value.item.id}:0`,
2637
+ providerMetadata: {
2638
+ openai: {
2639
+ itemId: value.item.id,
2640
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
2641
+ }
2642
+ }
2506
2643
  });
2507
2644
  }
2508
2645
  } else if (isResponseOutputItemDoneChunk(value)) {
@@ -2517,7 +2654,12 @@ var OpenAIResponsesLanguageModel = class {
2517
2654
  type: "tool-call",
2518
2655
  toolCallId: value.item.call_id,
2519
2656
  toolName: value.item.name,
2520
- input: value.item.arguments
2657
+ input: value.item.arguments,
2658
+ providerMetadata: {
2659
+ openai: {
2660
+ itemId: value.item.id
2661
+ }
2662
+ }
2521
2663
  });
2522
2664
  } else if (value.item.type === "web_search_call") {
2523
2665
  ongoingToolCalls[value.output_index] = void 0;
@@ -2572,11 +2714,21 @@ var OpenAIResponsesLanguageModel = class {
2572
2714
  type: "text-end",
2573
2715
  id: value.item.id
2574
2716
  });
2575
- } else if (value.item.type === "reasoning") {
2576
- controller.enqueue({
2577
- type: "reasoning-end",
2578
- id: value.item.id
2579
- });
2717
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2718
+ const activeReasoningPart = activeReasoning[value.item.id];
2719
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
2720
+ controller.enqueue({
2721
+ type: "reasoning-end",
2722
+ id: `${value.item.id}:${summaryIndex}`,
2723
+ providerMetadata: {
2724
+ openai: {
2725
+ itemId: value.item.id,
2726
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2727
+ }
2728
+ }
2729
+ });
2730
+ }
2731
+ delete activeReasoning[value.item.id];
2580
2732
  }
2581
2733
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2582
2734
  const toolCall = ongoingToolCalls[value.output_index];
@@ -2601,30 +2753,53 @@ var OpenAIResponsesLanguageModel = class {
              id: value.item_id,
              delta: value.delta
            });
+         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+           if (value.summary_index > 0) {
+             (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+               value.summary_index
+             );
+             controller.enqueue({
+               type: "reasoning-start",
+               id: `${value.item_id}:${value.summary_index}`,
+               providerMetadata: {
+                 openai: {
+                   itemId: value.item_id,
+                   reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+                 }
+               }
+             });
+           }
          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
            controller.enqueue({
              type: "reasoning-delta",
+             id: `${value.item_id}:${value.summary_index}`,
              delta: value.delta,
-             id: value.item_id
+             providerMetadata: {
+               openai: {
+                 itemId: value.item_id
+               }
+             }
            });
          } else if (isResponseFinishedChunk(value)) {
            finishReason = mapOpenAIResponseFinishReason({
-             finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+             finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
              hasToolCalls
            });
            usage.inputTokens = value.response.usage.input_tokens;
            usage.outputTokens = value.response.usage.output_tokens;
            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-           usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
-           usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
+           usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+           usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
          } else if (isResponseAnnotationAddedChunk(value)) {
            controller.enqueue({
              type: "source",
              sourceType: "url",
-             id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils10.generateId)(),
+             id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils11.generateId)(),
              url: value.annotation.url,
              title: value.annotation.title
            });
+         } else if (isErrorChunk(value)) {
+           controller.enqueue({ type: "error", error: value });
          }
        },
        flush(controller) {
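
Two behavioral changes land in this hunk: reasoning summary parts after the first now open their own `reasoning-start` part (same `${item_id}:${summary_index}` id scheme), and provider `error` chunks are forwarded as `error` stream parts instead of falling through the catch-all schema. A hedged sketch of reacting to the forwarded errors (the stream wiring is an assumption; the error shape matches the enqueue above):

// Sketch: fail fast when the Responses stream emits an error chunk.
async function drainOrThrow(parts: AsyncIterable<any>) {
  for await (const part of parts) {
    if (part.type === "error") {
      // part.error is the raw chunk: { type, code, message, param?, sequence_number }
      throw new Error(`OpenAI stream error ${part.error.code}: ${part.error.message}`);
    }
  }
}
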
@@ -2646,124 +2821,130 @@ var OpenAIResponsesLanguageModel = class {
      };
    }
  };
- var usageSchema2 = import_zod13.z.object({
-   input_tokens: import_zod13.z.number(),
-   input_tokens_details: import_zod13.z.object({ cached_tokens: import_zod13.z.number().nullish() }).nullish(),
-   output_tokens: import_zod13.z.number(),
-   output_tokens_details: import_zod13.z.object({ reasoning_tokens: import_zod13.z.number().nullish() }).nullish()
+ var usageSchema2 = import_v414.z.object({
+   input_tokens: import_v414.z.number(),
+   input_tokens_details: import_v414.z.object({ cached_tokens: import_v414.z.number().nullish() }).nullish(),
+   output_tokens: import_v414.z.number(),
+   output_tokens_details: import_v414.z.object({ reasoning_tokens: import_v414.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.output_text.delta"),
-   item_id: import_zod13.z.string(),
-   delta: import_zod13.z.string()
+ var textDeltaChunkSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.output_text.delta"),
+   item_id: import_v414.z.string(),
+   delta: import_v414.z.string()
  });
- var responseFinishedChunkSchema = import_zod13.z.object({
-   type: import_zod13.z.enum(["response.completed", "response.incomplete"]),
-   response: import_zod13.z.object({
-     incomplete_details: import_zod13.z.object({ reason: import_zod13.z.string() }).nullish(),
+ var errorChunkSchema = import_v414.z.object({
+   type: import_v414.z.literal("error"),
+   code: import_v414.z.string(),
+   message: import_v414.z.string(),
+   param: import_v414.z.string().nullish(),
+   sequence_number: import_v414.z.number()
+ });
+ var responseFinishedChunkSchema = import_v414.z.object({
+   type: import_v414.z.enum(["response.completed", "response.incomplete"]),
+   response: import_v414.z.object({
+     incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullish(),
      usage: usageSchema2
    })
  });
- var responseCreatedChunkSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.created"),
-   response: import_zod13.z.object({
-     id: import_zod13.z.string(),
-     created_at: import_zod13.z.number(),
-     model: import_zod13.z.string()
+ var responseCreatedChunkSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.created"),
+   response: import_v414.z.object({
+     id: import_v414.z.string(),
+     created_at: import_v414.z.number(),
+     model: import_v414.z.string()
    })
  });
- var responseOutputItemAddedSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.output_item.added"),
-   output_index: import_zod13.z.number(),
-   item: import_zod13.z.discriminatedUnion("type", [
-     import_zod13.z.object({
-       type: import_zod13.z.literal("message"),
-       id: import_zod13.z.string()
+ var responseOutputItemAddedSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.output_item.added"),
+   output_index: import_v414.z.number(),
+   item: import_v414.z.discriminatedUnion("type", [
+     import_v414.z.object({
+       type: import_v414.z.literal("message"),
+       id: import_v414.z.string()
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("reasoning"),
-       id: import_zod13.z.string()
+     import_v414.z.object({
+       type: import_v414.z.literal("reasoning"),
+       id: import_v414.z.string(),
+       encrypted_content: import_v414.z.string().nullish()
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("function_call"),
-       id: import_zod13.z.string(),
-       call_id: import_zod13.z.string(),
-       name: import_zod13.z.string(),
-       arguments: import_zod13.z.string()
+     import_v414.z.object({
+       type: import_v414.z.literal("function_call"),
+       id: import_v414.z.string(),
+       call_id: import_v414.z.string(),
+       name: import_v414.z.string(),
+       arguments: import_v414.z.string()
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("web_search_call"),
-       id: import_zod13.z.string(),
-       status: import_zod13.z.string()
+     import_v414.z.object({
+       type: import_v414.z.literal("web_search_call"),
+       id: import_v414.z.string(),
+       status: import_v414.z.string()
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("computer_call"),
-       id: import_zod13.z.string(),
-       status: import_zod13.z.string()
+     import_v414.z.object({
+       type: import_v414.z.literal("computer_call"),
+       id: import_v414.z.string(),
+       status: import_v414.z.string()
      })
    ])
  });
- var responseOutputItemDoneSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.output_item.done"),
-   output_index: import_zod13.z.number(),
-   item: import_zod13.z.discriminatedUnion("type", [
-     import_zod13.z.object({
-       type: import_zod13.z.literal("message"),
-       id: import_zod13.z.string()
+ var responseOutputItemDoneSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.output_item.done"),
+   output_index: import_v414.z.number(),
+   item: import_v414.z.discriminatedUnion("type", [
+     import_v414.z.object({
+       type: import_v414.z.literal("message"),
+       id: import_v414.z.string()
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("reasoning"),
-       id: import_zod13.z.string()
+     import_v414.z.object({
+       type: import_v414.z.literal("reasoning"),
+       id: import_v414.z.string(),
+       encrypted_content: import_v414.z.string().nullish()
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("function_call"),
-       id: import_zod13.z.string(),
-       call_id: import_zod13.z.string(),
-       name: import_zod13.z.string(),
-       arguments: import_zod13.z.string(),
-       status: import_zod13.z.literal("completed")
+     import_v414.z.object({
+       type: import_v414.z.literal("function_call"),
+       id: import_v414.z.string(),
+       call_id: import_v414.z.string(),
+       name: import_v414.z.string(),
+       arguments: import_v414.z.string(),
+       status: import_v414.z.literal("completed")
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("web_search_call"),
-       id: import_zod13.z.string(),
-       status: import_zod13.z.literal("completed")
+     import_v414.z.object({
+       type: import_v414.z.literal("web_search_call"),
+       id: import_v414.z.string(),
+       status: import_v414.z.literal("completed")
      }),
-     import_zod13.z.object({
-       type: import_zod13.z.literal("computer_call"),
-       id: import_zod13.z.string(),
-       status: import_zod13.z.literal("completed")
+     import_v414.z.object({
+       type: import_v414.z.literal("computer_call"),
+       id: import_v414.z.string(),
+       status: import_v414.z.literal("completed")
      })
    ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.function_call_arguments.delta"),
-   item_id: import_zod13.z.string(),
-   output_index: import_zod13.z.number(),
-   delta: import_zod13.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.function_call_arguments.delta"),
+   item_id: import_v414.z.string(),
+   output_index: import_v414.z.number(),
+   delta: import_v414.z.string()
  });
- var responseAnnotationAddedSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.output_text.annotation.added"),
-   annotation: import_zod13.z.object({
-     type: import_zod13.z.literal("url_citation"),
-     url: import_zod13.z.string(),
-     title: import_zod13.z.string()
+ var responseAnnotationAddedSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.output_text.annotation.added"),
+   annotation: import_v414.z.object({
+     type: import_v414.z.literal("url_citation"),
+     url: import_v414.z.string(),
+     title: import_v414.z.string()
    })
  });
- var responseReasoningSummaryTextDeltaSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.reasoning_summary_text.delta"),
-   item_id: import_zod13.z.string(),
-   output_index: import_zod13.z.number(),
-   summary_index: import_zod13.z.number(),
-   delta: import_zod13.z.string()
+ var responseReasoningSummaryPartAddedSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.reasoning_summary_part.added"),
+   item_id: import_v414.z.string(),
+   summary_index: import_v414.z.number()
  });
- var responseReasoningSummaryPartDoneSchema = import_zod13.z.object({
-   type: import_zod13.z.literal("response.reasoning_summary_part.done"),
-   item_id: import_zod13.z.string(),
-   output_index: import_zod13.z.number(),
-   summary_index: import_zod13.z.number(),
-   part: import_zod13.z.unknown().nullish()
+ var responseReasoningSummaryTextDeltaSchema = import_v414.z.object({
+   type: import_v414.z.literal("response.reasoning_summary_text.delta"),
+   item_id: import_v414.z.string(),
+   summary_index: import_v414.z.number(),
+   delta: import_v414.z.string()
  });
- var openaiResponsesChunkSchema = import_zod13.z.union([
+ var openaiResponsesChunkSchema = import_v414.z.union([
    textDeltaChunkSchema,
    responseFinishedChunkSchema,
    responseCreatedChunkSchema,
@@ -2771,9 +2952,10 @@ var openaiResponsesChunkSchema = import_zod13.z.union([
    responseOutputItemDoneSchema,
    responseFunctionCallArgumentsDeltaSchema,
    responseAnnotationAddedSchema,
+   responseReasoningSummaryPartAddedSchema,
    responseReasoningSummaryTextDeltaSchema,
-   responseReasoningSummaryPartDoneSchema,
-   import_zod13.z.object({ type: import_zod13.z.string() }).passthrough()
+   errorChunkSchema,
+   import_v414.z.object({ type: import_v414.z.string() }).loose()
    // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
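
The fallback member of the union moves from zod v3's `.passthrough()` to the zod v4 `.loose()` spelling; both accept any object with a string `type` and preserve unknown keys, so unrecognized chunk types still parse. A small standalone sketch of the equivalence (illustrative schema, not package code):

import { z } from "zod/v4";

// .loose() keeps unknown keys, as .passthrough() did in zod v3.
const fallbackChunk = z.object({ type: z.string() }).loose();
const parsed = fallbackChunk.parse({ type: "response.unknown", extra: 42 });
console.log(parsed.extra); // 42 — unknown keys survive parsing
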
@@ -2782,6 +2964,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
    return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+   return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
    return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
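
The new guard composes the generic done-check with a discriminant test on `item.type`, so downstream code can safely read reasoning-only fields such as `encrypted_content`. The same pattern expressed in TypeScript, with simplified stand-in types (the real ones are inferred from the schemas above):

type DoneReasoningChunk = {
  type: "response.output_item.done";
  item: { type: "reasoning"; id: string; encrypted_content?: string | null };
};

function isDoneReasoningChunk(chunk: { type: string }): chunk is DoneReasoningChunk {
  return (
    chunk.type === "response.output_item.done" &&
    (chunk as DoneReasoningChunk).item.type === "reasoning"
  );
}
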
@@ -2794,14 +2979,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
    return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+   return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
    return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+   return chunk.type === "response.reasoning_summary_part.added";
+ }
  function isResponseReasoningSummaryTextDeltaChunk(chunk) {
    return chunk.type === "response.reasoning_summary_text.delta";
  }
+ function isErrorChunk(chunk) {
+   return chunk.type === "error";
+ }
  function getResponsesModelConfig(modelId) {
-   if (modelId.startsWith("o")) {
+   if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
      if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
        return {
          isReasoningModel: true,
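
`getResponsesModelConfig` now routes `codex-` and `computer-use` model ids through the reasoning-model path as well. A quick illustration of the widened predicate (sample ids are illustrative):

const isReasoningFamily = (modelId: string) =>
  modelId.startsWith("o") ||
  modelId.startsWith("codex-") ||
  modelId.startsWith("computer-use");

console.log(isReasoningFamily("o4-mini"));              // true
console.log(isReasoningFamily("codex-mini-latest"));    // true
console.log(isReasoningFamily("computer-use-preview")); // true
console.log(isReasoningFamily("gpt-4.1"));              // false
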
@@ -2824,25 +3018,26 @@ function getResponsesModelConfig(modelId) {
  function supportsFlexProcessing2(modelId) {
    return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
- var openaiResponsesProviderOptionsSchema = import_zod13.z.object({
-   metadata: import_zod13.z.any().nullish(),
-   parallelToolCalls: import_zod13.z.boolean().nullish(),
-   previousResponseId: import_zod13.z.string().nullish(),
-   store: import_zod13.z.boolean().nullish(),
-   user: import_zod13.z.string().nullish(),
-   reasoningEffort: import_zod13.z.string().nullish(),
-   strictSchemas: import_zod13.z.boolean().nullish(),
-   instructions: import_zod13.z.string().nullish(),
-   reasoningSummary: import_zod13.z.string().nullish(),
-   serviceTier: import_zod13.z.enum(["auto", "flex"]).nullish()
+ var openaiResponsesProviderOptionsSchema = import_v414.z.object({
+   metadata: import_v414.z.any().nullish(),
+   parallelToolCalls: import_v414.z.boolean().nullish(),
+   previousResponseId: import_v414.z.string().nullish(),
+   store: import_v414.z.boolean().nullish(),
+   user: import_v414.z.string().nullish(),
+   reasoningEffort: import_v414.z.string().nullish(),
+   strictJsonSchema: import_v414.z.boolean().nullish(),
+   instructions: import_v414.z.string().nullish(),
+   reasoningSummary: import_v414.z.string().nullish(),
+   serviceTier: import_v414.z.enum(["auto", "flex"]).nullish(),
+   include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content"])).nullish()
  });

  // src/openai-speech-model.ts
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
- var import_zod14 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod14.z.object({
-   instructions: import_zod14.z.string().nullish(),
-   speed: import_zod14.z.number().min(0.25).max(4).default(1).nullish()
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_v415 = require("zod/v4");
+ var OpenAIProviderOptionsSchema = import_v415.z.object({
+   instructions: import_v415.z.string().nullish(),
+   speed: import_v415.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
    constructor(modelId, config) {
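
User-visible option changes for the Responses model: `strictSchemas` is renamed to `strictJsonSchema`, and a new `include` array can request `reasoning.encrypted_content`. A hedged usage sketch, assuming the AI SDK's `generateText` entry point (prompt and model id are illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("o4-mini"),
  prompt: "Summarize the release.",
  providerOptions: {
    openai: {
      strictJsonSchema: true, // renamed from `strictSchemas`
      include: ["reasoning.encrypted_content"],
      serviceTier: "flex",
    },
  },
});
console.log(result.text);
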
@@ -2863,7 +3058,7 @@ var OpenAISpeechModel = class {
      providerOptions
    }) {
      const warnings = [];
-     const openAIOptions = await (0, import_provider_utils11.parseProviderOptions)({
+     const openAIOptions = await (0, import_provider_utils12.parseProviderOptions)({
        provider: "openai",
        providerOptions,
        schema: OpenAIProviderOptionsSchema
@@ -2916,15 +3111,15 @@ var OpenAISpeechModel = class {
        value: audio,
        responseHeaders,
        rawValue: rawResponse
-     } = await (0, import_provider_utils11.postJsonToApi)({
+     } = await (0, import_provider_utils12.postJsonToApi)({
        url: this.config.url({
          path: "/audio/speech",
          modelId: this.modelId
        }),
-       headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
+       headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
        body: requestBody,
        failedResponseHandler: openaiFailedResponseHandler,
-       successfulResponseHandler: (0, import_provider_utils11.createBinaryResponseHandler)(),
+       successfulResponseHandler: (0, import_provider_utils12.createBinaryResponseHandler)(),
        abortSignal: options.abortSignal,
        fetch: this.config.fetch
      });
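
The speech model's provider options keep their shape (`instructions`, plus `speed` constrained to 0.25–4 with a default of 1); only the zod import and util bindings are renumbered. A hedged usage sketch, assuming the AI SDK's experimental speech API and its `{ audio }` return shape:

import { experimental_generateSpeech as generateSpeech } from "ai";
import { openai } from "@ai-sdk/openai";

const { audio } = await generateSpeech({
  model: openai.speech("tts-1"),
  text: "The build is green.",
  providerOptions: {
    openai: { instructions: "Speak calmly.", speed: 1.5 }, // speed must be 0.25–4
  },
});
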
@@ -2947,10 +3142,10 @@ var OpenAISpeechModel = class {
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
    var _a, _b;
-   const baseURL = (_a = (0, import_provider_utils12.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+   const baseURL = (_a = (0, import_provider_utils13.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
    const providerName = (_b = options.name) != null ? _b : "openai";
    const getHeaders = () => ({
-     Authorization: `Bearer ${(0, import_provider_utils12.loadApiKey)({
+     Authorization: `Bearer ${(0, import_provider_utils13.loadApiKey)({
        apiKey: options.apiKey,
        environmentVariableName: "OPENAI_API_KEY",
        description: "OpenAI"