@ai-sdk/openai 2.0.0-beta.1 → 2.0.0-beta.10

This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
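The most user-visible change in this range is the replacement of the hard-coded `strict: true` JSON schema mode with an opt-in `strictJsonSchema` provider option (default `false`), alongside the move from `zod` to `zod/v4` imports. As a minimal sketch of how the new options would be passed from application code — assuming the AI SDK v5 beta's `generateText` API; the option names come from the schemas in this diff, while the `requestSource` metadata key is an invented placeholder:

// Sketch (TypeScript, AI SDK v5 beta). Assumes `ai` and `@ai-sdk/openai`
// are installed; exact call shapes may differ between beta releases.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("gpt-4o"),
  prompt: "Summarize the release notes.",
  providerOptions: {
    openai: {
      // New in this range: opt into strict JSON schema validation.
      // Before beta.10 the provider always sent `strict: true`; it now
      // defaults to false unless this option is set.
      strictJsonSchema: true,
      // `metadata` is now validated as a string record
      // (keys up to 64 chars, values up to 512 chars).
      metadata: { requestSource: "docs-example" }, // hypothetical key
    },
  },
});
console.log(result.text);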
@@ -39,7 +39,7 @@ module.exports = __toCommonJS(internal_exports);
  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_v45 = require("zod/v4");
 
  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -250,15 +250,15 @@ function mapOpenAIFinishReason(finishReason) {
  }
 
  // src/openai-chat-options.ts
- var import_zod = require("zod");
- var openaiProviderOptions = import_zod.z.object({
+ var import_v4 = require("zod/v4");
+ var openaiProviderOptions = import_v4.z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -268,63 +268,69 @@ var openaiProviderOptions = import_zod.z.object({
  * Setting to a number will return the log probabilities of the top n
  * tokens that were generated.
  */
- logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
- parallelToolCalls: import_zod.z.boolean().optional(),
+ parallelToolCalls: import_v4.z.boolean().optional(),
  /**
  * A unique identifier representing your end-user, which can help OpenAI to
  * monitor and detect abuse.
  */
- user: import_zod.z.string().optional(),
+ user: import_v4.z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
- maxCompletionTokens: import_zod.z.number().optional(),
+ maxCompletionTokens: import_v4.z.number().optional(),
  /**
  * Whether to enable persistence in responses API.
  */
- store: import_zod.z.boolean().optional(),
+ store: import_v4.z.boolean().optional(),
  /**
  * Metadata to associate with the request.
  */
- metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: import_zod.z.record(import_zod.z.any()).optional(),
+ prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
  /**
  * Whether to use structured outputs.
  *
  * @default true
  */
- structuredOutputs: import_zod.z.boolean().optional(),
+ structuredOutputs: import_v4.z.boolean().optional(),
  /**
  * Service tier for the request. Set to 'flex' for 50% cheaper processing
  * at the cost of increased latency. Only available for o3 and o4-mini models.
  *
  * @default 'auto'
  */
- serviceTier: import_zod.z.enum(["auto", "flex"]).optional()
+ serviceTier: import_v4.z.enum(["auto", "flex"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default false
+ */
+ strictJsonSchema: import_v4.z.boolean().optional()
  });
 
  // src/openai-error.ts
- var import_zod2 = require("zod");
+ var import_v42 = require("zod/v4");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
+ var openaiErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod2.z.string().nullish(),
- param: import_zod2.z.any().nullish(),
- code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -337,77 +343,78 @@ var import_provider2 = require("@ai-sdk/provider");
 
  // src/tool/file-search.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
- var fileSearchArgsSchema = import_zod3.z.object({
+ var import_v43 = require("zod/v4");
+ var fileSearchArgsSchema = import_v43.z.object({
  /**
  * List of vector store IDs to search through. If not provided, searches all available vector stores.
  */
- vectorStoreIds: import_zod3.z.array(import_zod3.z.string()).optional(),
+ vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
  /**
  * Maximum number of search results to return. Defaults to 10.
  */
- maxResults: import_zod3.z.number().optional(),
+ maxResults: import_v43.z.number().optional(),
  /**
  * Type of search to perform. Defaults to 'auto'.
  */
- searchType: import_zod3.z.enum(["auto", "keyword", "semantic"]).optional()
+ searchType: import_v43.z.enum(["auto", "keyword", "semantic"]).optional()
  });
  var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
  id: "openai.file_search",
  name: "file_search",
- inputSchema: import_zod3.z.object({
- query: import_zod3.z.string()
+ inputSchema: import_v43.z.object({
+ query: import_v43.z.string()
  })
  });
 
  // src/tool/web-search-preview.ts
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
- var webSearchPreviewArgsSchema = import_zod4.z.object({
+ var import_v44 = require("zod/v4");
+ var webSearchPreviewArgsSchema = import_v44.z.object({
  /**
  * Search context size to use for the web search.
  * - high: Most comprehensive context, highest cost, slower response
  * - medium: Balanced context, cost, and latency (default)
  * - low: Least context, lowest cost, fastest response
  */
- searchContextSize: import_zod4.z.enum(["low", "medium", "high"]).optional(),
+ searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
  /**
  * User location information to provide geographically relevant search results.
  */
- userLocation: import_zod4.z.object({
+ userLocation: import_v44.z.object({
  /**
  * Type of location (always 'approximate')
  */
- type: import_zod4.z.literal("approximate"),
+ type: import_v44.z.literal("approximate"),
  /**
  * Two-letter ISO country code (e.g., 'US', 'GB')
  */
- country: import_zod4.z.string().optional(),
+ country: import_v44.z.string().optional(),
  /**
  * City name (free text, e.g., 'Minneapolis')
  */
- city: import_zod4.z.string().optional(),
+ city: import_v44.z.string().optional(),
  /**
  * Region name (free text, e.g., 'Minnesota')
  */
- region: import_zod4.z.string().optional(),
+ region: import_v44.z.string().optional(),
  /**
  * IANA timezone (e.g., 'America/Chicago')
  */
- timezone: import_zod4.z.string().optional()
+ timezone: import_v44.z.string().optional()
  }).optional()
  });
  var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
  id: "openai.web_search_preview",
  name: "web_search_preview",
- inputSchema: import_zod4.z.object({})
+ inputSchema: import_v44.z.object({})
  });
 
  // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -424,7 +431,7 @@ function prepareTools({
  name: tool.name,
  description: tool.description,
  parameters: tool.inputSchema,
- strict: structuredOutputs ? true : void 0
+ strict: structuredOutputs ? strictJsonSchema : void 0
  }
  });
  break;
@@ -516,7 +523,7 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d;
  const warnings = [];
  const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "openai",
@@ -544,6 +551,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -559,18 +567,15 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
- // TODO convert into provider option
- structuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_c = responseFormat.name) != null ? _c : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" }
- ) : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
@@ -669,7 +674,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -951,97 +957,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = import_zod5.z.object({
- prompt_tokens: import_zod5.z.number().nullish(),
- completion_tokens: import_zod5.z.number().nullish(),
- total_tokens: import_zod5.z.number().nullish(),
- prompt_tokens_details: import_zod5.z.object({
- cached_tokens: import_zod5.z.number().nullish()
+ var openaiTokenUsageSchema = import_v45.z.object({
+ prompt_tokens: import_v45.z.number().nullish(),
+ completion_tokens: import_v45.z.number().nullish(),
+ total_tokens: import_v45.z.number().nullish(),
+ prompt_tokens_details: import_v45.z.object({
+ cached_tokens: import_v45.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod5.z.object({
- reasoning_tokens: import_zod5.z.number().nullish(),
- accepted_prediction_tokens: import_zod5.z.number().nullish(),
- rejected_prediction_tokens: import_zod5.z.number().nullish()
+ completion_tokens_details: import_v45.z.object({
+ reasoning_tokens: import_v45.z.number().nullish(),
+ accepted_prediction_tokens: import_v45.z.number().nullish(),
+ rejected_prediction_tokens: import_v45.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- created: import_zod5.z.number().nullish(),
- model: import_zod5.z.string().nullish(),
- choices: import_zod5.z.array(
- import_zod5.z.object({
- message: import_zod5.z.object({
- role: import_zod5.z.literal("assistant").nullish(),
- content: import_zod5.z.string().nullish(),
- tool_calls: import_zod5.z.array(
- import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- type: import_zod5.z.literal("function"),
- function: import_zod5.z.object({
- name: import_zod5.z.string(),
- arguments: import_zod5.z.string()
+ var openaiChatResponseSchema = import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ message: import_v45.z.object({
+ role: import_v45.z.literal("assistant").nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function"),
+ function: import_v45.z.object({
+ name: import_v45.z.string(),
+ arguments: import_v45.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod5.z.number(),
- logprobs: import_zod5.z.object({
- content: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number(),
- top_logprobs: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number()
+ index: import_v45.z.number(),
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod5.z.string().nullish()
+ finish_reason: import_v45.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod5.z.union([
- import_zod5.z.object({
- id: import_zod5.z.string().nullish(),
- created: import_zod5.z.number().nullish(),
- model: import_zod5.z.string().nullish(),
- choices: import_zod5.z.array(
- import_zod5.z.object({
- delta: import_zod5.z.object({
- role: import_zod5.z.enum(["assistant"]).nullish(),
- content: import_zod5.z.string().nullish(),
- tool_calls: import_zod5.z.array(
- import_zod5.z.object({
- index: import_zod5.z.number(),
- id: import_zod5.z.string().nullish(),
- type: import_zod5.z.literal("function").nullish(),
- function: import_zod5.z.object({
- name: import_zod5.z.string().nullish(),
- arguments: import_zod5.z.string().nullish()
+ var openaiChatChunkSchema = import_v45.z.union([
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ delta: import_v45.z.object({
+ role: import_v45.z.enum(["assistant"]).nullish(),
+ content: import_v45.z.string().nullish(),
+ tool_calls: import_v45.z.array(
+ import_v45.z.object({
+ index: import_v45.z.number(),
+ id: import_v45.z.string().nullish(),
+ type: import_v45.z.literal("function").nullish(),
+ function: import_v45.z.object({
+ name: import_v45.z.string().nullish(),
+ arguments: import_v45.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod5.z.object({
- content: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number(),
- top_logprobs: import_zod5.z.array(
- import_zod5.z.object({
- token: import_zod5.z.string(),
- logprob: import_zod5.z.number()
+ logprobs: import_v45.z.object({
+ content: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number(),
+ top_logprobs: import_v45.z.array(
+ import_v45.z.object({
+ token: import_v45.z.string(),
+ logprob: import_v45.z.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod5.z.string().nullish(),
- index: import_zod5.z.number()
+ finish_reason: import_v45.z.string().nullish(),
+ index: import_v45.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1096,7 +1102,7 @@ var reasoningModels = {
 
  // src/openai-completion-language-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
+ var import_v47 = require("zod/v4");
 
  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1174,12 +1180,12 @@ ${user}:`]
  }
 
  // src/openai-completion-options.ts
- var import_zod6 = require("zod");
- var openaiCompletionProviderOptions = import_zod6.z.object({
+ var import_v46 = require("zod/v4");
+ var openaiCompletionProviderOptions = import_v46.z.object({
  /**
  Echo back the prompt in addition to the completion.
  */
- echo: import_zod6.z.boolean().optional(),
+ echo: import_v46.z.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.
 
@@ -1194,16 +1200,16 @@ var openaiCompletionProviderOptions = import_zod6.z.object({
  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
  */
- logitBias: import_zod6.z.record(import_zod6.z.string(), import_zod6.z.number()).optional(),
+ logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
  */
- suffix: import_zod6.z.string().optional(),
+ suffix: import_v46.z.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: import_zod6.z.string().optional(),
+ user: import_v46.z.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
@@ -1213,7 +1219,7 @@ var openaiCompletionProviderOptions = import_zod6.z.object({
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
  */
- logprobs: import_zod6.z.union([import_zod6.z.boolean(), import_zod6.z.number()]).optional()
+ logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
  });
 
  // src/openai-completion-language-model.ts
@@ -1445,42 +1451,42 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod7.z.object({
- prompt_tokens: import_zod7.z.number(),
- completion_tokens: import_zod7.z.number(),
- total_tokens: import_zod7.z.number()
+ var usageSchema = import_v47.z.object({
+ prompt_tokens: import_v47.z.number(),
+ completion_tokens: import_v47.z.number(),
+ total_tokens: import_v47.z.number()
  });
- var openaiCompletionResponseSchema = import_zod7.z.object({
- id: import_zod7.z.string().nullish(),
- created: import_zod7.z.number().nullish(),
- model: import_zod7.z.string().nullish(),
- choices: import_zod7.z.array(
- import_zod7.z.object({
- text: import_zod7.z.string(),
- finish_reason: import_zod7.z.string(),
- logprobs: import_zod7.z.object({
- tokens: import_zod7.z.array(import_zod7.z.string()),
- token_logprobs: import_zod7.z.array(import_zod7.z.number()),
- top_logprobs: import_zod7.z.array(import_zod7.z.record(import_zod7.z.string(), import_zod7.z.number())).nullish()
+ var openaiCompletionResponseSchema = import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
  usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = import_zod7.z.union([
- import_zod7.z.object({
- id: import_zod7.z.string().nullish(),
- created: import_zod7.z.number().nullish(),
- model: import_zod7.z.string().nullish(),
- choices: import_zod7.z.array(
- import_zod7.z.object({
- text: import_zod7.z.string(),
- finish_reason: import_zod7.z.string().nullish(),
- index: import_zod7.z.number(),
- logprobs: import_zod7.z.object({
- tokens: import_zod7.z.array(import_zod7.z.string()),
- token_logprobs: import_zod7.z.array(import_zod7.z.number()),
- top_logprobs: import_zod7.z.array(import_zod7.z.record(import_zod7.z.string(), import_zod7.z.number())).nullish()
+ var openaiCompletionChunkSchema = import_v47.z.union([
+ import_v47.z.object({
+ id: import_v47.z.string().nullish(),
+ created: import_v47.z.number().nullish(),
+ model: import_v47.z.string().nullish(),
+ choices: import_v47.z.array(
+ import_v47.z.object({
+ text: import_v47.z.string(),
+ finish_reason: import_v47.z.string().nullish(),
+ index: import_v47.z.number(),
+ logprobs: import_v47.z.object({
+ tokens: import_v47.z.array(import_v47.z.string()),
+ token_logprobs: import_v47.z.array(import_v47.z.number()),
+ top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
  }).nullish()
  })
  ),
@@ -1492,21 +1498,21 @@ var openaiCompletionChunkSchema = import_zod7.z.union([
  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
- var import_zod9 = require("zod");
+ var import_v49 = require("zod/v4");
 
  // src/openai-embedding-options.ts
- var import_zod8 = require("zod");
- var openaiEmbeddingProviderOptions = import_zod8.z.object({
+ var import_v48 = require("zod/v4");
+ var openaiEmbeddingProviderOptions = import_v48.z.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
  */
- dimensions: import_zod8.z.number().optional(),
+ dimensions: import_v48.z.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: import_zod8.z.string().optional()
+ user: import_v48.z.string().optional()
  });
 
  // src/openai-embedding-model.ts
@@ -1572,14 +1578,14 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod9.z.object({
- data: import_zod9.z.array(import_zod9.z.object({ embedding: import_zod9.z.array(import_zod9.z.number()) })),
- usage: import_zod9.z.object({ prompt_tokens: import_zod9.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_v49.z.object({
+ data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
+ usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
  });
 
  // src/openai-image-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod10 = require("zod");
+ var import_v410 = require("zod/v4");
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1667,41 +1673,41 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod10.z.object({
- data: import_zod10.z.array(
- import_zod10.z.object({ b64_json: import_zod10.z.string(), revised_prompt: import_zod10.z.string().optional() })
+ var openaiImageResponseSchema = import_v410.z.object({
+ data: import_v410.z.array(
+ import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
  )
  });
 
  // src/openai-transcription-model.ts
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
- var import_zod12 = require("zod");
+ var import_v412 = require("zod/v4");
 
  // src/openai-transcription-options.ts
- var import_zod11 = require("zod");
- var openAITranscriptionProviderOptions = import_zod11.z.object({
+ var import_v411 = require("zod/v4");
+ var openAITranscriptionProviderOptions = import_v411.z.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: import_zod11.z.array(import_zod11.z.string()).optional(),
+ include: import_v411.z.array(import_v411.z.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: import_zod11.z.string().optional(),
+ language: import_v411.z.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: import_zod11.z.string().optional(),
+ prompt: import_v411.z.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: import_zod11.z.number().min(0).max(1).default(0).optional(),
+ temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: import_zod11.z.array(import_zod11.z.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
  });
 
  // src/openai-transcription-model.ts
@@ -1849,25 +1855,25 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod12.z.object({
- text: import_zod12.z.string(),
- language: import_zod12.z.string().nullish(),
- duration: import_zod12.z.number().nullish(),
- words: import_zod12.z.array(
- import_zod12.z.object({
- word: import_zod12.z.string(),
- start: import_zod12.z.number(),
- end: import_zod12.z.number()
+ var openaiTranscriptionResponseSchema = import_v412.z.object({
+ text: import_v412.z.string(),
+ language: import_v412.z.string().nullish(),
+ duration: import_v412.z.number().nullish(),
+ words: import_v412.z.array(
+ import_v412.z.object({
+ word: import_v412.z.string(),
+ start: import_v412.z.number(),
+ end: import_v412.z.number()
  })
  ).nullish()
  });
 
  // src/openai-speech-model.ts
  var import_provider_utils10 = require("@ai-sdk/provider-utils");
- var import_zod13 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod13.z.object({
- instructions: import_zod13.z.string().nullish(),
- speed: import_zod13.z.number().min(0.25).max(4).default(1).nullish()
+ var import_v413 = require("zod/v4");
+ var OpenAIProviderOptionsSchema = import_v413.z.object({
+ instructions: import_v413.z.string().nullish(),
+ speed: import_v413.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -1970,15 +1976,19 @@ var OpenAISpeechModel = class {
  };
 
  // src/responses/openai-responses-language-model.ts
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
- var import_zod14 = require("zod");
+ var import_provider8 = require("@ai-sdk/provider");
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_v415 = require("zod/v4");
 
  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
- function convertToOpenAIResponsesMessages({
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_v414 = require("zod/v4");
+ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
+ var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -2013,7 +2023,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -2025,7 +2035,7 @@ function convertToOpenAIResponsesMessages({
  type: "input_image",
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
  // OpenAI specific extension: image detail
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
@@ -2035,7 +2045,7 @@ function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -2050,12 +2060,14 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "assistant": {
+ const reasoningMessages = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
@@ -2067,7 +2079,8 @@ function convertToOpenAIResponsesMessages({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.input)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
  });
  break;
  }
@@ -2078,6 +2091,43 @@ function convertToOpenAIResponsesMessages({
  });
  break;
  }
+ case "reasoning": {
+ const providerOptions = await (0, import_provider_utils11.parseProviderOptions)({
+ provider: "openai",
+ providerOptions: part.providerOptions,
+ schema: openaiResponsesReasoningProviderOptionsSchema
+ });
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+ if (reasoningId != null) {
+ const existingReasoningMessage = reasoningMessages[reasoningId];
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({ type: "summary_text", text: part.text });
+ } else if (existingReasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (existingReasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ messages.push(reasoningMessages[reasoningId]);
+ } else {
+ existingReasoningMessage.summary.push(...summaryParts);
+ }
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ break;
+ }
  }
  }
  break;
@@ -2113,6 +2163,10 @@ function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
+ var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
+ itemId: import_v414.z.string().nullish(),
+ reasoningEncryptedContent: import_v414.z.string().nullish()
+ });
 
  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
@@ -2137,7 +2191,7 @@ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
  tools,
  toolChoice,
- strict
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -2153,11 +2207,21 @@ function prepareResponsesTools({
  name: tool.name,
  description: tool.description,
  parameters: tool.inputSchema,
- strict: strict ? true : void 0
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_results: args.maxResults,
+ search_type: args.searchType
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools.push({
  type: "web_search_preview",
@@ -2187,7 +2251,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -2251,17 +2315,17 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = await (0, import_provider_utils11.parseProviderOptions)({
+ const openaiOptions = await (0, import_provider_utils12.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2272,7 +2336,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrict,
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -2287,6 +2351,7 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
@@ -2319,6 +2384,21 @@ var OpenAIResponsesLanguageModel = class {
  details: "topP is not supported for reasoning models"
  });
  }
+ } else {
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningEffort",
+ details: "reasoningEffort is not supported for non-reasoning models"
+ });
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningSummary",
+ details: "reasoningSummary is not supported for non-reasoning models"
+ });
+ }
  }
  if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
  warnings.push({
@@ -2335,7 +2415,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
- strict: isStrict
+ strictJsonSchema
  });
  return {
  args: {
@@ -2347,101 +2427,137 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args: body, warnings } = await this.getArgs(options);
+ const url = this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ });
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils11.postJsonToApi)({
- url: this.config.url({
- path: "/responses",
- modelId: this.modelId
- }),
- headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
+ } = await (0, import_provider_utils12.postJsonToApi)({
+ url,
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils11.createJsonResponseHandler)(
- import_zod14.z.object({
- id: import_zod14.z.string(),
- created_at: import_zod14.z.number(),
- model: import_zod14.z.string(),
- output: import_zod14.z.array(
- import_zod14.z.discriminatedUnion("type", [
- import_zod14.z.object({
- type: import_zod14.z.literal("message"),
- role: import_zod14.z.literal("assistant"),
- content: import_zod14.z.array(
- import_zod14.z.object({
- type: import_zod14.z.literal("output_text"),
- text: import_zod14.z.string(),
- annotations: import_zod14.z.array(
- import_zod14.z.object({
- type: import_zod14.z.literal("url_citation"),
- start_index: import_zod14.z.number(),
- end_index: import_zod14.z.number(),
- url: import_zod14.z.string(),
- title: import_zod14.z.string()
+ successfulResponseHandler: (0, import_provider_utils12.createJsonResponseHandler)(
+ import_v415.z.object({
+ id: import_v415.z.string(),
+ created_at: import_v415.z.number(),
+ error: import_v415.z.object({
+ code: import_v415.z.string(),
+ message: import_v415.z.string()
+ }).nullish(),
+ model: import_v415.z.string(),
+ output: import_v415.z.array(
+ import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ role: import_v415.z.literal("assistant"),
+ id: import_v415.z.string(),
+ content: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("output_text"),
+ text: import_v415.z.string(),
+ annotations: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("url_citation"),
+ start_index: import_v415.z.number(),
+ end_index: import_v415.z.number(),
+ url: import_v415.z.string(),
+ title: import_v415.z.string()
  })
  )
  })
  )
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("function_call"),
- call_id: import_zod14.z.string(),
- name: import_zod14.z.string(),
- arguments: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string(),
+ id: import_v415.z.string()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("web_search_call"),
- id: import_zod14.z.string(),
- status: import_zod14.z.string().optional()
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("computer_call"),
- id: import_zod14.z.string(),
- status: import_zod14.z.string().optional()
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("reasoning"),
- summary: import_zod14.z.array(
- import_zod14.z.object({
- type: import_zod14.z.literal("summary_text"),
- text: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish(),
+ summary: import_v415.z.array(
+ import_v415.z.object({
+ type: import_v415.z.literal("summary_text"),
+ text: import_v415.z.string()
  })
  )
  })
  ])
  ),
- incomplete_details: import_zod14.z.object({ reason: import_zod14.z.string() }).nullable(),
+ incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
  usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error) {
+ throw new import_provider8.APICallError({
+ message: response.error.message,
+ url,
+ requestBodyValues: body,
+ statusCode: 400,
+ responseHeaders,
+ responseBody: rawResponse,
+ isRetryable: false
+ });
+ }
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
  case "reasoning": {
- content.push({
- type: "reasoning",
- text: part.summary.map((summary) => summary.text).join()
- });
+ if (part.summary.length === 0) {
+ part.summary.push({ type: "summary_text", text: "" });
+ }
+ for (const summary of part.summary) {
+ content.push({
+ type: "reasoning",
+ text: summary.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id,
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
  break;
  }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils11.generateId)(),
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils12.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
@@ -2454,7 +2570,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.call_id,
  toolName: part.name,
- input: part.arguments
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  break;
  }
@@ -2500,15 +2621,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
- cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
  },
  request: { body },
  response: {
@@ -2528,18 +2649,18 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
  const { args: body, warnings } = await this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils11.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils12.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils11.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils12.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2555,6 +2676,7 @@ var OpenAIResponsesLanguageModel = class {
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
+ const activeReasoning = {};
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2562,7 +2684,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -2606,12 +2728,27 @@ var OpenAIResponsesLanguageModel = class {
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-start",
- id: value.item.id
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
- } else if (value.item.type === "reasoning") {
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
  controller.enqueue({
  type: "reasoning-start",
- id: value.item.id
+ id: `${value.item.id}:0`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+ }
+ }
  });
  }
  } else if (isResponseOutputItemDoneChunk(value)) {
@@ -2626,7 +2763,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- input: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
  } else if (value.item.type === "web_search_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2681,11 +2823,21 @@ var OpenAIResponsesLanguageModel = class {
  type: "text-end",
  id: value.item.id
  });
- } else if (value.item.type === "reasoning") {
- controller.enqueue({
- type: "reasoning-end",
- id: value.item.id
- });
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
+ }
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
@@ -2710,30 +2862,53 @@
  id: value.item_id,
  delta: value.delta
  });
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id,
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ });
+ }
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
  type: "reasoning-delta",
+ id: `${value.item_id}:${value.summary_index}`,
  delta: value.delta,
- id: value.item_id
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
- usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils11.generateId)(),
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils12.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
+ } else if (isErrorChunk(value)) {
+ controller.enqueue({ type: "error", error: value });
  }
  },
  flush(controller) {
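Note: reasoning summary parts after the first now open their own `reasoning-start` (index 0 is evidently started when the reasoning output item is added), and every `reasoning-delta` carries the composite `itemId:summaryIndex` id plus `providerMetadata.openai.itemId`. Server `error` events are also forwarded as error stream parts instead of falling through the fallback schema. A consumer can regroup the deltas by that composite id; the sketch below assumes a simplified part shape.

```ts
// Consumer-side sketch: regroup reasoning deltas by their composite
// "itemId:summaryIndex" id. The part shape is simplified for clarity.
function groupReasoningDeltas(
  parts: Array<{ type: string; id?: string; delta?: string }>,
): Map<string, string> {
  const summaries = new Map<string, string>();
  for (const part of parts) {
    if (part.type === 'reasoning-delta' && part.id != null && part.delta != null) {
      summaries.set(part.id, (summaries.get(part.id) ?? '') + part.delta);
    }
  }
  return summaries;
}
```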
@@ -2755,124 +2930,130 @@
  };
  }
  };
- var usageSchema2 = import_zod14.z.object({
- input_tokens: import_zod14.z.number(),
- input_tokens_details: import_zod14.z.object({ cached_tokens: import_zod14.z.number().nullish() }).nullish(),
- output_tokens: import_zod14.z.number(),
- output_tokens_details: import_zod14.z.object({ reasoning_tokens: import_zod14.z.number().nullish() }).nullish()
+ var usageSchema2 = import_v415.z.object({
+ input_tokens: import_v415.z.number(),
+ input_tokens_details: import_v415.z.object({ cached_tokens: import_v415.z.number().nullish() }).nullish(),
+ output_tokens: import_v415.z.number(),
+ output_tokens_details: import_v415.z.object({ reasoning_tokens: import_v415.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.output_text.delta"),
- item_id: import_zod14.z.string(),
- delta: import_zod14.z.string()
+ var textDeltaChunkSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_text.delta"),
+ item_id: import_v415.z.string(),
+ delta: import_v415.z.string()
  });
- var responseFinishedChunkSchema = import_zod14.z.object({
- type: import_zod14.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod14.z.object({
- incomplete_details: import_zod14.z.object({ reason: import_zod14.z.string() }).nullish(),
+ var errorChunkSchema = import_v415.z.object({
+ type: import_v415.z.literal("error"),
+ code: import_v415.z.string(),
+ message: import_v415.z.string(),
+ param: import_v415.z.string().nullish(),
+ sequence_number: import_v415.z.number()
+ });
+ var responseFinishedChunkSchema = import_v415.z.object({
+ type: import_v415.z.enum(["response.completed", "response.incomplete"]),
+ response: import_v415.z.object({
+ incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
  usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.created"),
- response: import_zod14.z.object({
- id: import_zod14.z.string(),
- created_at: import_zod14.z.number(),
- model: import_zod14.z.string()
+ var responseCreatedChunkSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.created"),
+ response: import_v415.z.object({
+ id: import_v415.z.string(),
+ created_at: import_v415.z.number(),
+ model: import_v415.z.string()
  })
  });
- var responseOutputItemAddedSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.output_item.added"),
- output_index: import_zod14.z.number(),
- item: import_zod14.z.discriminatedUnion("type", [
- import_zod14.z.object({
- type: import_zod14.z.literal("message"),
- id: import_zod14.z.string()
+ var responseOutputItemAddedSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_item.added"),
+ output_index: import_v415.z.number(),
+ item: import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ id: import_v415.z.string()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("reasoning"),
- id: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("function_call"),
- id: import_zod14.z.string(),
- call_id: import_zod14.z.string(),
- name: import_zod14.z.string(),
- arguments: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ id: import_v415.z.string(),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("web_search_call"),
- id: import_zod14.z.string(),
- status: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("computer_call"),
- id: import_zod14.z.string(),
- status: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string()
  })
  ])
  });
- var responseOutputItemDoneSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.output_item.done"),
- output_index: import_zod14.z.number(),
- item: import_zod14.z.discriminatedUnion("type", [
- import_zod14.z.object({
- type: import_zod14.z.literal("message"),
- id: import_zod14.z.string()
+ var responseOutputItemDoneSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_item.done"),
+ output_index: import_v415.z.number(),
+ item: import_v415.z.discriminatedUnion("type", [
+ import_v415.z.object({
+ type: import_v415.z.literal("message"),
+ id: import_v415.z.string()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("reasoning"),
- id: import_zod14.z.string()
+ import_v415.z.object({
+ type: import_v415.z.literal("reasoning"),
+ id: import_v415.z.string(),
+ encrypted_content: import_v415.z.string().nullish()
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("function_call"),
- id: import_zod14.z.string(),
- call_id: import_zod14.z.string(),
- name: import_zod14.z.string(),
- arguments: import_zod14.z.string(),
- status: import_zod14.z.literal("completed")
+ import_v415.z.object({
+ type: import_v415.z.literal("function_call"),
+ id: import_v415.z.string(),
+ call_id: import_v415.z.string(),
+ name: import_v415.z.string(),
+ arguments: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("web_search_call"),
- id: import_zod14.z.string(),
- status: import_zod14.z.literal("completed")
+ import_v415.z.object({
+ type: import_v415.z.literal("web_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
  }),
- import_zod14.z.object({
- type: import_zod14.z.literal("computer_call"),
- id: import_zod14.z.string(),
- status: import_zod14.z.literal("completed")
+ import_v415.z.object({
+ type: import_v415.z.literal("computer_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod14.z.string(),
- output_index: import_zod14.z.number(),
- delta: import_zod14.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.function_call_arguments.delta"),
+ item_id: import_v415.z.string(),
+ output_index: import_v415.z.number(),
+ delta: import_v415.z.string()
  });
- var responseAnnotationAddedSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.output_text.annotation.added"),
- annotation: import_zod14.z.object({
- type: import_zod14.z.literal("url_citation"),
- url: import_zod14.z.string(),
- title: import_zod14.z.string()
+ var responseAnnotationAddedSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.output_text.annotation.added"),
+ annotation: import_v415.z.object({
+ type: import_v415.z.literal("url_citation"),
+ url: import_v415.z.string(),
+ title: import_v415.z.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.reasoning_summary_text.delta"),
- item_id: import_zod14.z.string(),
- output_index: import_zod14.z.number(),
- summary_index: import_zod14.z.number(),
- delta: import_zod14.z.string()
+ var responseReasoningSummaryPartAddedSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.reasoning_summary_part.added"),
+ item_id: import_v415.z.string(),
+ summary_index: import_v415.z.number()
  });
- var responseReasoningSummaryPartDoneSchema = import_zod14.z.object({
- type: import_zod14.z.literal("response.reasoning_summary_part.done"),
- item_id: import_zod14.z.string(),
- output_index: import_zod14.z.number(),
- summary_index: import_zod14.z.number(),
- part: import_zod14.z.unknown().nullish()
+ var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
+ type: import_v415.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_v415.z.string(),
+ summary_index: import_v415.z.number(),
+ delta: import_v415.z.string()
  });
- var openaiResponsesChunkSchema = import_zod14.z.union([
+ var openaiResponsesChunkSchema = import_v415.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
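Note: this schema block is a mechanical move from the zod v3 entry point (`import_zod14`, i.e. `require("zod")`) to the v4 subpath (`import_v415`, i.e. `require("zod/v4")`). Two substantive additions ride along: reasoning items now parse an optional `encrypted_content` field, and a dedicated `errorChunkSchema` models the Responses API `error` event. In source form the new schema plausibly reads as follows (an assumed reconstruction; the compiled output above is authoritative):

```ts
import { z } from 'zod/v4';

// Shape of the Responses API "error" stream event as validated above.
const errorChunkSchema = z.object({
  type: z.literal('error'),
  code: z.string(),
  message: z.string(),
  param: z.string().nullish(),
  sequence_number: z.number(),
});

type ErrorChunk = z.infer<typeof errorChunkSchema>;
```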
@@ -2880,9 +3061,10 @@ var openaiResponsesChunkSchema = import_zod14.z.union([
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- responseReasoningSummaryPartDoneSchema,
- import_zod14.z.object({ type: import_zod14.z.string() }).passthrough()
+ errorChunkSchema,
+ import_v415.z.object({ type: import_v415.z.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
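Note: besides registering the new chunk schemas in the union (and dropping the now-unused `response.reasoning_summary_part.done` schema), this hunk swaps `.passthrough()` for `.loose()`, zod v4's name for an object schema that keeps unknown keys — exactly what the fallback branch needs to let unrecognized event types through. A small sketch:

```ts
import { z } from 'zod/v4';

// zod v4 renames .passthrough() to .loose(): unknown keys are kept, so
// stream events the SDK does not model yet still parse via the fallback.
const fallbackChunk = z.object({ type: z.string() }).loose();

// e.g. a hypothetical future event type:
fallbackChunk.parse({ type: 'response.queued', sequence_number: 7 });
// => { type: 'response.queued', sequence_number: 7 }  (extra key retained)
```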
@@ -2891,6 +3073,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
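Note: the new guard composes the existing chunk-level predicate with an item-level check, so a single call narrows both the chunk type and `item.type` in the streaming switch. The pattern, with illustrative types:

```ts
// Composed type guard (illustrative types, not the SDK's own).
interface DoneChunk {
  type: 'response.output_item.done';
  item: { type: string; id: string };
}

function isReasoningDoneChunk(
  chunk: { type: string },
): chunk is DoneChunk & { item: { type: 'reasoning' } } {
  return (
    chunk.type === 'response.output_item.done' &&
    (chunk as DoneChunk).item.type === 'reasoning'
  );
}
```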
@@ -2903,14 +3088,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_part.added";
+ }
  function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  return chunk.type === "response.reasoning_summary_text.delta";
  }
+ function isErrorChunk(chunk) {
+ return chunk.type === "error";
+ }
  function getResponsesModelConfig(modelId) {
- if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
  isReasoningModel: true,
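Note: `getResponsesModelConfig` now routes `codex-` and `computer-use` model ids through the reasoning-model configuration alongside the `o` series, with the `o1-mini`/`o1-preview` carve-out unchanged. The widened check, reduced to a predicate:

```ts
// The prefix routing from this hunk, as shipped. The broad 'o' prefix
// is the actual behavior, not a typo.
function isReasoningResponsesModel(modelId: string): boolean {
  return (
    modelId.startsWith('o') ||          // o1, o3, o4-mini, ...
    modelId.startsWith('codex-') ||     // e.g. codex-mini-latest
    modelId.startsWith('computer-use')  // e.g. computer-use-preview
  );
}
```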
@@ -2933,17 +3127,18 @@ function getResponsesModelConfig(modelId) {
  function supportsFlexProcessing2(modelId) {
  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
- var openaiResponsesProviderOptionsSchema = import_zod14.z.object({
- metadata: import_zod14.z.any().nullish(),
- parallelToolCalls: import_zod14.z.boolean().nullish(),
- previousResponseId: import_zod14.z.string().nullish(),
- store: import_zod14.z.boolean().nullish(),
- user: import_zod14.z.string().nullish(),
- reasoningEffort: import_zod14.z.string().nullish(),
- strictSchemas: import_zod14.z.boolean().nullish(),
- instructions: import_zod14.z.string().nullish(),
- reasoningSummary: import_zod14.z.string().nullish(),
- serviceTier: import_zod14.z.enum(["auto", "flex"]).nullish()
+ var openaiResponsesProviderOptionsSchema = import_v415.z.object({
+ metadata: import_v415.z.any().nullish(),
+ parallelToolCalls: import_v415.z.boolean().nullish(),
+ previousResponseId: import_v415.z.string().nullish(),
+ store: import_v415.z.boolean().nullish(),
+ user: import_v415.z.string().nullish(),
+ reasoningEffort: import_v415.z.string().nullish(),
+ strictJsonSchema: import_v415.z.boolean().nullish(),
+ instructions: import_v415.z.string().nullish(),
+ reasoningSummary: import_v415.z.string().nullish(),
+ serviceTier: import_v415.z.enum(["auto", "flex"]).nullish(),
+ include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content"])).nullish()
  });
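Note: two provider-option changes land here: `strictSchemas` is renamed to `strictJsonSchema`, and a new `include` option (currently only `"reasoning.encrypted_content"`) lets callers request encrypted reasoning content so it can be round-tripped across requests. A usage sketch; the call shape follows the AI SDK, and `reasoningSummary: 'auto'` is an assumed valid value for the OpenAI API:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.responses('o3'),
  prompt: 'Summarize the migration plan.',
  providerOptions: {
    openai: {
      reasoningSummary: 'auto',
      strictJsonSchema: true, // renamed from `strictSchemas`
      include: ['reasoning.encrypted_content'], // round-trippable reasoning
    },
  },
});

console.log(await result.text);
```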
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {