@ai-sdk/openai 2.0.31 → 2.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,7 +11,7 @@ import {
  parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod/v4";
+ import { z as z3 } from "zod/v4";
 
  // src/openai-error.ts
  import { z } from "zod/v4";
@@ -336,98 +336,6 @@ var openaiProviderOptions = z2.object({
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
-
- // src/tool/file-search.ts
- import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod/v4";
- var comparisonFilterSchema = z3.object({
- key: z3.string(),
- type: z3.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
- value: z3.union([z3.string(), z3.number(), z3.boolean()])
- });
- var compoundFilterSchema = z3.object({
- type: z3.enum(["and", "or"]),
- filters: z3.array(
- z3.union([comparisonFilterSchema, z3.lazy(() => compoundFilterSchema)])
- )
- });
- var filtersSchema = z3.union([comparisonFilterSchema, compoundFilterSchema]);
- var fileSearchArgsSchema = z3.object({
- vectorStoreIds: z3.array(z3.string()).optional(),
- maxNumResults: z3.number().optional(),
- ranking: z3.object({
- ranker: z3.enum(["auto", "default-2024-08-21"]).optional()
- }).optional(),
- filters: filtersSchema.optional()
- });
- var fileSearch = createProviderDefinedToolFactory({
- id: "openai.file_search",
- name: "file_search",
- inputSchema: z3.object({
- query: z3.string()
- })
- });
-
- // src/tool/web-search-preview.ts
- import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod/v4";
- var webSearchPreviewArgsSchema = z4.object({
- /**
- * Search context size to use for the web search.
- * - high: Most comprehensive context, highest cost, slower response
- * - medium: Balanced context, cost, and latency (default)
- * - low: Least context, lowest cost, fastest response
- */
- searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
- /**
- * User location information to provide geographically relevant search results.
- */
- userLocation: z4.object({
- /**
- * Type of location (always 'approximate')
- */
- type: z4.literal("approximate"),
- /**
- * Two-letter ISO country code (e.g., 'US', 'GB')
- */
- country: z4.string().optional(),
- /**
- * City name (free text, e.g., 'Minneapolis')
- */
- city: z4.string().optional(),
- /**
- * Region name (free text, e.g., 'Minnesota')
- */
- region: z4.string().optional(),
- /**
- * IANA timezone (e.g., 'America/Chicago')
- */
- timezone: z4.string().optional()
- }).optional()
- });
- var webSearchPreview = createProviderDefinedToolFactory2({
- id: "openai.web_search_preview",
- name: "web_search_preview",
- inputSchema: z4.object({
- action: z4.discriminatedUnion("type", [
- z4.object({
- type: z4.literal("search"),
- query: z4.string().nullish()
- }),
- z4.object({
- type: z4.literal("open_page"),
- url: z4.string()
- }),
- z4.object({
- type: z4.literal("find"),
- url: z4.string(),
- pattern: z4.string()
- })
- ]).nullish()
- })
- });
-
- // src/chat/openai-chat-prepare-tools.ts
  function prepareChatTools({
  tools,
  toolChoice,
@@ -453,33 +361,6 @@ function prepareChatTools({
  }
  });
  break;
- case "provider-defined":
- switch (tool.id) {
- case "openai.file_search": {
- const args = fileSearchArgsSchema.parse(tool.args);
- openaiTools.push({
- type: "file_search",
- vector_store_ids: args.vectorStoreIds,
- max_num_results: args.maxNumResults,
- ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
- filters: args.filters
- });
- break;
- }
- case "openai.web_search_preview": {
- const args = webSearchPreviewArgsSchema.parse(tool.args);
- openaiTools.push({
- type: "web_search_preview",
- search_context_size: args.searchContextSize,
- user_location: args.userLocation
- });
- break;
- }
- default:
- toolWarnings.push({ type: "unsupported-tool", tool });
- break;
- }
- break;
  default:
  toolWarnings.push({ type: "unsupported-tool", tool });
  break;
@@ -1007,115 +888,115 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = z5.object({
- prompt_tokens: z5.number().nullish(),
- completion_tokens: z5.number().nullish(),
- total_tokens: z5.number().nullish(),
- prompt_tokens_details: z5.object({
- cached_tokens: z5.number().nullish()
+ var openaiTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ total_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z5.object({
- reasoning_tokens: z5.number().nullish(),
- accepted_prediction_tokens: z5.number().nullish(),
- rejected_prediction_tokens: z5.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- message: z5.object({
- role: z5.literal("assistant").nullish(),
- content: z5.string().nullish(),
- tool_calls: z5.array(
- z5.object({
- id: z5.string().nullish(),
- type: z5.literal("function"),
- function: z5.object({
- name: z5.string(),
- arguments: z5.string()
+ var openaiChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish(),
- annotations: z5.array(
- z5.object({
- type: z5.literal("url_citation"),
- start_index: z5.number(),
- end_index: z5.number(),
- url: z5.string(),
- title: z5.string()
+ annotations: z3.array(
+ z3.object({
+ type: z3.literal("url_citation"),
+ start_index: z3.number(),
+ end_index: z3.number(),
+ url: z3.string(),
+ title: z3.string()
  })
  ).nullish()
  }),
- index: z5.number(),
- logprobs: z5.object({
- content: z5.array(
- z5.object({
- token: z5.string(),
- logprob: z5.number(),
- top_logprobs: z5.array(
- z5.object({
- token: z5.string(),
- logprob: z5.number()
+ index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: z5.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z5.union([
- z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- delta: z5.object({
- role: z5.enum(["assistant"]).nullish(),
- content: z5.string().nullish(),
- tool_calls: z5.array(
- z5.object({
- index: z5.number(),
- id: z5.string().nullish(),
- type: z5.literal("function").nullish(),
- function: z5.object({
- name: z5.string().nullish(),
- arguments: z5.string().nullish()
+ var openaiChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").nullish(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish(),
- annotations: z5.array(
- z5.object({
- type: z5.literal("url_citation"),
- start_index: z5.number(),
- end_index: z5.number(),
- url: z5.string(),
- title: z5.string()
+ annotations: z3.array(
+ z3.object({
+ type: z3.literal("url_citation"),
+ start_index: z3.number(),
+ end_index: z3.number(),
+ url: z3.string(),
+ title: z3.string()
  })
  ).nullish()
  }).nullish(),
- logprobs: z5.object({
- content: z5.array(
- z5.object({
- token: z5.string(),
- logprob: z5.number(),
- top_logprobs: z5.array(
- z5.object({
- token: z5.string(),
- logprob: z5.number()
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: z5.string().nullish(),
- index: z5.number()
+ finish_reason: z3.string().nullish(),
+ index: z3.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1179,7 +1060,7 @@ import {
  parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod/v4";
+ import { z as z5 } from "zod/v4";
 
  // src/completion/convert-to-openai-completion-prompt.ts
  import {
@@ -1290,12 +1171,12 @@ function mapOpenAIFinishReason2(finishReason) {
  }
 
  // src/completion/openai-completion-options.ts
- import { z as z6 } from "zod/v4";
- var openaiCompletionProviderOptions = z6.object({
+ import { z as z4 } from "zod/v4";
+ var openaiCompletionProviderOptions = z4.object({
  /**
  Echo back the prompt in addition to the completion.
  */
- echo: z6.boolean().optional(),
+ echo: z4.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.
 
@@ -1310,16 +1191,16 @@ var openaiCompletionProviderOptions = z6.object({
  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
  */
- logitBias: z6.record(z6.string(), z6.number()).optional(),
+ logitBias: z4.record(z4.string(), z4.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
  */
- suffix: z6.string().optional(),
+ suffix: z4.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z6.string().optional(),
+ user: z4.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
@@ -1329,7 +1210,7 @@ var openaiCompletionProviderOptions = z6.object({
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
  */
- logprobs: z6.union([z6.boolean(), z6.number()]).optional()
+ logprobs: z4.union([z4.boolean(), z4.number()]).optional()
  });
 
  // src/completion/openai-completion-language-model.ts
@@ -1561,42 +1442,42 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = z7.object({
- prompt_tokens: z7.number(),
- completion_tokens: z7.number(),
- total_tokens: z7.number()
+ var usageSchema = z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number(),
+ total_tokens: z5.number()
  });
- var openaiCompletionResponseSchema = z7.object({
- id: z7.string().nullish(),
- created: z7.number().nullish(),
- model: z7.string().nullish(),
- choices: z7.array(
- z7.object({
- text: z7.string(),
- finish_reason: z7.string(),
- logprobs: z7.object({
- tokens: z7.array(z7.string()),
- token_logprobs: z7.array(z7.number()),
- top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
+ var openaiCompletionResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
  }).nullish()
  })
  ),
  usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = z7.union([
- z7.object({
- id: z7.string().nullish(),
- created: z7.number().nullish(),
- model: z7.string().nullish(),
- choices: z7.array(
- z7.object({
- text: z7.string(),
- finish_reason: z7.string().nullish(),
- index: z7.number(),
- logprobs: z7.object({
- tokens: z7.array(z7.string()),
- token_logprobs: z7.array(z7.number()),
- top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
+ var openaiCompletionChunkSchema = z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string().nullish(),
+ index: z5.number(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
  }).nullish()
  })
  ),
@@ -1615,21 +1496,21 @@ import {
  parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z9 } from "zod/v4";
+ import { z as z7 } from "zod/v4";
 
  // src/embedding/openai-embedding-options.ts
- import { z as z8 } from "zod/v4";
- var openaiEmbeddingProviderOptions = z8.object({
+ import { z as z6 } from "zod/v4";
+ var openaiEmbeddingProviderOptions = z6.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
  */
- dimensions: z8.number().optional(),
+ dimensions: z6.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z8.string().optional()
+ user: z6.string().optional()
  });
 
  // src/embedding/openai-embedding-model.ts
@@ -1695,9 +1576,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z9.object({
- data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
- usage: z9.object({ prompt_tokens: z9.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
  });
 
  // src/image/openai-image-model.ts
@@ -1706,7 +1587,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z10 } from "zod/v4";
+ import { z as z8 } from "zod/v4";
 
  // src/image/openai-image-options.ts
  var modelMaxImagesPerCall = {
@@ -1794,9 +1675,9 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z10.object({
- data: z10.array(
- z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
+ var openaiImageResponseSchema = z8.object({
+ data: z8.array(
+ z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
  )
  });
 
@@ -1809,33 +1690,33 @@ import {
  parseProviderOptions as parseProviderOptions4,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z12 } from "zod/v4";
+ import { z as z10 } from "zod/v4";
 
  // src/transcription/openai-transcription-options.ts
- import { z as z11 } from "zod/v4";
- var openAITranscriptionProviderOptions = z11.object({
+ import { z as z9 } from "zod/v4";
+ var openAITranscriptionProviderOptions = z9.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z11.array(z11.string()).optional(),
+ include: z9.array(z9.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z11.string().optional(),
+ language: z9.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z11.string().optional(),
+ prompt: z9.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z11.number().min(0).max(1).default(0).optional(),
+ temperature: z9.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
  });
 
  // src/transcription/openai-transcription-model.ts
@@ -2004,29 +1885,29 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z12.object({
- text: z12.string(),
- language: z12.string().nullish(),
- duration: z12.number().nullish(),
- words: z12.array(
- z12.object({
- word: z12.string(),
- start: z12.number(),
- end: z12.number()
+ var openaiTranscriptionResponseSchema = z10.object({
+ text: z10.string(),
+ language: z10.string().nullish(),
+ duration: z10.number().nullish(),
+ words: z10.array(
+ z10.object({
+ word: z10.string(),
+ start: z10.number(),
+ end: z10.number()
  })
  ).nullish(),
- segments: z12.array(
- z12.object({
- id: z12.number(),
- seek: z12.number(),
- start: z12.number(),
- end: z12.number(),
- text: z12.string(),
- tokens: z12.array(z12.number()),
- temperature: z12.number(),
- avg_logprob: z12.number(),
- compression_ratio: z12.number(),
- no_speech_prob: z12.number()
+ segments: z10.array(
+ z10.object({
+ id: z10.number(),
+ seek: z10.number(),
+ start: z10.number(),
+ end: z10.number(),
+ text: z10.string(),
+ tokens: z10.array(z10.number()),
+ temperature: z10.number(),
+ avg_logprob: z10.number(),
+ compression_ratio: z10.number(),
+ no_speech_prob: z10.number()
  })
  ).nullish()
  });
@@ -2038,10 +1919,10 @@ import {
  parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z13 } from "zod/v4";
- var OpenAIProviderOptionsSchema = z13.object({
- instructions: z13.string().nullish(),
- speed: z13.number().min(0.25).max(4).default(1).nullish()
+ import { z as z11 } from "zod/v4";
+ var OpenAIProviderOptionsSchema = z11.object({
+ instructions: z11.string().nullish(),
+ speed: z11.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -2162,7 +2043,7 @@ import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
  import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
- import { z as z14 } from "zod/v4";
+ import { z as z12 } from "zod/v4";
  function isFileId(data, prefixes) {
  if (!prefixes) return false;
  return prefixes.some((prefix) => data.startsWith(prefix));
@@ -2358,9 +2239,9 @@ async function convertToOpenAIResponsesInput({
  }
  return { input, warnings };
  }
- var openaiResponsesReasoningProviderOptionsSchema = z14.object({
- itemId: z14.string().nullish(),
- reasoningEncryptedContent: z14.string().nullish()
+ var openaiResponsesReasoningProviderOptionsSchema = z12.object({
+ itemId: z12.string().nullish(),
+ reasoningEncryptedContent: z12.string().nullish()
  });
 
  // src/responses/map-openai-responses-finish-reason.ts
@@ -2388,24 +2269,24 @@ import {
 
  // src/tool/code-interpreter.ts
  import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils";
- import { z as z15 } from "zod/v4";
- var codeInterpreterInputSchema = z15.object({
- code: z15.string().nullish(),
- containerId: z15.string()
+ import { z as z13 } from "zod/v4";
+ var codeInterpreterInputSchema = z13.object({
+ code: z13.string().nullish(),
+ containerId: z13.string()
  });
- var codeInterpreterOutputSchema = z15.object({
- outputs: z15.array(
- z15.discriminatedUnion("type", [
- z15.object({ type: z15.literal("logs"), logs: z15.string() }),
- z15.object({ type: z15.literal("image"), url: z15.string() })
+ var codeInterpreterOutputSchema = z13.object({
+ outputs: z13.array(
+ z13.discriminatedUnion("type", [
+ z13.object({ type: z13.literal("logs"), logs: z13.string() }),
+ z13.object({ type: z13.literal("image"), url: z13.string() })
  ])
  ).nullish()
  });
- var codeInterpreterArgsSchema = z15.object({
- container: z15.union([
- z15.string(),
- z15.object({
- fileIds: z15.array(z15.string()).optional()
+ var codeInterpreterArgsSchema = z13.object({
+ container: z13.union([
+ z13.string(),
+ z13.object({
+ fileIds: z13.array(z13.string()).optional()
  })
  ]).optional()
  });
@@ -2416,25 +2297,126 @@ var codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchem
  outputSchema: codeInterpreterOutputSchema
  });
 
+ // src/tool/file-search.ts
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema2 } from "@ai-sdk/provider-utils";
+ import { z as z14 } from "zod/v4";
+ var comparisonFilterSchema = z14.object({
+ key: z14.string(),
+ type: z14.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+ value: z14.union([z14.string(), z14.number(), z14.boolean()])
+ });
+ var compoundFilterSchema = z14.object({
+ type: z14.enum(["and", "or"]),
+ filters: z14.array(
+ z14.union([comparisonFilterSchema, z14.lazy(() => compoundFilterSchema)])
+ )
+ });
+ var fileSearchArgsSchema = z14.object({
+ vectorStoreIds: z14.array(z14.string()),
+ maxNumResults: z14.number().optional(),
+ ranking: z14.object({
+ ranker: z14.string().optional(),
+ scoreThreshold: z14.number().optional()
+ }).optional(),
+ filters: z14.union([comparisonFilterSchema, compoundFilterSchema]).optional()
+ });
+ var fileSearchOutputSchema = z14.object({
+ queries: z14.array(z14.string()),
+ results: z14.array(
+ z14.object({
+ attributes: z14.record(z14.string(), z14.unknown()),
+ fileId: z14.string(),
+ filename: z14.string(),
+ score: z14.number(),
+ text: z14.string()
+ })
+ ).nullable()
+ });
+ var fileSearch = createProviderDefinedToolFactoryWithOutputSchema2({
+ id: "openai.file_search",
+ name: "file_search",
+ inputSchema: z14.object({}),
+ outputSchema: fileSearchOutputSchema
+ });
+
  // src/tool/web-search.ts
- import { createProviderDefinedToolFactory as createProviderDefinedToolFactory3 } from "@ai-sdk/provider-utils";
- import { z as z16 } from "zod/v4";
- var webSearchArgsSchema = z16.object({
- filters: z16.object({
- allowedDomains: z16.array(z16.string()).optional()
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z15 } from "zod/v4";
+ var webSearchArgsSchema = z15.object({
+ filters: z15.object({
+ allowedDomains: z15.array(z15.string()).optional()
  }).optional(),
+ searchContextSize: z15.enum(["low", "medium", "high"]).optional(),
+ userLocation: z15.object({
+ type: z15.literal("approximate"),
+ country: z15.string().optional(),
+ city: z15.string().optional(),
+ region: z15.string().optional(),
+ timezone: z15.string().optional()
+ }).optional()
+ });
+ var webSearchToolFactory = createProviderDefinedToolFactory({
+ id: "openai.web_search",
+ name: "web_search",
+ inputSchema: z15.object({
+ action: z15.discriminatedUnion("type", [
+ z15.object({
+ type: z15.literal("search"),
+ query: z15.string().nullish()
+ }),
+ z15.object({
+ type: z15.literal("open_page"),
+ url: z15.string()
+ }),
+ z15.object({
+ type: z15.literal("find"),
+ url: z15.string(),
+ pattern: z15.string()
+ })
+ ]).nullish()
+ })
+ });
+
+ // src/tool/web-search-preview.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+ import { z as z16 } from "zod/v4";
+ var webSearchPreviewArgsSchema = z16.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
  searchContextSize: z16.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
  userLocation: z16.object({
+ /**
+ * Type of location (always 'approximate')
+ */
  type: z16.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
  country: z16.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
  city: z16.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
  region: z16.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
  timezone: z16.string().optional()
  }).optional()
  });
- var webSearchToolFactory = createProviderDefinedToolFactory3({
- id: "openai.web_search",
- name: "web_search",
+ var webSearchPreview = createProviderDefinedToolFactory2({
+ id: "openai.web_search_preview",
+ name: "web_search_preview",
  inputSchema: z16.object({
  action: z16.discriminatedUnion("type", [
  z16.object({
@@ -2455,7 +2437,7 @@ var webSearchToolFactory = createProviderDefinedToolFactory3({
  });
 
  // src/tool/image-generation.ts
- import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema2 } from "@ai-sdk/provider-utils";
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema3 } from "@ai-sdk/provider-utils";
  import { z as z17 } from "zod/v4";
  var imageGenerationArgsSchema = z17.object({
  background: z17.enum(["auto", "opaque", "transparent"]).optional(),
@@ -2474,7 +2456,7 @@ var imageGenerationArgsSchema = z17.object({
  var imageGenerationOutputSchema = z17.object({
  result: z17.string()
  });
- var imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema2({
+ var imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema3({
  id: "openai.image_generation",
  name: "image_generation",
  inputSchema: z17.object({}),
@@ -2512,7 +2494,10 @@ function prepareResponsesTools({
  type: "file_search",
  vector_store_ids: args.vectorStoreIds,
  max_num_results: args.maxNumResults,
- ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+ ranking_options: args.ranking ? {
+ ranker: args.ranking.ranker,
+ score_threshold: args.ranking.scoreThreshold
+ } : void 0,
  filters: args.filters
  });
  break;
@@ -2616,6 +2601,20 @@ var webSearchCallItem = z18.object({
  })
  ]).nullish()
  });
+ var fileSearchCallItem = z18.object({
+ type: z18.literal("file_search_call"),
+ id: z18.string(),
+ queries: z18.array(z18.string()),
+ results: z18.array(
+ z18.object({
+ attributes: z18.record(z18.string(), z18.unknown()),
+ file_id: z18.string(),
+ filename: z18.string(),
+ score: z18.number(),
+ text: z18.string()
+ })
+ ).nullish()
+ });
  var codeInterpreterCallItem = z18.object({
  type: z18.literal("code_interpreter_call"),
  id: z18.string(),
@@ -2674,7 +2673,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  if (topK != null) {
@@ -2712,16 +2711,27 @@ var OpenAIResponsesLanguageModel = class {
  warnings.push(...inputWarnings);
  const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
+ function addInclude(key) {
+ include = include != null ? [...include, key] : [key];
+ }
+ function hasOpenAITool(id) {
+ return (tools == null ? void 0 : tools.find(
+ (tool) => tool.type === "provider-defined" && tool.id === id
+ )) != null;
+ }
  const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
- include = topLogprobs ? Array.isArray(include) ? [...include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : include;
+ if (topLogprobs) {
+ addInclude("message.output_text.logprobs");
+ }
  const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
  )) == null ? void 0 : _c.name;
- include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
- const codeInterpreterToolName = (_d = tools == null ? void 0 : tools.find(
- (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter"
- )) == null ? void 0 : _d.name;
- include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
+ if (webSearchToolName) {
+ addInclude("web_search_call.action.sources");
+ }
+ if (hasOpenAITool("openai.code_interpreter")) {
+ addInclude("code_interpreter_call.outputs");
+ }
  const baseArgs = {
  model: this.modelId,
  input,
@@ -2734,7 +2744,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_e = responseFormat.name) != null ? _e : "response",
+ name: (_d = responseFormat.name) != null ? _d : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
@@ -2841,7 +2851,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
  const {
  args: body,
  warnings,
@@ -2906,6 +2916,8 @@ var OpenAIResponsesLanguageModel = class {
  })
  )
  }),
+ webSearchCallItem,
+ fileSearchCallItem,
  codeInterpreterCallItem,
  imageGenerationCallItem,
  z18.object({
@@ -2915,28 +2927,11 @@ var OpenAIResponsesLanguageModel = class {
  arguments: z18.string(),
  id: z18.string()
  }),
- webSearchCallItem,
  z18.object({
  type: z18.literal("computer_call"),
  id: z18.string(),
  status: z18.string().optional()
  }),
- z18.object({
- type: z18.literal("file_search_call"),
- id: z18.string(),
- status: z18.string().optional(),
- queries: z18.array(z18.string()).nullish(),
- results: z18.array(
- z18.object({
- attributes: z18.object({
- file_id: z18.string(),
- filename: z18.string(),
- score: z18.number(),
- text: z18.string()
- })
- })
- ).nullish()
- }),
  z18.object({
  type: z18.literal("reasoning"),
  id: z18.string(),
@@ -3105,7 +3100,7 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.id,
  toolName: "file_search",
- input: "",
+ input: "{}",
  providerExecuted: true
  });
  content.push({
@@ -3113,10 +3108,14 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: part.id,
  toolName: "file_search",
  result: {
- type: "file_search_tool_result",
- status: part.status || "completed",
- ...part.queries && { queries: part.queries },
- ...part.results && { results: part.results }
+ queries: part.queries,
+ results: (_n = (_m = part.results) == null ? void 0 : _m.map((result) => ({
+ attributes: result.attributes,
+ fileId: result.file_id,
+ filename: result.filename,
+ score: result.score,
+ text: result.text
+ }))) != null ? _n : null
  },
  providerExecuted: true
  });
@@ -3158,15 +3157,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
+ finishReason: (_o = response.incomplete_details) == null ? void 0 : _o.reason,
  hasFunctionCall
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
- cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
+ reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+ cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
  },
  request: { body },
  response: {
@@ -3223,7 +3222,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3265,14 +3264,12 @@ var OpenAIResponsesLanguageModel = class {
  toolName: "computer_use"
  });
  } else if (value.item.type === "file_search_call") {
- ongoingToolCalls[value.output_index] = {
- toolName: "file_search",
- toolCallId: value.item.id
- };
  controller.enqueue({
- type: "tool-input-start",
- id: value.item.id,
- toolName: "file_search"
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "file_search",
+ input: "{}",
+ providerExecuted: true
  });
  } else if (value.item.type === "image_generation_call") {
  controller.enqueue({
@@ -3372,26 +3369,19 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (value.item.type === "file_search_call") {
  ongoingToolCalls[value.output_index] = void 0;
- controller.enqueue({
- type: "tool-input-end",
- id: value.item.id
- });
- controller.enqueue({
- type: "tool-call",
- toolCallId: value.item.id,
- toolName: "file_search",
- input: "",
- providerExecuted: true
- });
  controller.enqueue({
  type: "tool-result",
  toolCallId: value.item.id,
  toolName: "file_search",
  result: {
- type: "file_search_tool_result",
- status: value.item.status || "completed",
- ...value.item.queries && { queries: value.item.queries },
- ...value.item.results && { results: value.item.results }
+ queries: value.item.queries,
+ results: (_c = (_b = value.item.results) == null ? void 0 : _b.map((result) => ({
+ attributes: result.attributes,
+ fileId: result.file_id,
+ filename: result.filename,
+ score: result.score,
+ text: result.text
+ }))) != null ? _c : null
  },
  providerExecuted: true
  });
@@ -3439,7 +3429,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  itemId: value.item.id,
- reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
  }
  }
  });
@@ -3469,12 +3459,12 @@ var OpenAIResponsesLanguageModel = class {
  id: value.item_id,
  delta: value.delta
  });
- if (((_d = (_c = options.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.logprobs) && value.logprobs) {
+ if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
  logprobs.push(value.logprobs);
  }
  } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
  if (value.summary_index > 0) {
- (_e = activeReasoning[value.item_id]) == null ? void 0 : _e.summaryParts.push(
+ (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.summaryParts.push(
  value.summary_index
  );
  controller.enqueue({
@@ -3483,7 +3473,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  itemId: value.item_id,
- reasoningEncryptedContent: (_g = (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.encryptedContent) != null ? _g : null
+ reasoningEncryptedContent: (_i = (_h = activeReasoning[value.item_id]) == null ? void 0 : _h.encryptedContent) != null ? _i : null
  }
  }
  });
@@ -3501,14 +3491,14 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_h = value.response.incomplete_details) == null ? void 0 : _h.reason,
+ finishReason: (_j = value.response.incomplete_details) == null ? void 0 : _j.reason,
  hasFunctionCall
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
- usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
+ usage.reasoningTokens = (_l = (_k = value.response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0;
+ usage.cachedInputTokens = (_n = (_m = value.response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0;
  if (typeof value.response.service_tier === "string") {
  serviceTier = value.response.service_tier;
  }
@@ -3517,7 +3507,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : generateId2(),
+ id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : generateId2(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -3525,10 +3515,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : generateId2(),
+ id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : generateId2(),
  mediaType: "text/plain",
- title: (_t = (_s = value.annotation.quote) != null ? _s : value.annotation.filename) != null ? _t : "Document",
- filename: (_u = value.annotation.filename) != null ? _u : value.annotation.file_id
+ title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
+ filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
  });
  }
  } else if (isErrorChunk(value)) {
@@ -3633,19 +3623,7 @@ var responseOutputItemAddedSchema = z18.object({
  }),
  z18.object({
  type: z18.literal("file_search_call"),
- id: z18.string(),
- status: z18.string(),
- queries: z18.array(z18.string()).nullish(),
- results: z18.array(
- z18.object({
- attributes: z18.object({
- file_id: z18.string(),
- filename: z18.string(),
- score: z18.number(),
- text: z18.string()
- })
- })
- ).optional()
+ id: z18.string()
  }),
  z18.object({
  type: z18.literal("image_generation_call"),
@@ -3677,26 +3655,11 @@ var responseOutputItemDoneSchema = z18.object({
  codeInterpreterCallItem,
  imageGenerationCallItem,
  webSearchCallItem,
+ fileSearchCallItem,
  z18.object({
  type: z18.literal("computer_call"),
  id: z18.string(),
  status: z18.literal("completed")
- }),
- z18.object({
- type: z18.literal("file_search_call"),
- id: z18.string(),
- status: z18.literal("completed"),
- queries: z18.array(z18.string()).nullish(),
- results: z18.array(
- z18.object({
- attributes: z18.object({
- file_id: z18.string(),
- filename: z18.string(),
- score: z18.number(),
- text: z18.string()
- })
- })
- ).nullish()
  })
  ])
  });