@mastra/server 0.0.0-toolOptionTypes-20250917085558 → 0.0.0-top-level-fix-20251211111608

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (448)
  1. package/CHANGELOG.md +1684 -3
  2. package/README.md +2 -2
  3. package/dist/{chunk-PPYGWINI.cjs → chunk-2PLXW4ZX.cjs} +74 -74
  4. package/dist/{chunk-PPYGWINI.cjs.map → chunk-2PLXW4ZX.cjs.map} +1 -1
  5. package/dist/chunk-3SFLFUKY.js +116 -0
  6. package/dist/chunk-3SFLFUKY.js.map +1 -0
  7. package/dist/chunk-3XI22UQR.cjs +148 -0
  8. package/dist/chunk-3XI22UQR.cjs.map +1 -0
  9. package/dist/chunk-4W2SM6CG.js +294 -0
  10. package/dist/chunk-4W2SM6CG.js.map +1 -0
  11. package/dist/chunk-5W4RPVTK.cjs +49 -0
  12. package/dist/chunk-5W4RPVTK.cjs.map +1 -0
  13. package/dist/{chunk-7NADHFD2.cjs → chunk-64ITUOXI.cjs} +2 -2
  14. package/dist/chunk-64ITUOXI.cjs.map +1 -0
  15. package/dist/{chunk-MMROOK5J.js → chunk-6QWQZI4Q.js} +2 -2
  16. package/dist/{chunk-7NADHFD2.cjs.map → chunk-6QWQZI4Q.js.map} +1 -1
  17. package/dist/{chunk-3THIIWWW.cjs → chunk-7O3KPUJ4.cjs} +230 -34
  18. package/dist/chunk-7O3KPUJ4.cjs.map +1 -0
  19. package/dist/chunk-C3UIIRAT.cjs +920 -0
  20. package/dist/chunk-C3UIIRAT.cjs.map +1 -0
  21. package/dist/chunk-CDRVS35Y.js +23226 -0
  22. package/dist/chunk-CDRVS35Y.js.map +1 -0
  23. package/dist/chunk-DRUNNM4C.js +328 -0
  24. package/dist/chunk-DRUNNM4C.js.map +1 -0
  25. package/dist/chunk-EBWTF2DH.cjs +810 -0
  26. package/dist/chunk-EBWTF2DH.cjs.map +1 -0
  27. package/dist/chunk-ER3QM7DD.js +46 -0
  28. package/dist/chunk-ER3QM7DD.js.map +1 -0
  29. package/dist/chunk-FPCGLPLJ.cjs +297 -0
  30. package/dist/chunk-FPCGLPLJ.cjs.map +1 -0
  31. package/dist/chunk-FYY54HZC.js +310 -0
  32. package/dist/chunk-FYY54HZC.js.map +1 -0
  33. package/dist/chunk-GFF2I6UD.js +354 -0
  34. package/dist/chunk-GFF2I6UD.js.map +1 -0
  35. package/dist/chunk-GN2LFMET.cjs +23268 -0
  36. package/dist/chunk-GN2LFMET.cjs.map +1 -0
  37. package/dist/chunk-H2RMXG2Q.cjs +167 -0
  38. package/dist/chunk-H2RMXG2Q.cjs.map +1 -0
  39. package/dist/chunk-HAJOEDNB.js +274 -0
  40. package/dist/chunk-HAJOEDNB.js.map +1 -0
  41. package/dist/chunk-HT4LP3BO.js +75 -0
  42. package/dist/chunk-HT4LP3BO.js.map +1 -0
  43. package/dist/chunk-I6LR6CPC.cjs +125 -0
  44. package/dist/chunk-I6LR6CPC.cjs.map +1 -0
  45. package/dist/chunk-IEYXQTUW.cjs +284 -0
  46. package/dist/chunk-IEYXQTUW.cjs.map +1 -0
  47. package/dist/chunk-K3ELPJS3.js +1063 -0
  48. package/dist/chunk-K3ELPJS3.js.map +1 -0
  49. package/dist/{chunk-5QUKZCEF.js → chunk-KEXR53KI.js} +224 -34
  50. package/dist/chunk-KEXR53KI.js.map +1 -0
  51. package/dist/{chunk-SIGXR3JT.cjs → chunk-LPM6BBAX.cjs} +5 -5
  52. package/dist/{chunk-SIGXR3JT.cjs.map → chunk-LPM6BBAX.cjs.map} +1 -1
  53. package/dist/chunk-MCYD5LW7.cjs +90 -0
  54. package/dist/chunk-MCYD5LW7.cjs.map +1 -0
  55. package/dist/chunk-MQLS6Z7A.js +891 -0
  56. package/dist/chunk-MQLS6Z7A.js.map +1 -0
  57. package/dist/{chunk-EMMSS5I5.cjs → chunk-O7I5CWRX.cjs} +10 -3
  58. package/dist/{chunk-EMMSS5I5.cjs.map → chunk-O7I5CWRX.cjs.map} +1 -1
  59. package/dist/{chunk-G3PMV62Z.js → chunk-PR4QN5HX.js} +10 -4
  60. package/dist/{chunk-G3PMV62Z.js.map → chunk-PR4QN5HX.js.map} +1 -1
  61. package/dist/chunk-PUFCKXFW.cjs +312 -0
  62. package/dist/chunk-PUFCKXFW.cjs.map +1 -0
  63. package/dist/{chunk-NG5IVLEZ.js → chunk-RQK4FQUD.js} +3 -3
  64. package/dist/{chunk-NG5IVLEZ.js.map → chunk-RQK4FQUD.js.map} +1 -1
  65. package/dist/chunk-S3TIWWQL.cjs +322 -0
  66. package/dist/chunk-S3TIWWQL.cjs.map +1 -0
  67. package/dist/chunk-SXVANU23.js +164 -0
  68. package/dist/chunk-SXVANU23.js.map +1 -0
  69. package/dist/chunk-TGIVGCBJ.cjs +211 -0
  70. package/dist/chunk-TGIVGCBJ.cjs.map +1 -0
  71. package/dist/chunk-TYZ6ZISQ.cjs +368 -0
  72. package/dist/chunk-TYZ6ZISQ.cjs.map +1 -0
  73. package/dist/{chunk-CY4TP3FK.js → chunk-UXGQZUYZ.js} +3 -3
  74. package/dist/{chunk-CY4TP3FK.js.map → chunk-UXGQZUYZ.js.map} +1 -1
  75. package/dist/chunk-UY2D2LVC.cjs +205 -0
  76. package/dist/chunk-UY2D2LVC.cjs.map +1 -0
  77. package/dist/chunk-V272B7RM.cjs +255 -0
  78. package/dist/chunk-V272B7RM.cjs.map +1 -0
  79. package/dist/{chunk-RE4RPXT2.cjs → chunk-V5WWQN7P.cjs} +4 -4
  80. package/dist/{chunk-RE4RPXT2.cjs.map → chunk-V5WWQN7P.cjs.map} +1 -1
  81. package/dist/chunk-VIDXWHJQ.js +204 -0
  82. package/dist/chunk-VIDXWHJQ.js.map +1 -0
  83. package/dist/chunk-W36USBM5.js +196 -0
  84. package/dist/chunk-W36USBM5.js.map +1 -0
  85. package/dist/chunk-WBLT2HL3.js +144 -0
  86. package/dist/chunk-WBLT2HL3.js.map +1 -0
  87. package/dist/chunk-WE4USCF3.js +226 -0
  88. package/dist/chunk-WE4USCF3.js.map +1 -0
  89. package/dist/chunk-X43DWDXB.cjs +346 -0
  90. package/dist/chunk-X43DWDXB.cjs.map +1 -0
  91. package/dist/chunk-X4QMPCTP.cjs +259 -0
  92. package/dist/chunk-X4QMPCTP.cjs.map +1 -0
  93. package/dist/chunk-XQPJ63ZD.cjs +48 -0
  94. package/dist/chunk-XQPJ63ZD.cjs.map +1 -0
  95. package/dist/chunk-XW2HXQDO.js +302 -0
  96. package/dist/chunk-XW2HXQDO.js.map +1 -0
  97. package/dist/chunk-XWGAT2DA.js +44 -0
  98. package/dist/chunk-XWGAT2DA.js.map +1 -0
  99. package/dist/chunk-YHNJY33C.js +786 -0
  100. package/dist/chunk-YHNJY33C.js.map +1 -0
  101. package/dist/chunk-YP34EWWK.js +253 -0
  102. package/dist/chunk-YP34EWWK.js.map +1 -0
  103. package/dist/chunk-ZN5R6OZB.cjs +1107 -0
  104. package/dist/chunk-ZN5R6OZB.cjs.map +1 -0
  105. package/dist/{chunk-6GMFZ5LK.js → chunk-ZULZ2752.js} +3 -3
  106. package/dist/{chunk-6GMFZ5LK.js.map → chunk-ZULZ2752.js.map} +1 -1
  107. package/dist/{chunk-IGFMAZZ5.cjs → dist-4MVGNSRL.cjs} +20 -20
  108. package/dist/dist-4MVGNSRL.cjs.map +1 -0
  109. package/dist/{chunk-TVSIG4JE.cjs → dist-FZYCV3VB.cjs} +26 -26
  110. package/dist/dist-FZYCV3VB.cjs.map +1 -0
  111. package/dist/{chunk-5DP5XZH6.cjs → dist-G2BYZJOC.cjs} +28 -28
  112. package/dist/dist-G2BYZJOC.cjs.map +1 -0
  113. package/dist/dist-P4MXBQ3U.cjs +16 -0
  114. package/dist/{dist-3A5DXB37.cjs.map → dist-P4MXBQ3U.cjs.map} +1 -1
  115. package/dist/{chunk-FQNT7PI4.js → dist-PQZUVLPC.js} +3 -3
  116. package/dist/dist-PQZUVLPC.js.map +1 -0
  117. package/dist/{chunk-P7CIEIJ3.js → dist-R7WYX6LC.js} +3 -3
  118. package/dist/dist-R7WYX6LC.js.map +1 -0
  119. package/dist/{chunk-HJQKWRKQ.cjs → dist-RFMYFILX.cjs} +30 -30
  120. package/dist/dist-RFMYFILX.cjs.map +1 -0
  121. package/dist/{chunk-LYPU75T6.js → dist-X7XR3M3Z.js} +3 -3
  122. package/dist/dist-X7XR3M3Z.js.map +1 -0
  123. package/dist/{chunk-66YYHFGF.js → dist-XVBSOGFK.js} +3 -3
  124. package/dist/dist-XVBSOGFK.js.map +1 -0
  125. package/dist/dist-YREX2TJT.js +3 -0
  126. package/dist/{dist-26HWEQY6.js.map → dist-YREX2TJT.js.map} +1 -1
  127. package/dist/server/auth/defaults.d.ts +3 -0
  128. package/dist/server/auth/defaults.d.ts.map +1 -0
  129. package/dist/server/auth/helpers.d.ts +14 -0
  130. package/dist/server/auth/helpers.d.ts.map +1 -0
  131. package/dist/server/auth/index.cjs +137 -0
  132. package/dist/server/auth/index.cjs.map +1 -0
  133. package/dist/server/auth/index.d.ts +3 -0
  134. package/dist/server/auth/index.d.ts.map +1 -0
  135. package/dist/server/auth/index.js +127 -0
  136. package/dist/server/auth/index.js.map +1 -0
  137. package/dist/server/handlers/a2a.cjs +15 -7
  138. package/dist/server/handlers/a2a.d.ts +492 -10
  139. package/dist/server/handlers/a2a.d.ts.map +1 -1
  140. package/dist/server/handlers/a2a.js +1 -1
  141. package/dist/server/handlers/agent-builder.cjs +43 -31
  142. package/dist/server/handlers/agent-builder.d.ts +599 -77
  143. package/dist/server/handlers/agent-builder.d.ts.map +1 -1
  144. package/dist/server/handlers/agent-builder.js +1 -1
  145. package/dist/server/handlers/agents.cjs +61 -29
  146. package/dist/server/handlers/agents.d.ts +3329 -129
  147. package/dist/server/handlers/agents.d.ts.map +1 -1
  148. package/dist/server/handlers/agents.js +1 -1
  149. package/dist/server/handlers/error.cjs +2 -2
  150. package/dist/server/handlers/error.js +1 -1
  151. package/dist/server/handlers/logs.cjs +7 -7
  152. package/dist/server/handlers/logs.d.ts +135 -27
  153. package/dist/server/handlers/logs.d.ts.map +1 -1
  154. package/dist/server/handlers/logs.js +1 -1
  155. package/dist/server/handlers/mcp.cjs +40 -0
  156. package/dist/server/handlers/mcp.cjs.map +1 -0
  157. package/dist/server/handlers/mcp.d.ts +110 -0
  158. package/dist/server/handlers/mcp.d.ts.map +1 -0
  159. package/dist/server/handlers/mcp.js +3 -0
  160. package/dist/server/handlers/mcp.js.map +1 -0
  161. package/dist/server/handlers/memory.cjs +63 -31
  162. package/dist/server/handlers/memory.d.ts +934 -103
  163. package/dist/server/handlers/memory.d.ts.map +1 -1
  164. package/dist/server/handlers/memory.js +1 -1
  165. package/dist/server/handlers/observability.cjs +29 -5
  166. package/dist/server/handlers/observability.d.ts +142 -9
  167. package/dist/server/handlers/observability.d.ts.map +1 -1
  168. package/dist/server/handlers/observability.js +1 -1
  169. package/dist/server/handlers/scores.cjs +13 -13
  170. package/dist/server/handlers/scores.d.ts +112 -40
  171. package/dist/server/handlers/scores.d.ts.map +1 -1
  172. package/dist/server/handlers/scores.js +1 -1
  173. package/dist/server/handlers/stored-agents.cjs +28 -0
  174. package/dist/server/handlers/stored-agents.cjs.map +1 -0
  175. package/dist/server/handlers/stored-agents.d.ts +289 -0
  176. package/dist/server/handlers/stored-agents.d.ts.map +1 -0
  177. package/dist/server/handlers/stored-agents.js +3 -0
  178. package/dist/server/handlers/stored-agents.js.map +1 -0
  179. package/dist/server/handlers/test-utils.cjs +15 -0
  180. package/dist/server/handlers/test-utils.cjs.map +1 -0
  181. package/dist/server/handlers/test-utils.d.ts +6 -0
  182. package/dist/server/handlers/test-utils.d.ts.map +1 -0
  183. package/dist/server/handlers/test-utils.js +13 -0
  184. package/dist/server/handlers/test-utils.js.map +1 -0
  185. package/dist/server/handlers/tools.cjs +11 -11
  186. package/dist/server/handlers/tools.d.ts +69 -23
  187. package/dist/server/handlers/tools.d.ts.map +1 -1
  188. package/dist/server/handlers/tools.js +1 -1
  189. package/dist/server/handlers/utils.cjs +11 -3
  190. package/dist/server/handlers/utils.d.ts +5 -0
  191. package/dist/server/handlers/utils.d.ts.map +1 -1
  192. package/dist/server/handlers/utils.js +1 -1
  193. package/dist/server/handlers/vector.cjs +31 -7
  194. package/dist/server/handlers/vector.d.ts +93 -9
  195. package/dist/server/handlers/vector.d.ts.map +1 -1
  196. package/dist/server/handlers/vector.js +1 -1
  197. package/dist/server/handlers/voice.cjs +21 -9
  198. package/dist/server/handlers/voice.d.ts +81 -40
  199. package/dist/server/handlers/voice.d.ts.map +1 -1
  200. package/dist/server/handlers/voice.js +1 -1
  201. package/dist/server/handlers/workflows.cjs +75 -31
  202. package/dist/server/handlers/workflows.d.ts +925 -60
  203. package/dist/server/handlers/workflows.d.ts.map +1 -1
  204. package/dist/server/handlers/workflows.js +1 -1
  205. package/dist/server/handlers.cjs +26 -36
  206. package/dist/server/handlers.d.ts +1 -3
  207. package/dist/server/handlers.d.ts.map +1 -1
  208. package/dist/server/handlers.js +12 -14
  209. package/dist/server/http-exception.d.ts +0 -5
  210. package/dist/server/http-exception.d.ts.map +1 -1
  211. package/dist/server/schemas/a2a.d.ts +786 -0
  212. package/dist/server/schemas/a2a.d.ts.map +1 -0
  213. package/dist/server/schemas/agent-builder.d.ts +204 -0
  214. package/dist/server/schemas/agent-builder.d.ts.map +1 -0
  215. package/dist/server/schemas/agents.d.ts +1375 -0
  216. package/dist/server/schemas/agents.d.ts.map +1 -0
  217. package/dist/server/schemas/common.d.ts +179 -0
  218. package/dist/server/schemas/common.d.ts.map +1 -0
  219. package/dist/server/schemas/logs.d.ts +124 -0
  220. package/dist/server/schemas/logs.d.ts.map +1 -0
  221. package/dist/server/schemas/mcp.d.ts +299 -0
  222. package/dist/server/schemas/mcp.d.ts.map +1 -0
  223. package/dist/server/schemas/memory.d.ts +998 -0
  224. package/dist/server/schemas/memory.d.ts.map +1 -0
  225. package/dist/server/schemas/observability.d.ts +402 -0
  226. package/dist/server/schemas/observability.d.ts.map +1 -0
  227. package/dist/server/schemas/scores.d.ts +259 -0
  228. package/dist/server/schemas/scores.d.ts.map +1 -0
  229. package/dist/server/schemas/stored-agents.d.ts +792 -0
  230. package/dist/server/schemas/stored-agents.d.ts.map +1 -0
  231. package/dist/server/schemas/vectors.d.ts +107 -0
  232. package/dist/server/schemas/vectors.d.ts.map +1 -0
  233. package/dist/server/schemas/workflows.d.ts +608 -0
  234. package/dist/server/schemas/workflows.d.ts.map +1 -0
  235. package/dist/server/server-adapter/index.cjs +481 -0
  236. package/dist/server/server-adapter/index.cjs.map +1 -0
  237. package/dist/server/server-adapter/index.d.ts +91 -0
  238. package/dist/server/server-adapter/index.d.ts.map +1 -0
  239. package/dist/server/server-adapter/index.js +466 -0
  240. package/dist/server/server-adapter/index.js.map +1 -0
  241. package/dist/server/server-adapter/openapi-utils.d.ts +59 -0
  242. package/dist/server/server-adapter/openapi-utils.d.ts.map +1 -0
  243. package/dist/server/server-adapter/redact.d.ts +26 -0
  244. package/dist/server/server-adapter/redact.d.ts.map +1 -0
  245. package/dist/server/server-adapter/routes/a2a.d.ts +3 -0
  246. package/dist/server/server-adapter/routes/a2a.d.ts.map +1 -0
  247. package/dist/server/server-adapter/routes/agent-builder.d.ts +3 -0
  248. package/dist/server/server-adapter/routes/agent-builder.d.ts.map +1 -0
  249. package/dist/server/server-adapter/routes/agents.d.ts +3 -0
  250. package/dist/server/server-adapter/routes/agents.d.ts.map +1 -0
  251. package/dist/server/server-adapter/routes/index.d.ts +50 -0
  252. package/dist/server/server-adapter/routes/index.d.ts.map +1 -0
  253. package/dist/server/server-adapter/routes/legacy.d.ts +7 -0
  254. package/dist/server/server-adapter/routes/legacy.d.ts.map +1 -0
  255. package/dist/server/server-adapter/routes/logs.d.ts +3 -0
  256. package/dist/server/server-adapter/routes/logs.d.ts.map +1 -0
  257. package/dist/server/server-adapter/routes/mcp.d.ts +9 -0
  258. package/dist/server/server-adapter/routes/mcp.d.ts.map +1 -0
  259. package/dist/server/server-adapter/routes/memory.d.ts +3 -0
  260. package/dist/server/server-adapter/routes/memory.d.ts.map +1 -0
  261. package/dist/server/server-adapter/routes/observability.d.ts +3 -0
  262. package/dist/server/server-adapter/routes/observability.d.ts.map +1 -0
  263. package/dist/server/server-adapter/routes/route-builder.d.ts +52 -0
  264. package/dist/server/server-adapter/routes/route-builder.d.ts.map +1 -0
  265. package/dist/server/server-adapter/routes/scorers.d.ts +3 -0
  266. package/dist/server/server-adapter/routes/scorers.d.ts.map +1 -0
  267. package/dist/server/server-adapter/routes/stored-agents.d.ts +8 -0
  268. package/dist/server/server-adapter/routes/stored-agents.d.ts.map +1 -0
  269. package/dist/server/server-adapter/routes/stream-types.d.ts +10 -0
  270. package/dist/server/server-adapter/routes/stream-types.d.ts.map +1 -0
  271. package/dist/server/server-adapter/routes/tools.d.ts +3 -0
  272. package/dist/server/server-adapter/routes/tools.d.ts.map +1 -0
  273. package/dist/server/server-adapter/routes/vectors.d.ts +3 -0
  274. package/dist/server/server-adapter/routes/vectors.d.ts.map +1 -0
  275. package/dist/server/server-adapter/routes/workflows.d.ts +3 -0
  276. package/dist/server/server-adapter/routes/workflows.d.ts.map +1 -0
  277. package/dist/server/utils.d.ts +5 -2
  278. package/dist/server/utils.d.ts.map +1 -1
  279. package/dist/token-GVZ7HRD7.js +62 -0
  280. package/dist/token-GVZ7HRD7.js.map +1 -0
  281. package/dist/token-JGA3ZWAN.js +61 -0
  282. package/dist/token-JGA3ZWAN.js.map +1 -0
  283. package/dist/token-VFONFWVS.cjs +64 -0
  284. package/dist/token-VFONFWVS.cjs.map +1 -0
  285. package/dist/token-ZOD6YIQ3.cjs +63 -0
  286. package/dist/token-ZOD6YIQ3.cjs.map +1 -0
  287. package/dist/token-util-7R2ZFIXO.js +7 -0
  288. package/dist/token-util-7R2ZFIXO.js.map +1 -0
  289. package/dist/token-util-BLJZJDBZ.cjs +9 -0
  290. package/dist/token-util-BLJZJDBZ.cjs.map +1 -0
  291. package/dist/token-util-VGZUWSNR.cjs +9 -0
  292. package/dist/token-util-VGZUWSNR.cjs.map +1 -0
  293. package/dist/token-util-VKTPZLSE.js +7 -0
  294. package/dist/token-util-VKTPZLSE.js.map +1 -0
  295. package/package.json +39 -19
  296. package/dist/chunk-3THIIWWW.cjs.map +0 -1
  297. package/dist/chunk-4QCXUEAT.js +0 -25
  298. package/dist/chunk-4QCXUEAT.js.map +0 -1
  299. package/dist/chunk-4RRMWXQ2.js +0 -3522
  300. package/dist/chunk-4RRMWXQ2.js.map +0 -1
  301. package/dist/chunk-5DP5XZH6.cjs.map +0 -1
  302. package/dist/chunk-5QUKZCEF.js.map +0 -1
  303. package/dist/chunk-5VTTUNVK.cjs +0 -540
  304. package/dist/chunk-5VTTUNVK.cjs.map +0 -1
  305. package/dist/chunk-66YYHFGF.js.map +0 -1
  306. package/dist/chunk-6LUKYSWE.cjs +0 -610
  307. package/dist/chunk-6LUKYSWE.cjs.map +0 -1
  308. package/dist/chunk-743UIDHI.cjs +0 -2013
  309. package/dist/chunk-743UIDHI.cjs.map +0 -1
  310. package/dist/chunk-7QEJ5QG5.js +0 -151
  311. package/dist/chunk-7QEJ5QG5.js.map +0 -1
  312. package/dist/chunk-A3AL7EWJ.js +0 -83
  313. package/dist/chunk-A3AL7EWJ.js.map +0 -1
  314. package/dist/chunk-AK2FXLLB.cjs +0 -849
  315. package/dist/chunk-AK2FXLLB.cjs.map +0 -1
  316. package/dist/chunk-B2V3PUB7.js +0 -591
  317. package/dist/chunk-B2V3PUB7.js.map +0 -1
  318. package/dist/chunk-EMNGA4R4.js +0 -845
  319. package/dist/chunk-EMNGA4R4.js.map +0 -1
  320. package/dist/chunk-FALVL2VV.cjs +0 -3525
  321. package/dist/chunk-FALVL2VV.cjs.map +0 -1
  322. package/dist/chunk-FQNT7PI4.js.map +0 -1
  323. package/dist/chunk-G4PUALCE.cjs +0 -28
  324. package/dist/chunk-G4PUALCE.cjs.map +0 -1
  325. package/dist/chunk-GUI3CROV.cjs +0 -159
  326. package/dist/chunk-GUI3CROV.cjs.map +0 -1
  327. package/dist/chunk-HJQKWRKQ.cjs.map +0 -1
  328. package/dist/chunk-HVBBFCDH.cjs +0 -2321
  329. package/dist/chunk-HVBBFCDH.cjs.map +0 -1
  330. package/dist/chunk-HZJRQ5L3.cjs +0 -1411
  331. package/dist/chunk-HZJRQ5L3.cjs.map +0 -1
  332. package/dist/chunk-IGFMAZZ5.cjs.map +0 -1
  333. package/dist/chunk-IOQGI4ML.js +0 -931
  334. package/dist/chunk-IOQGI4ML.js.map +0 -1
  335. package/dist/chunk-IQ4NTMIJ.cjs +0 -15753
  336. package/dist/chunk-IQ4NTMIJ.cjs.map +0 -1
  337. package/dist/chunk-IY34NOLA.cjs +0 -150
  338. package/dist/chunk-IY34NOLA.cjs.map +0 -1
  339. package/dist/chunk-J7BPKKOG.cjs +0 -163
  340. package/dist/chunk-J7BPKKOG.cjs.map +0 -1
  341. package/dist/chunk-JRDEOHAJ.js +0 -122
  342. package/dist/chunk-JRDEOHAJ.js.map +0 -1
  343. package/dist/chunk-KNGXRN26.cjs +0 -335
  344. package/dist/chunk-KNGXRN26.cjs.map +0 -1
  345. package/dist/chunk-KV6VHX4V.js +0 -160
  346. package/dist/chunk-KV6VHX4V.js.map +0 -1
  347. package/dist/chunk-L265APUD.cjs +0 -69
  348. package/dist/chunk-L265APUD.cjs.map +0 -1
  349. package/dist/chunk-LYPU75T6.js.map +0 -1
  350. package/dist/chunk-MMROOK5J.js.map +0 -1
  351. package/dist/chunk-N7F33WAD.js +0 -2290
  352. package/dist/chunk-N7F33WAD.js.map +0 -1
  353. package/dist/chunk-NLWACBE7.cjs +0 -128
  354. package/dist/chunk-NLWACBE7.cjs.map +0 -1
  355. package/dist/chunk-OGW6HHVI.js +0 -1408
  356. package/dist/chunk-OGW6HHVI.js.map +0 -1
  357. package/dist/chunk-OZLRIVC4.cjs +0 -588
  358. package/dist/chunk-OZLRIVC4.cjs.map +0 -1
  359. package/dist/chunk-P7CIEIJ3.js.map +0 -1
  360. package/dist/chunk-P7RBMCBE.cjs +0 -934
  361. package/dist/chunk-P7RBMCBE.cjs.map +0 -1
  362. package/dist/chunk-PWTXZZTR.cjs +0 -165
  363. package/dist/chunk-PWTXZZTR.cjs.map +0 -1
  364. package/dist/chunk-R7NOGUZG.js +0 -65
  365. package/dist/chunk-R7NOGUZG.js.map +0 -1
  366. package/dist/chunk-RCHEPTZZ.js +0 -2006
  367. package/dist/chunk-RCHEPTZZ.js.map +0 -1
  368. package/dist/chunk-SPLSYTYW.cjs +0 -88
  369. package/dist/chunk-SPLSYTYW.cjs.map +0 -1
  370. package/dist/chunk-SQY4T6EJ.js +0 -571
  371. package/dist/chunk-SQY4T6EJ.js.map +0 -1
  372. package/dist/chunk-SYRRSBGL.js +0 -156
  373. package/dist/chunk-SYRRSBGL.js.map +0 -1
  374. package/dist/chunk-T3TIA3O6.cjs +0 -131
  375. package/dist/chunk-T3TIA3O6.cjs.map +0 -1
  376. package/dist/chunk-TTHEEIZ3.js +0 -323
  377. package/dist/chunk-TTHEEIZ3.js.map +0 -1
  378. package/dist/chunk-TVSIG4JE.cjs.map +0 -1
  379. package/dist/chunk-U46VIX2V.js +0 -144
  380. package/dist/chunk-U46VIX2V.js.map +0 -1
  381. package/dist/chunk-WHN4VX55.js +0 -123
  382. package/dist/chunk-WHN4VX55.js.map +0 -1
  383. package/dist/chunk-YRMJCAH4.js +0 -15717
  384. package/dist/chunk-YRMJCAH4.js.map +0 -1
  385. package/dist/chunk-ZSAOHEZK.js +0 -524
  386. package/dist/chunk-ZSAOHEZK.js.map +0 -1
  387. package/dist/dist-26HWEQY6.js +0 -3
  388. package/dist/dist-3A5DXB37.cjs +0 -20
  389. package/dist/dist-3SJKQJGY.cjs +0 -16
  390. package/dist/dist-3SJKQJGY.cjs.map +0 -1
  391. package/dist/dist-4ZQSPE5K.js +0 -3
  392. package/dist/dist-4ZQSPE5K.js.map +0 -1
  393. package/dist/dist-5W5QNRTD.js +0 -3
  394. package/dist/dist-5W5QNRTD.js.map +0 -1
  395. package/dist/dist-653SRMPL.js +0 -3
  396. package/dist/dist-653SRMPL.js.map +0 -1
  397. package/dist/dist-6U6EFC5C.cjs +0 -16
  398. package/dist/dist-6U6EFC5C.cjs.map +0 -1
  399. package/dist/dist-7IHNNYMF.cjs +0 -16
  400. package/dist/dist-7IHNNYMF.cjs.map +0 -1
  401. package/dist/dist-B5IPRF6W.js +0 -3
  402. package/dist/dist-B5IPRF6W.js.map +0 -1
  403. package/dist/dist-EOMYFT4Y.cjs +0 -16
  404. package/dist/dist-EOMYFT4Y.cjs.map +0 -1
  405. package/dist/dist-EZZMMMNT.cjs +0 -16
  406. package/dist/dist-EZZMMMNT.cjs.map +0 -1
  407. package/dist/dist-F2ET4MNO.cjs +0 -16
  408. package/dist/dist-F2ET4MNO.cjs.map +0 -1
  409. package/dist/dist-H64VX6DE.js +0 -3
  410. package/dist/dist-H64VX6DE.js.map +0 -1
  411. package/dist/dist-HY7RMLJQ.cjs +0 -16
  412. package/dist/dist-HY7RMLJQ.cjs.map +0 -1
  413. package/dist/dist-M6S4P3FJ.js +0 -3
  414. package/dist/dist-M6S4P3FJ.js.map +0 -1
  415. package/dist/dist-NR7QSCQT.js +0 -3
  416. package/dist/dist-NR7QSCQT.js.map +0 -1
  417. package/dist/dist-QLFMCMCX.js +0 -3
  418. package/dist/dist-QLFMCMCX.js.map +0 -1
  419. package/dist/dist-UY46BFRP.js +0 -3
  420. package/dist/dist-UY46BFRP.js.map +0 -1
  421. package/dist/dist-WCQDRTIV.cjs +0 -16
  422. package/dist/dist-WCQDRTIV.cjs.map +0 -1
  423. package/dist/dist-WKYB3LTJ.cjs +0 -16
  424. package/dist/dist-WKYB3LTJ.cjs.map +0 -1
  425. package/dist/server/handlers/legacyWorkflows.cjs +0 -48
  426. package/dist/server/handlers/legacyWorkflows.cjs.map +0 -1
  427. package/dist/server/handlers/legacyWorkflows.d.ts +0 -59
  428. package/dist/server/handlers/legacyWorkflows.d.ts.map +0 -1
  429. package/dist/server/handlers/legacyWorkflows.js +0 -3
  430. package/dist/server/handlers/legacyWorkflows.js.map +0 -1
  431. package/dist/server/handlers/network.cjs +0 -24
  432. package/dist/server/handlers/network.cjs.map +0 -1
  433. package/dist/server/handlers/network.d.ts +0 -50
  434. package/dist/server/handlers/network.d.ts.map +0 -1
  435. package/dist/server/handlers/network.js +0 -3
  436. package/dist/server/handlers/network.js.map +0 -1
  437. package/dist/server/handlers/telemetry.cjs +0 -20
  438. package/dist/server/handlers/telemetry.cjs.map +0 -1
  439. package/dist/server/handlers/telemetry.d.ts +0 -33
  440. package/dist/server/handlers/telemetry.d.ts.map +0 -1
  441. package/dist/server/handlers/telemetry.js +0 -3
  442. package/dist/server/handlers/telemetry.js.map +0 -1
  443. package/dist/server/handlers/vNextNetwork.cjs +0 -220
  444. package/dist/server/handlers/vNextNetwork.cjs.map +0 -1
  445. package/dist/server/handlers/vNextNetwork.d.ts +0 -179
  446. package/dist/server/handlers/vNextNetwork.d.ts.map +0 -1
  447. package/dist/server/handlers/vNextNetwork.js +0 -213
  448. package/dist/server/handlers/vNextNetwork.js.map +0 -1
@@ -1,3525 +0,0 @@
1
- 'use strict';
2
-
3
- var chunkHVBBFCDH_cjs = require('./chunk-HVBBFCDH.cjs');
4
- var v4 = require('zod/v4');
5
-
6
- var openaiErrorDataSchema = v4.z.object({
7
- error: v4.z.object({
8
- message: v4.z.string(),
9
- // The additional information below is handled loosely to support
10
- // OpenAI-compatible providers that have slightly different error
11
- // responses:
12
- type: v4.z.string().nullish(),
13
- param: v4.z.any().nullish(),
14
- code: v4.z.union([v4.z.string(), v4.z.number()]).nullish()
15
- })
16
- });
17
- var openaiFailedResponseHandler = chunkHVBBFCDH_cjs.createJsonErrorResponseHandler({
18
- errorSchema: openaiErrorDataSchema,
19
- errorToMessage: (data) => data.error.message
20
- });
21
- function convertToOpenAIChatMessages({
22
- prompt,
23
- systemMessageMode = "system"
24
- }) {
25
- const messages = [];
26
- const warnings = [];
27
- for (const { role, content } of prompt) {
28
- switch (role) {
29
- case "system": {
30
- switch (systemMessageMode) {
31
- case "system": {
32
- messages.push({ role: "system", content });
33
- break;
34
- }
35
- case "developer": {
36
- messages.push({ role: "developer", content });
37
- break;
38
- }
39
- case "remove": {
40
- warnings.push({
41
- type: "other",
42
- message: "system messages are removed for this model"
43
- });
44
- break;
45
- }
46
- default: {
47
- const _exhaustiveCheck = systemMessageMode;
48
- throw new Error(
49
- `Unsupported system message mode: ${_exhaustiveCheck}`
50
- );
51
- }
52
- }
53
- break;
54
- }
55
- case "user": {
56
- if (content.length === 1 && content[0].type === "text") {
57
- messages.push({ role: "user", content: content[0].text });
58
- break;
59
- }
60
- messages.push({
61
- role: "user",
62
- content: content.map((part, index) => {
63
- var _a, _b, _c;
64
- switch (part.type) {
65
- case "text": {
66
- return { type: "text", text: part.text };
67
- }
68
- case "file": {
69
- if (part.mediaType.startsWith("image/")) {
70
- const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
71
- return {
72
- type: "image_url",
73
- image_url: {
74
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${chunkHVBBFCDH_cjs.convertToBase64(part.data)}`,
75
- // OpenAI specific extension: image detail
76
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
77
- }
78
- };
79
- } else if (part.mediaType.startsWith("audio/")) {
80
- if (part.data instanceof URL) {
81
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
82
- functionality: "audio file parts with URLs"
83
- });
84
- }
85
- switch (part.mediaType) {
86
- case "audio/wav": {
87
- return {
88
- type: "input_audio",
89
- input_audio: {
90
- data: chunkHVBBFCDH_cjs.convertToBase64(part.data),
91
- format: "wav"
92
- }
93
- };
94
- }
95
- case "audio/mp3":
96
- case "audio/mpeg": {
97
- return {
98
- type: "input_audio",
99
- input_audio: {
100
- data: chunkHVBBFCDH_cjs.convertToBase64(part.data),
101
- format: "mp3"
102
- }
103
- };
104
- }
105
- default: {
106
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
107
- functionality: `audio content parts with media type ${part.mediaType}`
108
- });
109
- }
110
- }
111
- } else if (part.mediaType === "application/pdf") {
112
- if (part.data instanceof URL) {
113
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
114
- functionality: "PDF file parts with URLs"
115
- });
116
- }
117
- return {
118
- type: "file",
119
- file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
120
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
121
- file_data: `data:application/pdf;base64,${chunkHVBBFCDH_cjs.convertToBase64(part.data)}`
122
- }
123
- };
124
- } else {
125
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
126
- functionality: `file part media type ${part.mediaType}`
127
- });
128
- }
129
- }
130
- }
131
- })
132
- });
133
- break;
134
- }
135
- case "assistant": {
136
- let text = "";
137
- const toolCalls = [];
138
- for (const part of content) {
139
- switch (part.type) {
140
- case "text": {
141
- text += part.text;
142
- break;
143
- }
144
- case "tool-call": {
145
- toolCalls.push({
146
- id: part.toolCallId,
147
- type: "function",
148
- function: {
149
- name: part.toolName,
150
- arguments: JSON.stringify(part.input)
151
- }
152
- });
153
- break;
154
- }
155
- }
156
- }
157
- messages.push({
158
- role: "assistant",
159
- content: text,
160
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
161
- });
162
- break;
163
- }
164
- case "tool": {
165
- for (const toolResponse of content) {
166
- const output = toolResponse.output;
167
- let contentValue;
168
- switch (output.type) {
169
- case "text":
170
- case "error-text":
171
- contentValue = output.value;
172
- break;
173
- case "content":
174
- case "json":
175
- case "error-json":
176
- contentValue = JSON.stringify(output.value);
177
- break;
178
- }
179
- messages.push({
180
- role: "tool",
181
- tool_call_id: toolResponse.toolCallId,
182
- content: contentValue
183
- });
184
- }
185
- break;
186
- }
187
- default: {
188
- const _exhaustiveCheck = role;
189
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
190
- }
191
- }
192
- }
193
- return { messages, warnings };
194
- }
195
- function getResponseMetadata({
196
- id,
197
- model,
198
- created
199
- }) {
200
- return {
201
- id: id != null ? id : void 0,
202
- modelId: model != null ? model : void 0,
203
- timestamp: created != null ? new Date(created * 1e3) : void 0
204
- };
205
- }
206
- function mapOpenAIFinishReason(finishReason) {
207
- switch (finishReason) {
208
- case "stop":
209
- return "stop";
210
- case "length":
211
- return "length";
212
- case "content_filter":
213
- return "content-filter";
214
- case "function_call":
215
- case "tool_calls":
216
- return "tool-calls";
217
- default:
218
- return "unknown";
219
- }
220
- }
221
- var openaiProviderOptions = v4.z.object({
222
- /**
223
- * Modify the likelihood of specified tokens appearing in the completion.
224
- *
225
- * Accepts a JSON object that maps tokens (specified by their token ID in
226
- * the GPT tokenizer) to an associated bias value from -100 to 100.
227
- */
228
- logitBias: v4.z.record(v4.z.coerce.number(), v4.z.number()).optional(),
229
- /**
230
- * Return the log probabilities of the tokens.
231
- *
232
- * Setting to true will return the log probabilities of the tokens that
233
- * were generated.
234
- *
235
- * Setting to a number will return the log probabilities of the top n
236
- * tokens that were generated.
237
- */
238
- logprobs: v4.z.union([v4.z.boolean(), v4.z.number()]).optional(),
239
- /**
240
- * Whether to enable parallel function calling during tool use. Default to true.
241
- */
242
- parallelToolCalls: v4.z.boolean().optional(),
243
- /**
244
- * A unique identifier representing your end-user, which can help OpenAI to
245
- * monitor and detect abuse.
246
- */
247
- user: v4.z.string().optional(),
248
- /**
249
- * Reasoning effort for reasoning models. Defaults to `medium`.
250
- */
251
- reasoningEffort: v4.z.enum(["minimal", "low", "medium", "high"]).optional(),
252
- /**
253
- * Maximum number of completion tokens to generate. Useful for reasoning models.
254
- */
255
- maxCompletionTokens: v4.z.number().optional(),
256
- /**
257
- * Whether to enable persistence in responses API.
258
- */
259
- store: v4.z.boolean().optional(),
260
- /**
261
- * Metadata to associate with the request.
262
- */
263
- metadata: v4.z.record(v4.z.string().max(64), v4.z.string().max(512)).optional(),
264
- /**
265
- * Parameters for prediction mode.
266
- */
267
- prediction: v4.z.record(v4.z.string(), v4.z.any()).optional(),
268
- /**
269
- * Whether to use structured outputs.
270
- *
271
- * @default true
272
- */
273
- structuredOutputs: v4.z.boolean().optional(),
274
- /**
275
- * Service tier for the request.
276
- * - 'auto': Default service tier
277
- * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
278
- * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
279
- *
280
- * @default 'auto'
281
- */
282
- serviceTier: v4.z.enum(["auto", "flex", "priority"]).optional(),
283
- /**
284
- * Whether to use strict JSON schema validation.
285
- *
286
- * @default false
287
- */
288
- strictJsonSchema: v4.z.boolean().optional(),
289
- /**
290
- * Controls the verbosity of the model's responses.
291
- * Lower values will result in more concise responses, while higher values will result in more verbose responses.
292
- */
293
- textVerbosity: v4.z.enum(["low", "medium", "high"]).optional(),
294
- /**
295
- * A cache key for prompt caching. Allows manual control over prompt caching behavior.
296
- * Useful for improving cache hit rates and working around automatic caching issues.
297
- */
298
- promptCacheKey: v4.z.string().optional(),
299
- /**
300
- * A stable identifier used to help detect users of your application
301
- * that may be violating OpenAI's usage policies. The IDs should be a
302
- * string that uniquely identifies each user. We recommend hashing their
303
- * username or email address, in order to avoid sending us any identifying
304
- * information.
305
- */
306
- safetyIdentifier: v4.z.string().optional()
307
- });
308
- var comparisonFilterSchema = v4.z.object({
309
- key: v4.z.string(),
310
- type: v4.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
311
- value: v4.z.union([v4.z.string(), v4.z.number(), v4.z.boolean()])
312
- });
313
- var compoundFilterSchema = v4.z.object({
314
- type: v4.z.enum(["and", "or"]),
315
- filters: v4.z.array(
316
- v4.z.union([comparisonFilterSchema, v4.z.lazy(() => compoundFilterSchema)])
317
- )
318
- });
319
- var filtersSchema = v4.z.union([comparisonFilterSchema, compoundFilterSchema]);
320
- var fileSearchArgsSchema = v4.z.object({
321
- /**
322
- * List of vector store IDs to search through. If not provided, searches all available vector stores.
323
- */
324
- vectorStoreIds: v4.z.array(v4.z.string()).optional(),
325
- /**
326
- * Maximum number of search results to return. Defaults to 10.
327
- */
328
- maxNumResults: v4.z.number().optional(),
329
- /**
330
- * Ranking options for the search.
331
- */
332
- ranking: v4.z.object({
333
- ranker: v4.z.enum(["auto", "default-2024-08-21"]).optional()
334
- }).optional(),
335
- /**
336
- * A filter to apply based on file attributes.
337
- */
338
- filters: filtersSchema.optional()
339
- });
340
- var fileSearch = chunkHVBBFCDH_cjs.createProviderDefinedToolFactory({
341
- id: "openai.file_search",
342
- name: "file_search",
343
- inputSchema: v4.z.object({
344
- query: v4.z.string()
345
- })
346
- });
347
- var webSearchPreviewArgsSchema = v4.z.object({
348
- /**
349
- * Search context size to use for the web search.
350
- * - high: Most comprehensive context, highest cost, slower response
351
- * - medium: Balanced context, cost, and latency (default)
352
- * - low: Least context, lowest cost, fastest response
353
- */
354
- searchContextSize: v4.z.enum(["low", "medium", "high"]).optional(),
355
- /**
356
- * User location information to provide geographically relevant search results.
357
- */
358
- userLocation: v4.z.object({
359
- /**
360
- * Type of location (always 'approximate')
361
- */
362
- type: v4.z.literal("approximate"),
363
- /**
364
- * Two-letter ISO country code (e.g., 'US', 'GB')
365
- */
366
- country: v4.z.string().optional(),
367
- /**
368
- * City name (free text, e.g., 'Minneapolis')
369
- */
370
- city: v4.z.string().optional(),
371
- /**
372
- * Region name (free text, e.g., 'Minnesota')
373
- */
374
- region: v4.z.string().optional(),
375
- /**
376
- * IANA timezone (e.g., 'America/Chicago')
377
- */
378
- timezone: v4.z.string().optional()
379
- }).optional()
380
- });
381
- var webSearchPreview = chunkHVBBFCDH_cjs.createProviderDefinedToolFactory({
382
- id: "openai.web_search_preview",
383
- name: "web_search_preview",
384
- inputSchema: v4.z.object({})
385
- });
386
- function prepareChatTools({
387
- tools,
388
- toolChoice,
389
- structuredOutputs,
390
- strictJsonSchema
391
- }) {
392
- tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
393
- const toolWarnings = [];
394
- if (tools == null) {
395
- return { tools: void 0, toolChoice: void 0, toolWarnings };
396
- }
397
- const openaiTools2 = [];
398
- for (const tool of tools) {
399
- switch (tool.type) {
400
- case "function":
401
- openaiTools2.push({
402
- type: "function",
403
- function: {
404
- name: tool.name,
405
- description: tool.description,
406
- parameters: tool.inputSchema,
407
- strict: structuredOutputs ? strictJsonSchema : void 0
408
- }
409
- });
410
- break;
411
- case "provider-defined":
412
- switch (tool.id) {
413
- case "openai.file_search": {
414
- const args = fileSearchArgsSchema.parse(tool.args);
415
- openaiTools2.push({
416
- type: "file_search",
417
- vector_store_ids: args.vectorStoreIds,
418
- max_num_results: args.maxNumResults,
419
- ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
420
- filters: args.filters
421
- });
422
- break;
423
- }
424
- case "openai.web_search_preview": {
425
- const args = webSearchPreviewArgsSchema.parse(tool.args);
426
- openaiTools2.push({
427
- type: "web_search_preview",
428
- search_context_size: args.searchContextSize,
429
- user_location: args.userLocation
430
- });
431
- break;
432
- }
433
- default:
434
- toolWarnings.push({ type: "unsupported-tool", tool });
435
- break;
436
- }
437
- break;
438
- default:
439
- toolWarnings.push({ type: "unsupported-tool", tool });
440
- break;
441
- }
442
- }
443
- if (toolChoice == null) {
444
- return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
445
- }
446
- const type = toolChoice.type;
447
- switch (type) {
448
- case "auto":
449
- case "none":
450
- case "required":
451
- return { tools: openaiTools2, toolChoice: type, toolWarnings };
452
- case "tool":
453
- return {
454
- tools: openaiTools2,
455
- toolChoice: {
456
- type: "function",
457
- function: {
458
- name: toolChoice.toolName
459
- }
460
- },
461
- toolWarnings
462
- };
463
- default: {
464
- const _exhaustiveCheck = type;
465
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
466
- functionality: `tool choice type: ${_exhaustiveCheck}`
467
- });
468
- }
469
- }
470
- }
471
- var OpenAIChatLanguageModel = class {
472
- constructor(modelId, config) {
473
- this.specificationVersion = "v2";
474
- this.supportedUrls = {
475
- "image/*": [/^https?:\/\/.*$/]
476
- };
477
- this.modelId = modelId;
478
- this.config = config;
479
- }
480
- get provider() {
481
- return this.config.provider;
482
- }
483
- async getArgs({
484
- prompt,
485
- maxOutputTokens,
486
- temperature,
487
- topP,
488
- topK,
489
- frequencyPenalty,
490
- presencePenalty,
491
- stopSequences,
492
- responseFormat,
493
- seed,
494
- tools,
495
- toolChoice,
496
- providerOptions
497
- }) {
498
- var _a, _b, _c, _d;
499
- const warnings = [];
500
- const openaiOptions = (_a = await chunkHVBBFCDH_cjs.parseProviderOptions({
501
- provider: "openai",
502
- providerOptions,
503
- schema: openaiProviderOptions
504
- })) != null ? _a : {};
505
- const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
506
- if (topK != null) {
507
- warnings.push({
508
- type: "unsupported-setting",
509
- setting: "topK"
510
- });
511
- }
512
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
513
- warnings.push({
514
- type: "unsupported-setting",
515
- setting: "responseFormat",
516
- details: "JSON response format schema is only supported with structuredOutputs"
517
- });
518
- }
519
- const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
520
- {
521
- prompt,
522
- systemMessageMode: getSystemMessageMode(this.modelId)
523
- }
524
- );
525
- warnings.push(...messageWarnings);
526
- const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
527
- const baseArgs = {
528
- // model id:
529
- model: this.modelId,
530
- // model specific settings:
531
- logit_bias: openaiOptions.logitBias,
532
- logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
533
- top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
534
- user: openaiOptions.user,
535
- parallel_tool_calls: openaiOptions.parallelToolCalls,
536
- // standardized settings:
537
- max_tokens: maxOutputTokens,
538
- temperature,
539
- top_p: topP,
540
- frequency_penalty: frequencyPenalty,
541
- presence_penalty: presencePenalty,
542
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
543
- type: "json_schema",
544
- json_schema: {
545
- schema: responseFormat.schema,
546
- strict: strictJsonSchema,
547
- name: (_d = responseFormat.name) != null ? _d : "response",
548
- description: responseFormat.description
549
- }
550
- } : { type: "json_object" } : void 0,
551
- stop: stopSequences,
552
- seed,
553
- verbosity: openaiOptions.textVerbosity,
554
- // openai specific settings:
555
- // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
556
- max_completion_tokens: openaiOptions.maxCompletionTokens,
557
- store: openaiOptions.store,
558
- metadata: openaiOptions.metadata,
559
- prediction: openaiOptions.prediction,
560
- reasoning_effort: openaiOptions.reasoningEffort,
561
- service_tier: openaiOptions.serviceTier,
562
- prompt_cache_key: openaiOptions.promptCacheKey,
563
- safety_identifier: openaiOptions.safetyIdentifier,
564
- // messages:
565
- messages
566
- };
567
- if (isReasoningModel(this.modelId)) {
568
- if (baseArgs.temperature != null) {
569
- baseArgs.temperature = void 0;
570
- warnings.push({
571
- type: "unsupported-setting",
572
- setting: "temperature",
573
- details: "temperature is not supported for reasoning models"
574
- });
575
- }
576
- if (baseArgs.top_p != null) {
577
- baseArgs.top_p = void 0;
578
- warnings.push({
579
- type: "unsupported-setting",
580
- setting: "topP",
581
- details: "topP is not supported for reasoning models"
582
- });
583
- }
584
- if (baseArgs.frequency_penalty != null) {
585
- baseArgs.frequency_penalty = void 0;
586
- warnings.push({
587
- type: "unsupported-setting",
588
- setting: "frequencyPenalty",
589
- details: "frequencyPenalty is not supported for reasoning models"
590
- });
591
- }
592
- if (baseArgs.presence_penalty != null) {
593
- baseArgs.presence_penalty = void 0;
594
- warnings.push({
595
- type: "unsupported-setting",
596
- setting: "presencePenalty",
597
- details: "presencePenalty is not supported for reasoning models"
598
- });
599
- }
600
- if (baseArgs.logit_bias != null) {
601
- baseArgs.logit_bias = void 0;
602
- warnings.push({
603
- type: "other",
604
- message: "logitBias is not supported for reasoning models"
605
- });
606
- }
607
- if (baseArgs.logprobs != null) {
608
- baseArgs.logprobs = void 0;
609
- warnings.push({
610
- type: "other",
611
- message: "logprobs is not supported for reasoning models"
612
- });
613
- }
614
- if (baseArgs.top_logprobs != null) {
615
- baseArgs.top_logprobs = void 0;
616
- warnings.push({
617
- type: "other",
618
- message: "topLogprobs is not supported for reasoning models"
619
- });
620
- }
621
- if (baseArgs.max_tokens != null) {
622
- if (baseArgs.max_completion_tokens == null) {
623
- baseArgs.max_completion_tokens = baseArgs.max_tokens;
624
- }
625
- baseArgs.max_tokens = void 0;
626
- }
627
- } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
628
- if (baseArgs.temperature != null) {
629
- baseArgs.temperature = void 0;
630
- warnings.push({
631
- type: "unsupported-setting",
632
- setting: "temperature",
633
- details: "temperature is not supported for the search preview models and has been removed."
634
- });
635
- }
636
- }
637
- if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
638
- warnings.push({
639
- type: "unsupported-setting",
640
- setting: "serviceTier",
641
- details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
642
- });
643
- baseArgs.service_tier = void 0;
644
- }
645
- if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
646
- warnings.push({
647
- type: "unsupported-setting",
648
- setting: "serviceTier",
649
- details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
650
- });
651
- baseArgs.service_tier = void 0;
652
- }
653
- const {
654
- tools: openaiTools2,
655
- toolChoice: openaiToolChoice,
656
- toolWarnings
657
- } = prepareChatTools({
658
- tools,
659
- toolChoice,
660
- structuredOutputs,
661
- strictJsonSchema
662
- });
663
- return {
664
- args: {
665
- ...baseArgs,
666
- tools: openaiTools2,
667
- tool_choice: openaiToolChoice
668
- },
669
- warnings: [...warnings, ...toolWarnings]
670
- };
671
- }
672
- async doGenerate(options) {
673
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
674
- const { args: body, warnings } = await this.getArgs(options);
675
- const {
676
- responseHeaders,
677
- value: response,
678
- rawValue: rawResponse
679
- } = await chunkHVBBFCDH_cjs.postJsonToApi({
680
- url: this.config.url({
681
- path: "/chat/completions",
682
- modelId: this.modelId
683
- }),
684
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
685
- body,
686
- failedResponseHandler: openaiFailedResponseHandler,
687
- successfulResponseHandler: chunkHVBBFCDH_cjs.createJsonResponseHandler(
688
- openaiChatResponseSchema
689
- ),
690
- abortSignal: options.abortSignal,
691
- fetch: this.config.fetch
692
- });
693
- const choice = response.choices[0];
694
- const content = [];
695
- const text = choice.message.content;
696
- if (text != null && text.length > 0) {
697
- content.push({ type: "text", text });
698
- }
699
- for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
700
- content.push({
701
- type: "tool-call",
702
- toolCallId: (_b = toolCall.id) != null ? _b : chunkHVBBFCDH_cjs.generateId(),
703
- toolName: toolCall.function.name,
704
- input: toolCall.function.arguments
705
- });
706
- }
707
- for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
708
- content.push({
709
- type: "source",
710
- sourceType: "url",
711
- id: chunkHVBBFCDH_cjs.generateId(),
712
- url: annotation.url,
713
- title: annotation.title
714
- });
715
- }
716
- const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
717
- const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
718
- const providerMetadata = { openai: {} };
719
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
720
- providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
721
- }
722
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
723
- providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
724
- }
725
- if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
726
- providerMetadata.openai.logprobs = choice.logprobs.content;
727
- }
728
- return {
729
- content,
730
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
731
- usage: {
732
- inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
733
- outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
734
- totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
735
- reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
736
- cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
737
- },
738
- request: { body },
739
- response: {
740
- ...getResponseMetadata(response),
741
- headers: responseHeaders,
742
- body: rawResponse
743
- },
744
- warnings,
745
- providerMetadata
746
- };
747
- }
748
- async doStream(options) {
749
- const { args, warnings } = await this.getArgs(options);
750
- const body = {
751
- ...args,
752
- stream: true,
753
- stream_options: {
754
- include_usage: true
755
- }
756
- };
757
- const { responseHeaders, value: response } = await chunkHVBBFCDH_cjs.postJsonToApi({
758
- url: this.config.url({
759
- path: "/chat/completions",
760
- modelId: this.modelId
761
- }),
762
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
763
- body,
764
- failedResponseHandler: openaiFailedResponseHandler,
765
- successfulResponseHandler: chunkHVBBFCDH_cjs.createEventSourceResponseHandler(
766
- openaiChatChunkSchema
767
- ),
768
- abortSignal: options.abortSignal,
769
- fetch: this.config.fetch
770
- });
771
- const toolCalls = [];
772
- let finishReason = "unknown";
773
- const usage = {
774
- inputTokens: void 0,
775
- outputTokens: void 0,
776
- totalTokens: void 0
777
- };
778
- let isFirstChunk = true;
779
- let isActiveText = false;
780
- const providerMetadata = { openai: {} };
781
- return {
782
- stream: response.pipeThrough(
783
- new TransformStream({
784
- start(controller) {
785
- controller.enqueue({ type: "stream-start", warnings });
786
- },
787
- transform(chunk, controller) {
788
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
789
- if (options.includeRawChunks) {
790
- controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
791
- }
792
- if (!chunk.success) {
793
- finishReason = "error";
794
- controller.enqueue({ type: "error", error: chunk.error });
795
- return;
796
- }
797
- const value = chunk.value;
798
- if ("error" in value) {
799
- finishReason = "error";
800
- controller.enqueue({ type: "error", error: value.error });
801
- return;
802
- }
803
- if (isFirstChunk) {
804
- isFirstChunk = false;
805
- controller.enqueue({
806
- type: "response-metadata",
807
- ...getResponseMetadata(value)
808
- });
809
- }
810
- if (value.usage != null) {
811
- usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
812
- usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
813
- usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
814
- usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
815
- usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
816
- if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
817
- providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
818
- }
819
- if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
820
- providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
821
- }
822
- }
823
- const choice = value.choices[0];
824
- if ((choice == null ? void 0 : choice.finish_reason) != null) {
825
- finishReason = mapOpenAIFinishReason(choice.finish_reason);
826
- }
827
- if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
828
- providerMetadata.openai.logprobs = choice.logprobs.content;
829
- }
830
- if ((choice == null ? void 0 : choice.delta) == null) {
831
- return;
832
- }
833
- const delta = choice.delta;
834
- if (delta.content != null) {
835
- if (!isActiveText) {
836
- controller.enqueue({ type: "text-start", id: "0" });
837
- isActiveText = true;
838
- }
839
- controller.enqueue({
840
- type: "text-delta",
841
- id: "0",
842
- delta: delta.content
843
- });
844
- }
845
- if (delta.tool_calls != null) {
846
- for (const toolCallDelta of delta.tool_calls) {
847
- const index = toolCallDelta.index;
848
- if (toolCalls[index] == null) {
849
- if (toolCallDelta.type !== "function") {
850
- throw new chunkHVBBFCDH_cjs.InvalidResponseDataError({
851
- data: toolCallDelta,
852
- message: `Expected 'function' type.`
853
- });
854
- }
855
- if (toolCallDelta.id == null) {
856
- throw new chunkHVBBFCDH_cjs.InvalidResponseDataError({
857
- data: toolCallDelta,
858
- message: `Expected 'id' to be a string.`
859
- });
860
- }
861
- if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
862
- throw new chunkHVBBFCDH_cjs.InvalidResponseDataError({
863
- data: toolCallDelta,
864
- message: `Expected 'function.name' to be a string.`
865
- });
866
- }
867
- controller.enqueue({
868
- type: "tool-input-start",
869
- id: toolCallDelta.id,
870
- toolName: toolCallDelta.function.name
871
- });
872
- toolCalls[index] = {
873
- id: toolCallDelta.id,
874
- type: "function",
875
- function: {
876
- name: toolCallDelta.function.name,
877
- arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
878
- },
879
- hasFinished: false
880
- };
881
- const toolCall2 = toolCalls[index];
882
- if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
883
- if (toolCall2.function.arguments.length > 0) {
884
- controller.enqueue({
885
- type: "tool-input-delta",
886
- id: toolCall2.id,
887
- delta: toolCall2.function.arguments
888
- });
889
- }
890
- if (chunkHVBBFCDH_cjs.isParsableJson(toolCall2.function.arguments)) {
891
- controller.enqueue({
892
- type: "tool-input-end",
893
- id: toolCall2.id
894
- });
895
- controller.enqueue({
896
- type: "tool-call",
897
- toolCallId: (_q = toolCall2.id) != null ? _q : chunkHVBBFCDH_cjs.generateId(),
898
- toolName: toolCall2.function.name,
899
- input: toolCall2.function.arguments
900
- });
901
- toolCall2.hasFinished = true;
902
- }
903
- }
904
- continue;
905
- }
906
- const toolCall = toolCalls[index];
907
- if (toolCall.hasFinished) {
908
- continue;
909
- }
910
- if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
911
- toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
912
- }
913
- controller.enqueue({
914
- type: "tool-input-delta",
915
- id: toolCall.id,
916
- delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
917
- });
918
- if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && chunkHVBBFCDH_cjs.isParsableJson(toolCall.function.arguments)) {
919
- controller.enqueue({
920
- type: "tool-input-end",
921
- id: toolCall.id
922
- });
923
- controller.enqueue({
924
- type: "tool-call",
925
- toolCallId: (_x = toolCall.id) != null ? _x : chunkHVBBFCDH_cjs.generateId(),
926
- toolName: toolCall.function.name,
927
- input: toolCall.function.arguments
928
- });
929
- toolCall.hasFinished = true;
930
- }
931
- }
932
- }
933
- if (delta.annotations != null) {
934
- for (const annotation of delta.annotations) {
935
- controller.enqueue({
936
- type: "source",
937
- sourceType: "url",
938
- id: chunkHVBBFCDH_cjs.generateId(),
939
- url: annotation.url,
940
- title: annotation.title
941
- });
942
- }
943
- }
944
- },
945
- flush(controller) {
946
- if (isActiveText) {
947
- controller.enqueue({ type: "text-end", id: "0" });
948
- }
949
- controller.enqueue({
950
- type: "finish",
951
- finishReason,
952
- usage,
953
- ...providerMetadata != null ? { providerMetadata } : {}
954
- });
955
- }
956
- })
957
- ),
958
- request: { body },
959
- response: { headers: responseHeaders }
960
- };
961
- }
962
- };
963
- var openaiTokenUsageSchema = v4.z.object({
964
- prompt_tokens: v4.z.number().nullish(),
965
- completion_tokens: v4.z.number().nullish(),
966
- total_tokens: v4.z.number().nullish(),
967
- prompt_tokens_details: v4.z.object({
968
- cached_tokens: v4.z.number().nullish()
969
- }).nullish(),
970
- completion_tokens_details: v4.z.object({
971
- reasoning_tokens: v4.z.number().nullish(),
972
- accepted_prediction_tokens: v4.z.number().nullish(),
973
- rejected_prediction_tokens: v4.z.number().nullish()
974
- }).nullish()
975
- }).nullish();
976
- var openaiChatResponseSchema = v4.z.object({
977
- id: v4.z.string().nullish(),
978
- created: v4.z.number().nullish(),
979
- model: v4.z.string().nullish(),
980
- choices: v4.z.array(
981
- v4.z.object({
982
- message: v4.z.object({
983
- role: v4.z.literal("assistant").nullish(),
984
- content: v4.z.string().nullish(),
985
- tool_calls: v4.z.array(
986
- v4.z.object({
987
- id: v4.z.string().nullish(),
988
- type: v4.z.literal("function"),
989
- function: v4.z.object({
990
- name: v4.z.string(),
991
- arguments: v4.z.string()
992
- })
993
- })
994
- ).nullish(),
995
- annotations: v4.z.array(
996
- v4.z.object({
997
- type: v4.z.literal("url_citation"),
998
- start_index: v4.z.number(),
999
- end_index: v4.z.number(),
1000
- url: v4.z.string(),
1001
- title: v4.z.string()
1002
- })
1003
- ).nullish()
1004
- }),
1005
- index: v4.z.number(),
1006
- logprobs: v4.z.object({
1007
- content: v4.z.array(
1008
- v4.z.object({
1009
- token: v4.z.string(),
1010
- logprob: v4.z.number(),
1011
- top_logprobs: v4.z.array(
1012
- v4.z.object({
1013
- token: v4.z.string(),
1014
- logprob: v4.z.number()
1015
- })
1016
- )
1017
- })
1018
- ).nullish()
1019
- }).nullish(),
1020
- finish_reason: v4.z.string().nullish()
1021
- })
1022
- ),
1023
- usage: openaiTokenUsageSchema
1024
- });
1025
- var openaiChatChunkSchema = v4.z.union([
1026
- v4.z.object({
1027
- id: v4.z.string().nullish(),
1028
- created: v4.z.number().nullish(),
1029
- model: v4.z.string().nullish(),
1030
- choices: v4.z.array(
1031
- v4.z.object({
1032
- delta: v4.z.object({
1033
- role: v4.z.enum(["assistant"]).nullish(),
1034
- content: v4.z.string().nullish(),
1035
- tool_calls: v4.z.array(
1036
- v4.z.object({
1037
- index: v4.z.number(),
1038
- id: v4.z.string().nullish(),
1039
- type: v4.z.literal("function").nullish(),
1040
- function: v4.z.object({
1041
- name: v4.z.string().nullish(),
1042
- arguments: v4.z.string().nullish()
1043
- })
1044
- })
1045
- ).nullish(),
1046
- annotations: v4.z.array(
1047
- v4.z.object({
1048
- type: v4.z.literal("url_citation"),
1049
- start_index: v4.z.number(),
1050
- end_index: v4.z.number(),
1051
- url: v4.z.string(),
1052
- title: v4.z.string()
1053
- })
1054
- ).nullish()
1055
- }).nullish(),
1056
- logprobs: v4.z.object({
1057
- content: v4.z.array(
1058
- v4.z.object({
1059
- token: v4.z.string(),
1060
- logprob: v4.z.number(),
1061
- top_logprobs: v4.z.array(
1062
- v4.z.object({
1063
- token: v4.z.string(),
1064
- logprob: v4.z.number()
1065
- })
1066
- )
1067
- })
1068
- ).nullish()
1069
- }).nullish(),
1070
- finish_reason: v4.z.string().nullish(),
1071
- index: v4.z.number()
1072
- })
1073
- ),
1074
- usage: openaiTokenUsageSchema
1075
- }),
1076
- openaiErrorDataSchema
1077
- ]);
1078
- function isReasoningModel(modelId) {
1079
- return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
1080
- }
1081
- function supportsFlexProcessing(modelId) {
1082
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
1083
- }
1084
- function supportsPriorityProcessing(modelId) {
1085
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1086
- }
1087
- function getSystemMessageMode(modelId) {
1088
- var _a, _b;
1089
- if (!isReasoningModel(modelId)) {
1090
- return "system";
1091
- }
1092
- return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
1093
- }
1094
- var reasoningModels = {
1095
- "o1-mini": {
1096
- systemMessageMode: "remove"
1097
- },
1098
- "o1-mini-2024-09-12": {
1099
- systemMessageMode: "remove"
1100
- },
1101
- "o1-preview": {
1102
- systemMessageMode: "remove"
1103
- },
1104
- "o1-preview-2024-09-12": {
1105
- systemMessageMode: "remove"
1106
- },
1107
- o3: {
1108
- systemMessageMode: "developer"
1109
- },
1110
- "o3-2025-04-16": {
1111
- systemMessageMode: "developer"
1112
- },
1113
- "o3-mini": {
1114
- systemMessageMode: "developer"
1115
- },
1116
- "o3-mini-2025-01-31": {
1117
- systemMessageMode: "developer"
1118
- },
1119
- "o4-mini": {
1120
- systemMessageMode: "developer"
1121
- },
1122
- "o4-mini-2025-04-16": {
1123
- systemMessageMode: "developer"
1124
- }
1125
- };
1126
- function convertToOpenAICompletionPrompt({
1127
- prompt,
1128
- user = "user",
1129
- assistant = "assistant"
1130
- }) {
1131
- let text = "";
1132
- if (prompt[0].role === "system") {
1133
- text += `${prompt[0].content}
1134
-
1135
- `;
1136
- prompt = prompt.slice(1);
1137
- }
1138
- for (const { role, content } of prompt) {
1139
- switch (role) {
1140
- case "system": {
1141
- throw new chunkHVBBFCDH_cjs.InvalidPromptError({
1142
- message: "Unexpected system message in prompt: ${content}",
1143
- prompt
1144
- });
1145
- }
1146
- case "user": {
1147
- const userMessage = content.map((part) => {
1148
- switch (part.type) {
1149
- case "text": {
1150
- return part.text;
1151
- }
1152
- }
1153
- }).filter(Boolean).join("");
1154
- text += `${user}:
1155
- ${userMessage}
1156
-
1157
- `;
1158
- break;
1159
- }
1160
- case "assistant": {
1161
- const assistantMessage = content.map((part) => {
1162
- switch (part.type) {
1163
- case "text": {
1164
- return part.text;
1165
- }
1166
- case "tool-call": {
1167
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
1168
- functionality: "tool-call messages"
1169
- });
1170
- }
1171
- }
1172
- }).join("");
1173
- text += `${assistant}:
1174
- ${assistantMessage}
1175
-
1176
- `;
1177
- break;
1178
- }
1179
- case "tool": {
1180
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
1181
- functionality: "tool messages"
1182
- });
1183
- }
1184
- default: {
1185
- const _exhaustiveCheck = role;
1186
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1187
- }
1188
- }
1189
- }
1190
- text += `${assistant}:
1191
- `;
1192
- return {
1193
- prompt: text,
1194
- stopSequences: [`
1195
- ${user}:`]
1196
- };
1197
- }
1198
- function getResponseMetadata2({
1199
- id,
1200
- model,
1201
- created
1202
- }) {
1203
- return {
1204
- id: id != null ? id : void 0,
1205
- modelId: model != null ? model : void 0,
1206
- timestamp: created != null ? new Date(created * 1e3) : void 0
1207
- };
1208
- }
1209
- function mapOpenAIFinishReason2(finishReason) {
1210
- switch (finishReason) {
1211
- case "stop":
1212
- return "stop";
1213
- case "length":
1214
- return "length";
1215
- case "content_filter":
1216
- return "content-filter";
1217
- case "function_call":
1218
- case "tool_calls":
1219
- return "tool-calls";
1220
- default:
1221
- return "unknown";
1222
- }
1223
- }
1224
- var openaiCompletionProviderOptions = v4.z.object({
1225
- /**
1226
- Echo back the prompt in addition to the completion.
1227
- */
1228
- echo: v4.z.boolean().optional(),
1229
- /**
1230
- Modify the likelihood of specified tokens appearing in the completion.
1231
-
1232
- Accepts a JSON object that maps tokens (specified by their token ID in
1233
- the GPT tokenizer) to an associated bias value from -100 to 100. You
1234
- can use this tokenizer tool to convert text to token IDs. Mathematically,
1235
- the bias is added to the logits generated by the model prior to sampling.
1236
- The exact effect will vary per model, but values between -1 and 1 should
1237
- decrease or increase likelihood of selection; values like -100 or 100
1238
- should result in a ban or exclusive selection of the relevant token.
1239
-
1240
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1241
- token from being generated.
1242
- */
1243
- logitBias: v4.z.record(v4.z.string(), v4.z.number()).optional(),
1244
- /**
1245
- The suffix that comes after a completion of inserted text.
1246
- */
1247
- suffix: v4.z.string().optional(),
1248
- /**
1249
- A unique identifier representing your end-user, which can help OpenAI to
1250
- monitor and detect abuse. Learn more.
1251
- */
1252
- user: v4.z.string().optional(),
1253
- /**
1254
- Return the log probabilities of the tokens. Including logprobs will increase
1255
- the response size and can slow down response times. However, it can
1256
- be useful to better understand how the model is behaving.
1257
- Setting to true will return the log probabilities of the tokens that
1258
- were generated.
1259
- Setting to a number will return the log probabilities of the top n
1260
- tokens that were generated.
1261
- */
1262
- logprobs: v4.z.union([v4.z.boolean(), v4.z.number()]).optional()
1263
- });
1264
- var OpenAICompletionLanguageModel = class {
1265
- constructor(modelId, config) {
1266
- this.specificationVersion = "v2";
1267
- this.supportedUrls = {
1268
- // No URLs are supported for completion models.
1269
- };
1270
- this.modelId = modelId;
1271
- this.config = config;
1272
- }
1273
- get providerOptionsName() {
1274
- return this.config.provider.split(".")[0].trim();
1275
- }
1276
- get provider() {
1277
- return this.config.provider;
1278
- }
1279
- async getArgs({
1280
- prompt,
1281
- maxOutputTokens,
1282
- temperature,
1283
- topP,
1284
- topK,
1285
- frequencyPenalty,
1286
- presencePenalty,
1287
- stopSequences: userStopSequences,
1288
- responseFormat,
1289
- tools,
1290
- toolChoice,
1291
- seed,
1292
- providerOptions
1293
- }) {
1294
- const warnings = [];
1295
- const openaiOptions = {
1296
- ...await chunkHVBBFCDH_cjs.parseProviderOptions({
1297
- provider: "openai",
1298
- providerOptions,
1299
- schema: openaiCompletionProviderOptions
1300
- }),
1301
- ...await chunkHVBBFCDH_cjs.parseProviderOptions({
1302
- provider: this.providerOptionsName,
1303
- providerOptions,
1304
- schema: openaiCompletionProviderOptions
1305
- })
1306
- };
1307
- if (topK != null) {
1308
- warnings.push({ type: "unsupported-setting", setting: "topK" });
1309
- }
1310
- if (tools == null ? void 0 : tools.length) {
1311
- warnings.push({ type: "unsupported-setting", setting: "tools" });
1312
- }
1313
- if (toolChoice != null) {
1314
- warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
1315
- }
1316
- if (responseFormat != null && responseFormat.type !== "text") {
1317
- warnings.push({
1318
- type: "unsupported-setting",
1319
- setting: "responseFormat",
1320
- details: "JSON response format is not supported."
1321
- });
1322
- }
1323
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
1324
- const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1325
- return {
1326
- args: {
1327
- // model id:
1328
- model: this.modelId,
1329
- // model specific settings:
1330
- echo: openaiOptions.echo,
1331
- logit_bias: openaiOptions.logitBias,
1332
- logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
1333
- suffix: openaiOptions.suffix,
1334
- user: openaiOptions.user,
1335
- // standardized settings:
1336
- max_tokens: maxOutputTokens,
1337
- temperature,
1338
- top_p: topP,
1339
- frequency_penalty: frequencyPenalty,
1340
- presence_penalty: presencePenalty,
1341
- seed,
1342
- // prompt:
1343
- prompt: completionPrompt,
1344
- // stop sequences:
1345
- stop: stop.length > 0 ? stop : void 0
1346
- },
1347
- warnings
1348
- };
1349
- }
1350
- async doGenerate(options) {
1351
- var _a, _b, _c;
1352
- const { args, warnings } = await this.getArgs(options);
1353
- const {
1354
- responseHeaders,
1355
- value: response,
1356
- rawValue: rawResponse
1357
- } = await chunkHVBBFCDH_cjs.postJsonToApi({
1358
- url: this.config.url({
1359
- path: "/completions",
1360
- modelId: this.modelId
1361
- }),
1362
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
1363
- body: args,
1364
- failedResponseHandler: openaiFailedResponseHandler,
1365
- successfulResponseHandler: chunkHVBBFCDH_cjs.createJsonResponseHandler(
1366
- openaiCompletionResponseSchema
1367
- ),
1368
- abortSignal: options.abortSignal,
1369
- fetch: this.config.fetch
1370
- });
1371
- const choice = response.choices[0];
1372
- const providerMetadata = { openai: {} };
1373
- if (choice.logprobs != null) {
1374
- providerMetadata.openai.logprobs = choice.logprobs;
1375
- }
1376
- return {
1377
- content: [{ type: "text", text: choice.text }],
1378
- usage: {
1379
- inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1380
- outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1381
- totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1382
- },
1383
- finishReason: mapOpenAIFinishReason2(choice.finish_reason),
1384
- request: { body: args },
1385
- response: {
1386
- ...getResponseMetadata2(response),
1387
- headers: responseHeaders,
1388
- body: rawResponse
1389
- },
1390
- providerMetadata,
1391
- warnings
1392
- };
1393
- }
1394
- async doStream(options) {
1395
- const { args, warnings } = await this.getArgs(options);
1396
- const body = {
1397
- ...args,
1398
- stream: true,
1399
- stream_options: {
1400
- include_usage: true
1401
- }
1402
- };
1403
- const { responseHeaders, value: response } = await chunkHVBBFCDH_cjs.postJsonToApi({
1404
- url: this.config.url({
1405
- path: "/completions",
1406
- modelId: this.modelId
1407
- }),
1408
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
1409
- body,
1410
- failedResponseHandler: openaiFailedResponseHandler,
1411
- successfulResponseHandler: chunkHVBBFCDH_cjs.createEventSourceResponseHandler(
1412
- openaiCompletionChunkSchema
1413
- ),
1414
- abortSignal: options.abortSignal,
1415
- fetch: this.config.fetch
1416
- });
1417
- let finishReason = "unknown";
1418
- const providerMetadata = { openai: {} };
1419
- const usage = {
1420
- inputTokens: void 0,
1421
- outputTokens: void 0,
1422
- totalTokens: void 0
1423
- };
1424
- let isFirstChunk = true;
1425
- return {
1426
- stream: response.pipeThrough(
1427
- new TransformStream({
1428
- start(controller) {
1429
- controller.enqueue({ type: "stream-start", warnings });
1430
- },
1431
- transform(chunk, controller) {
1432
- if (options.includeRawChunks) {
1433
- controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1434
- }
1435
- if (!chunk.success) {
1436
- finishReason = "error";
1437
- controller.enqueue({ type: "error", error: chunk.error });
1438
- return;
1439
- }
1440
- const value = chunk.value;
1441
- if ("error" in value) {
1442
- finishReason = "error";
1443
- controller.enqueue({ type: "error", error: value.error });
1444
- return;
1445
- }
1446
- if (isFirstChunk) {
1447
- isFirstChunk = false;
1448
- controller.enqueue({
1449
- type: "response-metadata",
1450
- ...getResponseMetadata2(value)
1451
- });
1452
- controller.enqueue({ type: "text-start", id: "0" });
1453
- }
1454
- if (value.usage != null) {
1455
- usage.inputTokens = value.usage.prompt_tokens;
1456
- usage.outputTokens = value.usage.completion_tokens;
1457
- usage.totalTokens = value.usage.total_tokens;
1458
- }
1459
- const choice = value.choices[0];
1460
- if ((choice == null ? void 0 : choice.finish_reason) != null) {
1461
- finishReason = mapOpenAIFinishReason2(choice.finish_reason);
1462
- }
1463
- if ((choice == null ? void 0 : choice.logprobs) != null) {
1464
- providerMetadata.openai.logprobs = choice.logprobs;
1465
- }
1466
- if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
1467
- controller.enqueue({
1468
- type: "text-delta",
1469
- id: "0",
1470
- delta: choice.text
1471
- });
1472
- }
1473
- },
1474
- flush(controller) {
1475
- if (!isFirstChunk) {
1476
- controller.enqueue({ type: "text-end", id: "0" });
1477
- }
1478
- controller.enqueue({
1479
- type: "finish",
1480
- finishReason,
1481
- providerMetadata,
1482
- usage
1483
- });
1484
- }
1485
- })
1486
- ),
1487
- request: { body },
1488
- response: { headers: responseHeaders }
1489
- };
1490
- }
1491
- };
1492
- var usageSchema = v4.z.object({
1493
- prompt_tokens: v4.z.number(),
1494
- completion_tokens: v4.z.number(),
1495
- total_tokens: v4.z.number()
1496
- });
1497
- var openaiCompletionResponseSchema = v4.z.object({
1498
- id: v4.z.string().nullish(),
1499
- created: v4.z.number().nullish(),
1500
- model: v4.z.string().nullish(),
1501
- choices: v4.z.array(
1502
- v4.z.object({
1503
- text: v4.z.string(),
1504
- finish_reason: v4.z.string(),
1505
- logprobs: v4.z.object({
1506
- tokens: v4.z.array(v4.z.string()),
1507
- token_logprobs: v4.z.array(v4.z.number()),
1508
- top_logprobs: v4.z.array(v4.z.record(v4.z.string(), v4.z.number())).nullish()
1509
- }).nullish()
1510
- })
1511
- ),
1512
- usage: usageSchema.nullish()
1513
- });
1514
- var openaiCompletionChunkSchema = v4.z.union([
1515
- v4.z.object({
1516
- id: v4.z.string().nullish(),
1517
- created: v4.z.number().nullish(),
1518
- model: v4.z.string().nullish(),
1519
- choices: v4.z.array(
1520
- v4.z.object({
1521
- text: v4.z.string(),
1522
- finish_reason: v4.z.string().nullish(),
1523
- index: v4.z.number(),
1524
- logprobs: v4.z.object({
1525
- tokens: v4.z.array(v4.z.string()),
1526
- token_logprobs: v4.z.array(v4.z.number()),
1527
- top_logprobs: v4.z.array(v4.z.record(v4.z.string(), v4.z.number())).nullish()
1528
- }).nullish()
1529
- })
1530
- ),
1531
- usage: usageSchema.nullish()
1532
- }),
1533
- openaiErrorDataSchema
1534
- ]);
1535
- var openaiEmbeddingProviderOptions = v4.z.object({
1536
- /**
1537
- The number of dimensions the resulting output embeddings should have.
1538
- Only supported in text-embedding-3 and later models.
1539
- */
1540
- dimensions: v4.z.number().optional(),
1541
- /**
1542
- A unique identifier representing your end-user, which can help OpenAI to
1543
- monitor and detect abuse. Learn more.
1544
- */
1545
- user: v4.z.string().optional()
1546
- });
1547
- var OpenAIEmbeddingModel = class {
1548
- constructor(modelId, config) {
1549
- this.specificationVersion = "v2";
1550
- this.maxEmbeddingsPerCall = 2048;
1551
- this.supportsParallelCalls = true;
1552
- this.modelId = modelId;
1553
- this.config = config;
1554
- }
1555
- get provider() {
1556
- return this.config.provider;
1557
- }
1558
- async doEmbed({
1559
- values,
1560
- headers,
1561
- abortSignal,
1562
- providerOptions
1563
- }) {
1564
- var _a;
1565
- if (values.length > this.maxEmbeddingsPerCall) {
1566
- throw new chunkHVBBFCDH_cjs.TooManyEmbeddingValuesForCallError({
1567
- provider: this.provider,
1568
- modelId: this.modelId,
1569
- maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1570
- values
1571
- });
1572
- }
1573
- const openaiOptions = (_a = await chunkHVBBFCDH_cjs.parseProviderOptions({
1574
- provider: "openai",
1575
- providerOptions,
1576
- schema: openaiEmbeddingProviderOptions
1577
- })) != null ? _a : {};
1578
- const {
1579
- responseHeaders,
1580
- value: response,
1581
- rawValue
1582
- } = await chunkHVBBFCDH_cjs.postJsonToApi({
1583
- url: this.config.url({
1584
- path: "/embeddings",
1585
- modelId: this.modelId
1586
- }),
1587
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), headers),
1588
- body: {
1589
- model: this.modelId,
1590
- input: values,
1591
- encoding_format: "float",
1592
- dimensions: openaiOptions.dimensions,
1593
- user: openaiOptions.user
1594
- },
1595
- failedResponseHandler: openaiFailedResponseHandler,
1596
- successfulResponseHandler: chunkHVBBFCDH_cjs.createJsonResponseHandler(
1597
- openaiTextEmbeddingResponseSchema
1598
- ),
1599
- abortSignal,
1600
- fetch: this.config.fetch
1601
- });
1602
- return {
1603
- embeddings: response.data.map((item) => item.embedding),
1604
- usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1605
- response: { headers: responseHeaders, body: rawValue }
1606
- };
1607
- }
1608
- };
1609
- var openaiTextEmbeddingResponseSchema = v4.z.object({
1610
- data: v4.z.array(v4.z.object({ embedding: v4.z.array(v4.z.number()) })),
1611
- usage: v4.z.object({ prompt_tokens: v4.z.number() }).nullish()
1612
- });
1613
- var modelMaxImagesPerCall = {
1614
- "dall-e-3": 1,
1615
- "dall-e-2": 10,
1616
- "gpt-image-1": 10
1617
- };
1618
- var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1619
- var OpenAIImageModel = class {
1620
- constructor(modelId, config) {
1621
- this.modelId = modelId;
1622
- this.config = config;
1623
- this.specificationVersion = "v2";
1624
- }
1625
- get maxImagesPerCall() {
1626
- var _a;
1627
- return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1628
- }
1629
- get provider() {
1630
- return this.config.provider;
1631
- }
1632
- async doGenerate({
1633
- prompt,
1634
- n,
1635
- size,
1636
- aspectRatio,
1637
- seed,
1638
- providerOptions,
1639
- headers,
1640
- abortSignal
1641
- }) {
1642
- var _a, _b, _c, _d;
1643
- const warnings = [];
1644
- if (aspectRatio != null) {
1645
- warnings.push({
1646
- type: "unsupported-setting",
1647
- setting: "aspectRatio",
1648
- details: "This model does not support aspect ratio. Use `size` instead."
1649
- });
1650
- }
1651
- if (seed != null) {
1652
- warnings.push({ type: "unsupported-setting", setting: "seed" });
1653
- }
1654
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1655
- const { value: response, responseHeaders } = await chunkHVBBFCDH_cjs.postJsonToApi({
1656
- url: this.config.url({
1657
- path: "/images/generations",
1658
- modelId: this.modelId
1659
- }),
1660
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), headers),
1661
- body: {
1662
- model: this.modelId,
1663
- prompt,
1664
- n,
1665
- size,
1666
- ...(_d = providerOptions.openai) != null ? _d : {},
1667
- ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1668
- },
1669
- failedResponseHandler: openaiFailedResponseHandler,
1670
- successfulResponseHandler: chunkHVBBFCDH_cjs.createJsonResponseHandler(
1671
- openaiImageResponseSchema
1672
- ),
1673
- abortSignal,
1674
- fetch: this.config.fetch
1675
- });
1676
- return {
1677
- images: response.data.map((item) => item.b64_json),
1678
- warnings,
1679
- response: {
1680
- timestamp: currentDate,
1681
- modelId: this.modelId,
1682
- headers: responseHeaders
1683
- },
1684
- providerMetadata: {
1685
- openai: {
1686
- images: response.data.map(
1687
- (item) => item.revised_prompt ? {
1688
- revisedPrompt: item.revised_prompt
1689
- } : null
1690
- )
1691
- }
1692
- }
1693
- };
1694
- }
1695
- };
1696
- var openaiImageResponseSchema = v4.z.object({
1697
- data: v4.z.array(
1698
- v4.z.object({ b64_json: v4.z.string(), revised_prompt: v4.z.string().optional() })
1699
- )
1700
- });
1701
- var codeInterpreterArgsSchema = v4.z.object({
1702
- container: v4.z.union([
1703
- v4.z.string(),
1704
- v4.z.object({
1705
- fileIds: v4.z.array(v4.z.string()).optional()
1706
- })
1707
- ]).optional()
1708
- });
1709
- var codeInterpreter = chunkHVBBFCDH_cjs.createProviderDefinedToolFactory({
1710
- id: "openai.code_interpreter",
1711
- name: "code_interpreter",
1712
- inputSchema: v4.z.object({})
1713
- });
1714
- var openaiTools = {
1715
- codeInterpreter,
1716
- fileSearch,
1717
- webSearchPreview
1718
- };
1719
- function isFileId(data, prefixes) {
1720
- if (!prefixes) return false;
1721
- return prefixes.some((prefix) => data.startsWith(prefix));
1722
- }
1723
- async function convertToOpenAIResponsesMessages({
1724
- prompt,
1725
- systemMessageMode,
1726
- fileIdPrefixes
1727
- }) {
1728
- var _a, _b, _c, _d, _e, _f;
1729
- const messages = [];
1730
- const warnings = [];
1731
- for (const { role, content } of prompt) {
1732
- switch (role) {
1733
- case "system": {
1734
- switch (systemMessageMode) {
1735
- case "system": {
1736
- messages.push({ role: "system", content });
1737
- break;
1738
- }
1739
- case "developer": {
1740
- messages.push({ role: "developer", content });
1741
- break;
1742
- }
1743
- case "remove": {
1744
- warnings.push({
1745
- type: "other",
1746
- message: "system messages are removed for this model"
1747
- });
1748
- break;
1749
- }
1750
- default: {
1751
- const _exhaustiveCheck = systemMessageMode;
1752
- throw new Error(
1753
- `Unsupported system message mode: ${_exhaustiveCheck}`
1754
- );
1755
- }
1756
- }
1757
- break;
1758
- }
1759
- case "user": {
1760
- messages.push({
1761
- role: "user",
1762
- content: content.map((part, index) => {
1763
- var _a2, _b2, _c2;
1764
- switch (part.type) {
1765
- case "text": {
1766
- return { type: "input_text", text: part.text };
1767
- }
1768
- case "file": {
1769
- if (part.mediaType.startsWith("image/")) {
1770
- const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
1771
- return {
1772
- type: "input_image",
1773
- ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
1774
- image_url: `data:${mediaType};base64,${chunkHVBBFCDH_cjs.convertToBase64(part.data)}`
1775
- },
1776
- detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
1777
- };
1778
- } else if (part.mediaType === "application/pdf") {
1779
- if (part.data instanceof URL) {
1780
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
1781
- functionality: "PDF file parts with URLs"
1782
- });
1783
- }
1784
- return {
1785
- type: "input_file",
1786
- ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
1787
- filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
1788
- file_data: `data:application/pdf;base64,${chunkHVBBFCDH_cjs.convertToBase64(part.data)}`
1789
- }
1790
- };
1791
- } else {
1792
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
1793
- functionality: `file part media type ${part.mediaType}`
1794
- });
1795
- }
1796
- }
1797
- }
1798
- })
1799
- });
1800
- break;
1801
- }
1802
- case "assistant": {
1803
- const reasoningMessages = {};
1804
- for (const part of content) {
1805
- switch (part.type) {
1806
- case "text": {
1807
- messages.push({
1808
- role: "assistant",
1809
- content: [{ type: "output_text", text: part.text }],
1810
- id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
1811
- });
1812
- break;
1813
- }
1814
- case "tool-call": {
1815
- if (part.providerExecuted) {
1816
- break;
1817
- }
1818
- messages.push({
1819
- type: "function_call",
1820
- call_id: part.toolCallId,
1821
- name: part.toolName,
1822
- arguments: JSON.stringify(part.input),
1823
- id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
1824
- });
1825
- break;
1826
- }
1827
- case "tool-result": {
1828
- warnings.push({
1829
- type: "other",
1830
- message: `tool result parts in assistant messages are not supported for OpenAI responses`
1831
- });
1832
- break;
1833
- }
1834
- case "reasoning": {
1835
- const providerOptions = await chunkHVBBFCDH_cjs.parseProviderOptions({
1836
- provider: "openai",
1837
- providerOptions: part.providerOptions,
1838
- schema: openaiResponsesReasoningProviderOptionsSchema
1839
- });
1840
- const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
1841
- if (reasoningId != null) {
1842
- const existingReasoningMessage = reasoningMessages[reasoningId];
1843
- const summaryParts = [];
1844
- if (part.text.length > 0) {
1845
- summaryParts.push({ type: "summary_text", text: part.text });
1846
- } else if (existingReasoningMessage !== void 0) {
1847
- warnings.push({
1848
- type: "other",
1849
- message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
1850
- });
1851
- }
1852
- if (existingReasoningMessage === void 0) {
1853
- reasoningMessages[reasoningId] = {
1854
- type: "reasoning",
1855
- id: reasoningId,
1856
- encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
1857
- summary: summaryParts
1858
- };
1859
- messages.push(reasoningMessages[reasoningId]);
1860
- } else {
1861
- existingReasoningMessage.summary.push(...summaryParts);
1862
- }
1863
- } else {
1864
- warnings.push({
1865
- type: "other",
1866
- message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
1867
- });
1868
- }
1869
- break;
1870
- }
1871
- }
1872
- }
1873
- break;
1874
- }
1875
- case "tool": {
1876
- for (const part of content) {
1877
- const output = part.output;
1878
- let contentValue;
1879
- switch (output.type) {
1880
- case "text":
1881
- case "error-text":
1882
- contentValue = output.value;
1883
- break;
1884
- case "content":
1885
- case "json":
1886
- case "error-json":
1887
- contentValue = JSON.stringify(output.value);
1888
- break;
1889
- }
1890
- messages.push({
1891
- type: "function_call_output",
1892
- call_id: part.toolCallId,
1893
- output: contentValue
1894
- });
1895
- }
1896
- break;
1897
- }
1898
- default: {
1899
- const _exhaustiveCheck = role;
1900
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1901
- }
1902
- }
1903
- }
1904
- return { messages, warnings };
1905
- }
1906
- var openaiResponsesReasoningProviderOptionsSchema = v4.z.object({
1907
- itemId: v4.z.string().nullish(),
1908
- reasoningEncryptedContent: v4.z.string().nullish()
1909
- });
1910
- function mapOpenAIResponseFinishReason({
1911
- finishReason,
1912
- hasToolCalls
1913
- }) {
1914
- switch (finishReason) {
1915
- case void 0:
1916
- case null:
1917
- return hasToolCalls ? "tool-calls" : "stop";
1918
- case "max_output_tokens":
1919
- return "length";
1920
- case "content_filter":
1921
- return "content-filter";
1922
- default:
1923
- return hasToolCalls ? "tool-calls" : "unknown";
1924
- }
1925
- }
1926
- function prepareResponsesTools({
1927
- tools,
1928
- toolChoice,
1929
- strictJsonSchema
1930
- }) {
1931
- tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
1932
- const toolWarnings = [];
1933
- if (tools == null) {
1934
- return { tools: void 0, toolChoice: void 0, toolWarnings };
1935
- }
1936
- const openaiTools2 = [];
1937
- for (const tool of tools) {
1938
- switch (tool.type) {
1939
- case "function":
1940
- openaiTools2.push({
1941
- type: "function",
1942
- name: tool.name,
1943
- description: tool.description,
1944
- parameters: tool.inputSchema,
1945
- strict: strictJsonSchema
1946
- });
1947
- break;
1948
- case "provider-defined": {
1949
- switch (tool.id) {
1950
- case "openai.file_search": {
1951
- const args = fileSearchArgsSchema.parse(tool.args);
1952
- openaiTools2.push({
1953
- type: "file_search",
1954
- vector_store_ids: args.vectorStoreIds,
1955
- max_num_results: args.maxNumResults,
1956
- ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
1957
- filters: args.filters
1958
- });
1959
- break;
1960
- }
1961
- case "openai.web_search_preview": {
1962
- const args = webSearchPreviewArgsSchema.parse(tool.args);
1963
- openaiTools2.push({
1964
- type: "web_search_preview",
1965
- search_context_size: args.searchContextSize,
1966
- user_location: args.userLocation
1967
- });
1968
- break;
1969
- }
1970
- case "openai.code_interpreter": {
1971
- const args = codeInterpreterArgsSchema.parse(tool.args);
1972
- openaiTools2.push({
1973
- type: "code_interpreter",
1974
- container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
1975
- });
1976
- break;
1977
- }
1978
- default: {
1979
- toolWarnings.push({ type: "unsupported-tool", tool });
1980
- break;
1981
- }
1982
- }
1983
- break;
1984
- }
1985
- default:
1986
- toolWarnings.push({ type: "unsupported-tool", tool });
1987
- break;
1988
- }
1989
- }
1990
- if (toolChoice == null) {
1991
- return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
1992
- }
1993
- const type = toolChoice.type;
1994
- switch (type) {
1995
- case "auto":
1996
- case "none":
1997
- case "required":
1998
- return { tools: openaiTools2, toolChoice: type, toolWarnings };
1999
- case "tool":
2000
- return {
2001
- tools: openaiTools2,
2002
- toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
2003
- toolWarnings
2004
- };
2005
- default: {
2006
- const _exhaustiveCheck = type;
2007
- throw new chunkHVBBFCDH_cjs.UnsupportedFunctionalityError({
2008
- functionality: `tool choice type: ${_exhaustiveCheck}`
2009
- });
2010
- }
2011
- }
2012
- }
2013
- var TOP_LOGPROBS_MAX = 20;
2014
- var LOGPROBS_SCHEMA = v4.z.array(
2015
- v4.z.object({
2016
- token: v4.z.string(),
2017
- logprob: v4.z.number(),
2018
- top_logprobs: v4.z.array(
2019
- v4.z.object({
2020
- token: v4.z.string(),
2021
- logprob: v4.z.number()
2022
- })
2023
- )
2024
- })
2025
- );
2026
- var OpenAIResponsesLanguageModel = class {
2027
- constructor(modelId, config) {
2028
- this.specificationVersion = "v2";
2029
- this.supportedUrls = {
2030
- "image/*": [/^https?:\/\/.*$/]
2031
- };
2032
- this.modelId = modelId;
2033
- this.config = config;
2034
- }
2035
- get provider() {
2036
- return this.config.provider;
2037
- }
2038
- async getArgs({
2039
- maxOutputTokens,
2040
- temperature,
2041
- stopSequences,
2042
- topP,
2043
- topK,
2044
- presencePenalty,
2045
- frequencyPenalty,
2046
- seed,
2047
- prompt,
2048
- providerOptions,
2049
- tools,
2050
- toolChoice,
2051
- responseFormat
2052
- }) {
2053
- var _a, _b;
2054
- const warnings = [];
2055
- const modelConfig = getResponsesModelConfig(this.modelId);
2056
- if (topK != null) {
2057
- warnings.push({ type: "unsupported-setting", setting: "topK" });
2058
- }
2059
- if (seed != null) {
2060
- warnings.push({ type: "unsupported-setting", setting: "seed" });
2061
- }
2062
- if (presencePenalty != null) {
2063
- warnings.push({
2064
- type: "unsupported-setting",
2065
- setting: "presencePenalty"
2066
- });
2067
- }
2068
- if (frequencyPenalty != null) {
2069
- warnings.push({
2070
- type: "unsupported-setting",
2071
- setting: "frequencyPenalty"
2072
- });
2073
- }
2074
- if (stopSequences != null) {
2075
- warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2076
- }
2077
- const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
2078
- prompt,
2079
- systemMessageMode: modelConfig.systemMessageMode,
2080
- fileIdPrefixes: this.config.fileIdPrefixes
2081
- });
2082
- warnings.push(...messageWarnings);
2083
- const openaiOptions = await chunkHVBBFCDH_cjs.parseProviderOptions({
2084
- provider: "openai",
2085
- providerOptions,
2086
- schema: openaiResponsesProviderOptionsSchema
2087
- });
2088
- const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
2089
- const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
2090
- const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
2091
- const baseArgs = {
2092
- model: this.modelId,
2093
- input: messages,
2094
- temperature,
2095
- top_p: topP,
2096
- max_output_tokens: maxOutputTokens,
2097
- ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
2098
- text: {
2099
- ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2100
- format: responseFormat.schema != null ? {
2101
- type: "json_schema",
2102
- strict: strictJsonSchema,
2103
- name: (_b = responseFormat.name) != null ? _b : "response",
2104
- description: responseFormat.description,
2105
- schema: responseFormat.schema
2106
- } : { type: "json_object" }
2107
- },
2108
- ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
2109
- verbosity: openaiOptions.textVerbosity
2110
- }
2111
- }
2112
- },
2113
- // provider options:
2114
- metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2115
- parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2116
- previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2117
- store: openaiOptions == null ? void 0 : openaiOptions.store,
2118
- user: openaiOptions == null ? void 0 : openaiOptions.user,
2119
- instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2120
- service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
2121
- include: openaiOptionsInclude,
2122
- prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
2123
- safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
2124
- top_logprobs: topLogprobs,
2125
- // model-specific settings:
2126
- ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2127
- reasoning: {
2128
- ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2129
- effort: openaiOptions.reasoningEffort
2130
- },
2131
- ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
2132
- summary: openaiOptions.reasoningSummary
2133
- }
2134
- }
2135
- },
2136
- ...modelConfig.requiredAutoTruncation && {
2137
- truncation: "auto"
2138
- }
2139
- };
2140
- if (modelConfig.isReasoningModel) {
2141
- if (baseArgs.temperature != null) {
2142
- baseArgs.temperature = void 0;
2143
- warnings.push({
2144
- type: "unsupported-setting",
2145
- setting: "temperature",
2146
- details: "temperature is not supported for reasoning models"
2147
- });
2148
- }
2149
- if (baseArgs.top_p != null) {
2150
- baseArgs.top_p = void 0;
2151
- warnings.push({
2152
- type: "unsupported-setting",
2153
- setting: "topP",
2154
- details: "topP is not supported for reasoning models"
2155
- });
2156
- }
2157
- } else {
2158
- if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
2159
- warnings.push({
2160
- type: "unsupported-setting",
2161
- setting: "reasoningEffort",
2162
- details: "reasoningEffort is not supported for non-reasoning models"
2163
- });
2164
- }
2165
- if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
2166
- warnings.push({
2167
- type: "unsupported-setting",
2168
- setting: "reasoningSummary",
2169
- details: "reasoningSummary is not supported for non-reasoning models"
2170
- });
2171
- }
2172
- }
2173
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
2174
- warnings.push({
2175
- type: "unsupported-setting",
2176
- setting: "serviceTier",
2177
- details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
2178
- });
2179
- delete baseArgs.service_tier;
2180
- }
2181
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
2182
- warnings.push({
2183
- type: "unsupported-setting",
2184
- setting: "serviceTier",
2185
- details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
2186
- });
2187
- delete baseArgs.service_tier;
2188
- }
2189
- const {
2190
- tools: openaiTools2,
2191
- toolChoice: openaiToolChoice,
2192
- toolWarnings
2193
- } = prepareResponsesTools({
2194
- tools,
2195
- toolChoice,
2196
- strictJsonSchema
2197
- });
2198
- return {
2199
- args: {
2200
- ...baseArgs,
2201
- tools: openaiTools2,
2202
- tool_choice: openaiToolChoice
2203
- },
2204
- warnings: [...warnings, ...toolWarnings]
2205
- };
2206
- }
2207
- async doGenerate(options) {
2208
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
2209
- const { args: body, warnings } = await this.getArgs(options);
2210
- const url = this.config.url({
2211
- path: "/responses",
2212
- modelId: this.modelId
2213
- });
2214
- const {
2215
- responseHeaders,
2216
- value: response,
2217
- rawValue: rawResponse
2218
- } = await chunkHVBBFCDH_cjs.postJsonToApi({
2219
- url,
2220
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
2221
- body,
2222
- failedResponseHandler: openaiFailedResponseHandler,
2223
- successfulResponseHandler: chunkHVBBFCDH_cjs.createJsonResponseHandler(
2224
- v4.z.object({
2225
- id: v4.z.string(),
2226
- created_at: v4.z.number(),
2227
- error: v4.z.object({
2228
- code: v4.z.string(),
2229
- message: v4.z.string()
2230
- }).nullish(),
2231
- model: v4.z.string(),
2232
- output: v4.z.array(
2233
- v4.z.discriminatedUnion("type", [
2234
- v4.z.object({
2235
- type: v4.z.literal("message"),
2236
- role: v4.z.literal("assistant"),
2237
- id: v4.z.string(),
2238
- content: v4.z.array(
2239
- v4.z.object({
2240
- type: v4.z.literal("output_text"),
2241
- text: v4.z.string(),
2242
- logprobs: LOGPROBS_SCHEMA.nullish(),
2243
- annotations: v4.z.array(
2244
- v4.z.discriminatedUnion("type", [
2245
- v4.z.object({
2246
- type: v4.z.literal("url_citation"),
2247
- start_index: v4.z.number(),
2248
- end_index: v4.z.number(),
2249
- url: v4.z.string(),
2250
- title: v4.z.string()
2251
- }),
2252
- v4.z.object({
2253
- type: v4.z.literal("file_citation"),
2254
- start_index: v4.z.number(),
2255
- end_index: v4.z.number(),
2256
- file_id: v4.z.string(),
2257
- quote: v4.z.string()
2258
- })
2259
- ])
2260
- )
2261
- })
2262
- )
2263
- }),
2264
- v4.z.object({
2265
- type: v4.z.literal("function_call"),
2266
- call_id: v4.z.string(),
2267
- name: v4.z.string(),
2268
- arguments: v4.z.string(),
2269
- id: v4.z.string()
2270
- }),
2271
- v4.z.object({
2272
- type: v4.z.literal("web_search_call"),
2273
- id: v4.z.string(),
2274
- status: v4.z.string().optional(),
2275
- action: v4.z.object({
2276
- type: v4.z.literal("search"),
2277
- query: v4.z.string().optional()
2278
- }).nullish()
2279
- }),
2280
- v4.z.object({
2281
- type: v4.z.literal("computer_call"),
2282
- id: v4.z.string(),
2283
- status: v4.z.string().optional()
2284
- }),
2285
- v4.z.object({
2286
- type: v4.z.literal("file_search_call"),
2287
- id: v4.z.string(),
2288
- status: v4.z.string().optional(),
2289
- queries: v4.z.array(v4.z.string()).nullish(),
2290
- results: v4.z.array(
2291
- v4.z.object({
2292
- attributes: v4.z.object({
2293
- file_id: v4.z.string(),
2294
- filename: v4.z.string(),
2295
- score: v4.z.number(),
2296
- text: v4.z.string()
2297
- })
2298
- })
2299
- ).nullish()
2300
- }),
2301
- v4.z.object({
2302
- type: v4.z.literal("reasoning"),
2303
- id: v4.z.string(),
2304
- encrypted_content: v4.z.string().nullish(),
2305
- summary: v4.z.array(
2306
- v4.z.object({
2307
- type: v4.z.literal("summary_text"),
2308
- text: v4.z.string()
2309
- })
2310
- )
2311
- })
2312
- ])
2313
- ),
2314
- incomplete_details: v4.z.object({ reason: v4.z.string() }).nullable(),
2315
- usage: usageSchema2
2316
- })
2317
- ),
2318
- abortSignal: options.abortSignal,
2319
- fetch: this.config.fetch
2320
- });
2321
- if (response.error) {
2322
- throw new chunkHVBBFCDH_cjs.APICallError({
2323
- message: response.error.message,
2324
- url,
2325
- requestBodyValues: body,
2326
- statusCode: 400,
2327
- responseHeaders,
2328
- responseBody: rawResponse,
2329
- isRetryable: false
2330
- });
2331
- }
2332
- const content = [];
2333
- const logprobs = [];
2334
- for (const part of response.output) {
2335
- switch (part.type) {
2336
- case "reasoning": {
2337
- if (part.summary.length === 0) {
2338
- part.summary.push({ type: "summary_text", text: "" });
2339
- }
2340
- for (const summary of part.summary) {
2341
- content.push({
2342
- type: "reasoning",
2343
- text: summary.text,
2344
- providerMetadata: {
2345
- openai: {
2346
- itemId: part.id,
2347
- reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
2348
- }
2349
- }
2350
- });
2351
- }
2352
- break;
2353
- }
2354
- case "message": {
2355
- for (const contentPart of part.content) {
2356
- if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
2357
- logprobs.push(contentPart.logprobs);
2358
- }
2359
- content.push({
2360
- type: "text",
2361
- text: contentPart.text,
2362
- providerMetadata: {
2363
- openai: {
2364
- itemId: part.id
2365
- }
2366
- }
2367
- });
2368
- for (const annotation of contentPart.annotations) {
2369
- if (annotation.type === "url_citation") {
2370
- content.push({
2371
- type: "source",
2372
- sourceType: "url",
2373
- id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : chunkHVBBFCDH_cjs.generateId(),
2374
- url: annotation.url,
2375
- title: annotation.title
2376
- });
2377
- } else if (annotation.type === "file_citation") {
2378
- content.push({
2379
- type: "source",
2380
- sourceType: "document",
2381
- id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : chunkHVBBFCDH_cjs.generateId(),
2382
- mediaType: "text/plain",
2383
- title: annotation.quote,
2384
- filename: annotation.file_id
2385
- });
2386
- }
2387
- }
2388
- }
2389
- break;
2390
- }
2391
- case "function_call": {
2392
- content.push({
2393
- type: "tool-call",
2394
- toolCallId: part.call_id,
2395
- toolName: part.name,
2396
- input: part.arguments,
2397
- providerMetadata: {
2398
- openai: {
2399
- itemId: part.id
2400
- }
2401
- }
2402
- });
2403
- break;
2404
- }
2405
- case "web_search_call": {
2406
- content.push({
2407
- type: "tool-call",
2408
- toolCallId: part.id,
2409
- toolName: "web_search_preview",
2410
- input: (_k = (_j = part.action) == null ? void 0 : _j.query) != null ? _k : "",
2411
- providerExecuted: true
2412
- });
2413
- content.push({
2414
- type: "tool-result",
2415
- toolCallId: part.id,
2416
- toolName: "web_search_preview",
2417
- result: {
2418
- status: part.status || "completed",
2419
- ...((_l = part.action) == null ? void 0 : _l.query) && { query: part.action.query }
2420
- },
2421
- providerExecuted: true
2422
- });
2423
- break;
2424
- }
2425
- case "computer_call": {
2426
- content.push({
2427
- type: "tool-call",
2428
- toolCallId: part.id,
2429
- toolName: "computer_use",
2430
- input: "",
2431
- providerExecuted: true
2432
- });
2433
- content.push({
2434
- type: "tool-result",
2435
- toolCallId: part.id,
2436
- toolName: "computer_use",
2437
- result: {
2438
- type: "computer_use_tool_result",
2439
- status: part.status || "completed"
2440
- },
2441
- providerExecuted: true
2442
- });
2443
- break;
2444
- }
2445
- case "file_search_call": {
2446
- content.push({
2447
- type: "tool-call",
2448
- toolCallId: part.id,
2449
- toolName: "file_search",
2450
- input: "",
2451
- providerExecuted: true
2452
- });
2453
- content.push({
2454
- type: "tool-result",
2455
- toolCallId: part.id,
2456
- toolName: "file_search",
2457
- result: {
2458
- type: "file_search_tool_result",
2459
- status: part.status || "completed",
2460
- ...part.queries && { queries: part.queries },
2461
- ...part.results && { results: part.results }
2462
- },
2463
- providerExecuted: true
2464
- });
2465
- break;
2466
- }
2467
- }
2468
- }
2469
- const providerMetadata = {
2470
- openai: { responseId: response.id }
2471
- };
2472
- if (logprobs.length > 0) {
2473
- providerMetadata.openai.logprobs = logprobs;
2474
- }
2475
- return {
2476
- content,
2477
- finishReason: mapOpenAIResponseFinishReason({
2478
- finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
2479
- hasToolCalls: content.some((part) => part.type === "tool-call")
2480
- }),
2481
- usage: {
2482
- inputTokens: response.usage.input_tokens,
2483
- outputTokens: response.usage.output_tokens,
2484
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2485
- reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
2486
- cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
2487
- },
2488
- request: { body },
2489
- response: {
2490
- id: response.id,
2491
- timestamp: new Date(response.created_at * 1e3),
2492
- modelId: response.model,
2493
- headers: responseHeaders,
2494
- body: rawResponse
2495
- },
2496
- providerMetadata,
2497
- warnings
2498
- };
2499
- }
2500
- async doStream(options) {
2501
- const { args: body, warnings } = await this.getArgs(options);
2502
- const { responseHeaders, value: response } = await chunkHVBBFCDH_cjs.postJsonToApi({
2503
- url: this.config.url({
2504
- path: "/responses",
2505
- modelId: this.modelId
2506
- }),
2507
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
2508
- body: {
2509
- ...body,
2510
- stream: true
2511
- },
2512
- failedResponseHandler: openaiFailedResponseHandler,
2513
- successfulResponseHandler: chunkHVBBFCDH_cjs.createEventSourceResponseHandler(
2514
- openaiResponsesChunkSchema
2515
- ),
2516
- abortSignal: options.abortSignal,
2517
- fetch: this.config.fetch
2518
- });
2519
- const self = this;
2520
- let finishReason = "unknown";
2521
- const usage = {
2522
- inputTokens: void 0,
2523
- outputTokens: void 0,
2524
- totalTokens: void 0
2525
- };
2526
- const logprobs = [];
2527
- let responseId = null;
2528
- const ongoingToolCalls = {};
2529
- let hasToolCalls = false;
2530
- const activeReasoning = {};
2531
- return {
2532
- stream: response.pipeThrough(
2533
- new TransformStream({
2534
- start(controller) {
2535
- controller.enqueue({ type: "stream-start", warnings });
2536
- },
2537
- transform(chunk, controller) {
2538
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
2539
- if (options.includeRawChunks) {
2540
- controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2541
- }
2542
- if (!chunk.success) {
2543
- finishReason = "error";
2544
- controller.enqueue({ type: "error", error: chunk.error });
2545
- return;
2546
- }
2547
- const value = chunk.value;
2548
- if (isResponseOutputItemAddedChunk(value)) {
2549
- if (value.item.type === "function_call") {
2550
- ongoingToolCalls[value.output_index] = {
2551
- toolName: value.item.name,
2552
- toolCallId: value.item.call_id
2553
- };
2554
- controller.enqueue({
2555
- type: "tool-input-start",
2556
- id: value.item.call_id,
2557
- toolName: value.item.name
2558
- });
2559
- } else if (value.item.type === "web_search_call") {
2560
- ongoingToolCalls[value.output_index] = {
2561
- toolName: "web_search_preview",
2562
- toolCallId: value.item.id
2563
- };
2564
- controller.enqueue({
2565
- type: "tool-input-start",
2566
- id: value.item.id,
2567
- toolName: "web_search_preview"
2568
- });
2569
- } else if (value.item.type === "computer_call") {
2570
- ongoingToolCalls[value.output_index] = {
2571
- toolName: "computer_use",
2572
- toolCallId: value.item.id
2573
- };
2574
- controller.enqueue({
2575
- type: "tool-input-start",
2576
- id: value.item.id,
2577
- toolName: "computer_use"
2578
- });
2579
- } else if (value.item.type === "file_search_call") {
2580
- ongoingToolCalls[value.output_index] = {
2581
- toolName: "file_search",
2582
- toolCallId: value.item.id
2583
- };
2584
- controller.enqueue({
2585
- type: "tool-input-start",
2586
- id: value.item.id,
2587
- toolName: "file_search"
2588
- });
2589
- } else if (value.item.type === "message") {
2590
- controller.enqueue({
2591
- type: "text-start",
2592
- id: value.item.id,
2593
- providerMetadata: {
2594
- openai: {
2595
- itemId: value.item.id
2596
- }
2597
- }
2598
- });
2599
- } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2600
- activeReasoning[value.item.id] = {
2601
- encryptedContent: value.item.encrypted_content,
2602
- summaryParts: [0]
2603
- };
2604
- controller.enqueue({
2605
- type: "reasoning-start",
2606
- id: `${value.item.id}:0`,
2607
- providerMetadata: {
2608
- openai: {
2609
- itemId: value.item.id,
2610
- reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
2611
- }
2612
- }
2613
- });
2614
- }
2615
- } else if (isResponseOutputItemDoneChunk(value)) {
2616
- if (value.item.type === "function_call") {
2617
- ongoingToolCalls[value.output_index] = void 0;
2618
- hasToolCalls = true;
2619
- controller.enqueue({
2620
- type: "tool-input-end",
2621
- id: value.item.call_id
2622
- });
2623
- controller.enqueue({
2624
- type: "tool-call",
2625
- toolCallId: value.item.call_id,
2626
- toolName: value.item.name,
2627
- input: value.item.arguments,
2628
- providerMetadata: {
2629
- openai: {
2630
- itemId: value.item.id
2631
- }
2632
- }
2633
- });
2634
- } else if (value.item.type === "web_search_call") {
2635
- ongoingToolCalls[value.output_index] = void 0;
2636
- hasToolCalls = true;
2637
- controller.enqueue({
2638
- type: "tool-input-end",
2639
- id: value.item.id
2640
- });
2641
- controller.enqueue({
2642
- type: "tool-call",
2643
- toolCallId: value.item.id,
2644
- toolName: "web_search_preview",
2645
- input: (_c = (_b = value.item.action) == null ? void 0 : _b.query) != null ? _c : "",
2646
- providerExecuted: true
2647
- });
2648
- controller.enqueue({
2649
- type: "tool-result",
2650
- toolCallId: value.item.id,
2651
- toolName: "web_search_preview",
2652
- result: {
2653
- type: "web_search_tool_result",
2654
- status: value.item.status || "completed",
2655
- ...((_d = value.item.action) == null ? void 0 : _d.query) && {
2656
- query: value.item.action.query
2657
- }
2658
- },
2659
- providerExecuted: true
2660
- });
2661
- } else if (value.item.type === "computer_call") {
2662
- ongoingToolCalls[value.output_index] = void 0;
2663
- hasToolCalls = true;
2664
- controller.enqueue({
2665
- type: "tool-input-end",
2666
- id: value.item.id
2667
- });
2668
- controller.enqueue({
2669
- type: "tool-call",
2670
- toolCallId: value.item.id,
2671
- toolName: "computer_use",
2672
- input: "",
2673
- providerExecuted: true
2674
- });
2675
- controller.enqueue({
2676
- type: "tool-result",
2677
- toolCallId: value.item.id,
2678
- toolName: "computer_use",
2679
- result: {
2680
- type: "computer_use_tool_result",
2681
- status: value.item.status || "completed"
2682
- },
2683
- providerExecuted: true
2684
- });
2685
- } else if (value.item.type === "file_search_call") {
2686
- ongoingToolCalls[value.output_index] = void 0;
2687
- hasToolCalls = true;
2688
- controller.enqueue({
2689
- type: "tool-input-end",
2690
- id: value.item.id
2691
- });
2692
- controller.enqueue({
2693
- type: "tool-call",
2694
- toolCallId: value.item.id,
2695
- toolName: "file_search",
2696
- input: "",
2697
- providerExecuted: true
2698
- });
2699
- controller.enqueue({
2700
- type: "tool-result",
2701
- toolCallId: value.item.id,
2702
- toolName: "file_search",
2703
- result: {
2704
- type: "file_search_tool_result",
2705
- status: value.item.status || "completed",
2706
- ...value.item.queries && { queries: value.item.queries },
2707
- ...value.item.results && { results: value.item.results }
2708
- },
2709
- providerExecuted: true
2710
- });
2711
- } else if (value.item.type === "message") {
2712
- controller.enqueue({
2713
- type: "text-end",
2714
- id: value.item.id
2715
- });
2716
- } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2717
- const activeReasoningPart = activeReasoning[value.item.id];
2718
- for (const summaryIndex of activeReasoningPart.summaryParts) {
2719
- controller.enqueue({
2720
- type: "reasoning-end",
2721
- id: `${value.item.id}:${summaryIndex}`,
2722
- providerMetadata: {
2723
- openai: {
2724
- itemId: value.item.id,
2725
- reasoningEncryptedContent: (_e = value.item.encrypted_content) != null ? _e : null
2726
- }
2727
- }
2728
- });
2729
- }
2730
- delete activeReasoning[value.item.id];
2731
- }
2732
- } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2733
- const toolCall = ongoingToolCalls[value.output_index];
2734
- if (toolCall != null) {
2735
- controller.enqueue({
2736
- type: "tool-input-delta",
2737
- id: toolCall.toolCallId,
2738
- delta: value.delta
2739
- });
2740
- }
2741
- } else if (isResponseCreatedChunk(value)) {
2742
- responseId = value.response.id;
2743
- controller.enqueue({
2744
- type: "response-metadata",
2745
- id: value.response.id,
2746
- timestamp: new Date(value.response.created_at * 1e3),
2747
- modelId: value.response.model
2748
- });
2749
- } else if (isTextDeltaChunk(value)) {
2750
- controller.enqueue({
2751
- type: "text-delta",
2752
- id: value.item_id,
2753
- delta: value.delta
2754
- });
2755
- if (value.logprobs) {
2756
- logprobs.push(value.logprobs);
2757
- }
2758
- } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
2759
- if (value.summary_index > 0) {
2760
- (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.summaryParts.push(
2761
- value.summary_index
2762
- );
2763
- controller.enqueue({
2764
- type: "reasoning-start",
2765
- id: `${value.item_id}:${value.summary_index}`,
2766
- providerMetadata: {
2767
- openai: {
2768
- itemId: value.item_id,
2769
- reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
2770
- }
2771
- }
2772
- });
2773
- }
2774
- } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2775
- controller.enqueue({
2776
- type: "reasoning-delta",
2777
- id: `${value.item_id}:${value.summary_index}`,
2778
- delta: value.delta,
2779
- providerMetadata: {
2780
- openai: {
2781
- itemId: value.item_id
2782
- }
2783
- }
2784
- });
2785
- } else if (isResponseFinishedChunk(value)) {
2786
- finishReason = mapOpenAIResponseFinishReason({
2787
- finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
2788
- hasToolCalls
2789
- });
2790
- usage.inputTokens = value.response.usage.input_tokens;
2791
- usage.outputTokens = value.response.usage.output_tokens;
2792
- usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2793
- usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
2794
- usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
2795
- } else if (isResponseAnnotationAddedChunk(value)) {
2796
- if (value.annotation.type === "url_citation") {
2797
- controller.enqueue({
2798
- type: "source",
2799
- sourceType: "url",
2800
- id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : chunkHVBBFCDH_cjs.generateId(),
2801
- url: value.annotation.url,
2802
- title: value.annotation.title
2803
- });
2804
- } else if (value.annotation.type === "file_citation") {
2805
- controller.enqueue({
2806
- type: "source",
2807
- sourceType: "document",
2808
- id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : chunkHVBBFCDH_cjs.generateId(),
2809
- mediaType: "text/plain",
2810
- title: value.annotation.quote,
2811
- filename: value.annotation.file_id
2812
- });
2813
- }
2814
- } else if (isErrorChunk(value)) {
2815
- controller.enqueue({ type: "error", error: value });
2816
- }
2817
- },
2818
- flush(controller) {
2819
- const providerMetadata = {
2820
- openai: {
2821
- responseId
2822
- }
2823
- };
2824
- if (logprobs.length > 0) {
2825
- providerMetadata.openai.logprobs = logprobs;
2826
- }
2827
- controller.enqueue({
2828
- type: "finish",
2829
- finishReason,
2830
- usage,
2831
- providerMetadata
2832
- });
2833
- }
2834
- })
2835
- ),
2836
- request: { body },
2837
- response: { headers: responseHeaders }
2838
- };
2839
- }
2840
- };
2841
- var usageSchema2 = v4.z.object({
2842
- input_tokens: v4.z.number(),
2843
- input_tokens_details: v4.z.object({ cached_tokens: v4.z.number().nullish() }).nullish(),
2844
- output_tokens: v4.z.number(),
2845
- output_tokens_details: v4.z.object({ reasoning_tokens: v4.z.number().nullish() }).nullish()
2846
- });
2847
- var textDeltaChunkSchema = v4.z.object({
2848
- type: v4.z.literal("response.output_text.delta"),
2849
- item_id: v4.z.string(),
2850
- delta: v4.z.string(),
2851
- logprobs: LOGPROBS_SCHEMA.nullish()
2852
- });
2853
- var errorChunkSchema = v4.z.object({
2854
- type: v4.z.literal("error"),
2855
- code: v4.z.string(),
2856
- message: v4.z.string(),
2857
- param: v4.z.string().nullish(),
2858
- sequence_number: v4.z.number()
2859
- });
2860
- var responseFinishedChunkSchema = v4.z.object({
2861
- type: v4.z.enum(["response.completed", "response.incomplete"]),
2862
- response: v4.z.object({
2863
- incomplete_details: v4.z.object({ reason: v4.z.string() }).nullish(),
2864
- usage: usageSchema2
2865
- })
2866
- });
2867
- var responseCreatedChunkSchema = v4.z.object({
2868
- type: v4.z.literal("response.created"),
2869
- response: v4.z.object({
2870
- id: v4.z.string(),
2871
- created_at: v4.z.number(),
2872
- model: v4.z.string()
2873
- })
2874
- });
2875
- var responseOutputItemAddedSchema = v4.z.object({
2876
- type: v4.z.literal("response.output_item.added"),
2877
- output_index: v4.z.number(),
2878
- item: v4.z.discriminatedUnion("type", [
2879
- v4.z.object({
2880
- type: v4.z.literal("message"),
2881
- id: v4.z.string()
2882
- }),
2883
- v4.z.object({
2884
- type: v4.z.literal("reasoning"),
2885
- id: v4.z.string(),
2886
- encrypted_content: v4.z.string().nullish()
2887
- }),
2888
- v4.z.object({
2889
- type: v4.z.literal("function_call"),
2890
- id: v4.z.string(),
2891
- call_id: v4.z.string(),
2892
- name: v4.z.string(),
2893
- arguments: v4.z.string()
2894
- }),
2895
- v4.z.object({
2896
- type: v4.z.literal("web_search_call"),
2897
- id: v4.z.string(),
2898
- status: v4.z.string(),
2899
- action: v4.z.object({
2900
- type: v4.z.literal("search"),
2901
- query: v4.z.string().optional()
2902
- }).nullish()
2903
- }),
2904
- v4.z.object({
2905
- type: v4.z.literal("computer_call"),
2906
- id: v4.z.string(),
2907
- status: v4.z.string()
2908
- }),
2909
- v4.z.object({
2910
- type: v4.z.literal("file_search_call"),
2911
- id: v4.z.string(),
2912
- status: v4.z.string(),
2913
- queries: v4.z.array(v4.z.string()).nullish(),
2914
- results: v4.z.array(
2915
- v4.z.object({
2916
- attributes: v4.z.object({
2917
- file_id: v4.z.string(),
2918
- filename: v4.z.string(),
2919
- score: v4.z.number(),
2920
- text: v4.z.string()
2921
- })
2922
- })
2923
- ).optional()
2924
- })
2925
- ])
2926
- });
2927
- var responseOutputItemDoneSchema = v4.z.object({
2928
- type: v4.z.literal("response.output_item.done"),
2929
- output_index: v4.z.number(),
2930
- item: v4.z.discriminatedUnion("type", [
2931
- v4.z.object({
2932
- type: v4.z.literal("message"),
2933
- id: v4.z.string()
2934
- }),
2935
- v4.z.object({
2936
- type: v4.z.literal("reasoning"),
2937
- id: v4.z.string(),
2938
- encrypted_content: v4.z.string().nullish()
2939
- }),
2940
- v4.z.object({
2941
- type: v4.z.literal("function_call"),
2942
- id: v4.z.string(),
2943
- call_id: v4.z.string(),
2944
- name: v4.z.string(),
2945
- arguments: v4.z.string(),
2946
- status: v4.z.literal("completed")
2947
- }),
2948
- v4.z.object({
2949
- type: v4.z.literal("web_search_call"),
2950
- id: v4.z.string(),
2951
- status: v4.z.literal("completed"),
2952
- action: v4.z.object({
2953
- type: v4.z.literal("search"),
2954
- query: v4.z.string().optional()
2955
- }).nullish()
2956
- }),
2957
- v4.z.object({
2958
- type: v4.z.literal("computer_call"),
2959
- id: v4.z.string(),
2960
- status: v4.z.literal("completed")
2961
- }),
2962
- v4.z.object({
2963
- type: v4.z.literal("file_search_call"),
2964
- id: v4.z.string(),
2965
- status: v4.z.literal("completed"),
2966
- queries: v4.z.array(v4.z.string()).nullish(),
2967
- results: v4.z.array(
2968
- v4.z.object({
2969
- attributes: v4.z.object({
2970
- file_id: v4.z.string(),
2971
- filename: v4.z.string(),
2972
- score: v4.z.number(),
2973
- text: v4.z.string()
2974
- })
2975
- })
2976
- ).nullish()
2977
- })
2978
- ])
2979
- });
2980
- var responseFunctionCallArgumentsDeltaSchema = v4.z.object({
2981
- type: v4.z.literal("response.function_call_arguments.delta"),
2982
- item_id: v4.z.string(),
2983
- output_index: v4.z.number(),
2984
- delta: v4.z.string()
2985
- });
2986
- var responseAnnotationAddedSchema = v4.z.object({
2987
- type: v4.z.literal("response.output_text.annotation.added"),
2988
- annotation: v4.z.discriminatedUnion("type", [
2989
- v4.z.object({
2990
- type: v4.z.literal("url_citation"),
2991
- url: v4.z.string(),
2992
- title: v4.z.string()
2993
- }),
2994
- v4.z.object({
2995
- type: v4.z.literal("file_citation"),
2996
- file_id: v4.z.string(),
2997
- quote: v4.z.string()
2998
- })
2999
- ])
3000
- });
3001
- var responseReasoningSummaryPartAddedSchema = v4.z.object({
3002
- type: v4.z.literal("response.reasoning_summary_part.added"),
3003
- item_id: v4.z.string(),
3004
- summary_index: v4.z.number()
3005
- });
3006
- var responseReasoningSummaryTextDeltaSchema = v4.z.object({
3007
- type: v4.z.literal("response.reasoning_summary_text.delta"),
3008
- item_id: v4.z.string(),
3009
- summary_index: v4.z.number(),
3010
- delta: v4.z.string()
3011
- });
3012
- var openaiResponsesChunkSchema = v4.z.union([
3013
- textDeltaChunkSchema,
3014
- responseFinishedChunkSchema,
3015
- responseCreatedChunkSchema,
3016
- responseOutputItemAddedSchema,
3017
- responseOutputItemDoneSchema,
3018
- responseFunctionCallArgumentsDeltaSchema,
3019
- responseAnnotationAddedSchema,
3020
- responseReasoningSummaryPartAddedSchema,
3021
- responseReasoningSummaryTextDeltaSchema,
3022
- errorChunkSchema,
3023
- v4.z.object({ type: v4.z.string() }).loose()
3024
- // fallback for unknown chunks
3025
- ]);
3026
- function isTextDeltaChunk(chunk) {
3027
- return chunk.type === "response.output_text.delta";
3028
- }
3029
- function isResponseOutputItemDoneChunk(chunk) {
3030
- return chunk.type === "response.output_item.done";
3031
- }
3032
- function isResponseOutputItemDoneReasoningChunk(chunk) {
3033
- return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
3034
- }
3035
- function isResponseFinishedChunk(chunk) {
3036
- return chunk.type === "response.completed" || chunk.type === "response.incomplete";
3037
- }
3038
- function isResponseCreatedChunk(chunk) {
3039
- return chunk.type === "response.created";
3040
- }
3041
- function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
3042
- return chunk.type === "response.function_call_arguments.delta";
3043
- }
3044
- function isResponseOutputItemAddedChunk(chunk) {
3045
- return chunk.type === "response.output_item.added";
3046
- }
3047
- function isResponseOutputItemAddedReasoningChunk(chunk) {
3048
- return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
3049
- }
3050
- function isResponseAnnotationAddedChunk(chunk) {
3051
- return chunk.type === "response.output_text.annotation.added";
3052
- }
3053
- function isResponseReasoningSummaryPartAddedChunk(chunk) {
3054
- return chunk.type === "response.reasoning_summary_part.added";
3055
- }
3056
- function isResponseReasoningSummaryTextDeltaChunk(chunk) {
3057
- return chunk.type === "response.reasoning_summary_text.delta";
3058
- }
3059
- function isErrorChunk(chunk) {
3060
- return chunk.type === "error";
3061
- }
3062
- function getResponsesModelConfig(modelId) {
3063
- const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
3064
- const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3065
- const defaults = {
3066
- requiredAutoTruncation: false,
3067
- systemMessageMode: "system",
3068
- supportsFlexProcessing: supportsFlexProcessing2,
3069
- supportsPriorityProcessing: supportsPriorityProcessing2
3070
- };
3071
- if (modelId.startsWith("gpt-5-chat")) {
3072
- return {
3073
- ...defaults,
3074
- isReasoningModel: false
3075
- };
3076
- }
3077
- if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
3078
- if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
3079
- return {
3080
- ...defaults,
3081
- isReasoningModel: true,
3082
- systemMessageMode: "remove"
3083
- };
3084
- }
3085
- return {
3086
- ...defaults,
3087
- isReasoningModel: true,
3088
- systemMessageMode: "developer"
3089
- };
3090
- }
3091
- return {
3092
- ...defaults,
3093
- isReasoningModel: false
3094
- };
3095
- }
3096
- var openaiResponsesProviderOptionsSchema = v4.z.object({
3097
- metadata: v4.z.any().nullish(),
3098
- parallelToolCalls: v4.z.boolean().nullish(),
3099
- previousResponseId: v4.z.string().nullish(),
3100
- store: v4.z.boolean().nullish(),
3101
- user: v4.z.string().nullish(),
3102
- reasoningEffort: v4.z.string().nullish(),
3103
- strictJsonSchema: v4.z.boolean().nullish(),
3104
- instructions: v4.z.string().nullish(),
3105
- reasoningSummary: v4.z.string().nullish(),
3106
- serviceTier: v4.z.enum(["auto", "flex", "priority"]).nullish(),
3107
- include: v4.z.array(
3108
- v4.z.enum([
3109
- "reasoning.encrypted_content",
3110
- "file_search_call.results",
3111
- "message.output_text.logprobs"
3112
- ])
3113
- ).nullish(),
3114
- textVerbosity: v4.z.enum(["low", "medium", "high"]).nullish(),
3115
- promptCacheKey: v4.z.string().nullish(),
3116
- safetyIdentifier: v4.z.string().nullish(),
3117
- /**
3118
- * Return the log probabilities of the tokens.
3119
- *
3120
- * Setting to true will return the log probabilities of the tokens that
3121
- * were generated.
3122
- *
3123
- * Setting to a number will return the log probabilities of the top n
3124
- * tokens that were generated.
3125
- *
3126
- * @see https://platform.openai.com/docs/api-reference/responses/create
3127
- * @see https://cookbook.openai.com/examples/using_logprobs
3128
- */
3129
- logprobs: v4.z.union([v4.z.boolean(), v4.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
3130
- });
3131
- var OpenAIProviderOptionsSchema = v4.z.object({
3132
- instructions: v4.z.string().nullish(),
3133
- speed: v4.z.number().min(0.25).max(4).default(1).nullish()
3134
- });
3135
- var OpenAISpeechModel = class {
3136
- constructor(modelId, config) {
3137
- this.modelId = modelId;
3138
- this.config = config;
3139
- this.specificationVersion = "v2";
3140
- }
3141
- get provider() {
3142
- return this.config.provider;
3143
- }
3144
- async getArgs({
3145
- text,
3146
- voice = "alloy",
3147
- outputFormat = "mp3",
3148
- speed,
3149
- instructions,
3150
- language,
3151
- providerOptions
3152
- }) {
3153
- const warnings = [];
3154
- const openAIOptions = await chunkHVBBFCDH_cjs.parseProviderOptions({
3155
- provider: "openai",
3156
- providerOptions,
3157
- schema: OpenAIProviderOptionsSchema
3158
- });
3159
- const requestBody = {
3160
- model: this.modelId,
3161
- input: text,
3162
- voice,
3163
- response_format: "mp3",
3164
- speed,
3165
- instructions
3166
- };
3167
- if (outputFormat) {
3168
- if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
3169
- requestBody.response_format = outputFormat;
3170
- } else {
3171
- warnings.push({
3172
- type: "unsupported-setting",
3173
- setting: "outputFormat",
3174
- details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
3175
- });
3176
- }
3177
- }
3178
- if (openAIOptions) {
3179
- const speechModelOptions = {};
3180
- for (const key in speechModelOptions) {
3181
- const value = speechModelOptions[key];
3182
- if (value !== void 0) {
3183
- requestBody[key] = value;
3184
- }
3185
- }
3186
- }
3187
- if (language) {
3188
- warnings.push({
3189
- type: "unsupported-setting",
3190
- setting: "language",
3191
- details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
3192
- });
3193
- }
3194
- return {
3195
- requestBody,
3196
- warnings
3197
- };
3198
- }
3199
- async doGenerate(options) {
3200
- var _a, _b, _c;
3201
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
3202
- const { requestBody, warnings } = await this.getArgs(options);
3203
- const {
3204
- value: audio,
3205
- responseHeaders,
3206
- rawValue: rawResponse
3207
- } = await chunkHVBBFCDH_cjs.postJsonToApi({
3208
- url: this.config.url({
3209
- path: "/audio/speech",
3210
- modelId: this.modelId
3211
- }),
3212
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
3213
- body: requestBody,
3214
- failedResponseHandler: openaiFailedResponseHandler,
3215
- successfulResponseHandler: chunkHVBBFCDH_cjs.createBinaryResponseHandler(),
3216
- abortSignal: options.abortSignal,
3217
- fetch: this.config.fetch
3218
- });
3219
- return {
3220
- audio,
3221
- warnings,
3222
- request: {
3223
- body: JSON.stringify(requestBody)
3224
- },
3225
- response: {
3226
- timestamp: currentDate,
3227
- modelId: this.modelId,
3228
- headers: responseHeaders,
3229
- body: rawResponse
3230
- }
3231
- };
3232
- }
3233
- };
3234
- var openAITranscriptionProviderOptions = v4.z.object({
3235
- /**
3236
- * Additional information to include in the transcription response.
3237
- */
3238
- include: v4.z.array(v4.z.string()).optional(),
3239
- /**
3240
- * The language of the input audio in ISO-639-1 format.
3241
- */
3242
- language: v4.z.string().optional(),
3243
- /**
3244
- * An optional text to guide the model's style or continue a previous audio segment.
3245
- */
3246
- prompt: v4.z.string().optional(),
3247
- /**
3248
- * The sampling temperature, between 0 and 1.
3249
- * @default 0
3250
- */
3251
- temperature: v4.z.number().min(0).max(1).default(0).optional(),
3252
- /**
3253
- * The timestamp granularities to populate for this transcription.
3254
- * @default ['segment']
3255
- */
3256
- timestampGranularities: v4.z.array(v4.z.enum(["word", "segment"])).default(["segment"]).optional()
3257
- });
3258
- var languageMap = {
3259
- afrikaans: "af",
3260
- arabic: "ar",
3261
- armenian: "hy",
3262
- azerbaijani: "az",
3263
- belarusian: "be",
3264
- bosnian: "bs",
3265
- bulgarian: "bg",
3266
- catalan: "ca",
3267
- chinese: "zh",
3268
- croatian: "hr",
3269
- czech: "cs",
3270
- danish: "da",
3271
- dutch: "nl",
3272
- english: "en",
3273
- estonian: "et",
3274
- finnish: "fi",
3275
- french: "fr",
3276
- galician: "gl",
3277
- german: "de",
3278
- greek: "el",
3279
- hebrew: "he",
3280
- hindi: "hi",
3281
- hungarian: "hu",
3282
- icelandic: "is",
3283
- indonesian: "id",
3284
- italian: "it",
3285
- japanese: "ja",
3286
- kannada: "kn",
3287
- kazakh: "kk",
3288
- korean: "ko",
3289
- latvian: "lv",
3290
- lithuanian: "lt",
3291
- macedonian: "mk",
3292
- malay: "ms",
3293
- marathi: "mr",
3294
- maori: "mi",
3295
- nepali: "ne",
3296
- norwegian: "no",
3297
- persian: "fa",
3298
- polish: "pl",
3299
- portuguese: "pt",
3300
- romanian: "ro",
3301
- russian: "ru",
3302
- serbian: "sr",
3303
- slovak: "sk",
3304
- slovenian: "sl",
3305
- spanish: "es",
3306
- swahili: "sw",
3307
- swedish: "sv",
3308
- tagalog: "tl",
3309
- tamil: "ta",
3310
- thai: "th",
3311
- turkish: "tr",
3312
- ukrainian: "uk",
3313
- urdu: "ur",
3314
- vietnamese: "vi",
3315
- welsh: "cy"
3316
- };
3317
- var OpenAITranscriptionModel = class {
3318
- constructor(modelId, config) {
3319
- this.modelId = modelId;
3320
- this.config = config;
3321
- this.specificationVersion = "v2";
3322
- }
3323
- get provider() {
3324
- return this.config.provider;
3325
- }
3326
- async getArgs({
3327
- audio,
3328
- mediaType,
3329
- providerOptions
3330
- }) {
3331
- const warnings = [];
3332
- const openAIOptions = await chunkHVBBFCDH_cjs.parseProviderOptions({
3333
- provider: "openai",
3334
- providerOptions,
3335
- schema: openAITranscriptionProviderOptions
3336
- });
3337
- const formData = new FormData();
3338
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([chunkHVBBFCDH_cjs.convertBase64ToUint8Array(audio)]);
3339
- formData.append("model", this.modelId);
3340
- formData.append("file", new File([blob], "audio", { type: mediaType }));
3341
- if (openAIOptions) {
3342
- const transcriptionModelOptions = {
3343
- include: openAIOptions.include,
3344
- language: openAIOptions.language,
3345
- prompt: openAIOptions.prompt,
3346
- response_format: "verbose_json",
3347
- // always use verbose_json to get segments
3348
- temperature: openAIOptions.temperature,
3349
- timestamp_granularities: openAIOptions.timestampGranularities
3350
- };
3351
- for (const [key, value] of Object.entries(transcriptionModelOptions)) {
3352
- if (value != null) {
3353
- formData.append(key, String(value));
3354
- }
3355
- }
3356
- }
3357
- return {
3358
- formData,
3359
- warnings
3360
- };
3361
- }
3362
- async doGenerate(options) {
3363
- var _a, _b, _c, _d, _e, _f, _g, _h;
3364
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
3365
- const { formData, warnings } = await this.getArgs(options);
3366
- const {
3367
- value: response,
3368
- responseHeaders,
3369
- rawValue: rawResponse
3370
- } = await chunkHVBBFCDH_cjs.postFormDataToApi({
3371
- url: this.config.url({
3372
- path: "/audio/transcriptions",
3373
- modelId: this.modelId
3374
- }),
3375
- headers: chunkHVBBFCDH_cjs.combineHeaders(this.config.headers(), options.headers),
3376
- formData,
3377
- failedResponseHandler: openaiFailedResponseHandler,
3378
- successfulResponseHandler: chunkHVBBFCDH_cjs.createJsonResponseHandler(
3379
- openaiTranscriptionResponseSchema
3380
- ),
3381
- abortSignal: options.abortSignal,
3382
- fetch: this.config.fetch
3383
- });
3384
- const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
3385
- return {
3386
- text: response.text,
3387
- segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
3388
- text: segment.text,
3389
- startSecond: segment.start,
3390
- endSecond: segment.end
3391
- }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
3392
- text: word.word,
3393
- startSecond: word.start,
3394
- endSecond: word.end
3395
- }))) != null ? _g : [],
3396
- language,
3397
- durationInSeconds: (_h = response.duration) != null ? _h : void 0,
3398
- warnings,
3399
- response: {
3400
- timestamp: currentDate,
3401
- modelId: this.modelId,
3402
- headers: responseHeaders,
3403
- body: rawResponse
3404
- }
3405
- };
3406
- }
3407
- };
3408
- var openaiTranscriptionResponseSchema = v4.z.object({
3409
- text: v4.z.string(),
3410
- language: v4.z.string().nullish(),
3411
- duration: v4.z.number().nullish(),
3412
- words: v4.z.array(
3413
- v4.z.object({
3414
- word: v4.z.string(),
3415
- start: v4.z.number(),
3416
- end: v4.z.number()
3417
- })
3418
- ).nullish(),
3419
- segments: v4.z.array(
3420
- v4.z.object({
3421
- id: v4.z.number(),
3422
- seek: v4.z.number(),
3423
- start: v4.z.number(),
3424
- end: v4.z.number(),
3425
- text: v4.z.string(),
3426
- tokens: v4.z.array(v4.z.number()),
3427
- temperature: v4.z.number(),
3428
- avg_logprob: v4.z.number(),
3429
- compression_ratio: v4.z.number(),
3430
- no_speech_prob: v4.z.number()
3431
- })
3432
- ).nullish()
3433
- });
3434
- function createOpenAI(options = {}) {
3435
- var _a, _b;
3436
- const baseURL = (_a = chunkHVBBFCDH_cjs.withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
3437
- const providerName = (_b = options.name) != null ? _b : "openai";
3438
- const getHeaders = () => ({
3439
- Authorization: `Bearer ${chunkHVBBFCDH_cjs.loadApiKey({
3440
- apiKey: options.apiKey,
3441
- environmentVariableName: "OPENAI_API_KEY",
3442
- description: "OpenAI"
3443
- })}`,
3444
- "OpenAI-Organization": options.organization,
3445
- "OpenAI-Project": options.project,
3446
- ...options.headers
3447
- });
3448
- const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
3449
- provider: `${providerName}.chat`,
3450
- url: ({ path }) => `${baseURL}${path}`,
3451
- headers: getHeaders,
3452
- fetch: options.fetch
3453
- });
3454
- const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
3455
- provider: `${providerName}.completion`,
3456
- url: ({ path }) => `${baseURL}${path}`,
3457
- headers: getHeaders,
3458
- fetch: options.fetch
3459
- });
3460
- const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
3461
- provider: `${providerName}.embedding`,
3462
- url: ({ path }) => `${baseURL}${path}`,
3463
- headers: getHeaders,
3464
- fetch: options.fetch
3465
- });
3466
- const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
3467
- provider: `${providerName}.image`,
3468
- url: ({ path }) => `${baseURL}${path}`,
3469
- headers: getHeaders,
3470
- fetch: options.fetch
3471
- });
3472
- const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
3473
- provider: `${providerName}.transcription`,
3474
- url: ({ path }) => `${baseURL}${path}`,
3475
- headers: getHeaders,
3476
- fetch: options.fetch
3477
- });
3478
- const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
3479
- provider: `${providerName}.speech`,
3480
- url: ({ path }) => `${baseURL}${path}`,
3481
- headers: getHeaders,
3482
- fetch: options.fetch
3483
- });
3484
- const createLanguageModel = (modelId) => {
3485
- if (new.target) {
3486
- throw new Error(
3487
- "The OpenAI model function cannot be called with the new keyword."
3488
- );
3489
- }
3490
- return createResponsesModel(modelId);
3491
- };
3492
- const createResponsesModel = (modelId) => {
3493
- return new OpenAIResponsesLanguageModel(modelId, {
3494
- provider: `${providerName}.responses`,
3495
- url: ({ path }) => `${baseURL}${path}`,
3496
- headers: getHeaders,
3497
- fetch: options.fetch,
3498
- fileIdPrefixes: ["file-"]
3499
- });
3500
- };
3501
- const provider = function(modelId) {
3502
- return createLanguageModel(modelId);
3503
- };
3504
- provider.languageModel = createLanguageModel;
3505
- provider.chat = createChatModel;
3506
- provider.completion = createCompletionModel;
3507
- provider.responses = createResponsesModel;
3508
- provider.embedding = createEmbeddingModel;
3509
- provider.textEmbedding = createEmbeddingModel;
3510
- provider.textEmbeddingModel = createEmbeddingModel;
3511
- provider.image = createImageModel;
3512
- provider.imageModel = createImageModel;
3513
- provider.transcription = createTranscriptionModel;
3514
- provider.transcriptionModel = createTranscriptionModel;
3515
- provider.speech = createSpeechModel;
3516
- provider.speechModel = createSpeechModel;
3517
- provider.tools = openaiTools;
3518
- return provider;
3519
- }
3520
- var openai = createOpenAI();
3521
-
3522
- exports.createOpenAI = createOpenAI;
3523
- exports.openai = openai;
3524
- //# sourceMappingURL=chunk-FALVL2VV.cjs.map
3525
- //# sourceMappingURL=chunk-FALVL2VV.cjs.map