@mastra/core 1.0.0-beta.11 → 1.0.0-beta.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (394) hide show
  1. package/CHANGELOG.md +173 -0
  2. package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
  3. package/dist/_types/@internal_ai-sdk-v4/dist/test.d.ts +65 -0
  4. package/dist/_types/@internal_ai-sdk-v5/dist/index.d.ts +8396 -0
  5. package/dist/_types/@internal_ai-sdk-v5/dist/test.d.ts +1708 -0
  6. package/dist/_types/@internal_external-types/dist/index.d.ts +858 -0
  7. package/dist/agent/agent-legacy.d.ts +1 -1
  8. package/dist/agent/agent.d.ts +1 -1
  9. package/dist/agent/agent.d.ts.map +1 -1
  10. package/dist/agent/agent.types.d.ts +5 -1
  11. package/dist/agent/agent.types.d.ts.map +1 -1
  12. package/dist/agent/index.cjs +9 -9
  13. package/dist/agent/index.js +2 -2
  14. package/dist/agent/message-list/index.cjs +3 -3
  15. package/dist/agent/message-list/index.d.ts +4 -3
  16. package/dist/agent/message-list/index.d.ts.map +1 -1
  17. package/dist/agent/message-list/index.js +1 -1
  18. package/dist/agent/message-list/prompt/attachments-to-parts.d.ts +1 -1
  19. package/dist/agent/message-list/prompt/invalid-content-error.d.ts +1 -1
  20. package/dist/agent/message-list/types.d.ts +3 -3
  21. package/dist/agent/message-list/types.d.ts.map +1 -1
  22. package/dist/agent/message-list/utils/ai-v4-v5/core-model-message.d.ts +1 -1
  23. package/dist/agent/message-list/utils/ai-v4-v5/ui-message.d.ts +1 -1
  24. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts +2 -2
  25. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts.map +1 -1
  26. package/dist/agent/message-list/utils/convert-messages.d.ts +2 -2
  27. package/dist/agent/message-list/utils/convert-messages.d.ts.map +1 -1
  28. package/dist/agent/types.d.ts +3 -3
  29. package/dist/agent/utils.d.ts +3 -3
  30. package/dist/agent/utils.d.ts.map +1 -1
  31. package/dist/agent/workflows/prepare-stream/index.d.ts +2 -1
  32. package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
  33. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
  34. package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -1
  35. package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
  36. package/dist/bundler/types.d.ts +14 -1
  37. package/dist/bundler/types.d.ts.map +1 -1
  38. package/dist/{chunk-U3XOLEPX.js → chunk-2IU4RGU5.js} +6 -32
  39. package/dist/chunk-2IU4RGU5.js.map +1 -0
  40. package/dist/chunk-2SQB3WBT.js +4574 -0
  41. package/dist/chunk-2SQB3WBT.js.map +1 -0
  42. package/dist/{chunk-THZTRBFS.js → chunk-373OC54J.js} +8 -8
  43. package/dist/chunk-373OC54J.js.map +1 -0
  44. package/dist/{chunk-F2GAJSBI.js → chunk-4BC5FUAO.js} +8 -6
  45. package/dist/{chunk-F2GAJSBI.js.map → chunk-4BC5FUAO.js.map} +1 -1
  46. package/dist/chunk-55VPMN3N.js +250 -0
  47. package/dist/chunk-55VPMN3N.js.map +1 -0
  48. package/dist/{chunk-QM5SRDJX.js → chunk-5PTZG26U.js} +66 -84
  49. package/dist/chunk-5PTZG26U.js.map +1 -0
  50. package/dist/chunk-5VZGJTPR.js +4837 -0
  51. package/dist/chunk-5VZGJTPR.js.map +1 -0
  52. package/dist/{chunk-C36YRTZ6.js → chunk-62Q7K656.js} +6 -7
  53. package/dist/chunk-62Q7K656.js.map +1 -0
  54. package/dist/chunk-6PMMP3FR.js +7 -0
  55. package/dist/chunk-6PMMP3FR.js.map +1 -0
  56. package/dist/{chunk-DZUJEN5N.cjs → chunk-6SZKM6EC.cjs} +10 -3
  57. package/dist/{chunk-DZUJEN5N.cjs.map → chunk-6SZKM6EC.cjs.map} +1 -1
  58. package/dist/{chunk-5WRI5ZAA.js → chunk-7D4SUZUM.js} +10 -4
  59. package/dist/{chunk-5WRI5ZAA.js.map → chunk-7D4SUZUM.js.map} +1 -1
  60. package/dist/{chunk-YWMMBIOM.cjs → chunk-7HEAVZRS.cjs} +15 -15
  61. package/dist/{chunk-YWMMBIOM.cjs.map → chunk-7HEAVZRS.cjs.map} +1 -1
  62. package/dist/{chunk-BUKY6CTR.cjs → chunk-AGHLXC4I.cjs} +106 -36
  63. package/dist/chunk-AGHLXC4I.cjs.map +1 -0
  64. package/dist/{chunk-PK2A5WBG.js → chunk-ARAQIW6E.js} +222 -604
  65. package/dist/chunk-ARAQIW6E.js.map +1 -0
  66. package/dist/{chunk-US2U7ECW.js → chunk-BQDZIQ3G.js} +156 -90
  67. package/dist/chunk-BQDZIQ3G.js.map +1 -0
  68. package/dist/chunk-D22XABFZ.js +79 -0
  69. package/dist/chunk-D22XABFZ.js.map +1 -0
  70. package/dist/{chunk-2ULLRN4Y.js → chunk-E5BQRAJK.js} +943 -626
  71. package/dist/chunk-E5BQRAJK.js.map +1 -0
  72. package/dist/chunk-FST2G2FQ.cjs +84 -0
  73. package/dist/chunk-FST2G2FQ.cjs.map +1 -0
  74. package/dist/chunk-FVQTJUBD.cjs +2120 -0
  75. package/dist/chunk-FVQTJUBD.cjs.map +1 -0
  76. package/dist/chunk-G6E6V2Z4.js +2070 -0
  77. package/dist/chunk-G6E6V2Z4.js.map +1 -0
  78. package/dist/{chunk-7P6BNIJH.js → chunk-GIWC35YQ.js} +105 -35
  79. package/dist/chunk-GIWC35YQ.js.map +1 -0
  80. package/dist/{chunk-4JKEUSCC.cjs → chunk-H4VUIOWU.cjs} +22 -20
  81. package/dist/chunk-H4VUIOWU.cjs.map +1 -0
  82. package/dist/{chunk-TWH4PTDG.cjs → chunk-HWMMIRIF.cjs} +32 -27
  83. package/dist/chunk-HWMMIRIF.cjs.map +1 -0
  84. package/dist/chunk-IXZ2T2QX.cjs +448 -0
  85. package/dist/chunk-IXZ2T2QX.cjs.map +1 -0
  86. package/dist/chunk-L3NKIMF5.cjs +10 -0
  87. package/dist/chunk-L3NKIMF5.cjs.map +1 -0
  88. package/dist/chunk-L4JCRWDY.cjs +252 -0
  89. package/dist/chunk-L4JCRWDY.cjs.map +1 -0
  90. package/dist/{chunk-BJXKH4LG.cjs → chunk-LGB4VNZI.cjs} +43 -78
  91. package/dist/chunk-LGB4VNZI.cjs.map +1 -0
  92. package/dist/{chunk-PG5H6QIO.cjs → chunk-MLKE7HRS.cjs} +41 -21
  93. package/dist/chunk-MLKE7HRS.cjs.map +1 -0
  94. package/dist/{chunk-OEIVMCWX.js → chunk-MRRFTNF4.js} +2537 -84
  95. package/dist/chunk-MRRFTNF4.js.map +1 -0
  96. package/dist/chunk-MXBVP7HX.cjs +4842 -0
  97. package/dist/chunk-MXBVP7HX.cjs.map +1 -0
  98. package/dist/chunk-NESKUIRE.cjs +4586 -0
  99. package/dist/chunk-NESKUIRE.cjs.map +1 -0
  100. package/dist/{chunk-SVLMF4UZ.cjs → chunk-NIOEY3N3.cjs} +66 -85
  101. package/dist/chunk-NIOEY3N3.cjs.map +1 -0
  102. package/dist/{chunk-CZEJQSWB.cjs → chunk-OWIEOL55.cjs} +295 -677
  103. package/dist/chunk-OWIEOL55.cjs.map +1 -0
  104. package/dist/{chunk-WTSZBHIZ.cjs → chunk-PJAK4U6R.cjs} +24 -24
  105. package/dist/{chunk-WTSZBHIZ.cjs.map → chunk-PJAK4U6R.cjs.map} +1 -1
  106. package/dist/{chunk-52RSUALV.cjs → chunk-R5AJGM55.cjs} +1314 -995
  107. package/dist/chunk-R5AJGM55.cjs.map +1 -0
  108. package/dist/{chunk-IVV5TOMD.js → chunk-RCJLMMTO.js} +32 -12
  109. package/dist/chunk-RCJLMMTO.js.map +1 -0
  110. package/dist/{chunk-S73Z3PBJ.cjs → chunk-SZYSDJTN.cjs} +27 -28
  111. package/dist/chunk-SZYSDJTN.cjs.map +1 -0
  112. package/dist/{chunk-YC6PJEPH.cjs → chunk-U4CSOY6T.cjs} +188 -122
  113. package/dist/chunk-U4CSOY6T.cjs.map +1 -0
  114. package/dist/chunk-UBSPZTQX.js +434 -0
  115. package/dist/chunk-UBSPZTQX.js.map +1 -0
  116. package/dist/{chunk-SCUWP4II.cjs → chunk-VEPP75C4.cjs} +47 -74
  117. package/dist/chunk-VEPP75C4.cjs.map +1 -0
  118. package/dist/{chunk-JIGDJK2O.js → chunk-VETAQUW3.js} +4 -39
  119. package/dist/chunk-VETAQUW3.js.map +1 -0
  120. package/dist/{chunk-Z57R5WS4.js → chunk-WPTTKULS.js} +4 -4
  121. package/dist/{chunk-Z57R5WS4.js.map → chunk-WPTTKULS.js.map} +1 -1
  122. package/dist/{chunk-O2BJW7YA.js → chunk-WYGUWVTF.js} +5 -5
  123. package/dist/{chunk-O2BJW7YA.js.map → chunk-WYGUWVTF.js.map} +1 -1
  124. package/dist/{chunk-SXNQRJQD.js → chunk-WYWRMIQC.js} +127 -22
  125. package/dist/chunk-WYWRMIQC.js.map +1 -0
  126. package/dist/{chunk-5Q6WAYEY.cjs → chunk-X6IBA7FP.cjs} +137 -50
  127. package/dist/chunk-X6IBA7FP.cjs.map +1 -0
  128. package/dist/{chunk-MRFUISXC.cjs → chunk-Y7MZ5LJT.cjs} +2632 -179
  129. package/dist/chunk-Y7MZ5LJT.cjs.map +1 -0
  130. package/dist/{chunk-JJ5O45LH.js → chunk-YPLZDWG7.js} +32 -27
  131. package/dist/chunk-YPLZDWG7.js.map +1 -0
  132. package/dist/{chunk-MGCGWPQJ.cjs → chunk-Z55SJVEC.cjs} +8 -8
  133. package/dist/chunk-Z55SJVEC.cjs.map +1 -0
  134. package/dist/error/index.cjs +6 -6
  135. package/dist/error/index.d.ts +26 -20
  136. package/dist/error/index.d.ts.map +1 -1
  137. package/dist/error/index.js +1 -1
  138. package/dist/error/utils.d.ts +19 -5
  139. package/dist/error/utils.d.ts.map +1 -1
  140. package/dist/evals/index.cjs +4 -4
  141. package/dist/evals/index.js +1 -1
  142. package/dist/evals/run/index.d.ts +1 -1
  143. package/dist/evals/scoreTraces/index.cjs +8 -8
  144. package/dist/evals/scoreTraces/index.js +2 -2
  145. package/dist/evals/types.d.ts +1 -1
  146. package/dist/events/event-emitter.d.ts +6 -1
  147. package/dist/events/event-emitter.d.ts.map +1 -1
  148. package/dist/index.cjs +2 -2
  149. package/dist/index.js +1 -1
  150. package/dist/integration/index.cjs +2 -2
  151. package/dist/integration/index.js +1 -1
  152. package/dist/llm/index.cjs +15 -15
  153. package/dist/llm/index.d.ts +2 -2
  154. package/dist/llm/index.d.ts.map +1 -1
  155. package/dist/llm/index.js +5 -5
  156. package/dist/llm/model/aisdk/v5/model.d.ts +1 -1
  157. package/dist/llm/model/base.types.d.ts +2 -2
  158. package/dist/llm/model/model.d.ts +1 -1
  159. package/dist/llm/model/model.d.ts.map +1 -1
  160. package/dist/llm/model/model.loop.d.ts +2 -2
  161. package/dist/llm/model/model.loop.d.ts.map +1 -1
  162. package/dist/llm/model/model.loop.types.d.ts +1 -1
  163. package/dist/llm/model/model.loop.types.d.ts.map +1 -1
  164. package/dist/llm/model/provider-types.generated.d.ts +51 -11
  165. package/dist/llm/model/shared.types.d.ts +1 -1
  166. package/dist/loop/index.cjs +2 -2
  167. package/dist/loop/index.js +1 -1
  168. package/dist/loop/loop.d.ts +2 -2
  169. package/dist/loop/loop.d.ts.map +1 -1
  170. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts +1 -1
  171. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts.map +1 -1
  172. package/dist/loop/test-utils/options.d.ts.map +1 -1
  173. package/dist/loop/test-utils/streamObject.d.ts +1 -1
  174. package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
  175. package/dist/loop/test-utils/tools.d.ts.map +1 -1
  176. package/dist/loop/types.d.ts +3 -1
  177. package/dist/loop/types.d.ts.map +1 -1
  178. package/dist/loop/workflows/agentic-execution/index.d.ts +13 -13
  179. package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
  180. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +10 -10
  181. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  182. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +5 -5
  183. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
  184. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts +22 -21
  185. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts.map +1 -1
  186. package/dist/loop/workflows/agentic-loop/index.d.ts +13 -13
  187. package/dist/loop/workflows/agentic-loop/index.d.ts.map +1 -1
  188. package/dist/loop/workflows/schema.d.ts +6 -6
  189. package/dist/loop/workflows/schema.d.ts.map +1 -1
  190. package/dist/loop/workflows/stream.d.ts +2 -2
  191. package/dist/loop/workflows/stream.d.ts.map +1 -1
  192. package/dist/mastra/index.cjs +2 -2
  193. package/dist/mastra/index.js +1 -1
  194. package/dist/mcp/index.cjs +4 -4
  195. package/dist/mcp/index.js +1 -1
  196. package/dist/memory/index.cjs +6 -6
  197. package/dist/memory/index.js +1 -1
  198. package/dist/memory/memory.d.ts +1 -1
  199. package/dist/memory/types.d.ts +3 -3
  200. package/dist/memory/types.d.ts.map +1 -1
  201. package/dist/models-dev-D3EKFGAO.cjs +12 -0
  202. package/dist/{models-dev-EO3SUIY2.cjs.map → models-dev-D3EKFGAO.cjs.map} +1 -1
  203. package/dist/models-dev-EO22XOXQ.js +3 -0
  204. package/dist/{models-dev-23RN2WHG.js.map → models-dev-EO22XOXQ.js.map} +1 -1
  205. package/dist/netlify-AE4LNCAI.js +3 -0
  206. package/dist/{netlify-GXJ5D5DD.js.map → netlify-AE4LNCAI.js.map} +1 -1
  207. package/dist/netlify-WE42TZIT.cjs +12 -0
  208. package/dist/{netlify-KJLY3GFS.cjs.map → netlify-WE42TZIT.cjs.map} +1 -1
  209. package/dist/processors/index.cjs +37 -37
  210. package/dist/processors/index.d.ts +2 -2
  211. package/dist/processors/index.d.ts.map +1 -1
  212. package/dist/processors/index.js +1 -1
  213. package/dist/processors/step-schema.d.ts +1267 -1267
  214. package/dist/processors/step-schema.d.ts.map +1 -1
  215. package/dist/provider-registry-6LF3NGC5.js +3 -0
  216. package/dist/{provider-registry-F67Y6OF2.js.map → provider-registry-6LF3NGC5.js.map} +1 -1
  217. package/dist/provider-registry-73FKMXJV.cjs +40 -0
  218. package/dist/{provider-registry-3TG2KUD2.cjs.map → provider-registry-73FKMXJV.cjs.map} +1 -1
  219. package/dist/provider-registry.json +100 -30
  220. package/dist/{registry-generator-UMTNPBJX.js → registry-generator-AVQXI3GX.js} +2 -2
  221. package/dist/{registry-generator-UMTNPBJX.js.map → registry-generator-AVQXI3GX.js.map} +1 -1
  222. package/dist/{registry-generator-34SC4TAU.cjs → registry-generator-KOFNIIWJ.cjs} +2 -2
  223. package/dist/{registry-generator-34SC4TAU.cjs.map → registry-generator-KOFNIIWJ.cjs.map} +1 -1
  224. package/dist/relevance/index.cjs +2 -2
  225. package/dist/relevance/index.js +1 -1
  226. package/dist/server/index.cjs +5 -5
  227. package/dist/server/index.js +1 -1
  228. package/dist/storage/base.d.ts +2 -10
  229. package/dist/storage/base.d.ts.map +1 -1
  230. package/dist/storage/domains/workflows/base.d.ts +2 -8
  231. package/dist/storage/domains/workflows/base.d.ts.map +1 -1
  232. package/dist/storage/domains/workflows/inmemory.d.ts +2 -8
  233. package/dist/storage/domains/workflows/inmemory.d.ts.map +1 -1
  234. package/dist/storage/index.cjs +38 -38
  235. package/dist/storage/index.js +1 -1
  236. package/dist/storage/mock.d.ts +2 -8
  237. package/dist/storage/mock.d.ts.map +1 -1
  238. package/dist/storage/types.d.ts +9 -1
  239. package/dist/storage/types.d.ts.map +1 -1
  240. package/dist/stream/RunOutput.d.ts +1 -1
  241. package/dist/stream/aisdk/v4/input.d.ts +1 -1
  242. package/dist/stream/aisdk/v5/compat/content.d.ts +1 -1
  243. package/dist/stream/aisdk/v5/compat/content.d.ts.map +1 -1
  244. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts +1 -1
  245. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts.map +1 -1
  246. package/dist/stream/aisdk/v5/compat/ui-message.d.ts +1 -1
  247. package/dist/stream/aisdk/v5/compat/ui-message.d.ts.map +1 -1
  248. package/dist/stream/aisdk/v5/compat/validation.d.ts +1 -1
  249. package/dist/stream/aisdk/v5/compat/validation.d.ts.map +1 -1
  250. package/dist/stream/aisdk/v5/execute.d.ts +2 -2
  251. package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
  252. package/dist/stream/aisdk/v5/input.d.ts +1 -1
  253. package/dist/stream/aisdk/v5/input.d.ts.map +1 -1
  254. package/dist/stream/aisdk/v5/output-helpers.d.ts +12 -27
  255. package/dist/stream/aisdk/v5/output-helpers.d.ts.map +1 -1
  256. package/dist/stream/aisdk/v5/output.d.ts +41 -91
  257. package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
  258. package/dist/stream/aisdk/v5/transform.d.ts +1 -1
  259. package/dist/stream/aisdk/v5/transform.d.ts.map +1 -1
  260. package/dist/stream/base/input.d.ts +1 -1
  261. package/dist/stream/base/output.d.ts +9 -31
  262. package/dist/stream/base/output.d.ts.map +1 -1
  263. package/dist/stream/base/schema.d.ts +2 -2
  264. package/dist/stream/base/schema.d.ts.map +1 -1
  265. package/dist/stream/index.cjs +12 -12
  266. package/dist/stream/index.js +2 -2
  267. package/dist/stream/types.d.ts +3 -2
  268. package/dist/stream/types.d.ts.map +1 -1
  269. package/dist/test-utils/llm-mock.cjs +14587 -14
  270. package/dist/test-utils/llm-mock.cjs.map +1 -1
  271. package/dist/test-utils/llm-mock.d.ts +3 -3
  272. package/dist/test-utils/llm-mock.d.ts.map +1 -1
  273. package/dist/test-utils/llm-mock.js +14577 -4
  274. package/dist/test-utils/llm-mock.js.map +1 -1
  275. package/dist/token-6GSAFR2W-SGVIXFCP.cjs +63 -0
  276. package/dist/token-6GSAFR2W-SGVIXFCP.cjs.map +1 -0
  277. package/dist/token-6GSAFR2W-SPYPLMBM.js +61 -0
  278. package/dist/token-6GSAFR2W-SPYPLMBM.js.map +1 -0
  279. package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs +10 -0
  280. package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs.map +1 -0
  281. package/dist/token-util-NEHG7TUY-JRJTGTAB.js +8 -0
  282. package/dist/token-util-NEHG7TUY-JRJTGTAB.js.map +1 -0
  283. package/dist/tools/index.cjs +4 -4
  284. package/dist/tools/index.js +1 -1
  285. package/dist/tools/is-vercel-tool.cjs +2 -2
  286. package/dist/tools/is-vercel-tool.js +1 -1
  287. package/dist/tools/tool-builder/builder.d.ts +2 -1
  288. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  289. package/dist/tools/tool.d.ts.map +1 -1
  290. package/dist/tools/types.d.ts +5 -5
  291. package/dist/tools/types.d.ts.map +1 -1
  292. package/dist/utils.cjs +22 -22
  293. package/dist/utils.d.ts +3 -3
  294. package/dist/utils.d.ts.map +1 -1
  295. package/dist/utils.js +1 -1
  296. package/dist/vector/embed.d.ts +2 -2
  297. package/dist/vector/embed.d.ts.map +1 -1
  298. package/dist/vector/index.cjs +11 -11
  299. package/dist/vector/index.js +3 -3
  300. package/dist/vector/vector.d.ts +1 -1
  301. package/dist/voice/aisdk/speech.d.ts +1 -1
  302. package/dist/voice/aisdk/speech.d.ts.map +1 -1
  303. package/dist/voice/aisdk/transcription.d.ts +1 -1
  304. package/dist/voice/aisdk/transcription.d.ts.map +1 -1
  305. package/dist/voice/composite-voice.d.ts +1 -1
  306. package/dist/voice/composite-voice.d.ts.map +1 -1
  307. package/dist/voice/index.cjs +6 -6
  308. package/dist/voice/index.js +1 -1
  309. package/dist/workflows/constants.cjs +4 -4
  310. package/dist/workflows/constants.d.ts +1 -1
  311. package/dist/workflows/constants.d.ts.map +1 -1
  312. package/dist/workflows/constants.js +1 -1
  313. package/dist/workflows/default.d.ts +9 -16
  314. package/dist/workflows/default.d.ts.map +1 -1
  315. package/dist/workflows/evented/execution-engine.d.ts +3 -2
  316. package/dist/workflows/evented/execution-engine.d.ts.map +1 -1
  317. package/dist/workflows/evented/index.cjs +10 -10
  318. package/dist/workflows/evented/index.js +1 -1
  319. package/dist/workflows/evented/step-executor.d.ts +1 -1
  320. package/dist/workflows/evented/step-executor.d.ts.map +1 -1
  321. package/dist/workflows/evented/workflow-event-processor/index.d.ts +1 -1
  322. package/dist/workflows/evented/workflow-event-processor/index.d.ts.map +1 -1
  323. package/dist/workflows/evented/workflow.d.ts +15 -0
  324. package/dist/workflows/evented/workflow.d.ts.map +1 -1
  325. package/dist/workflows/execution-engine.d.ts +25 -2
  326. package/dist/workflows/execution-engine.d.ts.map +1 -1
  327. package/dist/workflows/handlers/control-flow.d.ts +6 -5
  328. package/dist/workflows/handlers/control-flow.d.ts.map +1 -1
  329. package/dist/workflows/handlers/entry.d.ts +5 -3
  330. package/dist/workflows/handlers/entry.d.ts.map +1 -1
  331. package/dist/workflows/handlers/sleep.d.ts +4 -3
  332. package/dist/workflows/handlers/sleep.d.ts.map +1 -1
  333. package/dist/workflows/handlers/step.d.ts +5 -3
  334. package/dist/workflows/handlers/step.d.ts.map +1 -1
  335. package/dist/workflows/index.cjs +26 -22
  336. package/dist/workflows/index.js +1 -1
  337. package/dist/workflows/step.d.ts +5 -4
  338. package/dist/workflows/step.d.ts.map +1 -1
  339. package/dist/workflows/types.d.ts +66 -14
  340. package/dist/workflows/types.d.ts.map +1 -1
  341. package/dist/workflows/utils.d.ts +11 -0
  342. package/dist/workflows/utils.d.ts.map +1 -1
  343. package/dist/workflows/workflow.d.ts +26 -8
  344. package/dist/workflows/workflow.d.ts.map +1 -1
  345. package/package.json +12 -11
  346. package/src/llm/model/provider-types.generated.d.ts +51 -11
  347. package/dist/agent/__tests__/mock-model.d.ts +0 -8
  348. package/dist/agent/__tests__/mock-model.d.ts.map +0 -1
  349. package/dist/agent/agent-types.test-d.d.ts +0 -2
  350. package/dist/agent/agent-types.test-d.d.ts.map +0 -1
  351. package/dist/ai-sdk.types.d.ts +0 -4705
  352. package/dist/chunk-2ULLRN4Y.js.map +0 -1
  353. package/dist/chunk-3E3ILV6T.cjs +0 -518
  354. package/dist/chunk-3E3ILV6T.cjs.map +0 -1
  355. package/dist/chunk-4JKEUSCC.cjs.map +0 -1
  356. package/dist/chunk-52RSUALV.cjs.map +0 -1
  357. package/dist/chunk-5PAEYE3Q.js +0 -513
  358. package/dist/chunk-5PAEYE3Q.js.map +0 -1
  359. package/dist/chunk-5Q6WAYEY.cjs.map +0 -1
  360. package/dist/chunk-7P6BNIJH.js.map +0 -1
  361. package/dist/chunk-ABJOUEVA.cjs +0 -10
  362. package/dist/chunk-ABJOUEVA.cjs.map +0 -1
  363. package/dist/chunk-BJXKH4LG.cjs.map +0 -1
  364. package/dist/chunk-BUKY6CTR.cjs.map +0 -1
  365. package/dist/chunk-C36YRTZ6.js.map +0 -1
  366. package/dist/chunk-CZEJQSWB.cjs.map +0 -1
  367. package/dist/chunk-IVV5TOMD.js.map +0 -1
  368. package/dist/chunk-JIGDJK2O.js.map +0 -1
  369. package/dist/chunk-JJ5O45LH.js.map +0 -1
  370. package/dist/chunk-MGCGWPQJ.cjs.map +0 -1
  371. package/dist/chunk-MRFUISXC.cjs.map +0 -1
  372. package/dist/chunk-NLNKQD2T.js +0 -7
  373. package/dist/chunk-NLNKQD2T.js.map +0 -1
  374. package/dist/chunk-OEIVMCWX.js.map +0 -1
  375. package/dist/chunk-PG5H6QIO.cjs.map +0 -1
  376. package/dist/chunk-PK2A5WBG.js.map +0 -1
  377. package/dist/chunk-QM5SRDJX.js.map +0 -1
  378. package/dist/chunk-S73Z3PBJ.cjs.map +0 -1
  379. package/dist/chunk-SCUWP4II.cjs.map +0 -1
  380. package/dist/chunk-SVLMF4UZ.cjs.map +0 -1
  381. package/dist/chunk-SXNQRJQD.js.map +0 -1
  382. package/dist/chunk-THZTRBFS.js.map +0 -1
  383. package/dist/chunk-TWH4PTDG.cjs.map +0 -1
  384. package/dist/chunk-U3XOLEPX.js.map +0 -1
  385. package/dist/chunk-US2U7ECW.js.map +0 -1
  386. package/dist/chunk-YC6PJEPH.cjs.map +0 -1
  387. package/dist/models-dev-23RN2WHG.js +0 -3
  388. package/dist/models-dev-EO3SUIY2.cjs +0 -12
  389. package/dist/netlify-GXJ5D5DD.js +0 -3
  390. package/dist/netlify-KJLY3GFS.cjs +0 -12
  391. package/dist/provider-registry-3TG2KUD2.cjs +0 -40
  392. package/dist/provider-registry-F67Y6OF2.js +0 -3
  393. package/dist/tools/tool-stream-types.test-d.d.ts +0 -2
  394. package/dist/tools/tool-stream-types.test-d.d.ts.map +0 -1
@@ -0,0 +1,4842 @@
1
+ 'use strict';
2
+
3
+ var chunkY7MZ5LJT_cjs = require('./chunk-Y7MZ5LJT.cjs');
4
+ var chunk7HEAVZRS_cjs = require('./chunk-7HEAVZRS.cjs');
5
+ var chunkOWIEOL55_cjs = require('./chunk-OWIEOL55.cjs');
6
+ var chunkAGHLXC4I_cjs = require('./chunk-AGHLXC4I.cjs');
7
+ var chunkIXZ2T2QX_cjs = require('./chunk-IXZ2T2QX.cjs');
8
+ var chunkSZYSDJTN_cjs = require('./chunk-SZYSDJTN.cjs');
9
+ var chunkUVHSM2GU_cjs = require('./chunk-UVHSM2GU.cjs');
10
+ var chunkHWMMIRIF_cjs = require('./chunk-HWMMIRIF.cjs');
11
+ var chunkUIGRFDO6_cjs = require('./chunk-UIGRFDO6.cjs');
12
+ var crypto = require('crypto');
13
+ var v4 = require('zod/v4');
14
+
15
+ var openaiErrorDataSchema = v4.z.object({
16
+ error: v4.z.object({
17
+ message: v4.z.string(),
18
+ // The additional information below is handled loosely to support
19
+ // OpenAI-compatible providers that have slightly different error
20
+ // responses:
21
+ type: v4.z.string().nullish(),
22
+ param: v4.z.any().nullish(),
23
+ code: v4.z.union([v4.z.string(), v4.z.number()]).nullish()
24
+ })
25
+ });
26
+ var openaiFailedResponseHandler = chunkOWIEOL55_cjs.createJsonErrorResponseHandler({
27
+ errorSchema: openaiErrorDataSchema,
28
+ errorToMessage: (data) => data.error.message
29
+ });
30
+ function convertToOpenAIChatMessages({
31
+ prompt,
32
+ systemMessageMode = "system"
33
+ }) {
34
+ const messages = [];
35
+ const warnings = [];
36
+ for (const { role, content } of prompt) {
37
+ switch (role) {
38
+ case "system": {
39
+ switch (systemMessageMode) {
40
+ case "system": {
41
+ messages.push({ role: "system", content });
42
+ break;
43
+ }
44
+ case "developer": {
45
+ messages.push({ role: "developer", content });
46
+ break;
47
+ }
48
+ case "remove": {
49
+ warnings.push({
50
+ type: "other",
51
+ message: "system messages are removed for this model"
52
+ });
53
+ break;
54
+ }
55
+ default: {
56
+ const _exhaustiveCheck = systemMessageMode;
57
+ throw new Error(
58
+ `Unsupported system message mode: ${_exhaustiveCheck}`
59
+ );
60
+ }
61
+ }
62
+ break;
63
+ }
64
+ case "user": {
65
+ if (content.length === 1 && content[0].type === "text") {
66
+ messages.push({ role: "user", content: content[0].text });
67
+ break;
68
+ }
69
+ messages.push({
70
+ role: "user",
71
+ content: content.map((part, index) => {
72
+ var _a, _b, _c;
73
+ switch (part.type) {
74
+ case "text": {
75
+ return { type: "text", text: part.text };
76
+ }
77
+ case "file": {
78
+ if (part.mediaType.startsWith("image/")) {
79
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
80
+ return {
81
+ type: "image_url",
82
+ image_url: {
83
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${chunkOWIEOL55_cjs.convertToBase64(part.data)}`,
84
+ // OpenAI specific extension: image detail
85
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
86
+ }
87
+ };
88
+ } else if (part.mediaType.startsWith("audio/")) {
89
+ if (part.data instanceof URL) {
90
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
91
+ functionality: "audio file parts with URLs"
92
+ });
93
+ }
94
+ switch (part.mediaType) {
95
+ case "audio/wav": {
96
+ return {
97
+ type: "input_audio",
98
+ input_audio: {
99
+ data: chunkOWIEOL55_cjs.convertToBase64(part.data),
100
+ format: "wav"
101
+ }
102
+ };
103
+ }
104
+ case "audio/mp3":
105
+ case "audio/mpeg": {
106
+ return {
107
+ type: "input_audio",
108
+ input_audio: {
109
+ data: chunkOWIEOL55_cjs.convertToBase64(part.data),
110
+ format: "mp3"
111
+ }
112
+ };
113
+ }
114
+ default: {
115
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
116
+ functionality: `audio content parts with media type ${part.mediaType}`
117
+ });
118
+ }
119
+ }
120
+ } else if (part.mediaType === "application/pdf") {
121
+ if (part.data instanceof URL) {
122
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
123
+ functionality: "PDF file parts with URLs"
124
+ });
125
+ }
126
+ return {
127
+ type: "file",
128
+ file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
129
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
130
+ file_data: `data:application/pdf;base64,${chunkOWIEOL55_cjs.convertToBase64(part.data)}`
131
+ }
132
+ };
133
+ } else {
134
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
135
+ functionality: `file part media type ${part.mediaType}`
136
+ });
137
+ }
138
+ }
139
+ }
140
+ })
141
+ });
142
+ break;
143
+ }
144
+ case "assistant": {
145
+ let text = "";
146
+ const toolCalls = [];
147
+ for (const part of content) {
148
+ switch (part.type) {
149
+ case "text": {
150
+ text += part.text;
151
+ break;
152
+ }
153
+ case "tool-call": {
154
+ toolCalls.push({
155
+ id: part.toolCallId,
156
+ type: "function",
157
+ function: {
158
+ name: part.toolName,
159
+ arguments: JSON.stringify(part.input)
160
+ }
161
+ });
162
+ break;
163
+ }
164
+ }
165
+ }
166
+ messages.push({
167
+ role: "assistant",
168
+ content: text,
169
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
170
+ });
171
+ break;
172
+ }
173
+ case "tool": {
174
+ for (const toolResponse of content) {
175
+ const output = toolResponse.output;
176
+ let contentValue;
177
+ switch (output.type) {
178
+ case "text":
179
+ case "error-text":
180
+ contentValue = output.value;
181
+ break;
182
+ case "content":
183
+ case "json":
184
+ case "error-json":
185
+ contentValue = JSON.stringify(output.value);
186
+ break;
187
+ }
188
+ messages.push({
189
+ role: "tool",
190
+ tool_call_id: toolResponse.toolCallId,
191
+ content: contentValue
192
+ });
193
+ }
194
+ break;
195
+ }
196
+ default: {
197
+ const _exhaustiveCheck = role;
198
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
199
+ }
200
+ }
201
+ }
202
+ return { messages, warnings };
203
+ }
204
+ function getResponseMetadata({
205
+ id,
206
+ model,
207
+ created
208
+ }) {
209
+ return {
210
+ id: id != null ? id : void 0,
211
+ modelId: model != null ? model : void 0,
212
+ timestamp: created ? new Date(created * 1e3) : void 0
213
+ };
214
+ }
215
+ function mapOpenAIFinishReason(finishReason) {
216
+ switch (finishReason) {
217
+ case "stop":
218
+ return "stop";
219
+ case "length":
220
+ return "length";
221
+ case "content_filter":
222
+ return "content-filter";
223
+ case "function_call":
224
+ case "tool_calls":
225
+ return "tool-calls";
226
+ default:
227
+ return "unknown";
228
+ }
229
+ }
230
+ var openaiChatResponseSchema = chunkOWIEOL55_cjs.lazyValidator(
231
+ () => chunkOWIEOL55_cjs.zodSchema(
232
+ v4.z.object({
233
+ id: v4.z.string().nullish(),
234
+ created: v4.z.number().nullish(),
235
+ model: v4.z.string().nullish(),
236
+ choices: v4.z.array(
237
+ v4.z.object({
238
+ message: v4.z.object({
239
+ role: v4.z.literal("assistant").nullish(),
240
+ content: v4.z.string().nullish(),
241
+ tool_calls: v4.z.array(
242
+ v4.z.object({
243
+ id: v4.z.string().nullish(),
244
+ type: v4.z.literal("function"),
245
+ function: v4.z.object({
246
+ name: v4.z.string(),
247
+ arguments: v4.z.string()
248
+ })
249
+ })
250
+ ).nullish(),
251
+ annotations: v4.z.array(
252
+ v4.z.object({
253
+ type: v4.z.literal("url_citation"),
254
+ start_index: v4.z.number(),
255
+ end_index: v4.z.number(),
256
+ url: v4.z.string(),
257
+ title: v4.z.string()
258
+ })
259
+ ).nullish()
260
+ }),
261
+ index: v4.z.number(),
262
+ logprobs: v4.z.object({
263
+ content: v4.z.array(
264
+ v4.z.object({
265
+ token: v4.z.string(),
266
+ logprob: v4.z.number(),
267
+ top_logprobs: v4.z.array(
268
+ v4.z.object({
269
+ token: v4.z.string(),
270
+ logprob: v4.z.number()
271
+ })
272
+ )
273
+ })
274
+ ).nullish()
275
+ }).nullish(),
276
+ finish_reason: v4.z.string().nullish()
277
+ })
278
+ ),
279
+ usage: v4.z.object({
280
+ prompt_tokens: v4.z.number().nullish(),
281
+ completion_tokens: v4.z.number().nullish(),
282
+ total_tokens: v4.z.number().nullish(),
283
+ prompt_tokens_details: v4.z.object({
284
+ cached_tokens: v4.z.number().nullish()
285
+ }).nullish(),
286
+ completion_tokens_details: v4.z.object({
287
+ reasoning_tokens: v4.z.number().nullish(),
288
+ accepted_prediction_tokens: v4.z.number().nullish(),
289
+ rejected_prediction_tokens: v4.z.number().nullish()
290
+ }).nullish()
291
+ }).nullish()
292
+ })
293
+ )
294
+ );
295
+ var openaiChatChunkSchema = chunkOWIEOL55_cjs.lazyValidator(
296
+ () => chunkOWIEOL55_cjs.zodSchema(
297
+ v4.z.union([
298
+ v4.z.object({
299
+ id: v4.z.string().nullish(),
300
+ created: v4.z.number().nullish(),
301
+ model: v4.z.string().nullish(),
302
+ choices: v4.z.array(
303
+ v4.z.object({
304
+ delta: v4.z.object({
305
+ role: v4.z.enum(["assistant"]).nullish(),
306
+ content: v4.z.string().nullish(),
307
+ tool_calls: v4.z.array(
308
+ v4.z.object({
309
+ index: v4.z.number(),
310
+ id: v4.z.string().nullish(),
311
+ type: v4.z.literal("function").nullish(),
312
+ function: v4.z.object({
313
+ name: v4.z.string().nullish(),
314
+ arguments: v4.z.string().nullish()
315
+ })
316
+ })
317
+ ).nullish(),
318
+ annotations: v4.z.array(
319
+ v4.z.object({
320
+ type: v4.z.literal("url_citation"),
321
+ start_index: v4.z.number(),
322
+ end_index: v4.z.number(),
323
+ url: v4.z.string(),
324
+ title: v4.z.string()
325
+ })
326
+ ).nullish()
327
+ }).nullish(),
328
+ logprobs: v4.z.object({
329
+ content: v4.z.array(
330
+ v4.z.object({
331
+ token: v4.z.string(),
332
+ logprob: v4.z.number(),
333
+ top_logprobs: v4.z.array(
334
+ v4.z.object({
335
+ token: v4.z.string(),
336
+ logprob: v4.z.number()
337
+ })
338
+ )
339
+ })
340
+ ).nullish()
341
+ }).nullish(),
342
+ finish_reason: v4.z.string().nullish(),
343
+ index: v4.z.number()
344
+ })
345
+ ),
346
+ usage: v4.z.object({
347
+ prompt_tokens: v4.z.number().nullish(),
348
+ completion_tokens: v4.z.number().nullish(),
349
+ total_tokens: v4.z.number().nullish(),
350
+ prompt_tokens_details: v4.z.object({
351
+ cached_tokens: v4.z.number().nullish()
352
+ }).nullish(),
353
+ completion_tokens_details: v4.z.object({
354
+ reasoning_tokens: v4.z.number().nullish(),
355
+ accepted_prediction_tokens: v4.z.number().nullish(),
356
+ rejected_prediction_tokens: v4.z.number().nullish()
357
+ }).nullish()
358
+ }).nullish()
359
+ }),
360
+ openaiErrorDataSchema
361
+ ])
362
+ )
363
+ );
364
+ var openaiChatLanguageModelOptions = chunkOWIEOL55_cjs.lazyValidator(
365
+ () => chunkOWIEOL55_cjs.zodSchema(
366
+ v4.z.object({
367
+ /**
368
+ * Modify the likelihood of specified tokens appearing in the completion.
369
+ *
370
+ * Accepts a JSON object that maps tokens (specified by their token ID in
371
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
372
+ */
373
+ logitBias: v4.z.record(v4.z.coerce.number(), v4.z.number()).optional(),
374
+ /**
375
+ * Return the log probabilities of the tokens.
376
+ *
377
+ * Setting to true will return the log probabilities of the tokens that
378
+ * were generated.
379
+ *
380
+ * Setting to a number will return the log probabilities of the top n
381
+ * tokens that were generated.
382
+ */
383
+ logprobs: v4.z.union([v4.z.boolean(), v4.z.number()]).optional(),
384
+ /**
385
+ * Whether to enable parallel function calling during tool use. Default to true.
386
+ */
387
+ parallelToolCalls: v4.z.boolean().optional(),
388
+ /**
389
+ * A unique identifier representing your end-user, which can help OpenAI to
390
+ * monitor and detect abuse.
391
+ */
392
+ user: v4.z.string().optional(),
393
+ /**
394
+ * Reasoning effort for reasoning models. Defaults to `medium`.
395
+ */
396
+ reasoningEffort: v4.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
397
+ /**
398
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
399
+ */
400
+ maxCompletionTokens: v4.z.number().optional(),
401
+ /**
402
+ * Whether to enable persistence in responses API.
403
+ */
404
+ store: v4.z.boolean().optional(),
405
+ /**
406
+ * Metadata to associate with the request.
407
+ */
408
+ metadata: v4.z.record(v4.z.string().max(64), v4.z.string().max(512)).optional(),
409
+ /**
410
+ * Parameters for prediction mode.
411
+ */
412
+ prediction: v4.z.record(v4.z.string(), v4.z.any()).optional(),
413
+ /**
414
+ * Whether to use structured outputs.
415
+ *
416
+ * @default true
417
+ */
418
+ structuredOutputs: v4.z.boolean().optional(),
419
+ /**
420
+ * Service tier for the request.
421
+ * - 'auto': Default service tier. The request will be processed with the service tier configured in the
422
+ * Project settings. Unless otherwise configured, the Project will use 'default'.
423
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
424
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
425
+ * - 'default': The request will be processed with the standard pricing and performance for the selected model.
426
+ *
427
+ * @default 'auto'
428
+ */
429
+ serviceTier: v4.z.enum(["auto", "flex", "priority", "default"]).optional(),
430
+ /**
431
+ * Whether to use strict JSON schema validation.
432
+ *
433
+ * @default false
434
+ */
435
+ strictJsonSchema: v4.z.boolean().optional(),
436
+ /**
437
+ * Controls the verbosity of the model's responses.
438
+ * Lower values will result in more concise responses, while higher values will result in more verbose responses.
439
+ */
440
+ textVerbosity: v4.z.enum(["low", "medium", "high"]).optional(),
441
+ /**
442
+ * A cache key for prompt caching. Allows manual control over prompt caching behavior.
443
+ * Useful for improving cache hit rates and working around automatic caching issues.
444
+ */
445
+ promptCacheKey: v4.z.string().optional(),
446
+ /**
447
+ * The retention policy for the prompt cache.
448
+ * - 'in_memory': Default. Standard prompt caching behavior.
449
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
450
+ * Currently only available for 5.1 series models.
451
+ *
452
+ * @default 'in_memory'
453
+ */
454
+ promptCacheRetention: v4.z.enum(["in_memory", "24h"]).optional(),
455
+ /**
456
+ * A stable identifier used to help detect users of your application
457
+ * that may be violating OpenAI's usage policies. The IDs should be a
458
+ * string that uniquely identifies each user. We recommend hashing their
459
+ * username or email address, in order to avoid sending us any identifying
460
+ * information.
461
+ */
462
+ safetyIdentifier: v4.z.string().optional()
463
+ })
464
+ )
465
+ );
466
+ function prepareChatTools({
467
+ tools,
468
+ toolChoice,
469
+ structuredOutputs,
470
+ strictJsonSchema
471
+ }) {
472
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
473
+ const toolWarnings = [];
474
+ if (tools == null) {
475
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
476
+ }
477
+ const openaiTools = [];
478
+ for (const tool of tools) {
479
+ switch (tool.type) {
480
+ case "function":
481
+ openaiTools.push({
482
+ type: "function",
483
+ function: {
484
+ name: tool.name,
485
+ description: tool.description,
486
+ parameters: tool.inputSchema,
487
+ strict: structuredOutputs ? strictJsonSchema : void 0
488
+ }
489
+ });
490
+ break;
491
+ default:
492
+ toolWarnings.push({ type: "unsupported-tool", tool });
493
+ break;
494
+ }
495
+ }
496
+ if (toolChoice == null) {
497
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
498
+ }
499
+ const type = toolChoice.type;
500
+ switch (type) {
501
+ case "auto":
502
+ case "none":
503
+ case "required":
504
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
505
+ case "tool":
506
+ return {
507
+ tools: openaiTools,
508
+ toolChoice: {
509
+ type: "function",
510
+ function: {
511
+ name: toolChoice.toolName
512
+ }
513
+ },
514
+ toolWarnings
515
+ };
516
+ default: {
517
+ const _exhaustiveCheck = type;
518
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
519
+ functionality: `tool choice type: ${_exhaustiveCheck}`
520
+ });
521
+ }
522
+ }
523
+ }
524
+ var OpenAIChatLanguageModel = class {
525
+ constructor(modelId, config) {
526
+ this.specificationVersion = "v2";
527
+ this.supportedUrls = {
528
+ "image/*": [/^https?:\/\/.*$/]
529
+ };
530
+ this.modelId = modelId;
531
+ this.config = config;
532
+ }
533
+ get provider() {
534
+ return this.config.provider;
535
+ }
536
+ async getArgs({
537
+ prompt,
538
+ maxOutputTokens,
539
+ temperature,
540
+ topP,
541
+ topK,
542
+ frequencyPenalty,
543
+ presencePenalty,
544
+ stopSequences,
545
+ responseFormat,
546
+ seed,
547
+ tools,
548
+ toolChoice,
549
+ providerOptions
550
+ }) {
551
+ var _a, _b, _c, _d;
552
+ const warnings = [];
553
+ const openaiOptions = (_a = await chunkOWIEOL55_cjs.parseProviderOptions({
554
+ provider: "openai",
555
+ providerOptions,
556
+ schema: openaiChatLanguageModelOptions
557
+ })) != null ? _a : {};
558
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
559
+ if (topK != null) {
560
+ warnings.push({
561
+ type: "unsupported-setting",
562
+ setting: "topK"
563
+ });
564
+ }
565
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
566
+ warnings.push({
567
+ type: "unsupported-setting",
568
+ setting: "responseFormat",
569
+ details: "JSON response format schema is only supported with structuredOutputs"
570
+ });
571
+ }
572
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
573
+ {
574
+ prompt,
575
+ systemMessageMode: getSystemMessageMode(this.modelId)
576
+ }
577
+ );
578
+ warnings.push(...messageWarnings);
579
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
580
+ const baseArgs = {
581
+ // model id:
582
+ model: this.modelId,
583
+ // model specific settings:
584
+ logit_bias: openaiOptions.logitBias,
585
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
586
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
587
+ user: openaiOptions.user,
588
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
589
+ // standardized settings:
590
+ max_tokens: maxOutputTokens,
591
+ temperature,
592
+ top_p: topP,
593
+ frequency_penalty: frequencyPenalty,
594
+ presence_penalty: presencePenalty,
595
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
596
+ type: "json_schema",
597
+ json_schema: {
598
+ schema: responseFormat.schema,
599
+ strict: strictJsonSchema,
600
+ name: (_d = responseFormat.name) != null ? _d : "response",
601
+ description: responseFormat.description
602
+ }
603
+ } : { type: "json_object" } : void 0,
604
+ stop: stopSequences,
605
+ seed,
606
+ verbosity: openaiOptions.textVerbosity,
607
+ // openai specific settings:
608
+ // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
609
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
610
+ store: openaiOptions.store,
611
+ metadata: openaiOptions.metadata,
612
+ prediction: openaiOptions.prediction,
613
+ reasoning_effort: openaiOptions.reasoningEffort,
614
+ service_tier: openaiOptions.serviceTier,
615
+ prompt_cache_key: openaiOptions.promptCacheKey,
616
+ prompt_cache_retention: openaiOptions.promptCacheRetention,
617
+ safety_identifier: openaiOptions.safetyIdentifier,
618
+ // messages:
619
+ messages
620
+ };
621
+ if (isReasoningModel(this.modelId)) {
622
+ if (baseArgs.temperature != null) {
623
+ baseArgs.temperature = void 0;
624
+ warnings.push({
625
+ type: "unsupported-setting",
626
+ setting: "temperature",
627
+ details: "temperature is not supported for reasoning models"
628
+ });
629
+ }
630
+ if (baseArgs.top_p != null) {
631
+ baseArgs.top_p = void 0;
632
+ warnings.push({
633
+ type: "unsupported-setting",
634
+ setting: "topP",
635
+ details: "topP is not supported for reasoning models"
636
+ });
637
+ }
638
+ if (baseArgs.frequency_penalty != null) {
639
+ baseArgs.frequency_penalty = void 0;
640
+ warnings.push({
641
+ type: "unsupported-setting",
642
+ setting: "frequencyPenalty",
643
+ details: "frequencyPenalty is not supported for reasoning models"
644
+ });
645
+ }
646
+ if (baseArgs.presence_penalty != null) {
647
+ baseArgs.presence_penalty = void 0;
648
+ warnings.push({
649
+ type: "unsupported-setting",
650
+ setting: "presencePenalty",
651
+ details: "presencePenalty is not supported for reasoning models"
652
+ });
653
+ }
654
+ if (baseArgs.logit_bias != null) {
655
+ baseArgs.logit_bias = void 0;
656
+ warnings.push({
657
+ type: "other",
658
+ message: "logitBias is not supported for reasoning models"
659
+ });
660
+ }
661
+ if (baseArgs.logprobs != null) {
662
+ baseArgs.logprobs = void 0;
663
+ warnings.push({
664
+ type: "other",
665
+ message: "logprobs is not supported for reasoning models"
666
+ });
667
+ }
668
+ if (baseArgs.top_logprobs != null) {
669
+ baseArgs.top_logprobs = void 0;
670
+ warnings.push({
671
+ type: "other",
672
+ message: "topLogprobs is not supported for reasoning models"
673
+ });
674
+ }
675
+ if (baseArgs.max_tokens != null) {
676
+ if (baseArgs.max_completion_tokens == null) {
677
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
678
+ }
679
+ baseArgs.max_tokens = void 0;
680
+ }
681
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
682
+ if (baseArgs.temperature != null) {
683
+ baseArgs.temperature = void 0;
684
+ warnings.push({
685
+ type: "unsupported-setting",
686
+ setting: "temperature",
687
+ details: "temperature is not supported for the search preview models and has been removed."
688
+ });
689
+ }
690
+ }
691
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
692
+ warnings.push({
693
+ type: "unsupported-setting",
694
+ setting: "serviceTier",
695
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
696
+ });
697
+ baseArgs.service_tier = void 0;
698
+ }
699
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
700
+ warnings.push({
701
+ type: "unsupported-setting",
702
+ setting: "serviceTier",
703
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
704
+ });
705
+ baseArgs.service_tier = void 0;
706
+ }
707
+ const {
708
+ tools: openaiTools,
709
+ toolChoice: openaiToolChoice,
710
+ toolWarnings
711
+ } = prepareChatTools({
712
+ tools,
713
+ toolChoice,
714
+ structuredOutputs,
715
+ strictJsonSchema
716
+ });
717
+ return {
718
+ args: {
719
+ ...baseArgs,
720
+ tools: openaiTools,
721
+ tool_choice: openaiToolChoice
722
+ },
723
+ warnings: [...warnings, ...toolWarnings]
724
+ };
725
+ }
726
+ async doGenerate(options) {
727
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
728
+ const { args: body, warnings } = await this.getArgs(options);
729
+ const {
730
+ responseHeaders,
731
+ value: response,
732
+ rawValue: rawResponse
733
+ } = await chunkOWIEOL55_cjs.postJsonToApi({
734
+ url: this.config.url({
735
+ path: "/chat/completions",
736
+ modelId: this.modelId
737
+ }),
738
+ headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
739
+ body,
740
+ failedResponseHandler: openaiFailedResponseHandler,
741
+ successfulResponseHandler: chunkOWIEOL55_cjs.createJsonResponseHandler(
742
+ openaiChatResponseSchema
743
+ ),
744
+ abortSignal: options.abortSignal,
745
+ fetch: this.config.fetch
746
+ });
747
+ const choice = response.choices[0];
748
+ const content = [];
749
+ const text = choice.message.content;
750
+ if (text != null && text.length > 0) {
751
+ content.push({ type: "text", text });
752
+ }
753
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
754
+ content.push({
755
+ type: "tool-call",
756
+ toolCallId: (_b = toolCall.id) != null ? _b : chunkOWIEOL55_cjs.generateId(),
757
+ toolName: toolCall.function.name,
758
+ input: toolCall.function.arguments
759
+ });
760
+ }
761
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
762
+ content.push({
763
+ type: "source",
764
+ sourceType: "url",
765
+ id: chunkOWIEOL55_cjs.generateId(),
766
+ url: annotation.url,
767
+ title: annotation.title
768
+ });
769
+ }
770
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
771
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
772
+ const providerMetadata = { openai: {} };
773
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
774
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
775
+ }
776
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
777
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
778
+ }
779
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
780
+ providerMetadata.openai.logprobs = choice.logprobs.content;
781
+ }
782
+ return {
783
+ content,
784
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
785
+ usage: {
786
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
787
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
788
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
789
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
790
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
791
+ },
792
+ request: { body },
793
+ response: {
794
+ ...getResponseMetadata(response),
795
+ headers: responseHeaders,
796
+ body: rawResponse
797
+ },
798
+ warnings,
799
+ providerMetadata
800
+ };
801
+ }
802
+ async doStream(options) {
803
+ const { args, warnings } = await this.getArgs(options);
804
+ const body = {
805
+ ...args,
806
+ stream: true,
807
+ stream_options: {
808
+ include_usage: true
809
+ }
810
+ };
811
+ const { responseHeaders, value: response } = await chunkOWIEOL55_cjs.postJsonToApi({
812
+ url: this.config.url({
813
+ path: "/chat/completions",
814
+ modelId: this.modelId
815
+ }),
816
+ headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
817
+ body,
818
+ failedResponseHandler: openaiFailedResponseHandler,
819
+ successfulResponseHandler: chunkOWIEOL55_cjs.createEventSourceResponseHandler(
820
+ openaiChatChunkSchema
821
+ ),
822
+ abortSignal: options.abortSignal,
823
+ fetch: this.config.fetch
824
+ });
825
+ const toolCalls = [];
826
+ let finishReason = "unknown";
827
+ const usage = {
828
+ inputTokens: void 0,
829
+ outputTokens: void 0,
830
+ totalTokens: void 0
831
+ };
832
+ let metadataExtracted = false;
833
+ let isActiveText = false;
834
+ const providerMetadata = { openai: {} };
835
+ return {
836
+ stream: response.pipeThrough(
837
+ new TransformStream({
838
+ start(controller) {
839
+ controller.enqueue({ type: "stream-start", warnings });
840
+ },
841
+ transform(chunk, controller) {
842
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
843
+ if (options.includeRawChunks) {
844
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
845
+ }
846
+ if (!chunk.success) {
847
+ finishReason = "error";
848
+ controller.enqueue({ type: "error", error: chunk.error });
849
+ return;
850
+ }
851
+ const value = chunk.value;
852
+ if ("error" in value) {
853
+ finishReason = "error";
854
+ controller.enqueue({ type: "error", error: value.error });
855
+ return;
856
+ }
857
+ if (!metadataExtracted) {
858
+ const metadata = getResponseMetadata(value);
859
+ if (Object.values(metadata).some(Boolean)) {
860
+ metadataExtracted = true;
861
+ controller.enqueue({
862
+ type: "response-metadata",
863
+ ...getResponseMetadata(value)
864
+ });
865
+ }
866
+ }
867
+ if (value.usage != null) {
868
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
869
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
870
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
871
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
872
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
873
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
874
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
875
+ }
876
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
877
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
878
+ }
879
+ }
880
+ const choice = value.choices[0];
881
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
882
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
883
+ }
884
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
885
+ providerMetadata.openai.logprobs = choice.logprobs.content;
886
+ }
887
+ if ((choice == null ? void 0 : choice.delta) == null) {
888
+ return;
889
+ }
890
+ const delta = choice.delta;
891
+ if (delta.content != null) {
892
+ if (!isActiveText) {
893
+ controller.enqueue({ type: "text-start", id: "0" });
894
+ isActiveText = true;
895
+ }
896
+ controller.enqueue({
897
+ type: "text-delta",
898
+ id: "0",
899
+ delta: delta.content
900
+ });
901
+ }
902
+ if (delta.tool_calls != null) {
903
+ for (const toolCallDelta of delta.tool_calls) {
904
+ const index = toolCallDelta.index;
905
+ if (toolCalls[index] == null) {
906
+ if (toolCallDelta.type !== "function") {
907
+ throw new chunkIXZ2T2QX_cjs.InvalidResponseDataError({
908
+ data: toolCallDelta,
909
+ message: `Expected 'function' type.`
910
+ });
911
+ }
912
+ if (toolCallDelta.id == null) {
913
+ throw new chunkIXZ2T2QX_cjs.InvalidResponseDataError({
914
+ data: toolCallDelta,
915
+ message: `Expected 'id' to be a string.`
916
+ });
917
+ }
918
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
919
+ throw new chunkIXZ2T2QX_cjs.InvalidResponseDataError({
920
+ data: toolCallDelta,
921
+ message: `Expected 'function.name' to be a string.`
922
+ });
923
+ }
924
+ controller.enqueue({
925
+ type: "tool-input-start",
926
+ id: toolCallDelta.id,
927
+ toolName: toolCallDelta.function.name
928
+ });
929
+ toolCalls[index] = {
930
+ id: toolCallDelta.id,
931
+ type: "function",
932
+ function: {
933
+ name: toolCallDelta.function.name,
934
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
935
+ },
936
+ hasFinished: false
937
+ };
938
+ const toolCall2 = toolCalls[index];
939
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
940
+ if (toolCall2.function.arguments.length > 0) {
941
+ controller.enqueue({
942
+ type: "tool-input-delta",
943
+ id: toolCall2.id,
944
+ delta: toolCall2.function.arguments
945
+ });
946
+ }
947
+ if (chunkOWIEOL55_cjs.isParsableJson(toolCall2.function.arguments)) {
948
+ controller.enqueue({
949
+ type: "tool-input-end",
950
+ id: toolCall2.id
951
+ });
952
+ controller.enqueue({
953
+ type: "tool-call",
954
+ toolCallId: (_q = toolCall2.id) != null ? _q : chunkOWIEOL55_cjs.generateId(),
955
+ toolName: toolCall2.function.name,
956
+ input: toolCall2.function.arguments
957
+ });
958
+ toolCall2.hasFinished = true;
959
+ }
960
+ }
961
+ continue;
962
+ }
963
+ const toolCall = toolCalls[index];
964
+ if (toolCall.hasFinished) {
965
+ continue;
966
+ }
967
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
968
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
969
+ }
970
+ controller.enqueue({
971
+ type: "tool-input-delta",
972
+ id: toolCall.id,
973
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
974
+ });
975
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && chunkOWIEOL55_cjs.isParsableJson(toolCall.function.arguments)) {
976
+ controller.enqueue({
977
+ type: "tool-input-end",
978
+ id: toolCall.id
979
+ });
980
+ controller.enqueue({
981
+ type: "tool-call",
982
+ toolCallId: (_x = toolCall.id) != null ? _x : chunkOWIEOL55_cjs.generateId(),
983
+ toolName: toolCall.function.name,
984
+ input: toolCall.function.arguments
985
+ });
986
+ toolCall.hasFinished = true;
987
+ }
988
+ }
989
+ }
990
+ if (delta.annotations != null) {
991
+ for (const annotation of delta.annotations) {
992
+ controller.enqueue({
993
+ type: "source",
994
+ sourceType: "url",
995
+ id: chunkOWIEOL55_cjs.generateId(),
996
+ url: annotation.url,
997
+ title: annotation.title
998
+ });
999
+ }
1000
+ }
1001
+ },
1002
+ flush(controller) {
1003
+ if (isActiveText) {
1004
+ controller.enqueue({ type: "text-end", id: "0" });
1005
+ }
1006
+ controller.enqueue({
1007
+ type: "finish",
1008
+ finishReason,
1009
+ usage,
1010
+ ...providerMetadata != null ? { providerMetadata } : {}
1011
+ });
1012
+ }
1013
+ })
1014
+ ),
1015
+ request: { body },
1016
+ response: { headers: responseHeaders }
1017
+ };
1018
+ }
1019
+ };
1020
+ function isReasoningModel(modelId) {
1021
+ return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
1022
+ }
1023
+ function supportsFlexProcessing(modelId) {
1024
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
1025
+ }
1026
+ function supportsPriorityProcessing(modelId) {
1027
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1028
+ }
1029
+ function getSystemMessageMode(modelId) {
1030
+ var _a, _b;
1031
+ if (!isReasoningModel(modelId)) {
1032
+ return "system";
1033
+ }
1034
+ return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
1035
+ }
1036
+ var reasoningModels = {
1037
+ o3: {
1038
+ systemMessageMode: "developer"
1039
+ },
1040
+ "o3-2025-04-16": {
1041
+ systemMessageMode: "developer"
1042
+ },
1043
+ "o3-mini": {
1044
+ systemMessageMode: "developer"
1045
+ },
1046
+ "o3-mini-2025-01-31": {
1047
+ systemMessageMode: "developer"
1048
+ },
1049
+ "o4-mini": {
1050
+ systemMessageMode: "developer"
1051
+ },
1052
+ "o4-mini-2025-04-16": {
1053
+ systemMessageMode: "developer"
1054
+ }
1055
+ };
1056
+ function convertToOpenAICompletionPrompt({
1057
+ prompt,
1058
+ user = "user",
1059
+ assistant = "assistant"
1060
+ }) {
1061
+ let text = "";
1062
+ if (prompt[0].role === "system") {
1063
+ text += `${prompt[0].content}
1064
+
1065
+ `;
1066
+ prompt = prompt.slice(1);
1067
+ }
1068
+ for (const { role, content } of prompt) {
1069
+ switch (role) {
1070
+ case "system": {
1071
+ throw new chunkIXZ2T2QX_cjs.InvalidPromptError({
1072
+ message: "Unexpected system message in prompt: ${content}",
1073
+ prompt
1074
+ });
1075
+ }
1076
+ case "user": {
1077
+ const userMessage = content.map((part) => {
1078
+ switch (part.type) {
1079
+ case "text": {
1080
+ return part.text;
1081
+ }
1082
+ }
1083
+ }).filter(Boolean).join("");
1084
+ text += `${user}:
1085
+ ${userMessage}
1086
+
1087
+ `;
1088
+ break;
1089
+ }
1090
+ case "assistant": {
1091
+ const assistantMessage = content.map((part) => {
1092
+ switch (part.type) {
1093
+ case "text": {
1094
+ return part.text;
1095
+ }
1096
+ case "tool-call": {
1097
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
1098
+ functionality: "tool-call messages"
1099
+ });
1100
+ }
1101
+ }
1102
+ }).join("");
1103
+ text += `${assistant}:
1104
+ ${assistantMessage}
1105
+
1106
+ `;
1107
+ break;
1108
+ }
1109
+ case "tool": {
1110
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
1111
+ functionality: "tool messages"
1112
+ });
1113
+ }
1114
+ default: {
1115
+ const _exhaustiveCheck = role;
1116
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1117
+ }
1118
+ }
1119
+ }
1120
+ text += `${assistant}:
1121
+ `;
1122
+ return {
1123
+ prompt: text,
1124
+ stopSequences: [`
1125
+ ${user}:`]
1126
+ };
1127
+ }
1128
+ function getResponseMetadata2({
1129
+ id,
1130
+ model,
1131
+ created
1132
+ }) {
1133
+ return {
1134
+ id: id != null ? id : void 0,
1135
+ modelId: model != null ? model : void 0,
1136
+ timestamp: created != null ? new Date(created * 1e3) : void 0
1137
+ };
1138
+ }
1139
+ function mapOpenAIFinishReason2(finishReason) {
1140
+ switch (finishReason) {
1141
+ case "stop":
1142
+ return "stop";
1143
+ case "length":
1144
+ return "length";
1145
+ case "content_filter":
1146
+ return "content-filter";
1147
+ case "function_call":
1148
+ case "tool_calls":
1149
+ return "tool-calls";
1150
+ default:
1151
+ return "unknown";
1152
+ }
1153
+ }
1154
+ var openaiCompletionResponseSchema = chunkOWIEOL55_cjs.lazyValidator(
1155
+ () => chunkOWIEOL55_cjs.zodSchema(
1156
+ v4.z.object({
1157
+ id: v4.z.string().nullish(),
1158
+ created: v4.z.number().nullish(),
1159
+ model: v4.z.string().nullish(),
1160
+ choices: v4.z.array(
1161
+ v4.z.object({
1162
+ text: v4.z.string(),
1163
+ finish_reason: v4.z.string(),
1164
+ logprobs: v4.z.object({
1165
+ tokens: v4.z.array(v4.z.string()),
1166
+ token_logprobs: v4.z.array(v4.z.number()),
1167
+ top_logprobs: v4.z.array(v4.z.record(v4.z.string(), v4.z.number())).nullish()
1168
+ }).nullish()
1169
+ })
1170
+ ),
1171
+ usage: v4.z.object({
1172
+ prompt_tokens: v4.z.number(),
1173
+ completion_tokens: v4.z.number(),
1174
+ total_tokens: v4.z.number()
1175
+ }).nullish()
1176
+ })
1177
+ )
1178
+ );
1179
+ var openaiCompletionChunkSchema = chunkOWIEOL55_cjs.lazyValidator(
1180
+ () => chunkOWIEOL55_cjs.zodSchema(
1181
+ v4.z.union([
1182
+ v4.z.object({
1183
+ id: v4.z.string().nullish(),
1184
+ created: v4.z.number().nullish(),
1185
+ model: v4.z.string().nullish(),
1186
+ choices: v4.z.array(
1187
+ v4.z.object({
1188
+ text: v4.z.string(),
1189
+ finish_reason: v4.z.string().nullish(),
1190
+ index: v4.z.number(),
1191
+ logprobs: v4.z.object({
1192
+ tokens: v4.z.array(v4.z.string()),
1193
+ token_logprobs: v4.z.array(v4.z.number()),
1194
+ top_logprobs: v4.z.array(v4.z.record(v4.z.string(), v4.z.number())).nullish()
1195
+ }).nullish()
1196
+ })
1197
+ ),
1198
+ usage: v4.z.object({
1199
+ prompt_tokens: v4.z.number(),
1200
+ completion_tokens: v4.z.number(),
1201
+ total_tokens: v4.z.number()
1202
+ }).nullish()
1203
+ }),
1204
+ openaiErrorDataSchema
1205
+ ])
1206
+ )
1207
+ );
1208
// Provider options accepted by the legacy OpenAI /completions endpoint.
var openaiCompletionProviderOptions = chunkOWIEOL55_cjs.lazyValidator(() =>
  chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({
      // Echo back the prompt in addition to the completion.
      echo: v4.z.boolean().optional(),
      // Map of token IDs (GPT tokenizer) to bias values from -100 to 100.
      // The bias is added to the logits before sampling: values between -1
      // and 1 nudge selection probability, while -100/100 effectively ban
      // or force the token (e.g. {"50256": -100} blocks <|endoftext|>).
      logitBias: v4.z.record(v4.z.string(), v4.z.number()).optional(),
      // Suffix that comes after a completion of inserted text.
      suffix: v4.z.string().optional(),
      // Stable end-user identifier that helps OpenAI monitor and detect abuse.
      user: v4.z.string().optional(),
      // Log probabilities of generated tokens: `true` returns them for the
      // generated tokens only, a number additionally returns the top-n
      // alternatives. Enabling this increases response size and latency.
      logprobs: v4.z.union([v4.z.boolean(), v4.z.number()]).optional()
    })
  )
);
1252
// LanguageModelV2 implementation backed by OpenAI's legacy /completions
// endpoint (plain text completion; no tool calling, no JSON mode).
var OpenAICompletionLanguageModel = class {
  // modelId: OpenAI completion model name; config: provider plumbing
  // (url builder, headers factory, fetch override, provider name).
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.supportedUrls = {
      // No URLs are supported for completion models.
    };
    this.modelId = modelId;
    this.config = config;
  }
  // Provider-options namespace: the part of the provider name before the
  // first dot (e.g. "openai.completion" -> "openai").
  get providerOptionsName() {
    return this.config.provider.split(".")[0].trim();
  }
  get provider() {
    return this.config.provider;
  }
  // Translates standardized call options into the /completions request body,
  // collecting warnings for settings this endpoint cannot honor.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences: userStopSequences,
    responseFormat,
    tools,
    toolChoice,
    seed,
    providerOptions
  }) {
    const warnings = [];
    // Options may be supplied under "openai" or under the concrete provider
    // name; the latter (spread second) wins on key collisions.
    const openaiOptions = {
      ...await chunkOWIEOL55_cjs.parseProviderOptions({
        provider: "openai",
        providerOptions,
        schema: openaiCompletionProviderOptions
      }),
      ...await chunkOWIEOL55_cjs.parseProviderOptions({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompletionProviderOptions
      })
    };
    // The completions endpoint supports none of: topK, tools, toolChoice,
    // or non-text response formats — warn instead of failing.
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (tools == null ? void 0 : tools.length) {
      warnings.push({ type: "unsupported-setting", setting: "tools" });
    }
    if (toolChoice != null) {
      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
    }
    if (responseFormat != null && responseFormat.type !== "text") {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format is not supported."
      });
    }
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
    // Prompt-derived stop sequences come first, then user-supplied ones.
    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
    return {
      args: {
        // model id:
        model: this.modelId,
        // model specific settings:
        echo: openaiOptions.echo,
        logit_bias: openaiOptions.logitBias,
        // true -> 0 (top-0 alternatives, i.e. only the chosen tokens),
        // false -> omitted, number -> passed through as-is.
        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
        suffix: openaiOptions.suffix,
        user: openaiOptions.user,
        // standardized settings:
        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        seed,
        // prompt:
        prompt: completionPrompt,
        // stop sequences:
        stop: stop.length > 0 ? stop : void 0
      },
      warnings
    };
  }
  // Single-shot (non-streaming) generation via POST /completions.
  async doGenerate(options) {
    var _a, _b, _c;
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await chunkOWIEOL55_cjs.postJsonToApi({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: chunkOWIEOL55_cjs.createJsonResponseHandler(
        openaiCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Only the first choice is surfaced (n > 1 is not part of the interface).
    const choice = response.choices[0];
    const providerMetadata = { openai: {} };
    if (choice.logprobs != null) {
      providerMetadata.openai.logprobs = choice.logprobs;
    }
    return {
      content: [{ type: "text", text: choice.text }],
      usage: {
        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason2(choice.finish_reason),
      request: { body: args },
      response: {
        ...getResponseMetadata2(response),
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata,
      warnings
    };
  }
  // Streaming generation: pipes the SSE chunk stream through a
  // TransformStream that emits the standardized stream-part protocol
  // (stream-start, response-metadata, text-start/-delta/-end, finish).
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await chunkOWIEOL55_cjs.postJsonToApi({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: chunkOWIEOL55_cjs.createEventSourceResponseHandler(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Mutable state threaded through the transform below.
    let finishReason = "unknown";
    const providerMetadata = { openai: {} };
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-validation failures and API error payloads both end
            // the logical stream with an error part.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // First valid chunk carries response metadata and opens the
            // single text part (id "0").
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata2(value)
              });
              controller.enqueue({ type: "text-start", id: "0" });
            }
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
              usage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason2(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.logprobs) != null) {
              providerMetadata.openai.logprobs = choice.logprobs;
            }
            if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: choice.text
              });
            }
          },
          flush(controller) {
            // Close the text part only if it was ever opened.
            if (!isFirstChunk) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              providerMetadata,
              usage
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1480
// Provider options accepted by OpenAI's /embeddings endpoint.
var openaiEmbeddingProviderOptions = chunkOWIEOL55_cjs.lazyValidator(() =>
  chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({
      // Number of dimensions for the output embeddings; only supported by
      // text-embedding-3 and later models.
      dimensions: v4.z.number().optional(),
      // Stable end-user identifier that helps OpenAI monitor and detect abuse.
      user: v4.z.string().optional()
    })
  )
);
1496
// Minimal shape of the /embeddings response; unknown fields are ignored.
var openaiTextEmbeddingResponseSchema = chunkOWIEOL55_cjs.lazyValidator(() => {
  const embeddingItem = v4.z.object({ embedding: v4.z.array(v4.z.number()) });
  const tokenUsage = v4.z.object({ prompt_tokens: v4.z.number() }).nullish();
  return chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({
      data: v4.z.array(embeddingItem),
      usage: tokenUsage
    })
  );
});
1504
// EmbeddingModelV2 implementation for OpenAI's /embeddings endpoint.
var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.maxEmbeddingsPerCall = 2048;
    this.supportsParallelCalls = true;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Embeds a batch of values in a single API call. Throws when the batch
  // exceeds the per-call limit so callers can split it themselves.
  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions
  }) {
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new chunkIXZ2T2QX_cjs.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    // Fall back to an empty options object when no "openai" options given.
    const parsedOptions = await chunkOWIEOL55_cjs.parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
    });
    const openaiOptions = parsedOptions ?? {};
    const requestHeaders = chunkOWIEOL55_cjs.combineHeaders(
      this.config.headers(),
      headers
    );
    const {
      responseHeaders,
      value: response,
      rawValue
    } = await chunkOWIEOL55_cjs.postJsonToApi({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
      }),
      headers: requestHeaders,
      body: {
        model: this.modelId,
        input: values,
        // Float encoding keeps parsing simple (no base64 decoding needed).
        encoding_format: "float",
        dimensions: openaiOptions.dimensions,
        user: openaiOptions.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: chunkOWIEOL55_cjs.createJsonResponseHandler(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    const embeddings = response.data.map((item) => item.embedding);
    const usage = response.usage ? { tokens: response.usage.prompt_tokens } : undefined;
    return {
      embeddings,
      usage,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
1566
// Shape of the /images/generations response: base64 image payloads plus an
// optional revised prompt (DALL-E 3 rewrites prompts).
var openaiImageResponseSchema = chunkOWIEOL55_cjs.lazyValidator(() => {
  const imageItem = v4.z.object({
    b64_json: v4.z.string(),
    revised_prompt: v4.z.string().nullish()
  });
  return chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({ data: v4.z.array(imageItem) })
  );
});
1578
// Maximum number of images each known model accepts in a single API call;
// unknown models default to 1 (see OpenAIImageModel.maxImagesPerCall).
var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10,
  "gpt-image-1-mini": 10
};
// Models that always return base64 JSON, so the request must not include
// an explicit `response_format` parameter.
var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
  "gpt-image-1",
  "gpt-image-1-mini"
]);
1588
// ImageModelV2 implementation for OpenAI's /images/generations endpoint.
var OpenAIImageModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Per-model batch limit; unknown models are conservatively capped at 1.
  get maxImagesPerCall() {
    return modelMaxImagesPerCall[this.modelId] ?? 1;
  }
  get provider() {
    return this.config.provider;
  }
  // Generates images for a prompt. aspectRatio and seed are not supported
  // by this API and are reported as warnings rather than errors.
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    const warnings = [];
    if (aspectRatio != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "aspectRatio",
        details: "This model does not support aspect ratio. Use `size` instead."
      });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    // _internal.currentDate is a test hook; fall back to wall-clock time.
    const currentDate = this.config._internal?.currentDate?.() ?? /* @__PURE__ */ new Date();
    const requestBody = {
      model: this.modelId,
      prompt,
      n,
      size,
      // Raw provider options are passed through to the API verbatim.
      ...providerOptions.openai ?? {},
      // gpt-image models reject response_format (they always return b64).
      ...hasDefaultResponseFormat.has(this.modelId) ? {} : { response_format: "b64_json" }
    };
    const { value: response, responseHeaders } = await chunkOWIEOL55_cjs.postJsonToApi({
      url: this.config.url({
        path: "/images/generations",
        modelId: this.modelId
      }),
      headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: chunkOWIEOL55_cjs.createJsonResponseHandler(
        openaiImageResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      images: response.data.map((item) => item.b64_json),
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      },
      providerMetadata: {
        openai: {
          // Per-image metadata, aligned by index; null when the API did not
          // revise the prompt for that image.
          images: response.data.map(
            (item) => item.revised_prompt ? { revisedPrompt: item.revised_prompt } : null
          )
        }
      }
    };
  }
};
1665
// Shape of the /audio/transcriptions response. `words` and `segments` are
// only present for verbose_json responses with the matching granularity.
var openaiTranscriptionResponseSchema = chunkOWIEOL55_cjs.lazyValidator(() => {
  const wordSchema = v4.z.object({
    word: v4.z.string(),
    start: v4.z.number(),
    end: v4.z.number()
  });
  const segmentSchema = v4.z.object({
    id: v4.z.number(),
    seek: v4.z.number(),
    start: v4.z.number(),
    end: v4.z.number(),
    text: v4.z.string(),
    tokens: v4.z.array(v4.z.number()),
    temperature: v4.z.number(),
    avg_logprob: v4.z.number(),
    compression_ratio: v4.z.number(),
    no_speech_prob: v4.z.number()
  });
  return chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({
      text: v4.z.string(),
      language: v4.z.string().nullish(),
      duration: v4.z.number().nullish(),
      words: v4.z.array(wordSchema).nullish(),
      segments: v4.z.array(segmentSchema).nullish()
    })
  );
});
1695
// Provider options accepted by OpenAI's /audio/transcriptions endpoint.
var openAITranscriptionProviderOptions = chunkOWIEOL55_cjs.lazyValidator(() =>
  chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({
      // Additional information to include in the transcription response.
      include: v4.z.array(v4.z.string()).optional(),
      // Language of the input audio in ISO-639-1 format.
      language: v4.z.string().optional(),
      // Optional text to guide the model's style or continue a previous
      // audio segment.
      prompt: v4.z.string().optional(),
      // Sampling temperature, between 0 and 1 (default 0).
      temperature: v4.z.number().min(0).max(1).default(0).optional(),
      // Timestamp granularities to populate (default ['segment']).
      timestampGranularities: v4.z.array(v4.z.enum(["word", "segment"])).default(["segment"]).optional()
    })
  )
);
1723
// Maps the English language names returned by the transcription API to
// ISO-639-1 codes (used to normalize the `language` field of results).
var languageMap = {
  afrikaans: "af",
  arabic: "ar",
  armenian: "hy",
  azerbaijani: "az",
  belarusian: "be",
  bosnian: "bs",
  bulgarian: "bg",
  catalan: "ca",
  chinese: "zh",
  croatian: "hr",
  czech: "cs",
  danish: "da",
  dutch: "nl",
  english: "en",
  estonian: "et",
  finnish: "fi",
  french: "fr",
  galician: "gl",
  german: "de",
  greek: "el",
  hebrew: "he",
  hindi: "hi",
  hungarian: "hu",
  icelandic: "is",
  indonesian: "id",
  italian: "it",
  japanese: "ja",
  kannada: "kn",
  kazakh: "kk",
  korean: "ko",
  latvian: "lv",
  lithuanian: "lt",
  macedonian: "mk",
  malay: "ms",
  marathi: "mr",
  maori: "mi",
  nepali: "ne",
  norwegian: "no",
  persian: "fa",
  polish: "pl",
  portuguese: "pt",
  romanian: "ro",
  russian: "ru",
  serbian: "sr",
  slovak: "sk",
  slovenian: "sl",
  spanish: "es",
  swahili: "sw",
  swedish: "sv",
  tagalog: "tl",
  tamil: "ta",
  thai: "th",
  turkish: "tr",
  ukrainian: "uk",
  urdu: "ur",
  vietnamese: "vi",
  welsh: "cy"
};
1782
// TranscriptionModelV2 implementation for OpenAI's /audio/transcriptions
// endpoint (whisper-1, gpt-4o-transcribe, gpt-4o-mini-transcribe).
var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the multipart form body: audio file, model name, and any parsed
  // provider options flattened into form fields.
  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await chunkOWIEOL55_cjs.parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    // Audio may arrive as raw bytes or a base64 string.
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([chunkOWIEOL55_cjs.convertBase64ToUint8Array(audio)]);
    formData.append("model", this.modelId);
    const fileExtension = chunkOWIEOL55_cjs.mediaTypeToExtension(mediaType);
    // The third argument gives the upload a filename with the proper
    // extension derived from the media type.
    formData.append(
      "file",
      new File([blob], "audio", { type: mediaType }),
      `audio.${fileExtension}`
    );
    if (openAIOptions) {
      const transcriptionModelOptions = {
        include: openAIOptions.include,
        language: openAIOptions.language,
        prompt: openAIOptions.prompt,
        // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
        // prefer verbose_json to get segments for models that support it
        response_format: [
          "gpt-4o-transcribe",
          "gpt-4o-mini-transcribe"
        ].includes(this.modelId) ? "json" : "verbose_json",
        temperature: openAIOptions.temperature,
        timestamp_granularities: openAIOptions.timestampGranularities
      };
      // Arrays are appended as repeated `key[]` fields; scalars as strings.
      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
        if (value != null) {
          if (Array.isArray(value)) {
            for (const item of value) {
              formData.append(`${key}[]`, String(item));
            }
          } else {
            formData.append(key, String(value));
          }
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  // Transcribes the audio and normalizes the response: segments are
  // preferred, then word timestamps, then an empty list.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h;
    // _internal.currentDate is a test hook; fall back to wall-clock time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await chunkOWIEOL55_cjs.postFormDataToApi({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: chunkOWIEOL55_cjs.createJsonResponseHandler(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // The API returns an English language name; translate it to an
    // ISO-639-1 code when known, otherwise leave undefined.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    return {
      text: response.text,
      // segments ?? word-level timestamps ?? [] (transpiled nullish chain).
      segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
        text: segment.text,
        startSecond: segment.start,
        endSecond: segment.end
      }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
        text: word.word,
        startSecond: word.start,
        endSecond: word.end
      }))) != null ? _g : [],
      language,
      durationInSeconds: (_h = response.duration) != null ? _h : void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
1888
// Provider options for OpenAI's /audio/speech endpoint: optional voice
// instructions and a playback speed within the API-allowed 0.25-4 range.
var openaiSpeechProviderOptionsSchema = chunkOWIEOL55_cjs.lazyValidator(() =>
  chunkOWIEOL55_cjs.zodSchema(
    v4.z.object({
      instructions: v4.z.string().nullish(),
      speed: v4.z.number().min(0.25).max(4).default(1).nullish()
    })
  )
);
1896
// SpeechModelV2 implementation for OpenAI's /audio/speech (TTS) endpoint.
var OpenAISpeechModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body; unsupported output formats fall back to
  // mp3 with a warning, and `language` is always warned (not supported).
  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    language,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await chunkOWIEOL55_cjs.parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: openaiSpeechProviderOptionsSchema
    });
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    if (openAIOptions) {
      // NOTE(review): speechModelOptions is always empty, so this loop is
      // dead code — parsed provider options are never copied into the
      // request body here. Presumably a bundler/refactor artifact; confirm
      // against the original provider source before relying on it.
      const speechModelOptions = {};
      for (const key in speechModelOptions) {
        const value = speechModelOptions[key];
        if (value !== void 0) {
          requestBody[key] = value;
        }
      }
    }
    if (language) {
      warnings.push({
        type: "unsupported-setting",
        setting: "language",
        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
      });
    }
    return {
      requestBody,
      warnings
    };
  }
  // Generates audio; the response body is binary audio data.
  async doGenerate(options) {
    var _a, _b, _c;
    // _internal.currentDate is a test hook; fall back to wall-clock time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await chunkOWIEOL55_cjs.postJsonToApi({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: chunkOWIEOL55_cjs.createBinaryResponseHandler(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
1995
// Input schema for the provider-defined local shell tool: a single "exec"
// action with the command and optional execution environment settings.
var localShellInputSchema = chunkOWIEOL55_cjs.lazySchema(() => {
  const execAction = v4.z.object({
    type: v4.z.literal("exec"),
    command: v4.z.array(v4.z.string()),
    timeoutMs: v4.z.number().optional(),
    user: v4.z.string().optional(),
    workingDirectory: v4.z.string().optional(),
    env: v4.z.record(v4.z.string(), v4.z.string()).optional()
  });
  return chunkOWIEOL55_cjs.zodSchema(v4.z.object({ action: execAction }));
});
2009
// Output schema for the local shell tool: the captured command output.
var localShellOutputSchema = chunkOWIEOL55_cjs.lazySchema(
  () => chunkOWIEOL55_cjs.zodSchema(v4.z.object({ output: v4.z.string() }))
);
// Registers the provider-defined `local_shell` tool factory.
// NOTE(review): the return value is discarded here; presumably the bundler
// dropped an unused export binding while keeping the call — confirm against
// the original provider source.
chunkOWIEOL55_cjs.createProviderDefinedToolFactoryWithOutputSchema({
  id: "openai.local_shell",
  name: "local_shell",
  inputSchema: localShellInputSchema,
  outputSchema: localShellOutputSchema
});
2018
// Returns true when `data` looks like a provider file ID, i.e. it starts
// with one of the configured prefixes. With no prefixes configured
// (undefined/null), nothing is treated as a file ID.
function isFileId(data, prefixes) {
  if (!prefixes) {
    return false;
  }
  for (const prefix of prefixes) {
    if (data.startsWith(prefix)) {
      return true;
    }
  }
  return false;
}
2022
// Converts a standardized prompt (system/user/assistant/tool messages) into
// the OpenAI Responses API `input` array. When `store` is true, previously
// stored items are referenced by ID (item_reference) instead of re-sending
// their content. Returns the input list plus any conversion warnings.
async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
  fileIdPrefixes,
  store,
  hasLocalShellTool = false
}) {
  var _a, _b, _c, _d;
  const input = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // Depending on the model, system messages map to "system",
        // "developer", or are dropped entirely.
        switch (systemMessageMode) {
          case "system": {
            input.push({ role: "system", content });
            break;
          }
          case "developer": {
            input.push({ role: "developer", content });
            break;
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        // Text parts map to input_text; file parts map to input_image /
        // input_file, sent either as a file ID, a URL, or inline base64.
        input.push({
          role: "user",
          content: content.map((part, index) => {
            var _a2, _b2, _c2;
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text };
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" is a wildcard; default it to JPEG.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "input_image",
                    ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
                      image_url: `data:${mediaType};base64,${chunkOWIEOL55_cjs.convertToBase64(part.data)}`
                    },
                    detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
                  };
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    return {
                      type: "input_file",
                      file_url: part.data.toString()
                    };
                  }
                  return {
                    type: "input_file",
                    ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
                      filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${chunkOWIEOL55_cjs.convertToBase64(part.data)}`
                    }
                  };
                } else {
                  // Only images and PDFs are supported as file inputs.
                  throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Reasoning parts that share an itemId are merged into a single
        // reasoning message; this map tracks them per assistant message.
        const reasoningMessages = {};
        for (const part of content) {
          switch (part.type) {
            case "text": {
              const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId;
              // Stored items are referenced instead of re-sent.
              if (store && id != null) {
                input.push({ type: "item_reference", id });
                break;
              }
              input.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                id
              });
              break;
            }
            case "tool-call": {
              // Provider-executed tool calls are replayed by the server,
              // not by the client.
              if (part.providerExecuted) {
                break;
              }
              const id = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId;
              if (store && id != null) {
                input.push({ type: "item_reference", id });
                break;
              }
              if (hasLocalShellTool && part.toolName === "local_shell") {
                const parsedInput = await chunkOWIEOL55_cjs.validateTypes({
                  value: part.input,
                  schema: localShellInputSchema
                });
                input.push({
                  type: "local_shell_call",
                  call_id: part.toolCallId,
                  id,
                  action: {
                    type: "exec",
                    command: parsedInput.action.command,
                    timeout_ms: parsedInput.action.timeoutMs,
                    user: parsedInput.action.user,
                    working_directory: parsedInput.action.workingDirectory,
                    env: parsedInput.action.env
                  }
                });
                break;
              }
              input.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                id
              });
              break;
            }
            // assistant tool result parts are from provider-executed tools:
            case "tool-result": {
              if (store) {
                input.push({ type: "item_reference", id: part.toolCallId });
              } else {
                warnings.push({
                  type: "other",
                  message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
                });
              }
              break;
            }
            case "reasoning": {
              const providerOptions = await chunkOWIEOL55_cjs.parseProviderOptions({
                provider: "openai",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema
              });
              const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
              if (reasoningId != null) {
                const reasoningMessage = reasoningMessages[reasoningId];
                if (store) {
                  // First sighting of this reasoning item: reference it and
                  // remember it so later parts with the same ID are skipped.
                  if (reasoningMessage === void 0) {
                    input.push({ type: "item_reference", id: reasoningId });
                    reasoningMessages[reasoningId] = {
                      type: "reasoning",
                      id: reasoningId,
                      summary: []
                    };
                  }
                } else {
                  // store=false: re-send reasoning content inline, merging
                  // summary parts that share the same item ID.
                  const summaryParts = [];
                  if (part.text.length > 0) {
                    summaryParts.push({
                      type: "summary_text",
                      text: part.text
                    });
                  } else if (reasoningMessage !== void 0) {
                    warnings.push({
                      type: "other",
                      message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
                    });
                  }
                  if (reasoningMessage === void 0) {
                    reasoningMessages[reasoningId] = {
                      type: "reasoning",
                      id: reasoningId,
                      encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                      summary: summaryParts
                    };
                    input.push(reasoningMessages[reasoningId]);
                  } else {
                    // Message object is already in `input`; mutate in place.
                    reasoningMessage.summary.push(...summaryParts);
                    if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
                      reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
                    }
                  }
                }
              } else {
                warnings.push({
                  type: "other",
                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
                });
              }
              break;
            }
          }
        }
        break;
      }
      case "tool": {
        for (const part of content) {
          const output = part.output;
          if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
            const parsedOutput = await chunkOWIEOL55_cjs.validateTypes({
              value: output.value,
              schema: localShellOutputSchema
            });
            input.push({
              type: "local_shell_call_output",
              call_id: part.toolCallId,
              output: parsedOutput.output
            });
            // NOTE(review): this `break` exits the whole for-loop over tool
            // parts, so any parts after a local_shell result in the same
            // message are dropped — confirm intended (vs `continue`).
            break;
          }
          // Normalize tool output to the string/content form the API expects.
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value);
              break;
            case "content":
              contentValue = output.value.map((item) => {
                switch (item.type) {
                  case "text": {
                    return { type: "input_text", text: item.text };
                  }
                  case "media": {
                    return item.mediaType.startsWith("image/") ? {
                      type: "input_image",
                      image_url: `data:${item.mediaType};base64,${item.data}`
                    } : {
                      type: "input_file",
                      filename: "data",
                      file_data: `data:${item.mediaType};base64,${item.data}`
                    };
                  }
                }
              });
              break;
          }
          input.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { input, warnings };
}
2290
+ var openaiResponsesReasoningProviderOptionsSchema = v4.z.object({
2291
+ itemId: v4.z.string().nullish(),
2292
+ reasoningEncryptedContent: v4.z.string().nullish()
2293
+ });
2294
+ function mapOpenAIResponseFinishReason({
2295
+ finishReason,
2296
+ hasFunctionCall
2297
+ }) {
2298
+ switch (finishReason) {
2299
+ case void 0:
2300
+ case null:
2301
+ return hasFunctionCall ? "tool-calls" : "stop";
2302
+ case "max_output_tokens":
2303
+ return "length";
2304
+ case "content_filter":
2305
+ return "content-filter";
2306
+ default:
2307
+ return hasFunctionCall ? "tool-calls" : "unknown";
2308
+ }
2309
+ }
2310
+ var openaiResponsesChunkSchema = chunkOWIEOL55_cjs.lazyValidator(
2311
+ () => chunkOWIEOL55_cjs.zodSchema(
2312
+ v4.z.union([
2313
+ v4.z.object({
2314
+ type: v4.z.literal("response.output_text.delta"),
2315
+ item_id: v4.z.string(),
2316
+ delta: v4.z.string(),
2317
+ logprobs: v4.z.array(
2318
+ v4.z.object({
2319
+ token: v4.z.string(),
2320
+ logprob: v4.z.number(),
2321
+ top_logprobs: v4.z.array(
2322
+ v4.z.object({
2323
+ token: v4.z.string(),
2324
+ logprob: v4.z.number()
2325
+ })
2326
+ )
2327
+ })
2328
+ ).nullish()
2329
+ }),
2330
+ v4.z.object({
2331
+ type: v4.z.enum(["response.completed", "response.incomplete"]),
2332
+ response: v4.z.object({
2333
+ incomplete_details: v4.z.object({ reason: v4.z.string() }).nullish(),
2334
+ usage: v4.z.object({
2335
+ input_tokens: v4.z.number(),
2336
+ input_tokens_details: v4.z.object({ cached_tokens: v4.z.number().nullish() }).nullish(),
2337
+ output_tokens: v4.z.number(),
2338
+ output_tokens_details: v4.z.object({ reasoning_tokens: v4.z.number().nullish() }).nullish()
2339
+ }),
2340
+ service_tier: v4.z.string().nullish()
2341
+ })
2342
+ }),
2343
+ v4.z.object({
2344
+ type: v4.z.literal("response.created"),
2345
+ response: v4.z.object({
2346
+ id: v4.z.string(),
2347
+ created_at: v4.z.number(),
2348
+ model: v4.z.string(),
2349
+ service_tier: v4.z.string().nullish()
2350
+ })
2351
+ }),
2352
+ v4.z.object({
2353
+ type: v4.z.literal("response.output_item.added"),
2354
+ output_index: v4.z.number(),
2355
+ item: v4.z.discriminatedUnion("type", [
2356
+ v4.z.object({
2357
+ type: v4.z.literal("message"),
2358
+ id: v4.z.string()
2359
+ }),
2360
+ v4.z.object({
2361
+ type: v4.z.literal("reasoning"),
2362
+ id: v4.z.string(),
2363
+ encrypted_content: v4.z.string().nullish()
2364
+ }),
2365
+ v4.z.object({
2366
+ type: v4.z.literal("function_call"),
2367
+ id: v4.z.string(),
2368
+ call_id: v4.z.string(),
2369
+ name: v4.z.string(),
2370
+ arguments: v4.z.string()
2371
+ }),
2372
+ v4.z.object({
2373
+ type: v4.z.literal("web_search_call"),
2374
+ id: v4.z.string(),
2375
+ status: v4.z.string()
2376
+ }),
2377
+ v4.z.object({
2378
+ type: v4.z.literal("computer_call"),
2379
+ id: v4.z.string(),
2380
+ status: v4.z.string()
2381
+ }),
2382
+ v4.z.object({
2383
+ type: v4.z.literal("file_search_call"),
2384
+ id: v4.z.string()
2385
+ }),
2386
+ v4.z.object({
2387
+ type: v4.z.literal("image_generation_call"),
2388
+ id: v4.z.string()
2389
+ }),
2390
+ v4.z.object({
2391
+ type: v4.z.literal("code_interpreter_call"),
2392
+ id: v4.z.string(),
2393
+ container_id: v4.z.string(),
2394
+ code: v4.z.string().nullable(),
2395
+ outputs: v4.z.array(
2396
+ v4.z.discriminatedUnion("type", [
2397
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2398
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2399
+ ])
2400
+ ).nullable(),
2401
+ status: v4.z.string()
2402
+ })
2403
+ ])
2404
+ }),
2405
+ v4.z.object({
2406
+ type: v4.z.literal("response.output_item.done"),
2407
+ output_index: v4.z.number(),
2408
+ item: v4.z.discriminatedUnion("type", [
2409
+ v4.z.object({
2410
+ type: v4.z.literal("message"),
2411
+ id: v4.z.string()
2412
+ }),
2413
+ v4.z.object({
2414
+ type: v4.z.literal("reasoning"),
2415
+ id: v4.z.string(),
2416
+ encrypted_content: v4.z.string().nullish()
2417
+ }),
2418
+ v4.z.object({
2419
+ type: v4.z.literal("function_call"),
2420
+ id: v4.z.string(),
2421
+ call_id: v4.z.string(),
2422
+ name: v4.z.string(),
2423
+ arguments: v4.z.string(),
2424
+ status: v4.z.literal("completed")
2425
+ }),
2426
+ v4.z.object({
2427
+ type: v4.z.literal("code_interpreter_call"),
2428
+ id: v4.z.string(),
2429
+ code: v4.z.string().nullable(),
2430
+ container_id: v4.z.string(),
2431
+ outputs: v4.z.array(
2432
+ v4.z.discriminatedUnion("type", [
2433
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2434
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2435
+ ])
2436
+ ).nullable()
2437
+ }),
2438
+ v4.z.object({
2439
+ type: v4.z.literal("image_generation_call"),
2440
+ id: v4.z.string(),
2441
+ result: v4.z.string()
2442
+ }),
2443
+ v4.z.object({
2444
+ type: v4.z.literal("web_search_call"),
2445
+ id: v4.z.string(),
2446
+ status: v4.z.string(),
2447
+ action: v4.z.discriminatedUnion("type", [
2448
+ v4.z.object({
2449
+ type: v4.z.literal("search"),
2450
+ query: v4.z.string().nullish(),
2451
+ sources: v4.z.array(
2452
+ v4.z.discriminatedUnion("type", [
2453
+ v4.z.object({ type: v4.z.literal("url"), url: v4.z.string() }),
2454
+ v4.z.object({ type: v4.z.literal("api"), name: v4.z.string() })
2455
+ ])
2456
+ ).nullish()
2457
+ }),
2458
+ v4.z.object({
2459
+ type: v4.z.literal("open_page"),
2460
+ url: v4.z.string()
2461
+ }),
2462
+ v4.z.object({
2463
+ type: v4.z.literal("find"),
2464
+ url: v4.z.string(),
2465
+ pattern: v4.z.string()
2466
+ })
2467
+ ])
2468
+ }),
2469
+ v4.z.object({
2470
+ type: v4.z.literal("file_search_call"),
2471
+ id: v4.z.string(),
2472
+ queries: v4.z.array(v4.z.string()),
2473
+ results: v4.z.array(
2474
+ v4.z.object({
2475
+ attributes: v4.z.record(v4.z.string(), v4.z.unknown()),
2476
+ file_id: v4.z.string(),
2477
+ filename: v4.z.string(),
2478
+ score: v4.z.number(),
2479
+ text: v4.z.string()
2480
+ })
2481
+ ).nullish()
2482
+ }),
2483
+ v4.z.object({
2484
+ type: v4.z.literal("local_shell_call"),
2485
+ id: v4.z.string(),
2486
+ call_id: v4.z.string(),
2487
+ action: v4.z.object({
2488
+ type: v4.z.literal("exec"),
2489
+ command: v4.z.array(v4.z.string()),
2490
+ timeout_ms: v4.z.number().optional(),
2491
+ user: v4.z.string().optional(),
2492
+ working_directory: v4.z.string().optional(),
2493
+ env: v4.z.record(v4.z.string(), v4.z.string()).optional()
2494
+ })
2495
+ }),
2496
+ v4.z.object({
2497
+ type: v4.z.literal("computer_call"),
2498
+ id: v4.z.string(),
2499
+ status: v4.z.literal("completed")
2500
+ })
2501
+ ])
2502
+ }),
2503
+ v4.z.object({
2504
+ type: v4.z.literal("response.function_call_arguments.delta"),
2505
+ item_id: v4.z.string(),
2506
+ output_index: v4.z.number(),
2507
+ delta: v4.z.string()
2508
+ }),
2509
+ v4.z.object({
2510
+ type: v4.z.literal("response.image_generation_call.partial_image"),
2511
+ item_id: v4.z.string(),
2512
+ output_index: v4.z.number(),
2513
+ partial_image_b64: v4.z.string()
2514
+ }),
2515
+ v4.z.object({
2516
+ type: v4.z.literal("response.code_interpreter_call_code.delta"),
2517
+ item_id: v4.z.string(),
2518
+ output_index: v4.z.number(),
2519
+ delta: v4.z.string()
2520
+ }),
2521
+ v4.z.object({
2522
+ type: v4.z.literal("response.code_interpreter_call_code.done"),
2523
+ item_id: v4.z.string(),
2524
+ output_index: v4.z.number(),
2525
+ code: v4.z.string()
2526
+ }),
2527
+ v4.z.object({
2528
+ type: v4.z.literal("response.output_text.annotation.added"),
2529
+ annotation: v4.z.discriminatedUnion("type", [
2530
+ v4.z.object({
2531
+ type: v4.z.literal("url_citation"),
2532
+ start_index: v4.z.number(),
2533
+ end_index: v4.z.number(),
2534
+ url: v4.z.string(),
2535
+ title: v4.z.string()
2536
+ }),
2537
+ v4.z.object({
2538
+ type: v4.z.literal("file_citation"),
2539
+ file_id: v4.z.string(),
2540
+ filename: v4.z.string().nullish(),
2541
+ index: v4.z.number().nullish(),
2542
+ start_index: v4.z.number().nullish(),
2543
+ end_index: v4.z.number().nullish(),
2544
+ quote: v4.z.string().nullish()
2545
+ })
2546
+ ])
2547
+ }),
2548
+ v4.z.object({
2549
+ type: v4.z.literal("response.reasoning_summary_part.added"),
2550
+ item_id: v4.z.string(),
2551
+ summary_index: v4.z.number()
2552
+ }),
2553
+ v4.z.object({
2554
+ type: v4.z.literal("response.reasoning_summary_text.delta"),
2555
+ item_id: v4.z.string(),
2556
+ summary_index: v4.z.number(),
2557
+ delta: v4.z.string()
2558
+ }),
2559
+ v4.z.object({
2560
+ type: v4.z.literal("response.reasoning_summary_part.done"),
2561
+ item_id: v4.z.string(),
2562
+ summary_index: v4.z.number()
2563
+ }),
2564
+ v4.z.object({
2565
+ type: v4.z.literal("error"),
2566
+ sequence_number: v4.z.number(),
2567
+ error: v4.z.object({
2568
+ type: v4.z.string(),
2569
+ code: v4.z.string(),
2570
+ message: v4.z.string(),
2571
+ param: v4.z.string().nullish()
2572
+ })
2573
+ }),
2574
+ v4.z.object({ type: v4.z.string() }).loose().transform((value) => ({
2575
+ type: "unknown_chunk",
2576
+ message: value.type
2577
+ }))
2578
+ // fallback for unknown chunks
2579
+ ])
2580
+ )
2581
+ );
2582
+ var openaiResponsesResponseSchema = chunkOWIEOL55_cjs.lazyValidator(
2583
+ () => chunkOWIEOL55_cjs.zodSchema(
2584
+ v4.z.object({
2585
+ id: v4.z.string().optional(),
2586
+ created_at: v4.z.number().optional(),
2587
+ error: v4.z.object({
2588
+ message: v4.z.string(),
2589
+ type: v4.z.string(),
2590
+ param: v4.z.string().nullish(),
2591
+ code: v4.z.string()
2592
+ }).nullish(),
2593
+ model: v4.z.string().optional(),
2594
+ output: v4.z.array(
2595
+ v4.z.discriminatedUnion("type", [
2596
+ v4.z.object({
2597
+ type: v4.z.literal("message"),
2598
+ role: v4.z.literal("assistant"),
2599
+ id: v4.z.string(),
2600
+ content: v4.z.array(
2601
+ v4.z.object({
2602
+ type: v4.z.literal("output_text"),
2603
+ text: v4.z.string(),
2604
+ logprobs: v4.z.array(
2605
+ v4.z.object({
2606
+ token: v4.z.string(),
2607
+ logprob: v4.z.number(),
2608
+ top_logprobs: v4.z.array(
2609
+ v4.z.object({
2610
+ token: v4.z.string(),
2611
+ logprob: v4.z.number()
2612
+ })
2613
+ )
2614
+ })
2615
+ ).nullish(),
2616
+ annotations: v4.z.array(
2617
+ v4.z.discriminatedUnion("type", [
2618
+ v4.z.object({
2619
+ type: v4.z.literal("url_citation"),
2620
+ start_index: v4.z.number(),
2621
+ end_index: v4.z.number(),
2622
+ url: v4.z.string(),
2623
+ title: v4.z.string()
2624
+ }),
2625
+ v4.z.object({
2626
+ type: v4.z.literal("file_citation"),
2627
+ file_id: v4.z.string(),
2628
+ filename: v4.z.string().nullish(),
2629
+ index: v4.z.number().nullish(),
2630
+ start_index: v4.z.number().nullish(),
2631
+ end_index: v4.z.number().nullish(),
2632
+ quote: v4.z.string().nullish()
2633
+ }),
2634
+ v4.z.object({
2635
+ type: v4.z.literal("container_file_citation"),
2636
+ container_id: v4.z.string(),
2637
+ file_id: v4.z.string(),
2638
+ filename: v4.z.string().nullish(),
2639
+ start_index: v4.z.number().nullish(),
2640
+ end_index: v4.z.number().nullish(),
2641
+ index: v4.z.number().nullish()
2642
+ }),
2643
+ v4.z.object({
2644
+ type: v4.z.literal("file_path"),
2645
+ file_id: v4.z.string(),
2646
+ index: v4.z.number().nullish()
2647
+ })
2648
+ ])
2649
+ )
2650
+ })
2651
+ )
2652
+ }),
2653
+ v4.z.object({
2654
+ type: v4.z.literal("web_search_call"),
2655
+ id: v4.z.string(),
2656
+ status: v4.z.string(),
2657
+ action: v4.z.discriminatedUnion("type", [
2658
+ v4.z.object({
2659
+ type: v4.z.literal("search"),
2660
+ query: v4.z.string().nullish(),
2661
+ sources: v4.z.array(
2662
+ v4.z.discriminatedUnion("type", [
2663
+ v4.z.object({ type: v4.z.literal("url"), url: v4.z.string() }),
2664
+ v4.z.object({ type: v4.z.literal("api"), name: v4.z.string() })
2665
+ ])
2666
+ ).nullish()
2667
+ }),
2668
+ v4.z.object({
2669
+ type: v4.z.literal("open_page"),
2670
+ url: v4.z.string()
2671
+ }),
2672
+ v4.z.object({
2673
+ type: v4.z.literal("find"),
2674
+ url: v4.z.string(),
2675
+ pattern: v4.z.string()
2676
+ })
2677
+ ])
2678
+ }),
2679
+ v4.z.object({
2680
+ type: v4.z.literal("file_search_call"),
2681
+ id: v4.z.string(),
2682
+ queries: v4.z.array(v4.z.string()),
2683
+ results: v4.z.array(
2684
+ v4.z.object({
2685
+ attributes: v4.z.record(
2686
+ v4.z.string(),
2687
+ v4.z.union([v4.z.string(), v4.z.number(), v4.z.boolean()])
2688
+ ),
2689
+ file_id: v4.z.string(),
2690
+ filename: v4.z.string(),
2691
+ score: v4.z.number(),
2692
+ text: v4.z.string()
2693
+ })
2694
+ ).nullish()
2695
+ }),
2696
+ v4.z.object({
2697
+ type: v4.z.literal("code_interpreter_call"),
2698
+ id: v4.z.string(),
2699
+ code: v4.z.string().nullable(),
2700
+ container_id: v4.z.string(),
2701
+ outputs: v4.z.array(
2702
+ v4.z.discriminatedUnion("type", [
2703
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2704
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2705
+ ])
2706
+ ).nullable()
2707
+ }),
2708
+ v4.z.object({
2709
+ type: v4.z.literal("image_generation_call"),
2710
+ id: v4.z.string(),
2711
+ result: v4.z.string()
2712
+ }),
2713
+ v4.z.object({
2714
+ type: v4.z.literal("local_shell_call"),
2715
+ id: v4.z.string(),
2716
+ call_id: v4.z.string(),
2717
+ action: v4.z.object({
2718
+ type: v4.z.literal("exec"),
2719
+ command: v4.z.array(v4.z.string()),
2720
+ timeout_ms: v4.z.number().optional(),
2721
+ user: v4.z.string().optional(),
2722
+ working_directory: v4.z.string().optional(),
2723
+ env: v4.z.record(v4.z.string(), v4.z.string()).optional()
2724
+ })
2725
+ }),
2726
+ v4.z.object({
2727
+ type: v4.z.literal("function_call"),
2728
+ call_id: v4.z.string(),
2729
+ name: v4.z.string(),
2730
+ arguments: v4.z.string(),
2731
+ id: v4.z.string()
2732
+ }),
2733
+ v4.z.object({
2734
+ type: v4.z.literal("computer_call"),
2735
+ id: v4.z.string(),
2736
+ status: v4.z.string().optional()
2737
+ }),
2738
+ v4.z.object({
2739
+ type: v4.z.literal("reasoning"),
2740
+ id: v4.z.string(),
2741
+ encrypted_content: v4.z.string().nullish(),
2742
+ summary: v4.z.array(
2743
+ v4.z.object({
2744
+ type: v4.z.literal("summary_text"),
2745
+ text: v4.z.string()
2746
+ })
2747
+ )
2748
+ })
2749
+ ])
2750
+ ).optional(),
2751
+ service_tier: v4.z.string().nullish(),
2752
+ incomplete_details: v4.z.object({ reason: v4.z.string() }).nullish(),
2753
+ usage: v4.z.object({
2754
+ input_tokens: v4.z.number(),
2755
+ input_tokens_details: v4.z.object({ cached_tokens: v4.z.number().nullish() }).nullish(),
2756
+ output_tokens: v4.z.number(),
2757
+ output_tokens_details: v4.z.object({ reasoning_tokens: v4.z.number().nullish() }).nullish()
2758
+ }).optional()
2759
+ })
2760
+ )
2761
+ );
2762
+ var TOP_LOGPROBS_MAX = 20;
2763
+ var openaiResponsesProviderOptionsSchema = chunkOWIEOL55_cjs.lazyValidator(
2764
+ () => chunkOWIEOL55_cjs.zodSchema(
2765
+ v4.z.object({
2766
+ conversation: v4.z.string().nullish(),
2767
+ include: v4.z.array(
2768
+ v4.z.enum([
2769
+ "reasoning.encrypted_content",
2770
+ // handled internally by default, only needed for unknown reasoning models
2771
+ "file_search_call.results",
2772
+ "message.output_text.logprobs"
2773
+ ])
2774
+ ).nullish(),
2775
+ instructions: v4.z.string().nullish(),
2776
+ /**
2777
+ * Return the log probabilities of the tokens.
2778
+ *
2779
+ * Setting to true will return the log probabilities of the tokens that
2780
+ * were generated.
2781
+ *
2782
+ * Setting to a number will return the log probabilities of the top n
2783
+ * tokens that were generated.
2784
+ *
2785
+ * @see https://platform.openai.com/docs/api-reference/responses/create
2786
+ * @see https://cookbook.openai.com/examples/using_logprobs
2787
+ */
2788
+ logprobs: v4.z.union([v4.z.boolean(), v4.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
2789
+ /**
2790
+ * The maximum number of total calls to built-in tools that can be processed in a response.
2791
+ * This maximum number applies across all built-in tool calls, not per individual tool.
2792
+ * Any further attempts to call a tool by the model will be ignored.
2793
+ */
2794
+ maxToolCalls: v4.z.number().nullish(),
2795
+ metadata: v4.z.any().nullish(),
2796
+ parallelToolCalls: v4.z.boolean().nullish(),
2797
+ previousResponseId: v4.z.string().nullish(),
2798
+ promptCacheKey: v4.z.string().nullish(),
2799
+ /**
2800
+ * The retention policy for the prompt cache.
2801
+ * - 'in_memory': Default. Standard prompt caching behavior.
2802
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
2803
+ * Currently only available for 5.1 series models.
2804
+ *
2805
+ * @default 'in_memory'
2806
+ */
2807
+ promptCacheRetention: v4.z.enum(["in_memory", "24h"]).nullish(),
2808
+ reasoningEffort: v4.z.string().nullish(),
2809
+ reasoningSummary: v4.z.string().nullish(),
2810
+ safetyIdentifier: v4.z.string().nullish(),
2811
+ serviceTier: v4.z.enum(["auto", "flex", "priority", "default"]).nullish(),
2812
+ store: v4.z.boolean().nullish(),
2813
+ strictJsonSchema: v4.z.boolean().nullish(),
2814
+ textVerbosity: v4.z.enum(["low", "medium", "high"]).nullish(),
2815
+ truncation: v4.z.enum(["auto", "disabled"]).nullish(),
2816
+ user: v4.z.string().nullish()
2817
+ })
2818
+ )
2819
+ );
2820
+ var codeInterpreterInputSchema = chunkOWIEOL55_cjs.lazySchema(
2821
+ () => chunkOWIEOL55_cjs.zodSchema(
2822
+ v4.z.object({
2823
+ code: v4.z.string().nullish(),
2824
+ containerId: v4.z.string()
2825
+ })
2826
+ )
2827
+ );
2828
+ var codeInterpreterOutputSchema = chunkOWIEOL55_cjs.lazySchema(
2829
+ () => chunkOWIEOL55_cjs.zodSchema(
2830
+ v4.z.object({
2831
+ outputs: v4.z.array(
2832
+ v4.z.discriminatedUnion("type", [
2833
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2834
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2835
+ ])
2836
+ ).nullish()
2837
+ })
2838
+ )
2839
+ );
2840
+ var codeInterpreterArgsSchema = chunkOWIEOL55_cjs.lazySchema(
2841
+ () => chunkOWIEOL55_cjs.zodSchema(
2842
+ v4.z.object({
2843
+ container: v4.z.union([
2844
+ v4.z.string(),
2845
+ v4.z.object({
2846
+ fileIds: v4.z.array(v4.z.string()).optional()
2847
+ })
2848
+ ]).optional()
2849
+ })
2850
+ )
2851
+ );
2852
+ var codeInterpreterToolFactory = chunkOWIEOL55_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2853
+ id: "openai.code_interpreter",
2854
+ name: "code_interpreter",
2855
+ inputSchema: codeInterpreterInputSchema,
2856
+ outputSchema: codeInterpreterOutputSchema
2857
+ });
2858
+ var codeInterpreter = (args = {}) => {
2859
+ return codeInterpreterToolFactory(args);
2860
+ };
2861
+ var comparisonFilterSchema = v4.z.object({
2862
+ key: v4.z.string(),
2863
+ type: v4.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
2864
+ value: v4.z.union([v4.z.string(), v4.z.number(), v4.z.boolean()])
2865
+ });
2866
+ var compoundFilterSchema = v4.z.object({
2867
+ type: v4.z.enum(["and", "or"]),
2868
+ filters: v4.z.array(
2869
+ v4.z.union([comparisonFilterSchema, v4.z.lazy(() => compoundFilterSchema)])
2870
+ )
2871
+ });
2872
+ var fileSearchArgsSchema = chunkOWIEOL55_cjs.lazySchema(
2873
+ () => chunkOWIEOL55_cjs.zodSchema(
2874
+ v4.z.object({
2875
+ vectorStoreIds: v4.z.array(v4.z.string()),
2876
+ maxNumResults: v4.z.number().optional(),
2877
+ ranking: v4.z.object({
2878
+ ranker: v4.z.string().optional(),
2879
+ scoreThreshold: v4.z.number().optional()
2880
+ }).optional(),
2881
+ filters: v4.z.union([comparisonFilterSchema, compoundFilterSchema]).optional()
2882
+ })
2883
+ )
2884
+ );
2885
+ var fileSearchOutputSchema = chunkOWIEOL55_cjs.lazySchema(
2886
+ () => chunkOWIEOL55_cjs.zodSchema(
2887
+ v4.z.object({
2888
+ queries: v4.z.array(v4.z.string()),
2889
+ results: v4.z.array(
2890
+ v4.z.object({
2891
+ attributes: v4.z.record(v4.z.string(), v4.z.unknown()),
2892
+ fileId: v4.z.string(),
2893
+ filename: v4.z.string(),
2894
+ score: v4.z.number(),
2895
+ text: v4.z.string()
2896
+ })
2897
+ ).nullable()
2898
+ })
2899
+ )
2900
+ );
2901
+ var fileSearch = chunkOWIEOL55_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2902
+ id: "openai.file_search",
2903
+ name: "file_search",
2904
+ inputSchema: v4.z.object({}),
2905
+ outputSchema: fileSearchOutputSchema
2906
+ });
2907
+ var webSearchArgsSchema = chunkOWIEOL55_cjs.lazySchema(
2908
+ () => chunkOWIEOL55_cjs.zodSchema(
2909
+ v4.z.object({
2910
+ externalWebAccess: v4.z.boolean().optional(),
2911
+ filters: v4.z.object({ allowedDomains: v4.z.array(v4.z.string()).optional() }).optional(),
2912
+ searchContextSize: v4.z.enum(["low", "medium", "high"]).optional(),
2913
+ userLocation: v4.z.object({
2914
+ type: v4.z.literal("approximate"),
2915
+ country: v4.z.string().optional(),
2916
+ city: v4.z.string().optional(),
2917
+ region: v4.z.string().optional(),
2918
+ timezone: v4.z.string().optional()
2919
+ }).optional()
2920
+ })
2921
+ )
2922
+ );
2923
+ var webSearchInputSchema = chunkOWIEOL55_cjs.lazySchema(() => chunkOWIEOL55_cjs.zodSchema(v4.z.object({})));
2924
+ var webSearchOutputSchema = chunkOWIEOL55_cjs.lazySchema(
2925
+ () => chunkOWIEOL55_cjs.zodSchema(
2926
+ v4.z.object({
2927
+ action: v4.z.discriminatedUnion("type", [
2928
+ v4.z.object({
2929
+ type: v4.z.literal("search"),
2930
+ query: v4.z.string().optional()
2931
+ }),
2932
+ v4.z.object({
2933
+ type: v4.z.literal("openPage"),
2934
+ url: v4.z.string()
2935
+ }),
2936
+ v4.z.object({
2937
+ type: v4.z.literal("find"),
2938
+ url: v4.z.string(),
2939
+ pattern: v4.z.string()
2940
+ })
2941
+ ]),
2942
+ sources: v4.z.array(
2943
+ v4.z.discriminatedUnion("type", [
2944
+ v4.z.object({ type: v4.z.literal("url"), url: v4.z.string() }),
2945
+ v4.z.object({ type: v4.z.literal("api"), name: v4.z.string() })
2946
+ ])
2947
+ ).optional()
2948
+ })
2949
+ )
2950
+ );
2951
+ chunkOWIEOL55_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2952
+ id: "openai.web_search",
2953
+ name: "web_search",
2954
+ inputSchema: webSearchInputSchema,
2955
+ outputSchema: webSearchOutputSchema
2956
+ });
2957
+ var webSearchPreviewArgsSchema = chunkOWIEOL55_cjs.lazySchema(
2958
+ () => chunkOWIEOL55_cjs.zodSchema(
2959
+ v4.z.object({
2960
+ searchContextSize: v4.z.enum(["low", "medium", "high"]).optional(),
2961
+ userLocation: v4.z.object({
2962
+ type: v4.z.literal("approximate"),
2963
+ country: v4.z.string().optional(),
2964
+ city: v4.z.string().optional(),
2965
+ region: v4.z.string().optional(),
2966
+ timezone: v4.z.string().optional()
2967
+ }).optional()
2968
+ })
2969
+ )
2970
+ );
2971
+ var webSearchPreviewInputSchema = chunkOWIEOL55_cjs.lazySchema(
2972
+ () => chunkOWIEOL55_cjs.zodSchema(v4.z.object({}))
2973
+ );
2974
+ var webSearchPreviewOutputSchema = chunkOWIEOL55_cjs.lazySchema(
2975
+ () => chunkOWIEOL55_cjs.zodSchema(
2976
+ v4.z.object({
2977
+ action: v4.z.discriminatedUnion("type", [
2978
+ v4.z.object({
2979
+ type: v4.z.literal("search"),
2980
+ query: v4.z.string().optional()
2981
+ }),
2982
+ v4.z.object({
2983
+ type: v4.z.literal("openPage"),
2984
+ url: v4.z.string()
2985
+ }),
2986
+ v4.z.object({
2987
+ type: v4.z.literal("find"),
2988
+ url: v4.z.string(),
2989
+ pattern: v4.z.string()
2990
+ })
2991
+ ])
2992
+ })
2993
+ )
2994
+ );
2995
+ var webSearchPreview = chunkOWIEOL55_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2996
+ id: "openai.web_search_preview",
2997
+ name: "web_search_preview",
2998
+ inputSchema: webSearchPreviewInputSchema,
2999
+ outputSchema: webSearchPreviewOutputSchema
3000
+ });
3001
+ var imageGenerationArgsSchema = chunkOWIEOL55_cjs.lazySchema(
3002
+ () => chunkOWIEOL55_cjs.zodSchema(
3003
+ v4.z.object({
3004
+ background: v4.z.enum(["auto", "opaque", "transparent"]).optional(),
3005
+ inputFidelity: v4.z.enum(["low", "high"]).optional(),
3006
+ inputImageMask: v4.z.object({
3007
+ fileId: v4.z.string().optional(),
3008
+ imageUrl: v4.z.string().optional()
3009
+ }).optional(),
3010
+ model: v4.z.string().optional(),
3011
+ moderation: v4.z.enum(["auto"]).optional(),
3012
+ outputCompression: v4.z.number().int().min(0).max(100).optional(),
3013
+ outputFormat: v4.z.enum(["png", "jpeg", "webp"]).optional(),
3014
+ partialImages: v4.z.number().int().min(0).max(3).optional(),
3015
+ quality: v4.z.enum(["auto", "low", "medium", "high"]).optional(),
3016
+ size: v4.z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
3017
+ }).strict()
3018
+ )
3019
+ );
3020
+ var imageGenerationInputSchema = chunkOWIEOL55_cjs.lazySchema(() => chunkOWIEOL55_cjs.zodSchema(v4.z.object({})));
3021
+ var imageGenerationOutputSchema = chunkOWIEOL55_cjs.lazySchema(
3022
+ () => chunkOWIEOL55_cjs.zodSchema(v4.z.object({ result: v4.z.string() }))
3023
+ );
3024
+ var imageGenerationToolFactory = chunkOWIEOL55_cjs.createProviderDefinedToolFactoryWithOutputSchema({
3025
+ id: "openai.image_generation",
3026
+ name: "image_generation",
3027
+ inputSchema: imageGenerationInputSchema,
3028
+ outputSchema: imageGenerationOutputSchema
3029
+ });
3030
+ var imageGeneration = (args = {}) => {
3031
+ return imageGenerationToolFactory(args);
3032
+ };
3033
+ async function prepareResponsesTools({
3034
+ tools,
3035
+ toolChoice,
3036
+ strictJsonSchema
3037
+ }) {
3038
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
3039
+ const toolWarnings = [];
3040
+ if (tools == null) {
3041
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
3042
+ }
3043
+ const openaiTools = [];
3044
+ for (const tool of tools) {
3045
+ switch (tool.type) {
3046
+ case "function":
3047
+ openaiTools.push({
3048
+ type: "function",
3049
+ name: tool.name,
3050
+ description: tool.description,
3051
+ parameters: tool.inputSchema,
3052
+ strict: strictJsonSchema
3053
+ });
3054
+ break;
3055
+ case "provider-defined": {
3056
+ switch (tool.id) {
3057
+ case "openai.file_search": {
3058
+ const args = await chunkOWIEOL55_cjs.validateTypes({
3059
+ value: tool.args,
3060
+ schema: fileSearchArgsSchema
3061
+ });
3062
+ openaiTools.push({
3063
+ type: "file_search",
3064
+ vector_store_ids: args.vectorStoreIds,
3065
+ max_num_results: args.maxNumResults,
3066
+ ranking_options: args.ranking ? {
3067
+ ranker: args.ranking.ranker,
3068
+ score_threshold: args.ranking.scoreThreshold
3069
+ } : void 0,
3070
+ filters: args.filters
3071
+ });
3072
+ break;
3073
+ }
3074
+ case "openai.local_shell": {
3075
+ openaiTools.push({
3076
+ type: "local_shell"
3077
+ });
3078
+ break;
3079
+ }
3080
+ case "openai.web_search_preview": {
3081
+ const args = await chunkOWIEOL55_cjs.validateTypes({
3082
+ value: tool.args,
3083
+ schema: webSearchPreviewArgsSchema
3084
+ });
3085
+ openaiTools.push({
3086
+ type: "web_search_preview",
3087
+ search_context_size: args.searchContextSize,
3088
+ user_location: args.userLocation
3089
+ });
3090
+ break;
3091
+ }
3092
+ case "openai.web_search": {
3093
+ const args = await chunkOWIEOL55_cjs.validateTypes({
3094
+ value: tool.args,
3095
+ schema: webSearchArgsSchema
3096
+ });
3097
+ openaiTools.push({
3098
+ type: "web_search",
3099
+ filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
3100
+ external_web_access: args.externalWebAccess,
3101
+ search_context_size: args.searchContextSize,
3102
+ user_location: args.userLocation
3103
+ });
3104
+ break;
3105
+ }
3106
+ case "openai.code_interpreter": {
3107
+ const args = await chunkOWIEOL55_cjs.validateTypes({
3108
+ value: tool.args,
3109
+ schema: codeInterpreterArgsSchema
3110
+ });
3111
+ openaiTools.push({
3112
+ type: "code_interpreter",
3113
+ container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
3114
+ });
3115
+ break;
3116
+ }
3117
+ case "openai.image_generation": {
3118
+ const args = await chunkOWIEOL55_cjs.validateTypes({
3119
+ value: tool.args,
3120
+ schema: imageGenerationArgsSchema
3121
+ });
3122
+ openaiTools.push({
3123
+ type: "image_generation",
3124
+ background: args.background,
3125
+ input_fidelity: args.inputFidelity,
3126
+ input_image_mask: args.inputImageMask ? {
3127
+ file_id: args.inputImageMask.fileId,
3128
+ image_url: args.inputImageMask.imageUrl
3129
+ } : void 0,
3130
+ model: args.model,
3131
+ size: args.size,
3132
+ quality: args.quality,
3133
+ moderation: args.moderation,
3134
+ output_format: args.outputFormat,
3135
+ output_compression: args.outputCompression
3136
+ });
3137
+ break;
3138
+ }
3139
+ }
3140
+ break;
3141
+ }
3142
+ default:
3143
+ toolWarnings.push({ type: "unsupported-tool", tool });
3144
+ break;
3145
+ }
3146
+ }
3147
+ if (toolChoice == null) {
3148
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
3149
+ }
3150
+ const type = toolChoice.type;
3151
+ switch (type) {
3152
+ case "auto":
3153
+ case "none":
3154
+ case "required":
3155
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
3156
+ case "tool":
3157
+ return {
3158
+ tools: openaiTools,
3159
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
3160
+ toolWarnings
3161
+ };
3162
+ default: {
3163
+ const _exhaustiveCheck = type;
3164
+ throw new chunkIXZ2T2QX_cjs.UnsupportedFunctionalityError({
3165
+ functionality: `tool choice type: ${_exhaustiveCheck}`
3166
+ });
3167
+ }
3168
+ }
3169
+ }
3170
+ var OpenAIResponsesLanguageModel = class {
3171
+ constructor(modelId, config) {
3172
+ this.specificationVersion = "v2";
3173
+ this.supportedUrls = {
3174
+ "image/*": [/^https?:\/\/.*$/],
3175
+ "application/pdf": [/^https?:\/\/.*$/]
3176
+ };
3177
+ this.modelId = modelId;
3178
+ this.config = config;
3179
+ }
3180
+ get provider() {
3181
+ return this.config.provider;
3182
+ }
3183
+ async getArgs({
3184
+ maxOutputTokens,
3185
+ temperature,
3186
+ stopSequences,
3187
+ topP,
3188
+ topK,
3189
+ presencePenalty,
3190
+ frequencyPenalty,
3191
+ seed,
3192
+ prompt,
3193
+ providerOptions,
3194
+ tools,
3195
+ toolChoice,
3196
+ responseFormat
3197
+ }) {
3198
+ var _a, _b, _c, _d;
3199
+ const warnings = [];
3200
+ const modelConfig = getResponsesModelConfig(this.modelId);
3201
+ if (topK != null) {
3202
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
3203
+ }
3204
+ if (seed != null) {
3205
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
3206
+ }
3207
+ if (presencePenalty != null) {
3208
+ warnings.push({
3209
+ type: "unsupported-setting",
3210
+ setting: "presencePenalty"
3211
+ });
3212
+ }
3213
+ if (frequencyPenalty != null) {
3214
+ warnings.push({
3215
+ type: "unsupported-setting",
3216
+ setting: "frequencyPenalty"
3217
+ });
3218
+ }
3219
+ if (stopSequences != null) {
3220
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
3221
+ }
3222
+ const openaiOptions = await chunkOWIEOL55_cjs.parseProviderOptions({
3223
+ provider: "openai",
3224
+ providerOptions,
3225
+ schema: openaiResponsesProviderOptionsSchema
3226
+ });
3227
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
3228
+ warnings.push({
3229
+ type: "unsupported-setting",
3230
+ setting: "conversation",
3231
+ details: "conversation and previousResponseId cannot be used together"
3232
+ });
3233
+ }
3234
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
3235
+ prompt,
3236
+ systemMessageMode: modelConfig.systemMessageMode,
3237
+ fileIdPrefixes: this.config.fileIdPrefixes,
3238
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
3239
+ hasLocalShellTool: hasOpenAITool("openai.local_shell")
3240
+ });
3241
+ warnings.push(...inputWarnings);
3242
+ const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
3243
+ let include = openaiOptions == null ? void 0 : openaiOptions.include;
3244
+ function addInclude(key) {
3245
+ if (include == null) {
3246
+ include = [key];
3247
+ } else if (!include.includes(key)) {
3248
+ include = [...include, key];
3249
+ }
3250
+ }
3251
+ function hasOpenAITool(id) {
3252
+ return (tools == null ? void 0 : tools.find(
3253
+ (tool) => tool.type === "provider-defined" && tool.id === id
3254
+ )) != null;
3255
+ }
3256
+ const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
3257
+ if (topLogprobs) {
3258
+ addInclude("message.output_text.logprobs");
3259
+ }
3260
+ const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
3261
+ (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
3262
+ )) == null ? void 0 : _c.name;
3263
+ if (webSearchToolName) {
3264
+ addInclude("web_search_call.action.sources");
3265
+ }
3266
+ if (hasOpenAITool("openai.code_interpreter")) {
3267
+ addInclude("code_interpreter_call.outputs");
3268
+ }
3269
+ const store = openaiOptions == null ? void 0 : openaiOptions.store;
3270
+ if (store === false && modelConfig.isReasoningModel) {
3271
+ addInclude("reasoning.encrypted_content");
3272
+ }
3273
+ const baseArgs = {
3274
+ model: this.modelId,
3275
+ input,
3276
+ temperature,
3277
+ top_p: topP,
3278
+ max_output_tokens: maxOutputTokens,
3279
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
3280
+ text: {
3281
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
3282
+ format: responseFormat.schema != null ? {
3283
+ type: "json_schema",
3284
+ strict: strictJsonSchema,
3285
+ name: (_d = responseFormat.name) != null ? _d : "response",
3286
+ description: responseFormat.description,
3287
+ schema: responseFormat.schema
3288
+ } : { type: "json_object" }
3289
+ },
3290
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
3291
+ verbosity: openaiOptions.textVerbosity
3292
+ }
3293
+ }
3294
+ },
3295
+ // provider options:
3296
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
3297
+ max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
3298
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
3299
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
3300
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
3301
+ store,
3302
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
3303
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
3304
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
3305
+ include,
3306
+ prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
3307
+ prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
3308
+ safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
3309
+ top_logprobs: topLogprobs,
3310
+ truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
3311
+ // model-specific settings:
3312
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
3313
+ reasoning: {
3314
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
3315
+ effort: openaiOptions.reasoningEffort
3316
+ },
3317
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
3318
+ summary: openaiOptions.reasoningSummary
3319
+ }
3320
+ }
3321
+ }
3322
+ };
3323
+ if (modelConfig.isReasoningModel) {
3324
+ if (baseArgs.temperature != null) {
3325
+ baseArgs.temperature = void 0;
3326
+ warnings.push({
3327
+ type: "unsupported-setting",
3328
+ setting: "temperature",
3329
+ details: "temperature is not supported for reasoning models"
3330
+ });
3331
+ }
3332
+ if (baseArgs.top_p != null) {
3333
+ baseArgs.top_p = void 0;
3334
+ warnings.push({
3335
+ type: "unsupported-setting",
3336
+ setting: "topP",
3337
+ details: "topP is not supported for reasoning models"
3338
+ });
3339
+ }
3340
+ } else {
3341
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
3342
+ warnings.push({
3343
+ type: "unsupported-setting",
3344
+ setting: "reasoningEffort",
3345
+ details: "reasoningEffort is not supported for non-reasoning models"
3346
+ });
3347
+ }
3348
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
3349
+ warnings.push({
3350
+ type: "unsupported-setting",
3351
+ setting: "reasoningSummary",
3352
+ details: "reasoningSummary is not supported for non-reasoning models"
3353
+ });
3354
+ }
3355
+ }
3356
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
3357
+ warnings.push({
3358
+ type: "unsupported-setting",
3359
+ setting: "serviceTier",
3360
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
3361
+ });
3362
+ delete baseArgs.service_tier;
3363
+ }
3364
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
3365
+ warnings.push({
3366
+ type: "unsupported-setting",
3367
+ setting: "serviceTier",
3368
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
3369
+ });
3370
+ delete baseArgs.service_tier;
3371
+ }
3372
+ const {
3373
+ tools: openaiTools,
3374
+ toolChoice: openaiToolChoice,
3375
+ toolWarnings
3376
+ } = await prepareResponsesTools({
3377
+ tools,
3378
+ toolChoice,
3379
+ strictJsonSchema
3380
+ });
3381
+ return {
3382
+ webSearchToolName,
3383
+ args: {
3384
+ ...baseArgs,
3385
+ tools: openaiTools,
3386
+ tool_choice: openaiToolChoice
3387
+ },
3388
+ warnings: [...warnings, ...toolWarnings],
3389
+ store
3390
+ };
3391
+ }
3392
+ async doGenerate(options) {
3393
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
3394
+ const {
3395
+ args: body,
3396
+ warnings,
3397
+ webSearchToolName
3398
+ } = await this.getArgs(options);
3399
+ const url = this.config.url({
3400
+ path: "/responses",
3401
+ modelId: this.modelId
3402
+ });
3403
+ const {
3404
+ responseHeaders,
3405
+ value: response,
3406
+ rawValue: rawResponse
3407
+ } = await chunkOWIEOL55_cjs.postJsonToApi({
3408
+ url,
3409
+ headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
3410
+ body,
3411
+ failedResponseHandler: openaiFailedResponseHandler,
3412
+ successfulResponseHandler: chunkOWIEOL55_cjs.createJsonResponseHandler(
3413
+ openaiResponsesResponseSchema
3414
+ ),
3415
+ abortSignal: options.abortSignal,
3416
+ fetch: this.config.fetch
3417
+ });
3418
+ if (response.error) {
3419
+ throw new chunkIXZ2T2QX_cjs.APICallError({
3420
+ message: response.error.message,
3421
+ url,
3422
+ requestBodyValues: body,
3423
+ statusCode: 400,
3424
+ responseHeaders,
3425
+ responseBody: rawResponse,
3426
+ isRetryable: false
3427
+ });
3428
+ }
3429
+ const content = [];
3430
+ const logprobs = [];
3431
+ let hasFunctionCall = false;
3432
+ for (const part of response.output) {
3433
+ switch (part.type) {
3434
+ case "reasoning": {
3435
+ if (part.summary.length === 0) {
3436
+ part.summary.push({ type: "summary_text", text: "" });
3437
+ }
3438
+ for (const summary of part.summary) {
3439
+ content.push({
3440
+ type: "reasoning",
3441
+ text: summary.text,
3442
+ providerMetadata: {
3443
+ openai: {
3444
+ itemId: part.id,
3445
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
3446
+ }
3447
+ }
3448
+ });
3449
+ }
3450
+ break;
3451
+ }
3452
+ case "image_generation_call": {
3453
+ content.push({
3454
+ type: "tool-call",
3455
+ toolCallId: part.id,
3456
+ toolName: "image_generation",
3457
+ input: "{}",
3458
+ providerExecuted: true
3459
+ });
3460
+ content.push({
3461
+ type: "tool-result",
3462
+ toolCallId: part.id,
3463
+ toolName: "image_generation",
3464
+ result: {
3465
+ result: part.result
3466
+ },
3467
+ providerExecuted: true
3468
+ });
3469
+ break;
3470
+ }
3471
+ case "local_shell_call": {
3472
+ content.push({
3473
+ type: "tool-call",
3474
+ toolCallId: part.call_id,
3475
+ toolName: "local_shell",
3476
+ input: JSON.stringify({
3477
+ action: part.action
3478
+ }),
3479
+ providerMetadata: {
3480
+ openai: {
3481
+ itemId: part.id
3482
+ }
3483
+ }
3484
+ });
3485
+ break;
3486
+ }
3487
+ case "message": {
3488
+ for (const contentPart of part.content) {
3489
+ if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
3490
+ logprobs.push(contentPart.logprobs);
3491
+ }
3492
+ content.push({
3493
+ type: "text",
3494
+ text: contentPart.text,
3495
+ providerMetadata: {
3496
+ openai: {
3497
+ itemId: part.id
3498
+ }
3499
+ }
3500
+ });
3501
+ for (const annotation of contentPart.annotations) {
3502
+ if (annotation.type === "url_citation") {
3503
+ content.push({
3504
+ type: "source",
3505
+ sourceType: "url",
3506
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : chunkOWIEOL55_cjs.generateId(),
3507
+ url: annotation.url,
3508
+ title: annotation.title
3509
+ });
3510
+ } else if (annotation.type === "file_citation") {
3511
+ content.push({
3512
+ type: "source",
3513
+ sourceType: "document",
3514
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : chunkOWIEOL55_cjs.generateId(),
3515
+ mediaType: "text/plain",
3516
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
3517
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
3518
+ ...annotation.file_id ? {
3519
+ providerMetadata: {
3520
+ openai: {
3521
+ fileId: annotation.file_id
3522
+ }
3523
+ }
3524
+ } : {}
3525
+ });
3526
+ }
3527
+ }
3528
+ }
3529
+ break;
3530
+ }
3531
+ case "function_call": {
3532
+ hasFunctionCall = true;
3533
+ content.push({
3534
+ type: "tool-call",
3535
+ toolCallId: part.call_id,
3536
+ toolName: part.name,
3537
+ input: part.arguments,
3538
+ providerMetadata: {
3539
+ openai: {
3540
+ itemId: part.id
3541
+ }
3542
+ }
3543
+ });
3544
+ break;
3545
+ }
3546
+ case "web_search_call": {
3547
+ content.push({
3548
+ type: "tool-call",
3549
+ toolCallId: part.id,
3550
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3551
+ input: JSON.stringify({}),
3552
+ providerExecuted: true
3553
+ });
3554
+ content.push({
3555
+ type: "tool-result",
3556
+ toolCallId: part.id,
3557
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3558
+ result: mapWebSearchOutput(part.action),
3559
+ providerExecuted: true
3560
+ });
3561
+ break;
3562
+ }
3563
+ case "computer_call": {
3564
+ content.push({
3565
+ type: "tool-call",
3566
+ toolCallId: part.id,
3567
+ toolName: "computer_use",
3568
+ input: "",
3569
+ providerExecuted: true
3570
+ });
3571
+ content.push({
3572
+ type: "tool-result",
3573
+ toolCallId: part.id,
3574
+ toolName: "computer_use",
3575
+ result: {
3576
+ type: "computer_use_tool_result",
3577
+ status: part.status || "completed"
3578
+ },
3579
+ providerExecuted: true
3580
+ });
3581
+ break;
3582
+ }
3583
+ case "file_search_call": {
3584
+ content.push({
3585
+ type: "tool-call",
3586
+ toolCallId: part.id,
3587
+ toolName: "file_search",
3588
+ input: "{}",
3589
+ providerExecuted: true
3590
+ });
3591
+ content.push({
3592
+ type: "tool-result",
3593
+ toolCallId: part.id,
3594
+ toolName: "file_search",
3595
+ result: {
3596
+ queries: part.queries,
3597
+ results: (_n = (_m = part.results) == null ? void 0 : _m.map((result) => ({
3598
+ attributes: result.attributes,
3599
+ fileId: result.file_id,
3600
+ filename: result.filename,
3601
+ score: result.score,
3602
+ text: result.text
3603
+ }))) != null ? _n : null
3604
+ },
3605
+ providerExecuted: true
3606
+ });
3607
+ break;
3608
+ }
3609
+ case "code_interpreter_call": {
3610
+ content.push({
3611
+ type: "tool-call",
3612
+ toolCallId: part.id,
3613
+ toolName: "code_interpreter",
3614
+ input: JSON.stringify({
3615
+ code: part.code,
3616
+ containerId: part.container_id
3617
+ }),
3618
+ providerExecuted: true
3619
+ });
3620
+ content.push({
3621
+ type: "tool-result",
3622
+ toolCallId: part.id,
3623
+ toolName: "code_interpreter",
3624
+ result: {
3625
+ outputs: part.outputs
3626
+ },
3627
+ providerExecuted: true
3628
+ });
3629
+ break;
3630
+ }
3631
+ }
3632
+ }
3633
+ const providerMetadata = {
3634
+ openai: {
3635
+ ...response.id != null ? { responseId: response.id } : {}
3636
+ }
3637
+ };
3638
+ if (logprobs.length > 0) {
3639
+ providerMetadata.openai.logprobs = logprobs;
3640
+ }
3641
+ if (typeof response.service_tier === "string") {
3642
+ providerMetadata.openai.serviceTier = response.service_tier;
3643
+ }
3644
+ const usage = response.usage;
3645
+ return {
3646
+ content,
3647
+ finishReason: mapOpenAIResponseFinishReason({
3648
+ finishReason: (_o = response.incomplete_details) == null ? void 0 : _o.reason,
3649
+ hasFunctionCall
3650
+ }),
3651
+ usage: {
3652
+ inputTokens: usage.input_tokens,
3653
+ outputTokens: usage.output_tokens,
3654
+ totalTokens: usage.input_tokens + usage.output_tokens,
3655
+ reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
3656
+ cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
3657
+ },
3658
+ request: { body },
3659
+ response: {
3660
+ id: response.id,
3661
+ timestamp: new Date(response.created_at * 1e3),
3662
+ modelId: response.model,
3663
+ headers: responseHeaders,
3664
+ body: rawResponse
3665
+ },
3666
+ providerMetadata,
3667
+ warnings
3668
+ };
3669
+ }
3670
+ async doStream(options) {
3671
+ const {
3672
+ args: body,
3673
+ warnings,
3674
+ webSearchToolName,
3675
+ store
3676
+ } = await this.getArgs(options);
3677
+ const { responseHeaders, value: response } = await chunkOWIEOL55_cjs.postJsonToApi({
3678
+ url: this.config.url({
3679
+ path: "/responses",
3680
+ modelId: this.modelId
3681
+ }),
3682
+ headers: chunkOWIEOL55_cjs.combineHeaders(this.config.headers(), options.headers),
3683
+ body: {
3684
+ ...body,
3685
+ stream: true
3686
+ },
3687
+ failedResponseHandler: openaiFailedResponseHandler,
3688
+ successfulResponseHandler: chunkOWIEOL55_cjs.createEventSourceResponseHandler(
3689
+ openaiResponsesChunkSchema
3690
+ ),
3691
+ abortSignal: options.abortSignal,
3692
+ fetch: this.config.fetch
3693
+ });
3694
+ const self = this;
3695
+ let finishReason = "unknown";
3696
+ const usage = {
3697
+ inputTokens: void 0,
3698
+ outputTokens: void 0,
3699
+ totalTokens: void 0
3700
+ };
3701
+ const logprobs = [];
3702
+ let responseId = null;
3703
+ const ongoingToolCalls = {};
3704
+ const ongoingAnnotations = [];
3705
+ let hasFunctionCall = false;
3706
+ const activeReasoning = {};
3707
+ let serviceTier;
3708
+ return {
3709
+ stream: response.pipeThrough(
3710
+ new TransformStream({
3711
+ start(controller) {
3712
+ controller.enqueue({ type: "stream-start", warnings });
3713
+ },
3714
+ transform(chunk, controller) {
3715
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
3716
+ if (options.includeRawChunks) {
3717
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3718
+ }
3719
+ if (!chunk.success) {
3720
+ finishReason = "error";
3721
+ controller.enqueue({ type: "error", error: chunk.error });
3722
+ return;
3723
+ }
3724
+ const value = chunk.value;
3725
+ if (isResponseOutputItemAddedChunk(value)) {
3726
+ if (value.item.type === "function_call") {
3727
+ ongoingToolCalls[value.output_index] = {
3728
+ toolName: value.item.name,
3729
+ toolCallId: value.item.call_id
3730
+ };
3731
+ controller.enqueue({
3732
+ type: "tool-input-start",
3733
+ id: value.item.call_id,
3734
+ toolName: value.item.name
3735
+ });
3736
+ } else if (value.item.type === "web_search_call") {
3737
+ ongoingToolCalls[value.output_index] = {
3738
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3739
+ toolCallId: value.item.id
3740
+ };
3741
+ controller.enqueue({
3742
+ type: "tool-input-start",
3743
+ id: value.item.id,
3744
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3745
+ providerExecuted: true
3746
+ });
3747
+ controller.enqueue({
3748
+ type: "tool-input-end",
3749
+ id: value.item.id
3750
+ });
3751
+ controller.enqueue({
3752
+ type: "tool-call",
3753
+ toolCallId: value.item.id,
3754
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3755
+ input: JSON.stringify({}),
3756
+ providerExecuted: true
3757
+ });
3758
+ } else if (value.item.type === "computer_call") {
3759
+ ongoingToolCalls[value.output_index] = {
3760
+ toolName: "computer_use",
3761
+ toolCallId: value.item.id
3762
+ };
3763
+ controller.enqueue({
3764
+ type: "tool-input-start",
3765
+ id: value.item.id,
3766
+ toolName: "computer_use",
3767
+ providerExecuted: true
3768
+ });
3769
+ } else if (value.item.type === "code_interpreter_call") {
3770
+ ongoingToolCalls[value.output_index] = {
3771
+ toolName: "code_interpreter",
3772
+ toolCallId: value.item.id,
3773
+ codeInterpreter: {
3774
+ containerId: value.item.container_id
3775
+ }
3776
+ };
3777
+ controller.enqueue({
3778
+ type: "tool-input-start",
3779
+ id: value.item.id,
3780
+ toolName: "code_interpreter",
3781
+ providerExecuted: true
3782
+ });
3783
+ controller.enqueue({
3784
+ type: "tool-input-delta",
3785
+ id: value.item.id,
3786
+ delta: `{"containerId":"${value.item.container_id}","code":"`
3787
+ });
3788
+ } else if (value.item.type === "file_search_call") {
3789
+ controller.enqueue({
3790
+ type: "tool-call",
3791
+ toolCallId: value.item.id,
3792
+ toolName: "file_search",
3793
+ input: "{}",
3794
+ providerExecuted: true
3795
+ });
3796
+ } else if (value.item.type === "image_generation_call") {
3797
+ controller.enqueue({
3798
+ type: "tool-call",
3799
+ toolCallId: value.item.id,
3800
+ toolName: "image_generation",
3801
+ input: "{}",
3802
+ providerExecuted: true
3803
+ });
3804
+ } else if (value.item.type === "message") {
3805
+ ongoingAnnotations.splice(0, ongoingAnnotations.length);
3806
+ controller.enqueue({
3807
+ type: "text-start",
3808
+ id: value.item.id,
3809
+ providerMetadata: {
3810
+ openai: {
3811
+ itemId: value.item.id
3812
+ }
3813
+ }
3814
+ });
3815
+ } else if (isResponseOutputItemAddedChunk(value) && value.item.type === "reasoning") {
3816
+ activeReasoning[value.item.id] = {
3817
+ encryptedContent: value.item.encrypted_content,
3818
+ summaryParts: { 0: "active" }
3819
+ };
3820
+ controller.enqueue({
3821
+ type: "reasoning-start",
3822
+ id: `${value.item.id}:0`,
3823
+ providerMetadata: {
3824
+ openai: {
3825
+ itemId: value.item.id,
3826
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
3827
+ }
3828
+ }
3829
+ });
3830
+ }
3831
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type !== "message") {
3832
+ if (value.item.type === "function_call") {
3833
+ ongoingToolCalls[value.output_index] = void 0;
3834
+ hasFunctionCall = true;
3835
+ controller.enqueue({
3836
+ type: "tool-input-end",
3837
+ id: value.item.call_id
3838
+ });
3839
+ controller.enqueue({
3840
+ type: "tool-call",
3841
+ toolCallId: value.item.call_id,
3842
+ toolName: value.item.name,
3843
+ input: value.item.arguments,
3844
+ providerMetadata: {
3845
+ openai: {
3846
+ itemId: value.item.id
3847
+ }
3848
+ }
3849
+ });
3850
+ } else if (value.item.type === "web_search_call") {
3851
+ ongoingToolCalls[value.output_index] = void 0;
3852
+ controller.enqueue({
3853
+ type: "tool-result",
3854
+ toolCallId: value.item.id,
3855
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3856
+ result: mapWebSearchOutput(value.item.action),
3857
+ providerExecuted: true
3858
+ });
3859
+ } else if (value.item.type === "computer_call") {
3860
+ ongoingToolCalls[value.output_index] = void 0;
3861
+ controller.enqueue({
3862
+ type: "tool-input-end",
3863
+ id: value.item.id
3864
+ });
3865
+ controller.enqueue({
3866
+ type: "tool-call",
3867
+ toolCallId: value.item.id,
3868
+ toolName: "computer_use",
3869
+ input: "",
3870
+ providerExecuted: true
3871
+ });
3872
+ controller.enqueue({
3873
+ type: "tool-result",
3874
+ toolCallId: value.item.id,
3875
+ toolName: "computer_use",
3876
+ result: {
3877
+ type: "computer_use_tool_result",
3878
+ status: value.item.status || "completed"
3879
+ },
3880
+ providerExecuted: true
3881
+ });
3882
+ } else if (value.item.type === "file_search_call") {
3883
+ ongoingToolCalls[value.output_index] = void 0;
3884
+ controller.enqueue({
3885
+ type: "tool-result",
3886
+ toolCallId: value.item.id,
3887
+ toolName: "file_search",
3888
+ result: {
3889
+ queries: value.item.queries,
3890
+ results: (_c = (_b = value.item.results) == null ? void 0 : _b.map((result) => ({
3891
+ attributes: result.attributes,
3892
+ fileId: result.file_id,
3893
+ filename: result.filename,
3894
+ score: result.score,
3895
+ text: result.text
3896
+ }))) != null ? _c : null
3897
+ },
3898
+ providerExecuted: true
3899
+ });
3900
+ } else if (value.item.type === "code_interpreter_call") {
3901
+ ongoingToolCalls[value.output_index] = void 0;
3902
+ controller.enqueue({
3903
+ type: "tool-result",
3904
+ toolCallId: value.item.id,
3905
+ toolName: "code_interpreter",
3906
+ result: {
3907
+ outputs: value.item.outputs
3908
+ },
3909
+ providerExecuted: true
3910
+ });
3911
+ } else if (value.item.type === "image_generation_call") {
3912
+ controller.enqueue({
3913
+ type: "tool-result",
3914
+ toolCallId: value.item.id,
3915
+ toolName: "image_generation",
3916
+ result: {
3917
+ result: value.item.result
3918
+ },
3919
+ providerExecuted: true
3920
+ });
3921
+ } else if (value.item.type === "local_shell_call") {
3922
+ ongoingToolCalls[value.output_index] = void 0;
3923
+ controller.enqueue({
3924
+ type: "tool-call",
3925
+ toolCallId: value.item.call_id,
3926
+ toolName: "local_shell",
3927
+ input: JSON.stringify({
3928
+ action: {
3929
+ type: "exec",
3930
+ command: value.item.action.command,
3931
+ timeoutMs: value.item.action.timeout_ms,
3932
+ user: value.item.action.user,
3933
+ workingDirectory: value.item.action.working_directory,
3934
+ env: value.item.action.env
3935
+ }
3936
+ }),
3937
+ providerMetadata: {
3938
+ openai: { itemId: value.item.id }
3939
+ }
3940
+ });
3941
+ } else if (value.item.type === "reasoning") {
3942
+ const activeReasoningPart = activeReasoning[value.item.id];
3943
+ const summaryPartIndices = Object.entries(
3944
+ activeReasoningPart.summaryParts
3945
+ ).filter(
3946
+ ([_, status]) => status === "active" || status === "can-conclude"
3947
+ ).map(([summaryIndex]) => summaryIndex);
3948
+ for (const summaryIndex of summaryPartIndices) {
3949
+ controller.enqueue({
3950
+ type: "reasoning-end",
3951
+ id: `${value.item.id}:${summaryIndex}`,
3952
+ providerMetadata: {
3953
+ openai: {
3954
+ itemId: value.item.id,
3955
+ reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
3956
+ }
3957
+ }
3958
+ });
3959
+ }
3960
+ delete activeReasoning[value.item.id];
3961
+ }
3962
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
3963
+ const toolCall = ongoingToolCalls[value.output_index];
3964
+ if (toolCall != null) {
3965
+ controller.enqueue({
3966
+ type: "tool-input-delta",
3967
+ id: toolCall.toolCallId,
3968
+ delta: value.delta
3969
+ });
3970
+ }
3971
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
3972
+ const toolCall = ongoingToolCalls[value.output_index];
3973
+ if (toolCall != null) {
3974
+ controller.enqueue({
3975
+ type: "tool-input-delta",
3976
+ id: toolCall.toolCallId,
3977
+ // The delta is code, which is embedding in a JSON string.
3978
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
3979
+ delta: JSON.stringify(value.delta).slice(1, -1)
3980
+ });
3981
+ }
3982
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
3983
+ const toolCall = ongoingToolCalls[value.output_index];
3984
+ if (toolCall != null) {
3985
+ controller.enqueue({
3986
+ type: "tool-input-delta",
3987
+ id: toolCall.toolCallId,
3988
+ delta: '"}'
3989
+ });
3990
+ controller.enqueue({
3991
+ type: "tool-input-end",
3992
+ id: toolCall.toolCallId
3993
+ });
3994
+ controller.enqueue({
3995
+ type: "tool-call",
3996
+ toolCallId: toolCall.toolCallId,
3997
+ toolName: "code_interpreter",
3998
+ input: JSON.stringify({
3999
+ code: value.code,
4000
+ containerId: toolCall.codeInterpreter.containerId
4001
+ }),
4002
+ providerExecuted: true
4003
+ });
4004
+ }
4005
+ } else if (isResponseCreatedChunk(value)) {
4006
+ responseId = value.response.id;
4007
+ controller.enqueue({
4008
+ type: "response-metadata",
4009
+ id: value.response.id,
4010
+ timestamp: new Date(value.response.created_at * 1e3),
4011
+ modelId: value.response.model
4012
+ });
4013
+ } else if (isTextDeltaChunk(value)) {
4014
+ controller.enqueue({
4015
+ type: "text-delta",
4016
+ id: value.item_id,
4017
+ delta: value.delta
4018
+ });
4019
+ if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
4020
+ logprobs.push(value.logprobs);
4021
+ }
4022
+ } else if (value.type === "response.reasoning_summary_part.added") {
4023
+ if (value.summary_index > 0) {
4024
+ const activeReasoningPart = activeReasoning[value.item_id];
4025
+ activeReasoningPart.summaryParts[value.summary_index] = "active";
4026
+ for (const summaryIndex of Object.keys(
4027
+ activeReasoningPart.summaryParts
4028
+ )) {
4029
+ if (activeReasoningPart.summaryParts[summaryIndex] === "can-conclude") {
4030
+ controller.enqueue({
4031
+ type: "reasoning-end",
4032
+ id: `${value.item_id}:${summaryIndex}`,
4033
+ providerMetadata: { openai: { itemId: value.item_id } }
4034
+ });
4035
+ activeReasoningPart.summaryParts[summaryIndex] = "concluded";
4036
+ }
4037
+ }
4038
+ controller.enqueue({
4039
+ type: "reasoning-start",
4040
+ id: `${value.item_id}:${value.summary_index}`,
4041
+ providerMetadata: {
4042
+ openai: {
4043
+ itemId: value.item_id,
4044
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
4045
+ }
4046
+ }
4047
+ });
4048
+ }
4049
+ } else if (value.type === "response.reasoning_summary_text.delta") {
4050
+ controller.enqueue({
4051
+ type: "reasoning-delta",
4052
+ id: `${value.item_id}:${value.summary_index}`,
4053
+ delta: value.delta,
4054
+ providerMetadata: {
4055
+ openai: {
4056
+ itemId: value.item_id
4057
+ }
4058
+ }
4059
+ });
4060
+ } else if (value.type === "response.reasoning_summary_part.done") {
4061
+ if (store) {
4062
+ controller.enqueue({
4063
+ type: "reasoning-end",
4064
+ id: `${value.item_id}:${value.summary_index}`,
4065
+ providerMetadata: {
4066
+ openai: { itemId: value.item_id }
4067
+ }
4068
+ });
4069
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
4070
+ } else {
4071
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
4072
+ }
4073
+ } else if (isResponseFinishedChunk(value)) {
4074
+ finishReason = mapOpenAIResponseFinishReason({
4075
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
4076
+ hasFunctionCall
4077
+ });
4078
+ usage.inputTokens = value.response.usage.input_tokens;
4079
+ usage.outputTokens = value.response.usage.output_tokens;
4080
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
4081
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
4082
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
4083
+ if (typeof value.response.service_tier === "string") {
4084
+ serviceTier = value.response.service_tier;
4085
+ }
4086
+ } else if (isResponseAnnotationAddedChunk(value)) {
4087
+ ongoingAnnotations.push(value.annotation);
4088
+ if (value.annotation.type === "url_citation") {
4089
+ controller.enqueue({
4090
+ type: "source",
4091
+ sourceType: "url",
4092
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : chunkOWIEOL55_cjs.generateId(),
4093
+ url: value.annotation.url,
4094
+ title: value.annotation.title
4095
+ });
4096
+ } else if (value.annotation.type === "file_citation") {
4097
+ controller.enqueue({
4098
+ type: "source",
4099
+ sourceType: "document",
4100
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : chunkOWIEOL55_cjs.generateId(),
4101
+ mediaType: "text/plain",
4102
+ title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
4103
+ filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
4104
+ ...value.annotation.file_id ? {
4105
+ providerMetadata: {
4106
+ openai: {
4107
+ fileId: value.annotation.file_id
4108
+ }
4109
+ }
4110
+ } : {}
4111
+ });
4112
+ }
4113
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "message") {
4114
+ controller.enqueue({
4115
+ type: "text-end",
4116
+ id: value.item.id,
4117
+ providerMetadata: {
4118
+ openai: {
4119
+ itemId: value.item.id,
4120
+ ...ongoingAnnotations.length > 0 && {
4121
+ annotations: ongoingAnnotations
4122
+ }
4123
+ }
4124
+ }
4125
+ });
4126
+ } else if (isErrorChunk(value)) {
4127
+ controller.enqueue({ type: "error", error: value });
4128
+ }
4129
+ },
4130
+ flush(controller) {
4131
+ const providerMetadata = {
4132
+ openai: {
4133
+ responseId
4134
+ }
4135
+ };
4136
+ if (logprobs.length > 0) {
4137
+ providerMetadata.openai.logprobs = logprobs;
4138
+ }
4139
+ if (serviceTier !== void 0) {
4140
+ providerMetadata.openai.serviceTier = serviceTier;
4141
+ }
4142
+ controller.enqueue({
4143
+ type: "finish",
4144
+ finishReason,
4145
+ usage,
4146
+ providerMetadata
4147
+ });
4148
+ }
4149
+ })
4150
+ ),
4151
+ request: { body },
4152
+ response: { headers: responseHeaders }
4153
+ };
4154
+ }
4155
+ };
4156
+ function isTextDeltaChunk(chunk) {
4157
+ return chunk.type === "response.output_text.delta";
4158
+ }
4159
+ function isResponseOutputItemDoneChunk(chunk) {
4160
+ return chunk.type === "response.output_item.done";
4161
+ }
4162
// Type guard: true when the response ended, either fully (`completed`)
// or truncated (`incomplete`).
function isResponseFinishedChunk(chunk) {
  switch (chunk.type) {
    case "response.completed":
    case "response.incomplete":
      return true;
    default:
      return false;
  }
}
4165
// Type guard: true when `chunk` is the initial response-created event.
function isResponseCreatedChunk(chunk) {
  const { type } = chunk;
  return type === "response.created";
}
4168
// Type guard: true when `chunk` carries a streamed slice of tool-call arguments.
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.function_call_arguments.delta";
}
4171
// Type guard: true when `chunk` carries a streamed slice of code-interpreter code.
function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.code_interpreter_call_code.delta";
}
4174
// Type guard: true when the code-interpreter code stream has finished.
function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
  const { type } = chunk;
  return type === "response.code_interpreter_call_code.done";
}
4177
// Type guard: true when a new output item was opened on the response.
function isResponseOutputItemAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_item.added";
}
4180
// Type guard: true when an annotation (citation/file reference) was added
// to the streamed output text.
function isResponseAnnotationAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_text.annotation.added";
}
4183
// Type guard: true when `chunk` is an error event from the stream.
function isErrorChunk(chunk) {
  const { type } = chunk;
  return type === "error";
}
4186
// Derives per-model capabilities for the OpenAI Responses API from the
// model id prefix:
//   - supportsFlexProcessing / supportsPriorityProcessing service tiers,
//   - whether the model is a reasoning model,
//   - which role system prompts must use ("system" vs "developer").
function getResponsesModelConfig(modelId) {
  const startsWithAny = (...prefixes) => prefixes.some((p) => modelId.startsWith(p));
  // Flex tier: o3 / o4-mini, and gpt-5 family except gpt-5-chat.
  const flex =
    startsWithAny("o3", "o4-mini") ||
    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"));
  // Priority tier: gpt-4*, gpt-5-mini, gpt-5 family minus nano/chat, o3, o4-mini.
  const priority =
    startsWithAny("gpt-4", "gpt-5-mini") ||
    (modelId.startsWith("gpt-5") &&
      !modelId.startsWith("gpt-5-nano") &&
      !modelId.startsWith("gpt-5-chat")) ||
    startsWithAny("o3", "o4-mini");
  const defaults = {
    systemMessageMode: "system",
    supportsFlexProcessing: flex,
    supportsPriorityProcessing: priority
  };
  // gpt-5-chat is checked first: it matches the "gpt-5" prefix below but is
  // a non-reasoning chat model.
  if (modelId.startsWith("gpt-5-chat")) {
    return { ...defaults, isReasoningModel: false };
  }
  // Reasoning models take system prompts via the "developer" role.
  if (startsWithAny("o", "gpt-5", "codex-", "computer-use")) {
    return { ...defaults, isReasoningModel: true, systemMessageMode: "developer" };
  }
  return { ...defaults, isReasoningModel: false };
}
4212
// Maps a raw web-search tool action from the Responses API into the
// normalized { action, sources? } shape. Unknown action types fall through
// and yield `undefined`, matching the original switch without a default.
function mapWebSearchOutput(action) {
  if (action.type === "search") {
    const mapped = { action: { type: "search", query: action.query ?? undefined } };
    // Sources are only present when requested via the include flag.
    if (action.sources != null) {
      mapped.sources = action.sources;
    }
    return mapped;
  }
  if (action.type === "open_page") {
    return { action: { type: "openPage", url: action.url } };
  }
  if (action.type === "find") {
    return { action: { type: "find", url: action.url, pattern: action.pattern } };
  }
}
4229
+
4230
// ../../node_modules/.pnpm/@ai-sdk+azure@2.0.74_zod@3.25.76/node_modules/@ai-sdk/azure/dist/index.mjs
// OpenAI built-in tool factories (defined earlier in this bundle) re-exposed
// under the Azure provider; attached as `provider.tools` in createAzure().
var azureOpenaiTools = {
  codeInterpreter,
  fileSearch,
  imageGeneration,
  webSearchPreview
};
// Bundled @ai-sdk/azure package version; used only for the
// `ai-sdk/azure/<VERSION>` User-Agent suffix in createAzure().
var VERSION = "2.0.74" ;
4238
// Builds an Azure OpenAI provider. The returned value is a callable
// (deploymentName) => chat model, with factory methods attached for every
// model family (chat, completion, embedding, image, responses,
// transcription, speech) plus the built-in tool set.
//
// Credentials are resolved lazily: the api key (options.apiKey or
// AZURE_API_KEY) and resource name (options.resourceName or
// AZURE_RESOURCE_NAME) are only read when a request is made.
function createAzure(options = {}) {
  // Request headers: api-key auth plus any caller-supplied headers, with an
  // ai-sdk/azure User-Agent suffix appended.
  const getHeaders = () =>
    chunkOWIEOL55_cjs.withUserAgentSuffix(
      {
        "api-key": chunkOWIEOL55_cjs.loadApiKey({
          apiKey: options.apiKey,
          environmentVariableName: "AZURE_API_KEY",
          description: "Azure OpenAI"
        }),
        ...options.headers
      },
      `ai-sdk/azure/${VERSION}`
    );
  const getResourceName = () =>
    chunkOWIEOL55_cjs.loadSetting({
      settingValue: options.resourceName,
      settingName: "resourceName",
      environmentVariableName: "AZURE_RESOURCE_NAME",
      description: "Azure OpenAI resource name"
    });
  const apiVersion = options.apiVersion ?? "v1";
  // URL builder shared by all model factories. Two layouts:
  //   deployment-based: <base>/deployments/<modelId><path>
  //   v1 routes:        <base>/v1<path>
  // with api-version always appended as a query parameter.
  const url = ({ path, modelId }) => {
    const baseUrlPrefix =
      options.baseURL ?? `https://${getResourceName()}.openai.azure.com/openai`;
    const fullUrl = options.useDeploymentBasedUrls
      ? new URL(`${baseUrlPrefix}/deployments/${modelId}${path}`)
      : new URL(`${baseUrlPrefix}/v1${path}`);
    fullUrl.searchParams.set("api-version", apiVersion);
    return fullUrl.toString();
  };
  // Settings common to every model constructor below.
  const common = { url, headers: getHeaders, fetch: options.fetch };
  const createChatModel = (deploymentName) =>
    new OpenAIChatLanguageModel(deploymentName, { provider: "azure.chat", ...common });
  const createCompletionModel = (modelId) =>
    new OpenAICompletionLanguageModel(modelId, { provider: "azure.completion", ...common });
  const createEmbeddingModel = (modelId) =>
    new OpenAIEmbeddingModel(modelId, { provider: "azure.embeddings", ...common });
  const createResponsesModel = (modelId) =>
    new OpenAIResponsesLanguageModel(modelId, {
      provider: "azure.responses",
      ...common,
      // Azure file ids are prefixed "assistant-" rather than OpenAI's "file-".
      fileIdPrefixes: ["assistant-"]
    });
  const createImageModel = (modelId) =>
    new OpenAIImageModel(modelId, { provider: "azure.image", ...common });
  const createTranscriptionModel = (modelId) =>
    new OpenAITranscriptionModel(modelId, { provider: "azure.transcription", ...common });
  const createSpeechModel = (modelId) =>
    new OpenAISpeechModel(modelId, { provider: "azure.speech", ...common });
  // The provider itself is callable but must not be constructed with `new`.
  const provider = function (deploymentId) {
    if (new.target) {
      throw new Error(
        "The Azure OpenAI model function cannot be called with the new keyword."
      );
    }
    return createChatModel(deploymentId);
  };
  provider.languageModel = createChatModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.embedding = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.responses = createResponsesModel;
  provider.transcription = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.tools = azureOpenaiTools;
  return provider;
}
4335
+ createAzure();
4336
+
4337
// src/llm/model/gateways/azure.ts
// Gateway that routes Mastra model ids to an Azure OpenAI resource.
// Deployment names are supplied either statically (`config.deployments`) or
// discovered at runtime through the Azure Resource Manager API using the
// service-principal credentials in `config.management`.
var AzureOpenAIGateway = class extends chunkOWIEOL55_cjs.MastraModelGateway {
  // config fields read below: resourceName, apiKey, deployments?, management?,
  // apiVersion? — shape inferred from usage; TODO confirm against the TS source.
  constructor(config) {
    super();
    this.config = config;
    // Fail fast: throws a MastraError when the config is unusable.
    this.validateConfig();
  }
  id = "azure-openai";
  name = "azure-openai";
  // Caches Azure AD management-API tokens per tenant/client as
  // { token, expiresAt } with expiresAt in epoch seconds.
  tokenCache = new chunkUIGRFDO6_cjs.InMemoryServerCache();
  // Validates required fields and warns on conflicting configuration.
  validateConfig() {
    if (!this.config.resourceName) {
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_GATEWAY_INVALID_CONFIG",
        domain: "LLM",
        category: "UNKNOWN",
        text: "resourceName is required for Azure OpenAI gateway"
      });
    }
    if (!this.config.apiKey) {
      // NOTE(review): same error id as the resourceName failure above; only
      // `text` distinguishes the two — confirm this is intentional.
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_GATEWAY_INVALID_CONFIG",
        domain: "LLM",
        category: "UNKNOWN",
        text: "apiKey is required for Azure OpenAI gateway"
      });
    }
    const hasDeployments = this.config.deployments && this.config.deployments.length > 0;
    const hasManagement = this.config.management !== void 0;
    if (hasDeployments && hasManagement) {
      // Static deployments take precedence over discovery (see fetchProviders).
      console.warn(
        "[AzureOpenAIGateway] Both deployments and management credentials provided. Using static deployments list and ignoring management API."
      );
    }
    if (hasManagement) {
      // Called for its validation side effect only; throws when any
      // management credential field is missing.
      this.getManagementCredentials(this.config.management);
    }
  }
  // Builds this gateway's provider map. Precedence:
  //   1. static `config.deployments`,
  //   2. deployments discovered through the management API,
  //   3. an empty model list (also used as the error fallback).
  async fetchProviders() {
    if (this.config.deployments && this.config.deployments.length > 0) {
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: this.config.deployments,
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    }
    if (!this.config.management) {
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: [],
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    }
    try {
      const credentials = this.getManagementCredentials(this.config.management);
      const token = await this.getAzureADToken({
        tenantId: credentials.tenantId,
        clientId: credentials.clientId,
        clientSecret: credentials.clientSecret
      });
      const deployments = await this.fetchDeployments(token, {
        subscriptionId: credentials.subscriptionId,
        resourceGroup: credentials.resourceGroup,
        resourceName: this.config.resourceName
      });
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: deployments.map((d) => d.name),
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    } catch (error) {
      // Discovery is best-effort: degrade to an empty model list so the
      // gateway stays usable with manually specified deployment names.
      const errorMsg = error instanceof Error ? error.message : String(error);
      console.warn(
        `[AzureOpenAIGateway] Deployment discovery failed: ${errorMsg}`,
        "\nReturning fallback configuration. Azure OpenAI can still be used by manually specifying deployment names."
      );
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: [],
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    }
  }
  // Validates and narrows the management credential object; throws
  // AZURE_MANAGEMENT_CREDENTIALS_MISSING listing every absent field.
  getManagementCredentials(management) {
    const { tenantId, clientId, clientSecret, subscriptionId, resourceGroup } = management;
    const missing = [];
    if (!tenantId) missing.push("tenantId");
    if (!clientId) missing.push("clientId");
    if (!clientSecret) missing.push("clientSecret");
    if (!subscriptionId) missing.push("subscriptionId");
    if (!resourceGroup) missing.push("resourceGroup");
    if (missing.length > 0) {
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_MANAGEMENT_CREDENTIALS_MISSING",
        domain: "LLM",
        category: "UNKNOWN",
        text: `Management credentials incomplete. Missing: ${missing.join(", ")}. Required fields: tenantId, clientId, clientSecret, subscriptionId, resourceGroup.`
      });
    }
    return {
      tenantId,
      clientId,
      clientSecret,
      subscriptionId,
      resourceGroup
    };
  }
  // OAuth2 client-credentials flow against Azure AD for the ARM
  // (management.azure.com) scope. Tokens are cached and reused until
  // 60 seconds before expiry.
  async getAzureADToken(credentials) {
    const { tenantId, clientId, clientSecret } = credentials;
    const cacheKey = `azure-mgmt-token:${tenantId}:${clientId}`;
    const cached = await this.tokenCache.get(cacheKey);
    // 60-second skew so a token is never used right at its expiry boundary.
    if (cached && cached.expiresAt > Date.now() / 1e3 + 60) {
      return cached.token;
    }
    const tokenEndpoint = `https://login.microsoftonline.com/${tenantId}/oauth2/v2.0/token`;
    const body = new URLSearchParams({
      grant_type: "client_credentials",
      client_id: clientId,
      client_secret: clientSecret,
      scope: "https://management.azure.com/.default"
    });
    const response = await fetch(tokenEndpoint, {
      method: "POST",
      headers: {
        "Content-Type": "application/x-www-form-urlencoded"
      },
      body: body.toString()
    });
    if (!response.ok) {
      const error = await response.text();
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_AD_TOKEN_ERROR",
        domain: "LLM",
        category: "UNKNOWN",
        text: `Failed to get Azure AD token: ${response.status} ${error}`
      });
    }
    const tokenResponse = await response.json();
    const expiresAt = Math.floor(Date.now() / 1e3) + tokenResponse.expires_in;
    await this.tokenCache.set(cacheKey, {
      token: tokenResponse.access_token,
      expiresAt
    });
    return tokenResponse.access_token;
  }
  // Lists deployments of the Cognitive Services account via the ARM REST API,
  // following `nextLink` pagination; returns only successfully provisioned
  // deployments.
  async fetchDeployments(token, credentials) {
    const { subscriptionId, resourceGroup, resourceName } = credentials;
    let url = `https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.CognitiveServices/accounts/${resourceName}/deployments?api-version=2024-10-01`;
    const allDeployments = [];
    while (url) {
      const response = await fetch(url, {
        headers: {
          Authorization: `Bearer ${token}`,
          "Content-Type": "application/json"
        }
      });
      if (!response.ok) {
        const error = await response.text();
        throw new chunkHWMMIRIF_cjs.MastraError({
          id: "AZURE_DEPLOYMENTS_FETCH_ERROR",
          domain: "LLM",
          category: "UNKNOWN",
          text: `Failed to fetch Azure deployments: ${response.status} ${error}`
        });
      }
      const data = await response.json();
      allDeployments.push(...data.value);
      // nextLink is undefined on the last page, ending the loop.
      url = data.nextLink;
    }
    const successfulDeployments = allDeployments.filter((d) => d.properties.provisioningState === "Succeeded");
    return successfulDeployments;
  }
  // URLs are built per deployment inside createAzure(); nothing to do here.
  buildUrl(_routerId, _envVars) {
    return void 0;
  }
  // The same resource-level api key is used regardless of model id.
  async getApiKey(_modelId) {
    return this.config.apiKey;
  }
  // Instantiates an @ai-sdk/azure chat model for the given deployment name,
  // using deployment-based URLs (.../deployments/<modelId>/...).
  async resolveLanguageModel({
    modelId,
    apiKey
  }) {
    const apiVersion = this.config.apiVersion || "2024-04-01-preview";
    return createAzure({
      resourceName: this.config.resourceName,
      apiKey,
      apiVersion,
      useDeploymentBasedUrls: true
    })(modelId);
  }
};
4548
+
4549
+ // src/llm/model/gateways/index.ts
4550
// Picks the gateway responsible for a model id: first a gateway whose id
// matches the id's prefix (models.dev is excluded from prefix matching),
// then the models.dev gateway as the catch-all, otherwise a MastraError.
function findGatewayForModel(gatewayId, gateways) {
  const matchesPrefix = (g) =>
    g.id !== "models.dev" && (g.id === gatewayId || gatewayId.startsWith(`${g.id}/`));
  for (const gateway of gateways) {
    if (matchesPrefix(gateway)) {
      return gateway;
    }
  }
  const fallback = gateways.find((g) => g.id === "models.dev");
  if (fallback) {
    return fallback;
  }
  throw new chunkHWMMIRIF_cjs.MastraError({
    id: "MODEL_ROUTER_NO_GATEWAY_FOUND",
    category: "USER",
    domain: "MODEL_ROUTER",
    text: `No Mastra model router gateway found for model id ${gatewayId}`
  });
}
4568
+
4569
+ // src/llm/model/router.ts
4570
// Returns the subset of the bundled PROVIDER_REGISTRY whose entries declare
// the given gateway name, keyed by provider id.
function getStaticProvidersByGateway(name) {
  const matching = {};
  for (const [providerId, providerConfig] of Object.entries(chunkAGHLXC4I_cjs.PROVIDER_REGISTRY)) {
    if (providerConfig.gateway === name) {
      matching[providerId] = providerConfig;
    }
  }
  return matching;
}
4573
// Fallback gateway chain, consulted after any custom gateways: Netlify first,
// then models.dev seeded with the statically bundled providers that declare
// `gateway: "models.dev"`.
var defaultGateways = [new chunk7HEAVZRS_cjs.NetlifyGateway(), new chunkY7MZ5LJT_cjs.ModelsDevGateway(getStaticProvidersByGateway(`models.dev`))];
4574
// LanguageModelV2-compatible model that routes a "provider/model" id through
// a MastraModelGateway, lazily resolving (and memoizing) the underlying
// provider model on first use.
var ModelRouterLanguageModel = class _ModelRouterLanguageModel {
  specificationVersion = "v2";
  defaultObjectGenerationMode = "json";
  supportsStructuredOutputs = true;
  supportsImageUrls = true;
  supportedUrls = {};
  modelId;
  provider;
  config;
  gateway;
  // `config` may be a "provider/model" string, a { providerId, modelId, ... }
  // pair, or an { id, ... } object; all three are normalized to
  // { id, url?, apiKey?, headers? } before gateway resolution.
  constructor(config, customGateways) {
    let normalizedConfig;
    if (typeof config === "string") {
      normalizedConfig = { id: config };
    } else if ("providerId" in config && "modelId" in config) {
      normalizedConfig = {
        id: `${config.providerId}/${config.modelId}`,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    } else {
      normalizedConfig = {
        id: config.id,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    }
    // routerId keeps the original (possibly gateway-prefixed) id; `id` may be
    // rewritten to the bare model id below.
    const parsedConfig = {
      ...normalizedConfig,
      routerId: normalizedConfig.id
    };
    // Custom gateways take precedence over the built-in defaults.
    this.gateway = findGatewayForModel(normalizedConfig.id, [...customGateways || [], ...defaultGateways]);
    const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
    const parsed = chunkY7MZ5LJT_cjs.parseModelRouterId(normalizedConfig.id, gatewayPrefix);
    this.provider = parsed.providerId || "openai-compatible";
    if (parsed.providerId && parsed.modelId !== normalizedConfig.id) {
      parsedConfig.id = parsed.modelId;
    }
    this.modelId = parsedConfig.id;
    this.config = parsedConfig;
  }
  // Resolves credentials and the underlying model, then delegates generation.
  async doGenerate(options) {
    let apiKey;
    try {
      if (this.config.url) {
        // Custom endpoint: the api key is optional.
        apiKey = this.config.apiKey || "";
      } else {
        apiKey = this.config.apiKey || await this.gateway.getApiKey(this.config.routerId);
      }
    } catch (error) {
      // NOTE(review): this error path returns a stream-shaped object,
      // mirroring doStream() below, even though doGenerate callers normally
      // expect a generate result — confirm downstream handling before changing.
      return {
        stream: new ReadableStream({
          start(controller) {
            controller.enqueue({
              type: "error",
              error
            });
            controller.close();
          }
        })
      };
    }
    const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
    const model = await this.resolveLanguageModel({
      apiKey,
      headers: this.config.headers,
      ...chunkY7MZ5LJT_cjs.parseModelRouterId(this.config.routerId, gatewayPrefix)
    });
    const aiSDKV5Model = new chunkSZYSDJTN_cjs.AISDKV5LanguageModel(model);
    return aiSDKV5Model.doGenerate(options);
  }
  // Same credential/model resolution as doGenerate, delegating to doStream.
  // Credential failures surface as an error chunk on the returned stream
  // rather than a rejected promise.
  async doStream(options) {
    let apiKey;
    try {
      if (this.config.url) {
        apiKey = this.config.apiKey || "";
      } else {
        apiKey = this.config.apiKey || await this.gateway.getApiKey(this.config.routerId);
      }
    } catch (error) {
      return {
        stream: new ReadableStream({
          start(controller) {
            controller.enqueue({
              type: "error",
              error
            });
            controller.close();
          }
        })
      };
    }
    const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
    const model = await this.resolveLanguageModel({
      apiKey,
      headers: this.config.headers,
      ...chunkY7MZ5LJT_cjs.parseModelRouterId(this.config.routerId, gatewayPrefix)
    });
    const aiSDKV5Model = new chunkSZYSDJTN_cjs.AISDKV5LanguageModel(model);
    return aiSDKV5Model.doStream(options);
  }
  // Resolves (and memoizes) the underlying provider model. The cache key is a
  // sha256 over gateway id, model/provider ids, api key, custom URL, and
  // headers. `crypto` is presumably Node's crypto module required near the
  // top of this bundle — TODO confirm.
  async resolveLanguageModel({
    modelId,
    providerId,
    apiKey,
    headers
  }) {
    const key = crypto.createHash("sha256").update(
      this.gateway.id + modelId + providerId + apiKey + (this.config.url || "") + (headers ? JSON.stringify(headers) : "")
    ).digest("hex");
    if (_ModelRouterLanguageModel.modelInstances.has(key)) return _ModelRouterLanguageModel.modelInstances.get(key);
    if (this.config.url) {
      // Custom endpoint: always use the generic OpenAI-compatible provider.
      const modelInstance2 = chunkOWIEOL55_cjs.createOpenAICompatible({
        name: providerId,
        apiKey,
        baseURL: this.config.url,
        headers: this.config.headers,
        supportsStructuredOutputs: true
      }).chatModel(modelId);
      _ModelRouterLanguageModel.modelInstances.set(key, modelInstance2);
      return modelInstance2;
    }
    const modelInstance = await this.gateway.resolveLanguageModel({ modelId, providerId, apiKey, headers });
    _ModelRouterLanguageModel.modelInstances.set(key, modelInstance);
    return modelInstance;
  }
  // Process-wide model cache shared by every router instance.
  static modelInstances = /* @__PURE__ */ new Map();
};
4704
+
4705
+ // src/llm/model/resolve-model.ts
4706
// True when `modelConfig` is a plain router-config object (has `id` or a
// providerId/modelId pair) rather than an AI-SDK model instance
// (`specificationVersion`) or a wrapper carrying a `model` property.
function isOpenAICompatibleObjectConfig(modelConfig) {
  if (typeof modelConfig !== "object") {
    return false;
  }
  if ("specificationVersion" in modelConfig) {
    return false;
  }
  if ("model" in modelConfig) {
    return false;
  }
  return "id" in modelConfig || ("providerId" in modelConfig && "modelId" in modelConfig);
}
4714
// Resolves any supported model configuration into a usable language model:
//   - functions are invoked with { requestContext, mastra } first,
//   - already-wrapped models pass through unchanged,
//   - raw AI-SDK v2 models are wrapped in AISDKV5LanguageModel,
//   - strings and router-config objects become ModelRouterLanguageModel
//     (honoring any custom gateways registered on the Mastra instance).
// Throws for anything else.
async function resolveModelConfig(modelConfig, requestContext = new chunkUVHSM2GU_cjs.RequestContext(), mastra) {
  let resolved = modelConfig;
  if (typeof resolved === "function") {
    resolved = await resolved({ requestContext, mastra });
  }
  const isRouterModel = resolved instanceof ModelRouterLanguageModel;
  const isWrappedModel = resolved instanceof chunkSZYSDJTN_cjs.AISDKV5LanguageModel;
  if (isRouterModel || isWrappedModel) {
    return resolved;
  }
  if (typeof resolved === "object" && "specificationVersion" in resolved) {
    return resolved.specificationVersion === "v2"
      ? new chunkSZYSDJTN_cjs.AISDKV5LanguageModel(resolved)
      : resolved;
  }
  const gatewayRecord = mastra?.listGateways();
  const customGateways = gatewayRecord ? Object.values(gatewayRecord) : void 0;
  if (typeof resolved === "string" || isOpenAICompatibleObjectConfig(resolved)) {
    return new ModelRouterLanguageModel(resolved, customGateways);
  }
  throw new Error("Invalid model configuration provided");
}
4734
+
4735
+ // src/llm/model/embedding-router.ts
4736
// EmbeddingModelV2-compatible router: resolves a "provider/model" embedding
// id to a concrete provider model eagerly in the constructor (unlike
// ModelRouterLanguageModel, which resolves lazily).
var ModelRouterEmbeddingModel = class {
  specificationVersion = "v2";
  modelId;
  provider;
  // Conservative defaults; overwritten from the resolved provider model below
  // when it declares its own limits.
  maxEmbeddingsPerCall = 2048;
  supportsParallelCalls = true;
  providerModel;
  // `config` may be a "provider/model" string, a { providerId, modelId, ... }
  // pair, or an { id, ... } object. Note ids with more than one "/" are
  // rejected — confirm no supported provider uses nested model paths.
  constructor(config) {
    let normalizedConfig;
    if (typeof config === "string") {
      const parts = config.split("/");
      if (parts.length !== 2) {
        throw new Error(`Invalid model string format: "${config}". Expected format: "provider/model"`);
      }
      const [providerId, modelId] = parts;
      normalizedConfig = { providerId, modelId };
    } else if ("providerId" in config && "modelId" in config) {
      normalizedConfig = {
        providerId: config.providerId,
        modelId: config.modelId,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    } else {
      const parts = config.id.split("/");
      if (parts.length !== 2) {
        throw new Error(`Invalid model string format: "${config.id}". Expected format: "provider/model"`);
      }
      const [providerId, modelId] = parts;
      normalizedConfig = {
        providerId,
        modelId,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    }
    this.provider = normalizedConfig.providerId;
    this.modelId = normalizedConfig.modelId;
    if (normalizedConfig.url) {
      // Custom endpoint: generic OpenAI-compatible provider, key optional.
      const apiKey = normalizedConfig.apiKey || "";
      this.providerModel = chunkOWIEOL55_cjs.createOpenAICompatible({
        name: normalizedConfig.providerId,
        apiKey,
        baseURL: normalizedConfig.url,
        headers: normalizedConfig.headers
      }).textEmbeddingModel(normalizedConfig.modelId);
    } else {
      const registry = chunkAGHLXC4I_cjs.GatewayRegistry.getInstance();
      const providerConfig = registry.getProviderConfig(normalizedConfig.providerId);
      if (!providerConfig) {
        throw new Error(`Unknown provider: ${normalizedConfig.providerId}`);
      }
      // Resolve the api key: explicit config first, then the provider's
      // declared env var(s) — first non-empty one wins.
      let apiKey = normalizedConfig.apiKey;
      if (!apiKey) {
        const apiKeyEnvVar = providerConfig.apiKeyEnvVar;
        if (Array.isArray(apiKeyEnvVar)) {
          for (const envVar of apiKeyEnvVar) {
            apiKey = process.env[envVar];
            if (apiKey) break;
          }
        } else {
          apiKey = process.env[apiKeyEnvVar];
        }
      }
      if (!apiKey) {
        const envVarDisplay = Array.isArray(providerConfig.apiKeyEnvVar) ? providerConfig.apiKeyEnvVar.join(" or ") : providerConfig.apiKeyEnvVar;
        throw new Error(`API key not found for provider ${normalizedConfig.providerId}. Set ${envVarDisplay}`);
      }
      // openai and google get dedicated SDK providers; everything else goes
      // through the OpenAI-compatible provider at the registry's base URL.
      if (normalizedConfig.providerId === "openai") {
        this.providerModel = chunkOWIEOL55_cjs.createOpenAI({ apiKey }).textEmbeddingModel(
          normalizedConfig.modelId
        );
      } else if (normalizedConfig.providerId === "google") {
        this.providerModel = chunkOWIEOL55_cjs.createGoogleGenerativeAI({ apiKey }).textEmbedding(
          normalizedConfig.modelId
        );
      } else {
        if (!providerConfig.url) {
          throw new Error(`Provider ${normalizedConfig.providerId} does not have a URL configured`);
        }
        this.providerModel = chunkOWIEOL55_cjs.createOpenAICompatible({
          name: normalizedConfig.providerId,
          apiKey,
          baseURL: providerConfig.url
        }).textEmbeddingModel(normalizedConfig.modelId);
      }
    }
    // Inherit the resolved model's declared limits when present.
    if (this.providerModel.maxEmbeddingsPerCall !== void 0) {
      this.maxEmbeddingsPerCall = this.providerModel.maxEmbeddingsPerCall;
    }
    if (this.providerModel.supportsParallelCalls !== void 0) {
      this.supportsParallelCalls = this.providerModel.supportsParallelCalls;
    }
  }
  // Pass-through to the resolved provider model.
  async doEmbed(args) {
    return this.providerModel.doEmbed(args);
  }
};
4836
+
4837
// Public surface of this generated chunk, consumed by sibling dist chunks.
exports.AzureOpenAIGateway = AzureOpenAIGateway;
exports.ModelRouterEmbeddingModel = ModelRouterEmbeddingModel;
exports.ModelRouterLanguageModel = ModelRouterLanguageModel;
exports.resolveModelConfig = resolveModelConfig;
// NOTE(review): sourceMappingURL is emitted twice — harmless (tools use the
// last occurrence) but looks like a bundler artifact.
//# sourceMappingURL=chunk-MXBVP7HX.cjs.map
//# sourceMappingURL=chunk-MXBVP7HX.cjs.map