@mastra/core 1.0.0-beta.11 → 1.0.0-beta.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (447) hide show
  1. package/CHANGELOG.md +343 -0
  2. package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
  3. package/dist/_types/@internal_ai-sdk-v4/dist/test.d.ts +65 -0
  4. package/dist/_types/@internal_ai-sdk-v5/dist/index.d.ts +8396 -0
  5. package/dist/_types/@internal_ai-sdk-v5/dist/test.d.ts +1708 -0
  6. package/dist/_types/@internal_external-types/dist/index.d.ts +858 -0
  7. package/dist/agent/agent-legacy.d.ts +1 -1
  8. package/dist/agent/agent.d.ts +3 -3
  9. package/dist/agent/agent.d.ts.map +1 -1
  10. package/dist/agent/agent.types.d.ts +11 -8
  11. package/dist/agent/agent.types.d.ts.map +1 -1
  12. package/dist/agent/index.cjs +17 -9
  13. package/dist/agent/index.d.ts +1 -1
  14. package/dist/agent/index.d.ts.map +1 -1
  15. package/dist/agent/index.js +2 -2
  16. package/dist/agent/message-list/index.cjs +3 -3
  17. package/dist/agent/message-list/index.d.ts +4 -3
  18. package/dist/agent/message-list/index.d.ts.map +1 -1
  19. package/dist/agent/message-list/index.js +1 -1
  20. package/dist/agent/message-list/prompt/attachments-to-parts.d.ts +1 -1
  21. package/dist/agent/message-list/prompt/invalid-content-error.d.ts +1 -1
  22. package/dist/agent/message-list/types.d.ts +3 -3
  23. package/dist/agent/message-list/types.d.ts.map +1 -1
  24. package/dist/agent/message-list/utils/ai-v4-v5/core-model-message.d.ts +1 -1
  25. package/dist/agent/message-list/utils/ai-v4-v5/ui-message.d.ts +1 -1
  26. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts +2 -2
  27. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts.map +1 -1
  28. package/dist/agent/message-list/utils/convert-messages.d.ts +2 -2
  29. package/dist/agent/message-list/utils/convert-messages.d.ts.map +1 -1
  30. package/dist/agent/trip-wire.d.ts +2 -2
  31. package/dist/agent/trip-wire.d.ts.map +1 -1
  32. package/dist/agent/types.d.ts +3 -3
  33. package/dist/agent/utils.d.ts +7 -4
  34. package/dist/agent/utils.d.ts.map +1 -1
  35. package/dist/agent/workflows/prepare-stream/index.d.ts +4 -3
  36. package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
  37. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts +3 -3
  38. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
  39. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts +3 -3
  40. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts.map +1 -1
  41. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts +3 -3
  42. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
  43. package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -1
  44. package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
  45. package/dist/bundler/types.d.ts +15 -1
  46. package/dist/bundler/types.d.ts.map +1 -1
  47. package/dist/chunk-2AU5ZHBL.js +79 -0
  48. package/dist/chunk-2AU5ZHBL.js.map +1 -0
  49. package/dist/chunk-2SQB3WBT.js +4574 -0
  50. package/dist/chunk-2SQB3WBT.js.map +1 -0
  51. package/dist/{chunk-THZTRBFS.js → chunk-373OC54J.js} +8 -8
  52. package/dist/chunk-373OC54J.js.map +1 -0
  53. package/dist/{chunk-US2U7ECW.js → chunk-3IP3DZ7G.js} +234 -101
  54. package/dist/chunk-3IP3DZ7G.js.map +1 -0
  55. package/dist/{chunk-SXNQRJQD.js → chunk-4AT6YQKZ.js} +26 -20
  56. package/dist/chunk-4AT6YQKZ.js.map +1 -0
  57. package/dist/{chunk-C36YRTZ6.js → chunk-4CMIJQF6.js} +117 -114
  58. package/dist/chunk-4CMIJQF6.js.map +1 -0
  59. package/dist/chunk-53SZJCBX.cjs +4888 -0
  60. package/dist/chunk-53SZJCBX.cjs.map +1 -0
  61. package/dist/chunk-55VPMN3N.js +250 -0
  62. package/dist/chunk-55VPMN3N.js.map +1 -0
  63. package/dist/{chunk-QM5SRDJX.js → chunk-5PTZG26U.js} +66 -84
  64. package/dist/chunk-5PTZG26U.js.map +1 -0
  65. package/dist/{chunk-U3XOLEPX.js → chunk-5UQ5TB6J.js} +6 -32
  66. package/dist/chunk-5UQ5TB6J.js.map +1 -0
  67. package/dist/{chunk-O2BJW7YA.js → chunk-67LM2UCT.js} +9 -9
  68. package/dist/chunk-67LM2UCT.js.map +1 -0
  69. package/dist/{chunk-YC6PJEPH.cjs → chunk-6CG7IY57.cjs} +266 -133
  70. package/dist/chunk-6CG7IY57.cjs.map +1 -0
  71. package/dist/chunk-6PMMP3FR.js +7 -0
  72. package/dist/chunk-6PMMP3FR.js.map +1 -0
  73. package/dist/{chunk-DZUJEN5N.cjs → chunk-6SZKM6EC.cjs} +10 -3
  74. package/dist/{chunk-DZUJEN5N.cjs.map → chunk-6SZKM6EC.cjs.map} +1 -1
  75. package/dist/{chunk-5Q6WAYEY.cjs → chunk-72E3YF6A.cjs} +35 -49
  76. package/dist/chunk-72E3YF6A.cjs.map +1 -0
  77. package/dist/{chunk-5WRI5ZAA.js → chunk-7D4SUZUM.js} +10 -4
  78. package/dist/{chunk-5WRI5ZAA.js.map → chunk-7D4SUZUM.js.map} +1 -1
  79. package/dist/{chunk-7P6BNIJH.js → chunk-AYBJ5GAD.js} +281 -35
  80. package/dist/chunk-AYBJ5GAD.js.map +1 -0
  81. package/dist/chunk-D22XABFZ.js +79 -0
  82. package/dist/chunk-D22XABFZ.js.map +1 -0
  83. package/dist/{chunk-SCUWP4II.cjs → chunk-DBW6S25C.cjs} +47 -74
  84. package/dist/chunk-DBW6S25C.cjs.map +1 -0
  85. package/dist/{chunk-MRFUISXC.cjs → chunk-EGHGFLL3.cjs} +2631 -179
  86. package/dist/chunk-EGHGFLL3.cjs.map +1 -0
  87. package/dist/{chunk-BJXKH4LG.cjs → chunk-ETWAR2YE.cjs} +43 -78
  88. package/dist/chunk-ETWAR2YE.cjs.map +1 -0
  89. package/dist/{chunk-CZEJQSWB.cjs → chunk-F75EQ574.cjs} +65 -6
  90. package/dist/chunk-F75EQ574.cjs.map +1 -0
  91. package/dist/{chunk-BUKY6CTR.cjs → chunk-FPDJ4XN6.cjs} +282 -36
  92. package/dist/chunk-FPDJ4XN6.cjs.map +1 -0
  93. package/dist/chunk-FST2G2FQ.cjs +84 -0
  94. package/dist/chunk-FST2G2FQ.cjs.map +1 -0
  95. package/dist/chunk-FVQTJUBD.cjs +2120 -0
  96. package/dist/chunk-FVQTJUBD.cjs.map +1 -0
  97. package/dist/chunk-G6E6V2Z4.js +2070 -0
  98. package/dist/chunk-G6E6V2Z4.js.map +1 -0
  99. package/dist/{chunk-JIGDJK2O.js → chunk-GBQXIVL6.js} +4 -39
  100. package/dist/chunk-GBQXIVL6.js.map +1 -0
  101. package/dist/{chunk-F2GAJSBI.js → chunk-GELVUDUY.js} +11 -8
  102. package/dist/chunk-GELVUDUY.js.map +1 -0
  103. package/dist/chunk-GVAPYQRO.cjs +252 -0
  104. package/dist/chunk-GVAPYQRO.cjs.map +1 -0
  105. package/dist/{chunk-TWH4PTDG.cjs → chunk-HWMMIRIF.cjs} +32 -27
  106. package/dist/chunk-HWMMIRIF.cjs.map +1 -0
  107. package/dist/{chunk-52RSUALV.cjs → chunk-JAGQZZ43.cjs} +1660 -1196
  108. package/dist/chunk-JAGQZZ43.cjs.map +1 -0
  109. package/dist/{chunk-PK2A5WBG.js → chunk-K66U47VL.js} +54 -7
  110. package/dist/chunk-K66U47VL.js.map +1 -0
  111. package/dist/chunk-L3NKIMF5.cjs +10 -0
  112. package/dist/chunk-L3NKIMF5.cjs.map +1 -0
  113. package/dist/chunk-L4JCRWDY.cjs +252 -0
  114. package/dist/chunk-L4JCRWDY.cjs.map +1 -0
  115. package/dist/{chunk-IVV5TOMD.js → chunk-LDXKZYOV.js} +31 -11
  116. package/dist/chunk-LDXKZYOV.js.map +1 -0
  117. package/dist/chunk-NESKUIRE.cjs +4586 -0
  118. package/dist/chunk-NESKUIRE.cjs.map +1 -0
  119. package/dist/{chunk-SVLMF4UZ.cjs → chunk-NIOEY3N3.cjs} +66 -85
  120. package/dist/chunk-NIOEY3N3.cjs.map +1 -0
  121. package/dist/{chunk-PG5H6QIO.cjs → chunk-O3ULBGV6.cjs} +40 -20
  122. package/dist/chunk-O3ULBGV6.cjs.map +1 -0
  123. package/dist/{chunk-WTSZBHIZ.cjs → chunk-O5BQBZEF.cjs} +28 -28
  124. package/dist/chunk-O5BQBZEF.cjs.map +1 -0
  125. package/dist/{chunk-4JKEUSCC.cjs → chunk-OOUFPYSX.cjs} +25 -22
  126. package/dist/chunk-OOUFPYSX.cjs.map +1 -0
  127. package/dist/chunk-QDVYP2T7.js +4883 -0
  128. package/dist/chunk-QDVYP2T7.js.map +1 -0
  129. package/dist/{chunk-2ULLRN4Y.js → chunk-QF4MHFSU.js} +1294 -834
  130. package/dist/chunk-QF4MHFSU.js.map +1 -0
  131. package/dist/{chunk-Z57R5WS4.js → chunk-SLBWA2F3.js} +4 -4
  132. package/dist/{chunk-Z57R5WS4.js.map → chunk-SLBWA2F3.js.map} +1 -1
  133. package/dist/chunk-ST7NBF4H.cjs +84 -0
  134. package/dist/chunk-ST7NBF4H.cjs.map +1 -0
  135. package/dist/{chunk-YWMMBIOM.cjs → chunk-TDM43G4I.cjs} +15 -15
  136. package/dist/{chunk-YWMMBIOM.cjs.map → chunk-TDM43G4I.cjs.map} +1 -1
  137. package/dist/{chunk-S73Z3PBJ.cjs → chunk-TRUNX3AX.cjs} +138 -134
  138. package/dist/chunk-TRUNX3AX.cjs.map +1 -0
  139. package/dist/chunk-VE6HQ7H6.js +250 -0
  140. package/dist/chunk-VE6HQ7H6.js.map +1 -0
  141. package/dist/{chunk-OEIVMCWX.js → chunk-VZJOEGQA.js} +2536 -84
  142. package/dist/chunk-VZJOEGQA.js.map +1 -0
  143. package/dist/{chunk-JJ5O45LH.js → chunk-YPLZDWG7.js} +32 -27
  144. package/dist/chunk-YPLZDWG7.js.map +1 -0
  145. package/dist/{chunk-MGCGWPQJ.cjs → chunk-Z55SJVEC.cjs} +8 -8
  146. package/dist/chunk-Z55SJVEC.cjs.map +1 -0
  147. package/dist/error/index.cjs +6 -6
  148. package/dist/error/index.d.ts +26 -20
  149. package/dist/error/index.d.ts.map +1 -1
  150. package/dist/error/index.js +1 -1
  151. package/dist/error/utils.d.ts +19 -5
  152. package/dist/error/utils.d.ts.map +1 -1
  153. package/dist/evals/index.cjs +4 -4
  154. package/dist/evals/index.js +1 -1
  155. package/dist/evals/run/index.d.ts +1 -1
  156. package/dist/evals/run/index.d.ts.map +1 -1
  157. package/dist/evals/scoreTraces/index.cjs +8 -8
  158. package/dist/evals/scoreTraces/index.js +2 -2
  159. package/dist/evals/types.d.ts +1 -1
  160. package/dist/events/event-emitter.d.ts +6 -1
  161. package/dist/events/event-emitter.d.ts.map +1 -1
  162. package/dist/index.cjs +2 -2
  163. package/dist/index.js +1 -1
  164. package/dist/integration/index.cjs +2 -2
  165. package/dist/integration/index.js +1 -1
  166. package/dist/llm/index.cjs +15 -15
  167. package/dist/llm/index.d.ts +2 -2
  168. package/dist/llm/index.d.ts.map +1 -1
  169. package/dist/llm/index.js +5 -5
  170. package/dist/llm/model/aisdk/generate-to-stream.d.ts +20 -0
  171. package/dist/llm/model/aisdk/generate-to-stream.d.ts.map +1 -0
  172. package/dist/llm/model/aisdk/v5/model.d.ts +5 -1
  173. package/dist/llm/model/aisdk/v5/model.d.ts.map +1 -1
  174. package/dist/llm/model/aisdk/v6/model.d.ts +51 -0
  175. package/dist/llm/model/aisdk/v6/model.d.ts.map +1 -0
  176. package/dist/llm/model/base.types.d.ts +2 -2
  177. package/dist/llm/model/model.d.ts +1 -1
  178. package/dist/llm/model/model.d.ts.map +1 -1
  179. package/dist/llm/model/model.loop.d.ts +3 -3
  180. package/dist/llm/model/model.loop.d.ts.map +1 -1
  181. package/dist/llm/model/model.loop.types.d.ts +1 -1
  182. package/dist/llm/model/model.loop.types.d.ts.map +1 -1
  183. package/dist/llm/model/provider-types.generated.d.ts +135 -11
  184. package/dist/llm/model/resolve-model.d.ts +2 -2
  185. package/dist/llm/model/resolve-model.d.ts.map +1 -1
  186. package/dist/llm/model/shared.types.d.ts +19 -8
  187. package/dist/llm/model/shared.types.d.ts.map +1 -1
  188. package/dist/loop/index.cjs +2 -2
  189. package/dist/loop/index.js +1 -1
  190. package/dist/loop/loop.d.ts +2 -2
  191. package/dist/loop/loop.d.ts.map +1 -1
  192. package/dist/loop/network/index.d.ts +2 -2
  193. package/dist/loop/network/index.d.ts.map +1 -1
  194. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts +2 -2
  195. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts.map +1 -1
  196. package/dist/loop/test-utils/MastraLanguageModelV3Mock.d.ts +37 -0
  197. package/dist/loop/test-utils/MastraLanguageModelV3Mock.d.ts.map +1 -0
  198. package/dist/loop/test-utils/fullStream.d.ts +2 -1
  199. package/dist/loop/test-utils/fullStream.d.ts.map +1 -1
  200. package/dist/loop/test-utils/options.d.ts.map +1 -1
  201. package/dist/loop/test-utils/resultObject.d.ts +2 -1
  202. package/dist/loop/test-utils/resultObject.d.ts.map +1 -1
  203. package/dist/loop/test-utils/streamObject.d.ts +1 -1
  204. package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
  205. package/dist/loop/test-utils/tools.d.ts.map +1 -1
  206. package/dist/loop/test-utils/utils-v3.d.ts +55 -0
  207. package/dist/loop/test-utils/utils-v3.d.ts.map +1 -0
  208. package/dist/loop/types.d.ts +8 -7
  209. package/dist/loop/types.d.ts.map +1 -1
  210. package/dist/loop/workflows/agentic-execution/index.d.ts +49 -49
  211. package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
  212. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +34 -34
  213. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  214. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +17 -17
  215. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
  216. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts +22 -21
  217. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts.map +1 -1
  218. package/dist/loop/workflows/agentic-loop/index.d.ts +49 -49
  219. package/dist/loop/workflows/agentic-loop/index.d.ts.map +1 -1
  220. package/dist/loop/workflows/run-state.d.ts +2 -2
  221. package/dist/loop/workflows/run-state.d.ts.map +1 -1
  222. package/dist/loop/workflows/schema.d.ts +18 -18
  223. package/dist/loop/workflows/schema.d.ts.map +1 -1
  224. package/dist/loop/workflows/stream.d.ts +2 -2
  225. package/dist/loop/workflows/stream.d.ts.map +1 -1
  226. package/dist/mastra/index.cjs +2 -2
  227. package/dist/mastra/index.js +1 -1
  228. package/dist/mcp/index.cjs +4 -4
  229. package/dist/mcp/index.js +1 -1
  230. package/dist/memory/index.cjs +6 -6
  231. package/dist/memory/index.js +1 -1
  232. package/dist/memory/memory.d.ts +1 -1
  233. package/dist/memory/types.d.ts +3 -3
  234. package/dist/memory/types.d.ts.map +1 -1
  235. package/dist/models-dev-E3WWI7VA.js +3 -0
  236. package/dist/{models-dev-23RN2WHG.js.map → models-dev-E3WWI7VA.js.map} +1 -1
  237. package/dist/models-dev-PPS7X4JM.cjs +12 -0
  238. package/dist/{models-dev-EO3SUIY2.cjs.map → models-dev-PPS7X4JM.cjs.map} +1 -1
  239. package/dist/netlify-TY656UYF.js +3 -0
  240. package/dist/{netlify-GXJ5D5DD.js.map → netlify-TY656UYF.js.map} +1 -1
  241. package/dist/netlify-VZFM5UH3.cjs +12 -0
  242. package/dist/{netlify-KJLY3GFS.cjs.map → netlify-VZFM5UH3.cjs.map} +1 -1
  243. package/dist/processors/index.cjs +37 -37
  244. package/dist/processors/index.d.ts +9 -9
  245. package/dist/processors/index.d.ts.map +1 -1
  246. package/dist/processors/index.js +1 -1
  247. package/dist/processors/runner.d.ts.map +1 -1
  248. package/dist/processors/step-schema.d.ts +1293 -1293
  249. package/dist/processors/step-schema.d.ts.map +1 -1
  250. package/dist/provider-registry-NXVD764B.js +3 -0
  251. package/dist/{provider-registry-F67Y6OF2.js.map → provider-registry-NXVD764B.js.map} +1 -1
  252. package/dist/provider-registry-ZIWSEUQE.cjs +40 -0
  253. package/dist/{provider-registry-3TG2KUD2.cjs.map → provider-registry-ZIWSEUQE.cjs.map} +1 -1
  254. package/dist/provider-registry.json +276 -30
  255. package/dist/{registry-generator-UMTNPBJX.js → registry-generator-AVQXI3GX.js} +2 -2
  256. package/dist/{registry-generator-UMTNPBJX.js.map → registry-generator-AVQXI3GX.js.map} +1 -1
  257. package/dist/{registry-generator-34SC4TAU.cjs → registry-generator-KOFNIIWJ.cjs} +2 -2
  258. package/dist/{registry-generator-34SC4TAU.cjs.map → registry-generator-KOFNIIWJ.cjs.map} +1 -1
  259. package/dist/relevance/index.cjs +3 -3
  260. package/dist/relevance/index.cjs.map +1 -1
  261. package/dist/relevance/index.js +2 -2
  262. package/dist/relevance/index.js.map +1 -1
  263. package/dist/server/index.cjs +5 -5
  264. package/dist/server/index.js +1 -1
  265. package/dist/storage/base.d.ts +2 -10
  266. package/dist/storage/base.d.ts.map +1 -1
  267. package/dist/storage/domains/workflows/base.d.ts +2 -8
  268. package/dist/storage/domains/workflows/base.d.ts.map +1 -1
  269. package/dist/storage/domains/workflows/inmemory.d.ts +2 -8
  270. package/dist/storage/domains/workflows/inmemory.d.ts.map +1 -1
  271. package/dist/storage/index.cjs +38 -38
  272. package/dist/storage/index.js +1 -1
  273. package/dist/storage/mock.d.ts +2 -8
  274. package/dist/storage/mock.d.ts.map +1 -1
  275. package/dist/storage/types.d.ts +9 -1
  276. package/dist/storage/types.d.ts.map +1 -1
  277. package/dist/stream/RunOutput.d.ts +1 -1
  278. package/dist/stream/aisdk/v4/input.d.ts +1 -1
  279. package/dist/stream/aisdk/v5/compat/content.d.ts +1 -1
  280. package/dist/stream/aisdk/v5/compat/content.d.ts.map +1 -1
  281. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts +1 -1
  282. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts.map +1 -1
  283. package/dist/stream/aisdk/v5/compat/ui-message.d.ts +1 -1
  284. package/dist/stream/aisdk/v5/compat/ui-message.d.ts.map +1 -1
  285. package/dist/stream/aisdk/v5/compat/validation.d.ts +1 -1
  286. package/dist/stream/aisdk/v5/compat/validation.d.ts.map +1 -1
  287. package/dist/stream/aisdk/v5/execute.d.ts +6 -6
  288. package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
  289. package/dist/stream/aisdk/v5/input.d.ts +1 -1
  290. package/dist/stream/aisdk/v5/input.d.ts.map +1 -1
  291. package/dist/stream/aisdk/v5/output-helpers.d.ts +12 -27
  292. package/dist/stream/aisdk/v5/output-helpers.d.ts.map +1 -1
  293. package/dist/stream/aisdk/v5/output.d.ts +41 -91
  294. package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
  295. package/dist/stream/aisdk/v5/transform.d.ts +1 -1
  296. package/dist/stream/aisdk/v5/transform.d.ts.map +1 -1
  297. package/dist/stream/base/input.d.ts +1 -1
  298. package/dist/stream/base/output.d.ts +16 -36
  299. package/dist/stream/base/output.d.ts.map +1 -1
  300. package/dist/stream/base/schema.d.ts +2 -2
  301. package/dist/stream/base/schema.d.ts.map +1 -1
  302. package/dist/stream/index.cjs +12 -12
  303. package/dist/stream/index.js +2 -2
  304. package/dist/stream/types.d.ts +32 -23
  305. package/dist/stream/types.d.ts.map +1 -1
  306. package/dist/test-utils/llm-mock.cjs +14587 -14
  307. package/dist/test-utils/llm-mock.cjs.map +1 -1
  308. package/dist/test-utils/llm-mock.d.ts +3 -3
  309. package/dist/test-utils/llm-mock.d.ts.map +1 -1
  310. package/dist/test-utils/llm-mock.js +14577 -4
  311. package/dist/test-utils/llm-mock.js.map +1 -1
  312. package/dist/token-6GSAFR2W-LTZ7QQUP.js +61 -0
  313. package/dist/token-6GSAFR2W-LTZ7QQUP.js.map +1 -0
  314. package/dist/token-6GSAFR2W-SGVIXFCP.cjs +63 -0
  315. package/dist/token-6GSAFR2W-SGVIXFCP.cjs.map +1 -0
  316. package/dist/token-6GSAFR2W-SPYPLMBM.js +61 -0
  317. package/dist/token-6GSAFR2W-SPYPLMBM.js.map +1 -0
  318. package/dist/token-6GSAFR2W-UEEINYAN.cjs +63 -0
  319. package/dist/token-6GSAFR2W-UEEINYAN.cjs.map +1 -0
  320. package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs +10 -0
  321. package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs.map +1 -0
  322. package/dist/token-util-NEHG7TUY-JRJTGTAB.js +8 -0
  323. package/dist/token-util-NEHG7TUY-JRJTGTAB.js.map +1 -0
  324. package/dist/token-util-NEHG7TUY-QTFZ26EN.js +8 -0
  325. package/dist/token-util-NEHG7TUY-QTFZ26EN.js.map +1 -0
  326. package/dist/token-util-NEHG7TUY-WZL2DNCG.cjs +10 -0
  327. package/dist/token-util-NEHG7TUY-WZL2DNCG.cjs.map +1 -0
  328. package/dist/tools/index.cjs +4 -4
  329. package/dist/tools/index.js +1 -1
  330. package/dist/tools/is-vercel-tool.cjs +2 -2
  331. package/dist/tools/is-vercel-tool.js +1 -1
  332. package/dist/tools/tool-builder/builder.d.ts +2 -1
  333. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  334. package/dist/tools/tool.d.ts.map +1 -1
  335. package/dist/tools/types.d.ts +5 -5
  336. package/dist/tools/types.d.ts.map +1 -1
  337. package/dist/utils.cjs +22 -22
  338. package/dist/utils.d.ts +5 -5
  339. package/dist/utils.d.ts.map +1 -1
  340. package/dist/utils.js +1 -1
  341. package/dist/vector/embed.d.ts +3 -2
  342. package/dist/vector/embed.d.ts.map +1 -1
  343. package/dist/vector/index.cjs +5316 -16
  344. package/dist/vector/index.cjs.map +1 -1
  345. package/dist/vector/index.js +5282 -4
  346. package/dist/vector/index.js.map +1 -1
  347. package/dist/vector/vector.d.ts +15 -2
  348. package/dist/vector/vector.d.ts.map +1 -1
  349. package/dist/voice/aisdk/speech.d.ts +1 -1
  350. package/dist/voice/aisdk/speech.d.ts.map +1 -1
  351. package/dist/voice/aisdk/transcription.d.ts +1 -1
  352. package/dist/voice/aisdk/transcription.d.ts.map +1 -1
  353. package/dist/voice/composite-voice.d.ts +1 -1
  354. package/dist/voice/composite-voice.d.ts.map +1 -1
  355. package/dist/voice/index.cjs +6 -6
  356. package/dist/voice/index.js +1 -1
  357. package/dist/workflows/constants.cjs +4 -4
  358. package/dist/workflows/constants.d.ts +1 -1
  359. package/dist/workflows/constants.d.ts.map +1 -1
  360. package/dist/workflows/constants.js +1 -1
  361. package/dist/workflows/default.d.ts +9 -16
  362. package/dist/workflows/default.d.ts.map +1 -1
  363. package/dist/workflows/evented/execution-engine.d.ts +3 -2
  364. package/dist/workflows/evented/execution-engine.d.ts.map +1 -1
  365. package/dist/workflows/evented/index.cjs +10 -10
  366. package/dist/workflows/evented/index.js +1 -1
  367. package/dist/workflows/evented/step-executor.d.ts +5 -1
  368. package/dist/workflows/evented/step-executor.d.ts.map +1 -1
  369. package/dist/workflows/evented/workflow-event-processor/index.d.ts +16 -1
  370. package/dist/workflows/evented/workflow-event-processor/index.d.ts.map +1 -1
  371. package/dist/workflows/evented/workflow.d.ts +20 -0
  372. package/dist/workflows/evented/workflow.d.ts.map +1 -1
  373. package/dist/workflows/execution-engine.d.ts +25 -2
  374. package/dist/workflows/execution-engine.d.ts.map +1 -1
  375. package/dist/workflows/handlers/control-flow.d.ts +6 -5
  376. package/dist/workflows/handlers/control-flow.d.ts.map +1 -1
  377. package/dist/workflows/handlers/entry.d.ts +5 -3
  378. package/dist/workflows/handlers/entry.d.ts.map +1 -1
  379. package/dist/workflows/handlers/sleep.d.ts +4 -3
  380. package/dist/workflows/handlers/sleep.d.ts.map +1 -1
  381. package/dist/workflows/handlers/step.d.ts +5 -3
  382. package/dist/workflows/handlers/step.d.ts.map +1 -1
  383. package/dist/workflows/index.cjs +26 -22
  384. package/dist/workflows/index.js +1 -1
  385. package/dist/workflows/step.d.ts +5 -4
  386. package/dist/workflows/step.d.ts.map +1 -1
  387. package/dist/workflows/types.d.ts +66 -14
  388. package/dist/workflows/types.d.ts.map +1 -1
  389. package/dist/workflows/utils.d.ts +11 -0
  390. package/dist/workflows/utils.d.ts.map +1 -1
  391. package/dist/workflows/workflow.d.ts +30 -9
  392. package/dist/workflows/workflow.d.ts.map +1 -1
  393. package/package.json +13 -14
  394. package/src/llm/model/provider-types.generated.d.ts +135 -11
  395. package/dist/agent/__tests__/mock-model.d.ts +0 -8
  396. package/dist/agent/__tests__/mock-model.d.ts.map +0 -1
  397. package/dist/agent/agent-types.test-d.d.ts +0 -2
  398. package/dist/agent/agent-types.test-d.d.ts.map +0 -1
  399. package/dist/ai-sdk.types.d.ts +0 -4705
  400. package/dist/chunk-2ULLRN4Y.js.map +0 -1
  401. package/dist/chunk-3E3ILV6T.cjs +0 -518
  402. package/dist/chunk-3E3ILV6T.cjs.map +0 -1
  403. package/dist/chunk-4JKEUSCC.cjs.map +0 -1
  404. package/dist/chunk-52RSUALV.cjs.map +0 -1
  405. package/dist/chunk-5PAEYE3Q.js +0 -513
  406. package/dist/chunk-5PAEYE3Q.js.map +0 -1
  407. package/dist/chunk-5Q6WAYEY.cjs.map +0 -1
  408. package/dist/chunk-7P6BNIJH.js.map +0 -1
  409. package/dist/chunk-ABJOUEVA.cjs +0 -10
  410. package/dist/chunk-ABJOUEVA.cjs.map +0 -1
  411. package/dist/chunk-BJXKH4LG.cjs.map +0 -1
  412. package/dist/chunk-BUKY6CTR.cjs.map +0 -1
  413. package/dist/chunk-C36YRTZ6.js.map +0 -1
  414. package/dist/chunk-CZEJQSWB.cjs.map +0 -1
  415. package/dist/chunk-F2GAJSBI.js.map +0 -1
  416. package/dist/chunk-IVV5TOMD.js.map +0 -1
  417. package/dist/chunk-JIGDJK2O.js.map +0 -1
  418. package/dist/chunk-JJ5O45LH.js.map +0 -1
  419. package/dist/chunk-MGCGWPQJ.cjs.map +0 -1
  420. package/dist/chunk-MRFUISXC.cjs.map +0 -1
  421. package/dist/chunk-NLNKQD2T.js +0 -7
  422. package/dist/chunk-NLNKQD2T.js.map +0 -1
  423. package/dist/chunk-O2BJW7YA.js.map +0 -1
  424. package/dist/chunk-OEIVMCWX.js.map +0 -1
  425. package/dist/chunk-PG5H6QIO.cjs.map +0 -1
  426. package/dist/chunk-PK2A5WBG.js.map +0 -1
  427. package/dist/chunk-QM5SRDJX.js.map +0 -1
  428. package/dist/chunk-S73Z3PBJ.cjs.map +0 -1
  429. package/dist/chunk-SCUWP4II.cjs.map +0 -1
  430. package/dist/chunk-SVLMF4UZ.cjs.map +0 -1
  431. package/dist/chunk-SXNQRJQD.js.map +0 -1
  432. package/dist/chunk-THZTRBFS.js.map +0 -1
  433. package/dist/chunk-TWH4PTDG.cjs.map +0 -1
  434. package/dist/chunk-U3XOLEPX.js.map +0 -1
  435. package/dist/chunk-US2U7ECW.js.map +0 -1
  436. package/dist/chunk-WTSZBHIZ.cjs.map +0 -1
  437. package/dist/chunk-YC6PJEPH.cjs.map +0 -1
  438. package/dist/llm/model/is-v2-model.d.ts +0 -3
  439. package/dist/llm/model/is-v2-model.d.ts.map +0 -1
  440. package/dist/models-dev-23RN2WHG.js +0 -3
  441. package/dist/models-dev-EO3SUIY2.cjs +0 -12
  442. package/dist/netlify-GXJ5D5DD.js +0 -3
  443. package/dist/netlify-KJLY3GFS.cjs +0 -12
  444. package/dist/provider-registry-3TG2KUD2.cjs +0 -40
  445. package/dist/provider-registry-F67Y6OF2.js +0 -3
  446. package/dist/tools/tool-stream-types.test-d.d.ts +0 -2
  447. package/dist/tools/tool-stream-types.test-d.d.ts.map +0 -1
@@ -0,0 +1,4888 @@
1
+ 'use strict';
2
+
3
+ var chunkEGHGFLL3_cjs = require('./chunk-EGHGFLL3.cjs');
4
+ var chunkTDM43G4I_cjs = require('./chunk-TDM43G4I.cjs');
5
+ var chunkF75EQ574_cjs = require('./chunk-F75EQ574.cjs');
6
+ var chunkFPDJ4XN6_cjs = require('./chunk-FPDJ4XN6.cjs');
7
+ var chunkTRUNX3AX_cjs = require('./chunk-TRUNX3AX.cjs');
8
+ var chunkHWMMIRIF_cjs = require('./chunk-HWMMIRIF.cjs');
9
+ var chunkUVHSM2GU_cjs = require('./chunk-UVHSM2GU.cjs');
10
+ var chunkUIGRFDO6_cjs = require('./chunk-UIGRFDO6.cjs');
11
+ var crypto = require('crypto');
12
+ var v4 = require('zod/v4');
13
+
14
+ var openaiErrorDataSchema = v4.z.object({
15
+ error: v4.z.object({
16
+ message: v4.z.string(),
17
+ // The additional information below is handled loosely to support
18
+ // OpenAI-compatible providers that have slightly different error
19
+ // responses:
20
+ type: v4.z.string().nullish(),
21
+ param: v4.z.any().nullish(),
22
+ code: v4.z.union([v4.z.string(), v4.z.number()]).nullish()
23
+ })
24
+ });
25
+ var openaiFailedResponseHandler = chunkF75EQ574_cjs.createJsonErrorResponseHandler({
26
+ errorSchema: openaiErrorDataSchema,
27
+ errorToMessage: (data) => data.error.message
28
+ });
29
+ function convertToOpenAIChatMessages({
30
+ prompt,
31
+ systemMessageMode = "system"
32
+ }) {
33
+ const messages = [];
34
+ const warnings = [];
35
+ for (const { role, content } of prompt) {
36
+ switch (role) {
37
+ case "system": {
38
+ switch (systemMessageMode) {
39
+ case "system": {
40
+ messages.push({ role: "system", content });
41
+ break;
42
+ }
43
+ case "developer": {
44
+ messages.push({ role: "developer", content });
45
+ break;
46
+ }
47
+ case "remove": {
48
+ warnings.push({
49
+ type: "other",
50
+ message: "system messages are removed for this model"
51
+ });
52
+ break;
53
+ }
54
+ default: {
55
+ const _exhaustiveCheck = systemMessageMode;
56
+ throw new Error(
57
+ `Unsupported system message mode: ${_exhaustiveCheck}`
58
+ );
59
+ }
60
+ }
61
+ break;
62
+ }
63
+ case "user": {
64
+ if (content.length === 1 && content[0].type === "text") {
65
+ messages.push({ role: "user", content: content[0].text });
66
+ break;
67
+ }
68
+ messages.push({
69
+ role: "user",
70
+ content: content.map((part, index) => {
71
+ var _a, _b, _c;
72
+ switch (part.type) {
73
+ case "text": {
74
+ return { type: "text", text: part.text };
75
+ }
76
+ case "file": {
77
+ if (part.mediaType.startsWith("image/")) {
78
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
79
+ return {
80
+ type: "image_url",
81
+ image_url: {
82
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${chunkF75EQ574_cjs.convertToBase64(part.data)}`,
83
+ // OpenAI specific extension: image detail
84
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
85
+ }
86
+ };
87
+ } else if (part.mediaType.startsWith("audio/")) {
88
+ if (part.data instanceof URL) {
89
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
90
+ functionality: "audio file parts with URLs"
91
+ });
92
+ }
93
+ switch (part.mediaType) {
94
+ case "audio/wav": {
95
+ return {
96
+ type: "input_audio",
97
+ input_audio: {
98
+ data: chunkF75EQ574_cjs.convertToBase64(part.data),
99
+ format: "wav"
100
+ }
101
+ };
102
+ }
103
+ case "audio/mp3":
104
+ case "audio/mpeg": {
105
+ return {
106
+ type: "input_audio",
107
+ input_audio: {
108
+ data: chunkF75EQ574_cjs.convertToBase64(part.data),
109
+ format: "mp3"
110
+ }
111
+ };
112
+ }
113
+ default: {
114
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
115
+ functionality: `audio content parts with media type ${part.mediaType}`
116
+ });
117
+ }
118
+ }
119
+ } else if (part.mediaType === "application/pdf") {
120
+ if (part.data instanceof URL) {
121
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
122
+ functionality: "PDF file parts with URLs"
123
+ });
124
+ }
125
+ return {
126
+ type: "file",
127
+ file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
128
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
129
+ file_data: `data:application/pdf;base64,${chunkF75EQ574_cjs.convertToBase64(part.data)}`
130
+ }
131
+ };
132
+ } else {
133
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
134
+ functionality: `file part media type ${part.mediaType}`
135
+ });
136
+ }
137
+ }
138
+ }
139
+ })
140
+ });
141
+ break;
142
+ }
143
+ case "assistant": {
144
+ let text = "";
145
+ const toolCalls = [];
146
+ for (const part of content) {
147
+ switch (part.type) {
148
+ case "text": {
149
+ text += part.text;
150
+ break;
151
+ }
152
+ case "tool-call": {
153
+ toolCalls.push({
154
+ id: part.toolCallId,
155
+ type: "function",
156
+ function: {
157
+ name: part.toolName,
158
+ arguments: JSON.stringify(part.input)
159
+ }
160
+ });
161
+ break;
162
+ }
163
+ }
164
+ }
165
+ messages.push({
166
+ role: "assistant",
167
+ content: text,
168
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
169
+ });
170
+ break;
171
+ }
172
+ case "tool": {
173
+ for (const toolResponse of content) {
174
+ const output = toolResponse.output;
175
+ let contentValue;
176
+ switch (output.type) {
177
+ case "text":
178
+ case "error-text":
179
+ contentValue = output.value;
180
+ break;
181
+ case "content":
182
+ case "json":
183
+ case "error-json":
184
+ contentValue = JSON.stringify(output.value);
185
+ break;
186
+ }
187
+ messages.push({
188
+ role: "tool",
189
+ tool_call_id: toolResponse.toolCallId,
190
+ content: contentValue
191
+ });
192
+ }
193
+ break;
194
+ }
195
+ default: {
196
+ const _exhaustiveCheck = role;
197
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
198
+ }
199
+ }
200
+ }
201
+ return { messages, warnings };
202
+ }
203
+ function getResponseMetadata({
204
+ id,
205
+ model,
206
+ created
207
+ }) {
208
+ return {
209
+ id: id != null ? id : void 0,
210
+ modelId: model != null ? model : void 0,
211
+ timestamp: created ? new Date(created * 1e3) : void 0
212
+ };
213
+ }
214
+ function mapOpenAIFinishReason(finishReason) {
215
+ switch (finishReason) {
216
+ case "stop":
217
+ return "stop";
218
+ case "length":
219
+ return "length";
220
+ case "content_filter":
221
+ return "content-filter";
222
+ case "function_call":
223
+ case "tool_calls":
224
+ return "tool-calls";
225
+ default:
226
+ return "unknown";
227
+ }
228
+ }
229
+ var openaiChatResponseSchema = chunkF75EQ574_cjs.lazyValidator(
230
+ () => chunkF75EQ574_cjs.zodSchema(
231
+ v4.z.object({
232
+ id: v4.z.string().nullish(),
233
+ created: v4.z.number().nullish(),
234
+ model: v4.z.string().nullish(),
235
+ choices: v4.z.array(
236
+ v4.z.object({
237
+ message: v4.z.object({
238
+ role: v4.z.literal("assistant").nullish(),
239
+ content: v4.z.string().nullish(),
240
+ tool_calls: v4.z.array(
241
+ v4.z.object({
242
+ id: v4.z.string().nullish(),
243
+ type: v4.z.literal("function"),
244
+ function: v4.z.object({
245
+ name: v4.z.string(),
246
+ arguments: v4.z.string()
247
+ })
248
+ })
249
+ ).nullish(),
250
+ annotations: v4.z.array(
251
+ v4.z.object({
252
+ type: v4.z.literal("url_citation"),
253
+ start_index: v4.z.number(),
254
+ end_index: v4.z.number(),
255
+ url: v4.z.string(),
256
+ title: v4.z.string()
257
+ })
258
+ ).nullish()
259
+ }),
260
+ index: v4.z.number(),
261
+ logprobs: v4.z.object({
262
+ content: v4.z.array(
263
+ v4.z.object({
264
+ token: v4.z.string(),
265
+ logprob: v4.z.number(),
266
+ top_logprobs: v4.z.array(
267
+ v4.z.object({
268
+ token: v4.z.string(),
269
+ logprob: v4.z.number()
270
+ })
271
+ )
272
+ })
273
+ ).nullish()
274
+ }).nullish(),
275
+ finish_reason: v4.z.string().nullish()
276
+ })
277
+ ),
278
+ usage: v4.z.object({
279
+ prompt_tokens: v4.z.number().nullish(),
280
+ completion_tokens: v4.z.number().nullish(),
281
+ total_tokens: v4.z.number().nullish(),
282
+ prompt_tokens_details: v4.z.object({
283
+ cached_tokens: v4.z.number().nullish()
284
+ }).nullish(),
285
+ completion_tokens_details: v4.z.object({
286
+ reasoning_tokens: v4.z.number().nullish(),
287
+ accepted_prediction_tokens: v4.z.number().nullish(),
288
+ rejected_prediction_tokens: v4.z.number().nullish()
289
+ }).nullish()
290
+ }).nullish()
291
+ })
292
+ )
293
+ );
294
+ var openaiChatChunkSchema = chunkF75EQ574_cjs.lazyValidator(
295
+ () => chunkF75EQ574_cjs.zodSchema(
296
+ v4.z.union([
297
+ v4.z.object({
298
+ id: v4.z.string().nullish(),
299
+ created: v4.z.number().nullish(),
300
+ model: v4.z.string().nullish(),
301
+ choices: v4.z.array(
302
+ v4.z.object({
303
+ delta: v4.z.object({
304
+ role: v4.z.enum(["assistant"]).nullish(),
305
+ content: v4.z.string().nullish(),
306
+ tool_calls: v4.z.array(
307
+ v4.z.object({
308
+ index: v4.z.number(),
309
+ id: v4.z.string().nullish(),
310
+ type: v4.z.literal("function").nullish(),
311
+ function: v4.z.object({
312
+ name: v4.z.string().nullish(),
313
+ arguments: v4.z.string().nullish()
314
+ })
315
+ })
316
+ ).nullish(),
317
+ annotations: v4.z.array(
318
+ v4.z.object({
319
+ type: v4.z.literal("url_citation"),
320
+ start_index: v4.z.number(),
321
+ end_index: v4.z.number(),
322
+ url: v4.z.string(),
323
+ title: v4.z.string()
324
+ })
325
+ ).nullish()
326
+ }).nullish(),
327
+ logprobs: v4.z.object({
328
+ content: v4.z.array(
329
+ v4.z.object({
330
+ token: v4.z.string(),
331
+ logprob: v4.z.number(),
332
+ top_logprobs: v4.z.array(
333
+ v4.z.object({
334
+ token: v4.z.string(),
335
+ logprob: v4.z.number()
336
+ })
337
+ )
338
+ })
339
+ ).nullish()
340
+ }).nullish(),
341
+ finish_reason: v4.z.string().nullish(),
342
+ index: v4.z.number()
343
+ })
344
+ ),
345
+ usage: v4.z.object({
346
+ prompt_tokens: v4.z.number().nullish(),
347
+ completion_tokens: v4.z.number().nullish(),
348
+ total_tokens: v4.z.number().nullish(),
349
+ prompt_tokens_details: v4.z.object({
350
+ cached_tokens: v4.z.number().nullish()
351
+ }).nullish(),
352
+ completion_tokens_details: v4.z.object({
353
+ reasoning_tokens: v4.z.number().nullish(),
354
+ accepted_prediction_tokens: v4.z.number().nullish(),
355
+ rejected_prediction_tokens: v4.z.number().nullish()
356
+ }).nullish()
357
+ }).nullish()
358
+ }),
359
+ openaiErrorDataSchema
360
+ ])
361
+ )
362
+ );
363
+ var openaiChatLanguageModelOptions = chunkF75EQ574_cjs.lazyValidator(
364
+ () => chunkF75EQ574_cjs.zodSchema(
365
+ v4.z.object({
366
+ /**
367
+ * Modify the likelihood of specified tokens appearing in the completion.
368
+ *
369
+ * Accepts a JSON object that maps tokens (specified by their token ID in
370
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
371
+ */
372
+ logitBias: v4.z.record(v4.z.coerce.number(), v4.z.number()).optional(),
373
+ /**
374
+ * Return the log probabilities of the tokens.
375
+ *
376
+ * Setting to true will return the log probabilities of the tokens that
377
+ * were generated.
378
+ *
379
+ * Setting to a number will return the log probabilities of the top n
380
+ * tokens that were generated.
381
+ */
382
+ logprobs: v4.z.union([v4.z.boolean(), v4.z.number()]).optional(),
383
+ /**
384
+ * Whether to enable parallel function calling during tool use. Default to true.
385
+ */
386
+ parallelToolCalls: v4.z.boolean().optional(),
387
+ /**
388
+ * A unique identifier representing your end-user, which can help OpenAI to
389
+ * monitor and detect abuse.
390
+ */
391
+ user: v4.z.string().optional(),
392
+ /**
393
+ * Reasoning effort for reasoning models. Defaults to `medium`.
394
+ */
395
+ reasoningEffort: v4.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
396
+ /**
397
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
398
+ */
399
+ maxCompletionTokens: v4.z.number().optional(),
400
+ /**
401
+ * Whether to enable persistence in responses API.
402
+ */
403
+ store: v4.z.boolean().optional(),
404
+ /**
405
+ * Metadata to associate with the request.
406
+ */
407
+ metadata: v4.z.record(v4.z.string().max(64), v4.z.string().max(512)).optional(),
408
+ /**
409
+ * Parameters for prediction mode.
410
+ */
411
+ prediction: v4.z.record(v4.z.string(), v4.z.any()).optional(),
412
+ /**
413
+ * Whether to use structured outputs.
414
+ *
415
+ * @default true
416
+ */
417
+ structuredOutputs: v4.z.boolean().optional(),
418
+ /**
419
+ * Service tier for the request.
420
+ * - 'auto': Default service tier. The request will be processed with the service tier configured in the
421
+ * Project settings. Unless otherwise configured, the Project will use 'default'.
422
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
423
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
424
+ * - 'default': The request will be processed with the standard pricing and performance for the selected model.
425
+ *
426
+ * @default 'auto'
427
+ */
428
+ serviceTier: v4.z.enum(["auto", "flex", "priority", "default"]).optional(),
429
+ /**
430
+ * Whether to use strict JSON schema validation.
431
+ *
432
+ * @default false
433
+ */
434
+ strictJsonSchema: v4.z.boolean().optional(),
435
+ /**
436
+ * Controls the verbosity of the model's responses.
437
+ * Lower values will result in more concise responses, while higher values will result in more verbose responses.
438
+ */
439
+ textVerbosity: v4.z.enum(["low", "medium", "high"]).optional(),
440
+ /**
441
+ * A cache key for prompt caching. Allows manual control over prompt caching behavior.
442
+ * Useful for improving cache hit rates and working around automatic caching issues.
443
+ */
444
+ promptCacheKey: v4.z.string().optional(),
445
+ /**
446
+ * The retention policy for the prompt cache.
447
+ * - 'in_memory': Default. Standard prompt caching behavior.
448
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
449
+ * Currently only available for 5.1 series models.
450
+ *
451
+ * @default 'in_memory'
452
+ */
453
+ promptCacheRetention: v4.z.enum(["in_memory", "24h"]).optional(),
454
+ /**
455
+ * A stable identifier used to help detect users of your application
456
+ * that may be violating OpenAI's usage policies. The IDs should be a
457
+ * string that uniquely identifies each user. We recommend hashing their
458
+ * username or email address, in order to avoid sending us any identifying
459
+ * information.
460
+ */
461
+ safetyIdentifier: v4.z.string().optional()
462
+ })
463
+ )
464
+ );
465
+ function prepareChatTools({
466
+ tools,
467
+ toolChoice,
468
+ structuredOutputs,
469
+ strictJsonSchema
470
+ }) {
471
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
472
+ const toolWarnings = [];
473
+ if (tools == null) {
474
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
475
+ }
476
+ const openaiTools = [];
477
+ for (const tool of tools) {
478
+ switch (tool.type) {
479
+ case "function":
480
+ openaiTools.push({
481
+ type: "function",
482
+ function: {
483
+ name: tool.name,
484
+ description: tool.description,
485
+ parameters: tool.inputSchema,
486
+ strict: structuredOutputs ? strictJsonSchema : void 0
487
+ }
488
+ });
489
+ break;
490
+ default:
491
+ toolWarnings.push({ type: "unsupported-tool", tool });
492
+ break;
493
+ }
494
+ }
495
+ if (toolChoice == null) {
496
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
497
+ }
498
+ const type = toolChoice.type;
499
+ switch (type) {
500
+ case "auto":
501
+ case "none":
502
+ case "required":
503
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
504
+ case "tool":
505
+ return {
506
+ tools: openaiTools,
507
+ toolChoice: {
508
+ type: "function",
509
+ function: {
510
+ name: toolChoice.toolName
511
+ }
512
+ },
513
+ toolWarnings
514
+ };
515
+ default: {
516
+ const _exhaustiveCheck = type;
517
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
518
+ functionality: `tool choice type: ${_exhaustiveCheck}`
519
+ });
520
+ }
521
+ }
522
+ }
523
+ var OpenAIChatLanguageModel = class {
524
+ constructor(modelId, config) {
525
+ this.specificationVersion = "v2";
526
+ this.supportedUrls = {
527
+ "image/*": [/^https?:\/\/.*$/]
528
+ };
529
+ this.modelId = modelId;
530
+ this.config = config;
531
+ }
532
+ get provider() {
533
+ return this.config.provider;
534
+ }
535
+ async getArgs({
536
+ prompt,
537
+ maxOutputTokens,
538
+ temperature,
539
+ topP,
540
+ topK,
541
+ frequencyPenalty,
542
+ presencePenalty,
543
+ stopSequences,
544
+ responseFormat,
545
+ seed,
546
+ tools,
547
+ toolChoice,
548
+ providerOptions
549
+ }) {
550
+ var _a, _b, _c, _d;
551
+ const warnings = [];
552
+ const openaiOptions = (_a = await chunkF75EQ574_cjs.parseProviderOptions({
553
+ provider: "openai",
554
+ providerOptions,
555
+ schema: openaiChatLanguageModelOptions
556
+ })) != null ? _a : {};
557
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
558
+ if (topK != null) {
559
+ warnings.push({
560
+ type: "unsupported-setting",
561
+ setting: "topK"
562
+ });
563
+ }
564
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
565
+ warnings.push({
566
+ type: "unsupported-setting",
567
+ setting: "responseFormat",
568
+ details: "JSON response format schema is only supported with structuredOutputs"
569
+ });
570
+ }
571
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
572
+ {
573
+ prompt,
574
+ systemMessageMode: getSystemMessageMode(this.modelId)
575
+ }
576
+ );
577
+ warnings.push(...messageWarnings);
578
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
579
+ const baseArgs = {
580
+ // model id:
581
+ model: this.modelId,
582
+ // model specific settings:
583
+ logit_bias: openaiOptions.logitBias,
584
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
585
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
586
+ user: openaiOptions.user,
587
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
588
+ // standardized settings:
589
+ max_tokens: maxOutputTokens,
590
+ temperature,
591
+ top_p: topP,
592
+ frequency_penalty: frequencyPenalty,
593
+ presence_penalty: presencePenalty,
594
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
595
+ type: "json_schema",
596
+ json_schema: {
597
+ schema: responseFormat.schema,
598
+ strict: strictJsonSchema,
599
+ name: (_d = responseFormat.name) != null ? _d : "response",
600
+ description: responseFormat.description
601
+ }
602
+ } : { type: "json_object" } : void 0,
603
+ stop: stopSequences,
604
+ seed,
605
+ verbosity: openaiOptions.textVerbosity,
606
+ // openai specific settings:
607
+ // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
608
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
609
+ store: openaiOptions.store,
610
+ metadata: openaiOptions.metadata,
611
+ prediction: openaiOptions.prediction,
612
+ reasoning_effort: openaiOptions.reasoningEffort,
613
+ service_tier: openaiOptions.serviceTier,
614
+ prompt_cache_key: openaiOptions.promptCacheKey,
615
+ prompt_cache_retention: openaiOptions.promptCacheRetention,
616
+ safety_identifier: openaiOptions.safetyIdentifier,
617
+ // messages:
618
+ messages
619
+ };
620
+ if (isReasoningModel(this.modelId)) {
621
+ if (baseArgs.temperature != null) {
622
+ baseArgs.temperature = void 0;
623
+ warnings.push({
624
+ type: "unsupported-setting",
625
+ setting: "temperature",
626
+ details: "temperature is not supported for reasoning models"
627
+ });
628
+ }
629
+ if (baseArgs.top_p != null) {
630
+ baseArgs.top_p = void 0;
631
+ warnings.push({
632
+ type: "unsupported-setting",
633
+ setting: "topP",
634
+ details: "topP is not supported for reasoning models"
635
+ });
636
+ }
637
+ if (baseArgs.frequency_penalty != null) {
638
+ baseArgs.frequency_penalty = void 0;
639
+ warnings.push({
640
+ type: "unsupported-setting",
641
+ setting: "frequencyPenalty",
642
+ details: "frequencyPenalty is not supported for reasoning models"
643
+ });
644
+ }
645
+ if (baseArgs.presence_penalty != null) {
646
+ baseArgs.presence_penalty = void 0;
647
+ warnings.push({
648
+ type: "unsupported-setting",
649
+ setting: "presencePenalty",
650
+ details: "presencePenalty is not supported for reasoning models"
651
+ });
652
+ }
653
+ if (baseArgs.logit_bias != null) {
654
+ baseArgs.logit_bias = void 0;
655
+ warnings.push({
656
+ type: "other",
657
+ message: "logitBias is not supported for reasoning models"
658
+ });
659
+ }
660
+ if (baseArgs.logprobs != null) {
661
+ baseArgs.logprobs = void 0;
662
+ warnings.push({
663
+ type: "other",
664
+ message: "logprobs is not supported for reasoning models"
665
+ });
666
+ }
667
+ if (baseArgs.top_logprobs != null) {
668
+ baseArgs.top_logprobs = void 0;
669
+ warnings.push({
670
+ type: "other",
671
+ message: "topLogprobs is not supported for reasoning models"
672
+ });
673
+ }
674
+ if (baseArgs.max_tokens != null) {
675
+ if (baseArgs.max_completion_tokens == null) {
676
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
677
+ }
678
+ baseArgs.max_tokens = void 0;
679
+ }
680
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
681
+ if (baseArgs.temperature != null) {
682
+ baseArgs.temperature = void 0;
683
+ warnings.push({
684
+ type: "unsupported-setting",
685
+ setting: "temperature",
686
+ details: "temperature is not supported for the search preview models and has been removed."
687
+ });
688
+ }
689
+ }
690
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
691
+ warnings.push({
692
+ type: "unsupported-setting",
693
+ setting: "serviceTier",
694
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
695
+ });
696
+ baseArgs.service_tier = void 0;
697
+ }
698
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
699
+ warnings.push({
700
+ type: "unsupported-setting",
701
+ setting: "serviceTier",
702
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
703
+ });
704
+ baseArgs.service_tier = void 0;
705
+ }
706
+ const {
707
+ tools: openaiTools,
708
+ toolChoice: openaiToolChoice,
709
+ toolWarnings
710
+ } = prepareChatTools({
711
+ tools,
712
+ toolChoice,
713
+ structuredOutputs,
714
+ strictJsonSchema
715
+ });
716
+ return {
717
+ args: {
718
+ ...baseArgs,
719
+ tools: openaiTools,
720
+ tool_choice: openaiToolChoice
721
+ },
722
+ warnings: [...warnings, ...toolWarnings]
723
+ };
724
+ }
725
+ async doGenerate(options) {
726
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
727
+ const { args: body, warnings } = await this.getArgs(options);
728
+ const {
729
+ responseHeaders,
730
+ value: response,
731
+ rawValue: rawResponse
732
+ } = await chunkF75EQ574_cjs.postJsonToApi({
733
+ url: this.config.url({
734
+ path: "/chat/completions",
735
+ modelId: this.modelId
736
+ }),
737
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
738
+ body,
739
+ failedResponseHandler: openaiFailedResponseHandler,
740
+ successfulResponseHandler: chunkF75EQ574_cjs.createJsonResponseHandler(
741
+ openaiChatResponseSchema
742
+ ),
743
+ abortSignal: options.abortSignal,
744
+ fetch: this.config.fetch
745
+ });
746
+ const choice = response.choices[0];
747
+ const content = [];
748
+ const text = choice.message.content;
749
+ if (text != null && text.length > 0) {
750
+ content.push({ type: "text", text });
751
+ }
752
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
753
+ content.push({
754
+ type: "tool-call",
755
+ toolCallId: (_b = toolCall.id) != null ? _b : chunkF75EQ574_cjs.generateId(),
756
+ toolName: toolCall.function.name,
757
+ input: toolCall.function.arguments
758
+ });
759
+ }
760
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
761
+ content.push({
762
+ type: "source",
763
+ sourceType: "url",
764
+ id: chunkF75EQ574_cjs.generateId(),
765
+ url: annotation.url,
766
+ title: annotation.title
767
+ });
768
+ }
769
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
770
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
771
+ const providerMetadata = { openai: {} };
772
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
773
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
774
+ }
775
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
776
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
777
+ }
778
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
779
+ providerMetadata.openai.logprobs = choice.logprobs.content;
780
+ }
781
+ return {
782
+ content,
783
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
784
+ usage: {
785
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
786
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
787
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
788
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
789
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
790
+ },
791
+ request: { body },
792
+ response: {
793
+ ...getResponseMetadata(response),
794
+ headers: responseHeaders,
795
+ body: rawResponse
796
+ },
797
+ warnings,
798
+ providerMetadata
799
+ };
800
+ }
801
+ async doStream(options) {
802
+ const { args, warnings } = await this.getArgs(options);
803
+ const body = {
804
+ ...args,
805
+ stream: true,
806
+ stream_options: {
807
+ include_usage: true
808
+ }
809
+ };
810
+ const { responseHeaders, value: response } = await chunkF75EQ574_cjs.postJsonToApi({
811
+ url: this.config.url({
812
+ path: "/chat/completions",
813
+ modelId: this.modelId
814
+ }),
815
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
816
+ body,
817
+ failedResponseHandler: openaiFailedResponseHandler,
818
+ successfulResponseHandler: chunkF75EQ574_cjs.createEventSourceResponseHandler(
819
+ openaiChatChunkSchema
820
+ ),
821
+ abortSignal: options.abortSignal,
822
+ fetch: this.config.fetch
823
+ });
824
+ const toolCalls = [];
825
+ let finishReason = "unknown";
826
+ const usage = {
827
+ inputTokens: void 0,
828
+ outputTokens: void 0,
829
+ totalTokens: void 0
830
+ };
831
+ let metadataExtracted = false;
832
+ let isActiveText = false;
833
+ const providerMetadata = { openai: {} };
834
+ return {
835
+ stream: response.pipeThrough(
836
+ new TransformStream({
837
+ start(controller) {
838
+ controller.enqueue({ type: "stream-start", warnings });
839
+ },
840
+ transform(chunk, controller) {
841
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
842
+ if (options.includeRawChunks) {
843
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
844
+ }
845
+ if (!chunk.success) {
846
+ finishReason = "error";
847
+ controller.enqueue({ type: "error", error: chunk.error });
848
+ return;
849
+ }
850
+ const value = chunk.value;
851
+ if ("error" in value) {
852
+ finishReason = "error";
853
+ controller.enqueue({ type: "error", error: value.error });
854
+ return;
855
+ }
856
+ if (!metadataExtracted) {
857
+ const metadata = getResponseMetadata(value);
858
+ if (Object.values(metadata).some(Boolean)) {
859
+ metadataExtracted = true;
860
+ controller.enqueue({
861
+ type: "response-metadata",
862
+ ...getResponseMetadata(value)
863
+ });
864
+ }
865
+ }
866
+ if (value.usage != null) {
867
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
868
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
869
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
870
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
871
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
872
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
873
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
874
+ }
875
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
876
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
877
+ }
878
+ }
879
+ const choice = value.choices[0];
880
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
881
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
882
+ }
883
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
884
+ providerMetadata.openai.logprobs = choice.logprobs.content;
885
+ }
886
+ if ((choice == null ? void 0 : choice.delta) == null) {
887
+ return;
888
+ }
889
+ const delta = choice.delta;
890
+ if (delta.content != null) {
891
+ if (!isActiveText) {
892
+ controller.enqueue({ type: "text-start", id: "0" });
893
+ isActiveText = true;
894
+ }
895
+ controller.enqueue({
896
+ type: "text-delta",
897
+ id: "0",
898
+ delta: delta.content
899
+ });
900
+ }
901
+ if (delta.tool_calls != null) {
902
+ for (const toolCallDelta of delta.tool_calls) {
903
+ const index = toolCallDelta.index;
904
+ if (toolCalls[index] == null) {
905
+ if (toolCallDelta.type !== "function") {
906
+ throw new chunkF75EQ574_cjs.InvalidResponseDataError({
907
+ data: toolCallDelta,
908
+ message: `Expected 'function' type.`
909
+ });
910
+ }
911
+ if (toolCallDelta.id == null) {
912
+ throw new chunkF75EQ574_cjs.InvalidResponseDataError({
913
+ data: toolCallDelta,
914
+ message: `Expected 'id' to be a string.`
915
+ });
916
+ }
917
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
918
+ throw new chunkF75EQ574_cjs.InvalidResponseDataError({
919
+ data: toolCallDelta,
920
+ message: `Expected 'function.name' to be a string.`
921
+ });
922
+ }
923
+ controller.enqueue({
924
+ type: "tool-input-start",
925
+ id: toolCallDelta.id,
926
+ toolName: toolCallDelta.function.name
927
+ });
928
+ toolCalls[index] = {
929
+ id: toolCallDelta.id,
930
+ type: "function",
931
+ function: {
932
+ name: toolCallDelta.function.name,
933
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
934
+ },
935
+ hasFinished: false
936
+ };
937
+ const toolCall2 = toolCalls[index];
938
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
939
+ if (toolCall2.function.arguments.length > 0) {
940
+ controller.enqueue({
941
+ type: "tool-input-delta",
942
+ id: toolCall2.id,
943
+ delta: toolCall2.function.arguments
944
+ });
945
+ }
946
+ if (chunkF75EQ574_cjs.isParsableJson(toolCall2.function.arguments)) {
947
+ controller.enqueue({
948
+ type: "tool-input-end",
949
+ id: toolCall2.id
950
+ });
951
+ controller.enqueue({
952
+ type: "tool-call",
953
+ toolCallId: (_q = toolCall2.id) != null ? _q : chunkF75EQ574_cjs.generateId(),
954
+ toolName: toolCall2.function.name,
955
+ input: toolCall2.function.arguments
956
+ });
957
+ toolCall2.hasFinished = true;
958
+ }
959
+ }
960
+ continue;
961
+ }
962
+ const toolCall = toolCalls[index];
963
+ if (toolCall.hasFinished) {
964
+ continue;
965
+ }
966
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
967
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
968
+ }
969
+ controller.enqueue({
970
+ type: "tool-input-delta",
971
+ id: toolCall.id,
972
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
973
+ });
974
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && chunkF75EQ574_cjs.isParsableJson(toolCall.function.arguments)) {
975
+ controller.enqueue({
976
+ type: "tool-input-end",
977
+ id: toolCall.id
978
+ });
979
+ controller.enqueue({
980
+ type: "tool-call",
981
+ toolCallId: (_x = toolCall.id) != null ? _x : chunkF75EQ574_cjs.generateId(),
982
+ toolName: toolCall.function.name,
983
+ input: toolCall.function.arguments
984
+ });
985
+ toolCall.hasFinished = true;
986
+ }
987
+ }
988
+ }
989
+ if (delta.annotations != null) {
990
+ for (const annotation of delta.annotations) {
991
+ controller.enqueue({
992
+ type: "source",
993
+ sourceType: "url",
994
+ id: chunkF75EQ574_cjs.generateId(),
995
+ url: annotation.url,
996
+ title: annotation.title
997
+ });
998
+ }
999
+ }
1000
+ },
1001
+ flush(controller) {
1002
+ if (isActiveText) {
1003
+ controller.enqueue({ type: "text-end", id: "0" });
1004
+ }
1005
+ controller.enqueue({
1006
+ type: "finish",
1007
+ finishReason,
1008
+ usage,
1009
+ ...providerMetadata != null ? { providerMetadata } : {}
1010
+ });
1011
+ }
1012
+ })
1013
+ ),
1014
+ request: { body },
1015
+ response: { headers: responseHeaders }
1016
+ };
1017
+ }
1018
+ };
1019
+ function isReasoningModel(modelId) {
1020
+ return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
1021
+ }
1022
+ function supportsFlexProcessing(modelId) {
1023
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
1024
+ }
1025
+ function supportsPriorityProcessing(modelId) {
1026
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1027
+ }
1028
+ function getSystemMessageMode(modelId) {
1029
+ var _a, _b;
1030
+ if (!isReasoningModel(modelId)) {
1031
+ return "system";
1032
+ }
1033
+ return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
1034
+ }
1035
+ var reasoningModels = {
1036
+ o3: {
1037
+ systemMessageMode: "developer"
1038
+ },
1039
+ "o3-2025-04-16": {
1040
+ systemMessageMode: "developer"
1041
+ },
1042
+ "o3-mini": {
1043
+ systemMessageMode: "developer"
1044
+ },
1045
+ "o3-mini-2025-01-31": {
1046
+ systemMessageMode: "developer"
1047
+ },
1048
+ "o4-mini": {
1049
+ systemMessageMode: "developer"
1050
+ },
1051
+ "o4-mini-2025-04-16": {
1052
+ systemMessageMode: "developer"
1053
+ }
1054
+ };
1055
+ function convertToOpenAICompletionPrompt({
1056
+ prompt,
1057
+ user = "user",
1058
+ assistant = "assistant"
1059
+ }) {
1060
+ let text = "";
1061
+ if (prompt[0].role === "system") {
1062
+ text += `${prompt[0].content}
1063
+
1064
+ `;
1065
+ prompt = prompt.slice(1);
1066
+ }
1067
+ for (const { role, content } of prompt) {
1068
+ switch (role) {
1069
+ case "system": {
1070
+ throw new chunkF75EQ574_cjs.InvalidPromptError({
1071
+ message: "Unexpected system message in prompt: ${content}",
1072
+ prompt
1073
+ });
1074
+ }
1075
+ case "user": {
1076
+ const userMessage = content.map((part) => {
1077
+ switch (part.type) {
1078
+ case "text": {
1079
+ return part.text;
1080
+ }
1081
+ }
1082
+ }).filter(Boolean).join("");
1083
+ text += `${user}:
1084
+ ${userMessage}
1085
+
1086
+ `;
1087
+ break;
1088
+ }
1089
+ case "assistant": {
1090
+ const assistantMessage = content.map((part) => {
1091
+ switch (part.type) {
1092
+ case "text": {
1093
+ return part.text;
1094
+ }
1095
+ case "tool-call": {
1096
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
1097
+ functionality: "tool-call messages"
1098
+ });
1099
+ }
1100
+ }
1101
+ }).join("");
1102
+ text += `${assistant}:
1103
+ ${assistantMessage}
1104
+
1105
+ `;
1106
+ break;
1107
+ }
1108
+ case "tool": {
1109
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
1110
+ functionality: "tool messages"
1111
+ });
1112
+ }
1113
+ default: {
1114
+ const _exhaustiveCheck = role;
1115
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1116
+ }
1117
+ }
1118
+ }
1119
+ text += `${assistant}:
1120
+ `;
1121
+ return {
1122
+ prompt: text,
1123
+ stopSequences: [`
1124
+ ${user}:`]
1125
+ };
1126
+ }
1127
+ function getResponseMetadata2({
1128
+ id,
1129
+ model,
1130
+ created
1131
+ }) {
1132
+ return {
1133
+ id: id != null ? id : void 0,
1134
+ modelId: model != null ? model : void 0,
1135
+ timestamp: created != null ? new Date(created * 1e3) : void 0
1136
+ };
1137
+ }
1138
+ function mapOpenAIFinishReason2(finishReason) {
1139
+ switch (finishReason) {
1140
+ case "stop":
1141
+ return "stop";
1142
+ case "length":
1143
+ return "length";
1144
+ case "content_filter":
1145
+ return "content-filter";
1146
+ case "function_call":
1147
+ case "tool_calls":
1148
+ return "tool-calls";
1149
+ default:
1150
+ return "unknown";
1151
+ }
1152
+ }
1153
+ var openaiCompletionResponseSchema = chunkF75EQ574_cjs.lazyValidator(
1154
+ () => chunkF75EQ574_cjs.zodSchema(
1155
+ v4.z.object({
1156
+ id: v4.z.string().nullish(),
1157
+ created: v4.z.number().nullish(),
1158
+ model: v4.z.string().nullish(),
1159
+ choices: v4.z.array(
1160
+ v4.z.object({
1161
+ text: v4.z.string(),
1162
+ finish_reason: v4.z.string(),
1163
+ logprobs: v4.z.object({
1164
+ tokens: v4.z.array(v4.z.string()),
1165
+ token_logprobs: v4.z.array(v4.z.number()),
1166
+ top_logprobs: v4.z.array(v4.z.record(v4.z.string(), v4.z.number())).nullish()
1167
+ }).nullish()
1168
+ })
1169
+ ),
1170
+ usage: v4.z.object({
1171
+ prompt_tokens: v4.z.number(),
1172
+ completion_tokens: v4.z.number(),
1173
+ total_tokens: v4.z.number()
1174
+ }).nullish()
1175
+ })
1176
+ )
1177
+ );
1178
+ var openaiCompletionChunkSchema = chunkF75EQ574_cjs.lazyValidator(
1179
+ () => chunkF75EQ574_cjs.zodSchema(
1180
+ v4.z.union([
1181
+ v4.z.object({
1182
+ id: v4.z.string().nullish(),
1183
+ created: v4.z.number().nullish(),
1184
+ model: v4.z.string().nullish(),
1185
+ choices: v4.z.array(
1186
+ v4.z.object({
1187
+ text: v4.z.string(),
1188
+ finish_reason: v4.z.string().nullish(),
1189
+ index: v4.z.number(),
1190
+ logprobs: v4.z.object({
1191
+ tokens: v4.z.array(v4.z.string()),
1192
+ token_logprobs: v4.z.array(v4.z.number()),
1193
+ top_logprobs: v4.z.array(v4.z.record(v4.z.string(), v4.z.number())).nullish()
1194
+ }).nullish()
1195
+ })
1196
+ ),
1197
+ usage: v4.z.object({
1198
+ prompt_tokens: v4.z.number(),
1199
+ completion_tokens: v4.z.number(),
1200
+ total_tokens: v4.z.number()
1201
+ }).nullish()
1202
+ }),
1203
+ openaiErrorDataSchema
1204
+ ])
1205
+ )
1206
+ );
1207
+ var openaiCompletionProviderOptions = chunkF75EQ574_cjs.lazyValidator(
1208
+ () => chunkF75EQ574_cjs.zodSchema(
1209
+ v4.z.object({
1210
+ /**
1211
+ Echo back the prompt in addition to the completion.
1212
+ */
1213
+ echo: v4.z.boolean().optional(),
1214
+ /**
1215
+ Modify the likelihood of specified tokens appearing in the completion.
1216
+
1217
+ Accepts a JSON object that maps tokens (specified by their token ID in
1218
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
1219
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
1220
+ the bias is added to the logits generated by the model prior to sampling.
1221
+ The exact effect will vary per model, but values between -1 and 1 should
1222
+ decrease or increase likelihood of selection; values like -100 or 100
1223
+ should result in a ban or exclusive selection of the relevant token.
1224
+
1225
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1226
+ token from being generated.
1227
+ */
1228
+ logitBias: v4.z.record(v4.z.string(), v4.z.number()).optional(),
1229
+ /**
1230
+ The suffix that comes after a completion of inserted text.
1231
+ */
1232
+ suffix: v4.z.string().optional(),
1233
+ /**
1234
+ A unique identifier representing your end-user, which can help OpenAI to
1235
+ monitor and detect abuse. Learn more.
1236
+ */
1237
+ user: v4.z.string().optional(),
1238
+ /**
1239
+ Return the log probabilities of the tokens. Including logprobs will increase
1240
+ the response size and can slow down response times. However, it can
1241
+ be useful to better understand how the model is behaving.
1242
+ Setting to true will return the log probabilities of the tokens that
1243
+ were generated.
1244
+ Setting to a number will return the log probabilities of the top n
1245
+ tokens that were generated.
1246
+ */
1247
+ logprobs: v4.z.union([v4.z.boolean(), v4.z.number()]).optional()
1248
+ })
1249
+ )
1250
+ );
1251
+ var OpenAICompletionLanguageModel = class {
1252
+ constructor(modelId, config) {
1253
+ this.specificationVersion = "v2";
1254
+ this.supportedUrls = {
1255
+ // No URLs are supported for completion models.
1256
+ };
1257
+ this.modelId = modelId;
1258
+ this.config = config;
1259
+ }
1260
+ get providerOptionsName() {
1261
+ return this.config.provider.split(".")[0].trim();
1262
+ }
1263
+ get provider() {
1264
+ return this.config.provider;
1265
+ }
1266
+ async getArgs({
1267
+ prompt,
1268
+ maxOutputTokens,
1269
+ temperature,
1270
+ topP,
1271
+ topK,
1272
+ frequencyPenalty,
1273
+ presencePenalty,
1274
+ stopSequences: userStopSequences,
1275
+ responseFormat,
1276
+ tools,
1277
+ toolChoice,
1278
+ seed,
1279
+ providerOptions
1280
+ }) {
1281
+ const warnings = [];
1282
+ const openaiOptions = {
1283
+ ...await chunkF75EQ574_cjs.parseProviderOptions({
1284
+ provider: "openai",
1285
+ providerOptions,
1286
+ schema: openaiCompletionProviderOptions
1287
+ }),
1288
+ ...await chunkF75EQ574_cjs.parseProviderOptions({
1289
+ provider: this.providerOptionsName,
1290
+ providerOptions,
1291
+ schema: openaiCompletionProviderOptions
1292
+ })
1293
+ };
1294
+ if (topK != null) {
1295
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
1296
+ }
1297
+ if (tools == null ? void 0 : tools.length) {
1298
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
1299
+ }
1300
+ if (toolChoice != null) {
1301
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
1302
+ }
1303
+ if (responseFormat != null && responseFormat.type !== "text") {
1304
+ warnings.push({
1305
+ type: "unsupported-setting",
1306
+ setting: "responseFormat",
1307
+ details: "JSON response format is not supported."
1308
+ });
1309
+ }
1310
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
1311
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1312
+ return {
1313
+ args: {
1314
+ // model id:
1315
+ model: this.modelId,
1316
+ // model specific settings:
1317
+ echo: openaiOptions.echo,
1318
+ logit_bias: openaiOptions.logitBias,
1319
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
1320
+ suffix: openaiOptions.suffix,
1321
+ user: openaiOptions.user,
1322
+ // standardized settings:
1323
+ max_tokens: maxOutputTokens,
1324
+ temperature,
1325
+ top_p: topP,
1326
+ frequency_penalty: frequencyPenalty,
1327
+ presence_penalty: presencePenalty,
1328
+ seed,
1329
+ // prompt:
1330
+ prompt: completionPrompt,
1331
+ // stop sequences:
1332
+ stop: stop.length > 0 ? stop : void 0
1333
+ },
1334
+ warnings
1335
+ };
1336
+ }
1337
+ async doGenerate(options) {
1338
+ var _a, _b, _c;
1339
+ const { args, warnings } = await this.getArgs(options);
1340
+ const {
1341
+ responseHeaders,
1342
+ value: response,
1343
+ rawValue: rawResponse
1344
+ } = await chunkF75EQ574_cjs.postJsonToApi({
1345
+ url: this.config.url({
1346
+ path: "/completions",
1347
+ modelId: this.modelId
1348
+ }),
1349
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
1350
+ body: args,
1351
+ failedResponseHandler: openaiFailedResponseHandler,
1352
+ successfulResponseHandler: chunkF75EQ574_cjs.createJsonResponseHandler(
1353
+ openaiCompletionResponseSchema
1354
+ ),
1355
+ abortSignal: options.abortSignal,
1356
+ fetch: this.config.fetch
1357
+ });
1358
+ const choice = response.choices[0];
1359
+ const providerMetadata = { openai: {} };
1360
+ if (choice.logprobs != null) {
1361
+ providerMetadata.openai.logprobs = choice.logprobs;
1362
+ }
1363
+ return {
1364
+ content: [{ type: "text", text: choice.text }],
1365
+ usage: {
1366
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1367
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1368
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1369
+ },
1370
+ finishReason: mapOpenAIFinishReason2(choice.finish_reason),
1371
+ request: { body: args },
1372
+ response: {
1373
+ ...getResponseMetadata2(response),
1374
+ headers: responseHeaders,
1375
+ body: rawResponse
1376
+ },
1377
+ providerMetadata,
1378
+ warnings
1379
+ };
1380
+ }
1381
+ async doStream(options) {
1382
+ const { args, warnings } = await this.getArgs(options);
1383
+ const body = {
1384
+ ...args,
1385
+ stream: true,
1386
+ stream_options: {
1387
+ include_usage: true
1388
+ }
1389
+ };
1390
+ const { responseHeaders, value: response } = await chunkF75EQ574_cjs.postJsonToApi({
1391
+ url: this.config.url({
1392
+ path: "/completions",
1393
+ modelId: this.modelId
1394
+ }),
1395
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
1396
+ body,
1397
+ failedResponseHandler: openaiFailedResponseHandler,
1398
+ successfulResponseHandler: chunkF75EQ574_cjs.createEventSourceResponseHandler(
1399
+ openaiCompletionChunkSchema
1400
+ ),
1401
+ abortSignal: options.abortSignal,
1402
+ fetch: this.config.fetch
1403
+ });
1404
+ let finishReason = "unknown";
1405
+ const providerMetadata = { openai: {} };
1406
+ const usage = {
1407
+ inputTokens: void 0,
1408
+ outputTokens: void 0,
1409
+ totalTokens: void 0
1410
+ };
1411
+ let isFirstChunk = true;
1412
+ return {
1413
+ stream: response.pipeThrough(
1414
+ new TransformStream({
1415
+ start(controller) {
1416
+ controller.enqueue({ type: "stream-start", warnings });
1417
+ },
1418
+ transform(chunk, controller) {
1419
+ if (options.includeRawChunks) {
1420
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1421
+ }
1422
+ if (!chunk.success) {
1423
+ finishReason = "error";
1424
+ controller.enqueue({ type: "error", error: chunk.error });
1425
+ return;
1426
+ }
1427
+ const value = chunk.value;
1428
+ if ("error" in value) {
1429
+ finishReason = "error";
1430
+ controller.enqueue({ type: "error", error: value.error });
1431
+ return;
1432
+ }
1433
+ if (isFirstChunk) {
1434
+ isFirstChunk = false;
1435
+ controller.enqueue({
1436
+ type: "response-metadata",
1437
+ ...getResponseMetadata2(value)
1438
+ });
1439
+ controller.enqueue({ type: "text-start", id: "0" });
1440
+ }
1441
+ if (value.usage != null) {
1442
+ usage.inputTokens = value.usage.prompt_tokens;
1443
+ usage.outputTokens = value.usage.completion_tokens;
1444
+ usage.totalTokens = value.usage.total_tokens;
1445
+ }
1446
+ const choice = value.choices[0];
1447
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1448
+ finishReason = mapOpenAIFinishReason2(choice.finish_reason);
1449
+ }
1450
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
1451
+ providerMetadata.openai.logprobs = choice.logprobs;
1452
+ }
1453
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
1454
+ controller.enqueue({
1455
+ type: "text-delta",
1456
+ id: "0",
1457
+ delta: choice.text
1458
+ });
1459
+ }
1460
+ },
1461
+ flush(controller) {
1462
+ if (!isFirstChunk) {
1463
+ controller.enqueue({ type: "text-end", id: "0" });
1464
+ }
1465
+ controller.enqueue({
1466
+ type: "finish",
1467
+ finishReason,
1468
+ providerMetadata,
1469
+ usage
1470
+ });
1471
+ }
1472
+ })
1473
+ ),
1474
+ request: { body },
1475
+ response: { headers: responseHeaders }
1476
+ };
1477
+ }
1478
+ };
1479
+ var openaiEmbeddingProviderOptions = chunkF75EQ574_cjs.lazyValidator(
1480
+ () => chunkF75EQ574_cjs.zodSchema(
1481
+ v4.z.object({
1482
+ /**
1483
+ The number of dimensions the resulting output embeddings should have.
1484
+ Only supported in text-embedding-3 and later models.
1485
+ */
1486
+ dimensions: v4.z.number().optional(),
1487
+ /**
1488
+ A unique identifier representing your end-user, which can help OpenAI to
1489
+ monitor and detect abuse. Learn more.
1490
+ */
1491
+ user: v4.z.string().optional()
1492
+ })
1493
+ )
1494
+ );
1495
+ var openaiTextEmbeddingResponseSchema = chunkF75EQ574_cjs.lazyValidator(
1496
+ () => chunkF75EQ574_cjs.zodSchema(
1497
+ v4.z.object({
1498
+ data: v4.z.array(v4.z.object({ embedding: v4.z.array(v4.z.number()) })),
1499
+ usage: v4.z.object({ prompt_tokens: v4.z.number() }).nullish()
1500
+ })
1501
+ )
1502
+ );
1503
+ var OpenAIEmbeddingModel = class {
1504
+ constructor(modelId, config) {
1505
+ this.specificationVersion = "v2";
1506
+ this.maxEmbeddingsPerCall = 2048;
1507
+ this.supportsParallelCalls = true;
1508
+ this.modelId = modelId;
1509
+ this.config = config;
1510
+ }
1511
+ get provider() {
1512
+ return this.config.provider;
1513
+ }
1514
+ async doEmbed({
1515
+ values,
1516
+ headers,
1517
+ abortSignal,
1518
+ providerOptions
1519
+ }) {
1520
+ var _a;
1521
+ if (values.length > this.maxEmbeddingsPerCall) {
1522
+ throw new chunkF75EQ574_cjs.TooManyEmbeddingValuesForCallError({
1523
+ provider: this.provider,
1524
+ modelId: this.modelId,
1525
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1526
+ values
1527
+ });
1528
+ }
1529
+ const openaiOptions = (_a = await chunkF75EQ574_cjs.parseProviderOptions({
1530
+ provider: "openai",
1531
+ providerOptions,
1532
+ schema: openaiEmbeddingProviderOptions
1533
+ })) != null ? _a : {};
1534
+ const {
1535
+ responseHeaders,
1536
+ value: response,
1537
+ rawValue
1538
+ } = await chunkF75EQ574_cjs.postJsonToApi({
1539
+ url: this.config.url({
1540
+ path: "/embeddings",
1541
+ modelId: this.modelId
1542
+ }),
1543
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), headers),
1544
+ body: {
1545
+ model: this.modelId,
1546
+ input: values,
1547
+ encoding_format: "float",
1548
+ dimensions: openaiOptions.dimensions,
1549
+ user: openaiOptions.user
1550
+ },
1551
+ failedResponseHandler: openaiFailedResponseHandler,
1552
+ successfulResponseHandler: chunkF75EQ574_cjs.createJsonResponseHandler(
1553
+ openaiTextEmbeddingResponseSchema
1554
+ ),
1555
+ abortSignal,
1556
+ fetch: this.config.fetch
1557
+ });
1558
+ return {
1559
+ embeddings: response.data.map((item) => item.embedding),
1560
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1561
+ response: { headers: responseHeaders, body: rawValue }
1562
+ };
1563
+ }
1564
+ };
1565
+ var openaiImageResponseSchema = chunkF75EQ574_cjs.lazyValidator(
1566
+ () => chunkF75EQ574_cjs.zodSchema(
1567
+ v4.z.object({
1568
+ data: v4.z.array(
1569
+ v4.z.object({
1570
+ b64_json: v4.z.string(),
1571
+ revised_prompt: v4.z.string().nullish()
1572
+ })
1573
+ )
1574
+ })
1575
+ )
1576
+ );
1577
+ var modelMaxImagesPerCall = {
1578
+ "dall-e-3": 1,
1579
+ "dall-e-2": 10,
1580
+ "gpt-image-1": 10,
1581
+ "gpt-image-1-mini": 10
1582
+ };
1583
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
1584
+ "gpt-image-1",
1585
+ "gpt-image-1-mini"
1586
+ ]);
1587
+ var OpenAIImageModel = class {
1588
+ constructor(modelId, config) {
1589
+ this.modelId = modelId;
1590
+ this.config = config;
1591
+ this.specificationVersion = "v2";
1592
+ }
1593
+ get maxImagesPerCall() {
1594
+ var _a;
1595
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1596
+ }
1597
+ get provider() {
1598
+ return this.config.provider;
1599
+ }
1600
+ async doGenerate({
1601
+ prompt,
1602
+ n,
1603
+ size,
1604
+ aspectRatio,
1605
+ seed,
1606
+ providerOptions,
1607
+ headers,
1608
+ abortSignal
1609
+ }) {
1610
+ var _a, _b, _c, _d;
1611
+ const warnings = [];
1612
+ if (aspectRatio != null) {
1613
+ warnings.push({
1614
+ type: "unsupported-setting",
1615
+ setting: "aspectRatio",
1616
+ details: "This model does not support aspect ratio. Use `size` instead."
1617
+ });
1618
+ }
1619
+ if (seed != null) {
1620
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1621
+ }
1622
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1623
+ const { value: response, responseHeaders } = await chunkF75EQ574_cjs.postJsonToApi({
1624
+ url: this.config.url({
1625
+ path: "/images/generations",
1626
+ modelId: this.modelId
1627
+ }),
1628
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), headers),
1629
+ body: {
1630
+ model: this.modelId,
1631
+ prompt,
1632
+ n,
1633
+ size,
1634
+ ...(_d = providerOptions.openai) != null ? _d : {},
1635
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1636
+ },
1637
+ failedResponseHandler: openaiFailedResponseHandler,
1638
+ successfulResponseHandler: chunkF75EQ574_cjs.createJsonResponseHandler(
1639
+ openaiImageResponseSchema
1640
+ ),
1641
+ abortSignal,
1642
+ fetch: this.config.fetch
1643
+ });
1644
+ return {
1645
+ images: response.data.map((item) => item.b64_json),
1646
+ warnings,
1647
+ response: {
1648
+ timestamp: currentDate,
1649
+ modelId: this.modelId,
1650
+ headers: responseHeaders
1651
+ },
1652
+ providerMetadata: {
1653
+ openai: {
1654
+ images: response.data.map(
1655
+ (item) => item.revised_prompt ? {
1656
+ revisedPrompt: item.revised_prompt
1657
+ } : null
1658
+ )
1659
+ }
1660
+ }
1661
+ };
1662
+ }
1663
+ };
1664
+ var openaiTranscriptionResponseSchema = chunkF75EQ574_cjs.lazyValidator(
1665
+ () => chunkF75EQ574_cjs.zodSchema(
1666
+ v4.z.object({
1667
+ text: v4.z.string(),
1668
+ language: v4.z.string().nullish(),
1669
+ duration: v4.z.number().nullish(),
1670
+ words: v4.z.array(
1671
+ v4.z.object({
1672
+ word: v4.z.string(),
1673
+ start: v4.z.number(),
1674
+ end: v4.z.number()
1675
+ })
1676
+ ).nullish(),
1677
+ segments: v4.z.array(
1678
+ v4.z.object({
1679
+ id: v4.z.number(),
1680
+ seek: v4.z.number(),
1681
+ start: v4.z.number(),
1682
+ end: v4.z.number(),
1683
+ text: v4.z.string(),
1684
+ tokens: v4.z.array(v4.z.number()),
1685
+ temperature: v4.z.number(),
1686
+ avg_logprob: v4.z.number(),
1687
+ compression_ratio: v4.z.number(),
1688
+ no_speech_prob: v4.z.number()
1689
+ })
1690
+ ).nullish()
1691
+ })
1692
+ )
1693
+ );
1694
+ var openAITranscriptionProviderOptions = chunkF75EQ574_cjs.lazyValidator(
1695
+ () => chunkF75EQ574_cjs.zodSchema(
1696
+ v4.z.object({
1697
+ /**
1698
+ * Additional information to include in the transcription response.
1699
+ */
1700
+ include: v4.z.array(v4.z.string()).optional(),
1701
+ /**
1702
+ * The language of the input audio in ISO-639-1 format.
1703
+ */
1704
+ language: v4.z.string().optional(),
1705
+ /**
1706
+ * An optional text to guide the model's style or continue a previous audio segment.
1707
+ */
1708
+ prompt: v4.z.string().optional(),
1709
+ /**
1710
+ * The sampling temperature, between 0 and 1.
1711
+ * @default 0
1712
+ */
1713
+ temperature: v4.z.number().min(0).max(1).default(0).optional(),
1714
+ /**
1715
+ * The timestamp granularities to populate for this transcription.
1716
+ * @default ['segment']
1717
+ */
1718
+ timestampGranularities: v4.z.array(v4.z.enum(["word", "segment"])).default(["segment"]).optional()
1719
+ })
1720
+ )
1721
+ );
1722
+ var languageMap = {
1723
+ afrikaans: "af",
1724
+ arabic: "ar",
1725
+ armenian: "hy",
1726
+ azerbaijani: "az",
1727
+ belarusian: "be",
1728
+ bosnian: "bs",
1729
+ bulgarian: "bg",
1730
+ catalan: "ca",
1731
+ chinese: "zh",
1732
+ croatian: "hr",
1733
+ czech: "cs",
1734
+ danish: "da",
1735
+ dutch: "nl",
1736
+ english: "en",
1737
+ estonian: "et",
1738
+ finnish: "fi",
1739
+ french: "fr",
1740
+ galician: "gl",
1741
+ german: "de",
1742
+ greek: "el",
1743
+ hebrew: "he",
1744
+ hindi: "hi",
1745
+ hungarian: "hu",
1746
+ icelandic: "is",
1747
+ indonesian: "id",
1748
+ italian: "it",
1749
+ japanese: "ja",
1750
+ kannada: "kn",
1751
+ kazakh: "kk",
1752
+ korean: "ko",
1753
+ latvian: "lv",
1754
+ lithuanian: "lt",
1755
+ macedonian: "mk",
1756
+ malay: "ms",
1757
+ marathi: "mr",
1758
+ maori: "mi",
1759
+ nepali: "ne",
1760
+ norwegian: "no",
1761
+ persian: "fa",
1762
+ polish: "pl",
1763
+ portuguese: "pt",
1764
+ romanian: "ro",
1765
+ russian: "ru",
1766
+ serbian: "sr",
1767
+ slovak: "sk",
1768
+ slovenian: "sl",
1769
+ spanish: "es",
1770
+ swahili: "sw",
1771
+ swedish: "sv",
1772
+ tagalog: "tl",
1773
+ tamil: "ta",
1774
+ thai: "th",
1775
+ turkish: "tr",
1776
+ ukrainian: "uk",
1777
+ urdu: "ur",
1778
+ vietnamese: "vi",
1779
+ welsh: "cy"
1780
+ };
1781
+ var OpenAITranscriptionModel = class {
1782
+ constructor(modelId, config) {
1783
+ this.modelId = modelId;
1784
+ this.config = config;
1785
+ this.specificationVersion = "v2";
1786
+ }
1787
+ get provider() {
1788
+ return this.config.provider;
1789
+ }
1790
+ async getArgs({
1791
+ audio,
1792
+ mediaType,
1793
+ providerOptions
1794
+ }) {
1795
+ const warnings = [];
1796
+ const openAIOptions = await chunkF75EQ574_cjs.parseProviderOptions({
1797
+ provider: "openai",
1798
+ providerOptions,
1799
+ schema: openAITranscriptionProviderOptions
1800
+ });
1801
+ const formData = new FormData();
1802
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([chunkF75EQ574_cjs.convertBase64ToUint8Array(audio)]);
1803
+ formData.append("model", this.modelId);
1804
+ const fileExtension = chunkF75EQ574_cjs.mediaTypeToExtension(mediaType);
1805
+ formData.append(
1806
+ "file",
1807
+ new File([blob], "audio", { type: mediaType }),
1808
+ `audio.${fileExtension}`
1809
+ );
1810
+ if (openAIOptions) {
1811
+ const transcriptionModelOptions = {
1812
+ include: openAIOptions.include,
1813
+ language: openAIOptions.language,
1814
+ prompt: openAIOptions.prompt,
1815
+ // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
1816
+ // prefer verbose_json to get segments for models that support it
1817
+ response_format: [
1818
+ "gpt-4o-transcribe",
1819
+ "gpt-4o-mini-transcribe"
1820
+ ].includes(this.modelId) ? "json" : "verbose_json",
1821
+ temperature: openAIOptions.temperature,
1822
+ timestamp_granularities: openAIOptions.timestampGranularities
1823
+ };
1824
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
1825
+ if (value != null) {
1826
+ if (Array.isArray(value)) {
1827
+ for (const item of value) {
1828
+ formData.append(`${key}[]`, String(item));
1829
+ }
1830
+ } else {
1831
+ formData.append(key, String(value));
1832
+ }
1833
+ }
1834
+ }
1835
+ }
1836
+ return {
1837
+ formData,
1838
+ warnings
1839
+ };
1840
+ }
1841
+ async doGenerate(options) {
1842
+ var _a, _b, _c, _d, _e, _f, _g, _h;
1843
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1844
+ const { formData, warnings } = await this.getArgs(options);
1845
+ const {
1846
+ value: response,
1847
+ responseHeaders,
1848
+ rawValue: rawResponse
1849
+ } = await chunkF75EQ574_cjs.postFormDataToApi({
1850
+ url: this.config.url({
1851
+ path: "/audio/transcriptions",
1852
+ modelId: this.modelId
1853
+ }),
1854
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
1855
+ formData,
1856
+ failedResponseHandler: openaiFailedResponseHandler,
1857
+ successfulResponseHandler: chunkF75EQ574_cjs.createJsonResponseHandler(
1858
+ openaiTranscriptionResponseSchema
1859
+ ),
1860
+ abortSignal: options.abortSignal,
1861
+ fetch: this.config.fetch
1862
+ });
1863
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1864
+ return {
1865
+ text: response.text,
1866
+ segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
1867
+ text: segment.text,
1868
+ startSecond: segment.start,
1869
+ endSecond: segment.end
1870
+ }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
1871
+ text: word.word,
1872
+ startSecond: word.start,
1873
+ endSecond: word.end
1874
+ }))) != null ? _g : [],
1875
+ language,
1876
+ durationInSeconds: (_h = response.duration) != null ? _h : void 0,
1877
+ warnings,
1878
+ response: {
1879
+ timestamp: currentDate,
1880
+ modelId: this.modelId,
1881
+ headers: responseHeaders,
1882
+ body: rawResponse
1883
+ }
1884
+ };
1885
+ }
1886
+ };
1887
+ var openaiSpeechProviderOptionsSchema = chunkF75EQ574_cjs.lazyValidator(
1888
+ () => chunkF75EQ574_cjs.zodSchema(
1889
+ v4.z.object({
1890
+ instructions: v4.z.string().nullish(),
1891
+ speed: v4.z.number().min(0.25).max(4).default(1).nullish()
1892
+ })
1893
+ )
1894
+ );
1895
+ var OpenAISpeechModel = class {
1896
+ constructor(modelId, config) {
1897
+ this.modelId = modelId;
1898
+ this.config = config;
1899
+ this.specificationVersion = "v2";
1900
+ }
1901
+ get provider() {
1902
+ return this.config.provider;
1903
+ }
1904
+ async getArgs({
1905
+ text,
1906
+ voice = "alloy",
1907
+ outputFormat = "mp3",
1908
+ speed,
1909
+ instructions,
1910
+ language,
1911
+ providerOptions
1912
+ }) {
1913
+ const warnings = [];
1914
+ const openAIOptions = await chunkF75EQ574_cjs.parseProviderOptions({
1915
+ provider: "openai",
1916
+ providerOptions,
1917
+ schema: openaiSpeechProviderOptionsSchema
1918
+ });
1919
+ const requestBody = {
1920
+ model: this.modelId,
1921
+ input: text,
1922
+ voice,
1923
+ response_format: "mp3",
1924
+ speed,
1925
+ instructions
1926
+ };
1927
+ if (outputFormat) {
1928
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
1929
+ requestBody.response_format = outputFormat;
1930
+ } else {
1931
+ warnings.push({
1932
+ type: "unsupported-setting",
1933
+ setting: "outputFormat",
1934
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
1935
+ });
1936
+ }
1937
+ }
1938
+ if (openAIOptions) {
1939
+ const speechModelOptions = {};
1940
+ for (const key in speechModelOptions) {
1941
+ const value = speechModelOptions[key];
1942
+ if (value !== void 0) {
1943
+ requestBody[key] = value;
1944
+ }
1945
+ }
1946
+ }
1947
+ if (language) {
1948
+ warnings.push({
1949
+ type: "unsupported-setting",
1950
+ setting: "language",
1951
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
1952
+ });
1953
+ }
1954
+ return {
1955
+ requestBody,
1956
+ warnings
1957
+ };
1958
+ }
1959
+ async doGenerate(options) {
1960
+ var _a, _b, _c;
1961
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1962
+ const { requestBody, warnings } = await this.getArgs(options);
1963
+ const {
1964
+ value: audio,
1965
+ responseHeaders,
1966
+ rawValue: rawResponse
1967
+ } = await chunkF75EQ574_cjs.postJsonToApi({
1968
+ url: this.config.url({
1969
+ path: "/audio/speech",
1970
+ modelId: this.modelId
1971
+ }),
1972
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
1973
+ body: requestBody,
1974
+ failedResponseHandler: openaiFailedResponseHandler,
1975
+ successfulResponseHandler: chunkF75EQ574_cjs.createBinaryResponseHandler(),
1976
+ abortSignal: options.abortSignal,
1977
+ fetch: this.config.fetch
1978
+ });
1979
+ return {
1980
+ audio,
1981
+ warnings,
1982
+ request: {
1983
+ body: JSON.stringify(requestBody)
1984
+ },
1985
+ response: {
1986
+ timestamp: currentDate,
1987
+ modelId: this.modelId,
1988
+ headers: responseHeaders,
1989
+ body: rawResponse
1990
+ }
1991
+ };
1992
+ }
1993
+ };
1994
+ var localShellInputSchema = chunkF75EQ574_cjs.lazySchema(
1995
+ () => chunkF75EQ574_cjs.zodSchema(
1996
+ v4.z.object({
1997
+ action: v4.z.object({
1998
+ type: v4.z.literal("exec"),
1999
+ command: v4.z.array(v4.z.string()),
2000
+ timeoutMs: v4.z.number().optional(),
2001
+ user: v4.z.string().optional(),
2002
+ workingDirectory: v4.z.string().optional(),
2003
+ env: v4.z.record(v4.z.string(), v4.z.string()).optional()
2004
+ })
2005
+ })
2006
+ )
2007
+ );
2008
+ var localShellOutputSchema = chunkF75EQ574_cjs.lazySchema(
2009
+ () => chunkF75EQ574_cjs.zodSchema(v4.z.object({ output: v4.z.string() }))
2010
+ );
2011
+ chunkF75EQ574_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2012
+ id: "openai.local_shell",
2013
+ name: "local_shell",
2014
+ inputSchema: localShellInputSchema,
2015
+ outputSchema: localShellOutputSchema
2016
+ });
2017
+ function isFileId(data, prefixes) {
2018
+ if (!prefixes) return false;
2019
+ return prefixes.some((prefix) => data.startsWith(prefix));
2020
+ }
2021
+ async function convertToOpenAIResponsesInput({
2022
+ prompt,
2023
+ systemMessageMode,
2024
+ fileIdPrefixes,
2025
+ store,
2026
+ hasLocalShellTool = false
2027
+ }) {
2028
+ var _a, _b, _c, _d;
2029
+ const input = [];
2030
+ const warnings = [];
2031
+ for (const { role, content } of prompt) {
2032
+ switch (role) {
2033
+ case "system": {
2034
+ switch (systemMessageMode) {
2035
+ case "system": {
2036
+ input.push({ role: "system", content });
2037
+ break;
2038
+ }
2039
+ case "developer": {
2040
+ input.push({ role: "developer", content });
2041
+ break;
2042
+ }
2043
+ case "remove": {
2044
+ warnings.push({
2045
+ type: "other",
2046
+ message: "system messages are removed for this model"
2047
+ });
2048
+ break;
2049
+ }
2050
+ default: {
2051
+ const _exhaustiveCheck = systemMessageMode;
2052
+ throw new Error(
2053
+ `Unsupported system message mode: ${_exhaustiveCheck}`
2054
+ );
2055
+ }
2056
+ }
2057
+ break;
2058
+ }
2059
+ case "user": {
2060
+ input.push({
2061
+ role: "user",
2062
+ content: content.map((part, index) => {
2063
+ var _a2, _b2, _c2;
2064
+ switch (part.type) {
2065
+ case "text": {
2066
+ return { type: "input_text", text: part.text };
2067
+ }
2068
+ case "file": {
2069
+ if (part.mediaType.startsWith("image/")) {
2070
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
2071
+ return {
2072
+ type: "input_image",
2073
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2074
+ image_url: `data:${mediaType};base64,${chunkF75EQ574_cjs.convertToBase64(part.data)}`
2075
+ },
2076
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
2077
+ };
2078
+ } else if (part.mediaType === "application/pdf") {
2079
+ if (part.data instanceof URL) {
2080
+ return {
2081
+ type: "input_file",
2082
+ file_url: part.data.toString()
2083
+ };
2084
+ }
2085
+ return {
2086
+ type: "input_file",
2087
+ ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2088
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2089
+ file_data: `data:application/pdf;base64,${chunkF75EQ574_cjs.convertToBase64(part.data)}`
2090
+ }
2091
+ };
2092
+ } else {
2093
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
2094
+ functionality: `file part media type ${part.mediaType}`
2095
+ });
2096
+ }
2097
+ }
2098
+ }
2099
+ })
2100
+ });
2101
+ break;
2102
+ }
2103
+ case "assistant": {
2104
+ const reasoningMessages = {};
2105
+ for (const part of content) {
2106
+ switch (part.type) {
2107
+ case "text": {
2108
+ const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId;
2109
+ if (store && id != null) {
2110
+ input.push({ type: "item_reference", id });
2111
+ break;
2112
+ }
2113
+ input.push({
2114
+ role: "assistant",
2115
+ content: [{ type: "output_text", text: part.text }],
2116
+ id
2117
+ });
2118
+ break;
2119
+ }
2120
+ case "tool-call": {
2121
+ if (part.providerExecuted) {
2122
+ break;
2123
+ }
2124
+ const id = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId;
2125
+ if (store && id != null) {
2126
+ input.push({ type: "item_reference", id });
2127
+ break;
2128
+ }
2129
+ if (hasLocalShellTool && part.toolName === "local_shell") {
2130
+ const parsedInput = await chunkF75EQ574_cjs.validateTypes({
2131
+ value: part.input,
2132
+ schema: localShellInputSchema
2133
+ });
2134
+ input.push({
2135
+ type: "local_shell_call",
2136
+ call_id: part.toolCallId,
2137
+ id,
2138
+ action: {
2139
+ type: "exec",
2140
+ command: parsedInput.action.command,
2141
+ timeout_ms: parsedInput.action.timeoutMs,
2142
+ user: parsedInput.action.user,
2143
+ working_directory: parsedInput.action.workingDirectory,
2144
+ env: parsedInput.action.env
2145
+ }
2146
+ });
2147
+ break;
2148
+ }
2149
+ input.push({
2150
+ type: "function_call",
2151
+ call_id: part.toolCallId,
2152
+ name: part.toolName,
2153
+ arguments: JSON.stringify(part.input),
2154
+ id
2155
+ });
2156
+ break;
2157
+ }
2158
+ // assistant tool result parts are from provider-executed tools:
2159
+ case "tool-result": {
2160
+ if (store) {
2161
+ input.push({ type: "item_reference", id: part.toolCallId });
2162
+ } else {
2163
+ warnings.push({
2164
+ type: "other",
2165
+ message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
2166
+ });
2167
+ }
2168
+ break;
2169
+ }
2170
+ case "reasoning": {
2171
+ const providerOptions = await chunkF75EQ574_cjs.parseProviderOptions({
2172
+ provider: "openai",
2173
+ providerOptions: part.providerOptions,
2174
+ schema: openaiResponsesReasoningProviderOptionsSchema
2175
+ });
2176
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2177
+ if (reasoningId != null) {
2178
+ const reasoningMessage = reasoningMessages[reasoningId];
2179
+ if (store) {
2180
+ if (reasoningMessage === void 0) {
2181
+ input.push({ type: "item_reference", id: reasoningId });
2182
+ reasoningMessages[reasoningId] = {
2183
+ type: "reasoning",
2184
+ id: reasoningId,
2185
+ summary: []
2186
+ };
2187
+ }
2188
+ } else {
2189
+ const summaryParts = [];
2190
+ if (part.text.length > 0) {
2191
+ summaryParts.push({
2192
+ type: "summary_text",
2193
+ text: part.text
2194
+ });
2195
+ } else if (reasoningMessage !== void 0) {
2196
+ warnings.push({
2197
+ type: "other",
2198
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2199
+ });
2200
+ }
2201
+ if (reasoningMessage === void 0) {
2202
+ reasoningMessages[reasoningId] = {
2203
+ type: "reasoning",
2204
+ id: reasoningId,
2205
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2206
+ summary: summaryParts
2207
+ };
2208
+ input.push(reasoningMessages[reasoningId]);
2209
+ } else {
2210
+ reasoningMessage.summary.push(...summaryParts);
2211
+ if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
2212
+ reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
2213
+ }
2214
+ }
2215
+ }
2216
+ } else {
2217
+ warnings.push({
2218
+ type: "other",
2219
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
2220
+ });
2221
+ }
2222
+ break;
2223
+ }
2224
+ }
2225
+ }
2226
+ break;
2227
+ }
2228
+ case "tool": {
2229
+ for (const part of content) {
2230
+ const output = part.output;
2231
+ if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
2232
+ const parsedOutput = await chunkF75EQ574_cjs.validateTypes({
2233
+ value: output.value,
2234
+ schema: localShellOutputSchema
2235
+ });
2236
+ input.push({
2237
+ type: "local_shell_call_output",
2238
+ call_id: part.toolCallId,
2239
+ output: parsedOutput.output
2240
+ });
2241
+ break;
2242
+ }
2243
+ let contentValue;
2244
+ switch (output.type) {
2245
+ case "text":
2246
+ case "error-text":
2247
+ contentValue = output.value;
2248
+ break;
2249
+ case "json":
2250
+ case "error-json":
2251
+ contentValue = JSON.stringify(output.value);
2252
+ break;
2253
+ case "content":
2254
+ contentValue = output.value.map((item) => {
2255
+ switch (item.type) {
2256
+ case "text": {
2257
+ return { type: "input_text", text: item.text };
2258
+ }
2259
+ case "media": {
2260
+ return item.mediaType.startsWith("image/") ? {
2261
+ type: "input_image",
2262
+ image_url: `data:${item.mediaType};base64,${item.data}`
2263
+ } : {
2264
+ type: "input_file",
2265
+ filename: "data",
2266
+ file_data: `data:${item.mediaType};base64,${item.data}`
2267
+ };
2268
+ }
2269
+ }
2270
+ });
2271
+ break;
2272
+ }
2273
+ input.push({
2274
+ type: "function_call_output",
2275
+ call_id: part.toolCallId,
2276
+ output: contentValue
2277
+ });
2278
+ }
2279
+ break;
2280
+ }
2281
+ default: {
2282
+ const _exhaustiveCheck = role;
2283
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2284
+ }
2285
+ }
2286
+ }
2287
+ return { input, warnings };
2288
+ }
2289
+ var openaiResponsesReasoningProviderOptionsSchema = v4.z.object({
2290
+ itemId: v4.z.string().nullish(),
2291
+ reasoningEncryptedContent: v4.z.string().nullish()
2292
+ });
2293
+ function mapOpenAIResponseFinishReason({
2294
+ finishReason,
2295
+ hasFunctionCall
2296
+ }) {
2297
+ switch (finishReason) {
2298
+ case void 0:
2299
+ case null:
2300
+ return hasFunctionCall ? "tool-calls" : "stop";
2301
+ case "max_output_tokens":
2302
+ return "length";
2303
+ case "content_filter":
2304
+ return "content-filter";
2305
+ default:
2306
+ return hasFunctionCall ? "tool-calls" : "unknown";
2307
+ }
2308
+ }
2309
+ var openaiResponsesChunkSchema = chunkF75EQ574_cjs.lazyValidator(
2310
+ () => chunkF75EQ574_cjs.zodSchema(
2311
+ v4.z.union([
2312
+ v4.z.object({
2313
+ type: v4.z.literal("response.output_text.delta"),
2314
+ item_id: v4.z.string(),
2315
+ delta: v4.z.string(),
2316
+ logprobs: v4.z.array(
2317
+ v4.z.object({
2318
+ token: v4.z.string(),
2319
+ logprob: v4.z.number(),
2320
+ top_logprobs: v4.z.array(
2321
+ v4.z.object({
2322
+ token: v4.z.string(),
2323
+ logprob: v4.z.number()
2324
+ })
2325
+ )
2326
+ })
2327
+ ).nullish()
2328
+ }),
2329
+ v4.z.object({
2330
+ type: v4.z.enum(["response.completed", "response.incomplete"]),
2331
+ response: v4.z.object({
2332
+ incomplete_details: v4.z.object({ reason: v4.z.string() }).nullish(),
2333
+ usage: v4.z.object({
2334
+ input_tokens: v4.z.number(),
2335
+ input_tokens_details: v4.z.object({ cached_tokens: v4.z.number().nullish() }).nullish(),
2336
+ output_tokens: v4.z.number(),
2337
+ output_tokens_details: v4.z.object({ reasoning_tokens: v4.z.number().nullish() }).nullish()
2338
+ }),
2339
+ service_tier: v4.z.string().nullish()
2340
+ })
2341
+ }),
2342
+ v4.z.object({
2343
+ type: v4.z.literal("response.created"),
2344
+ response: v4.z.object({
2345
+ id: v4.z.string(),
2346
+ created_at: v4.z.number(),
2347
+ model: v4.z.string(),
2348
+ service_tier: v4.z.string().nullish()
2349
+ })
2350
+ }),
2351
+ v4.z.object({
2352
+ type: v4.z.literal("response.output_item.added"),
2353
+ output_index: v4.z.number(),
2354
+ item: v4.z.discriminatedUnion("type", [
2355
+ v4.z.object({
2356
+ type: v4.z.literal("message"),
2357
+ id: v4.z.string()
2358
+ }),
2359
+ v4.z.object({
2360
+ type: v4.z.literal("reasoning"),
2361
+ id: v4.z.string(),
2362
+ encrypted_content: v4.z.string().nullish()
2363
+ }),
2364
+ v4.z.object({
2365
+ type: v4.z.literal("function_call"),
2366
+ id: v4.z.string(),
2367
+ call_id: v4.z.string(),
2368
+ name: v4.z.string(),
2369
+ arguments: v4.z.string()
2370
+ }),
2371
+ v4.z.object({
2372
+ type: v4.z.literal("web_search_call"),
2373
+ id: v4.z.string(),
2374
+ status: v4.z.string()
2375
+ }),
2376
+ v4.z.object({
2377
+ type: v4.z.literal("computer_call"),
2378
+ id: v4.z.string(),
2379
+ status: v4.z.string()
2380
+ }),
2381
+ v4.z.object({
2382
+ type: v4.z.literal("file_search_call"),
2383
+ id: v4.z.string()
2384
+ }),
2385
+ v4.z.object({
2386
+ type: v4.z.literal("image_generation_call"),
2387
+ id: v4.z.string()
2388
+ }),
2389
+ v4.z.object({
2390
+ type: v4.z.literal("code_interpreter_call"),
2391
+ id: v4.z.string(),
2392
+ container_id: v4.z.string(),
2393
+ code: v4.z.string().nullable(),
2394
+ outputs: v4.z.array(
2395
+ v4.z.discriminatedUnion("type", [
2396
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2397
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2398
+ ])
2399
+ ).nullable(),
2400
+ status: v4.z.string()
2401
+ })
2402
+ ])
2403
+ }),
2404
+ v4.z.object({
2405
+ type: v4.z.literal("response.output_item.done"),
2406
+ output_index: v4.z.number(),
2407
+ item: v4.z.discriminatedUnion("type", [
2408
+ v4.z.object({
2409
+ type: v4.z.literal("message"),
2410
+ id: v4.z.string()
2411
+ }),
2412
+ v4.z.object({
2413
+ type: v4.z.literal("reasoning"),
2414
+ id: v4.z.string(),
2415
+ encrypted_content: v4.z.string().nullish()
2416
+ }),
2417
+ v4.z.object({
2418
+ type: v4.z.literal("function_call"),
2419
+ id: v4.z.string(),
2420
+ call_id: v4.z.string(),
2421
+ name: v4.z.string(),
2422
+ arguments: v4.z.string(),
2423
+ status: v4.z.literal("completed")
2424
+ }),
2425
+ v4.z.object({
2426
+ type: v4.z.literal("code_interpreter_call"),
2427
+ id: v4.z.string(),
2428
+ code: v4.z.string().nullable(),
2429
+ container_id: v4.z.string(),
2430
+ outputs: v4.z.array(
2431
+ v4.z.discriminatedUnion("type", [
2432
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2433
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2434
+ ])
2435
+ ).nullable()
2436
+ }),
2437
+ v4.z.object({
2438
+ type: v4.z.literal("image_generation_call"),
2439
+ id: v4.z.string(),
2440
+ result: v4.z.string()
2441
+ }),
2442
+ v4.z.object({
2443
+ type: v4.z.literal("web_search_call"),
2444
+ id: v4.z.string(),
2445
+ status: v4.z.string(),
2446
+ action: v4.z.discriminatedUnion("type", [
2447
+ v4.z.object({
2448
+ type: v4.z.literal("search"),
2449
+ query: v4.z.string().nullish(),
2450
+ sources: v4.z.array(
2451
+ v4.z.discriminatedUnion("type", [
2452
+ v4.z.object({ type: v4.z.literal("url"), url: v4.z.string() }),
2453
+ v4.z.object({ type: v4.z.literal("api"), name: v4.z.string() })
2454
+ ])
2455
+ ).nullish()
2456
+ }),
2457
+ v4.z.object({
2458
+ type: v4.z.literal("open_page"),
2459
+ url: v4.z.string()
2460
+ }),
2461
+ v4.z.object({
2462
+ type: v4.z.literal("find"),
2463
+ url: v4.z.string(),
2464
+ pattern: v4.z.string()
2465
+ })
2466
+ ])
2467
+ }),
2468
+ v4.z.object({
2469
+ type: v4.z.literal("file_search_call"),
2470
+ id: v4.z.string(),
2471
+ queries: v4.z.array(v4.z.string()),
2472
+ results: v4.z.array(
2473
+ v4.z.object({
2474
+ attributes: v4.z.record(v4.z.string(), v4.z.unknown()),
2475
+ file_id: v4.z.string(),
2476
+ filename: v4.z.string(),
2477
+ score: v4.z.number(),
2478
+ text: v4.z.string()
2479
+ })
2480
+ ).nullish()
2481
+ }),
2482
+ v4.z.object({
2483
+ type: v4.z.literal("local_shell_call"),
2484
+ id: v4.z.string(),
2485
+ call_id: v4.z.string(),
2486
+ action: v4.z.object({
2487
+ type: v4.z.literal("exec"),
2488
+ command: v4.z.array(v4.z.string()),
2489
+ timeout_ms: v4.z.number().optional(),
2490
+ user: v4.z.string().optional(),
2491
+ working_directory: v4.z.string().optional(),
2492
+ env: v4.z.record(v4.z.string(), v4.z.string()).optional()
2493
+ })
2494
+ }),
2495
+ v4.z.object({
2496
+ type: v4.z.literal("computer_call"),
2497
+ id: v4.z.string(),
2498
+ status: v4.z.literal("completed")
2499
+ })
2500
+ ])
2501
+ }),
2502
+ v4.z.object({
2503
+ type: v4.z.literal("response.function_call_arguments.delta"),
2504
+ item_id: v4.z.string(),
2505
+ output_index: v4.z.number(),
2506
+ delta: v4.z.string()
2507
+ }),
2508
+ v4.z.object({
2509
+ type: v4.z.literal("response.image_generation_call.partial_image"),
2510
+ item_id: v4.z.string(),
2511
+ output_index: v4.z.number(),
2512
+ partial_image_b64: v4.z.string()
2513
+ }),
2514
+ v4.z.object({
2515
+ type: v4.z.literal("response.code_interpreter_call_code.delta"),
2516
+ item_id: v4.z.string(),
2517
+ output_index: v4.z.number(),
2518
+ delta: v4.z.string()
2519
+ }),
2520
+ v4.z.object({
2521
+ type: v4.z.literal("response.code_interpreter_call_code.done"),
2522
+ item_id: v4.z.string(),
2523
+ output_index: v4.z.number(),
2524
+ code: v4.z.string()
2525
+ }),
2526
+ v4.z.object({
2527
+ type: v4.z.literal("response.output_text.annotation.added"),
2528
+ annotation: v4.z.discriminatedUnion("type", [
2529
+ v4.z.object({
2530
+ type: v4.z.literal("url_citation"),
2531
+ start_index: v4.z.number(),
2532
+ end_index: v4.z.number(),
2533
+ url: v4.z.string(),
2534
+ title: v4.z.string()
2535
+ }),
2536
+ v4.z.object({
2537
+ type: v4.z.literal("file_citation"),
2538
+ file_id: v4.z.string(),
2539
+ filename: v4.z.string().nullish(),
2540
+ index: v4.z.number().nullish(),
2541
+ start_index: v4.z.number().nullish(),
2542
+ end_index: v4.z.number().nullish(),
2543
+ quote: v4.z.string().nullish()
2544
+ })
2545
+ ])
2546
+ }),
2547
+ v4.z.object({
2548
+ type: v4.z.literal("response.reasoning_summary_part.added"),
2549
+ item_id: v4.z.string(),
2550
+ summary_index: v4.z.number()
2551
+ }),
2552
+ v4.z.object({
2553
+ type: v4.z.literal("response.reasoning_summary_text.delta"),
2554
+ item_id: v4.z.string(),
2555
+ summary_index: v4.z.number(),
2556
+ delta: v4.z.string()
2557
+ }),
2558
+ v4.z.object({
2559
+ type: v4.z.literal("response.reasoning_summary_part.done"),
2560
+ item_id: v4.z.string(),
2561
+ summary_index: v4.z.number()
2562
+ }),
2563
+ v4.z.object({
2564
+ type: v4.z.literal("error"),
2565
+ sequence_number: v4.z.number(),
2566
+ error: v4.z.object({
2567
+ type: v4.z.string(),
2568
+ code: v4.z.string(),
2569
+ message: v4.z.string(),
2570
+ param: v4.z.string().nullish()
2571
+ })
2572
+ }),
2573
+ v4.z.object({ type: v4.z.string() }).loose().transform((value) => ({
2574
+ type: "unknown_chunk",
2575
+ message: value.type
2576
+ }))
2577
+ // fallback for unknown chunks
2578
+ ])
2579
+ )
2580
+ );
2581
+ var openaiResponsesResponseSchema = chunkF75EQ574_cjs.lazyValidator(
2582
+ () => chunkF75EQ574_cjs.zodSchema(
2583
+ v4.z.object({
2584
+ id: v4.z.string().optional(),
2585
+ created_at: v4.z.number().optional(),
2586
+ error: v4.z.object({
2587
+ message: v4.z.string(),
2588
+ type: v4.z.string(),
2589
+ param: v4.z.string().nullish(),
2590
+ code: v4.z.string()
2591
+ }).nullish(),
2592
+ model: v4.z.string().optional(),
2593
+ output: v4.z.array(
2594
+ v4.z.discriminatedUnion("type", [
2595
+ v4.z.object({
2596
+ type: v4.z.literal("message"),
2597
+ role: v4.z.literal("assistant"),
2598
+ id: v4.z.string(),
2599
+ content: v4.z.array(
2600
+ v4.z.object({
2601
+ type: v4.z.literal("output_text"),
2602
+ text: v4.z.string(),
2603
+ logprobs: v4.z.array(
2604
+ v4.z.object({
2605
+ token: v4.z.string(),
2606
+ logprob: v4.z.number(),
2607
+ top_logprobs: v4.z.array(
2608
+ v4.z.object({
2609
+ token: v4.z.string(),
2610
+ logprob: v4.z.number()
2611
+ })
2612
+ )
2613
+ })
2614
+ ).nullish(),
2615
+ annotations: v4.z.array(
2616
+ v4.z.discriminatedUnion("type", [
2617
+ v4.z.object({
2618
+ type: v4.z.literal("url_citation"),
2619
+ start_index: v4.z.number(),
2620
+ end_index: v4.z.number(),
2621
+ url: v4.z.string(),
2622
+ title: v4.z.string()
2623
+ }),
2624
+ v4.z.object({
2625
+ type: v4.z.literal("file_citation"),
2626
+ file_id: v4.z.string(),
2627
+ filename: v4.z.string().nullish(),
2628
+ index: v4.z.number().nullish(),
2629
+ start_index: v4.z.number().nullish(),
2630
+ end_index: v4.z.number().nullish(),
2631
+ quote: v4.z.string().nullish()
2632
+ }),
2633
+ v4.z.object({
2634
+ type: v4.z.literal("container_file_citation"),
2635
+ container_id: v4.z.string(),
2636
+ file_id: v4.z.string(),
2637
+ filename: v4.z.string().nullish(),
2638
+ start_index: v4.z.number().nullish(),
2639
+ end_index: v4.z.number().nullish(),
2640
+ index: v4.z.number().nullish()
2641
+ }),
2642
+ v4.z.object({
2643
+ type: v4.z.literal("file_path"),
2644
+ file_id: v4.z.string(),
2645
+ index: v4.z.number().nullish()
2646
+ })
2647
+ ])
2648
+ )
2649
+ })
2650
+ )
2651
+ }),
2652
+ v4.z.object({
2653
+ type: v4.z.literal("web_search_call"),
2654
+ id: v4.z.string(),
2655
+ status: v4.z.string(),
2656
+ action: v4.z.discriminatedUnion("type", [
2657
+ v4.z.object({
2658
+ type: v4.z.literal("search"),
2659
+ query: v4.z.string().nullish(),
2660
+ sources: v4.z.array(
2661
+ v4.z.discriminatedUnion("type", [
2662
+ v4.z.object({ type: v4.z.literal("url"), url: v4.z.string() }),
2663
+ v4.z.object({ type: v4.z.literal("api"), name: v4.z.string() })
2664
+ ])
2665
+ ).nullish()
2666
+ }),
2667
+ v4.z.object({
2668
+ type: v4.z.literal("open_page"),
2669
+ url: v4.z.string()
2670
+ }),
2671
+ v4.z.object({
2672
+ type: v4.z.literal("find"),
2673
+ url: v4.z.string(),
2674
+ pattern: v4.z.string()
2675
+ })
2676
+ ])
2677
+ }),
2678
+ v4.z.object({
2679
+ type: v4.z.literal("file_search_call"),
2680
+ id: v4.z.string(),
2681
+ queries: v4.z.array(v4.z.string()),
2682
+ results: v4.z.array(
2683
+ v4.z.object({
2684
+ attributes: v4.z.record(
2685
+ v4.z.string(),
2686
+ v4.z.union([v4.z.string(), v4.z.number(), v4.z.boolean()])
2687
+ ),
2688
+ file_id: v4.z.string(),
2689
+ filename: v4.z.string(),
2690
+ score: v4.z.number(),
2691
+ text: v4.z.string()
2692
+ })
2693
+ ).nullish()
2694
+ }),
2695
+ v4.z.object({
2696
+ type: v4.z.literal("code_interpreter_call"),
2697
+ id: v4.z.string(),
2698
+ code: v4.z.string().nullable(),
2699
+ container_id: v4.z.string(),
2700
+ outputs: v4.z.array(
2701
+ v4.z.discriminatedUnion("type", [
2702
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2703
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2704
+ ])
2705
+ ).nullable()
2706
+ }),
2707
+ v4.z.object({
2708
+ type: v4.z.literal("image_generation_call"),
2709
+ id: v4.z.string(),
2710
+ result: v4.z.string()
2711
+ }),
2712
+ v4.z.object({
2713
+ type: v4.z.literal("local_shell_call"),
2714
+ id: v4.z.string(),
2715
+ call_id: v4.z.string(),
2716
+ action: v4.z.object({
2717
+ type: v4.z.literal("exec"),
2718
+ command: v4.z.array(v4.z.string()),
2719
+ timeout_ms: v4.z.number().optional(),
2720
+ user: v4.z.string().optional(),
2721
+ working_directory: v4.z.string().optional(),
2722
+ env: v4.z.record(v4.z.string(), v4.z.string()).optional()
2723
+ })
2724
+ }),
2725
+ v4.z.object({
2726
+ type: v4.z.literal("function_call"),
2727
+ call_id: v4.z.string(),
2728
+ name: v4.z.string(),
2729
+ arguments: v4.z.string(),
2730
+ id: v4.z.string()
2731
+ }),
2732
+ v4.z.object({
2733
+ type: v4.z.literal("computer_call"),
2734
+ id: v4.z.string(),
2735
+ status: v4.z.string().optional()
2736
+ }),
2737
+ v4.z.object({
2738
+ type: v4.z.literal("reasoning"),
2739
+ id: v4.z.string(),
2740
+ encrypted_content: v4.z.string().nullish(),
2741
+ summary: v4.z.array(
2742
+ v4.z.object({
2743
+ type: v4.z.literal("summary_text"),
2744
+ text: v4.z.string()
2745
+ })
2746
+ )
2747
+ })
2748
+ ])
2749
+ ).optional(),
2750
+ service_tier: v4.z.string().nullish(),
2751
+ incomplete_details: v4.z.object({ reason: v4.z.string() }).nullish(),
2752
+ usage: v4.z.object({
2753
+ input_tokens: v4.z.number(),
2754
+ input_tokens_details: v4.z.object({ cached_tokens: v4.z.number().nullish() }).nullish(),
2755
+ output_tokens: v4.z.number(),
2756
+ output_tokens_details: v4.z.object({ reasoning_tokens: v4.z.number().nullish() }).nullish()
2757
+ }).optional()
2758
+ })
2759
+ )
2760
+ );
2761
+ var TOP_LOGPROBS_MAX = 20;
2762
+ var openaiResponsesProviderOptionsSchema = chunkF75EQ574_cjs.lazyValidator(
2763
+ () => chunkF75EQ574_cjs.zodSchema(
2764
+ v4.z.object({
2765
+ conversation: v4.z.string().nullish(),
2766
+ include: v4.z.array(
2767
+ v4.z.enum([
2768
+ "reasoning.encrypted_content",
2769
+ // handled internally by default, only needed for unknown reasoning models
2770
+ "file_search_call.results",
2771
+ "message.output_text.logprobs"
2772
+ ])
2773
+ ).nullish(),
2774
+ instructions: v4.z.string().nullish(),
2775
+ /**
2776
+ * Return the log probabilities of the tokens.
2777
+ *
2778
+ * Setting to true will return the log probabilities of the tokens that
2779
+ * were generated.
2780
+ *
2781
+ * Setting to a number will return the log probabilities of the top n
2782
+ * tokens that were generated.
2783
+ *
2784
+ * @see https://platform.openai.com/docs/api-reference/responses/create
2785
+ * @see https://cookbook.openai.com/examples/using_logprobs
2786
+ */
2787
+ logprobs: v4.z.union([v4.z.boolean(), v4.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
2788
+ /**
2789
+ * The maximum number of total calls to built-in tools that can be processed in a response.
2790
+ * This maximum number applies across all built-in tool calls, not per individual tool.
2791
+ * Any further attempts to call a tool by the model will be ignored.
2792
+ */
2793
+ maxToolCalls: v4.z.number().nullish(),
2794
+ metadata: v4.z.any().nullish(),
2795
+ parallelToolCalls: v4.z.boolean().nullish(),
2796
+ previousResponseId: v4.z.string().nullish(),
2797
+ promptCacheKey: v4.z.string().nullish(),
2798
+ /**
2799
+ * The retention policy for the prompt cache.
2800
+ * - 'in_memory': Default. Standard prompt caching behavior.
2801
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
2802
+ * Currently only available for 5.1 series models.
2803
+ *
2804
+ * @default 'in_memory'
2805
+ */
2806
+ promptCacheRetention: v4.z.enum(["in_memory", "24h"]).nullish(),
2807
+ reasoningEffort: v4.z.string().nullish(),
2808
+ reasoningSummary: v4.z.string().nullish(),
2809
+ safetyIdentifier: v4.z.string().nullish(),
2810
+ serviceTier: v4.z.enum(["auto", "flex", "priority", "default"]).nullish(),
2811
+ store: v4.z.boolean().nullish(),
2812
+ strictJsonSchema: v4.z.boolean().nullish(),
2813
+ textVerbosity: v4.z.enum(["low", "medium", "high"]).nullish(),
2814
+ truncation: v4.z.enum(["auto", "disabled"]).nullish(),
2815
+ user: v4.z.string().nullish()
2816
+ })
2817
+ )
2818
+ );
2819
+ var codeInterpreterInputSchema = chunkF75EQ574_cjs.lazySchema(
2820
+ () => chunkF75EQ574_cjs.zodSchema(
2821
+ v4.z.object({
2822
+ code: v4.z.string().nullish(),
2823
+ containerId: v4.z.string()
2824
+ })
2825
+ )
2826
+ );
2827
+ var codeInterpreterOutputSchema = chunkF75EQ574_cjs.lazySchema(
2828
+ () => chunkF75EQ574_cjs.zodSchema(
2829
+ v4.z.object({
2830
+ outputs: v4.z.array(
2831
+ v4.z.discriminatedUnion("type", [
2832
+ v4.z.object({ type: v4.z.literal("logs"), logs: v4.z.string() }),
2833
+ v4.z.object({ type: v4.z.literal("image"), url: v4.z.string() })
2834
+ ])
2835
+ ).nullish()
2836
+ })
2837
+ )
2838
+ );
2839
+ var codeInterpreterArgsSchema = chunkF75EQ574_cjs.lazySchema(
2840
+ () => chunkF75EQ574_cjs.zodSchema(
2841
+ v4.z.object({
2842
+ container: v4.z.union([
2843
+ v4.z.string(),
2844
+ v4.z.object({
2845
+ fileIds: v4.z.array(v4.z.string()).optional()
2846
+ })
2847
+ ]).optional()
2848
+ })
2849
+ )
2850
+ );
2851
+ var codeInterpreterToolFactory = chunkF75EQ574_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2852
+ id: "openai.code_interpreter",
2853
+ name: "code_interpreter",
2854
+ inputSchema: codeInterpreterInputSchema,
2855
+ outputSchema: codeInterpreterOutputSchema
2856
+ });
2857
+ var codeInterpreter = (args = {}) => {
2858
+ return codeInterpreterToolFactory(args);
2859
+ };
2860
+ var comparisonFilterSchema = v4.z.object({
2861
+ key: v4.z.string(),
2862
+ type: v4.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
2863
+ value: v4.z.union([v4.z.string(), v4.z.number(), v4.z.boolean()])
2864
+ });
2865
+ var compoundFilterSchema = v4.z.object({
2866
+ type: v4.z.enum(["and", "or"]),
2867
+ filters: v4.z.array(
2868
+ v4.z.union([comparisonFilterSchema, v4.z.lazy(() => compoundFilterSchema)])
2869
+ )
2870
+ });
2871
+ var fileSearchArgsSchema = chunkF75EQ574_cjs.lazySchema(
2872
+ () => chunkF75EQ574_cjs.zodSchema(
2873
+ v4.z.object({
2874
+ vectorStoreIds: v4.z.array(v4.z.string()),
2875
+ maxNumResults: v4.z.number().optional(),
2876
+ ranking: v4.z.object({
2877
+ ranker: v4.z.string().optional(),
2878
+ scoreThreshold: v4.z.number().optional()
2879
+ }).optional(),
2880
+ filters: v4.z.union([comparisonFilterSchema, compoundFilterSchema]).optional()
2881
+ })
2882
+ )
2883
+ );
2884
+ var fileSearchOutputSchema = chunkF75EQ574_cjs.lazySchema(
2885
+ () => chunkF75EQ574_cjs.zodSchema(
2886
+ v4.z.object({
2887
+ queries: v4.z.array(v4.z.string()),
2888
+ results: v4.z.array(
2889
+ v4.z.object({
2890
+ attributes: v4.z.record(v4.z.string(), v4.z.unknown()),
2891
+ fileId: v4.z.string(),
2892
+ filename: v4.z.string(),
2893
+ score: v4.z.number(),
2894
+ text: v4.z.string()
2895
+ })
2896
+ ).nullable()
2897
+ })
2898
+ )
2899
+ );
2900
+ var fileSearch = chunkF75EQ574_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2901
+ id: "openai.file_search",
2902
+ name: "file_search",
2903
+ inputSchema: v4.z.object({}),
2904
+ outputSchema: fileSearchOutputSchema
2905
+ });
2906
+ var webSearchArgsSchema = chunkF75EQ574_cjs.lazySchema(
2907
+ () => chunkF75EQ574_cjs.zodSchema(
2908
+ v4.z.object({
2909
+ externalWebAccess: v4.z.boolean().optional(),
2910
+ filters: v4.z.object({ allowedDomains: v4.z.array(v4.z.string()).optional() }).optional(),
2911
+ searchContextSize: v4.z.enum(["low", "medium", "high"]).optional(),
2912
+ userLocation: v4.z.object({
2913
+ type: v4.z.literal("approximate"),
2914
+ country: v4.z.string().optional(),
2915
+ city: v4.z.string().optional(),
2916
+ region: v4.z.string().optional(),
2917
+ timezone: v4.z.string().optional()
2918
+ }).optional()
2919
+ })
2920
+ )
2921
+ );
2922
+ var webSearchInputSchema = chunkF75EQ574_cjs.lazySchema(() => chunkF75EQ574_cjs.zodSchema(v4.z.object({})));
2923
+ var webSearchOutputSchema = chunkF75EQ574_cjs.lazySchema(
2924
+ () => chunkF75EQ574_cjs.zodSchema(
2925
+ v4.z.object({
2926
+ action: v4.z.discriminatedUnion("type", [
2927
+ v4.z.object({
2928
+ type: v4.z.literal("search"),
2929
+ query: v4.z.string().optional()
2930
+ }),
2931
+ v4.z.object({
2932
+ type: v4.z.literal("openPage"),
2933
+ url: v4.z.string()
2934
+ }),
2935
+ v4.z.object({
2936
+ type: v4.z.literal("find"),
2937
+ url: v4.z.string(),
2938
+ pattern: v4.z.string()
2939
+ })
2940
+ ]),
2941
+ sources: v4.z.array(
2942
+ v4.z.discriminatedUnion("type", [
2943
+ v4.z.object({ type: v4.z.literal("url"), url: v4.z.string() }),
2944
+ v4.z.object({ type: v4.z.literal("api"), name: v4.z.string() })
2945
+ ])
2946
+ ).optional()
2947
+ })
2948
+ )
2949
+ );
2950
+ chunkF75EQ574_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2951
+ id: "openai.web_search",
2952
+ name: "web_search",
2953
+ inputSchema: webSearchInputSchema,
2954
+ outputSchema: webSearchOutputSchema
2955
+ });
2956
+ var webSearchPreviewArgsSchema = chunkF75EQ574_cjs.lazySchema(
2957
+ () => chunkF75EQ574_cjs.zodSchema(
2958
+ v4.z.object({
2959
+ searchContextSize: v4.z.enum(["low", "medium", "high"]).optional(),
2960
+ userLocation: v4.z.object({
2961
+ type: v4.z.literal("approximate"),
2962
+ country: v4.z.string().optional(),
2963
+ city: v4.z.string().optional(),
2964
+ region: v4.z.string().optional(),
2965
+ timezone: v4.z.string().optional()
2966
+ }).optional()
2967
+ })
2968
+ )
2969
+ );
2970
+ var webSearchPreviewInputSchema = chunkF75EQ574_cjs.lazySchema(
2971
+ () => chunkF75EQ574_cjs.zodSchema(v4.z.object({}))
2972
+ );
2973
+ var webSearchPreviewOutputSchema = chunkF75EQ574_cjs.lazySchema(
2974
+ () => chunkF75EQ574_cjs.zodSchema(
2975
+ v4.z.object({
2976
+ action: v4.z.discriminatedUnion("type", [
2977
+ v4.z.object({
2978
+ type: v4.z.literal("search"),
2979
+ query: v4.z.string().optional()
2980
+ }),
2981
+ v4.z.object({
2982
+ type: v4.z.literal("openPage"),
2983
+ url: v4.z.string()
2984
+ }),
2985
+ v4.z.object({
2986
+ type: v4.z.literal("find"),
2987
+ url: v4.z.string(),
2988
+ pattern: v4.z.string()
2989
+ })
2990
+ ])
2991
+ })
2992
+ )
2993
+ );
2994
+ var webSearchPreview = chunkF75EQ574_cjs.createProviderDefinedToolFactoryWithOutputSchema({
2995
+ id: "openai.web_search_preview",
2996
+ name: "web_search_preview",
2997
+ inputSchema: webSearchPreviewInputSchema,
2998
+ outputSchema: webSearchPreviewOutputSchema
2999
+ });
3000
+ var imageGenerationArgsSchema = chunkF75EQ574_cjs.lazySchema(
3001
+ () => chunkF75EQ574_cjs.zodSchema(
3002
+ v4.z.object({
3003
+ background: v4.z.enum(["auto", "opaque", "transparent"]).optional(),
3004
+ inputFidelity: v4.z.enum(["low", "high"]).optional(),
3005
+ inputImageMask: v4.z.object({
3006
+ fileId: v4.z.string().optional(),
3007
+ imageUrl: v4.z.string().optional()
3008
+ }).optional(),
3009
+ model: v4.z.string().optional(),
3010
+ moderation: v4.z.enum(["auto"]).optional(),
3011
+ outputCompression: v4.z.number().int().min(0).max(100).optional(),
3012
+ outputFormat: v4.z.enum(["png", "jpeg", "webp"]).optional(),
3013
+ partialImages: v4.z.number().int().min(0).max(3).optional(),
3014
+ quality: v4.z.enum(["auto", "low", "medium", "high"]).optional(),
3015
+ size: v4.z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
3016
+ }).strict()
3017
+ )
3018
+ );
3019
+ var imageGenerationInputSchema = chunkF75EQ574_cjs.lazySchema(() => chunkF75EQ574_cjs.zodSchema(v4.z.object({})));
3020
+ var imageGenerationOutputSchema = chunkF75EQ574_cjs.lazySchema(
3021
+ () => chunkF75EQ574_cjs.zodSchema(v4.z.object({ result: v4.z.string() }))
3022
+ );
3023
+ var imageGenerationToolFactory = chunkF75EQ574_cjs.createProviderDefinedToolFactoryWithOutputSchema({
3024
+ id: "openai.image_generation",
3025
+ name: "image_generation",
3026
+ inputSchema: imageGenerationInputSchema,
3027
+ outputSchema: imageGenerationOutputSchema
3028
+ });
3029
+ var imageGeneration = (args = {}) => {
3030
+ return imageGenerationToolFactory(args);
3031
+ };
3032
+ async function prepareResponsesTools({
3033
+ tools,
3034
+ toolChoice,
3035
+ strictJsonSchema
3036
+ }) {
3037
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
3038
+ const toolWarnings = [];
3039
+ if (tools == null) {
3040
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
3041
+ }
3042
+ const openaiTools = [];
3043
+ for (const tool of tools) {
3044
+ switch (tool.type) {
3045
+ case "function":
3046
+ openaiTools.push({
3047
+ type: "function",
3048
+ name: tool.name,
3049
+ description: tool.description,
3050
+ parameters: tool.inputSchema,
3051
+ strict: strictJsonSchema
3052
+ });
3053
+ break;
3054
+ case "provider-defined": {
3055
+ switch (tool.id) {
3056
+ case "openai.file_search": {
3057
+ const args = await chunkF75EQ574_cjs.validateTypes({
3058
+ value: tool.args,
3059
+ schema: fileSearchArgsSchema
3060
+ });
3061
+ openaiTools.push({
3062
+ type: "file_search",
3063
+ vector_store_ids: args.vectorStoreIds,
3064
+ max_num_results: args.maxNumResults,
3065
+ ranking_options: args.ranking ? {
3066
+ ranker: args.ranking.ranker,
3067
+ score_threshold: args.ranking.scoreThreshold
3068
+ } : void 0,
3069
+ filters: args.filters
3070
+ });
3071
+ break;
3072
+ }
3073
+ case "openai.local_shell": {
3074
+ openaiTools.push({
3075
+ type: "local_shell"
3076
+ });
3077
+ break;
3078
+ }
3079
+ case "openai.web_search_preview": {
3080
+ const args = await chunkF75EQ574_cjs.validateTypes({
3081
+ value: tool.args,
3082
+ schema: webSearchPreviewArgsSchema
3083
+ });
3084
+ openaiTools.push({
3085
+ type: "web_search_preview",
3086
+ search_context_size: args.searchContextSize,
3087
+ user_location: args.userLocation
3088
+ });
3089
+ break;
3090
+ }
3091
+ case "openai.web_search": {
3092
+ const args = await chunkF75EQ574_cjs.validateTypes({
3093
+ value: tool.args,
3094
+ schema: webSearchArgsSchema
3095
+ });
3096
+ openaiTools.push({
3097
+ type: "web_search",
3098
+ filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
3099
+ external_web_access: args.externalWebAccess,
3100
+ search_context_size: args.searchContextSize,
3101
+ user_location: args.userLocation
3102
+ });
3103
+ break;
3104
+ }
3105
+ case "openai.code_interpreter": {
3106
+ const args = await chunkF75EQ574_cjs.validateTypes({
3107
+ value: tool.args,
3108
+ schema: codeInterpreterArgsSchema
3109
+ });
3110
+ openaiTools.push({
3111
+ type: "code_interpreter",
3112
+ container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
3113
+ });
3114
+ break;
3115
+ }
3116
+ case "openai.image_generation": {
3117
+ const args = await chunkF75EQ574_cjs.validateTypes({
3118
+ value: tool.args,
3119
+ schema: imageGenerationArgsSchema
3120
+ });
3121
+ openaiTools.push({
3122
+ type: "image_generation",
3123
+ background: args.background,
3124
+ input_fidelity: args.inputFidelity,
3125
+ input_image_mask: args.inputImageMask ? {
3126
+ file_id: args.inputImageMask.fileId,
3127
+ image_url: args.inputImageMask.imageUrl
3128
+ } : void 0,
3129
+ model: args.model,
3130
+ size: args.size,
3131
+ quality: args.quality,
3132
+ moderation: args.moderation,
3133
+ output_format: args.outputFormat,
3134
+ output_compression: args.outputCompression
3135
+ });
3136
+ break;
3137
+ }
3138
+ }
3139
+ break;
3140
+ }
3141
+ default:
3142
+ toolWarnings.push({ type: "unsupported-tool", tool });
3143
+ break;
3144
+ }
3145
+ }
3146
+ if (toolChoice == null) {
3147
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
3148
+ }
3149
+ const type = toolChoice.type;
3150
+ switch (type) {
3151
+ case "auto":
3152
+ case "none":
3153
+ case "required":
3154
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
3155
+ case "tool":
3156
+ return {
3157
+ tools: openaiTools,
3158
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
3159
+ toolWarnings
3160
+ };
3161
+ default: {
3162
+ const _exhaustiveCheck = type;
3163
+ throw new chunkF75EQ574_cjs.UnsupportedFunctionalityError({
3164
+ functionality: `tool choice type: ${_exhaustiveCheck}`
3165
+ });
3166
+ }
3167
+ }
3168
+ }
3169
+ var OpenAIResponsesLanguageModel = class {
3170
+ constructor(modelId, config) {
3171
+ this.specificationVersion = "v2";
3172
+ this.supportedUrls = {
3173
+ "image/*": [/^https?:\/\/.*$/],
3174
+ "application/pdf": [/^https?:\/\/.*$/]
3175
+ };
3176
+ this.modelId = modelId;
3177
+ this.config = config;
3178
+ }
3179
+ get provider() {
3180
+ return this.config.provider;
3181
+ }
3182
+ async getArgs({
3183
+ maxOutputTokens,
3184
+ temperature,
3185
+ stopSequences,
3186
+ topP,
3187
+ topK,
3188
+ presencePenalty,
3189
+ frequencyPenalty,
3190
+ seed,
3191
+ prompt,
3192
+ providerOptions,
3193
+ tools,
3194
+ toolChoice,
3195
+ responseFormat
3196
+ }) {
3197
+ var _a, _b, _c, _d;
3198
+ const warnings = [];
3199
+ const modelConfig = getResponsesModelConfig(this.modelId);
3200
+ if (topK != null) {
3201
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
3202
+ }
3203
+ if (seed != null) {
3204
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
3205
+ }
3206
+ if (presencePenalty != null) {
3207
+ warnings.push({
3208
+ type: "unsupported-setting",
3209
+ setting: "presencePenalty"
3210
+ });
3211
+ }
3212
+ if (frequencyPenalty != null) {
3213
+ warnings.push({
3214
+ type: "unsupported-setting",
3215
+ setting: "frequencyPenalty"
3216
+ });
3217
+ }
3218
+ if (stopSequences != null) {
3219
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
3220
+ }
3221
+ const openaiOptions = await chunkF75EQ574_cjs.parseProviderOptions({
3222
+ provider: "openai",
3223
+ providerOptions,
3224
+ schema: openaiResponsesProviderOptionsSchema
3225
+ });
3226
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
3227
+ warnings.push({
3228
+ type: "unsupported-setting",
3229
+ setting: "conversation",
3230
+ details: "conversation and previousResponseId cannot be used together"
3231
+ });
3232
+ }
3233
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
3234
+ prompt,
3235
+ systemMessageMode: modelConfig.systemMessageMode,
3236
+ fileIdPrefixes: this.config.fileIdPrefixes,
3237
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
3238
+ hasLocalShellTool: hasOpenAITool("openai.local_shell")
3239
+ });
3240
+ warnings.push(...inputWarnings);
3241
+ const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
3242
+ let include = openaiOptions == null ? void 0 : openaiOptions.include;
3243
+ function addInclude(key) {
3244
+ if (include == null) {
3245
+ include = [key];
3246
+ } else if (!include.includes(key)) {
3247
+ include = [...include, key];
3248
+ }
3249
+ }
3250
+ function hasOpenAITool(id) {
3251
+ return (tools == null ? void 0 : tools.find(
3252
+ (tool) => tool.type === "provider-defined" && tool.id === id
3253
+ )) != null;
3254
+ }
3255
+ const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
3256
+ if (topLogprobs) {
3257
+ addInclude("message.output_text.logprobs");
3258
+ }
3259
+ const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
3260
+ (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
3261
+ )) == null ? void 0 : _c.name;
3262
+ if (webSearchToolName) {
3263
+ addInclude("web_search_call.action.sources");
3264
+ }
3265
+ if (hasOpenAITool("openai.code_interpreter")) {
3266
+ addInclude("code_interpreter_call.outputs");
3267
+ }
3268
+ const store = openaiOptions == null ? void 0 : openaiOptions.store;
3269
+ if (store === false && modelConfig.isReasoningModel) {
3270
+ addInclude("reasoning.encrypted_content");
3271
+ }
3272
+ const baseArgs = {
3273
+ model: this.modelId,
3274
+ input,
3275
+ temperature,
3276
+ top_p: topP,
3277
+ max_output_tokens: maxOutputTokens,
3278
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
3279
+ text: {
3280
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
3281
+ format: responseFormat.schema != null ? {
3282
+ type: "json_schema",
3283
+ strict: strictJsonSchema,
3284
+ name: (_d = responseFormat.name) != null ? _d : "response",
3285
+ description: responseFormat.description,
3286
+ schema: responseFormat.schema
3287
+ } : { type: "json_object" }
3288
+ },
3289
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
3290
+ verbosity: openaiOptions.textVerbosity
3291
+ }
3292
+ }
3293
+ },
3294
+ // provider options:
3295
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
3296
+ max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
3297
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
3298
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
3299
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
3300
+ store,
3301
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
3302
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
3303
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
3304
+ include,
3305
+ prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
3306
+ prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
3307
+ safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
3308
+ top_logprobs: topLogprobs,
3309
+ truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
3310
+ // model-specific settings:
3311
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
3312
+ reasoning: {
3313
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
3314
+ effort: openaiOptions.reasoningEffort
3315
+ },
3316
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
3317
+ summary: openaiOptions.reasoningSummary
3318
+ }
3319
+ }
3320
+ }
3321
+ };
3322
+ if (modelConfig.isReasoningModel) {
3323
+ if (baseArgs.temperature != null) {
3324
+ baseArgs.temperature = void 0;
3325
+ warnings.push({
3326
+ type: "unsupported-setting",
3327
+ setting: "temperature",
3328
+ details: "temperature is not supported for reasoning models"
3329
+ });
3330
+ }
3331
+ if (baseArgs.top_p != null) {
3332
+ baseArgs.top_p = void 0;
3333
+ warnings.push({
3334
+ type: "unsupported-setting",
3335
+ setting: "topP",
3336
+ details: "topP is not supported for reasoning models"
3337
+ });
3338
+ }
3339
+ } else {
3340
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
3341
+ warnings.push({
3342
+ type: "unsupported-setting",
3343
+ setting: "reasoningEffort",
3344
+ details: "reasoningEffort is not supported for non-reasoning models"
3345
+ });
3346
+ }
3347
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
3348
+ warnings.push({
3349
+ type: "unsupported-setting",
3350
+ setting: "reasoningSummary",
3351
+ details: "reasoningSummary is not supported for non-reasoning models"
3352
+ });
3353
+ }
3354
+ }
3355
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
3356
+ warnings.push({
3357
+ type: "unsupported-setting",
3358
+ setting: "serviceTier",
3359
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
3360
+ });
3361
+ delete baseArgs.service_tier;
3362
+ }
3363
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
3364
+ warnings.push({
3365
+ type: "unsupported-setting",
3366
+ setting: "serviceTier",
3367
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
3368
+ });
3369
+ delete baseArgs.service_tier;
3370
+ }
3371
+ const {
3372
+ tools: openaiTools,
3373
+ toolChoice: openaiToolChoice,
3374
+ toolWarnings
3375
+ } = await prepareResponsesTools({
3376
+ tools,
3377
+ toolChoice,
3378
+ strictJsonSchema
3379
+ });
3380
+ return {
3381
+ webSearchToolName,
3382
+ args: {
3383
+ ...baseArgs,
3384
+ tools: openaiTools,
3385
+ tool_choice: openaiToolChoice
3386
+ },
3387
+ warnings: [...warnings, ...toolWarnings],
3388
+ store
3389
+ };
3390
+ }
3391
+ async doGenerate(options) {
3392
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
3393
+ const {
3394
+ args: body,
3395
+ warnings,
3396
+ webSearchToolName
3397
+ } = await this.getArgs(options);
3398
+ const url = this.config.url({
3399
+ path: "/responses",
3400
+ modelId: this.modelId
3401
+ });
3402
+ const {
3403
+ responseHeaders,
3404
+ value: response,
3405
+ rawValue: rawResponse
3406
+ } = await chunkF75EQ574_cjs.postJsonToApi({
3407
+ url,
3408
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
3409
+ body,
3410
+ failedResponseHandler: openaiFailedResponseHandler,
3411
+ successfulResponseHandler: chunkF75EQ574_cjs.createJsonResponseHandler(
3412
+ openaiResponsesResponseSchema
3413
+ ),
3414
+ abortSignal: options.abortSignal,
3415
+ fetch: this.config.fetch
3416
+ });
3417
+ if (response.error) {
3418
+ throw new chunkF75EQ574_cjs.APICallError({
3419
+ message: response.error.message,
3420
+ url,
3421
+ requestBodyValues: body,
3422
+ statusCode: 400,
3423
+ responseHeaders,
3424
+ responseBody: rawResponse,
3425
+ isRetryable: false
3426
+ });
3427
+ }
3428
+ const content = [];
3429
+ const logprobs = [];
3430
+ let hasFunctionCall = false;
3431
+ for (const part of response.output) {
3432
+ switch (part.type) {
3433
+ case "reasoning": {
3434
+ if (part.summary.length === 0) {
3435
+ part.summary.push({ type: "summary_text", text: "" });
3436
+ }
3437
+ for (const summary of part.summary) {
3438
+ content.push({
3439
+ type: "reasoning",
3440
+ text: summary.text,
3441
+ providerMetadata: {
3442
+ openai: {
3443
+ itemId: part.id,
3444
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
3445
+ }
3446
+ }
3447
+ });
3448
+ }
3449
+ break;
3450
+ }
3451
+ case "image_generation_call": {
3452
+ content.push({
3453
+ type: "tool-call",
3454
+ toolCallId: part.id,
3455
+ toolName: "image_generation",
3456
+ input: "{}",
3457
+ providerExecuted: true
3458
+ });
3459
+ content.push({
3460
+ type: "tool-result",
3461
+ toolCallId: part.id,
3462
+ toolName: "image_generation",
3463
+ result: {
3464
+ result: part.result
3465
+ },
3466
+ providerExecuted: true
3467
+ });
3468
+ break;
3469
+ }
3470
+ case "local_shell_call": {
3471
+ content.push({
3472
+ type: "tool-call",
3473
+ toolCallId: part.call_id,
3474
+ toolName: "local_shell",
3475
+ input: JSON.stringify({
3476
+ action: part.action
3477
+ }),
3478
+ providerMetadata: {
3479
+ openai: {
3480
+ itemId: part.id
3481
+ }
3482
+ }
3483
+ });
3484
+ break;
3485
+ }
3486
+ case "message": {
3487
+ for (const contentPart of part.content) {
3488
+ if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
3489
+ logprobs.push(contentPart.logprobs);
3490
+ }
3491
+ content.push({
3492
+ type: "text",
3493
+ text: contentPart.text,
3494
+ providerMetadata: {
3495
+ openai: {
3496
+ itemId: part.id
3497
+ }
3498
+ }
3499
+ });
3500
+ for (const annotation of contentPart.annotations) {
3501
+ if (annotation.type === "url_citation") {
3502
+ content.push({
3503
+ type: "source",
3504
+ sourceType: "url",
3505
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : chunkF75EQ574_cjs.generateId(),
3506
+ url: annotation.url,
3507
+ title: annotation.title
3508
+ });
3509
+ } else if (annotation.type === "file_citation") {
3510
+ content.push({
3511
+ type: "source",
3512
+ sourceType: "document",
3513
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : chunkF75EQ574_cjs.generateId(),
3514
+ mediaType: "text/plain",
3515
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
3516
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
3517
+ ...annotation.file_id ? {
3518
+ providerMetadata: {
3519
+ openai: {
3520
+ fileId: annotation.file_id
3521
+ }
3522
+ }
3523
+ } : {}
3524
+ });
3525
+ }
3526
+ }
3527
+ }
3528
+ break;
3529
+ }
3530
+ case "function_call": {
3531
+ hasFunctionCall = true;
3532
+ content.push({
3533
+ type: "tool-call",
3534
+ toolCallId: part.call_id,
3535
+ toolName: part.name,
3536
+ input: part.arguments,
3537
+ providerMetadata: {
3538
+ openai: {
3539
+ itemId: part.id
3540
+ }
3541
+ }
3542
+ });
3543
+ break;
3544
+ }
3545
+ case "web_search_call": {
3546
+ content.push({
3547
+ type: "tool-call",
3548
+ toolCallId: part.id,
3549
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3550
+ input: JSON.stringify({}),
3551
+ providerExecuted: true
3552
+ });
3553
+ content.push({
3554
+ type: "tool-result",
3555
+ toolCallId: part.id,
3556
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3557
+ result: mapWebSearchOutput(part.action),
3558
+ providerExecuted: true
3559
+ });
3560
+ break;
3561
+ }
3562
+ case "computer_call": {
3563
+ content.push({
3564
+ type: "tool-call",
3565
+ toolCallId: part.id,
3566
+ toolName: "computer_use",
3567
+ input: "",
3568
+ providerExecuted: true
3569
+ });
3570
+ content.push({
3571
+ type: "tool-result",
3572
+ toolCallId: part.id,
3573
+ toolName: "computer_use",
3574
+ result: {
3575
+ type: "computer_use_tool_result",
3576
+ status: part.status || "completed"
3577
+ },
3578
+ providerExecuted: true
3579
+ });
3580
+ break;
3581
+ }
3582
+ case "file_search_call": {
3583
+ content.push({
3584
+ type: "tool-call",
3585
+ toolCallId: part.id,
3586
+ toolName: "file_search",
3587
+ input: "{}",
3588
+ providerExecuted: true
3589
+ });
3590
+ content.push({
3591
+ type: "tool-result",
3592
+ toolCallId: part.id,
3593
+ toolName: "file_search",
3594
+ result: {
3595
+ queries: part.queries,
3596
+ results: (_n = (_m = part.results) == null ? void 0 : _m.map((result) => ({
3597
+ attributes: result.attributes,
3598
+ fileId: result.file_id,
3599
+ filename: result.filename,
3600
+ score: result.score,
3601
+ text: result.text
3602
+ }))) != null ? _n : null
3603
+ },
3604
+ providerExecuted: true
3605
+ });
3606
+ break;
3607
+ }
3608
+ case "code_interpreter_call": {
3609
+ content.push({
3610
+ type: "tool-call",
3611
+ toolCallId: part.id,
3612
+ toolName: "code_interpreter",
3613
+ input: JSON.stringify({
3614
+ code: part.code,
3615
+ containerId: part.container_id
3616
+ }),
3617
+ providerExecuted: true
3618
+ });
3619
+ content.push({
3620
+ type: "tool-result",
3621
+ toolCallId: part.id,
3622
+ toolName: "code_interpreter",
3623
+ result: {
3624
+ outputs: part.outputs
3625
+ },
3626
+ providerExecuted: true
3627
+ });
3628
+ break;
3629
+ }
3630
+ }
3631
+ }
3632
+ const providerMetadata = {
3633
+ openai: {
3634
+ ...response.id != null ? { responseId: response.id } : {}
3635
+ }
3636
+ };
3637
+ if (logprobs.length > 0) {
3638
+ providerMetadata.openai.logprobs = logprobs;
3639
+ }
3640
+ if (typeof response.service_tier === "string") {
3641
+ providerMetadata.openai.serviceTier = response.service_tier;
3642
+ }
3643
+ const usage = response.usage;
3644
+ return {
3645
+ content,
3646
+ finishReason: mapOpenAIResponseFinishReason({
3647
+ finishReason: (_o = response.incomplete_details) == null ? void 0 : _o.reason,
3648
+ hasFunctionCall
3649
+ }),
3650
+ usage: {
3651
+ inputTokens: usage.input_tokens,
3652
+ outputTokens: usage.output_tokens,
3653
+ totalTokens: usage.input_tokens + usage.output_tokens,
3654
+ reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
3655
+ cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
3656
+ },
3657
+ request: { body },
3658
+ response: {
3659
+ id: response.id,
3660
+ timestamp: new Date(response.created_at * 1e3),
3661
+ modelId: response.model,
3662
+ headers: responseHeaders,
3663
+ body: rawResponse
3664
+ },
3665
+ providerMetadata,
3666
+ warnings
3667
+ };
3668
+ }
3669
+ async doStream(options) {
3670
+ const {
3671
+ args: body,
3672
+ warnings,
3673
+ webSearchToolName,
3674
+ store
3675
+ } = await this.getArgs(options);
3676
+ const { responseHeaders, value: response } = await chunkF75EQ574_cjs.postJsonToApi({
3677
+ url: this.config.url({
3678
+ path: "/responses",
3679
+ modelId: this.modelId
3680
+ }),
3681
+ headers: chunkF75EQ574_cjs.combineHeaders(this.config.headers(), options.headers),
3682
+ body: {
3683
+ ...body,
3684
+ stream: true
3685
+ },
3686
+ failedResponseHandler: openaiFailedResponseHandler,
3687
+ successfulResponseHandler: chunkF75EQ574_cjs.createEventSourceResponseHandler(
3688
+ openaiResponsesChunkSchema
3689
+ ),
3690
+ abortSignal: options.abortSignal,
3691
+ fetch: this.config.fetch
3692
+ });
3693
+ const self = this;
3694
+ let finishReason = "unknown";
3695
+ const usage = {
3696
+ inputTokens: void 0,
3697
+ outputTokens: void 0,
3698
+ totalTokens: void 0
3699
+ };
3700
+ const logprobs = [];
3701
+ let responseId = null;
3702
+ const ongoingToolCalls = {};
3703
+ const ongoingAnnotations = [];
3704
+ let hasFunctionCall = false;
3705
+ const activeReasoning = {};
3706
+ let serviceTier;
3707
+ return {
3708
+ stream: response.pipeThrough(
3709
+ new TransformStream({
3710
+ start(controller) {
3711
+ controller.enqueue({ type: "stream-start", warnings });
3712
+ },
3713
+ transform(chunk, controller) {
3714
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
3715
+ if (options.includeRawChunks) {
3716
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3717
+ }
3718
+ if (!chunk.success) {
3719
+ finishReason = "error";
3720
+ controller.enqueue({ type: "error", error: chunk.error });
3721
+ return;
3722
+ }
3723
+ const value = chunk.value;
3724
+ if (isResponseOutputItemAddedChunk(value)) {
3725
+ if (value.item.type === "function_call") {
3726
+ ongoingToolCalls[value.output_index] = {
3727
+ toolName: value.item.name,
3728
+ toolCallId: value.item.call_id
3729
+ };
3730
+ controller.enqueue({
3731
+ type: "tool-input-start",
3732
+ id: value.item.call_id,
3733
+ toolName: value.item.name
3734
+ });
3735
+ } else if (value.item.type === "web_search_call") {
3736
+ ongoingToolCalls[value.output_index] = {
3737
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3738
+ toolCallId: value.item.id
3739
+ };
3740
+ controller.enqueue({
3741
+ type: "tool-input-start",
3742
+ id: value.item.id,
3743
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3744
+ providerExecuted: true
3745
+ });
3746
+ controller.enqueue({
3747
+ type: "tool-input-end",
3748
+ id: value.item.id
3749
+ });
3750
+ controller.enqueue({
3751
+ type: "tool-call",
3752
+ toolCallId: value.item.id,
3753
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3754
+ input: JSON.stringify({}),
3755
+ providerExecuted: true
3756
+ });
3757
+ } else if (value.item.type === "computer_call") {
3758
+ ongoingToolCalls[value.output_index] = {
3759
+ toolName: "computer_use",
3760
+ toolCallId: value.item.id
3761
+ };
3762
+ controller.enqueue({
3763
+ type: "tool-input-start",
3764
+ id: value.item.id,
3765
+ toolName: "computer_use",
3766
+ providerExecuted: true
3767
+ });
3768
+ } else if (value.item.type === "code_interpreter_call") {
3769
+ ongoingToolCalls[value.output_index] = {
3770
+ toolName: "code_interpreter",
3771
+ toolCallId: value.item.id,
3772
+ codeInterpreter: {
3773
+ containerId: value.item.container_id
3774
+ }
3775
+ };
3776
+ controller.enqueue({
3777
+ type: "tool-input-start",
3778
+ id: value.item.id,
3779
+ toolName: "code_interpreter",
3780
+ providerExecuted: true
3781
+ });
3782
+ controller.enqueue({
3783
+ type: "tool-input-delta",
3784
+ id: value.item.id,
3785
+ delta: `{"containerId":"${value.item.container_id}","code":"`
3786
+ });
3787
+ } else if (value.item.type === "file_search_call") {
3788
+ controller.enqueue({
3789
+ type: "tool-call",
3790
+ toolCallId: value.item.id,
3791
+ toolName: "file_search",
3792
+ input: "{}",
3793
+ providerExecuted: true
3794
+ });
3795
+ } else if (value.item.type === "image_generation_call") {
3796
+ controller.enqueue({
3797
+ type: "tool-call",
3798
+ toolCallId: value.item.id,
3799
+ toolName: "image_generation",
3800
+ input: "{}",
3801
+ providerExecuted: true
3802
+ });
3803
+ } else if (value.item.type === "message") {
3804
+ ongoingAnnotations.splice(0, ongoingAnnotations.length);
3805
+ controller.enqueue({
3806
+ type: "text-start",
3807
+ id: value.item.id,
3808
+ providerMetadata: {
3809
+ openai: {
3810
+ itemId: value.item.id
3811
+ }
3812
+ }
3813
+ });
3814
+ } else if (isResponseOutputItemAddedChunk(value) && value.item.type === "reasoning") {
3815
+ activeReasoning[value.item.id] = {
3816
+ encryptedContent: value.item.encrypted_content,
3817
+ summaryParts: { 0: "active" }
3818
+ };
3819
+ controller.enqueue({
3820
+ type: "reasoning-start",
3821
+ id: `${value.item.id}:0`,
3822
+ providerMetadata: {
3823
+ openai: {
3824
+ itemId: value.item.id,
3825
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
3826
+ }
3827
+ }
3828
+ });
3829
+ }
3830
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type !== "message") {
3831
+ if (value.item.type === "function_call") {
3832
+ ongoingToolCalls[value.output_index] = void 0;
3833
+ hasFunctionCall = true;
3834
+ controller.enqueue({
3835
+ type: "tool-input-end",
3836
+ id: value.item.call_id
3837
+ });
3838
+ controller.enqueue({
3839
+ type: "tool-call",
3840
+ toolCallId: value.item.call_id,
3841
+ toolName: value.item.name,
3842
+ input: value.item.arguments,
3843
+ providerMetadata: {
3844
+ openai: {
3845
+ itemId: value.item.id
3846
+ }
3847
+ }
3848
+ });
3849
+ } else if (value.item.type === "web_search_call") {
3850
+ ongoingToolCalls[value.output_index] = void 0;
3851
+ controller.enqueue({
3852
+ type: "tool-result",
3853
+ toolCallId: value.item.id,
3854
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3855
+ result: mapWebSearchOutput(value.item.action),
3856
+ providerExecuted: true
3857
+ });
3858
+ } else if (value.item.type === "computer_call") {
3859
+ ongoingToolCalls[value.output_index] = void 0;
3860
+ controller.enqueue({
3861
+ type: "tool-input-end",
3862
+ id: value.item.id
3863
+ });
3864
+ controller.enqueue({
3865
+ type: "tool-call",
3866
+ toolCallId: value.item.id,
3867
+ toolName: "computer_use",
3868
+ input: "",
3869
+ providerExecuted: true
3870
+ });
3871
+ controller.enqueue({
3872
+ type: "tool-result",
3873
+ toolCallId: value.item.id,
3874
+ toolName: "computer_use",
3875
+ result: {
3876
+ type: "computer_use_tool_result",
3877
+ status: value.item.status || "completed"
3878
+ },
3879
+ providerExecuted: true
3880
+ });
3881
+ } else if (value.item.type === "file_search_call") {
3882
+ ongoingToolCalls[value.output_index] = void 0;
3883
+ controller.enqueue({
3884
+ type: "tool-result",
3885
+ toolCallId: value.item.id,
3886
+ toolName: "file_search",
3887
+ result: {
3888
+ queries: value.item.queries,
3889
+ results: (_c = (_b = value.item.results) == null ? void 0 : _b.map((result) => ({
3890
+ attributes: result.attributes,
3891
+ fileId: result.file_id,
3892
+ filename: result.filename,
3893
+ score: result.score,
3894
+ text: result.text
3895
+ }))) != null ? _c : null
3896
+ },
3897
+ providerExecuted: true
3898
+ });
3899
+ } else if (value.item.type === "code_interpreter_call") {
3900
+ ongoingToolCalls[value.output_index] = void 0;
3901
+ controller.enqueue({
3902
+ type: "tool-result",
3903
+ toolCallId: value.item.id,
3904
+ toolName: "code_interpreter",
3905
+ result: {
3906
+ outputs: value.item.outputs
3907
+ },
3908
+ providerExecuted: true
3909
+ });
3910
+ } else if (value.item.type === "image_generation_call") {
3911
+ controller.enqueue({
3912
+ type: "tool-result",
3913
+ toolCallId: value.item.id,
3914
+ toolName: "image_generation",
3915
+ result: {
3916
+ result: value.item.result
3917
+ },
3918
+ providerExecuted: true
3919
+ });
3920
+ } else if (value.item.type === "local_shell_call") {
3921
+ ongoingToolCalls[value.output_index] = void 0;
3922
+ controller.enqueue({
3923
+ type: "tool-call",
3924
+ toolCallId: value.item.call_id,
3925
+ toolName: "local_shell",
3926
+ input: JSON.stringify({
3927
+ action: {
3928
+ type: "exec",
3929
+ command: value.item.action.command,
3930
+ timeoutMs: value.item.action.timeout_ms,
3931
+ user: value.item.action.user,
3932
+ workingDirectory: value.item.action.working_directory,
3933
+ env: value.item.action.env
3934
+ }
3935
+ }),
3936
+ providerMetadata: {
3937
+ openai: { itemId: value.item.id }
3938
+ }
3939
+ });
3940
+ } else if (value.item.type === "reasoning") {
3941
+ const activeReasoningPart = activeReasoning[value.item.id];
3942
+ const summaryPartIndices = Object.entries(
3943
+ activeReasoningPart.summaryParts
3944
+ ).filter(
3945
+ ([_, status]) => status === "active" || status === "can-conclude"
3946
+ ).map(([summaryIndex]) => summaryIndex);
3947
+ for (const summaryIndex of summaryPartIndices) {
3948
+ controller.enqueue({
3949
+ type: "reasoning-end",
3950
+ id: `${value.item.id}:${summaryIndex}`,
3951
+ providerMetadata: {
3952
+ openai: {
3953
+ itemId: value.item.id,
3954
+ reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
3955
+ }
3956
+ }
3957
+ });
3958
+ }
3959
+ delete activeReasoning[value.item.id];
3960
+ }
3961
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
3962
+ const toolCall = ongoingToolCalls[value.output_index];
3963
+ if (toolCall != null) {
3964
+ controller.enqueue({
3965
+ type: "tool-input-delta",
3966
+ id: toolCall.toolCallId,
3967
+ delta: value.delta
3968
+ });
3969
+ }
3970
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
3971
+ const toolCall = ongoingToolCalls[value.output_index];
3972
+ if (toolCall != null) {
3973
+ controller.enqueue({
3974
+ type: "tool-input-delta",
3975
+ id: toolCall.toolCallId,
3976
+ // The delta is code, which is embedding in a JSON string.
3977
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
3978
+ delta: JSON.stringify(value.delta).slice(1, -1)
3979
+ });
3980
+ }
3981
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
3982
+ const toolCall = ongoingToolCalls[value.output_index];
3983
+ if (toolCall != null) {
3984
+ controller.enqueue({
3985
+ type: "tool-input-delta",
3986
+ id: toolCall.toolCallId,
3987
+ delta: '"}'
3988
+ });
3989
+ controller.enqueue({
3990
+ type: "tool-input-end",
3991
+ id: toolCall.toolCallId
3992
+ });
3993
+ controller.enqueue({
3994
+ type: "tool-call",
3995
+ toolCallId: toolCall.toolCallId,
3996
+ toolName: "code_interpreter",
3997
+ input: JSON.stringify({
3998
+ code: value.code,
3999
+ containerId: toolCall.codeInterpreter.containerId
4000
+ }),
4001
+ providerExecuted: true
4002
+ });
4003
+ }
4004
+ } else if (isResponseCreatedChunk(value)) {
4005
+ responseId = value.response.id;
4006
+ controller.enqueue({
4007
+ type: "response-metadata",
4008
+ id: value.response.id,
4009
+ timestamp: new Date(value.response.created_at * 1e3),
4010
+ modelId: value.response.model
4011
+ });
4012
+ } else if (isTextDeltaChunk(value)) {
4013
+ controller.enqueue({
4014
+ type: "text-delta",
4015
+ id: value.item_id,
4016
+ delta: value.delta
4017
+ });
4018
+ if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
4019
+ logprobs.push(value.logprobs);
4020
+ }
4021
+ } else if (value.type === "response.reasoning_summary_part.added") {
4022
+ if (value.summary_index > 0) {
4023
+ const activeReasoningPart = activeReasoning[value.item_id];
4024
+ activeReasoningPart.summaryParts[value.summary_index] = "active";
4025
+ for (const summaryIndex of Object.keys(
4026
+ activeReasoningPart.summaryParts
4027
+ )) {
4028
+ if (activeReasoningPart.summaryParts[summaryIndex] === "can-conclude") {
4029
+ controller.enqueue({
4030
+ type: "reasoning-end",
4031
+ id: `${value.item_id}:${summaryIndex}`,
4032
+ providerMetadata: { openai: { itemId: value.item_id } }
4033
+ });
4034
+ activeReasoningPart.summaryParts[summaryIndex] = "concluded";
4035
+ }
4036
+ }
4037
+ controller.enqueue({
4038
+ type: "reasoning-start",
4039
+ id: `${value.item_id}:${value.summary_index}`,
4040
+ providerMetadata: {
4041
+ openai: {
4042
+ itemId: value.item_id,
4043
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
4044
+ }
4045
+ }
4046
+ });
4047
+ }
4048
+ } else if (value.type === "response.reasoning_summary_text.delta") {
4049
+ controller.enqueue({
4050
+ type: "reasoning-delta",
4051
+ id: `${value.item_id}:${value.summary_index}`,
4052
+ delta: value.delta,
4053
+ providerMetadata: {
4054
+ openai: {
4055
+ itemId: value.item_id
4056
+ }
4057
+ }
4058
+ });
4059
+ } else if (value.type === "response.reasoning_summary_part.done") {
4060
+ if (store) {
4061
+ controller.enqueue({
4062
+ type: "reasoning-end",
4063
+ id: `${value.item_id}:${value.summary_index}`,
4064
+ providerMetadata: {
4065
+ openai: { itemId: value.item_id }
4066
+ }
4067
+ });
4068
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
4069
+ } else {
4070
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
4071
+ }
4072
+ } else if (isResponseFinishedChunk(value)) {
4073
+ finishReason = mapOpenAIResponseFinishReason({
4074
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
4075
+ hasFunctionCall
4076
+ });
4077
+ usage.inputTokens = value.response.usage.input_tokens;
4078
+ usage.outputTokens = value.response.usage.output_tokens;
4079
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
4080
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
4081
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
4082
+ if (typeof value.response.service_tier === "string") {
4083
+ serviceTier = value.response.service_tier;
4084
+ }
4085
+ } else if (isResponseAnnotationAddedChunk(value)) {
4086
+ ongoingAnnotations.push(value.annotation);
4087
+ if (value.annotation.type === "url_citation") {
4088
+ controller.enqueue({
4089
+ type: "source",
4090
+ sourceType: "url",
4091
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : chunkF75EQ574_cjs.generateId(),
4092
+ url: value.annotation.url,
4093
+ title: value.annotation.title
4094
+ });
4095
+ } else if (value.annotation.type === "file_citation") {
4096
+ controller.enqueue({
4097
+ type: "source",
4098
+ sourceType: "document",
4099
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : chunkF75EQ574_cjs.generateId(),
4100
+ mediaType: "text/plain",
4101
+ title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
4102
+ filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
4103
+ ...value.annotation.file_id ? {
4104
+ providerMetadata: {
4105
+ openai: {
4106
+ fileId: value.annotation.file_id
4107
+ }
4108
+ }
4109
+ } : {}
4110
+ });
4111
+ }
4112
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "message") {
4113
+ controller.enqueue({
4114
+ type: "text-end",
4115
+ id: value.item.id,
4116
+ providerMetadata: {
4117
+ openai: {
4118
+ itemId: value.item.id,
4119
+ ...ongoingAnnotations.length > 0 && {
4120
+ annotations: ongoingAnnotations
4121
+ }
4122
+ }
4123
+ }
4124
+ });
4125
+ } else if (isErrorChunk(value)) {
4126
+ controller.enqueue({ type: "error", error: value });
4127
+ }
4128
+ },
4129
+ flush(controller) {
4130
+ const providerMetadata = {
4131
+ openai: {
4132
+ responseId
4133
+ }
4134
+ };
4135
+ if (logprobs.length > 0) {
4136
+ providerMetadata.openai.logprobs = logprobs;
4137
+ }
4138
+ if (serviceTier !== void 0) {
4139
+ providerMetadata.openai.serviceTier = serviceTier;
4140
+ }
4141
+ controller.enqueue({
4142
+ type: "finish",
4143
+ finishReason,
4144
+ usage,
4145
+ providerMetadata
4146
+ });
4147
+ }
4148
+ })
4149
+ ),
4150
+ request: { body },
4151
+ response: { headers: responseHeaders }
4152
+ };
4153
+ }
4154
+ };
4155
// --- Stream-event type guards for the OpenAI Responses API ---
// Each predicate narrows a streamed event object by its `type` tag.
function isTextDeltaChunk(event) {
  return event.type === "response.output_text.delta";
}
function isResponseOutputItemDoneChunk(event) {
  return event.type === "response.output_item.done";
}
function isResponseFinishedChunk(event) {
  // A response is finished whether it completed normally or was cut short.
  const tag = event.type;
  return tag === "response.completed" || tag === "response.incomplete";
}
function isResponseCreatedChunk(event) {
  return event.type === "response.created";
}
function isResponseFunctionCallArgumentsDeltaChunk(event) {
  return event.type === "response.function_call_arguments.delta";
}
function isResponseCodeInterpreterCallCodeDeltaChunk(event) {
  return event.type === "response.code_interpreter_call_code.delta";
}
function isResponseCodeInterpreterCallCodeDoneChunk(event) {
  return event.type === "response.code_interpreter_call_code.done";
}
function isResponseOutputItemAddedChunk(event) {
  return event.type === "response.output_item.added";
}
function isResponseAnnotationAddedChunk(event) {
  return event.type === "response.output_text.annotation.added";
}
function isErrorChunk(event) {
  return event.type === "error";
}
4185
/**
 * Capability flags for an OpenAI Responses API model id.
 *
 * @param {string} modelId - Raw model id, e.g. "gpt-5-nano" or "o3-mini".
 * @returns {{systemMessageMode: string, supportsFlexProcessing: boolean,
 *   supportsPriorityProcessing: boolean, isReasoningModel: boolean}}
 */
function getResponsesModelConfig(modelId) {
  const startsWith = (prefix) => modelId.startsWith(prefix);
  // Flex service tier: o3, o4-mini and non-chat gpt-5 models.
  const flex = startsWith("o3") || startsWith("o4-mini") || startsWith("gpt-5") && !startsWith("gpt-5-chat");
  // Priority service tier: gpt-4*, gpt-5-mini, non-nano/non-chat gpt-5, o3, o4-mini.
  const priority = startsWith("gpt-4") || startsWith("gpt-5-mini") || startsWith("gpt-5") && !startsWith("gpt-5-nano") && !startsWith("gpt-5-chat") || startsWith("o3") || startsWith("o4-mini");
  const base = {
    systemMessageMode: "system",
    supportsFlexProcessing: flex,
    supportsPriorityProcessing: priority
  };
  // gpt-5-chat is the one gpt-5 family member that is NOT a reasoning model.
  if (startsWith("gpt-5-chat")) {
    return {
      ...base,
      isReasoningModel: false
    };
  }
  // o-series, gpt-5, codex and computer-use models are reasoning models and
  // receive their system prompt via the "developer" role.
  if (startsWith("o") || startsWith("gpt-5") || startsWith("codex-") || startsWith("computer-use")) {
    return {
      ...base,
      isReasoningModel: true,
      systemMessageMode: "developer"
    };
  }
  return {
    ...base,
    isReasoningModel: false
  };
}
4211
/**
 * Maps a Responses API web-search action into the provider-neutral shape.
 * Unknown action types fall through and yield `undefined` (matches the
 * original switch without a default).
 */
function mapWebSearchOutput(action) {
  switch (action.type) {
    case "search": {
      const mapped = {
        action: { type: "search", query: action.query ?? void 0 }
      };
      // include sources when provided by the Responses API (behind include flag)
      if (action.sources != null) {
        mapped.sources = action.sources;
      }
      return mapped;
    }
    case "open_page":
      return { action: { type: "openPage", url: action.url } };
    case "find":
      return {
        action: { type: "find", url: action.url, pattern: action.pattern }
      };
  }
}
4228

// ../../node_modules/.pnpm/@ai-sdk+azure@2.0.74_zod@3.25.76/node_modules/@ai-sdk/azure/dist/index.mjs
// Built-in OpenAI tool factories re-exposed under the Azure provider
// (the factories themselves are defined earlier in this bundle).
var azureOpenaiTools = {
  codeInterpreter,
  fileSearch,
  imageGeneration,
  webSearchPreview
};
// Bundled @ai-sdk/azure version; appended to the user-agent suffix below.
var VERSION = "2.0.74" ;
4237
/**
 * Creates an Azure OpenAI provider (bundled from @ai-sdk/azure).
 *
 * The returned value is callable — `provider(deploymentId)` yields a chat
 * model — and also exposes named factories for the other model kinds
 * (chat/completion/embedding/image/responses/transcription/speech). All
 * models share the URL builder and lazy header resolution defined here.
 */
function createAzure(options = {}) {
  var _a;
  // Lazily resolves the `api-key` header (options.apiKey or the
  // AZURE_API_KEY env var) and appends the ai-sdk/azure user-agent suffix.
  const getHeaders = () => {
    const baseHeaders = {
      "api-key": chunkF75EQ574_cjs.loadApiKey({
        apiKey: options.apiKey,
        environmentVariableName: "AZURE_API_KEY",
        description: "Azure OpenAI"
      }),
      ...options.headers
    };
    return chunkF75EQ574_cjs.withUserAgentSuffix(baseHeaders, `ai-sdk/azure/${VERSION}`);
  };
  // Resource name comes from options or the AZURE_RESOURCE_NAME env var.
  const getResourceName = () => chunkF75EQ574_cjs.loadSetting({
    settingValue: options.resourceName,
    settingName: "resourceName",
    environmentVariableName: "AZURE_RESOURCE_NAME",
    description: "Azure OpenAI resource name"
  });
  const apiVersion = (_a = options.apiVersion) != null ? _a : "v1";
  // Builds a request URL: legacy /deployments/<model> layout when
  // useDeploymentBasedUrls is set, otherwise the newer /v1 API surface.
  // `api-version` is always appended as a query parameter.
  const url = ({ path, modelId }) => {
    var _a2;
    const baseUrlPrefix = (_a2 = options.baseURL) != null ? _a2 : `https://${getResourceName()}.openai.azure.com/openai`;
    let fullUrl;
    if (options.useDeploymentBasedUrls) {
      fullUrl = new URL(`${baseUrlPrefix}/deployments/${modelId}${path}`);
    } else {
      fullUrl = new URL(`${baseUrlPrefix}/v1${path}`);
    }
    fullUrl.searchParams.set("api-version", apiVersion);
    return fullUrl.toString();
  };
  // Per-kind model factories; each shares url/headers/fetch from the closure.
  const createChatModel = (deploymentName) => new OpenAIChatLanguageModel(deploymentName, {
    provider: "azure.chat",
    url,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
    provider: "azure.completion",
    url,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
    provider: "azure.embeddings",
    headers: getHeaders,
    url,
    fetch: options.fetch
  });
  // Responses API models accept Azure's "assistant-" file-id prefix.
  const createResponsesModel = (modelId) => new OpenAIResponsesLanguageModel(modelId, {
    provider: "azure.responses",
    url,
    headers: getHeaders,
    fetch: options.fetch,
    fileIdPrefixes: ["assistant-"]
  });
  const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
    provider: "azure.image",
    url,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
    provider: "azure.transcription",
    url,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
    provider: "azure.speech",
    url,
    headers: getHeaders,
    fetch: options.fetch
  });
  // Callable provider; guard against accidental `new` invocation.
  const provider = function(deploymentId) {
    if (new.target) {
      throw new Error(
        "The Azure OpenAI model function cannot be called with the new keyword."
      );
    }
    return createChatModel(deploymentId);
  };
  provider.languageModel = createChatModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.embedding = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.responses = createResponsesModel;
  provider.transcription = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.tools = azureOpenaiTools;
  return provider;
}
// Default provider instantiation from the original module; the bundler
// dropped its (unused) binding but kept the call. NOTE(review): result is
// discarded — presumably harmless since construction does no I/O; confirm.
createAzure();
4335
+
4336
// src/llm/model/gateways/azure.ts
/**
 * Mastra model gateway for Azure OpenAI.
 *
 * Model ids are either (a) a static `deployments` list from config, or
 * (b) discovered at runtime via the Azure Management REST API using AAD
 * client-credentials (the `management` config). AAD tokens are cached
 * in-memory and refreshed shortly before expiry.
 */
var AzureOpenAIGateway = class extends chunkF75EQ574_cjs.MastraModelGateway {
  constructor(config) {
    super();
    this.config = config;
    // Fail fast on missing/inconsistent configuration.
    this.validateConfig();
  }
  id = "azure-openai";
  name = "azure-openai";
  // Caches AAD management tokens keyed by tenant + client id.
  tokenCache = new chunkUIGRFDO6_cjs.InMemoryServerCache();
  /**
   * Throws MastraError when resourceName/apiKey are missing; warns when both
   * a static deployments list and management credentials are supplied (the
   * static list wins). Eagerly validates management credential shape.
   */
  validateConfig() {
    if (!this.config.resourceName) {
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_GATEWAY_INVALID_CONFIG",
        domain: "LLM",
        category: "UNKNOWN",
        text: "resourceName is required for Azure OpenAI gateway"
      });
    }
    if (!this.config.apiKey) {
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_GATEWAY_INVALID_CONFIG",
        domain: "LLM",
        category: "UNKNOWN",
        text: "apiKey is required for Azure OpenAI gateway"
      });
    }
    const hasDeployments = this.config.deployments && this.config.deployments.length > 0;
    const hasManagement = this.config.management !== void 0;
    if (hasDeployments && hasManagement) {
      console.warn(
        "[AzureOpenAIGateway] Both deployments and management credentials provided. Using static deployments list and ignoring management API."
      );
    }
    if (hasManagement) {
      // Called for validation only; throws when required fields are missing.
      this.getManagementCredentials(this.config.management);
    }
  }
  /**
   * Returns the provider map for this gateway. Resolution order:
   * static `deployments` list → management-API discovery → empty fallback
   * (discovery errors are logged, not rethrown, so manual deployment names
   * still work).
   */
  async fetchProviders() {
    if (this.config.deployments && this.config.deployments.length > 0) {
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: this.config.deployments,
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    }
    if (!this.config.management) {
      // No discovery possible; expose an empty model list.
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: [],
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    }
    try {
      const credentials = this.getManagementCredentials(this.config.management);
      const token = await this.getAzureADToken({
        tenantId: credentials.tenantId,
        clientId: credentials.clientId,
        clientSecret: credentials.clientSecret
      });
      const deployments = await this.fetchDeployments(token, {
        subscriptionId: credentials.subscriptionId,
        resourceGroup: credentials.resourceGroup,
        resourceName: this.config.resourceName
      });
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: deployments.map((d) => d.name),
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    } catch (error) {
      const errorMsg = error instanceof Error ? error.message : String(error);
      console.warn(
        `[AzureOpenAIGateway] Deployment discovery failed: ${errorMsg}`,
        "\nReturning fallback configuration. Azure OpenAI can still be used by manually specifying deployment names."
      );
      return {
        "azure-openai": {
          apiKeyEnvVar: [],
          apiKeyHeader: "api-key",
          name: "Azure OpenAI",
          models: [],
          docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
          gateway: "azure-openai"
        }
      };
    }
  }
  /**
   * Validates and narrows the management credential object; throws a
   * MastraError listing every missing field at once.
   */
  getManagementCredentials(management) {
    const { tenantId, clientId, clientSecret, subscriptionId, resourceGroup } = management;
    const missing = [];
    if (!tenantId) missing.push("tenantId");
    if (!clientId) missing.push("clientId");
    if (!clientSecret) missing.push("clientSecret");
    if (!subscriptionId) missing.push("subscriptionId");
    if (!resourceGroup) missing.push("resourceGroup");
    if (missing.length > 0) {
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_MANAGEMENT_CREDENTIALS_MISSING",
        domain: "LLM",
        category: "UNKNOWN",
        text: `Management credentials incomplete. Missing: ${missing.join(", ")}. Required fields: tenantId, clientId, clientSecret, subscriptionId, resourceGroup.`
      });
    }
    return {
      tenantId,
      clientId,
      clientSecret,
      subscriptionId,
      resourceGroup
    };
  }
  /**
   * Fetches (or returns a cached) AAD access token for the Azure management
   * scope via the OAuth2 client-credentials flow. Cached tokens are reused
   * while they have more than 60 seconds of validity left.
   */
  async getAzureADToken(credentials) {
    const { tenantId, clientId, clientSecret } = credentials;
    const cacheKey = `azure-mgmt-token:${tenantId}:${clientId}`;
    const cached = await this.tokenCache.get(cacheKey);
    if (cached && cached.expiresAt > Date.now() / 1e3 + 60) {
      return cached.token;
    }
    const tokenEndpoint = `https://login.microsoftonline.com/${tenantId}/oauth2/v2.0/token`;
    const body = new URLSearchParams({
      grant_type: "client_credentials",
      client_id: clientId,
      client_secret: clientSecret,
      scope: "https://management.azure.com/.default"
    });
    const response = await fetch(tokenEndpoint, {
      method: "POST",
      headers: {
        "Content-Type": "application/x-www-form-urlencoded"
      },
      body: body.toString()
    });
    if (!response.ok) {
      const error = await response.text();
      throw new chunkHWMMIRIF_cjs.MastraError({
        id: "AZURE_AD_TOKEN_ERROR",
        domain: "LLM",
        category: "UNKNOWN",
        text: `Failed to get Azure AD token: ${response.status} ${error}`
      });
    }
    const tokenResponse = await response.json();
    // expires_in is relative seconds; store an absolute epoch-seconds expiry.
    const expiresAt = Math.floor(Date.now() / 1e3) + tokenResponse.expires_in;
    await this.tokenCache.set(cacheKey, {
      token: tokenResponse.access_token,
      expiresAt
    });
    return tokenResponse.access_token;
  }
  /**
   * Lists deployments of the Cognitive Services account via the management
   * API, following `nextLink` pagination, and keeps only those with
   * provisioningState === "Succeeded".
   */
  async fetchDeployments(token, credentials) {
    const { subscriptionId, resourceGroup, resourceName } = credentials;
    let url = `https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.CognitiveServices/accounts/${resourceName}/deployments?api-version=2024-10-01`;
    const allDeployments = [];
    while (url) {
      const response = await fetch(url, {
        headers: {
          Authorization: `Bearer ${token}`,
          "Content-Type": "application/json"
        }
      });
      if (!response.ok) {
        const error = await response.text();
        throw new chunkHWMMIRIF_cjs.MastraError({
          id: "AZURE_DEPLOYMENTS_FETCH_ERROR",
          domain: "LLM",
          category: "UNKNOWN",
          text: `Failed to fetch Azure deployments: ${response.status} ${error}`
        });
      }
      const data = await response.json();
      allDeployments.push(...data.value);
      // nextLink is absent on the last page, ending the loop.
      url = data.nextLink;
    }
    const successfulDeployments = allDeployments.filter((d) => d.properties.provisioningState === "Succeeded");
    return successfulDeployments;
  }
  // This gateway builds URLs inside the Azure provider itself.
  buildUrl(_routerId, _envVars) {
    return void 0;
  }
  // Static key from config; no per-model resolution.
  async getApiKey(_modelId) {
    return this.config.apiKey;
  }
  /**
   * Builds an Azure chat model for the given deployment, using
   * deployment-based URLs and the configured (or default preview) apiVersion.
   */
  async resolveLanguageModel({
    modelId,
    apiKey
  }) {
    const apiVersion = this.config.apiVersion || "2024-04-01-preview";
    return createAzure({
      resourceName: this.config.resourceName,
      apiKey,
      apiVersion,
      useDeploymentBasedUrls: true
    })(modelId);
  }
};
4547
+
4548
// src/llm/model/gateways/index.ts
/**
 * Picks the gateway responsible for a (possibly prefixed) model id.
 *
 * Non-"models.dev" gateways match by exact id or by "<id>/" prefix, in
 * list order; "models.dev" acts as the catch-all fallback. Throws a
 * MastraError when nothing matches.
 */
function findGatewayForModel(gatewayId, gateways) {
  for (const gateway of gateways) {
    if (gateway.id === "models.dev") continue;
    if (gateway.id === gatewayId || gatewayId.startsWith(`${gateway.id}/`)) {
      return gateway;
    }
  }
  const fallback = gateways.find((gateway) => gateway.id === "models.dev");
  if (fallback) {
    return fallback;
  }
  throw new chunkHWMMIRIF_cjs.MastraError({
    id: "MODEL_ROUTER_NO_GATEWAY_FOUND",
    category: "USER",
    domain: "MODEL_ROUTER",
    text: `No Mastra model router gateway found for model id ${gatewayId}`
  });
}
4567
+
4568
// src/llm/model/router.ts
// Returns the subset of the static provider registry whose `gateway`
// field matches `name` (e.g. "models.dev").
function getStaticProvidersByGateway(name) {
  return Object.fromEntries(Object.entries(chunkFPDJ4XN6_cjs.PROVIDER_REGISTRY).filter(([_provider, config]) => config.gateway === name));
}
// Gateways consulted (in order) when the caller registers no custom ones.
var defaultGateways = [new chunkTDM43G4I_cjs.NetlifyGateway(), new chunkEGHGFLL3_cjs.ModelsDevGateway(getStaticProvidersByGateway(`models.dev`))];
4573
/**
 * AI-SDK v2-spec language model that routes "provider/model" ids through a
 * Mastra gateway (custom gateways first, then the defaults). Resolved
 * provider model instances are memoized in a static, process-wide cache.
 */
var ModelRouterLanguageModel = class _ModelRouterLanguageModel {
  specificationVersion = "v2";
  defaultObjectGenerationMode = "json";
  supportsStructuredOutputs = true;
  supportsImageUrls = true;
  supportedUrls = {};
  modelId;
  provider;
  config;
  gateway;
  /**
   * Accepts a plain "provider/model" string, `{ providerId, modelId, ... }`,
   * or `{ id, ... }`; normalizes to `{ id, url?, apiKey?, headers? }` and
   * picks the owning gateway. `routerId` keeps the original (pre-parse) id.
   */
  constructor(config, customGateways) {
    let normalizedConfig;
    if (typeof config === "string") {
      normalizedConfig = { id: config };
    } else if ("providerId" in config && "modelId" in config) {
      normalizedConfig = {
        id: `${config.providerId}/${config.modelId}`,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    } else {
      normalizedConfig = {
        id: config.id,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    }
    const parsedConfig = {
      ...normalizedConfig,
      routerId: normalizedConfig.id
    };
    // Custom gateways take precedence over the built-in defaults.
    this.gateway = findGatewayForModel(normalizedConfig.id, [...customGateways || [], ...defaultGateways]);
    const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
    const parsed = chunkEGHGFLL3_cjs.parseModelRouterId(normalizedConfig.id, gatewayPrefix);
    this.provider = parsed.providerId || "openai-compatible";
    // Strip the provider prefix from the public modelId when parsing changed it.
    if (parsed.providerId && parsed.modelId !== normalizedConfig.id) {
      parsedConfig.id = parsed.modelId;
    }
    this.modelId = parsedConfig.id;
    this.config = parsedConfig;
  }
  /**
   * Resolves the API key and underlying model, then delegates generation to
   * an AISDKV5LanguageModel wrapper. Key-resolution failures are surfaced as
   * an error stream rather than a rejected promise.
   * NOTE(review): this error path returns a `{ stream }` shape like doStream
   * does — confirm callers of doGenerate handle that shape.
   */
  async doGenerate(options) {
    let apiKey;
    try {
      if (this.config.url) {
        // Explicit base URL: key is optional (OpenAI-compatible endpoint).
        apiKey = this.config.apiKey || "";
      } else {
        apiKey = this.config.apiKey || await this.gateway.getApiKey(this.config.routerId);
      }
    } catch (error) {
      return {
        stream: new ReadableStream({
          start(controller) {
            controller.enqueue({
              type: "error",
              error
            });
            controller.close();
          }
        })
      };
    }
    const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
    const model = await this.resolveLanguageModel({
      apiKey,
      headers: this.config.headers,
      ...chunkEGHGFLL3_cjs.parseModelRouterId(this.config.routerId, gatewayPrefix)
    });
    const aiSDKV5Model = new chunkTRUNX3AX_cjs.AISDKV5LanguageModel(model);
    return aiSDKV5Model.doGenerate(options);
  }
  /**
   * Streaming counterpart of doGenerate; identical resolution flow, with
   * key-resolution failures emitted as a single error chunk on the stream.
   */
  async doStream(options) {
    let apiKey;
    try {
      if (this.config.url) {
        apiKey = this.config.apiKey || "";
      } else {
        apiKey = this.config.apiKey || await this.gateway.getApiKey(this.config.routerId);
      }
    } catch (error) {
      return {
        stream: new ReadableStream({
          start(controller) {
            controller.enqueue({
              type: "error",
              error
            });
            controller.close();
          }
        })
      };
    }
    const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
    const model = await this.resolveLanguageModel({
      apiKey,
      headers: this.config.headers,
      ...chunkEGHGFLL3_cjs.parseModelRouterId(this.config.routerId, gatewayPrefix)
    });
    const aiSDKV5Model = new chunkTRUNX3AX_cjs.AISDKV5LanguageModel(model);
    return aiSDKV5Model.doStream(options);
  }
  /**
   * Resolves (and memoizes) the concrete provider model. The cache key is a
   * SHA-256 over gateway id, model/provider ids, apiKey, base URL and
   * headers, so distinct credentials never share an instance.
   */
  async resolveLanguageModel({
    modelId,
    providerId,
    apiKey,
    headers
  }) {
    const key = crypto.createHash("sha256").update(
      this.gateway.id + modelId + providerId + apiKey + (this.config.url || "") + (headers ? JSON.stringify(headers) : "")
    ).digest("hex");
    if (_ModelRouterLanguageModel.modelInstances.has(key)) return _ModelRouterLanguageModel.modelInstances.get(key);
    if (this.config.url) {
      // Explicit base URL bypasses the gateway: build an OpenAI-compatible
      // chat model directly against it.
      const modelInstance2 = chunkF75EQ574_cjs.createOpenAICompatible({
        name: providerId,
        apiKey,
        baseURL: this.config.url,
        headers: this.config.headers,
        supportsStructuredOutputs: true
      }).chatModel(modelId);
      _ModelRouterLanguageModel.modelInstances.set(key, modelInstance2);
      return modelInstance2;
    }
    const modelInstance = await this.gateway.resolveLanguageModel({ modelId, providerId, apiKey, headers });
    _ModelRouterLanguageModel.modelInstances.set(key, modelInstance);
    return modelInstance;
  }
  // Process-wide memoization of resolved provider model instances.
  static modelInstances = /* @__PURE__ */ new Map();
};
4703
+
4704
// src/llm/model/aisdk/v6/model.ts
/**
 * Thin adapter around an AI SDK v6 language model (specification "v3").
 *
 * Mirrors the wrapped model's provider/modelId/supportedUrls. `doStream`
 * is delegated untouched; `doGenerate` is delegated and its result is
 * additionally exposed as a stream via createStreamFromGenerateResult.
 */
var AISDKV6LanguageModel = class {
  /**
   * The language model must specify which language model interface version
   * it implements.
   */
  specificationVersion = "v3";
  /** Name of the provider for logging purposes. */
  provider;
  /** Provider-specific model ID for logging purposes. */
  modelId;
  /**
   * Supported URL patterns by media type for the provider. Matched URLs are
   * handled natively by the model and are not downloaded.
   */
  supportedUrls;
  #model;
  constructor(config) {
    this.#model = config;
    const { provider, modelId, supportedUrls } = config;
    this.provider = provider;
    this.modelId = modelId;
    this.supportedUrls = supportedUrls;
  }
  async doGenerate(options) {
    const generated = await this.#model.doGenerate(options);
    const { request, response } = generated;
    return {
      request,
      response,
      stream: chunkTRUNX3AX_cjs.createStreamFromGenerateResult(generated)
    };
  }
  async doStream(options) {
    return await this.#model.doStream(options);
  }
};
4747
+
4748
// src/llm/model/resolve-model.ts
/**
 * True when `modelConfig` is a plain OpenAI-compatible object config:
 * either `{ id }` or `{ providerId, modelId }`. AI-SDK model instances
 * (which carry `specificationVersion`) and wrappers carrying a `model`
 * key are excluded; non-objects are rejected.
 */
function isOpenAICompatibleObjectConfig(modelConfig) {
  const isObject = typeof modelConfig === "object";
  if (isObject && "specificationVersion" in modelConfig) {
    return false;
  }
  if (isObject && !("model" in modelConfig)) {
    return "id" in modelConfig || "providerId" in modelConfig && "modelId" in modelConfig;
  }
  return false;
}
4757
/**
 * Normalizes any supported model configuration into a concrete language
 * model instance.
 *
 * Accepted inputs: an async/sync factory function, an already-wrapped
 * router/v5/v6 model (returned as-is), a raw AI-SDK model (wrapped per its
 * specificationVersion), or a "provider/model" string / OpenAI-compatible
 * object config (routed through ModelRouterLanguageModel). Throws on
 * anything else.
 */
async function resolveModelConfig(modelConfig, requestContext = new chunkUVHSM2GU_cjs.RequestContext(), mastra) {
  // Config may be a factory; resolve it before inspecting.
  if (typeof modelConfig === "function") {
    modelConfig = await modelConfig({ requestContext, mastra });
  }
  if (modelConfig instanceof ModelRouterLanguageModel || modelConfig instanceof chunkTRUNX3AX_cjs.AISDKV5LanguageModel || modelConfig instanceof AISDKV6LanguageModel) {
    return modelConfig;
  }
  if (typeof modelConfig === "object" && "specificationVersion" in modelConfig) {
    if (modelConfig.specificationVersion === "v2") {
      return new chunkTRUNX3AX_cjs.AISDKV5LanguageModel(modelConfig);
    }
    if (modelConfig.specificationVersion === "v3") {
      return new AISDKV6LanguageModel(modelConfig);
    }
    // Unknown specification versions pass through unwrapped.
    return modelConfig;
  }
  // Include any custom gateways registered on the Mastra instance.
  const gatewayRecord = mastra?.listGateways();
  const customGateways = gatewayRecord ? Object.values(gatewayRecord) : void 0;
  if (typeof modelConfig === "string" || isOpenAICompatibleObjectConfig(modelConfig)) {
    return new ModelRouterLanguageModel(modelConfig, customGateways);
  }
  throw new Error("Invalid model configuration provided");
}
4780
+
4781
// src/llm/model/embedding-router.ts
/**
 * AI-SDK v2-spec embedding model that routes "provider/model" ids to a
 * concrete provider embedding model, resolved eagerly in the constructor.
 * API keys come from config or the provider's registered env var(s).
 */
var ModelRouterEmbeddingModel = class {
  specificationVersion = "v2";
  modelId;
  provider;
  // Defaults; overridden below when the resolved provider model declares its own.
  maxEmbeddingsPerCall = 2048;
  supportsParallelCalls = true;
  providerModel;
  /**
   * Accepts "provider/model", `{ providerId, modelId, ... }`, or
   * `{ id: "provider/model", ... }`. Throws on malformed ids, unknown
   * providers, or missing API keys.
   */
  constructor(config) {
    let normalizedConfig;
    if (typeof config === "string") {
      const parts = config.split("/");
      if (parts.length !== 2) {
        throw new Error(`Invalid model string format: "${config}". Expected format: "provider/model"`);
      }
      const [providerId, modelId] = parts;
      normalizedConfig = { providerId, modelId };
    } else if ("providerId" in config && "modelId" in config) {
      normalizedConfig = {
        providerId: config.providerId,
        modelId: config.modelId,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    } else {
      const parts = config.id.split("/");
      if (parts.length !== 2) {
        throw new Error(`Invalid model string format: "${config.id}". Expected format: "provider/model"`);
      }
      const [providerId, modelId] = parts;
      normalizedConfig = {
        providerId,
        modelId,
        url: config.url,
        apiKey: config.apiKey,
        headers: config.headers
      };
    }
    this.provider = normalizedConfig.providerId;
    this.modelId = normalizedConfig.modelId;
    if (normalizedConfig.url) {
      // Explicit base URL: talk to it directly as an OpenAI-compatible API.
      const apiKey = normalizedConfig.apiKey || "";
      this.providerModel = chunkF75EQ574_cjs.createOpenAICompatible({
        name: normalizedConfig.providerId,
        apiKey,
        baseURL: normalizedConfig.url,
        headers: normalizedConfig.headers
      }).textEmbeddingModel(normalizedConfig.modelId);
    } else {
      const registry = chunkFPDJ4XN6_cjs.GatewayRegistry.getInstance();
      const providerConfig = registry.getProviderConfig(normalizedConfig.providerId);
      if (!providerConfig) {
        throw new Error(`Unknown provider: ${normalizedConfig.providerId}`);
      }
      // Fall back to the provider's env var(s); a provider may register one
      // name or a prioritized list.
      let apiKey = normalizedConfig.apiKey;
      if (!apiKey) {
        const apiKeyEnvVar = providerConfig.apiKeyEnvVar;
        if (Array.isArray(apiKeyEnvVar)) {
          for (const envVar of apiKeyEnvVar) {
            apiKey = process.env[envVar];
            if (apiKey) break;
          }
        } else {
          apiKey = process.env[apiKeyEnvVar];
        }
      }
      if (!apiKey) {
        const envVarDisplay = Array.isArray(providerConfig.apiKeyEnvVar) ? providerConfig.apiKeyEnvVar.join(" or ") : providerConfig.apiKeyEnvVar;
        throw new Error(`API key not found for provider ${normalizedConfig.providerId}. Set ${envVarDisplay}`);
      }
      // openai and google have dedicated SDK factories; everything else goes
      // through the generic OpenAI-compatible client using the registry URL.
      if (normalizedConfig.providerId === "openai") {
        this.providerModel = chunkF75EQ574_cjs.createOpenAI({ apiKey }).textEmbeddingModel(
          normalizedConfig.modelId
        );
      } else if (normalizedConfig.providerId === "google") {
        this.providerModel = chunkF75EQ574_cjs.createGoogleGenerativeAI({ apiKey }).textEmbedding(
          normalizedConfig.modelId
        );
      } else {
        if (!providerConfig.url) {
          throw new Error(`Provider ${normalizedConfig.providerId} does not have a URL configured`);
        }
        this.providerModel = chunkF75EQ574_cjs.createOpenAICompatible({
          name: normalizedConfig.providerId,
          apiKey,
          baseURL: providerConfig.url
        }).textEmbeddingModel(normalizedConfig.modelId);
      }
    }
    // Adopt the resolved model's limits when it declares them.
    if (this.providerModel.maxEmbeddingsPerCall !== void 0) {
      this.maxEmbeddingsPerCall = this.providerModel.maxEmbeddingsPerCall;
    }
    if (this.providerModel.supportsParallelCalls !== void 0) {
      this.supportsParallelCalls = this.providerModel.supportsParallelCalls;
    }
  }
  // Straight delegation to the resolved provider embedding model.
  async doEmbed(args) {
    return this.providerModel.doEmbed(args);
  }
};
4882
+
4883
// Public surface of this bundled chunk, consumed by sibling dist chunks.
exports.AzureOpenAIGateway = AzureOpenAIGateway;
exports.ModelRouterEmbeddingModel = ModelRouterEmbeddingModel;
exports.ModelRouterLanguageModel = ModelRouterLanguageModel;
exports.resolveModelConfig = resolveModelConfig;
//# sourceMappingURL=chunk-53SZJCBX.cjs.map
//# sourceMappingURL=chunk-53SZJCBX.cjs.map