@mastra/core 1.0.0-beta.11 → 1.0.0-beta.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (447) hide show
  1. package/CHANGELOG.md +343 -0
  2. package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
  3. package/dist/_types/@internal_ai-sdk-v4/dist/test.d.ts +65 -0
  4. package/dist/_types/@internal_ai-sdk-v5/dist/index.d.ts +8396 -0
  5. package/dist/_types/@internal_ai-sdk-v5/dist/test.d.ts +1708 -0
  6. package/dist/_types/@internal_external-types/dist/index.d.ts +858 -0
  7. package/dist/agent/agent-legacy.d.ts +1 -1
  8. package/dist/agent/agent.d.ts +3 -3
  9. package/dist/agent/agent.d.ts.map +1 -1
  10. package/dist/agent/agent.types.d.ts +11 -8
  11. package/dist/agent/agent.types.d.ts.map +1 -1
  12. package/dist/agent/index.cjs +17 -9
  13. package/dist/agent/index.d.ts +1 -1
  14. package/dist/agent/index.d.ts.map +1 -1
  15. package/dist/agent/index.js +2 -2
  16. package/dist/agent/message-list/index.cjs +3 -3
  17. package/dist/agent/message-list/index.d.ts +4 -3
  18. package/dist/agent/message-list/index.d.ts.map +1 -1
  19. package/dist/agent/message-list/index.js +1 -1
  20. package/dist/agent/message-list/prompt/attachments-to-parts.d.ts +1 -1
  21. package/dist/agent/message-list/prompt/invalid-content-error.d.ts +1 -1
  22. package/dist/agent/message-list/types.d.ts +3 -3
  23. package/dist/agent/message-list/types.d.ts.map +1 -1
  24. package/dist/agent/message-list/utils/ai-v4-v5/core-model-message.d.ts +1 -1
  25. package/dist/agent/message-list/utils/ai-v4-v5/ui-message.d.ts +1 -1
  26. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts +2 -2
  27. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts.map +1 -1
  28. package/dist/agent/message-list/utils/convert-messages.d.ts +2 -2
  29. package/dist/agent/message-list/utils/convert-messages.d.ts.map +1 -1
  30. package/dist/agent/trip-wire.d.ts +2 -2
  31. package/dist/agent/trip-wire.d.ts.map +1 -1
  32. package/dist/agent/types.d.ts +3 -3
  33. package/dist/agent/utils.d.ts +7 -4
  34. package/dist/agent/utils.d.ts.map +1 -1
  35. package/dist/agent/workflows/prepare-stream/index.d.ts +4 -3
  36. package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
  37. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts +3 -3
  38. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
  39. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts +3 -3
  40. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts.map +1 -1
  41. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts +3 -3
  42. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
  43. package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -1
  44. package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
  45. package/dist/bundler/types.d.ts +15 -1
  46. package/dist/bundler/types.d.ts.map +1 -1
  47. package/dist/chunk-2AU5ZHBL.js +79 -0
  48. package/dist/chunk-2AU5ZHBL.js.map +1 -0
  49. package/dist/chunk-2SQB3WBT.js +4574 -0
  50. package/dist/chunk-2SQB3WBT.js.map +1 -0
  51. package/dist/{chunk-THZTRBFS.js → chunk-373OC54J.js} +8 -8
  52. package/dist/chunk-373OC54J.js.map +1 -0
  53. package/dist/{chunk-US2U7ECW.js → chunk-3IP3DZ7G.js} +234 -101
  54. package/dist/chunk-3IP3DZ7G.js.map +1 -0
  55. package/dist/{chunk-SXNQRJQD.js → chunk-4AT6YQKZ.js} +26 -20
  56. package/dist/chunk-4AT6YQKZ.js.map +1 -0
  57. package/dist/{chunk-C36YRTZ6.js → chunk-4CMIJQF6.js} +117 -114
  58. package/dist/chunk-4CMIJQF6.js.map +1 -0
  59. package/dist/chunk-53SZJCBX.cjs +4888 -0
  60. package/dist/chunk-53SZJCBX.cjs.map +1 -0
  61. package/dist/chunk-55VPMN3N.js +250 -0
  62. package/dist/chunk-55VPMN3N.js.map +1 -0
  63. package/dist/{chunk-QM5SRDJX.js → chunk-5PTZG26U.js} +66 -84
  64. package/dist/chunk-5PTZG26U.js.map +1 -0
  65. package/dist/{chunk-U3XOLEPX.js → chunk-5UQ5TB6J.js} +6 -32
  66. package/dist/chunk-5UQ5TB6J.js.map +1 -0
  67. package/dist/{chunk-O2BJW7YA.js → chunk-67LM2UCT.js} +9 -9
  68. package/dist/chunk-67LM2UCT.js.map +1 -0
  69. package/dist/{chunk-YC6PJEPH.cjs → chunk-6CG7IY57.cjs} +266 -133
  70. package/dist/chunk-6CG7IY57.cjs.map +1 -0
  71. package/dist/chunk-6PMMP3FR.js +7 -0
  72. package/dist/chunk-6PMMP3FR.js.map +1 -0
  73. package/dist/{chunk-DZUJEN5N.cjs → chunk-6SZKM6EC.cjs} +10 -3
  74. package/dist/{chunk-DZUJEN5N.cjs.map → chunk-6SZKM6EC.cjs.map} +1 -1
  75. package/dist/{chunk-5Q6WAYEY.cjs → chunk-72E3YF6A.cjs} +35 -49
  76. package/dist/chunk-72E3YF6A.cjs.map +1 -0
  77. package/dist/{chunk-5WRI5ZAA.js → chunk-7D4SUZUM.js} +10 -4
  78. package/dist/{chunk-5WRI5ZAA.js.map → chunk-7D4SUZUM.js.map} +1 -1
  79. package/dist/{chunk-7P6BNIJH.js → chunk-AYBJ5GAD.js} +281 -35
  80. package/dist/chunk-AYBJ5GAD.js.map +1 -0
  81. package/dist/chunk-D22XABFZ.js +79 -0
  82. package/dist/chunk-D22XABFZ.js.map +1 -0
  83. package/dist/{chunk-SCUWP4II.cjs → chunk-DBW6S25C.cjs} +47 -74
  84. package/dist/chunk-DBW6S25C.cjs.map +1 -0
  85. package/dist/{chunk-MRFUISXC.cjs → chunk-EGHGFLL3.cjs} +2631 -179
  86. package/dist/chunk-EGHGFLL3.cjs.map +1 -0
  87. package/dist/{chunk-BJXKH4LG.cjs → chunk-ETWAR2YE.cjs} +43 -78
  88. package/dist/chunk-ETWAR2YE.cjs.map +1 -0
  89. package/dist/{chunk-CZEJQSWB.cjs → chunk-F75EQ574.cjs} +65 -6
  90. package/dist/chunk-F75EQ574.cjs.map +1 -0
  91. package/dist/{chunk-BUKY6CTR.cjs → chunk-FPDJ4XN6.cjs} +282 -36
  92. package/dist/chunk-FPDJ4XN6.cjs.map +1 -0
  93. package/dist/chunk-FST2G2FQ.cjs +84 -0
  94. package/dist/chunk-FST2G2FQ.cjs.map +1 -0
  95. package/dist/chunk-FVQTJUBD.cjs +2120 -0
  96. package/dist/chunk-FVQTJUBD.cjs.map +1 -0
  97. package/dist/chunk-G6E6V2Z4.js +2070 -0
  98. package/dist/chunk-G6E6V2Z4.js.map +1 -0
  99. package/dist/{chunk-JIGDJK2O.js → chunk-GBQXIVL6.js} +4 -39
  100. package/dist/chunk-GBQXIVL6.js.map +1 -0
  101. package/dist/{chunk-F2GAJSBI.js → chunk-GELVUDUY.js} +11 -8
  102. package/dist/chunk-GELVUDUY.js.map +1 -0
  103. package/dist/chunk-GVAPYQRO.cjs +252 -0
  104. package/dist/chunk-GVAPYQRO.cjs.map +1 -0
  105. package/dist/{chunk-TWH4PTDG.cjs → chunk-HWMMIRIF.cjs} +32 -27
  106. package/dist/chunk-HWMMIRIF.cjs.map +1 -0
  107. package/dist/{chunk-52RSUALV.cjs → chunk-JAGQZZ43.cjs} +1660 -1196
  108. package/dist/chunk-JAGQZZ43.cjs.map +1 -0
  109. package/dist/{chunk-PK2A5WBG.js → chunk-K66U47VL.js} +54 -7
  110. package/dist/chunk-K66U47VL.js.map +1 -0
  111. package/dist/chunk-L3NKIMF5.cjs +10 -0
  112. package/dist/chunk-L3NKIMF5.cjs.map +1 -0
  113. package/dist/chunk-L4JCRWDY.cjs +252 -0
  114. package/dist/chunk-L4JCRWDY.cjs.map +1 -0
  115. package/dist/{chunk-IVV5TOMD.js → chunk-LDXKZYOV.js} +31 -11
  116. package/dist/chunk-LDXKZYOV.js.map +1 -0
  117. package/dist/chunk-NESKUIRE.cjs +4586 -0
  118. package/dist/chunk-NESKUIRE.cjs.map +1 -0
  119. package/dist/{chunk-SVLMF4UZ.cjs → chunk-NIOEY3N3.cjs} +66 -85
  120. package/dist/chunk-NIOEY3N3.cjs.map +1 -0
  121. package/dist/{chunk-PG5H6QIO.cjs → chunk-O3ULBGV6.cjs} +40 -20
  122. package/dist/chunk-O3ULBGV6.cjs.map +1 -0
  123. package/dist/{chunk-WTSZBHIZ.cjs → chunk-O5BQBZEF.cjs} +28 -28
  124. package/dist/chunk-O5BQBZEF.cjs.map +1 -0
  125. package/dist/{chunk-4JKEUSCC.cjs → chunk-OOUFPYSX.cjs} +25 -22
  126. package/dist/chunk-OOUFPYSX.cjs.map +1 -0
  127. package/dist/chunk-QDVYP2T7.js +4883 -0
  128. package/dist/chunk-QDVYP2T7.js.map +1 -0
  129. package/dist/{chunk-2ULLRN4Y.js → chunk-QF4MHFSU.js} +1294 -834
  130. package/dist/chunk-QF4MHFSU.js.map +1 -0
  131. package/dist/{chunk-Z57R5WS4.js → chunk-SLBWA2F3.js} +4 -4
  132. package/dist/{chunk-Z57R5WS4.js.map → chunk-SLBWA2F3.js.map} +1 -1
  133. package/dist/chunk-ST7NBF4H.cjs +84 -0
  134. package/dist/chunk-ST7NBF4H.cjs.map +1 -0
  135. package/dist/{chunk-YWMMBIOM.cjs → chunk-TDM43G4I.cjs} +15 -15
  136. package/dist/{chunk-YWMMBIOM.cjs.map → chunk-TDM43G4I.cjs.map} +1 -1
  137. package/dist/{chunk-S73Z3PBJ.cjs → chunk-TRUNX3AX.cjs} +138 -134
  138. package/dist/chunk-TRUNX3AX.cjs.map +1 -0
  139. package/dist/chunk-VE6HQ7H6.js +250 -0
  140. package/dist/chunk-VE6HQ7H6.js.map +1 -0
  141. package/dist/{chunk-OEIVMCWX.js → chunk-VZJOEGQA.js} +2536 -84
  142. package/dist/chunk-VZJOEGQA.js.map +1 -0
  143. package/dist/{chunk-JJ5O45LH.js → chunk-YPLZDWG7.js} +32 -27
  144. package/dist/chunk-YPLZDWG7.js.map +1 -0
  145. package/dist/{chunk-MGCGWPQJ.cjs → chunk-Z55SJVEC.cjs} +8 -8
  146. package/dist/chunk-Z55SJVEC.cjs.map +1 -0
  147. package/dist/error/index.cjs +6 -6
  148. package/dist/error/index.d.ts +26 -20
  149. package/dist/error/index.d.ts.map +1 -1
  150. package/dist/error/index.js +1 -1
  151. package/dist/error/utils.d.ts +19 -5
  152. package/dist/error/utils.d.ts.map +1 -1
  153. package/dist/evals/index.cjs +4 -4
  154. package/dist/evals/index.js +1 -1
  155. package/dist/evals/run/index.d.ts +1 -1
  156. package/dist/evals/run/index.d.ts.map +1 -1
  157. package/dist/evals/scoreTraces/index.cjs +8 -8
  158. package/dist/evals/scoreTraces/index.js +2 -2
  159. package/dist/evals/types.d.ts +1 -1
  160. package/dist/events/event-emitter.d.ts +6 -1
  161. package/dist/events/event-emitter.d.ts.map +1 -1
  162. package/dist/index.cjs +2 -2
  163. package/dist/index.js +1 -1
  164. package/dist/integration/index.cjs +2 -2
  165. package/dist/integration/index.js +1 -1
  166. package/dist/llm/index.cjs +15 -15
  167. package/dist/llm/index.d.ts +2 -2
  168. package/dist/llm/index.d.ts.map +1 -1
  169. package/dist/llm/index.js +5 -5
  170. package/dist/llm/model/aisdk/generate-to-stream.d.ts +20 -0
  171. package/dist/llm/model/aisdk/generate-to-stream.d.ts.map +1 -0
  172. package/dist/llm/model/aisdk/v5/model.d.ts +5 -1
  173. package/dist/llm/model/aisdk/v5/model.d.ts.map +1 -1
  174. package/dist/llm/model/aisdk/v6/model.d.ts +51 -0
  175. package/dist/llm/model/aisdk/v6/model.d.ts.map +1 -0
  176. package/dist/llm/model/base.types.d.ts +2 -2
  177. package/dist/llm/model/model.d.ts +1 -1
  178. package/dist/llm/model/model.d.ts.map +1 -1
  179. package/dist/llm/model/model.loop.d.ts +3 -3
  180. package/dist/llm/model/model.loop.d.ts.map +1 -1
  181. package/dist/llm/model/model.loop.types.d.ts +1 -1
  182. package/dist/llm/model/model.loop.types.d.ts.map +1 -1
  183. package/dist/llm/model/provider-types.generated.d.ts +135 -11
  184. package/dist/llm/model/resolve-model.d.ts +2 -2
  185. package/dist/llm/model/resolve-model.d.ts.map +1 -1
  186. package/dist/llm/model/shared.types.d.ts +19 -8
  187. package/dist/llm/model/shared.types.d.ts.map +1 -1
  188. package/dist/loop/index.cjs +2 -2
  189. package/dist/loop/index.js +1 -1
  190. package/dist/loop/loop.d.ts +2 -2
  191. package/dist/loop/loop.d.ts.map +1 -1
  192. package/dist/loop/network/index.d.ts +2 -2
  193. package/dist/loop/network/index.d.ts.map +1 -1
  194. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts +2 -2
  195. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts.map +1 -1
  196. package/dist/loop/test-utils/MastraLanguageModelV3Mock.d.ts +37 -0
  197. package/dist/loop/test-utils/MastraLanguageModelV3Mock.d.ts.map +1 -0
  198. package/dist/loop/test-utils/fullStream.d.ts +2 -1
  199. package/dist/loop/test-utils/fullStream.d.ts.map +1 -1
  200. package/dist/loop/test-utils/options.d.ts.map +1 -1
  201. package/dist/loop/test-utils/resultObject.d.ts +2 -1
  202. package/dist/loop/test-utils/resultObject.d.ts.map +1 -1
  203. package/dist/loop/test-utils/streamObject.d.ts +1 -1
  204. package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
  205. package/dist/loop/test-utils/tools.d.ts.map +1 -1
  206. package/dist/loop/test-utils/utils-v3.d.ts +55 -0
  207. package/dist/loop/test-utils/utils-v3.d.ts.map +1 -0
  208. package/dist/loop/types.d.ts +8 -7
  209. package/dist/loop/types.d.ts.map +1 -1
  210. package/dist/loop/workflows/agentic-execution/index.d.ts +49 -49
  211. package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
  212. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +34 -34
  213. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  214. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +17 -17
  215. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
  216. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts +22 -21
  217. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts.map +1 -1
  218. package/dist/loop/workflows/agentic-loop/index.d.ts +49 -49
  219. package/dist/loop/workflows/agentic-loop/index.d.ts.map +1 -1
  220. package/dist/loop/workflows/run-state.d.ts +2 -2
  221. package/dist/loop/workflows/run-state.d.ts.map +1 -1
  222. package/dist/loop/workflows/schema.d.ts +18 -18
  223. package/dist/loop/workflows/schema.d.ts.map +1 -1
  224. package/dist/loop/workflows/stream.d.ts +2 -2
  225. package/dist/loop/workflows/stream.d.ts.map +1 -1
  226. package/dist/mastra/index.cjs +2 -2
  227. package/dist/mastra/index.js +1 -1
  228. package/dist/mcp/index.cjs +4 -4
  229. package/dist/mcp/index.js +1 -1
  230. package/dist/memory/index.cjs +6 -6
  231. package/dist/memory/index.js +1 -1
  232. package/dist/memory/memory.d.ts +1 -1
  233. package/dist/memory/types.d.ts +3 -3
  234. package/dist/memory/types.d.ts.map +1 -1
  235. package/dist/models-dev-E3WWI7VA.js +3 -0
  236. package/dist/{models-dev-23RN2WHG.js.map → models-dev-E3WWI7VA.js.map} +1 -1
  237. package/dist/models-dev-PPS7X4JM.cjs +12 -0
  238. package/dist/{models-dev-EO3SUIY2.cjs.map → models-dev-PPS7X4JM.cjs.map} +1 -1
  239. package/dist/netlify-TY656UYF.js +3 -0
  240. package/dist/{netlify-GXJ5D5DD.js.map → netlify-TY656UYF.js.map} +1 -1
  241. package/dist/netlify-VZFM5UH3.cjs +12 -0
  242. package/dist/{netlify-KJLY3GFS.cjs.map → netlify-VZFM5UH3.cjs.map} +1 -1
  243. package/dist/processors/index.cjs +37 -37
  244. package/dist/processors/index.d.ts +9 -9
  245. package/dist/processors/index.d.ts.map +1 -1
  246. package/dist/processors/index.js +1 -1
  247. package/dist/processors/runner.d.ts.map +1 -1
  248. package/dist/processors/step-schema.d.ts +1293 -1293
  249. package/dist/processors/step-schema.d.ts.map +1 -1
  250. package/dist/provider-registry-NXVD764B.js +3 -0
  251. package/dist/{provider-registry-F67Y6OF2.js.map → provider-registry-NXVD764B.js.map} +1 -1
  252. package/dist/provider-registry-ZIWSEUQE.cjs +40 -0
  253. package/dist/{provider-registry-3TG2KUD2.cjs.map → provider-registry-ZIWSEUQE.cjs.map} +1 -1
  254. package/dist/provider-registry.json +276 -30
  255. package/dist/{registry-generator-UMTNPBJX.js → registry-generator-AVQXI3GX.js} +2 -2
  256. package/dist/{registry-generator-UMTNPBJX.js.map → registry-generator-AVQXI3GX.js.map} +1 -1
  257. package/dist/{registry-generator-34SC4TAU.cjs → registry-generator-KOFNIIWJ.cjs} +2 -2
  258. package/dist/{registry-generator-34SC4TAU.cjs.map → registry-generator-KOFNIIWJ.cjs.map} +1 -1
  259. package/dist/relevance/index.cjs +3 -3
  260. package/dist/relevance/index.cjs.map +1 -1
  261. package/dist/relevance/index.js +2 -2
  262. package/dist/relevance/index.js.map +1 -1
  263. package/dist/server/index.cjs +5 -5
  264. package/dist/server/index.js +1 -1
  265. package/dist/storage/base.d.ts +2 -10
  266. package/dist/storage/base.d.ts.map +1 -1
  267. package/dist/storage/domains/workflows/base.d.ts +2 -8
  268. package/dist/storage/domains/workflows/base.d.ts.map +1 -1
  269. package/dist/storage/domains/workflows/inmemory.d.ts +2 -8
  270. package/dist/storage/domains/workflows/inmemory.d.ts.map +1 -1
  271. package/dist/storage/index.cjs +38 -38
  272. package/dist/storage/index.js +1 -1
  273. package/dist/storage/mock.d.ts +2 -8
  274. package/dist/storage/mock.d.ts.map +1 -1
  275. package/dist/storage/types.d.ts +9 -1
  276. package/dist/storage/types.d.ts.map +1 -1
  277. package/dist/stream/RunOutput.d.ts +1 -1
  278. package/dist/stream/aisdk/v4/input.d.ts +1 -1
  279. package/dist/stream/aisdk/v5/compat/content.d.ts +1 -1
  280. package/dist/stream/aisdk/v5/compat/content.d.ts.map +1 -1
  281. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts +1 -1
  282. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts.map +1 -1
  283. package/dist/stream/aisdk/v5/compat/ui-message.d.ts +1 -1
  284. package/dist/stream/aisdk/v5/compat/ui-message.d.ts.map +1 -1
  285. package/dist/stream/aisdk/v5/compat/validation.d.ts +1 -1
  286. package/dist/stream/aisdk/v5/compat/validation.d.ts.map +1 -1
  287. package/dist/stream/aisdk/v5/execute.d.ts +6 -6
  288. package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
  289. package/dist/stream/aisdk/v5/input.d.ts +1 -1
  290. package/dist/stream/aisdk/v5/input.d.ts.map +1 -1
  291. package/dist/stream/aisdk/v5/output-helpers.d.ts +12 -27
  292. package/dist/stream/aisdk/v5/output-helpers.d.ts.map +1 -1
  293. package/dist/stream/aisdk/v5/output.d.ts +41 -91
  294. package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
  295. package/dist/stream/aisdk/v5/transform.d.ts +1 -1
  296. package/dist/stream/aisdk/v5/transform.d.ts.map +1 -1
  297. package/dist/stream/base/input.d.ts +1 -1
  298. package/dist/stream/base/output.d.ts +16 -36
  299. package/dist/stream/base/output.d.ts.map +1 -1
  300. package/dist/stream/base/schema.d.ts +2 -2
  301. package/dist/stream/base/schema.d.ts.map +1 -1
  302. package/dist/stream/index.cjs +12 -12
  303. package/dist/stream/index.js +2 -2
  304. package/dist/stream/types.d.ts +32 -23
  305. package/dist/stream/types.d.ts.map +1 -1
  306. package/dist/test-utils/llm-mock.cjs +14587 -14
  307. package/dist/test-utils/llm-mock.cjs.map +1 -1
  308. package/dist/test-utils/llm-mock.d.ts +3 -3
  309. package/dist/test-utils/llm-mock.d.ts.map +1 -1
  310. package/dist/test-utils/llm-mock.js +14577 -4
  311. package/dist/test-utils/llm-mock.js.map +1 -1
  312. package/dist/token-6GSAFR2W-LTZ7QQUP.js +61 -0
  313. package/dist/token-6GSAFR2W-LTZ7QQUP.js.map +1 -0
  314. package/dist/token-6GSAFR2W-SGVIXFCP.cjs +63 -0
  315. package/dist/token-6GSAFR2W-SGVIXFCP.cjs.map +1 -0
  316. package/dist/token-6GSAFR2W-SPYPLMBM.js +61 -0
  317. package/dist/token-6GSAFR2W-SPYPLMBM.js.map +1 -0
  318. package/dist/token-6GSAFR2W-UEEINYAN.cjs +63 -0
  319. package/dist/token-6GSAFR2W-UEEINYAN.cjs.map +1 -0
  320. package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs +10 -0
  321. package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs.map +1 -0
  322. package/dist/token-util-NEHG7TUY-JRJTGTAB.js +8 -0
  323. package/dist/token-util-NEHG7TUY-JRJTGTAB.js.map +1 -0
  324. package/dist/token-util-NEHG7TUY-QTFZ26EN.js +8 -0
  325. package/dist/token-util-NEHG7TUY-QTFZ26EN.js.map +1 -0
  326. package/dist/token-util-NEHG7TUY-WZL2DNCG.cjs +10 -0
  327. package/dist/token-util-NEHG7TUY-WZL2DNCG.cjs.map +1 -0
  328. package/dist/tools/index.cjs +4 -4
  329. package/dist/tools/index.js +1 -1
  330. package/dist/tools/is-vercel-tool.cjs +2 -2
  331. package/dist/tools/is-vercel-tool.js +1 -1
  332. package/dist/tools/tool-builder/builder.d.ts +2 -1
  333. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  334. package/dist/tools/tool.d.ts.map +1 -1
  335. package/dist/tools/types.d.ts +5 -5
  336. package/dist/tools/types.d.ts.map +1 -1
  337. package/dist/utils.cjs +22 -22
  338. package/dist/utils.d.ts +5 -5
  339. package/dist/utils.d.ts.map +1 -1
  340. package/dist/utils.js +1 -1
  341. package/dist/vector/embed.d.ts +3 -2
  342. package/dist/vector/embed.d.ts.map +1 -1
  343. package/dist/vector/index.cjs +5316 -16
  344. package/dist/vector/index.cjs.map +1 -1
  345. package/dist/vector/index.js +5282 -4
  346. package/dist/vector/index.js.map +1 -1
  347. package/dist/vector/vector.d.ts +15 -2
  348. package/dist/vector/vector.d.ts.map +1 -1
  349. package/dist/voice/aisdk/speech.d.ts +1 -1
  350. package/dist/voice/aisdk/speech.d.ts.map +1 -1
  351. package/dist/voice/aisdk/transcription.d.ts +1 -1
  352. package/dist/voice/aisdk/transcription.d.ts.map +1 -1
  353. package/dist/voice/composite-voice.d.ts +1 -1
  354. package/dist/voice/composite-voice.d.ts.map +1 -1
  355. package/dist/voice/index.cjs +6 -6
  356. package/dist/voice/index.js +1 -1
  357. package/dist/workflows/constants.cjs +4 -4
  358. package/dist/workflows/constants.d.ts +1 -1
  359. package/dist/workflows/constants.d.ts.map +1 -1
  360. package/dist/workflows/constants.js +1 -1
  361. package/dist/workflows/default.d.ts +9 -16
  362. package/dist/workflows/default.d.ts.map +1 -1
  363. package/dist/workflows/evented/execution-engine.d.ts +3 -2
  364. package/dist/workflows/evented/execution-engine.d.ts.map +1 -1
  365. package/dist/workflows/evented/index.cjs +10 -10
  366. package/dist/workflows/evented/index.js +1 -1
  367. package/dist/workflows/evented/step-executor.d.ts +5 -1
  368. package/dist/workflows/evented/step-executor.d.ts.map +1 -1
  369. package/dist/workflows/evented/workflow-event-processor/index.d.ts +16 -1
  370. package/dist/workflows/evented/workflow-event-processor/index.d.ts.map +1 -1
  371. package/dist/workflows/evented/workflow.d.ts +20 -0
  372. package/dist/workflows/evented/workflow.d.ts.map +1 -1
  373. package/dist/workflows/execution-engine.d.ts +25 -2
  374. package/dist/workflows/execution-engine.d.ts.map +1 -1
  375. package/dist/workflows/handlers/control-flow.d.ts +6 -5
  376. package/dist/workflows/handlers/control-flow.d.ts.map +1 -1
  377. package/dist/workflows/handlers/entry.d.ts +5 -3
  378. package/dist/workflows/handlers/entry.d.ts.map +1 -1
  379. package/dist/workflows/handlers/sleep.d.ts +4 -3
  380. package/dist/workflows/handlers/sleep.d.ts.map +1 -1
  381. package/dist/workflows/handlers/step.d.ts +5 -3
  382. package/dist/workflows/handlers/step.d.ts.map +1 -1
  383. package/dist/workflows/index.cjs +26 -22
  384. package/dist/workflows/index.js +1 -1
  385. package/dist/workflows/step.d.ts +5 -4
  386. package/dist/workflows/step.d.ts.map +1 -1
  387. package/dist/workflows/types.d.ts +66 -14
  388. package/dist/workflows/types.d.ts.map +1 -1
  389. package/dist/workflows/utils.d.ts +11 -0
  390. package/dist/workflows/utils.d.ts.map +1 -1
  391. package/dist/workflows/workflow.d.ts +30 -9
  392. package/dist/workflows/workflow.d.ts.map +1 -1
  393. package/package.json +13 -14
  394. package/src/llm/model/provider-types.generated.d.ts +135 -11
  395. package/dist/agent/__tests__/mock-model.d.ts +0 -8
  396. package/dist/agent/__tests__/mock-model.d.ts.map +0 -1
  397. package/dist/agent/agent-types.test-d.d.ts +0 -2
  398. package/dist/agent/agent-types.test-d.d.ts.map +0 -1
  399. package/dist/ai-sdk.types.d.ts +0 -4705
  400. package/dist/chunk-2ULLRN4Y.js.map +0 -1
  401. package/dist/chunk-3E3ILV6T.cjs +0 -518
  402. package/dist/chunk-3E3ILV6T.cjs.map +0 -1
  403. package/dist/chunk-4JKEUSCC.cjs.map +0 -1
  404. package/dist/chunk-52RSUALV.cjs.map +0 -1
  405. package/dist/chunk-5PAEYE3Q.js +0 -513
  406. package/dist/chunk-5PAEYE3Q.js.map +0 -1
  407. package/dist/chunk-5Q6WAYEY.cjs.map +0 -1
  408. package/dist/chunk-7P6BNIJH.js.map +0 -1
  409. package/dist/chunk-ABJOUEVA.cjs +0 -10
  410. package/dist/chunk-ABJOUEVA.cjs.map +0 -1
  411. package/dist/chunk-BJXKH4LG.cjs.map +0 -1
  412. package/dist/chunk-BUKY6CTR.cjs.map +0 -1
  413. package/dist/chunk-C36YRTZ6.js.map +0 -1
  414. package/dist/chunk-CZEJQSWB.cjs.map +0 -1
  415. package/dist/chunk-F2GAJSBI.js.map +0 -1
  416. package/dist/chunk-IVV5TOMD.js.map +0 -1
  417. package/dist/chunk-JIGDJK2O.js.map +0 -1
  418. package/dist/chunk-JJ5O45LH.js.map +0 -1
  419. package/dist/chunk-MGCGWPQJ.cjs.map +0 -1
  420. package/dist/chunk-MRFUISXC.cjs.map +0 -1
  421. package/dist/chunk-NLNKQD2T.js +0 -7
  422. package/dist/chunk-NLNKQD2T.js.map +0 -1
  423. package/dist/chunk-O2BJW7YA.js.map +0 -1
  424. package/dist/chunk-OEIVMCWX.js.map +0 -1
  425. package/dist/chunk-PG5H6QIO.cjs.map +0 -1
  426. package/dist/chunk-PK2A5WBG.js.map +0 -1
  427. package/dist/chunk-QM5SRDJX.js.map +0 -1
  428. package/dist/chunk-S73Z3PBJ.cjs.map +0 -1
  429. package/dist/chunk-SCUWP4II.cjs.map +0 -1
  430. package/dist/chunk-SVLMF4UZ.cjs.map +0 -1
  431. package/dist/chunk-SXNQRJQD.js.map +0 -1
  432. package/dist/chunk-THZTRBFS.js.map +0 -1
  433. package/dist/chunk-TWH4PTDG.cjs.map +0 -1
  434. package/dist/chunk-U3XOLEPX.js.map +0 -1
  435. package/dist/chunk-US2U7ECW.js.map +0 -1
  436. package/dist/chunk-WTSZBHIZ.cjs.map +0 -1
  437. package/dist/chunk-YC6PJEPH.cjs.map +0 -1
  438. package/dist/llm/model/is-v2-model.d.ts +0 -3
  439. package/dist/llm/model/is-v2-model.d.ts.map +0 -1
  440. package/dist/models-dev-23RN2WHG.js +0 -3
  441. package/dist/models-dev-EO3SUIY2.cjs +0 -12
  442. package/dist/netlify-GXJ5D5DD.js +0 -3
  443. package/dist/netlify-KJLY3GFS.cjs +0 -12
  444. package/dist/provider-registry-3TG2KUD2.cjs +0 -40
  445. package/dist/provider-registry-F67Y6OF2.js +0 -3
  446. package/dist/tools/tool-stream-types.test-d.d.ts +0 -2
  447. package/dist/tools/tool-stream-types.test-d.d.ts.map +0 -1
@@ -0,0 +1,4883 @@
1
+ import { ModelsDevGateway, parseModelRouterId } from './chunk-VZJOEGQA.js';
2
+ import { NetlifyGateway } from './chunk-SLBWA2F3.js';
3
+ import { createJsonErrorResponseHandler, lazyValidator, zodSchema, lazySchema, createProviderDefinedToolFactoryWithOutputSchema, MastraModelGateway, createOpenAICompatible, createOpenAI, createGoogleGenerativeAI, parseProviderOptions, postJsonToApi, createJsonResponseHandler, combineHeaders, generateId, createEventSourceResponseHandler, InvalidResponseDataError, isParsableJson, loadApiKey, withUserAgentSuffix, loadSetting, convertToBase64, UnsupportedFunctionalityError, TooManyEmbeddingValuesForCallError, APICallError, convertBase64ToUint8Array, mediaTypeToExtension, postFormDataToApi, createBinaryResponseHandler, InvalidPromptError, validateTypes } from './chunk-K66U47VL.js';
4
+ import { PROVIDER_REGISTRY, GatewayRegistry } from './chunk-AYBJ5GAD.js';
5
+ import { AISDKV5LanguageModel, createStreamFromGenerateResult } from './chunk-4CMIJQF6.js';
6
+ import { MastraError } from './chunk-YPLZDWG7.js';
7
+ import { RequestContext } from './chunk-6TBWJV35.js';
8
+ import { InMemoryServerCache } from './chunk-3CKZSDTQ.js';
9
+ import { createHash } from 'crypto';
10
+ import { z } from 'zod/v4';
11
+
12
+ var openaiErrorDataSchema = z.object({
13
+ error: z.object({
14
+ message: z.string(),
15
+ // The additional information below is handled loosely to support
16
+ // OpenAI-compatible providers that have slightly different error
17
+ // responses:
18
+ type: z.string().nullish(),
19
+ param: z.any().nullish(),
20
+ code: z.union([z.string(), z.number()]).nullish()
21
+ })
22
+ });
23
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
24
+ errorSchema: openaiErrorDataSchema,
25
+ errorToMessage: (data) => data.error.message
26
+ });
27
+ function convertToOpenAIChatMessages({
28
+ prompt,
29
+ systemMessageMode = "system"
30
+ }) {
31
+ const messages = [];
32
+ const warnings = [];
33
+ for (const { role, content } of prompt) {
34
+ switch (role) {
35
+ case "system": {
36
+ switch (systemMessageMode) {
37
+ case "system": {
38
+ messages.push({ role: "system", content });
39
+ break;
40
+ }
41
+ case "developer": {
42
+ messages.push({ role: "developer", content });
43
+ break;
44
+ }
45
+ case "remove": {
46
+ warnings.push({
47
+ type: "other",
48
+ message: "system messages are removed for this model"
49
+ });
50
+ break;
51
+ }
52
+ default: {
53
+ const _exhaustiveCheck = systemMessageMode;
54
+ throw new Error(
55
+ `Unsupported system message mode: ${_exhaustiveCheck}`
56
+ );
57
+ }
58
+ }
59
+ break;
60
+ }
61
+ case "user": {
62
+ if (content.length === 1 && content[0].type === "text") {
63
+ messages.push({ role: "user", content: content[0].text });
64
+ break;
65
+ }
66
+ messages.push({
67
+ role: "user",
68
+ content: content.map((part, index) => {
69
+ var _a, _b, _c;
70
+ switch (part.type) {
71
+ case "text": {
72
+ return { type: "text", text: part.text };
73
+ }
74
+ case "file": {
75
+ if (part.mediaType.startsWith("image/")) {
76
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
77
+ return {
78
+ type: "image_url",
79
+ image_url: {
80
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
81
+ // OpenAI specific extension: image detail
82
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
83
+ }
84
+ };
85
+ } else if (part.mediaType.startsWith("audio/")) {
86
+ if (part.data instanceof URL) {
87
+ throw new UnsupportedFunctionalityError({
88
+ functionality: "audio file parts with URLs"
89
+ });
90
+ }
91
+ switch (part.mediaType) {
92
+ case "audio/wav": {
93
+ return {
94
+ type: "input_audio",
95
+ input_audio: {
96
+ data: convertToBase64(part.data),
97
+ format: "wav"
98
+ }
99
+ };
100
+ }
101
+ case "audio/mp3":
102
+ case "audio/mpeg": {
103
+ return {
104
+ type: "input_audio",
105
+ input_audio: {
106
+ data: convertToBase64(part.data),
107
+ format: "mp3"
108
+ }
109
+ };
110
+ }
111
+ default: {
112
+ throw new UnsupportedFunctionalityError({
113
+ functionality: `audio content parts with media type ${part.mediaType}`
114
+ });
115
+ }
116
+ }
117
+ } else if (part.mediaType === "application/pdf") {
118
+ if (part.data instanceof URL) {
119
+ throw new UnsupportedFunctionalityError({
120
+ functionality: "PDF file parts with URLs"
121
+ });
122
+ }
123
+ return {
124
+ type: "file",
125
+ file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
126
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
127
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
128
+ }
129
+ };
130
+ } else {
131
+ throw new UnsupportedFunctionalityError({
132
+ functionality: `file part media type ${part.mediaType}`
133
+ });
134
+ }
135
+ }
136
+ }
137
+ })
138
+ });
139
+ break;
140
+ }
141
+ case "assistant": {
142
+ let text = "";
143
+ const toolCalls = [];
144
+ for (const part of content) {
145
+ switch (part.type) {
146
+ case "text": {
147
+ text += part.text;
148
+ break;
149
+ }
150
+ case "tool-call": {
151
+ toolCalls.push({
152
+ id: part.toolCallId,
153
+ type: "function",
154
+ function: {
155
+ name: part.toolName,
156
+ arguments: JSON.stringify(part.input)
157
+ }
158
+ });
159
+ break;
160
+ }
161
+ }
162
+ }
163
+ messages.push({
164
+ role: "assistant",
165
+ content: text,
166
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
167
+ });
168
+ break;
169
+ }
170
+ case "tool": {
171
+ for (const toolResponse of content) {
172
+ const output = toolResponse.output;
173
+ let contentValue;
174
+ switch (output.type) {
175
+ case "text":
176
+ case "error-text":
177
+ contentValue = output.value;
178
+ break;
179
+ case "content":
180
+ case "json":
181
+ case "error-json":
182
+ contentValue = JSON.stringify(output.value);
183
+ break;
184
+ }
185
+ messages.push({
186
+ role: "tool",
187
+ tool_call_id: toolResponse.toolCallId,
188
+ content: contentValue
189
+ });
190
+ }
191
+ break;
192
+ }
193
+ default: {
194
+ const _exhaustiveCheck = role;
195
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
196
+ }
197
+ }
198
+ }
199
+ return { messages, warnings };
200
+ }
201
+ function getResponseMetadata({
202
+ id,
203
+ model,
204
+ created
205
+ }) {
206
+ return {
207
+ id: id != null ? id : void 0,
208
+ modelId: model != null ? model : void 0,
209
+ timestamp: created ? new Date(created * 1e3) : void 0
210
+ };
211
+ }
212
/**
 * Maps OpenAI's raw `finish_reason` string onto the SDK's normalized
 * finish-reason vocabulary. Unrecognized (or missing) values map to "unknown".
 *
 * A Map is used (rather than a plain object) so prototype property names
 * like "constructor" can never produce a false match.
 */
function mapOpenAIFinishReason(finishReason) {
  const FINISH_REASON_MAP = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["content_filter", "content-filter"],
    ["function_call", "tool-calls"],
    ["tool_calls", "tool-calls"]
  ]);
  return FINISH_REASON_MAP.get(finishReason) ?? "unknown";
}
227
// Zod validator for a NON-streaming OpenAI /chat/completions response body.
// Wrapped in lazyValidator so the schema object is only constructed on first
// use. Most fields are `.nullish()` because OpenAI-compatible providers often
// omit or null them.
var openaiChatResponseSchema = lazyValidator(
  () => zodSchema(
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(), // epoch seconds
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          message: z.object({
            role: z.literal("assistant").nullish(),
            content: z.string().nullish(),
            // Function tool calls requested by the model.
            tool_calls: z.array(
              z.object({
                id: z.string().nullish(),
                type: z.literal("function"),
                function: z.object({
                  name: z.string(),
                  arguments: z.string() // JSON-encoded argument string
                })
              })
            ).nullish(),
            // URL citations attached to the message (web-search models).
            annotations: z.array(
              z.object({
                type: z.literal("url_citation"),
                start_index: z.number(),
                end_index: z.number(),
                url: z.string(),
                title: z.string()
              })
            ).nullish()
          }),
          index: z.number(),
          // Per-token log probabilities, present only when requested.
          logprobs: z.object({
            content: z.array(
              z.object({
                token: z.string(),
                logprob: z.number(),
                top_logprobs: z.array(
                  z.object({
                    token: z.string(),
                    logprob: z.number()
                  })
                )
              })
            ).nullish()
          }).nullish(),
          finish_reason: z.string().nullish()
        })
      ),
      // Token accounting; detail sub-objects are newer API additions.
      usage: z.object({
        prompt_tokens: z.number().nullish(),
        completion_tokens: z.number().nullish(),
        total_tokens: z.number().nullish(),
        prompt_tokens_details: z.object({
          cached_tokens: z.number().nullish()
        }).nullish(),
        completion_tokens_details: z.object({
          reasoning_tokens: z.number().nullish(),
          accepted_prediction_tokens: z.number().nullish(),
          rejected_prediction_tokens: z.number().nullish()
        }).nullish()
      }).nullish()
    })
  )
);
292
// Zod validator for one STREAMING /chat/completions SSE chunk. A chunk is
// either a delta payload or an error object (hence the union with
// openaiErrorDataSchema). Mirrors openaiChatResponseSchema but with `delta`
// instead of `message`, and partial tool-call fragments keyed by `index`.
var openaiChatChunkSchema = lazyValidator(
  () => zodSchema(
    z.union([
      z.object({
        id: z.string().nullish(),
        created: z.number().nullish(), // epoch seconds
        model: z.string().nullish(),
        choices: z.array(
          z.object({
            delta: z.object({
              role: z.enum(["assistant"]).nullish(),
              content: z.string().nullish(),
              // Incremental tool-call fragments; `index` correlates fragments
              // of the same call across chunks, other fields may be partial.
              tool_calls: z.array(
                z.object({
                  index: z.number(),
                  id: z.string().nullish(),
                  type: z.literal("function").nullish(),
                  function: z.object({
                    name: z.string().nullish(),
                    arguments: z.string().nullish()
                  })
                })
              ).nullish(),
              annotations: z.array(
                z.object({
                  type: z.literal("url_citation"),
                  start_index: z.number(),
                  end_index: z.number(),
                  url: z.string(),
                  title: z.string()
                })
              ).nullish()
            }).nullish(),
            logprobs: z.object({
              content: z.array(
                z.object({
                  token: z.string(),
                  logprob: z.number(),
                  top_logprobs: z.array(
                    z.object({
                      token: z.string(),
                      logprob: z.number()
                    })
                  )
                })
              ).nullish()
            }).nullish(),
            finish_reason: z.string().nullish(),
            index: z.number()
          })
        ),
        // Usage arrives on the final chunk when stream_options.include_usage
        // is set (see doStream below).
        usage: z.object({
          prompt_tokens: z.number().nullish(),
          completion_tokens: z.number().nullish(),
          total_tokens: z.number().nullish(),
          prompt_tokens_details: z.object({
            cached_tokens: z.number().nullish()
          }).nullish(),
          completion_tokens_details: z.object({
            reasoning_tokens: z.number().nullish(),
            accepted_prediction_tokens: z.number().nullish(),
            rejected_prediction_tokens: z.number().nullish()
          }).nullish()
        }).nullish()
      }),
      openaiErrorDataSchema
    ])
  )
);
361
// Zod validator for the `openai`-scoped provider options accepted by the chat
// language model (parsed via parseProviderOptions in getArgs). All fields are
// optional; unknown fields are handled by the parser, not this schema.
var openaiChatLanguageModelOptions = lazyValidator(
  () => zodSchema(
    z.object({
      /**
       * Modify the likelihood of specified tokens appearing in the completion.
       *
       * Accepts a JSON object that maps tokens (specified by their token ID in
       * the GPT tokenizer) to an associated bias value from -100 to 100.
       */
      logitBias: z.record(z.coerce.number(), z.number()).optional(),
      /**
       * Return the log probabilities of the tokens.
       *
       * Setting to true will return the log probabilities of the tokens that
       * were generated.
       *
       * Setting to a number will return the log probabilities of the top n
       * tokens that were generated.
       */
      logprobs: z.union([z.boolean(), z.number()]).optional(),
      /**
       * Whether to enable parallel function calling during tool use. Default to true.
       */
      parallelToolCalls: z.boolean().optional(),
      /**
       * A unique identifier representing your end-user, which can help OpenAI to
       * monitor and detect abuse.
       */
      user: z.string().optional(),
      /**
       * Reasoning effort for reasoning models. Defaults to `medium`.
       */
      reasoningEffort: z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
      /**
       * Maximum number of completion tokens to generate. Useful for reasoning models.
       */
      maxCompletionTokens: z.number().optional(),
      /**
       * Whether to enable persistence in responses API.
       */
      store: z.boolean().optional(),
      /**
       * Metadata to associate with the request.
       */
      metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
      /**
       * Parameters for prediction mode.
       */
      prediction: z.record(z.string(), z.any()).optional(),
      /**
       * Whether to use structured outputs.
       *
       * @default true
       */
      structuredOutputs: z.boolean().optional(),
      /**
       * Service tier for the request.
       * - 'auto': Default service tier. The request will be processed with the service tier configured in the
       *   Project settings. Unless otherwise configured, the Project will use 'default'.
       * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
       * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
       * - 'default': The request will be processed with the standard pricing and performance for the selected model.
       *
       * @default 'auto'
       */
      serviceTier: z.enum(["auto", "flex", "priority", "default"]).optional(),
      /**
       * Whether to use strict JSON schema validation.
       *
       * @default false
       */
      strictJsonSchema: z.boolean().optional(),
      /**
       * Controls the verbosity of the model's responses.
       * Lower values will result in more concise responses, while higher values will result in more verbose responses.
       */
      textVerbosity: z.enum(["low", "medium", "high"]).optional(),
      /**
       * A cache key for prompt caching. Allows manual control over prompt caching behavior.
       * Useful for improving cache hit rates and working around automatic caching issues.
       */
      promptCacheKey: z.string().optional(),
      /**
       * The retention policy for the prompt cache.
       * - 'in_memory': Default. Standard prompt caching behavior.
       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
       *   Currently only available for 5.1 series models.
       *
       * @default 'in_memory'
       */
      promptCacheRetention: z.enum(["in_memory", "24h"]).optional(),
      /**
       * A stable identifier used to help detect users of your application
       * that may be violating OpenAI's usage policies. The IDs should be a
       * string that uniquely identifies each user. We recommend hashing their
       * username or email address, in order to avoid sending us any identifying
       * information.
       */
      safetyIdentifier: z.string().optional()
    })
  )
);
463
/**
 * Converts SDK tool definitions and tool-choice into the OpenAI chat request
 * shape.
 *
 * @param {object} opts
 * @param {Array|undefined} opts.tools - SDK tool definitions (only
 *   `type: "function"` tools are supported; others produce a warning).
 * @param {object|undefined} opts.toolChoice - SDK tool-choice directive.
 * @param {boolean} opts.structuredOutputs - when true, `strictJsonSchema`
 *   is forwarded as the per-tool `strict` flag.
 * @param {boolean} opts.strictJsonSchema
 * @returns {{tools: Array|undefined, toolChoice: any, toolWarnings: Array}}
 * @throws {UnsupportedFunctionalityError} for an unrecognized tool-choice type.
 */
function prepareChatTools({ tools, toolChoice, structuredOutputs, strictJsonSchema }) {
  const toolWarnings = [];

  // An empty tool list is treated the same as no tools at all.
  const activeTools = tools?.length ? tools : undefined;
  if (activeTools == null) {
    return { tools: undefined, toolChoice: undefined, toolWarnings };
  }

  const openaiTools = [];
  for (const tool of activeTools) {
    if (tool.type === "function") {
      openaiTools.push({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          // `strict` only has meaning when structured outputs are enabled.
          strict: structuredOutputs ? strictJsonSchema : undefined
        }
      });
    } else {
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }

  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: undefined, toolWarnings };
  }

  switch (toolChoice.type) {
    case "auto":
    case "none":
    case "required":
      // These pass through as bare strings in the OpenAI API.
      return { tools: openaiTools, toolChoice: toolChoice.type, toolWarnings };
    case "tool":
      // Forcing a specific tool requires the object form.
      return {
        tools: openaiTools,
        toolChoice: {
          type: "function",
          function: { name: toolChoice.toolName }
        },
        toolWarnings
      };
    default:
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${toolChoice.type}`
      });
  }
}
521
// Language-model implementation for the OpenAI /chat/completions API
// (LanguageModelV2 interface: getArgs/doGenerate/doStream). The streaming
// transform below is order-sensitive (tool-call fragments are accumulated by
// index across chunks), so code is left as-is and only comments are added.
var OpenAIChatLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // Image URLs can be passed through to the API instead of being downloaded.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body shared by doGenerate and doStream, collecting
  // warnings for settings the target model does not support.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    responseFormat,
    seed,
    tools,
    toolChoice,
    providerOptions
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    // Parse the `openai`-scoped provider options against the schema above.
    const openaiOptions = (_a = await parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: openaiChatLanguageModelOptions
    })) != null ? _a : {};
    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
    // topK has no equivalent in the chat completions API.
    if (topK != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "topK"
      });
    }
    // A JSON schema can only be enforced when structured outputs are on.
    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format schema is only supported with structuredOutputs"
      });
    }
    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
      {
        prompt,
        systemMessageMode: getSystemMessageMode(this.modelId)
      }
    );
    warnings.push(...messageWarnings);
    const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      logit_bias: openaiOptions.logitBias,
      // logprobs may be boolean or a top-n count; the API wants a boolean
      // `logprobs` plus a numeric `top_logprobs`.
      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
      user: openaiOptions.user,
      parallel_tool_calls: openaiOptions.parallelToolCalls,
      // standardized settings:
      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      // json_schema when a schema is supplied and structured outputs are on;
      // otherwise fall back to plain json_object mode.
      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
        type: "json_schema",
        json_schema: {
          schema: responseFormat.schema,
          strict: strictJsonSchema,
          name: (_d = responseFormat.name) != null ? _d : "response",
          description: responseFormat.description
        }
      } : { type: "json_object" } : void 0,
      stop: stopSequences,
      seed,
      verbosity: openaiOptions.textVerbosity,
      // openai specific settings:
      // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
      max_completion_tokens: openaiOptions.maxCompletionTokens,
      store: openaiOptions.store,
      metadata: openaiOptions.metadata,
      prediction: openaiOptions.prediction,
      reasoning_effort: openaiOptions.reasoningEffort,
      service_tier: openaiOptions.serviceTier,
      prompt_cache_key: openaiOptions.promptCacheKey,
      prompt_cache_retention: openaiOptions.promptCacheRetention,
      safety_identifier: openaiOptions.safetyIdentifier,
      // messages:
      messages
    };
    // Reasoning models (o-series / gpt-5) reject sampling parameters;
    // strip them and warn rather than letting the request fail.
    if (isReasoningModel(this.modelId)) {
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for reasoning models"
        });
      }
      if (baseArgs.top_p != null) {
        baseArgs.top_p = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "topP",
          details: "topP is not supported for reasoning models"
        });
      }
      if (baseArgs.frequency_penalty != null) {
        baseArgs.frequency_penalty = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "frequencyPenalty",
          details: "frequencyPenalty is not supported for reasoning models"
        });
      }
      if (baseArgs.presence_penalty != null) {
        baseArgs.presence_penalty = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "presencePenalty",
          details: "presencePenalty is not supported for reasoning models"
        });
      }
      if (baseArgs.logit_bias != null) {
        baseArgs.logit_bias = void 0;
        warnings.push({
          type: "other",
          message: "logitBias is not supported for reasoning models"
        });
      }
      if (baseArgs.logprobs != null) {
        baseArgs.logprobs = void 0;
        warnings.push({
          type: "other",
          message: "logprobs is not supported for reasoning models"
        });
      }
      if (baseArgs.top_logprobs != null) {
        baseArgs.top_logprobs = void 0;
        warnings.push({
          type: "other",
          message: "topLogprobs is not supported for reasoning models"
        });
      }
      // Reasoning models use max_completion_tokens instead of max_tokens.
      if (baseArgs.max_tokens != null) {
        if (baseArgs.max_completion_tokens == null) {
          baseArgs.max_completion_tokens = baseArgs.max_tokens;
        }
        baseArgs.max_tokens = void 0;
      }
    } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
      // Search-preview models also reject temperature.
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for the search preview models and has been removed."
        });
      }
    }
    // Validate the requested service tier against model support; fall back to
    // the default tier (undefined) with a warning when unsupported.
    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
      });
      baseArgs.service_tier = void 0;
    }
    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
      });
      baseArgs.service_tier = void 0;
    }
    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
      toolWarnings
    } = prepareChatTools({
      tools,
      toolChoice,
      structuredOutputs,
      strictJsonSchema
    });
    return {
      args: {
        ...baseArgs,
        tools: openaiTools,
        tool_choice: openaiToolChoice
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
  // Single-shot (non-streaming) generation. Posts the request, then maps the
  // first choice's text / tool calls / citations into SDK content parts.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        openaiChatResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Only the first choice is surfaced (n > 1 is not supported here).
    const choice = response.choices[0];
    const content = [];
    const text = choice.message.content;
    if (text != null && text.length > 0) {
      content.push({ type: "text", text });
    }
    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
      content.push({
        type: "tool-call",
        toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
        toolName: toolCall.function.name,
        input: toolCall.function.arguments
      });
    }
    // URL citations become "source" content parts.
    for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
      content.push({
        type: "source",
        sourceType: "url",
        id: generateId(),
        url: annotation.url,
        title: annotation.title
      });
    }
    const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
    const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
    // Provider-specific extras (prediction token counts, logprobs) are
    // reported via providerMetadata.openai.
    const providerMetadata = { openai: {} };
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
      providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
    }
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
      providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
    }
    if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
      providerMetadata.openai.logprobs = choice.logprobs.content;
    }
    return {
      content,
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      usage: {
        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
      },
      request: { body },
      response: {
        ...getResponseMetadata(response),
        headers: responseHeaders,
        body: rawResponse
      },
      warnings,
      providerMetadata
    };
  }
  // Streaming generation. Pipes the SSE chunk stream through a TransformStream
  // that re-emits SDK stream parts (text deltas, tool-call fragments, sources,
  // usage, finish). Tool-call fragments are accumulated per `index` until
  // their arguments parse as complete JSON.
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      // Request a trailing usage chunk so token counts can be reported.
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        openaiChatChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Mutable transform state, shared across chunks:
    const toolCalls = []; // partial tool calls, keyed by delta index
    let finishReason = "unknown";
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let metadataExtracted = false; // response-metadata is emitted only once
    let isActiveText = false; // whether a text block is currently open
    const providerMetadata = { openai: {} };
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-validation failure on a chunk aborts with an error part.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            // The union schema admits provider error payloads.
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // Emit id/model/timestamp once, from the first chunk carrying them.
            if (!metadataExtracted) {
              const metadata = getResponseMetadata(value);
              if (Object.values(metadata).some(Boolean)) {
                metadataExtracted = true;
                controller.enqueue({
                  type: "response-metadata",
                  ...getResponseMetadata(value)
                });
              }
            }
            // Usage arrives on the final chunk (include_usage above).
            if (value.usage != null) {
              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
              }
              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
              }
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
              providerMetadata.openai.logprobs = choice.logprobs.content;
            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
            const delta = choice.delta;
            // Text deltas share the fixed stream id "0"; open the text block
            // lazily on the first delta.
            if (delta.content != null) {
              if (!isActiveText) {
                controller.enqueue({ type: "text-start", id: "0" });
                isActiveText = true;
              }
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: delta.content
              });
            }
            if (delta.tool_calls != null) {
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                // First fragment of a tool call: must carry type, id and name.
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function' type.`
                    });
                  }
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`
                    });
                  }
                  if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`
                    });
                  }
                  controller.enqueue({
                    type: "tool-input-start",
                    id: toolCallDelta.id,
                    toolName: toolCallDelta.function.name
                  });
                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                    },
                    hasFinished: false
                  };
                  const toolCall2 = toolCalls[index];
                  // Some providers send the whole call in one fragment; if the
                  // arguments already form valid JSON, finish the call now.
                  if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                    if (toolCall2.function.arguments.length > 0) {
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: toolCall2.id,
                        delta: toolCall2.function.arguments
                      });
                    }
                    if (isParsableJson(toolCall2.function.arguments)) {
                      controller.enqueue({
                        type: "tool-input-end",
                        id: toolCall2.id
                      });
                      controller.enqueue({
                        type: "tool-call",
                        toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                        toolName: toolCall2.function.name,
                        input: toolCall2.function.arguments
                      });
                      toolCall2.hasFinished = true;
                    }
                  }
                  continue;
                }
                // Continuation fragment: append arguments until they parse.
                const toolCall = toolCalls[index];
                if (toolCall.hasFinished) {
                  continue;
                }
                if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                }
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCall.id,
                  delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                });
                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                  controller.enqueue({
                    type: "tool-input-end",
                    id: toolCall.id
                  });
                  controller.enqueue({
                    type: "tool-call",
                    toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                    toolName: toolCall.function.name,
                    input: toolCall.function.arguments
                  });
                  toolCall.hasFinished = true;
                }
              }
            }
            if (delta.annotations != null) {
              for (const annotation of delta.annotations) {
                controller.enqueue({
                  type: "source",
                  sourceType: "url",
                  id: generateId(),
                  url: annotation.url,
                  title: annotation.title
                });
              }
            }
          },
          flush(controller) {
            // Close any open text block before emitting the final part.
            if (isActiveText) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage,
              ...providerMetadata != null ? { providerMetadata } : {}
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1017
/**
 * True for OpenAI reasoning models: the o-series ("o1", "o3", "o4-mini", ...)
 * and the gpt-5 family, except the non-reasoning gpt-5-chat line.
 */
function isReasoningModel(modelId) {
  if (modelId.startsWith("gpt-5-chat")) {
    return false;
  }
  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
}
1020
/**
 * True when the model supports the "flex" service tier
 * (o3, o4-mini, and non-chat gpt-5 models).
 */
function supportsFlexProcessing(modelId) {
  const isNonChatGpt5 = modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || isNonChatGpt5;
}
1023
/**
 * True when the model supports the "priority" service tier:
 * gpt-4*, gpt-5-mini, other gpt-5 models (excluding -nano and -chat),
 * o3, and o4-mini.
 */
function supportsPriorityProcessing(modelId) {
  if (modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini")) {
    return true;
  }
  if (modelId.startsWith("o3") || modelId.startsWith("o4-mini")) {
    return true;
  }
  return (
    modelId.startsWith("gpt-5") &&
    !modelId.startsWith("gpt-5-nano") &&
    !modelId.startsWith("gpt-5-chat")
  );
}
1026
/**
 * Decides how system messages are sent for a given model: non-reasoning
 * models use a regular "system" message, reasoning models default to
 * "developer" unless the per-model table overrides it.
 */
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  return reasoningModels[modelId]?.systemMessageMode ?? "developer";
}
1033
// Per-model overrides for how system messages are delivered to reasoning
// models (consulted by getSystemMessageMode). All listed models currently use
// "developer"; the table is generated from the id list to avoid repetition.
var reasoningModels = Object.fromEntries(
  [
    "o3",
    "o3-2025-04-16",
    "o3-mini",
    "o3-mini-2025-01-31",
    "o4-mini",
    "o4-mini-2025-04-16"
  ].map((id) => [id, { systemMessageMode: "developer" }])
);
1053
/**
 * Flattens a structured chat prompt into a single completion-style text
 * prompt ("user:\n...\n\nassistant:\n...") for the legacy /completions API.
 *
 * @param {object} opts
 * @param {Array} opts.prompt - chat messages; an optional leading system
 *   message becomes a plain preamble.
 * @param {string} [opts.user="user"] - label for user turns.
 * @param {string} [opts.assistant="assistant"] - label for assistant turns.
 * @returns {{prompt: string, stopSequences: string[]}} the flattened prompt,
 *   plus a stop sequence that prevents the model from writing the next user
 *   turn itself.
 * @throws {InvalidPromptError} for a system message that is not first.
 * @throws {UnsupportedFunctionalityError} for tool messages / tool calls.
 */
function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
  assistant = "assistant"
}) {
  let text = "";
  // A system message is only permitted as the very first message.
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}\n\n`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // FIX: was a plain double-quoted string, so "${content}" appeared
        // literally in the error message instead of being interpolated.
        throw new InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        // Only text parts contribute; other part types are silently dropped.
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
          }
        }).filter(Boolean).join("");
        text += `${user}:\n${userMessage}\n\n`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:\n${assistantMessage}\n\n`;
        break;
      }
      case "tool": {
        throw new UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Leave the prompt open on an assistant turn for the model to complete.
  text += `${assistant}:\n`;
  return {
    prompt: text,
    stopSequences: [`\n${user}:`]
  };
}
1125
/**
 * Normalizes the identifying fields of an OpenAI completion response.
 *
 * Unlike its chat counterpart, `created` is checked with a null-test (not
 * truthiness), so epoch second 0 still yields a Date.
 *
 * @param {{id?: string|null, model?: string|null, created?: number|null}} meta
 * @returns {{id: string|undefined, modelId: string|undefined, timestamp: Date|undefined}}
 */
function getResponseMetadata2({ id, model, created }) {
  const timestamp = created != null ? new Date(created * 1000) : undefined;
  return {
    id: id ?? undefined,
    modelId: model ?? undefined,
    timestamp
  };
}
1136
/**
 * Maps OpenAI's raw `finish_reason` string onto the SDK's normalized
 * finish-reason vocabulary (completion-API copy of mapOpenAIFinishReason).
 * Unrecognized or missing values map to "unknown". A Map lookup avoids
 * accidental matches on Object.prototype property names.
 */
function mapOpenAIFinishReason2(finishReason) {
  const NORMALIZED = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["content_filter", "content-filter"],
    ["function_call", "tool-calls"],
    ["tool_calls", "tool-calls"]
  ]);
  return NORMALIZED.get(finishReason) ?? "unknown";
}
1151
// Zod validator for a NON-streaming legacy /completions response. Note that
// unlike the chat schema, `text`/`finish_reason` and the usage counters are
// required (non-nullish) here.
var openaiCompletionResponseSchema = lazyValidator(
  () => zodSchema(
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(), // epoch seconds
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          text: z.string(),
          finish_reason: z.string(),
          // Legacy logprobs layout: parallel arrays per generated token.
          logprobs: z.object({
            tokens: z.array(z.string()),
            token_logprobs: z.array(z.number()),
            top_logprobs: z.array(z.record(z.string(), z.number())).nullish()
          }).nullish()
        })
      ),
      usage: z.object({
        prompt_tokens: z.number(),
        completion_tokens: z.number(),
        total_tokens: z.number()
      }).nullish()
    })
  )
);
1176
// Zod validator for one STREAMING legacy /completions SSE chunk. Either a
// delta payload or a provider error object (union with openaiErrorDataSchema).
var openaiCompletionChunkSchema = lazyValidator(
  () => zodSchema(
    z.union([
      z.object({
        id: z.string().nullish(),
        created: z.number().nullish(), // epoch seconds
        model: z.string().nullish(),
        choices: z.array(
          z.object({
            text: z.string(),
            // Only present on the terminating chunk of a choice.
            finish_reason: z.string().nullish(),
            index: z.number(),
            logprobs: z.object({
              tokens: z.array(z.string()),
              token_logprobs: z.array(z.number()),
              top_logprobs: z.array(z.record(z.string(), z.number())).nullish()
            }).nullish()
          })
        ),
        usage: z.object({
          prompt_tokens: z.number(),
          completion_tokens: z.number(),
          total_tokens: z.number()
        }).nullish()
      }),
      openaiErrorDataSchema
    ])
  )
);
1205
+ var openaiCompletionProviderOptions = lazyValidator(
1206
+ () => zodSchema(
1207
+ z.object({
1208
+ /**
1209
+ Echo back the prompt in addition to the completion.
1210
+ */
1211
+ echo: z.boolean().optional(),
1212
+ /**
1213
+ Modify the likelihood of specified tokens appearing in the completion.
1214
+
1215
+ Accepts a JSON object that maps tokens (specified by their token ID in
1216
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
1217
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
1218
+ the bias is added to the logits generated by the model prior to sampling.
1219
+ The exact effect will vary per model, but values between -1 and 1 should
1220
+ decrease or increase likelihood of selection; values like -100 or 100
1221
+ should result in a ban or exclusive selection of the relevant token.
1222
+
1223
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1224
+ token from being generated.
1225
+ */
1226
+ logitBias: z.record(z.string(), z.number()).optional(),
1227
+ /**
1228
+ The suffix that comes after a completion of inserted text.
1229
+ */
1230
+ suffix: z.string().optional(),
1231
+ /**
1232
+ A unique identifier representing your end-user, which can help OpenAI to
1233
+ monitor and detect abuse. Learn more.
1234
+ */
1235
+ user: z.string().optional(),
1236
+ /**
1237
+ Return the log probabilities of the tokens. Including logprobs will increase
1238
+ the response size and can slow down response times. However, it can
1239
+ be useful to better understand how the model is behaving.
1240
+ Setting to true will return the log probabilities of the tokens that
1241
+ were generated.
1242
+ Setting to a number will return the log probabilities of the top n
1243
+ tokens that were generated.
1244
+ */
1245
+ logprobs: z.union([z.boolean(), z.number()]).optional()
1246
+ })
1247
+ )
1248
+ );
1249
+ var OpenAICompletionLanguageModel = class {
1250
+ constructor(modelId, config) {
1251
+ this.specificationVersion = "v2";
1252
+ this.supportedUrls = {
1253
+ // No URLs are supported for completion models.
1254
+ };
1255
+ this.modelId = modelId;
1256
+ this.config = config;
1257
+ }
1258
+ get providerOptionsName() {
1259
+ return this.config.provider.split(".")[0].trim();
1260
+ }
1261
+ get provider() {
1262
+ return this.config.provider;
1263
+ }
1264
+ async getArgs({
1265
+ prompt,
1266
+ maxOutputTokens,
1267
+ temperature,
1268
+ topP,
1269
+ topK,
1270
+ frequencyPenalty,
1271
+ presencePenalty,
1272
+ stopSequences: userStopSequences,
1273
+ responseFormat,
1274
+ tools,
1275
+ toolChoice,
1276
+ seed,
1277
+ providerOptions
1278
+ }) {
1279
+ const warnings = [];
1280
+ const openaiOptions = {
1281
+ ...await parseProviderOptions({
1282
+ provider: "openai",
1283
+ providerOptions,
1284
+ schema: openaiCompletionProviderOptions
1285
+ }),
1286
+ ...await parseProviderOptions({
1287
+ provider: this.providerOptionsName,
1288
+ providerOptions,
1289
+ schema: openaiCompletionProviderOptions
1290
+ })
1291
+ };
1292
+ if (topK != null) {
1293
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
1294
+ }
1295
+ if (tools == null ? void 0 : tools.length) {
1296
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
1297
+ }
1298
+ if (toolChoice != null) {
1299
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
1300
+ }
1301
+ if (responseFormat != null && responseFormat.type !== "text") {
1302
+ warnings.push({
1303
+ type: "unsupported-setting",
1304
+ setting: "responseFormat",
1305
+ details: "JSON response format is not supported."
1306
+ });
1307
+ }
1308
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
1309
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1310
+ return {
1311
+ args: {
1312
+ // model id:
1313
+ model: this.modelId,
1314
+ // model specific settings:
1315
+ echo: openaiOptions.echo,
1316
+ logit_bias: openaiOptions.logitBias,
1317
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
1318
+ suffix: openaiOptions.suffix,
1319
+ user: openaiOptions.user,
1320
+ // standardized settings:
1321
+ max_tokens: maxOutputTokens,
1322
+ temperature,
1323
+ top_p: topP,
1324
+ frequency_penalty: frequencyPenalty,
1325
+ presence_penalty: presencePenalty,
1326
+ seed,
1327
+ // prompt:
1328
+ prompt: completionPrompt,
1329
+ // stop sequences:
1330
+ stop: stop.length > 0 ? stop : void 0
1331
+ },
1332
+ warnings
1333
+ };
1334
+ }
1335
+ async doGenerate(options) {
1336
+ var _a, _b, _c;
1337
+ const { args, warnings } = await this.getArgs(options);
1338
+ const {
1339
+ responseHeaders,
1340
+ value: response,
1341
+ rawValue: rawResponse
1342
+ } = await postJsonToApi({
1343
+ url: this.config.url({
1344
+ path: "/completions",
1345
+ modelId: this.modelId
1346
+ }),
1347
+ headers: combineHeaders(this.config.headers(), options.headers),
1348
+ body: args,
1349
+ failedResponseHandler: openaiFailedResponseHandler,
1350
+ successfulResponseHandler: createJsonResponseHandler(
1351
+ openaiCompletionResponseSchema
1352
+ ),
1353
+ abortSignal: options.abortSignal,
1354
+ fetch: this.config.fetch
1355
+ });
1356
+ const choice = response.choices[0];
1357
+ const providerMetadata = { openai: {} };
1358
+ if (choice.logprobs != null) {
1359
+ providerMetadata.openai.logprobs = choice.logprobs;
1360
+ }
1361
+ return {
1362
+ content: [{ type: "text", text: choice.text }],
1363
+ usage: {
1364
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1365
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1366
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1367
+ },
1368
+ finishReason: mapOpenAIFinishReason2(choice.finish_reason),
1369
+ request: { body: args },
1370
+ response: {
1371
+ ...getResponseMetadata2(response),
1372
+ headers: responseHeaders,
1373
+ body: rawResponse
1374
+ },
1375
+ providerMetadata,
1376
+ warnings
1377
+ };
1378
+ }
1379
+ async doStream(options) {
1380
+ const { args, warnings } = await this.getArgs(options);
1381
+ const body = {
1382
+ ...args,
1383
+ stream: true,
1384
+ stream_options: {
1385
+ include_usage: true
1386
+ }
1387
+ };
1388
+ const { responseHeaders, value: response } = await postJsonToApi({
1389
+ url: this.config.url({
1390
+ path: "/completions",
1391
+ modelId: this.modelId
1392
+ }),
1393
+ headers: combineHeaders(this.config.headers(), options.headers),
1394
+ body,
1395
+ failedResponseHandler: openaiFailedResponseHandler,
1396
+ successfulResponseHandler: createEventSourceResponseHandler(
1397
+ openaiCompletionChunkSchema
1398
+ ),
1399
+ abortSignal: options.abortSignal,
1400
+ fetch: this.config.fetch
1401
+ });
1402
+ let finishReason = "unknown";
1403
+ const providerMetadata = { openai: {} };
1404
+ const usage = {
1405
+ inputTokens: void 0,
1406
+ outputTokens: void 0,
1407
+ totalTokens: void 0
1408
+ };
1409
+ let isFirstChunk = true;
1410
+ return {
1411
+ stream: response.pipeThrough(
1412
+ new TransformStream({
1413
+ start(controller) {
1414
+ controller.enqueue({ type: "stream-start", warnings });
1415
+ },
1416
+ transform(chunk, controller) {
1417
+ if (options.includeRawChunks) {
1418
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1419
+ }
1420
+ if (!chunk.success) {
1421
+ finishReason = "error";
1422
+ controller.enqueue({ type: "error", error: chunk.error });
1423
+ return;
1424
+ }
1425
+ const value = chunk.value;
1426
+ if ("error" in value) {
1427
+ finishReason = "error";
1428
+ controller.enqueue({ type: "error", error: value.error });
1429
+ return;
1430
+ }
1431
+ if (isFirstChunk) {
1432
+ isFirstChunk = false;
1433
+ controller.enqueue({
1434
+ type: "response-metadata",
1435
+ ...getResponseMetadata2(value)
1436
+ });
1437
+ controller.enqueue({ type: "text-start", id: "0" });
1438
+ }
1439
+ if (value.usage != null) {
1440
+ usage.inputTokens = value.usage.prompt_tokens;
1441
+ usage.outputTokens = value.usage.completion_tokens;
1442
+ usage.totalTokens = value.usage.total_tokens;
1443
+ }
1444
+ const choice = value.choices[0];
1445
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1446
+ finishReason = mapOpenAIFinishReason2(choice.finish_reason);
1447
+ }
1448
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
1449
+ providerMetadata.openai.logprobs = choice.logprobs;
1450
+ }
1451
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
1452
+ controller.enqueue({
1453
+ type: "text-delta",
1454
+ id: "0",
1455
+ delta: choice.text
1456
+ });
1457
+ }
1458
+ },
1459
+ flush(controller) {
1460
+ if (!isFirstChunk) {
1461
+ controller.enqueue({ type: "text-end", id: "0" });
1462
+ }
1463
+ controller.enqueue({
1464
+ type: "finish",
1465
+ finishReason,
1466
+ providerMetadata,
1467
+ usage
1468
+ });
1469
+ }
1470
+ })
1471
+ ),
1472
+ request: { body },
1473
+ response: { headers: responseHeaders }
1474
+ };
1475
+ }
1476
+ };
1477
+ var openaiEmbeddingProviderOptions = lazyValidator(
1478
+ () => zodSchema(
1479
+ z.object({
1480
+ /**
1481
+ The number of dimensions the resulting output embeddings should have.
1482
+ Only supported in text-embedding-3 and later models.
1483
+ */
1484
+ dimensions: z.number().optional(),
1485
+ /**
1486
+ A unique identifier representing your end-user, which can help OpenAI to
1487
+ monitor and detect abuse. Learn more.
1488
+ */
1489
+ user: z.string().optional()
1490
+ })
1491
+ )
1492
+ );
1493
+ var openaiTextEmbeddingResponseSchema = lazyValidator(
1494
+ () => zodSchema(
1495
+ z.object({
1496
+ data: z.array(z.object({ embedding: z.array(z.number()) })),
1497
+ usage: z.object({ prompt_tokens: z.number() }).nullish()
1498
+ })
1499
+ )
1500
+ );
1501
+ var OpenAIEmbeddingModel = class {
1502
+ constructor(modelId, config) {
1503
+ this.specificationVersion = "v2";
1504
+ this.maxEmbeddingsPerCall = 2048;
1505
+ this.supportsParallelCalls = true;
1506
+ this.modelId = modelId;
1507
+ this.config = config;
1508
+ }
1509
+ get provider() {
1510
+ return this.config.provider;
1511
+ }
1512
+ async doEmbed({
1513
+ values,
1514
+ headers,
1515
+ abortSignal,
1516
+ providerOptions
1517
+ }) {
1518
+ var _a;
1519
+ if (values.length > this.maxEmbeddingsPerCall) {
1520
+ throw new TooManyEmbeddingValuesForCallError({
1521
+ provider: this.provider,
1522
+ modelId: this.modelId,
1523
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1524
+ values
1525
+ });
1526
+ }
1527
+ const openaiOptions = (_a = await parseProviderOptions({
1528
+ provider: "openai",
1529
+ providerOptions,
1530
+ schema: openaiEmbeddingProviderOptions
1531
+ })) != null ? _a : {};
1532
+ const {
1533
+ responseHeaders,
1534
+ value: response,
1535
+ rawValue
1536
+ } = await postJsonToApi({
1537
+ url: this.config.url({
1538
+ path: "/embeddings",
1539
+ modelId: this.modelId
1540
+ }),
1541
+ headers: combineHeaders(this.config.headers(), headers),
1542
+ body: {
1543
+ model: this.modelId,
1544
+ input: values,
1545
+ encoding_format: "float",
1546
+ dimensions: openaiOptions.dimensions,
1547
+ user: openaiOptions.user
1548
+ },
1549
+ failedResponseHandler: openaiFailedResponseHandler,
1550
+ successfulResponseHandler: createJsonResponseHandler(
1551
+ openaiTextEmbeddingResponseSchema
1552
+ ),
1553
+ abortSignal,
1554
+ fetch: this.config.fetch
1555
+ });
1556
+ return {
1557
+ embeddings: response.data.map((item) => item.embedding),
1558
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1559
+ response: { headers: responseHeaders, body: rawValue }
1560
+ };
1561
+ }
1562
+ };
1563
+ var openaiImageResponseSchema = lazyValidator(
1564
+ () => zodSchema(
1565
+ z.object({
1566
+ data: z.array(
1567
+ z.object({
1568
+ b64_json: z.string(),
1569
+ revised_prompt: z.string().nullish()
1570
+ })
1571
+ )
1572
+ })
1573
+ )
1574
+ );
1575
+ var modelMaxImagesPerCall = {
1576
+ "dall-e-3": 1,
1577
+ "dall-e-2": 10,
1578
+ "gpt-image-1": 10,
1579
+ "gpt-image-1-mini": 10
1580
+ };
1581
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
1582
+ "gpt-image-1",
1583
+ "gpt-image-1-mini"
1584
+ ]);
1585
+ var OpenAIImageModel = class {
1586
+ constructor(modelId, config) {
1587
+ this.modelId = modelId;
1588
+ this.config = config;
1589
+ this.specificationVersion = "v2";
1590
+ }
1591
+ get maxImagesPerCall() {
1592
+ var _a;
1593
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1594
+ }
1595
+ get provider() {
1596
+ return this.config.provider;
1597
+ }
1598
+ async doGenerate({
1599
+ prompt,
1600
+ n,
1601
+ size,
1602
+ aspectRatio,
1603
+ seed,
1604
+ providerOptions,
1605
+ headers,
1606
+ abortSignal
1607
+ }) {
1608
+ var _a, _b, _c, _d;
1609
+ const warnings = [];
1610
+ if (aspectRatio != null) {
1611
+ warnings.push({
1612
+ type: "unsupported-setting",
1613
+ setting: "aspectRatio",
1614
+ details: "This model does not support aspect ratio. Use `size` instead."
1615
+ });
1616
+ }
1617
+ if (seed != null) {
1618
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1619
+ }
1620
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1621
+ const { value: response, responseHeaders } = await postJsonToApi({
1622
+ url: this.config.url({
1623
+ path: "/images/generations",
1624
+ modelId: this.modelId
1625
+ }),
1626
+ headers: combineHeaders(this.config.headers(), headers),
1627
+ body: {
1628
+ model: this.modelId,
1629
+ prompt,
1630
+ n,
1631
+ size,
1632
+ ...(_d = providerOptions.openai) != null ? _d : {},
1633
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1634
+ },
1635
+ failedResponseHandler: openaiFailedResponseHandler,
1636
+ successfulResponseHandler: createJsonResponseHandler(
1637
+ openaiImageResponseSchema
1638
+ ),
1639
+ abortSignal,
1640
+ fetch: this.config.fetch
1641
+ });
1642
+ return {
1643
+ images: response.data.map((item) => item.b64_json),
1644
+ warnings,
1645
+ response: {
1646
+ timestamp: currentDate,
1647
+ modelId: this.modelId,
1648
+ headers: responseHeaders
1649
+ },
1650
+ providerMetadata: {
1651
+ openai: {
1652
+ images: response.data.map(
1653
+ (item) => item.revised_prompt ? {
1654
+ revisedPrompt: item.revised_prompt
1655
+ } : null
1656
+ )
1657
+ }
1658
+ }
1659
+ };
1660
+ }
1661
+ };
1662
+ var openaiTranscriptionResponseSchema = lazyValidator(
1663
+ () => zodSchema(
1664
+ z.object({
1665
+ text: z.string(),
1666
+ language: z.string().nullish(),
1667
+ duration: z.number().nullish(),
1668
+ words: z.array(
1669
+ z.object({
1670
+ word: z.string(),
1671
+ start: z.number(),
1672
+ end: z.number()
1673
+ })
1674
+ ).nullish(),
1675
+ segments: z.array(
1676
+ z.object({
1677
+ id: z.number(),
1678
+ seek: z.number(),
1679
+ start: z.number(),
1680
+ end: z.number(),
1681
+ text: z.string(),
1682
+ tokens: z.array(z.number()),
1683
+ temperature: z.number(),
1684
+ avg_logprob: z.number(),
1685
+ compression_ratio: z.number(),
1686
+ no_speech_prob: z.number()
1687
+ })
1688
+ ).nullish()
1689
+ })
1690
+ )
1691
+ );
1692
+ var openAITranscriptionProviderOptions = lazyValidator(
1693
+ () => zodSchema(
1694
+ z.object({
1695
+ /**
1696
+ * Additional information to include in the transcription response.
1697
+ */
1698
+ include: z.array(z.string()).optional(),
1699
+ /**
1700
+ * The language of the input audio in ISO-639-1 format.
1701
+ */
1702
+ language: z.string().optional(),
1703
+ /**
1704
+ * An optional text to guide the model's style or continue a previous audio segment.
1705
+ */
1706
+ prompt: z.string().optional(),
1707
+ /**
1708
+ * The sampling temperature, between 0 and 1.
1709
+ * @default 0
1710
+ */
1711
+ temperature: z.number().min(0).max(1).default(0).optional(),
1712
+ /**
1713
+ * The timestamp granularities to populate for this transcription.
1714
+ * @default ['segment']
1715
+ */
1716
+ timestampGranularities: z.array(z.enum(["word", "segment"])).default(["segment"]).optional()
1717
+ })
1718
+ )
1719
+ );
1720
+ var languageMap = {
1721
+ afrikaans: "af",
1722
+ arabic: "ar",
1723
+ armenian: "hy",
1724
+ azerbaijani: "az",
1725
+ belarusian: "be",
1726
+ bosnian: "bs",
1727
+ bulgarian: "bg",
1728
+ catalan: "ca",
1729
+ chinese: "zh",
1730
+ croatian: "hr",
1731
+ czech: "cs",
1732
+ danish: "da",
1733
+ dutch: "nl",
1734
+ english: "en",
1735
+ estonian: "et",
1736
+ finnish: "fi",
1737
+ french: "fr",
1738
+ galician: "gl",
1739
+ german: "de",
1740
+ greek: "el",
1741
+ hebrew: "he",
1742
+ hindi: "hi",
1743
+ hungarian: "hu",
1744
+ icelandic: "is",
1745
+ indonesian: "id",
1746
+ italian: "it",
1747
+ japanese: "ja",
1748
+ kannada: "kn",
1749
+ kazakh: "kk",
1750
+ korean: "ko",
1751
+ latvian: "lv",
1752
+ lithuanian: "lt",
1753
+ macedonian: "mk",
1754
+ malay: "ms",
1755
+ marathi: "mr",
1756
+ maori: "mi",
1757
+ nepali: "ne",
1758
+ norwegian: "no",
1759
+ persian: "fa",
1760
+ polish: "pl",
1761
+ portuguese: "pt",
1762
+ romanian: "ro",
1763
+ russian: "ru",
1764
+ serbian: "sr",
1765
+ slovak: "sk",
1766
+ slovenian: "sl",
1767
+ spanish: "es",
1768
+ swahili: "sw",
1769
+ swedish: "sv",
1770
+ tagalog: "tl",
1771
+ tamil: "ta",
1772
+ thai: "th",
1773
+ turkish: "tr",
1774
+ ukrainian: "uk",
1775
+ urdu: "ur",
1776
+ vietnamese: "vi",
1777
+ welsh: "cy"
1778
+ };
1779
+ var OpenAITranscriptionModel = class {
1780
+ constructor(modelId, config) {
1781
+ this.modelId = modelId;
1782
+ this.config = config;
1783
+ this.specificationVersion = "v2";
1784
+ }
1785
+ get provider() {
1786
+ return this.config.provider;
1787
+ }
1788
+ async getArgs({
1789
+ audio,
1790
+ mediaType,
1791
+ providerOptions
1792
+ }) {
1793
+ const warnings = [];
1794
+ const openAIOptions = await parseProviderOptions({
1795
+ provider: "openai",
1796
+ providerOptions,
1797
+ schema: openAITranscriptionProviderOptions
1798
+ });
1799
+ const formData = new FormData();
1800
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
1801
+ formData.append("model", this.modelId);
1802
+ const fileExtension = mediaTypeToExtension(mediaType);
1803
+ formData.append(
1804
+ "file",
1805
+ new File([blob], "audio", { type: mediaType }),
1806
+ `audio.${fileExtension}`
1807
+ );
1808
+ if (openAIOptions) {
1809
+ const transcriptionModelOptions = {
1810
+ include: openAIOptions.include,
1811
+ language: openAIOptions.language,
1812
+ prompt: openAIOptions.prompt,
1813
+ // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
1814
+ // prefer verbose_json to get segments for models that support it
1815
+ response_format: [
1816
+ "gpt-4o-transcribe",
1817
+ "gpt-4o-mini-transcribe"
1818
+ ].includes(this.modelId) ? "json" : "verbose_json",
1819
+ temperature: openAIOptions.temperature,
1820
+ timestamp_granularities: openAIOptions.timestampGranularities
1821
+ };
1822
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
1823
+ if (value != null) {
1824
+ if (Array.isArray(value)) {
1825
+ for (const item of value) {
1826
+ formData.append(`${key}[]`, String(item));
1827
+ }
1828
+ } else {
1829
+ formData.append(key, String(value));
1830
+ }
1831
+ }
1832
+ }
1833
+ }
1834
+ return {
1835
+ formData,
1836
+ warnings
1837
+ };
1838
+ }
1839
+ async doGenerate(options) {
1840
+ var _a, _b, _c, _d, _e, _f, _g, _h;
1841
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1842
+ const { formData, warnings } = await this.getArgs(options);
1843
+ const {
1844
+ value: response,
1845
+ responseHeaders,
1846
+ rawValue: rawResponse
1847
+ } = await postFormDataToApi({
1848
+ url: this.config.url({
1849
+ path: "/audio/transcriptions",
1850
+ modelId: this.modelId
1851
+ }),
1852
+ headers: combineHeaders(this.config.headers(), options.headers),
1853
+ formData,
1854
+ failedResponseHandler: openaiFailedResponseHandler,
1855
+ successfulResponseHandler: createJsonResponseHandler(
1856
+ openaiTranscriptionResponseSchema
1857
+ ),
1858
+ abortSignal: options.abortSignal,
1859
+ fetch: this.config.fetch
1860
+ });
1861
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1862
+ return {
1863
+ text: response.text,
1864
+ segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
1865
+ text: segment.text,
1866
+ startSecond: segment.start,
1867
+ endSecond: segment.end
1868
+ }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
1869
+ text: word.word,
1870
+ startSecond: word.start,
1871
+ endSecond: word.end
1872
+ }))) != null ? _g : [],
1873
+ language,
1874
+ durationInSeconds: (_h = response.duration) != null ? _h : void 0,
1875
+ warnings,
1876
+ response: {
1877
+ timestamp: currentDate,
1878
+ modelId: this.modelId,
1879
+ headers: responseHeaders,
1880
+ body: rawResponse
1881
+ }
1882
+ };
1883
+ }
1884
+ };
1885
+ var openaiSpeechProviderOptionsSchema = lazyValidator(
1886
+ () => zodSchema(
1887
+ z.object({
1888
+ instructions: z.string().nullish(),
1889
+ speed: z.number().min(0.25).max(4).default(1).nullish()
1890
+ })
1891
+ )
1892
+ );
1893
+ var OpenAISpeechModel = class {
1894
+ constructor(modelId, config) {
1895
+ this.modelId = modelId;
1896
+ this.config = config;
1897
+ this.specificationVersion = "v2";
1898
+ }
1899
+ get provider() {
1900
+ return this.config.provider;
1901
+ }
1902
+ async getArgs({
1903
+ text,
1904
+ voice = "alloy",
1905
+ outputFormat = "mp3",
1906
+ speed,
1907
+ instructions,
1908
+ language,
1909
+ providerOptions
1910
+ }) {
1911
+ const warnings = [];
1912
+ const openAIOptions = await parseProviderOptions({
1913
+ provider: "openai",
1914
+ providerOptions,
1915
+ schema: openaiSpeechProviderOptionsSchema
1916
+ });
1917
+ const requestBody = {
1918
+ model: this.modelId,
1919
+ input: text,
1920
+ voice,
1921
+ response_format: "mp3",
1922
+ speed,
1923
+ instructions
1924
+ };
1925
+ if (outputFormat) {
1926
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
1927
+ requestBody.response_format = outputFormat;
1928
+ } else {
1929
+ warnings.push({
1930
+ type: "unsupported-setting",
1931
+ setting: "outputFormat",
1932
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
1933
+ });
1934
+ }
1935
+ }
1936
+ if (openAIOptions) {
1937
+ const speechModelOptions = {};
1938
+ for (const key in speechModelOptions) {
1939
+ const value = speechModelOptions[key];
1940
+ if (value !== void 0) {
1941
+ requestBody[key] = value;
1942
+ }
1943
+ }
1944
+ }
1945
+ if (language) {
1946
+ warnings.push({
1947
+ type: "unsupported-setting",
1948
+ setting: "language",
1949
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
1950
+ });
1951
+ }
1952
+ return {
1953
+ requestBody,
1954
+ warnings
1955
+ };
1956
+ }
1957
+ async doGenerate(options) {
1958
+ var _a, _b, _c;
1959
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1960
+ const { requestBody, warnings } = await this.getArgs(options);
1961
+ const {
1962
+ value: audio,
1963
+ responseHeaders,
1964
+ rawValue: rawResponse
1965
+ } = await postJsonToApi({
1966
+ url: this.config.url({
1967
+ path: "/audio/speech",
1968
+ modelId: this.modelId
1969
+ }),
1970
+ headers: combineHeaders(this.config.headers(), options.headers),
1971
+ body: requestBody,
1972
+ failedResponseHandler: openaiFailedResponseHandler,
1973
+ successfulResponseHandler: createBinaryResponseHandler(),
1974
+ abortSignal: options.abortSignal,
1975
+ fetch: this.config.fetch
1976
+ });
1977
+ return {
1978
+ audio,
1979
+ warnings,
1980
+ request: {
1981
+ body: JSON.stringify(requestBody)
1982
+ },
1983
+ response: {
1984
+ timestamp: currentDate,
1985
+ modelId: this.modelId,
1986
+ headers: responseHeaders,
1987
+ body: rawResponse
1988
+ }
1989
+ };
1990
+ }
1991
+ };
1992
+ var localShellInputSchema = lazySchema(
1993
+ () => zodSchema(
1994
+ z.object({
1995
+ action: z.object({
1996
+ type: z.literal("exec"),
1997
+ command: z.array(z.string()),
1998
+ timeoutMs: z.number().optional(),
1999
+ user: z.string().optional(),
2000
+ workingDirectory: z.string().optional(),
2001
+ env: z.record(z.string(), z.string()).optional()
2002
+ })
2003
+ })
2004
+ )
2005
+ );
2006
+ var localShellOutputSchema = lazySchema(
2007
+ () => zodSchema(z.object({ output: z.string() }))
2008
+ );
2009
+ createProviderDefinedToolFactoryWithOutputSchema({
2010
+ id: "openai.local_shell",
2011
+ name: "local_shell",
2012
+ inputSchema: localShellInputSchema,
2013
+ outputSchema: localShellOutputSchema
2014
+ });
2015
+ function isFileId(data, prefixes) {
2016
+ if (!prefixes) return false;
2017
+ return prefixes.some((prefix) => data.startsWith(prefix));
2018
+ }
2019
+ async function convertToOpenAIResponsesInput({
2020
+ prompt,
2021
+ systemMessageMode,
2022
+ fileIdPrefixes,
2023
+ store,
2024
+ hasLocalShellTool = false
2025
+ }) {
2026
+ var _a, _b, _c, _d;
2027
+ const input = [];
2028
+ const warnings = [];
2029
+ for (const { role, content } of prompt) {
2030
+ switch (role) {
2031
+ case "system": {
2032
+ switch (systemMessageMode) {
2033
+ case "system": {
2034
+ input.push({ role: "system", content });
2035
+ break;
2036
+ }
2037
+ case "developer": {
2038
+ input.push({ role: "developer", content });
2039
+ break;
2040
+ }
2041
+ case "remove": {
2042
+ warnings.push({
2043
+ type: "other",
2044
+ message: "system messages are removed for this model"
2045
+ });
2046
+ break;
2047
+ }
2048
+ default: {
2049
+ const _exhaustiveCheck = systemMessageMode;
2050
+ throw new Error(
2051
+ `Unsupported system message mode: ${_exhaustiveCheck}`
2052
+ );
2053
+ }
2054
+ }
2055
+ break;
2056
+ }
2057
+ case "user": {
2058
+ input.push({
2059
+ role: "user",
2060
+ content: content.map((part, index) => {
2061
+ var _a2, _b2, _c2;
2062
+ switch (part.type) {
2063
+ case "text": {
2064
+ return { type: "input_text", text: part.text };
2065
+ }
2066
+ case "file": {
2067
+ if (part.mediaType.startsWith("image/")) {
2068
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
2069
+ return {
2070
+ type: "input_image",
2071
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2072
+ image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`
2073
+ },
2074
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
2075
+ };
2076
+ } else if (part.mediaType === "application/pdf") {
2077
+ if (part.data instanceof URL) {
2078
+ return {
2079
+ type: "input_file",
2080
+ file_url: part.data.toString()
2081
+ };
2082
+ }
2083
+ return {
2084
+ type: "input_file",
2085
+ ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2086
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2087
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
2088
+ }
2089
+ };
2090
+ } else {
2091
+ throw new UnsupportedFunctionalityError({
2092
+ functionality: `file part media type ${part.mediaType}`
2093
+ });
2094
+ }
2095
+ }
2096
+ }
2097
+ })
2098
+ });
2099
+ break;
2100
+ }
2101
+ case "assistant": {
2102
+ const reasoningMessages = {};
2103
+ for (const part of content) {
2104
+ switch (part.type) {
2105
+ case "text": {
2106
+ const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId;
2107
+ if (store && id != null) {
2108
+ input.push({ type: "item_reference", id });
2109
+ break;
2110
+ }
2111
+ input.push({
2112
+ role: "assistant",
2113
+ content: [{ type: "output_text", text: part.text }],
2114
+ id
2115
+ });
2116
+ break;
2117
+ }
2118
+ case "tool-call": {
2119
+ if (part.providerExecuted) {
2120
+ break;
2121
+ }
2122
+ const id = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId;
2123
+ if (store && id != null) {
2124
+ input.push({ type: "item_reference", id });
2125
+ break;
2126
+ }
2127
+ if (hasLocalShellTool && part.toolName === "local_shell") {
2128
+ const parsedInput = await validateTypes({
2129
+ value: part.input,
2130
+ schema: localShellInputSchema
2131
+ });
2132
+ input.push({
2133
+ type: "local_shell_call",
2134
+ call_id: part.toolCallId,
2135
+ id,
2136
+ action: {
2137
+ type: "exec",
2138
+ command: parsedInput.action.command,
2139
+ timeout_ms: parsedInput.action.timeoutMs,
2140
+ user: parsedInput.action.user,
2141
+ working_directory: parsedInput.action.workingDirectory,
2142
+ env: parsedInput.action.env
2143
+ }
2144
+ });
2145
+ break;
2146
+ }
2147
+ input.push({
2148
+ type: "function_call",
2149
+ call_id: part.toolCallId,
2150
+ name: part.toolName,
2151
+ arguments: JSON.stringify(part.input),
2152
+ id
2153
+ });
2154
+ break;
2155
+ }
2156
+ // assistant tool result parts are from provider-executed tools:
2157
+ case "tool-result": {
2158
+ if (store) {
2159
+ input.push({ type: "item_reference", id: part.toolCallId });
2160
+ } else {
2161
+ warnings.push({
2162
+ type: "other",
2163
+ message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
2164
+ });
2165
+ }
2166
+ break;
2167
+ }
2168
+ case "reasoning": {
2169
+ const providerOptions = await parseProviderOptions({
2170
+ provider: "openai",
2171
+ providerOptions: part.providerOptions,
2172
+ schema: openaiResponsesReasoningProviderOptionsSchema
2173
+ });
2174
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2175
+ if (reasoningId != null) {
2176
+ const reasoningMessage = reasoningMessages[reasoningId];
2177
+ if (store) {
2178
+ if (reasoningMessage === void 0) {
2179
+ input.push({ type: "item_reference", id: reasoningId });
2180
+ reasoningMessages[reasoningId] = {
2181
+ type: "reasoning",
2182
+ id: reasoningId,
2183
+ summary: []
2184
+ };
2185
+ }
2186
+ } else {
2187
+ const summaryParts = [];
2188
+ if (part.text.length > 0) {
2189
+ summaryParts.push({
2190
+ type: "summary_text",
2191
+ text: part.text
2192
+ });
2193
+ } else if (reasoningMessage !== void 0) {
2194
+ warnings.push({
2195
+ type: "other",
2196
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2197
+ });
2198
+ }
2199
+ if (reasoningMessage === void 0) {
2200
+ reasoningMessages[reasoningId] = {
2201
+ type: "reasoning",
2202
+ id: reasoningId,
2203
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2204
+ summary: summaryParts
2205
+ };
2206
+ input.push(reasoningMessages[reasoningId]);
2207
+ } else {
2208
+ reasoningMessage.summary.push(...summaryParts);
2209
+ if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
2210
+ reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
2211
+ }
2212
+ }
2213
+ }
2214
+ } else {
2215
+ warnings.push({
2216
+ type: "other",
2217
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
2218
+ });
2219
+ }
2220
+ break;
2221
+ }
2222
+ }
2223
+ }
2224
+ break;
2225
+ }
2226
+ case "tool": {
2227
+ for (const part of content) {
2228
+ const output = part.output;
2229
+ if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
2230
+ const parsedOutput = await validateTypes({
2231
+ value: output.value,
2232
+ schema: localShellOutputSchema
2233
+ });
2234
+ input.push({
2235
+ type: "local_shell_call_output",
2236
+ call_id: part.toolCallId,
2237
+ output: parsedOutput.output
2238
+ });
2239
+ break;
2240
+ }
2241
+ let contentValue;
2242
+ switch (output.type) {
2243
+ case "text":
2244
+ case "error-text":
2245
+ contentValue = output.value;
2246
+ break;
2247
+ case "json":
2248
+ case "error-json":
2249
+ contentValue = JSON.stringify(output.value);
2250
+ break;
2251
+ case "content":
2252
+ contentValue = output.value.map((item) => {
2253
+ switch (item.type) {
2254
+ case "text": {
2255
+ return { type: "input_text", text: item.text };
2256
+ }
2257
+ case "media": {
2258
+ return item.mediaType.startsWith("image/") ? {
2259
+ type: "input_image",
2260
+ image_url: `data:${item.mediaType};base64,${item.data}`
2261
+ } : {
2262
+ type: "input_file",
2263
+ filename: "data",
2264
+ file_data: `data:${item.mediaType};base64,${item.data}`
2265
+ };
2266
+ }
2267
+ }
2268
+ });
2269
+ break;
2270
+ }
2271
+ input.push({
2272
+ type: "function_call_output",
2273
+ call_id: part.toolCallId,
2274
+ output: contentValue
2275
+ });
2276
+ }
2277
+ break;
2278
+ }
2279
+ default: {
2280
+ const _exhaustiveCheck = role;
2281
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2282
+ }
2283
+ }
2284
+ }
2285
+ return { input, warnings };
2286
+ }
2287
+ var openaiResponsesReasoningProviderOptionsSchema = z.object({
2288
+ itemId: z.string().nullish(),
2289
+ reasoningEncryptedContent: z.string().nullish()
2290
+ });
2291
+ function mapOpenAIResponseFinishReason({
2292
+ finishReason,
2293
+ hasFunctionCall
2294
+ }) {
2295
+ switch (finishReason) {
2296
+ case void 0:
2297
+ case null:
2298
+ return hasFunctionCall ? "tool-calls" : "stop";
2299
+ case "max_output_tokens":
2300
+ return "length";
2301
+ case "content_filter":
2302
+ return "content-filter";
2303
+ default:
2304
+ return hasFunctionCall ? "tool-calls" : "unknown";
2305
+ }
2306
+ }
2307
+ var openaiResponsesChunkSchema = lazyValidator(
2308
+ () => zodSchema(
2309
+ z.union([
2310
+ z.object({
2311
+ type: z.literal("response.output_text.delta"),
2312
+ item_id: z.string(),
2313
+ delta: z.string(),
2314
+ logprobs: z.array(
2315
+ z.object({
2316
+ token: z.string(),
2317
+ logprob: z.number(),
2318
+ top_logprobs: z.array(
2319
+ z.object({
2320
+ token: z.string(),
2321
+ logprob: z.number()
2322
+ })
2323
+ )
2324
+ })
2325
+ ).nullish()
2326
+ }),
2327
+ z.object({
2328
+ type: z.enum(["response.completed", "response.incomplete"]),
2329
+ response: z.object({
2330
+ incomplete_details: z.object({ reason: z.string() }).nullish(),
2331
+ usage: z.object({
2332
+ input_tokens: z.number(),
2333
+ input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
2334
+ output_tokens: z.number(),
2335
+ output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish()
2336
+ }),
2337
+ service_tier: z.string().nullish()
2338
+ })
2339
+ }),
2340
+ z.object({
2341
+ type: z.literal("response.created"),
2342
+ response: z.object({
2343
+ id: z.string(),
2344
+ created_at: z.number(),
2345
+ model: z.string(),
2346
+ service_tier: z.string().nullish()
2347
+ })
2348
+ }),
2349
+ z.object({
2350
+ type: z.literal("response.output_item.added"),
2351
+ output_index: z.number(),
2352
+ item: z.discriminatedUnion("type", [
2353
+ z.object({
2354
+ type: z.literal("message"),
2355
+ id: z.string()
2356
+ }),
2357
+ z.object({
2358
+ type: z.literal("reasoning"),
2359
+ id: z.string(),
2360
+ encrypted_content: z.string().nullish()
2361
+ }),
2362
+ z.object({
2363
+ type: z.literal("function_call"),
2364
+ id: z.string(),
2365
+ call_id: z.string(),
2366
+ name: z.string(),
2367
+ arguments: z.string()
2368
+ }),
2369
+ z.object({
2370
+ type: z.literal("web_search_call"),
2371
+ id: z.string(),
2372
+ status: z.string()
2373
+ }),
2374
+ z.object({
2375
+ type: z.literal("computer_call"),
2376
+ id: z.string(),
2377
+ status: z.string()
2378
+ }),
2379
+ z.object({
2380
+ type: z.literal("file_search_call"),
2381
+ id: z.string()
2382
+ }),
2383
+ z.object({
2384
+ type: z.literal("image_generation_call"),
2385
+ id: z.string()
2386
+ }),
2387
+ z.object({
2388
+ type: z.literal("code_interpreter_call"),
2389
+ id: z.string(),
2390
+ container_id: z.string(),
2391
+ code: z.string().nullable(),
2392
+ outputs: z.array(
2393
+ z.discriminatedUnion("type", [
2394
+ z.object({ type: z.literal("logs"), logs: z.string() }),
2395
+ z.object({ type: z.literal("image"), url: z.string() })
2396
+ ])
2397
+ ).nullable(),
2398
+ status: z.string()
2399
+ })
2400
+ ])
2401
+ }),
2402
+ z.object({
2403
+ type: z.literal("response.output_item.done"),
2404
+ output_index: z.number(),
2405
+ item: z.discriminatedUnion("type", [
2406
+ z.object({
2407
+ type: z.literal("message"),
2408
+ id: z.string()
2409
+ }),
2410
+ z.object({
2411
+ type: z.literal("reasoning"),
2412
+ id: z.string(),
2413
+ encrypted_content: z.string().nullish()
2414
+ }),
2415
+ z.object({
2416
+ type: z.literal("function_call"),
2417
+ id: z.string(),
2418
+ call_id: z.string(),
2419
+ name: z.string(),
2420
+ arguments: z.string(),
2421
+ status: z.literal("completed")
2422
+ }),
2423
+ z.object({
2424
+ type: z.literal("code_interpreter_call"),
2425
+ id: z.string(),
2426
+ code: z.string().nullable(),
2427
+ container_id: z.string(),
2428
+ outputs: z.array(
2429
+ z.discriminatedUnion("type", [
2430
+ z.object({ type: z.literal("logs"), logs: z.string() }),
2431
+ z.object({ type: z.literal("image"), url: z.string() })
2432
+ ])
2433
+ ).nullable()
2434
+ }),
2435
+ z.object({
2436
+ type: z.literal("image_generation_call"),
2437
+ id: z.string(),
2438
+ result: z.string()
2439
+ }),
2440
+ z.object({
2441
+ type: z.literal("web_search_call"),
2442
+ id: z.string(),
2443
+ status: z.string(),
2444
+ action: z.discriminatedUnion("type", [
2445
+ z.object({
2446
+ type: z.literal("search"),
2447
+ query: z.string().nullish(),
2448
+ sources: z.array(
2449
+ z.discriminatedUnion("type", [
2450
+ z.object({ type: z.literal("url"), url: z.string() }),
2451
+ z.object({ type: z.literal("api"), name: z.string() })
2452
+ ])
2453
+ ).nullish()
2454
+ }),
2455
+ z.object({
2456
+ type: z.literal("open_page"),
2457
+ url: z.string()
2458
+ }),
2459
+ z.object({
2460
+ type: z.literal("find"),
2461
+ url: z.string(),
2462
+ pattern: z.string()
2463
+ })
2464
+ ])
2465
+ }),
2466
+ z.object({
2467
+ type: z.literal("file_search_call"),
2468
+ id: z.string(),
2469
+ queries: z.array(z.string()),
2470
+ results: z.array(
2471
+ z.object({
2472
+ attributes: z.record(z.string(), z.unknown()),
2473
+ file_id: z.string(),
2474
+ filename: z.string(),
2475
+ score: z.number(),
2476
+ text: z.string()
2477
+ })
2478
+ ).nullish()
2479
+ }),
2480
+ z.object({
2481
+ type: z.literal("local_shell_call"),
2482
+ id: z.string(),
2483
+ call_id: z.string(),
2484
+ action: z.object({
2485
+ type: z.literal("exec"),
2486
+ command: z.array(z.string()),
2487
+ timeout_ms: z.number().optional(),
2488
+ user: z.string().optional(),
2489
+ working_directory: z.string().optional(),
2490
+ env: z.record(z.string(), z.string()).optional()
2491
+ })
2492
+ }),
2493
+ z.object({
2494
+ type: z.literal("computer_call"),
2495
+ id: z.string(),
2496
+ status: z.literal("completed")
2497
+ })
2498
+ ])
2499
+ }),
2500
+ z.object({
2501
+ type: z.literal("response.function_call_arguments.delta"),
2502
+ item_id: z.string(),
2503
+ output_index: z.number(),
2504
+ delta: z.string()
2505
+ }),
2506
+ z.object({
2507
+ type: z.literal("response.image_generation_call.partial_image"),
2508
+ item_id: z.string(),
2509
+ output_index: z.number(),
2510
+ partial_image_b64: z.string()
2511
+ }),
2512
+ z.object({
2513
+ type: z.literal("response.code_interpreter_call_code.delta"),
2514
+ item_id: z.string(),
2515
+ output_index: z.number(),
2516
+ delta: z.string()
2517
+ }),
2518
+ z.object({
2519
+ type: z.literal("response.code_interpreter_call_code.done"),
2520
+ item_id: z.string(),
2521
+ output_index: z.number(),
2522
+ code: z.string()
2523
+ }),
2524
+ z.object({
2525
+ type: z.literal("response.output_text.annotation.added"),
2526
+ annotation: z.discriminatedUnion("type", [
2527
+ z.object({
2528
+ type: z.literal("url_citation"),
2529
+ start_index: z.number(),
2530
+ end_index: z.number(),
2531
+ url: z.string(),
2532
+ title: z.string()
2533
+ }),
2534
+ z.object({
2535
+ type: z.literal("file_citation"),
2536
+ file_id: z.string(),
2537
+ filename: z.string().nullish(),
2538
+ index: z.number().nullish(),
2539
+ start_index: z.number().nullish(),
2540
+ end_index: z.number().nullish(),
2541
+ quote: z.string().nullish()
2542
+ })
2543
+ ])
2544
+ }),
2545
+ z.object({
2546
+ type: z.literal("response.reasoning_summary_part.added"),
2547
+ item_id: z.string(),
2548
+ summary_index: z.number()
2549
+ }),
2550
+ z.object({
2551
+ type: z.literal("response.reasoning_summary_text.delta"),
2552
+ item_id: z.string(),
2553
+ summary_index: z.number(),
2554
+ delta: z.string()
2555
+ }),
2556
+ z.object({
2557
+ type: z.literal("response.reasoning_summary_part.done"),
2558
+ item_id: z.string(),
2559
+ summary_index: z.number()
2560
+ }),
2561
+ z.object({
2562
+ type: z.literal("error"),
2563
+ sequence_number: z.number(),
2564
+ error: z.object({
2565
+ type: z.string(),
2566
+ code: z.string(),
2567
+ message: z.string(),
2568
+ param: z.string().nullish()
2569
+ })
2570
+ }),
2571
+ z.object({ type: z.string() }).loose().transform((value) => ({
2572
+ type: "unknown_chunk",
2573
+ message: value.type
2574
+ }))
2575
+ // fallback for unknown chunks
2576
+ ])
2577
+ )
2578
+ );
2579
+ var openaiResponsesResponseSchema = lazyValidator(
2580
+ () => zodSchema(
2581
+ z.object({
2582
+ id: z.string().optional(),
2583
+ created_at: z.number().optional(),
2584
+ error: z.object({
2585
+ message: z.string(),
2586
+ type: z.string(),
2587
+ param: z.string().nullish(),
2588
+ code: z.string()
2589
+ }).nullish(),
2590
+ model: z.string().optional(),
2591
+ output: z.array(
2592
+ z.discriminatedUnion("type", [
2593
+ z.object({
2594
+ type: z.literal("message"),
2595
+ role: z.literal("assistant"),
2596
+ id: z.string(),
2597
+ content: z.array(
2598
+ z.object({
2599
+ type: z.literal("output_text"),
2600
+ text: z.string(),
2601
+ logprobs: z.array(
2602
+ z.object({
2603
+ token: z.string(),
2604
+ logprob: z.number(),
2605
+ top_logprobs: z.array(
2606
+ z.object({
2607
+ token: z.string(),
2608
+ logprob: z.number()
2609
+ })
2610
+ )
2611
+ })
2612
+ ).nullish(),
2613
+ annotations: z.array(
2614
+ z.discriminatedUnion("type", [
2615
+ z.object({
2616
+ type: z.literal("url_citation"),
2617
+ start_index: z.number(),
2618
+ end_index: z.number(),
2619
+ url: z.string(),
2620
+ title: z.string()
2621
+ }),
2622
+ z.object({
2623
+ type: z.literal("file_citation"),
2624
+ file_id: z.string(),
2625
+ filename: z.string().nullish(),
2626
+ index: z.number().nullish(),
2627
+ start_index: z.number().nullish(),
2628
+ end_index: z.number().nullish(),
2629
+ quote: z.string().nullish()
2630
+ }),
2631
+ z.object({
2632
+ type: z.literal("container_file_citation"),
2633
+ container_id: z.string(),
2634
+ file_id: z.string(),
2635
+ filename: z.string().nullish(),
2636
+ start_index: z.number().nullish(),
2637
+ end_index: z.number().nullish(),
2638
+ index: z.number().nullish()
2639
+ }),
2640
+ z.object({
2641
+ type: z.literal("file_path"),
2642
+ file_id: z.string(),
2643
+ index: z.number().nullish()
2644
+ })
2645
+ ])
2646
+ )
2647
+ })
2648
+ )
2649
+ }),
2650
+ z.object({
2651
+ type: z.literal("web_search_call"),
2652
+ id: z.string(),
2653
+ status: z.string(),
2654
+ action: z.discriminatedUnion("type", [
2655
+ z.object({
2656
+ type: z.literal("search"),
2657
+ query: z.string().nullish(),
2658
+ sources: z.array(
2659
+ z.discriminatedUnion("type", [
2660
+ z.object({ type: z.literal("url"), url: z.string() }),
2661
+ z.object({ type: z.literal("api"), name: z.string() })
2662
+ ])
2663
+ ).nullish()
2664
+ }),
2665
+ z.object({
2666
+ type: z.literal("open_page"),
2667
+ url: z.string()
2668
+ }),
2669
+ z.object({
2670
+ type: z.literal("find"),
2671
+ url: z.string(),
2672
+ pattern: z.string()
2673
+ })
2674
+ ])
2675
+ }),
2676
+ z.object({
2677
+ type: z.literal("file_search_call"),
2678
+ id: z.string(),
2679
+ queries: z.array(z.string()),
2680
+ results: z.array(
2681
+ z.object({
2682
+ attributes: z.record(
2683
+ z.string(),
2684
+ z.union([z.string(), z.number(), z.boolean()])
2685
+ ),
2686
+ file_id: z.string(),
2687
+ filename: z.string(),
2688
+ score: z.number(),
2689
+ text: z.string()
2690
+ })
2691
+ ).nullish()
2692
+ }),
2693
+ z.object({
2694
+ type: z.literal("code_interpreter_call"),
2695
+ id: z.string(),
2696
+ code: z.string().nullable(),
2697
+ container_id: z.string(),
2698
+ outputs: z.array(
2699
+ z.discriminatedUnion("type", [
2700
+ z.object({ type: z.literal("logs"), logs: z.string() }),
2701
+ z.object({ type: z.literal("image"), url: z.string() })
2702
+ ])
2703
+ ).nullable()
2704
+ }),
2705
+ z.object({
2706
+ type: z.literal("image_generation_call"),
2707
+ id: z.string(),
2708
+ result: z.string()
2709
+ }),
2710
+ z.object({
2711
+ type: z.literal("local_shell_call"),
2712
+ id: z.string(),
2713
+ call_id: z.string(),
2714
+ action: z.object({
2715
+ type: z.literal("exec"),
2716
+ command: z.array(z.string()),
2717
+ timeout_ms: z.number().optional(),
2718
+ user: z.string().optional(),
2719
+ working_directory: z.string().optional(),
2720
+ env: z.record(z.string(), z.string()).optional()
2721
+ })
2722
+ }),
2723
+ z.object({
2724
+ type: z.literal("function_call"),
2725
+ call_id: z.string(),
2726
+ name: z.string(),
2727
+ arguments: z.string(),
2728
+ id: z.string()
2729
+ }),
2730
+ z.object({
2731
+ type: z.literal("computer_call"),
2732
+ id: z.string(),
2733
+ status: z.string().optional()
2734
+ }),
2735
+ z.object({
2736
+ type: z.literal("reasoning"),
2737
+ id: z.string(),
2738
+ encrypted_content: z.string().nullish(),
2739
+ summary: z.array(
2740
+ z.object({
2741
+ type: z.literal("summary_text"),
2742
+ text: z.string()
2743
+ })
2744
+ )
2745
+ })
2746
+ ])
2747
+ ).optional(),
2748
+ service_tier: z.string().nullish(),
2749
+ incomplete_details: z.object({ reason: z.string() }).nullish(),
2750
+ usage: z.object({
2751
+ input_tokens: z.number(),
2752
+ input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
2753
+ output_tokens: z.number(),
2754
+ output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish()
2755
+ }).optional()
2756
+ })
2757
+ )
2758
+ );
2759
+ var TOP_LOGPROBS_MAX = 20;
2760
+ var openaiResponsesProviderOptionsSchema = lazyValidator(
2761
+ () => zodSchema(
2762
+ z.object({
2763
+ conversation: z.string().nullish(),
2764
+ include: z.array(
2765
+ z.enum([
2766
+ "reasoning.encrypted_content",
2767
+ // handled internally by default, only needed for unknown reasoning models
2768
+ "file_search_call.results",
2769
+ "message.output_text.logprobs"
2770
+ ])
2771
+ ).nullish(),
2772
+ instructions: z.string().nullish(),
2773
+ /**
2774
+ * Return the log probabilities of the tokens.
2775
+ *
2776
+ * Setting to true will return the log probabilities of the tokens that
2777
+ * were generated.
2778
+ *
2779
+ * Setting to a number will return the log probabilities of the top n
2780
+ * tokens that were generated.
2781
+ *
2782
+ * @see https://platform.openai.com/docs/api-reference/responses/create
2783
+ * @see https://cookbook.openai.com/examples/using_logprobs
2784
+ */
2785
+ logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
2786
+ /**
2787
+ * The maximum number of total calls to built-in tools that can be processed in a response.
2788
+ * This maximum number applies across all built-in tool calls, not per individual tool.
2789
+ * Any further attempts to call a tool by the model will be ignored.
2790
+ */
2791
+ maxToolCalls: z.number().nullish(),
2792
+ metadata: z.any().nullish(),
2793
+ parallelToolCalls: z.boolean().nullish(),
2794
+ previousResponseId: z.string().nullish(),
2795
+ promptCacheKey: z.string().nullish(),
2796
+ /**
2797
+ * The retention policy for the prompt cache.
2798
+ * - 'in_memory': Default. Standard prompt caching behavior.
2799
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
2800
+ * Currently only available for 5.1 series models.
2801
+ *
2802
+ * @default 'in_memory'
2803
+ */
2804
+ promptCacheRetention: z.enum(["in_memory", "24h"]).nullish(),
2805
+ reasoningEffort: z.string().nullish(),
2806
+ reasoningSummary: z.string().nullish(),
2807
+ safetyIdentifier: z.string().nullish(),
2808
+ serviceTier: z.enum(["auto", "flex", "priority", "default"]).nullish(),
2809
+ store: z.boolean().nullish(),
2810
+ strictJsonSchema: z.boolean().nullish(),
2811
+ textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
2812
+ truncation: z.enum(["auto", "disabled"]).nullish(),
2813
+ user: z.string().nullish()
2814
+ })
2815
+ )
2816
+ );
2817
+ var codeInterpreterInputSchema = lazySchema(
2818
+ () => zodSchema(
2819
+ z.object({
2820
+ code: z.string().nullish(),
2821
+ containerId: z.string()
2822
+ })
2823
+ )
2824
+ );
2825
+ var codeInterpreterOutputSchema = lazySchema(
2826
+ () => zodSchema(
2827
+ z.object({
2828
+ outputs: z.array(
2829
+ z.discriminatedUnion("type", [
2830
+ z.object({ type: z.literal("logs"), logs: z.string() }),
2831
+ z.object({ type: z.literal("image"), url: z.string() })
2832
+ ])
2833
+ ).nullish()
2834
+ })
2835
+ )
2836
+ );
2837
+ var codeInterpreterArgsSchema = lazySchema(
2838
+ () => zodSchema(
2839
+ z.object({
2840
+ container: z.union([
2841
+ z.string(),
2842
+ z.object({
2843
+ fileIds: z.array(z.string()).optional()
2844
+ })
2845
+ ]).optional()
2846
+ })
2847
+ )
2848
+ );
2849
+ var codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema({
2850
+ id: "openai.code_interpreter",
2851
+ name: "code_interpreter",
2852
+ inputSchema: codeInterpreterInputSchema,
2853
+ outputSchema: codeInterpreterOutputSchema
2854
+ });
2855
+ var codeInterpreter = (args = {}) => {
2856
+ return codeInterpreterToolFactory(args);
2857
+ };
2858
+ var comparisonFilterSchema = z.object({
2859
+ key: z.string(),
2860
+ type: z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
2861
+ value: z.union([z.string(), z.number(), z.boolean()])
2862
+ });
2863
+ var compoundFilterSchema = z.object({
2864
+ type: z.enum(["and", "or"]),
2865
+ filters: z.array(
2866
+ z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])
2867
+ )
2868
+ });
2869
+ var fileSearchArgsSchema = lazySchema(
2870
+ () => zodSchema(
2871
+ z.object({
2872
+ vectorStoreIds: z.array(z.string()),
2873
+ maxNumResults: z.number().optional(),
2874
+ ranking: z.object({
2875
+ ranker: z.string().optional(),
2876
+ scoreThreshold: z.number().optional()
2877
+ }).optional(),
2878
+ filters: z.union([comparisonFilterSchema, compoundFilterSchema]).optional()
2879
+ })
2880
+ )
2881
+ );
2882
+ var fileSearchOutputSchema = lazySchema(
2883
+ () => zodSchema(
2884
+ z.object({
2885
+ queries: z.array(z.string()),
2886
+ results: z.array(
2887
+ z.object({
2888
+ attributes: z.record(z.string(), z.unknown()),
2889
+ fileId: z.string(),
2890
+ filename: z.string(),
2891
+ score: z.number(),
2892
+ text: z.string()
2893
+ })
2894
+ ).nullable()
2895
+ })
2896
+ )
2897
+ );
2898
+ var fileSearch = createProviderDefinedToolFactoryWithOutputSchema({
2899
+ id: "openai.file_search",
2900
+ name: "file_search",
2901
+ inputSchema: z.object({}),
2902
+ outputSchema: fileSearchOutputSchema
2903
+ });
2904
+ var webSearchArgsSchema = lazySchema(
2905
+ () => zodSchema(
2906
+ z.object({
2907
+ externalWebAccess: z.boolean().optional(),
2908
+ filters: z.object({ allowedDomains: z.array(z.string()).optional() }).optional(),
2909
+ searchContextSize: z.enum(["low", "medium", "high"]).optional(),
2910
+ userLocation: z.object({
2911
+ type: z.literal("approximate"),
2912
+ country: z.string().optional(),
2913
+ city: z.string().optional(),
2914
+ region: z.string().optional(),
2915
+ timezone: z.string().optional()
2916
+ }).optional()
2917
+ })
2918
+ )
2919
+ );
2920
+ var webSearchInputSchema = lazySchema(() => zodSchema(z.object({})));
2921
+ var webSearchOutputSchema = lazySchema(
2922
+ () => zodSchema(
2923
+ z.object({
2924
+ action: z.discriminatedUnion("type", [
2925
+ z.object({
2926
+ type: z.literal("search"),
2927
+ query: z.string().optional()
2928
+ }),
2929
+ z.object({
2930
+ type: z.literal("openPage"),
2931
+ url: z.string()
2932
+ }),
2933
+ z.object({
2934
+ type: z.literal("find"),
2935
+ url: z.string(),
2936
+ pattern: z.string()
2937
+ })
2938
+ ]),
2939
+ sources: z.array(
2940
+ z.discriminatedUnion("type", [
2941
+ z.object({ type: z.literal("url"), url: z.string() }),
2942
+ z.object({ type: z.literal("api"), name: z.string() })
2943
+ ])
2944
+ ).optional()
2945
+ })
2946
+ )
2947
+ );
2948
+ createProviderDefinedToolFactoryWithOutputSchema({
2949
+ id: "openai.web_search",
2950
+ name: "web_search",
2951
+ inputSchema: webSearchInputSchema,
2952
+ outputSchema: webSearchOutputSchema
2953
+ });
2954
+ var webSearchPreviewArgsSchema = lazySchema(
2955
+ () => zodSchema(
2956
+ z.object({
2957
+ searchContextSize: z.enum(["low", "medium", "high"]).optional(),
2958
+ userLocation: z.object({
2959
+ type: z.literal("approximate"),
2960
+ country: z.string().optional(),
2961
+ city: z.string().optional(),
2962
+ region: z.string().optional(),
2963
+ timezone: z.string().optional()
2964
+ }).optional()
2965
+ })
2966
+ )
2967
+ );
2968
+ var webSearchPreviewInputSchema = lazySchema(
2969
+ () => zodSchema(z.object({}))
2970
+ );
2971
+ var webSearchPreviewOutputSchema = lazySchema(
2972
+ () => zodSchema(
2973
+ z.object({
2974
+ action: z.discriminatedUnion("type", [
2975
+ z.object({
2976
+ type: z.literal("search"),
2977
+ query: z.string().optional()
2978
+ }),
2979
+ z.object({
2980
+ type: z.literal("openPage"),
2981
+ url: z.string()
2982
+ }),
2983
+ z.object({
2984
+ type: z.literal("find"),
2985
+ url: z.string(),
2986
+ pattern: z.string()
2987
+ })
2988
+ ])
2989
+ })
2990
+ )
2991
+ );
2992
+ var webSearchPreview = createProviderDefinedToolFactoryWithOutputSchema({
2993
+ id: "openai.web_search_preview",
2994
+ name: "web_search_preview",
2995
+ inputSchema: webSearchPreviewInputSchema,
2996
+ outputSchema: webSearchPreviewOutputSchema
2997
+ });
2998
+ var imageGenerationArgsSchema = lazySchema(
2999
+ () => zodSchema(
3000
+ z.object({
3001
+ background: z.enum(["auto", "opaque", "transparent"]).optional(),
3002
+ inputFidelity: z.enum(["low", "high"]).optional(),
3003
+ inputImageMask: z.object({
3004
+ fileId: z.string().optional(),
3005
+ imageUrl: z.string().optional()
3006
+ }).optional(),
3007
+ model: z.string().optional(),
3008
+ moderation: z.enum(["auto"]).optional(),
3009
+ outputCompression: z.number().int().min(0).max(100).optional(),
3010
+ outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
3011
+ partialImages: z.number().int().min(0).max(3).optional(),
3012
+ quality: z.enum(["auto", "low", "medium", "high"]).optional(),
3013
+ size: z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
3014
+ }).strict()
3015
+ )
3016
+ );
3017
+ var imageGenerationInputSchema = lazySchema(() => zodSchema(z.object({})));
3018
+ var imageGenerationOutputSchema = lazySchema(
3019
+ () => zodSchema(z.object({ result: z.string() }))
3020
+ );
3021
+ var imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema({
3022
+ id: "openai.image_generation",
3023
+ name: "image_generation",
3024
+ inputSchema: imageGenerationInputSchema,
3025
+ outputSchema: imageGenerationOutputSchema
3026
+ });
3027
+ var imageGeneration = (args = {}) => {
3028
+ return imageGenerationToolFactory(args);
3029
+ };
3030
+ async function prepareResponsesTools({
3031
+ tools,
3032
+ toolChoice,
3033
+ strictJsonSchema
3034
+ }) {
3035
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
3036
+ const toolWarnings = [];
3037
+ if (tools == null) {
3038
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
3039
+ }
3040
+ const openaiTools = [];
3041
+ for (const tool of tools) {
3042
+ switch (tool.type) {
3043
+ case "function":
3044
+ openaiTools.push({
3045
+ type: "function",
3046
+ name: tool.name,
3047
+ description: tool.description,
3048
+ parameters: tool.inputSchema,
3049
+ strict: strictJsonSchema
3050
+ });
3051
+ break;
3052
+ case "provider-defined": {
3053
+ switch (tool.id) {
3054
+ case "openai.file_search": {
3055
+ const args = await validateTypes({
3056
+ value: tool.args,
3057
+ schema: fileSearchArgsSchema
3058
+ });
3059
+ openaiTools.push({
3060
+ type: "file_search",
3061
+ vector_store_ids: args.vectorStoreIds,
3062
+ max_num_results: args.maxNumResults,
3063
+ ranking_options: args.ranking ? {
3064
+ ranker: args.ranking.ranker,
3065
+ score_threshold: args.ranking.scoreThreshold
3066
+ } : void 0,
3067
+ filters: args.filters
3068
+ });
3069
+ break;
3070
+ }
3071
+ case "openai.local_shell": {
3072
+ openaiTools.push({
3073
+ type: "local_shell"
3074
+ });
3075
+ break;
3076
+ }
3077
+ case "openai.web_search_preview": {
3078
+ const args = await validateTypes({
3079
+ value: tool.args,
3080
+ schema: webSearchPreviewArgsSchema
3081
+ });
3082
+ openaiTools.push({
3083
+ type: "web_search_preview",
3084
+ search_context_size: args.searchContextSize,
3085
+ user_location: args.userLocation
3086
+ });
3087
+ break;
3088
+ }
3089
+ case "openai.web_search": {
3090
+ const args = await validateTypes({
3091
+ value: tool.args,
3092
+ schema: webSearchArgsSchema
3093
+ });
3094
+ openaiTools.push({
3095
+ type: "web_search",
3096
+ filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
3097
+ external_web_access: args.externalWebAccess,
3098
+ search_context_size: args.searchContextSize,
3099
+ user_location: args.userLocation
3100
+ });
3101
+ break;
3102
+ }
3103
+ case "openai.code_interpreter": {
3104
+ const args = await validateTypes({
3105
+ value: tool.args,
3106
+ schema: codeInterpreterArgsSchema
3107
+ });
3108
+ openaiTools.push({
3109
+ type: "code_interpreter",
3110
+ container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
3111
+ });
3112
+ break;
3113
+ }
3114
+ case "openai.image_generation": {
3115
+ const args = await validateTypes({
3116
+ value: tool.args,
3117
+ schema: imageGenerationArgsSchema
3118
+ });
3119
+ openaiTools.push({
3120
+ type: "image_generation",
3121
+ background: args.background,
3122
+ input_fidelity: args.inputFidelity,
3123
+ input_image_mask: args.inputImageMask ? {
3124
+ file_id: args.inputImageMask.fileId,
3125
+ image_url: args.inputImageMask.imageUrl
3126
+ } : void 0,
3127
+ model: args.model,
3128
+ size: args.size,
3129
+ quality: args.quality,
3130
+ moderation: args.moderation,
3131
+ output_format: args.outputFormat,
3132
+ output_compression: args.outputCompression
3133
+ });
3134
+ break;
3135
+ }
3136
+ }
3137
+ break;
3138
+ }
3139
+ default:
3140
+ toolWarnings.push({ type: "unsupported-tool", tool });
3141
+ break;
3142
+ }
3143
+ }
3144
+ if (toolChoice == null) {
3145
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
3146
+ }
3147
+ const type = toolChoice.type;
3148
+ switch (type) {
3149
+ case "auto":
3150
+ case "none":
3151
+ case "required":
3152
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
3153
+ case "tool":
3154
+ return {
3155
+ tools: openaiTools,
3156
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
3157
+ toolWarnings
3158
+ };
3159
+ default: {
3160
+ const _exhaustiveCheck = type;
3161
+ throw new UnsupportedFunctionalityError({
3162
+ functionality: `tool choice type: ${_exhaustiveCheck}`
3163
+ });
3164
+ }
3165
+ }
3166
+ }
3167
+ var OpenAIResponsesLanguageModel = class {
3168
+ constructor(modelId, config) {
3169
+ this.specificationVersion = "v2";
3170
+ this.supportedUrls = {
3171
+ "image/*": [/^https?:\/\/.*$/],
3172
+ "application/pdf": [/^https?:\/\/.*$/]
3173
+ };
3174
+ this.modelId = modelId;
3175
+ this.config = config;
3176
+ }
3177
+ get provider() {
3178
+ return this.config.provider;
3179
+ }
3180
+ async getArgs({
3181
+ maxOutputTokens,
3182
+ temperature,
3183
+ stopSequences,
3184
+ topP,
3185
+ topK,
3186
+ presencePenalty,
3187
+ frequencyPenalty,
3188
+ seed,
3189
+ prompt,
3190
+ providerOptions,
3191
+ tools,
3192
+ toolChoice,
3193
+ responseFormat
3194
+ }) {
3195
+ var _a, _b, _c, _d;
3196
+ const warnings = [];
3197
+ const modelConfig = getResponsesModelConfig(this.modelId);
3198
+ if (topK != null) {
3199
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
3200
+ }
3201
+ if (seed != null) {
3202
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
3203
+ }
3204
+ if (presencePenalty != null) {
3205
+ warnings.push({
3206
+ type: "unsupported-setting",
3207
+ setting: "presencePenalty"
3208
+ });
3209
+ }
3210
+ if (frequencyPenalty != null) {
3211
+ warnings.push({
3212
+ type: "unsupported-setting",
3213
+ setting: "frequencyPenalty"
3214
+ });
3215
+ }
3216
+ if (stopSequences != null) {
3217
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
3218
+ }
3219
+ const openaiOptions = await parseProviderOptions({
3220
+ provider: "openai",
3221
+ providerOptions,
3222
+ schema: openaiResponsesProviderOptionsSchema
3223
+ });
3224
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
3225
+ warnings.push({
3226
+ type: "unsupported-setting",
3227
+ setting: "conversation",
3228
+ details: "conversation and previousResponseId cannot be used together"
3229
+ });
3230
+ }
3231
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
3232
+ prompt,
3233
+ systemMessageMode: modelConfig.systemMessageMode,
3234
+ fileIdPrefixes: this.config.fileIdPrefixes,
3235
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
3236
+ hasLocalShellTool: hasOpenAITool("openai.local_shell")
3237
+ });
3238
+ warnings.push(...inputWarnings);
3239
+ const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
3240
+ let include = openaiOptions == null ? void 0 : openaiOptions.include;
3241
+ function addInclude(key) {
3242
+ if (include == null) {
3243
+ include = [key];
3244
+ } else if (!include.includes(key)) {
3245
+ include = [...include, key];
3246
+ }
3247
+ }
3248
+ function hasOpenAITool(id) {
3249
+ return (tools == null ? void 0 : tools.find(
3250
+ (tool) => tool.type === "provider-defined" && tool.id === id
3251
+ )) != null;
3252
+ }
3253
+ const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
3254
+ if (topLogprobs) {
3255
+ addInclude("message.output_text.logprobs");
3256
+ }
3257
+ const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
3258
+ (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
3259
+ )) == null ? void 0 : _c.name;
3260
+ if (webSearchToolName) {
3261
+ addInclude("web_search_call.action.sources");
3262
+ }
3263
+ if (hasOpenAITool("openai.code_interpreter")) {
3264
+ addInclude("code_interpreter_call.outputs");
3265
+ }
3266
+ const store = openaiOptions == null ? void 0 : openaiOptions.store;
3267
+ if (store === false && modelConfig.isReasoningModel) {
3268
+ addInclude("reasoning.encrypted_content");
3269
+ }
3270
+ const baseArgs = {
3271
+ model: this.modelId,
3272
+ input,
3273
+ temperature,
3274
+ top_p: topP,
3275
+ max_output_tokens: maxOutputTokens,
3276
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
3277
+ text: {
3278
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
3279
+ format: responseFormat.schema != null ? {
3280
+ type: "json_schema",
3281
+ strict: strictJsonSchema,
3282
+ name: (_d = responseFormat.name) != null ? _d : "response",
3283
+ description: responseFormat.description,
3284
+ schema: responseFormat.schema
3285
+ } : { type: "json_object" }
3286
+ },
3287
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
3288
+ verbosity: openaiOptions.textVerbosity
3289
+ }
3290
+ }
3291
+ },
3292
+ // provider options:
3293
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
3294
+ max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
3295
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
3296
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
3297
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
3298
+ store,
3299
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
3300
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
3301
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
3302
+ include,
3303
+ prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
3304
+ prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
3305
+ safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
3306
+ top_logprobs: topLogprobs,
3307
+ truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
3308
+ // model-specific settings:
3309
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
3310
+ reasoning: {
3311
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
3312
+ effort: openaiOptions.reasoningEffort
3313
+ },
3314
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
3315
+ summary: openaiOptions.reasoningSummary
3316
+ }
3317
+ }
3318
+ }
3319
+ };
3320
+ if (modelConfig.isReasoningModel) {
3321
+ if (baseArgs.temperature != null) {
3322
+ baseArgs.temperature = void 0;
3323
+ warnings.push({
3324
+ type: "unsupported-setting",
3325
+ setting: "temperature",
3326
+ details: "temperature is not supported for reasoning models"
3327
+ });
3328
+ }
3329
+ if (baseArgs.top_p != null) {
3330
+ baseArgs.top_p = void 0;
3331
+ warnings.push({
3332
+ type: "unsupported-setting",
3333
+ setting: "topP",
3334
+ details: "topP is not supported for reasoning models"
3335
+ });
3336
+ }
3337
+ } else {
3338
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
3339
+ warnings.push({
3340
+ type: "unsupported-setting",
3341
+ setting: "reasoningEffort",
3342
+ details: "reasoningEffort is not supported for non-reasoning models"
3343
+ });
3344
+ }
3345
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
3346
+ warnings.push({
3347
+ type: "unsupported-setting",
3348
+ setting: "reasoningSummary",
3349
+ details: "reasoningSummary is not supported for non-reasoning models"
3350
+ });
3351
+ }
3352
+ }
3353
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
3354
+ warnings.push({
3355
+ type: "unsupported-setting",
3356
+ setting: "serviceTier",
3357
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
3358
+ });
3359
+ delete baseArgs.service_tier;
3360
+ }
3361
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
3362
+ warnings.push({
3363
+ type: "unsupported-setting",
3364
+ setting: "serviceTier",
3365
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
3366
+ });
3367
+ delete baseArgs.service_tier;
3368
+ }
3369
+ const {
3370
+ tools: openaiTools,
3371
+ toolChoice: openaiToolChoice,
3372
+ toolWarnings
3373
+ } = await prepareResponsesTools({
3374
+ tools,
3375
+ toolChoice,
3376
+ strictJsonSchema
3377
+ });
3378
+ return {
3379
+ webSearchToolName,
3380
+ args: {
3381
+ ...baseArgs,
3382
+ tools: openaiTools,
3383
+ tool_choice: openaiToolChoice
3384
+ },
3385
+ warnings: [...warnings, ...toolWarnings],
3386
+ store
3387
+ };
3388
+ }
3389
+ async doGenerate(options) {
3390
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
3391
+ const {
3392
+ args: body,
3393
+ warnings,
3394
+ webSearchToolName
3395
+ } = await this.getArgs(options);
3396
+ const url = this.config.url({
3397
+ path: "/responses",
3398
+ modelId: this.modelId
3399
+ });
3400
+ const {
3401
+ responseHeaders,
3402
+ value: response,
3403
+ rawValue: rawResponse
3404
+ } = await postJsonToApi({
3405
+ url,
3406
+ headers: combineHeaders(this.config.headers(), options.headers),
3407
+ body,
3408
+ failedResponseHandler: openaiFailedResponseHandler,
3409
+ successfulResponseHandler: createJsonResponseHandler(
3410
+ openaiResponsesResponseSchema
3411
+ ),
3412
+ abortSignal: options.abortSignal,
3413
+ fetch: this.config.fetch
3414
+ });
3415
+ if (response.error) {
3416
+ throw new APICallError({
3417
+ message: response.error.message,
3418
+ url,
3419
+ requestBodyValues: body,
3420
+ statusCode: 400,
3421
+ responseHeaders,
3422
+ responseBody: rawResponse,
3423
+ isRetryable: false
3424
+ });
3425
+ }
3426
+ const content = [];
3427
+ const logprobs = [];
3428
+ let hasFunctionCall = false;
3429
+ for (const part of response.output) {
3430
+ switch (part.type) {
3431
+ case "reasoning": {
3432
+ if (part.summary.length === 0) {
3433
+ part.summary.push({ type: "summary_text", text: "" });
3434
+ }
3435
+ for (const summary of part.summary) {
3436
+ content.push({
3437
+ type: "reasoning",
3438
+ text: summary.text,
3439
+ providerMetadata: {
3440
+ openai: {
3441
+ itemId: part.id,
3442
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
3443
+ }
3444
+ }
3445
+ });
3446
+ }
3447
+ break;
3448
+ }
3449
+ case "image_generation_call": {
3450
+ content.push({
3451
+ type: "tool-call",
3452
+ toolCallId: part.id,
3453
+ toolName: "image_generation",
3454
+ input: "{}",
3455
+ providerExecuted: true
3456
+ });
3457
+ content.push({
3458
+ type: "tool-result",
3459
+ toolCallId: part.id,
3460
+ toolName: "image_generation",
3461
+ result: {
3462
+ result: part.result
3463
+ },
3464
+ providerExecuted: true
3465
+ });
3466
+ break;
3467
+ }
3468
+ case "local_shell_call": {
3469
+ content.push({
3470
+ type: "tool-call",
3471
+ toolCallId: part.call_id,
3472
+ toolName: "local_shell",
3473
+ input: JSON.stringify({
3474
+ action: part.action
3475
+ }),
3476
+ providerMetadata: {
3477
+ openai: {
3478
+ itemId: part.id
3479
+ }
3480
+ }
3481
+ });
3482
+ break;
3483
+ }
3484
+ case "message": {
3485
+ for (const contentPart of part.content) {
3486
+ if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
3487
+ logprobs.push(contentPart.logprobs);
3488
+ }
3489
+ content.push({
3490
+ type: "text",
3491
+ text: contentPart.text,
3492
+ providerMetadata: {
3493
+ openai: {
3494
+ itemId: part.id
3495
+ }
3496
+ }
3497
+ });
3498
+ for (const annotation of contentPart.annotations) {
3499
+ if (annotation.type === "url_citation") {
3500
+ content.push({
3501
+ type: "source",
3502
+ sourceType: "url",
3503
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId(),
3504
+ url: annotation.url,
3505
+ title: annotation.title
3506
+ });
3507
+ } else if (annotation.type === "file_citation") {
3508
+ content.push({
3509
+ type: "source",
3510
+ sourceType: "document",
3511
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId(),
3512
+ mediaType: "text/plain",
3513
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
3514
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
3515
+ ...annotation.file_id ? {
3516
+ providerMetadata: {
3517
+ openai: {
3518
+ fileId: annotation.file_id
3519
+ }
3520
+ }
3521
+ } : {}
3522
+ });
3523
+ }
3524
+ }
3525
+ }
3526
+ break;
3527
+ }
3528
+ case "function_call": {
3529
+ hasFunctionCall = true;
3530
+ content.push({
3531
+ type: "tool-call",
3532
+ toolCallId: part.call_id,
3533
+ toolName: part.name,
3534
+ input: part.arguments,
3535
+ providerMetadata: {
3536
+ openai: {
3537
+ itemId: part.id
3538
+ }
3539
+ }
3540
+ });
3541
+ break;
3542
+ }
3543
+ case "web_search_call": {
3544
+ content.push({
3545
+ type: "tool-call",
3546
+ toolCallId: part.id,
3547
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3548
+ input: JSON.stringify({}),
3549
+ providerExecuted: true
3550
+ });
3551
+ content.push({
3552
+ type: "tool-result",
3553
+ toolCallId: part.id,
3554
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3555
+ result: mapWebSearchOutput(part.action),
3556
+ providerExecuted: true
3557
+ });
3558
+ break;
3559
+ }
3560
+ case "computer_call": {
3561
+ content.push({
3562
+ type: "tool-call",
3563
+ toolCallId: part.id,
3564
+ toolName: "computer_use",
3565
+ input: "",
3566
+ providerExecuted: true
3567
+ });
3568
+ content.push({
3569
+ type: "tool-result",
3570
+ toolCallId: part.id,
3571
+ toolName: "computer_use",
3572
+ result: {
3573
+ type: "computer_use_tool_result",
3574
+ status: part.status || "completed"
3575
+ },
3576
+ providerExecuted: true
3577
+ });
3578
+ break;
3579
+ }
3580
+ case "file_search_call": {
3581
+ content.push({
3582
+ type: "tool-call",
3583
+ toolCallId: part.id,
3584
+ toolName: "file_search",
3585
+ input: "{}",
3586
+ providerExecuted: true
3587
+ });
3588
+ content.push({
3589
+ type: "tool-result",
3590
+ toolCallId: part.id,
3591
+ toolName: "file_search",
3592
+ result: {
3593
+ queries: part.queries,
3594
+ results: (_n = (_m = part.results) == null ? void 0 : _m.map((result) => ({
3595
+ attributes: result.attributes,
3596
+ fileId: result.file_id,
3597
+ filename: result.filename,
3598
+ score: result.score,
3599
+ text: result.text
3600
+ }))) != null ? _n : null
3601
+ },
3602
+ providerExecuted: true
3603
+ });
3604
+ break;
3605
+ }
3606
+ case "code_interpreter_call": {
3607
+ content.push({
3608
+ type: "tool-call",
3609
+ toolCallId: part.id,
3610
+ toolName: "code_interpreter",
3611
+ input: JSON.stringify({
3612
+ code: part.code,
3613
+ containerId: part.container_id
3614
+ }),
3615
+ providerExecuted: true
3616
+ });
3617
+ content.push({
3618
+ type: "tool-result",
3619
+ toolCallId: part.id,
3620
+ toolName: "code_interpreter",
3621
+ result: {
3622
+ outputs: part.outputs
3623
+ },
3624
+ providerExecuted: true
3625
+ });
3626
+ break;
3627
+ }
3628
+ }
3629
+ }
3630
+ const providerMetadata = {
3631
+ openai: {
3632
+ ...response.id != null ? { responseId: response.id } : {}
3633
+ }
3634
+ };
3635
+ if (logprobs.length > 0) {
3636
+ providerMetadata.openai.logprobs = logprobs;
3637
+ }
3638
+ if (typeof response.service_tier === "string") {
3639
+ providerMetadata.openai.serviceTier = response.service_tier;
3640
+ }
3641
+ const usage = response.usage;
3642
+ return {
3643
+ content,
3644
+ finishReason: mapOpenAIResponseFinishReason({
3645
+ finishReason: (_o = response.incomplete_details) == null ? void 0 : _o.reason,
3646
+ hasFunctionCall
3647
+ }),
3648
+ usage: {
3649
+ inputTokens: usage.input_tokens,
3650
+ outputTokens: usage.output_tokens,
3651
+ totalTokens: usage.input_tokens + usage.output_tokens,
3652
+ reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
3653
+ cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
3654
+ },
3655
+ request: { body },
3656
+ response: {
3657
+ id: response.id,
3658
+ timestamp: new Date(response.created_at * 1e3),
3659
+ modelId: response.model,
3660
+ headers: responseHeaders,
3661
+ body: rawResponse
3662
+ },
3663
+ providerMetadata,
3664
+ warnings
3665
+ };
3666
+ }
3667
+ async doStream(options) {
3668
+ const {
3669
+ args: body,
3670
+ warnings,
3671
+ webSearchToolName,
3672
+ store
3673
+ } = await this.getArgs(options);
3674
+ const { responseHeaders, value: response } = await postJsonToApi({
3675
+ url: this.config.url({
3676
+ path: "/responses",
3677
+ modelId: this.modelId
3678
+ }),
3679
+ headers: combineHeaders(this.config.headers(), options.headers),
3680
+ body: {
3681
+ ...body,
3682
+ stream: true
3683
+ },
3684
+ failedResponseHandler: openaiFailedResponseHandler,
3685
+ successfulResponseHandler: createEventSourceResponseHandler(
3686
+ openaiResponsesChunkSchema
3687
+ ),
3688
+ abortSignal: options.abortSignal,
3689
+ fetch: this.config.fetch
3690
+ });
3691
+ const self = this;
3692
+ let finishReason = "unknown";
3693
+ const usage = {
3694
+ inputTokens: void 0,
3695
+ outputTokens: void 0,
3696
+ totalTokens: void 0
3697
+ };
3698
+ const logprobs = [];
3699
+ let responseId = null;
3700
+ const ongoingToolCalls = {};
3701
+ const ongoingAnnotations = [];
3702
+ let hasFunctionCall = false;
3703
+ const activeReasoning = {};
3704
+ let serviceTier;
3705
+ return {
3706
+ stream: response.pipeThrough(
3707
+ new TransformStream({
3708
+ start(controller) {
3709
+ controller.enqueue({ type: "stream-start", warnings });
3710
+ },
3711
+ transform(chunk, controller) {
3712
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
3713
+ if (options.includeRawChunks) {
3714
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3715
+ }
3716
+ if (!chunk.success) {
3717
+ finishReason = "error";
3718
+ controller.enqueue({ type: "error", error: chunk.error });
3719
+ return;
3720
+ }
3721
+ const value = chunk.value;
3722
+ if (isResponseOutputItemAddedChunk(value)) {
3723
+ if (value.item.type === "function_call") {
3724
+ ongoingToolCalls[value.output_index] = {
3725
+ toolName: value.item.name,
3726
+ toolCallId: value.item.call_id
3727
+ };
3728
+ controller.enqueue({
3729
+ type: "tool-input-start",
3730
+ id: value.item.call_id,
3731
+ toolName: value.item.name
3732
+ });
3733
+ } else if (value.item.type === "web_search_call") {
3734
+ ongoingToolCalls[value.output_index] = {
3735
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3736
+ toolCallId: value.item.id
3737
+ };
3738
+ controller.enqueue({
3739
+ type: "tool-input-start",
3740
+ id: value.item.id,
3741
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3742
+ providerExecuted: true
3743
+ });
3744
+ controller.enqueue({
3745
+ type: "tool-input-end",
3746
+ id: value.item.id
3747
+ });
3748
+ controller.enqueue({
3749
+ type: "tool-call",
3750
+ toolCallId: value.item.id,
3751
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3752
+ input: JSON.stringify({}),
3753
+ providerExecuted: true
3754
+ });
3755
+ } else if (value.item.type === "computer_call") {
3756
+ ongoingToolCalls[value.output_index] = {
3757
+ toolName: "computer_use",
3758
+ toolCallId: value.item.id
3759
+ };
3760
+ controller.enqueue({
3761
+ type: "tool-input-start",
3762
+ id: value.item.id,
3763
+ toolName: "computer_use",
3764
+ providerExecuted: true
3765
+ });
3766
+ } else if (value.item.type === "code_interpreter_call") {
3767
+ ongoingToolCalls[value.output_index] = {
3768
+ toolName: "code_interpreter",
3769
+ toolCallId: value.item.id,
3770
+ codeInterpreter: {
3771
+ containerId: value.item.container_id
3772
+ }
3773
+ };
3774
+ controller.enqueue({
3775
+ type: "tool-input-start",
3776
+ id: value.item.id,
3777
+ toolName: "code_interpreter",
3778
+ providerExecuted: true
3779
+ });
3780
+ controller.enqueue({
3781
+ type: "tool-input-delta",
3782
+ id: value.item.id,
3783
+ delta: `{"containerId":"${value.item.container_id}","code":"`
3784
+ });
3785
+ } else if (value.item.type === "file_search_call") {
3786
+ controller.enqueue({
3787
+ type: "tool-call",
3788
+ toolCallId: value.item.id,
3789
+ toolName: "file_search",
3790
+ input: "{}",
3791
+ providerExecuted: true
3792
+ });
3793
+ } else if (value.item.type === "image_generation_call") {
3794
+ controller.enqueue({
3795
+ type: "tool-call",
3796
+ toolCallId: value.item.id,
3797
+ toolName: "image_generation",
3798
+ input: "{}",
3799
+ providerExecuted: true
3800
+ });
3801
+ } else if (value.item.type === "message") {
3802
+ ongoingAnnotations.splice(0, ongoingAnnotations.length);
3803
+ controller.enqueue({
3804
+ type: "text-start",
3805
+ id: value.item.id,
3806
+ providerMetadata: {
3807
+ openai: {
3808
+ itemId: value.item.id
3809
+ }
3810
+ }
3811
+ });
3812
+ } else if (isResponseOutputItemAddedChunk(value) && value.item.type === "reasoning") {
3813
+ activeReasoning[value.item.id] = {
3814
+ encryptedContent: value.item.encrypted_content,
3815
+ summaryParts: { 0: "active" }
3816
+ };
3817
+ controller.enqueue({
3818
+ type: "reasoning-start",
3819
+ id: `${value.item.id}:0`,
3820
+ providerMetadata: {
3821
+ openai: {
3822
+ itemId: value.item.id,
3823
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
3824
+ }
3825
+ }
3826
+ });
3827
+ }
3828
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type !== "message") {
3829
+ if (value.item.type === "function_call") {
3830
+ ongoingToolCalls[value.output_index] = void 0;
3831
+ hasFunctionCall = true;
3832
+ controller.enqueue({
3833
+ type: "tool-input-end",
3834
+ id: value.item.call_id
3835
+ });
3836
+ controller.enqueue({
3837
+ type: "tool-call",
3838
+ toolCallId: value.item.call_id,
3839
+ toolName: value.item.name,
3840
+ input: value.item.arguments,
3841
+ providerMetadata: {
3842
+ openai: {
3843
+ itemId: value.item.id
3844
+ }
3845
+ }
3846
+ });
3847
+ } else if (value.item.type === "web_search_call") {
3848
+ ongoingToolCalls[value.output_index] = void 0;
3849
+ controller.enqueue({
3850
+ type: "tool-result",
3851
+ toolCallId: value.item.id,
3852
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3853
+ result: mapWebSearchOutput(value.item.action),
3854
+ providerExecuted: true
3855
+ });
3856
+ } else if (value.item.type === "computer_call") {
3857
+ ongoingToolCalls[value.output_index] = void 0;
3858
+ controller.enqueue({
3859
+ type: "tool-input-end",
3860
+ id: value.item.id
3861
+ });
3862
+ controller.enqueue({
3863
+ type: "tool-call",
3864
+ toolCallId: value.item.id,
3865
+ toolName: "computer_use",
3866
+ input: "",
3867
+ providerExecuted: true
3868
+ });
3869
+ controller.enqueue({
3870
+ type: "tool-result",
3871
+ toolCallId: value.item.id,
3872
+ toolName: "computer_use",
3873
+ result: {
3874
+ type: "computer_use_tool_result",
3875
+ status: value.item.status || "completed"
3876
+ },
3877
+ providerExecuted: true
3878
+ });
3879
+ } else if (value.item.type === "file_search_call") {
3880
+ ongoingToolCalls[value.output_index] = void 0;
3881
+ controller.enqueue({
3882
+ type: "tool-result",
3883
+ toolCallId: value.item.id,
3884
+ toolName: "file_search",
3885
+ result: {
3886
+ queries: value.item.queries,
3887
+ results: (_c = (_b = value.item.results) == null ? void 0 : _b.map((result) => ({
3888
+ attributes: result.attributes,
3889
+ fileId: result.file_id,
3890
+ filename: result.filename,
3891
+ score: result.score,
3892
+ text: result.text
3893
+ }))) != null ? _c : null
3894
+ },
3895
+ providerExecuted: true
3896
+ });
3897
+ } else if (value.item.type === "code_interpreter_call") {
3898
+ ongoingToolCalls[value.output_index] = void 0;
3899
+ controller.enqueue({
3900
+ type: "tool-result",
3901
+ toolCallId: value.item.id,
3902
+ toolName: "code_interpreter",
3903
+ result: {
3904
+ outputs: value.item.outputs
3905
+ },
3906
+ providerExecuted: true
3907
+ });
3908
+ } else if (value.item.type === "image_generation_call") {
3909
+ controller.enqueue({
3910
+ type: "tool-result",
3911
+ toolCallId: value.item.id,
3912
+ toolName: "image_generation",
3913
+ result: {
3914
+ result: value.item.result
3915
+ },
3916
+ providerExecuted: true
3917
+ });
3918
+ } else if (value.item.type === "local_shell_call") {
3919
+ ongoingToolCalls[value.output_index] = void 0;
3920
+ controller.enqueue({
3921
+ type: "tool-call",
3922
+ toolCallId: value.item.call_id,
3923
+ toolName: "local_shell",
3924
+ input: JSON.stringify({
3925
+ action: {
3926
+ type: "exec",
3927
+ command: value.item.action.command,
3928
+ timeoutMs: value.item.action.timeout_ms,
3929
+ user: value.item.action.user,
3930
+ workingDirectory: value.item.action.working_directory,
3931
+ env: value.item.action.env
3932
+ }
3933
+ }),
3934
+ providerMetadata: {
3935
+ openai: { itemId: value.item.id }
3936
+ }
3937
+ });
3938
+ } else if (value.item.type === "reasoning") {
3939
+ const activeReasoningPart = activeReasoning[value.item.id];
3940
+ const summaryPartIndices = Object.entries(
3941
+ activeReasoningPart.summaryParts
3942
+ ).filter(
3943
+ ([_, status]) => status === "active" || status === "can-conclude"
3944
+ ).map(([summaryIndex]) => summaryIndex);
3945
+ for (const summaryIndex of summaryPartIndices) {
3946
+ controller.enqueue({
3947
+ type: "reasoning-end",
3948
+ id: `${value.item.id}:${summaryIndex}`,
3949
+ providerMetadata: {
3950
+ openai: {
3951
+ itemId: value.item.id,
3952
+ reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
3953
+ }
3954
+ }
3955
+ });
3956
+ }
3957
+ delete activeReasoning[value.item.id];
3958
+ }
3959
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
3960
+ const toolCall = ongoingToolCalls[value.output_index];
3961
+ if (toolCall != null) {
3962
+ controller.enqueue({
3963
+ type: "tool-input-delta",
3964
+ id: toolCall.toolCallId,
3965
+ delta: value.delta
3966
+ });
3967
+ }
3968
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
3969
+ const toolCall = ongoingToolCalls[value.output_index];
3970
+ if (toolCall != null) {
3971
+ controller.enqueue({
3972
+ type: "tool-input-delta",
3973
+ id: toolCall.toolCallId,
3974
+ // The delta is code, which is embedding in a JSON string.
3975
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
3976
+ delta: JSON.stringify(value.delta).slice(1, -1)
3977
+ });
3978
+ }
3979
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
3980
+ const toolCall = ongoingToolCalls[value.output_index];
3981
+ if (toolCall != null) {
3982
+ controller.enqueue({
3983
+ type: "tool-input-delta",
3984
+ id: toolCall.toolCallId,
3985
+ delta: '"}'
3986
+ });
3987
+ controller.enqueue({
3988
+ type: "tool-input-end",
3989
+ id: toolCall.toolCallId
3990
+ });
3991
+ controller.enqueue({
3992
+ type: "tool-call",
3993
+ toolCallId: toolCall.toolCallId,
3994
+ toolName: "code_interpreter",
3995
+ input: JSON.stringify({
3996
+ code: value.code,
3997
+ containerId: toolCall.codeInterpreter.containerId
3998
+ }),
3999
+ providerExecuted: true
4000
+ });
4001
+ }
4002
+ } else if (isResponseCreatedChunk(value)) {
4003
+ responseId = value.response.id;
4004
+ controller.enqueue({
4005
+ type: "response-metadata",
4006
+ id: value.response.id,
4007
+ timestamp: new Date(value.response.created_at * 1e3),
4008
+ modelId: value.response.model
4009
+ });
4010
+ } else if (isTextDeltaChunk(value)) {
4011
+ controller.enqueue({
4012
+ type: "text-delta",
4013
+ id: value.item_id,
4014
+ delta: value.delta
4015
+ });
4016
+ if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
4017
+ logprobs.push(value.logprobs);
4018
+ }
4019
+ } else if (value.type === "response.reasoning_summary_part.added") {
4020
+ if (value.summary_index > 0) {
4021
+ const activeReasoningPart = activeReasoning[value.item_id];
4022
+ activeReasoningPart.summaryParts[value.summary_index] = "active";
4023
+ for (const summaryIndex of Object.keys(
4024
+ activeReasoningPart.summaryParts
4025
+ )) {
4026
+ if (activeReasoningPart.summaryParts[summaryIndex] === "can-conclude") {
4027
+ controller.enqueue({
4028
+ type: "reasoning-end",
4029
+ id: `${value.item_id}:${summaryIndex}`,
4030
+ providerMetadata: { openai: { itemId: value.item_id } }
4031
+ });
4032
+ activeReasoningPart.summaryParts[summaryIndex] = "concluded";
4033
+ }
4034
+ }
4035
+ controller.enqueue({
4036
+ type: "reasoning-start",
4037
+ id: `${value.item_id}:${value.summary_index}`,
4038
+ providerMetadata: {
4039
+ openai: {
4040
+ itemId: value.item_id,
4041
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
4042
+ }
4043
+ }
4044
+ });
4045
+ }
4046
+ } else if (value.type === "response.reasoning_summary_text.delta") {
4047
+ controller.enqueue({
4048
+ type: "reasoning-delta",
4049
+ id: `${value.item_id}:${value.summary_index}`,
4050
+ delta: value.delta,
4051
+ providerMetadata: {
4052
+ openai: {
4053
+ itemId: value.item_id
4054
+ }
4055
+ }
4056
+ });
4057
+ } else if (value.type === "response.reasoning_summary_part.done") {
4058
+ if (store) {
4059
+ controller.enqueue({
4060
+ type: "reasoning-end",
4061
+ id: `${value.item_id}:${value.summary_index}`,
4062
+ providerMetadata: {
4063
+ openai: { itemId: value.item_id }
4064
+ }
4065
+ });
4066
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
4067
+ } else {
4068
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
4069
+ }
4070
+ } else if (isResponseFinishedChunk(value)) {
4071
+ finishReason = mapOpenAIResponseFinishReason({
4072
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
4073
+ hasFunctionCall
4074
+ });
4075
+ usage.inputTokens = value.response.usage.input_tokens;
4076
+ usage.outputTokens = value.response.usage.output_tokens;
4077
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
4078
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
4079
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
4080
+ if (typeof value.response.service_tier === "string") {
4081
+ serviceTier = value.response.service_tier;
4082
+ }
4083
+ } else if (isResponseAnnotationAddedChunk(value)) {
4084
+ ongoingAnnotations.push(value.annotation);
4085
+ if (value.annotation.type === "url_citation") {
4086
+ controller.enqueue({
4087
+ type: "source",
4088
+ sourceType: "url",
4089
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId(),
4090
+ url: value.annotation.url,
4091
+ title: value.annotation.title
4092
+ });
4093
+ } else if (value.annotation.type === "file_citation") {
4094
+ controller.enqueue({
4095
+ type: "source",
4096
+ sourceType: "document",
4097
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId(),
4098
+ mediaType: "text/plain",
4099
+ title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
4100
+ filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
4101
+ ...value.annotation.file_id ? {
4102
+ providerMetadata: {
4103
+ openai: {
4104
+ fileId: value.annotation.file_id
4105
+ }
4106
+ }
4107
+ } : {}
4108
+ });
4109
+ }
4110
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "message") {
4111
+ controller.enqueue({
4112
+ type: "text-end",
4113
+ id: value.item.id,
4114
+ providerMetadata: {
4115
+ openai: {
4116
+ itemId: value.item.id,
4117
+ ...ongoingAnnotations.length > 0 && {
4118
+ annotations: ongoingAnnotations
4119
+ }
4120
+ }
4121
+ }
4122
+ });
4123
+ } else if (isErrorChunk(value)) {
4124
+ controller.enqueue({ type: "error", error: value });
4125
+ }
4126
+ },
4127
+ flush(controller) {
4128
+ const providerMetadata = {
4129
+ openai: {
4130
+ responseId
4131
+ }
4132
+ };
4133
+ if (logprobs.length > 0) {
4134
+ providerMetadata.openai.logprobs = logprobs;
4135
+ }
4136
+ if (serviceTier !== void 0) {
4137
+ providerMetadata.openai.serviceTier = serviceTier;
4138
+ }
4139
+ controller.enqueue({
4140
+ type: "finish",
4141
+ finishReason,
4142
+ usage,
4143
+ providerMetadata
4144
+ });
4145
+ }
4146
+ })
4147
+ ),
4148
+ request: { body },
4149
+ response: { headers: responseHeaders }
4150
+ };
4151
+ }
4152
+ };
4153
+ function isTextDeltaChunk(chunk) {
4154
+ return chunk.type === "response.output_text.delta";
4155
+ }
4156
+ function isResponseOutputItemDoneChunk(chunk) {
4157
+ return chunk.type === "response.output_item.done";
4158
+ }
4159
+ function isResponseFinishedChunk(chunk) {
4160
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
4161
+ }
4162
+ function isResponseCreatedChunk(chunk) {
4163
+ return chunk.type === "response.created";
4164
+ }
4165
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
4166
+ return chunk.type === "response.function_call_arguments.delta";
4167
+ }
4168
+ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
4169
+ return chunk.type === "response.code_interpreter_call_code.delta";
4170
+ }
4171
+ function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
4172
+ return chunk.type === "response.code_interpreter_call_code.done";
4173
+ }
4174
+ function isResponseOutputItemAddedChunk(chunk) {
4175
+ return chunk.type === "response.output_item.added";
4176
+ }
4177
+ function isResponseAnnotationAddedChunk(chunk) {
4178
+ return chunk.type === "response.output_text.annotation.added";
4179
+ }
4180
+ function isErrorChunk(chunk) {
4181
+ return chunk.type === "error";
4182
+ }
4183
+ function getResponsesModelConfig(modelId) {
4184
+ const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
4185
+ const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
4186
+ const defaults = {
4187
+ systemMessageMode: "system",
4188
+ supportsFlexProcessing: supportsFlexProcessing2,
4189
+ supportsPriorityProcessing: supportsPriorityProcessing2
4190
+ };
4191
+ if (modelId.startsWith("gpt-5-chat")) {
4192
+ return {
4193
+ ...defaults,
4194
+ isReasoningModel: false
4195
+ };
4196
+ }
4197
+ if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
4198
+ return {
4199
+ ...defaults,
4200
+ isReasoningModel: true,
4201
+ systemMessageMode: "developer"
4202
+ };
4203
+ }
4204
+ return {
4205
+ ...defaults,
4206
+ isReasoningModel: false
4207
+ };
4208
+ }
4209
+ function mapWebSearchOutput(action) {
4210
+ var _a;
4211
+ switch (action.type) {
4212
+ case "search":
4213
+ return {
4214
+ action: { type: "search", query: (_a = action.query) != null ? _a : void 0 },
4215
+ // include sources when provided by the Responses API (behind include flag)
4216
+ ...action.sources != null && { sources: action.sources }
4217
+ };
4218
+ case "open_page":
4219
+ return { action: { type: "openPage", url: action.url } };
4220
+ case "find":
4221
+ return {
4222
+ action: { type: "find", url: action.url, pattern: action.pattern }
4223
+ };
4224
+ }
4225
+ }
4226
+
4227
+ // ../../node_modules/.pnpm/@ai-sdk+azure@2.0.74_zod@3.25.76/node_modules/@ai-sdk/azure/dist/index.mjs
4228
+ var azureOpenaiTools = {
4229
+ codeInterpreter,
4230
+ fileSearch,
4231
+ imageGeneration,
4232
+ webSearchPreview
4233
+ };
4234
+ var VERSION = "2.0.74" ;
4235
+ function createAzure(options = {}) {
4236
+ var _a;
4237
+ const getHeaders = () => {
4238
+ const baseHeaders = {
4239
+ "api-key": loadApiKey({
4240
+ apiKey: options.apiKey,
4241
+ environmentVariableName: "AZURE_API_KEY",
4242
+ description: "Azure OpenAI"
4243
+ }),
4244
+ ...options.headers
4245
+ };
4246
+ return withUserAgentSuffix(baseHeaders, `ai-sdk/azure/${VERSION}`);
4247
+ };
4248
+ const getResourceName = () => loadSetting({
4249
+ settingValue: options.resourceName,
4250
+ settingName: "resourceName",
4251
+ environmentVariableName: "AZURE_RESOURCE_NAME",
4252
+ description: "Azure OpenAI resource name"
4253
+ });
4254
+ const apiVersion = (_a = options.apiVersion) != null ? _a : "v1";
4255
+ const url = ({ path, modelId }) => {
4256
+ var _a2;
4257
+ const baseUrlPrefix = (_a2 = options.baseURL) != null ? _a2 : `https://${getResourceName()}.openai.azure.com/openai`;
4258
+ let fullUrl;
4259
+ if (options.useDeploymentBasedUrls) {
4260
+ fullUrl = new URL(`${baseUrlPrefix}/deployments/${modelId}${path}`);
4261
+ } else {
4262
+ fullUrl = new URL(`${baseUrlPrefix}/v1${path}`);
4263
+ }
4264
+ fullUrl.searchParams.set("api-version", apiVersion);
4265
+ return fullUrl.toString();
4266
+ };
4267
+ const createChatModel = (deploymentName) => new OpenAIChatLanguageModel(deploymentName, {
4268
+ provider: "azure.chat",
4269
+ url,
4270
+ headers: getHeaders,
4271
+ fetch: options.fetch
4272
+ });
4273
+ const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
4274
+ provider: "azure.completion",
4275
+ url,
4276
+ headers: getHeaders,
4277
+ fetch: options.fetch
4278
+ });
4279
+ const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
4280
+ provider: "azure.embeddings",
4281
+ headers: getHeaders,
4282
+ url,
4283
+ fetch: options.fetch
4284
+ });
4285
+ const createResponsesModel = (modelId) => new OpenAIResponsesLanguageModel(modelId, {
4286
+ provider: "azure.responses",
4287
+ url,
4288
+ headers: getHeaders,
4289
+ fetch: options.fetch,
4290
+ fileIdPrefixes: ["assistant-"]
4291
+ });
4292
+ const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
4293
+ provider: "azure.image",
4294
+ url,
4295
+ headers: getHeaders,
4296
+ fetch: options.fetch
4297
+ });
4298
+ const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
4299
+ provider: "azure.transcription",
4300
+ url,
4301
+ headers: getHeaders,
4302
+ fetch: options.fetch
4303
+ });
4304
+ const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
4305
+ provider: "azure.speech",
4306
+ url,
4307
+ headers: getHeaders,
4308
+ fetch: options.fetch
4309
+ });
4310
+ const provider = function(deploymentId) {
4311
+ if (new.target) {
4312
+ throw new Error(
4313
+ "The Azure OpenAI model function cannot be called with the new keyword."
4314
+ );
4315
+ }
4316
+ return createChatModel(deploymentId);
4317
+ };
4318
+ provider.languageModel = createChatModel;
4319
+ provider.chat = createChatModel;
4320
+ provider.completion = createCompletionModel;
4321
+ provider.embedding = createEmbeddingModel;
4322
+ provider.image = createImageModel;
4323
+ provider.imageModel = createImageModel;
4324
+ provider.textEmbedding = createEmbeddingModel;
4325
+ provider.textEmbeddingModel = createEmbeddingModel;
4326
+ provider.responses = createResponsesModel;
4327
+ provider.transcription = createTranscriptionModel;
4328
+ provider.speech = createSpeechModel;
4329
+ provider.tools = azureOpenaiTools;
4330
+ return provider;
4331
+ }
4332
+ createAzure();
4333
+
4334
+ // src/llm/model/gateways/azure.ts
4335
+ var AzureOpenAIGateway = class extends MastraModelGateway {
4336
+ constructor(config) {
4337
+ super();
4338
+ this.config = config;
4339
+ this.validateConfig();
4340
+ }
4341
+ id = "azure-openai";
4342
+ name = "azure-openai";
4343
+ tokenCache = new InMemoryServerCache();
4344
+ validateConfig() {
4345
+ if (!this.config.resourceName) {
4346
+ throw new MastraError({
4347
+ id: "AZURE_GATEWAY_INVALID_CONFIG",
4348
+ domain: "LLM",
4349
+ category: "UNKNOWN",
4350
+ text: "resourceName is required for Azure OpenAI gateway"
4351
+ });
4352
+ }
4353
+ if (!this.config.apiKey) {
4354
+ throw new MastraError({
4355
+ id: "AZURE_GATEWAY_INVALID_CONFIG",
4356
+ domain: "LLM",
4357
+ category: "UNKNOWN",
4358
+ text: "apiKey is required for Azure OpenAI gateway"
4359
+ });
4360
+ }
4361
+ const hasDeployments = this.config.deployments && this.config.deployments.length > 0;
4362
+ const hasManagement = this.config.management !== void 0;
4363
+ if (hasDeployments && hasManagement) {
4364
+ console.warn(
4365
+ "[AzureOpenAIGateway] Both deployments and management credentials provided. Using static deployments list and ignoring management API."
4366
+ );
4367
+ }
4368
+ if (hasManagement) {
4369
+ this.getManagementCredentials(this.config.management);
4370
+ }
4371
+ }
4372
+ async fetchProviders() {
4373
+ if (this.config.deployments && this.config.deployments.length > 0) {
4374
+ return {
4375
+ "azure-openai": {
4376
+ apiKeyEnvVar: [],
4377
+ apiKeyHeader: "api-key",
4378
+ name: "Azure OpenAI",
4379
+ models: this.config.deployments,
4380
+ docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
4381
+ gateway: "azure-openai"
4382
+ }
4383
+ };
4384
+ }
4385
+ if (!this.config.management) {
4386
+ return {
4387
+ "azure-openai": {
4388
+ apiKeyEnvVar: [],
4389
+ apiKeyHeader: "api-key",
4390
+ name: "Azure OpenAI",
4391
+ models: [],
4392
+ docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
4393
+ gateway: "azure-openai"
4394
+ }
4395
+ };
4396
+ }
4397
+ try {
4398
+ const credentials = this.getManagementCredentials(this.config.management);
4399
+ const token = await this.getAzureADToken({
4400
+ tenantId: credentials.tenantId,
4401
+ clientId: credentials.clientId,
4402
+ clientSecret: credentials.clientSecret
4403
+ });
4404
+ const deployments = await this.fetchDeployments(token, {
4405
+ subscriptionId: credentials.subscriptionId,
4406
+ resourceGroup: credentials.resourceGroup,
4407
+ resourceName: this.config.resourceName
4408
+ });
4409
+ return {
4410
+ "azure-openai": {
4411
+ apiKeyEnvVar: [],
4412
+ apiKeyHeader: "api-key",
4413
+ name: "Azure OpenAI",
4414
+ models: deployments.map((d) => d.name),
4415
+ docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
4416
+ gateway: "azure-openai"
4417
+ }
4418
+ };
4419
+ } catch (error) {
4420
+ const errorMsg = error instanceof Error ? error.message : String(error);
4421
+ console.warn(
4422
+ `[AzureOpenAIGateway] Deployment discovery failed: ${errorMsg}`,
4423
+ "\nReturning fallback configuration. Azure OpenAI can still be used by manually specifying deployment names."
4424
+ );
4425
+ return {
4426
+ "azure-openai": {
4427
+ apiKeyEnvVar: [],
4428
+ apiKeyHeader: "api-key",
4429
+ name: "Azure OpenAI",
4430
+ models: [],
4431
+ docUrl: "https://learn.microsoft.com/en-us/azure/ai-services/openai/",
4432
+ gateway: "azure-openai"
4433
+ }
4434
+ };
4435
+ }
4436
+ }
4437
+ getManagementCredentials(management) {
4438
+ const { tenantId, clientId, clientSecret, subscriptionId, resourceGroup } = management;
4439
+ const missing = [];
4440
+ if (!tenantId) missing.push("tenantId");
4441
+ if (!clientId) missing.push("clientId");
4442
+ if (!clientSecret) missing.push("clientSecret");
4443
+ if (!subscriptionId) missing.push("subscriptionId");
4444
+ if (!resourceGroup) missing.push("resourceGroup");
4445
+ if (missing.length > 0) {
4446
+ throw new MastraError({
4447
+ id: "AZURE_MANAGEMENT_CREDENTIALS_MISSING",
4448
+ domain: "LLM",
4449
+ category: "UNKNOWN",
4450
+ text: `Management credentials incomplete. Missing: ${missing.join(", ")}. Required fields: tenantId, clientId, clientSecret, subscriptionId, resourceGroup.`
4451
+ });
4452
+ }
4453
+ return {
4454
+ tenantId,
4455
+ clientId,
4456
+ clientSecret,
4457
+ subscriptionId,
4458
+ resourceGroup
4459
+ };
4460
+ }
4461
+ async getAzureADToken(credentials) {
4462
+ const { tenantId, clientId, clientSecret } = credentials;
4463
+ const cacheKey = `azure-mgmt-token:${tenantId}:${clientId}`;
4464
+ const cached = await this.tokenCache.get(cacheKey);
4465
+ if (cached && cached.expiresAt > Date.now() / 1e3 + 60) {
4466
+ return cached.token;
4467
+ }
4468
+ const tokenEndpoint = `https://login.microsoftonline.com/${tenantId}/oauth2/v2.0/token`;
4469
+ const body = new URLSearchParams({
4470
+ grant_type: "client_credentials",
4471
+ client_id: clientId,
4472
+ client_secret: clientSecret,
4473
+ scope: "https://management.azure.com/.default"
4474
+ });
4475
+ const response = await fetch(tokenEndpoint, {
4476
+ method: "POST",
4477
+ headers: {
4478
+ "Content-Type": "application/x-www-form-urlencoded"
4479
+ },
4480
+ body: body.toString()
4481
+ });
4482
+ if (!response.ok) {
4483
+ const error = await response.text();
4484
+ throw new MastraError({
4485
+ id: "AZURE_AD_TOKEN_ERROR",
4486
+ domain: "LLM",
4487
+ category: "UNKNOWN",
4488
+ text: `Failed to get Azure AD token: ${response.status} ${error}`
4489
+ });
4490
+ }
4491
+ const tokenResponse = await response.json();
4492
+ const expiresAt = Math.floor(Date.now() / 1e3) + tokenResponse.expires_in;
4493
+ await this.tokenCache.set(cacheKey, {
4494
+ token: tokenResponse.access_token,
4495
+ expiresAt
4496
+ });
4497
+ return tokenResponse.access_token;
4498
+ }
4499
+ async fetchDeployments(token, credentials) {
4500
+ const { subscriptionId, resourceGroup, resourceName } = credentials;
4501
+ let url = `https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.CognitiveServices/accounts/${resourceName}/deployments?api-version=2024-10-01`;
4502
+ const allDeployments = [];
4503
+ while (url) {
4504
+ const response = await fetch(url, {
4505
+ headers: {
4506
+ Authorization: `Bearer ${token}`,
4507
+ "Content-Type": "application/json"
4508
+ }
4509
+ });
4510
+ if (!response.ok) {
4511
+ const error = await response.text();
4512
+ throw new MastraError({
4513
+ id: "AZURE_DEPLOYMENTS_FETCH_ERROR",
4514
+ domain: "LLM",
4515
+ category: "UNKNOWN",
4516
+ text: `Failed to fetch Azure deployments: ${response.status} ${error}`
4517
+ });
4518
+ }
4519
+ const data = await response.json();
4520
+ allDeployments.push(...data.value);
4521
+ url = data.nextLink;
4522
+ }
4523
+ const successfulDeployments = allDeployments.filter((d) => d.properties.provisioningState === "Succeeded");
4524
+ return successfulDeployments;
4525
+ }
4526
+ buildUrl(_routerId, _envVars) {
4527
+ return void 0;
4528
+ }
4529
+ async getApiKey(_modelId) {
4530
+ return this.config.apiKey;
4531
+ }
4532
+ async resolveLanguageModel({
4533
+ modelId,
4534
+ apiKey
4535
+ }) {
4536
+ const apiVersion = this.config.apiVersion || "2024-04-01-preview";
4537
+ return createAzure({
4538
+ resourceName: this.config.resourceName,
4539
+ apiKey,
4540
+ apiVersion,
4541
+ useDeploymentBasedUrls: true
4542
+ })(modelId);
4543
+ }
4544
+ };
4545
+
4546
+ // src/llm/model/gateways/index.ts
4547
+ function findGatewayForModel(gatewayId, gateways) {
4548
+ const prefixedGateway = gateways.find(
4549
+ (g) => g.id !== "models.dev" && (g.id === gatewayId || gatewayId.startsWith(`${g.id}/`))
4550
+ );
4551
+ if (prefixedGateway) {
4552
+ return prefixedGateway;
4553
+ }
4554
+ const modelsDevGateway = gateways.find((g) => g.id === "models.dev");
4555
+ if (modelsDevGateway) {
4556
+ return modelsDevGateway;
4557
+ }
4558
+ throw new MastraError({
4559
+ id: "MODEL_ROUTER_NO_GATEWAY_FOUND",
4560
+ category: "USER",
4561
+ domain: "MODEL_ROUTER",
4562
+ text: `No Mastra model router gateway found for model id ${gatewayId}`
4563
+ });
4564
+ }
4565
+
4566
+ // src/llm/model/router.ts
4567
+ function getStaticProvidersByGateway(name) {
4568
+ return Object.fromEntries(Object.entries(PROVIDER_REGISTRY).filter(([_provider, config]) => config.gateway === name));
4569
+ }
4570
+ var defaultGateways = [new NetlifyGateway(), new ModelsDevGateway(getStaticProvidersByGateway(`models.dev`))];
4571
+ var ModelRouterLanguageModel = class _ModelRouterLanguageModel {
4572
+ specificationVersion = "v2";
4573
+ defaultObjectGenerationMode = "json";
4574
+ supportsStructuredOutputs = true;
4575
+ supportsImageUrls = true;
4576
+ supportedUrls = {};
4577
+ modelId;
4578
+ provider;
4579
+ config;
4580
+ gateway;
4581
+ constructor(config, customGateways) {
4582
+ let normalizedConfig;
4583
+ if (typeof config === "string") {
4584
+ normalizedConfig = { id: config };
4585
+ } else if ("providerId" in config && "modelId" in config) {
4586
+ normalizedConfig = {
4587
+ id: `${config.providerId}/${config.modelId}`,
4588
+ url: config.url,
4589
+ apiKey: config.apiKey,
4590
+ headers: config.headers
4591
+ };
4592
+ } else {
4593
+ normalizedConfig = {
4594
+ id: config.id,
4595
+ url: config.url,
4596
+ apiKey: config.apiKey,
4597
+ headers: config.headers
4598
+ };
4599
+ }
4600
+ const parsedConfig = {
4601
+ ...normalizedConfig,
4602
+ routerId: normalizedConfig.id
4603
+ };
4604
+ this.gateway = findGatewayForModel(normalizedConfig.id, [...customGateways || [], ...defaultGateways]);
4605
+ const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
4606
+ const parsed = parseModelRouterId(normalizedConfig.id, gatewayPrefix);
4607
+ this.provider = parsed.providerId || "openai-compatible";
4608
+ if (parsed.providerId && parsed.modelId !== normalizedConfig.id) {
4609
+ parsedConfig.id = parsed.modelId;
4610
+ }
4611
+ this.modelId = parsedConfig.id;
4612
+ this.config = parsedConfig;
4613
+ }
4614
+ async doGenerate(options) {
4615
+ let apiKey;
4616
+ try {
4617
+ if (this.config.url) {
4618
+ apiKey = this.config.apiKey || "";
4619
+ } else {
4620
+ apiKey = this.config.apiKey || await this.gateway.getApiKey(this.config.routerId);
4621
+ }
4622
+ } catch (error) {
4623
+ return {
4624
+ stream: new ReadableStream({
4625
+ start(controller) {
4626
+ controller.enqueue({
4627
+ type: "error",
4628
+ error
4629
+ });
4630
+ controller.close();
4631
+ }
4632
+ })
4633
+ };
4634
+ }
4635
+ const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
4636
+ const model = await this.resolveLanguageModel({
4637
+ apiKey,
4638
+ headers: this.config.headers,
4639
+ ...parseModelRouterId(this.config.routerId, gatewayPrefix)
4640
+ });
4641
+ const aiSDKV5Model = new AISDKV5LanguageModel(model);
4642
+ return aiSDKV5Model.doGenerate(options);
4643
+ }
4644
+ async doStream(options) {
4645
+ let apiKey;
4646
+ try {
4647
+ if (this.config.url) {
4648
+ apiKey = this.config.apiKey || "";
4649
+ } else {
4650
+ apiKey = this.config.apiKey || await this.gateway.getApiKey(this.config.routerId);
4651
+ }
4652
+ } catch (error) {
4653
+ return {
4654
+ stream: new ReadableStream({
4655
+ start(controller) {
4656
+ controller.enqueue({
4657
+ type: "error",
4658
+ error
4659
+ });
4660
+ controller.close();
4661
+ }
4662
+ })
4663
+ };
4664
+ }
4665
+ const gatewayPrefix = this.gateway.id === "models.dev" ? void 0 : this.gateway.id;
4666
+ const model = await this.resolveLanguageModel({
4667
+ apiKey,
4668
+ headers: this.config.headers,
4669
+ ...parseModelRouterId(this.config.routerId, gatewayPrefix)
4670
+ });
4671
+ const aiSDKV5Model = new AISDKV5LanguageModel(model);
4672
+ return aiSDKV5Model.doStream(options);
4673
+ }
4674
+ async resolveLanguageModel({
4675
+ modelId,
4676
+ providerId,
4677
+ apiKey,
4678
+ headers
4679
+ }) {
4680
+ const key = createHash("sha256").update(
4681
+ this.gateway.id + modelId + providerId + apiKey + (this.config.url || "") + (headers ? JSON.stringify(headers) : "")
4682
+ ).digest("hex");
4683
+ if (_ModelRouterLanguageModel.modelInstances.has(key)) return _ModelRouterLanguageModel.modelInstances.get(key);
4684
+ if (this.config.url) {
4685
+ const modelInstance2 = createOpenAICompatible({
4686
+ name: providerId,
4687
+ apiKey,
4688
+ baseURL: this.config.url,
4689
+ headers: this.config.headers,
4690
+ supportsStructuredOutputs: true
4691
+ }).chatModel(modelId);
4692
+ _ModelRouterLanguageModel.modelInstances.set(key, modelInstance2);
4693
+ return modelInstance2;
4694
+ }
4695
+ const modelInstance = await this.gateway.resolveLanguageModel({ modelId, providerId, apiKey, headers });
4696
+ _ModelRouterLanguageModel.modelInstances.set(key, modelInstance);
4697
+ return modelInstance;
4698
+ }
4699
+ static modelInstances = /* @__PURE__ */ new Map();
4700
+ };
4701
+
4702
+ // src/llm/model/aisdk/v6/model.ts
4703
+ var AISDKV6LanguageModel = class {
4704
+ /**
4705
+ * The language model must specify which language model interface version it implements.
4706
+ */
4707
+ specificationVersion = "v3";
4708
+ /**
4709
+ * Name of the provider for logging purposes.
4710
+ */
4711
+ provider;
4712
+ /**
4713
+ * Provider-specific model ID for logging purposes.
4714
+ */
4715
+ modelId;
4716
+ /**
4717
+ * Supported URL patterns by media type for the provider.
4718
+ *
4719
+ * The keys are media type patterns or full media types (e.g. `*\/*` for everything, `audio/*`, `video/*`, or `application/pdf`).
4720
+ * and the values are arrays of regular expressions that match the URL paths.
4721
+ * The matching should be against lower-case URLs.
4722
+ * Matched URLs are supported natively by the model and are not downloaded.
4723
+ * @returns A map of supported URL patterns by media type (as a promise or a plain object).
4724
+ */
4725
+ supportedUrls;
4726
+ #model;
4727
+ constructor(config) {
4728
+ this.#model = config;
4729
+ this.provider = this.#model.provider;
4730
+ this.modelId = this.#model.modelId;
4731
+ this.supportedUrls = this.#model.supportedUrls;
4732
+ }
4733
+ async doGenerate(options) {
4734
+ const result = await this.#model.doGenerate(options);
4735
+ return {
4736
+ request: result.request,
4737
+ response: result.response,
4738
+ stream: createStreamFromGenerateResult(result)
4739
+ };
4740
+ }
4741
+ async doStream(options) {
4742
+ return await this.#model.doStream(options);
4743
+ }
4744
+ };
4745
+
4746
+ // src/llm/model/resolve-model.ts
4747
+ function isOpenAICompatibleObjectConfig(modelConfig) {
4748
+ if (typeof modelConfig === "object" && "specificationVersion" in modelConfig) return false;
4749
+ if (typeof modelConfig === "object" && !("model" in modelConfig)) {
4750
+ if ("id" in modelConfig) return true;
4751
+ if ("providerId" in modelConfig && "modelId" in modelConfig) return true;
4752
+ }
4753
+ return false;
4754
+ }
4755
+ async function resolveModelConfig(modelConfig, requestContext = new RequestContext(), mastra) {
4756
+ if (typeof modelConfig === "function") {
4757
+ modelConfig = await modelConfig({ requestContext, mastra });
4758
+ }
4759
+ if (modelConfig instanceof ModelRouterLanguageModel || modelConfig instanceof AISDKV5LanguageModel || modelConfig instanceof AISDKV6LanguageModel) {
4760
+ return modelConfig;
4761
+ }
4762
+ if (typeof modelConfig === "object" && "specificationVersion" in modelConfig) {
4763
+ if (modelConfig.specificationVersion === "v2") {
4764
+ return new AISDKV5LanguageModel(modelConfig);
4765
+ }
4766
+ if (modelConfig.specificationVersion === "v3") {
4767
+ return new AISDKV6LanguageModel(modelConfig);
4768
+ }
4769
+ return modelConfig;
4770
+ }
4771
+ const gatewayRecord = mastra?.listGateways();
4772
+ const customGateways = gatewayRecord ? Object.values(gatewayRecord) : void 0;
4773
+ if (typeof modelConfig === "string" || isOpenAICompatibleObjectConfig(modelConfig)) {
4774
+ return new ModelRouterLanguageModel(modelConfig, customGateways);
4775
+ }
4776
+ throw new Error("Invalid model configuration provided");
4777
+ }
4778
+
4779
+ // src/llm/model/embedding-router.ts
4780
+ var ModelRouterEmbeddingModel = class {
4781
+ specificationVersion = "v2";
4782
+ modelId;
4783
+ provider;
4784
+ maxEmbeddingsPerCall = 2048;
4785
+ supportsParallelCalls = true;
4786
+ providerModel;
4787
+ constructor(config) {
4788
+ let normalizedConfig;
4789
+ if (typeof config === "string") {
4790
+ const parts = config.split("/");
4791
+ if (parts.length !== 2) {
4792
+ throw new Error(`Invalid model string format: "${config}". Expected format: "provider/model"`);
4793
+ }
4794
+ const [providerId, modelId] = parts;
4795
+ normalizedConfig = { providerId, modelId };
4796
+ } else if ("providerId" in config && "modelId" in config) {
4797
+ normalizedConfig = {
4798
+ providerId: config.providerId,
4799
+ modelId: config.modelId,
4800
+ url: config.url,
4801
+ apiKey: config.apiKey,
4802
+ headers: config.headers
4803
+ };
4804
+ } else {
4805
+ const parts = config.id.split("/");
4806
+ if (parts.length !== 2) {
4807
+ throw new Error(`Invalid model string format: "${config.id}". Expected format: "provider/model"`);
4808
+ }
4809
+ const [providerId, modelId] = parts;
4810
+ normalizedConfig = {
4811
+ providerId,
4812
+ modelId,
4813
+ url: config.url,
4814
+ apiKey: config.apiKey,
4815
+ headers: config.headers
4816
+ };
4817
+ }
4818
+ this.provider = normalizedConfig.providerId;
4819
+ this.modelId = normalizedConfig.modelId;
4820
+ if (normalizedConfig.url) {
4821
+ const apiKey = normalizedConfig.apiKey || "";
4822
+ this.providerModel = createOpenAICompatible({
4823
+ name: normalizedConfig.providerId,
4824
+ apiKey,
4825
+ baseURL: normalizedConfig.url,
4826
+ headers: normalizedConfig.headers
4827
+ }).textEmbeddingModel(normalizedConfig.modelId);
4828
+ } else {
4829
+ const registry = GatewayRegistry.getInstance();
4830
+ const providerConfig = registry.getProviderConfig(normalizedConfig.providerId);
4831
+ if (!providerConfig) {
4832
+ throw new Error(`Unknown provider: ${normalizedConfig.providerId}`);
4833
+ }
4834
+ let apiKey = normalizedConfig.apiKey;
4835
+ if (!apiKey) {
4836
+ const apiKeyEnvVar = providerConfig.apiKeyEnvVar;
4837
+ if (Array.isArray(apiKeyEnvVar)) {
4838
+ for (const envVar of apiKeyEnvVar) {
4839
+ apiKey = process.env[envVar];
4840
+ if (apiKey) break;
4841
+ }
4842
+ } else {
4843
+ apiKey = process.env[apiKeyEnvVar];
4844
+ }
4845
+ }
4846
+ if (!apiKey) {
4847
+ const envVarDisplay = Array.isArray(providerConfig.apiKeyEnvVar) ? providerConfig.apiKeyEnvVar.join(" or ") : providerConfig.apiKeyEnvVar;
4848
+ throw new Error(`API key not found for provider ${normalizedConfig.providerId}. Set ${envVarDisplay}`);
4849
+ }
4850
+ if (normalizedConfig.providerId === "openai") {
4851
+ this.providerModel = createOpenAI({ apiKey }).textEmbeddingModel(
4852
+ normalizedConfig.modelId
4853
+ );
4854
+ } else if (normalizedConfig.providerId === "google") {
4855
+ this.providerModel = createGoogleGenerativeAI({ apiKey }).textEmbedding(
4856
+ normalizedConfig.modelId
4857
+ );
4858
+ } else {
4859
+ if (!providerConfig.url) {
4860
+ throw new Error(`Provider ${normalizedConfig.providerId} does not have a URL configured`);
4861
+ }
4862
+ this.providerModel = createOpenAICompatible({
4863
+ name: normalizedConfig.providerId,
4864
+ apiKey,
4865
+ baseURL: providerConfig.url
4866
+ }).textEmbeddingModel(normalizedConfig.modelId);
4867
+ }
4868
+ }
4869
+ if (this.providerModel.maxEmbeddingsPerCall !== void 0) {
4870
+ this.maxEmbeddingsPerCall = this.providerModel.maxEmbeddingsPerCall;
4871
+ }
4872
+ if (this.providerModel.supportsParallelCalls !== void 0) {
4873
+ this.supportsParallelCalls = this.providerModel.supportsParallelCalls;
4874
+ }
4875
+ }
4876
+ async doEmbed(args) {
4877
+ return this.providerModel.doEmbed(args);
4878
+ }
4879
+ };
4880
+
4881
+ export { AzureOpenAIGateway, ModelRouterEmbeddingModel, ModelRouterLanguageModel, resolveModelConfig };
4882
//# sourceMappingURL=chunk-QDVYP2T7.js.map