@mastra/core 1.0.0-beta.2 → 1.0.0-beta.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (313)
  1. package/CHANGELOG.md +206 -0
  2. package/dist/agent/agent-legacy.d.ts +2 -2
  3. package/dist/agent/agent-legacy.d.ts.map +1 -1
  4. package/dist/agent/agent.d.ts +1 -1
  5. package/dist/agent/agent.d.ts.map +1 -1
  6. package/dist/agent/agent.types.d.ts +3 -2
  7. package/dist/agent/agent.types.d.ts.map +1 -1
  8. package/dist/agent/index.cjs +9 -9
  9. package/dist/agent/index.js +2 -2
  10. package/dist/agent/message-list/index.cjs +3 -3
  11. package/dist/agent/message-list/index.d.ts +5 -0
  12. package/dist/agent/message-list/index.d.ts.map +1 -1
  13. package/dist/agent/message-list/index.js +1 -1
  14. package/dist/agent/message-list/prompt/convert-file.d.ts +1 -1
  15. package/dist/agent/message-list/prompt/convert-file.d.ts.map +1 -1
  16. package/dist/agent/message-list/prompt/download-assets.d.ts.map +1 -1
  17. package/dist/agent/types.d.ts +1 -0
  18. package/dist/agent/types.d.ts.map +1 -1
  19. package/dist/agent/utils.d.ts.map +1 -1
  20. package/dist/agent/workflows/prepare-stream/index.d.ts +2 -1
  21. package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
  22. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts +3 -1
  23. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
  24. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts +2 -1
  25. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts.map +1 -1
  26. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts +2 -1
  27. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
  28. package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -1
  29. package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
  30. package/dist/{chunk-YCVEJ3UN.cjs → chunk-2NVBZKZI.cjs} +988 -233
  31. package/dist/chunk-2NVBZKZI.cjs.map +1 -0
  32. package/dist/{chunk-SNPVZPLB.js → chunk-2OTDXX73.js} +6 -5
  33. package/dist/chunk-2OTDXX73.js.map +1 -0
  34. package/dist/chunk-3PSWNGBF.js +3 -0
  35. package/dist/{chunk-ZV5CC35D.js.map → chunk-3PSWNGBF.js.map} +1 -1
  36. package/dist/{chunk-ZGHTOYHW.js → chunk-3RW5EMSB.js} +155 -24
  37. package/dist/chunk-3RW5EMSB.js.map +1 -0
  38. package/dist/chunk-3W5RQCCY.cjs +440 -0
  39. package/dist/chunk-3W5RQCCY.cjs.map +1 -0
  40. package/dist/{chunk-ET6UOTTU.cjs → chunk-4IKJAKCD.cjs} +40 -4
  41. package/dist/chunk-4IKJAKCD.cjs.map +1 -0
  42. package/dist/{chunk-2ZVKF4HP.cjs → chunk-4RXG622P.cjs} +184 -285
  43. package/dist/chunk-4RXG622P.cjs.map +1 -0
  44. package/dist/{chunk-22443P6A.cjs → chunk-5WXEYDFI.cjs} +173 -42
  45. package/dist/chunk-5WXEYDFI.cjs.map +1 -0
  46. package/dist/{chunk-ZWNI5IWX.cjs → chunk-CYVNOIXS.cjs} +13 -12
  47. package/dist/chunk-CYVNOIXS.cjs.map +1 -0
  48. package/dist/{chunk-4CDL2QJT.js → chunk-D6EDHNGV.js} +53 -16
  49. package/dist/chunk-D6EDHNGV.js.map +1 -0
  50. package/dist/chunk-FVNT7VTO.js +436 -0
  51. package/dist/chunk-FVNT7VTO.js.map +1 -0
  52. package/dist/{chunk-WM6CK2F3.cjs → chunk-HBJPYQRN.cjs} +57 -19
  53. package/dist/chunk-HBJPYQRN.cjs.map +1 -0
  54. package/dist/{chunk-HDJFSJCK.js → chunk-IHJDOC3A.js} +35 -587
  55. package/dist/chunk-IHJDOC3A.js.map +1 -0
  56. package/dist/{chunk-QUKUN6NR.cjs → chunk-ISMGVGUM.cjs} +105 -5
  57. package/dist/chunk-ISMGVGUM.cjs.map +1 -0
  58. package/dist/{chunk-JYYQQEBH.cjs → chunk-IWB65P37.cjs} +241 -4
  59. package/dist/chunk-IWB65P37.cjs.map +1 -0
  60. package/dist/{chunk-MV7KHWUT.js → chunk-IWQDBVJK.js} +25 -4
  61. package/dist/chunk-IWQDBVJK.js.map +1 -0
  62. package/dist/{chunk-XEVG546F.js → chunk-JXESKY4A.js} +3 -3
  63. package/dist/{chunk-XEVG546F.js.map → chunk-JXESKY4A.js.map} +1 -1
  64. package/dist/{chunk-7AHYOMHJ.js → chunk-KEURQGCQ.js} +40 -5
  65. package/dist/chunk-KEURQGCQ.js.map +1 -0
  66. package/dist/{chunk-JPGVRWWL.js → chunk-MDKPL2R2.js} +470 -124
  67. package/dist/chunk-MDKPL2R2.js.map +1 -0
  68. package/dist/{chunk-UIZSWUKP.js → chunk-NZAXAFI3.js} +104 -6
  69. package/dist/chunk-NZAXAFI3.js.map +1 -0
  70. package/dist/{chunk-VOY2RXOC.cjs → chunk-O6NA3Z43.cjs} +6 -6
  71. package/dist/{chunk-VOY2RXOC.cjs.map → chunk-O6NA3Z43.cjs.map} +1 -1
  72. package/dist/chunk-PE3V7GUL.cjs +4 -0
  73. package/dist/{chunk-LJFJTTZQ.cjs.map → chunk-PE3V7GUL.cjs.map} +1 -1
  74. package/dist/{chunk-I4CXL4SR.js → chunk-RXDJL5QT.js} +5 -4
  75. package/dist/chunk-RXDJL5QT.js.map +1 -0
  76. package/dist/{chunk-LWBQ4P4N.cjs → chunk-S6OEQHEI.cjs} +62 -614
  77. package/dist/chunk-S6OEQHEI.cjs.map +1 -0
  78. package/dist/{chunk-7PO6SEJF.js → chunk-U7VECK2G.js} +240 -3
  79. package/dist/chunk-U7VECK2G.js.map +1 -0
  80. package/dist/{chunk-ECFXGXWO.cjs → chunk-VSM3NLUX.cjs} +312 -210
  81. package/dist/chunk-VSM3NLUX.cjs.map +1 -0
  82. package/dist/{chunk-IQO7ANVS.cjs → chunk-VZC4BWWH.cjs} +10 -9
  83. package/dist/chunk-VZC4BWWH.cjs.map +1 -0
  84. package/dist/{chunk-JV2KH24V.js → chunk-W3DD3XP5.js} +312 -210
  85. package/dist/chunk-W3DD3XP5.js.map +1 -0
  86. package/dist/{chunk-GGYKYORQ.cjs → chunk-WQSGX6XA.cjs} +27 -6
  87. package/dist/chunk-WQSGX6XA.cjs.map +1 -0
  88. package/dist/{chunk-W7UH2PWL.js → chunk-WTYNK7Q4.js} +179 -282
  89. package/dist/chunk-WTYNK7Q4.js.map +1 -0
  90. package/dist/{chunk-7CBEP2ZQ.js → chunk-XXBWX7DT.js} +949 -197
  91. package/dist/chunk-XXBWX7DT.js.map +1 -0
  92. package/dist/{chunk-CB575O6L.cjs → chunk-ZCVTH3CH.cjs} +470 -130
  93. package/dist/chunk-ZCVTH3CH.cjs.map +1 -0
  94. package/dist/evals/base.d.ts.map +1 -1
  95. package/dist/evals/index.cjs +4 -4
  96. package/dist/evals/index.js +1 -1
  97. package/dist/evals/scoreTraces/index.cjs +5 -4
  98. package/dist/evals/scoreTraces/index.cjs.map +1 -1
  99. package/dist/evals/scoreTraces/index.js +3 -2
  100. package/dist/evals/scoreTraces/index.js.map +1 -1
  101. package/dist/evals/scoreTraces/scoreTracesWorkflow.d.ts.map +1 -1
  102. package/dist/index.cjs +2 -2
  103. package/dist/index.js +1 -1
  104. package/dist/integration/index.cjs +2 -2
  105. package/dist/integration/index.js +1 -1
  106. package/dist/llm/index.cjs +26 -10
  107. package/dist/llm/index.d.ts +1 -0
  108. package/dist/llm/index.d.ts.map +1 -1
  109. package/dist/llm/index.js +5 -1
  110. package/dist/llm/model/aisdk/v5/model.d.ts +47 -0
  111. package/dist/llm/model/aisdk/v5/model.d.ts.map +1 -0
  112. package/dist/llm/model/gateways/base.d.ts +8 -0
  113. package/dist/llm/model/gateways/base.d.ts.map +1 -1
  114. package/dist/llm/model/gateways/models-dev.d.ts +1 -0
  115. package/dist/llm/model/gateways/models-dev.d.ts.map +1 -1
  116. package/dist/llm/model/gateways/netlify.d.ts +2 -1
  117. package/dist/llm/model/gateways/netlify.d.ts.map +1 -1
  118. package/dist/llm/model/is-v2-model.d.ts +3 -0
  119. package/dist/llm/model/is-v2-model.d.ts.map +1 -0
  120. package/dist/llm/model/model-method-from-agent.d.ts +4 -0
  121. package/dist/llm/model/model-method-from-agent.d.ts.map +1 -0
  122. package/dist/llm/model/model.loop.d.ts +2 -2
  123. package/dist/llm/model/model.loop.d.ts.map +1 -1
  124. package/dist/llm/model/model.loop.types.d.ts +2 -0
  125. package/dist/llm/model/model.loop.types.d.ts.map +1 -1
  126. package/dist/llm/model/provider-registry.d.ts +11 -1
  127. package/dist/llm/model/provider-registry.d.ts.map +1 -1
  128. package/dist/llm/model/provider-types.generated.d.ts +56 -7
  129. package/dist/llm/model/registry-generator.d.ts.map +1 -1
  130. package/dist/llm/model/resolve-model.d.ts +1 -1
  131. package/dist/llm/model/resolve-model.d.ts.map +1 -1
  132. package/dist/llm/model/router.d.ts +12 -5
  133. package/dist/llm/model/router.d.ts.map +1 -1
  134. package/dist/llm/model/shared.types.d.ts +10 -4
  135. package/dist/llm/model/shared.types.d.ts.map +1 -1
  136. package/dist/loop/index.cjs +2 -2
  137. package/dist/loop/index.js +1 -1
  138. package/dist/loop/network/index.d.ts.map +1 -1
  139. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts +37 -0
  140. package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts.map +1 -0
  141. package/dist/loop/test-utils/fullStream.d.ts.map +1 -1
  142. package/dist/loop/test-utils/generateText.d.ts.map +1 -1
  143. package/dist/loop/test-utils/options.d.ts.map +1 -1
  144. package/dist/loop/test-utils/resultObject.d.ts.map +1 -1
  145. package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
  146. package/dist/loop/test-utils/textStream.d.ts.map +1 -1
  147. package/dist/loop/test-utils/toUIMessageStream.d.ts.map +1 -1
  148. package/dist/loop/test-utils/tools.d.ts.map +1 -1
  149. package/dist/loop/test-utils/utils.d.ts +1 -1
  150. package/dist/loop/test-utils/utils.d.ts.map +1 -1
  151. package/dist/loop/types.d.ts +8 -3
  152. package/dist/loop/types.d.ts.map +1 -1
  153. package/dist/loop/workflows/agentic-execution/index.d.ts +12 -12
  154. package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
  155. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +9 -9
  156. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  157. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +4 -4
  158. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
  159. package/dist/loop/workflows/agentic-loop/index.d.ts +13 -12
  160. package/dist/loop/workflows/agentic-loop/index.d.ts.map +1 -1
  161. package/dist/loop/workflows/run-state.d.ts +2 -2
  162. package/dist/loop/workflows/run-state.d.ts.map +1 -1
  163. package/dist/loop/workflows/schema.d.ts +4 -4
  164. package/dist/loop/workflows/stream.d.ts.map +1 -1
  165. package/dist/mastra/index.cjs +2 -2
  166. package/dist/mastra/index.d.ts +127 -3
  167. package/dist/mastra/index.d.ts.map +1 -1
  168. package/dist/mastra/index.js +1 -1
  169. package/dist/memory/index.cjs +8 -8
  170. package/dist/memory/index.js +4 -4
  171. package/dist/models-dev-6PRLJKVZ.js +3 -0
  172. package/dist/{models-dev-DNBKXHT4.js.map → models-dev-6PRLJKVZ.js.map} +1 -1
  173. package/dist/models-dev-WHMI5G6Y.cjs +12 -0
  174. package/dist/{models-dev-YBEEQIX6.cjs.map → models-dev-WHMI5G6Y.cjs.map} +1 -1
  175. package/dist/netlify-46I3SNNV.cjs +12 -0
  176. package/dist/{netlify-GWNGSIRZ.cjs.map → netlify-46I3SNNV.cjs.map} +1 -1
  177. package/dist/netlify-EBQ6YUC6.js +3 -0
  178. package/dist/{netlify-7G2L5VSH.js.map → netlify-EBQ6YUC6.js.map} +1 -1
  179. package/dist/processors/index.cjs +11 -11
  180. package/dist/processors/index.js +1 -1
  181. package/dist/provider-registry-HDG6UMUC.js +3 -0
  182. package/dist/provider-registry-HDG6UMUC.js.map +1 -0
  183. package/dist/provider-registry-RP2W4B24.cjs +40 -0
  184. package/dist/provider-registry-RP2W4B24.cjs.map +1 -0
  185. package/dist/provider-registry.json +140 -18
  186. package/dist/{registry-generator-MK63POJO.cjs → registry-generator-JPCV47SC.cjs} +6 -4
  187. package/dist/registry-generator-JPCV47SC.cjs.map +1 -0
  188. package/dist/{registry-generator-H4YNODDH.js → registry-generator-XD4FPZTU.js} +6 -4
  189. package/dist/registry-generator-XD4FPZTU.js.map +1 -0
  190. package/dist/relevance/index.cjs +2 -2
  191. package/dist/relevance/index.js +1 -1
  192. package/dist/server/auth.d.ts +11 -0
  193. package/dist/server/auth.d.ts.map +1 -1
  194. package/dist/server/index.cjs +12 -1
  195. package/dist/server/index.cjs.map +1 -1
  196. package/dist/server/index.d.ts +4 -0
  197. package/dist/server/index.d.ts.map +1 -1
  198. package/dist/server/index.js +12 -1
  199. package/dist/server/index.js.map +1 -1
  200. package/dist/storage/domains/workflows/inmemory.d.ts +1 -1
  201. package/dist/storage/domains/workflows/inmemory.d.ts.map +1 -1
  202. package/dist/storage/index.cjs +29 -29
  203. package/dist/storage/index.js +1 -1
  204. package/dist/storage/types.d.ts +2 -1
  205. package/dist/storage/types.d.ts.map +1 -1
  206. package/dist/stream/RunOutput.d.ts +1 -1
  207. package/dist/stream/RunOutput.d.ts.map +1 -1
  208. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts.map +1 -1
  209. package/dist/stream/aisdk/v5/execute.d.ts +6 -3
  210. package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
  211. package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
  212. package/dist/stream/base/input.d.ts +1 -1
  213. package/dist/stream/base/output.d.ts.map +1 -1
  214. package/dist/stream/index.cjs +11 -11
  215. package/dist/stream/index.js +2 -2
  216. package/dist/stream/types.d.ts +4 -3
  217. package/dist/stream/types.d.ts.map +1 -1
  218. package/dist/test-utils/llm-mock.cjs +68 -31
  219. package/dist/test-utils/llm-mock.cjs.map +1 -1
  220. package/dist/test-utils/llm-mock.d.ts +4 -2
  221. package/dist/test-utils/llm-mock.d.ts.map +1 -1
  222. package/dist/test-utils/llm-mock.js +67 -30
  223. package/dist/test-utils/llm-mock.js.map +1 -1
  224. package/dist/tools/index.cjs +4 -4
  225. package/dist/tools/index.js +1 -1
  226. package/dist/tools/is-vercel-tool.cjs +2 -2
  227. package/dist/tools/is-vercel-tool.js +1 -1
  228. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  229. package/dist/tools/tool.d.ts.map +1 -1
  230. package/dist/tools/types.d.ts +1 -0
  231. package/dist/tools/types.d.ts.map +1 -1
  232. package/dist/tools/validation.d.ts +12 -0
  233. package/dist/tools/validation.d.ts.map +1 -1
  234. package/dist/utils.cjs +25 -21
  235. package/dist/utils.d.ts +4 -1
  236. package/dist/utils.d.ts.map +1 -1
  237. package/dist/utils.js +1 -1
  238. package/dist/voice/aisdk/index.d.ts +3 -0
  239. package/dist/voice/aisdk/index.d.ts.map +1 -0
  240. package/dist/voice/aisdk/speech.d.ts +23 -0
  241. package/dist/voice/aisdk/speech.d.ts.map +1 -0
  242. package/dist/voice/aisdk/transcription.d.ts +22 -0
  243. package/dist/voice/aisdk/transcription.d.ts.map +1 -0
  244. package/dist/voice/composite-voice.d.ts +4 -3
  245. package/dist/voice/composite-voice.d.ts.map +1 -1
  246. package/dist/voice/index.cjs +12 -4
  247. package/dist/voice/index.d.ts +1 -0
  248. package/dist/voice/index.d.ts.map +1 -1
  249. package/dist/voice/index.js +1 -1
  250. package/dist/workflows/default.d.ts +24 -8
  251. package/dist/workflows/default.d.ts.map +1 -1
  252. package/dist/workflows/evented/execution-engine.d.ts +3 -1
  253. package/dist/workflows/evented/execution-engine.d.ts.map +1 -1
  254. package/dist/workflows/evented/index.cjs +10 -10
  255. package/dist/workflows/evented/index.js +1 -1
  256. package/dist/workflows/evented/workflow-event-processor/index.d.ts +5 -4
  257. package/dist/workflows/evented/workflow-event-processor/index.d.ts.map +1 -1
  258. package/dist/workflows/evented/workflow-event-processor/loop.d.ts +1 -1
  259. package/dist/workflows/evented/workflow-event-processor/loop.d.ts.map +1 -1
  260. package/dist/workflows/evented/workflow-event-processor/parallel.d.ts +2 -2
  261. package/dist/workflows/evented/workflow-event-processor/parallel.d.ts.map +1 -1
  262. package/dist/workflows/evented/workflow-event-processor/sleep.d.ts +2 -2
  263. package/dist/workflows/evented/workflow-event-processor/sleep.d.ts.map +1 -1
  264. package/dist/workflows/evented/workflow.d.ts +2 -1
  265. package/dist/workflows/evented/workflow.d.ts.map +1 -1
  266. package/dist/workflows/execution-engine.d.ts +4 -2
  267. package/dist/workflows/execution-engine.d.ts.map +1 -1
  268. package/dist/workflows/index.cjs +28 -16
  269. package/dist/workflows/index.js +1 -1
  270. package/dist/workflows/step.d.ts +1 -0
  271. package/dist/workflows/step.d.ts.map +1 -1
  272. package/dist/workflows/types.d.ts +54 -2
  273. package/dist/workflows/types.d.ts.map +1 -1
  274. package/dist/workflows/utils.d.ts +20 -0
  275. package/dist/workflows/utils.d.ts.map +1 -1
  276. package/dist/workflows/workflow.d.ts +108 -40
  277. package/dist/workflows/workflow.d.ts.map +1 -1
  278. package/package.json +19 -27
  279. package/src/llm/model/provider-types.generated.d.ts +56 -7
  280. package/dist/chunk-22443P6A.cjs.map +0 -1
  281. package/dist/chunk-2ZVKF4HP.cjs.map +0 -1
  282. package/dist/chunk-4CDL2QJT.js.map +0 -1
  283. package/dist/chunk-7AHYOMHJ.js.map +0 -1
  284. package/dist/chunk-7CBEP2ZQ.js.map +0 -1
  285. package/dist/chunk-7PO6SEJF.js.map +0 -1
  286. package/dist/chunk-CB575O6L.cjs.map +0 -1
  287. package/dist/chunk-ECFXGXWO.cjs.map +0 -1
  288. package/dist/chunk-ET6UOTTU.cjs.map +0 -1
  289. package/dist/chunk-GGYKYORQ.cjs.map +0 -1
  290. package/dist/chunk-HDJFSJCK.js.map +0 -1
  291. package/dist/chunk-I4CXL4SR.js.map +0 -1
  292. package/dist/chunk-IQO7ANVS.cjs.map +0 -1
  293. package/dist/chunk-JPGVRWWL.js.map +0 -1
  294. package/dist/chunk-JV2KH24V.js.map +0 -1
  295. package/dist/chunk-JYYQQEBH.cjs.map +0 -1
  296. package/dist/chunk-LJFJTTZQ.cjs +0 -4
  297. package/dist/chunk-LWBQ4P4N.cjs.map +0 -1
  298. package/dist/chunk-MV7KHWUT.js.map +0 -1
  299. package/dist/chunk-QUKUN6NR.cjs.map +0 -1
  300. package/dist/chunk-SNPVZPLB.js.map +0 -1
  301. package/dist/chunk-UIZSWUKP.js.map +0 -1
  302. package/dist/chunk-W7UH2PWL.js.map +0 -1
  303. package/dist/chunk-WM6CK2F3.cjs.map +0 -1
  304. package/dist/chunk-YCVEJ3UN.cjs.map +0 -1
  305. package/dist/chunk-ZGHTOYHW.js.map +0 -1
  306. package/dist/chunk-ZV5CC35D.js +0 -3
  307. package/dist/chunk-ZWNI5IWX.cjs.map +0 -1
  308. package/dist/models-dev-DNBKXHT4.js +0 -3
  309. package/dist/models-dev-YBEEQIX6.cjs +0 -12
  310. package/dist/netlify-7G2L5VSH.js +0 -3
  311. package/dist/netlify-GWNGSIRZ.cjs +0 -12
  312. package/dist/registry-generator-H4YNODDH.js.map +0 -1
  313. package/dist/registry-generator-MK63POJO.cjs.map +0 -1
@@ -499,24 +499,74 @@ function getRuntimeEnvironmentUserAgent(globalThisAny = globalThis) {
  }
  return "runtime/unknown";
  }
- function removeUndefinedEntries(record) {
- return Object.fromEntries(
- Object.entries(record).filter(([_key, value]) => value != null)
- );
+ function normalizeHeaders(headers) {
+ if (headers == null) {
+ return {};
+ }
+ const normalized = {};
+ if (headers instanceof Headers) {
+ headers.forEach((value, key) => {
+ normalized[key.toLowerCase()] = value;
+ });
+ } else {
+ if (!Array.isArray(headers)) {
+ headers = Object.entries(headers);
+ }
+ for (const [key, value] of headers) {
+ if (value != null) {
+ normalized[key.toLowerCase()] = value;
+ }
+ }
+ }
+ return normalized;
  }
  function withUserAgentSuffix(headers, ...userAgentSuffixParts) {
- const cleanedHeaders = removeUndefinedEntries(
- headers != null ? headers : {}
- );
- const normalizedHeaders = new Headers(cleanedHeaders);
+ const normalizedHeaders = new Headers(normalizeHeaders(headers));
  const currentUserAgentHeader = normalizedHeaders.get("user-agent") || "";
  normalizedHeaders.set(
  "user-agent",
  [currentUserAgentHeader, ...userAgentSuffixParts].filter(Boolean).join(" ")
  );
- return Object.fromEntries(normalizedHeaders);
+ return Object.fromEntries(normalizedHeaders.entries());
+ }
+ var VERSION = "3.0.17" ;
+ var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
+ var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
+ var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
+ function injectJsonInstruction({
+ prompt,
+ schema,
+ schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
+ schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
+ }) {
+ return [
+ prompt != null && prompt.length > 0 ? prompt : void 0,
+ prompt != null && prompt.length > 0 ? "" : void 0,
+ // add a newline if prompt is not null
+ schemaPrefix,
+ schema != null ? JSON.stringify(schema) : void 0,
+ schemaSuffix
+ ].filter((line) => line != null).join("\n");
+ }
+ function injectJsonInstructionIntoMessages({
+ messages,
+ schema,
+ schemaPrefix,
+ schemaSuffix
+ }) {
+ var _a15, _b;
+ const systemMessage = ((_a15 = messages[0]) == null ? void 0 : _a15.role) === "system" ? { ...messages[0] } : { role: "system", content: "" };
+ systemMessage.content = injectJsonInstruction({
+ prompt: systemMessage.content,
+ schema,
+ schemaPrefix,
+ schemaSuffix
+ });
+ return [
+ systemMessage,
+ ...((_b = messages[0]) == null ? void 0 : _b.role) === "system" ? messages.slice(1) : messages
+ ];
  }
- var VERSION = "3.0.12" ;
  function loadApiKey({
  apiKey,
  environmentVariableName,
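Note: the injectJsonInstruction/injectJsonInstructionIntoMessages helpers added above prepend JSON-output instructions to the system message. A rough illustration of the resulting shape (hypothetical values; these helpers are internal to the bundle):

    const out = injectJsonInstructionIntoMessages({
      messages: [{ role: "user", content: "List three colors." }],
      schema: { type: "object", properties: { colors: { type: "array", items: { type: "string" } } } }
    });
    // out[0] is a system message whose content reads roughly:
    // "JSON schema:\n{...stringified schema...}\nYou MUST answer with a JSON object that matches the JSON schema above."
    // The original user message follows it unchanged.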
@@ -612,7 +662,11 @@ function filter(obj) {
  }
  function secureJsonParse(text) {
  const { stackTraceLimit } = Error;
- Error.stackTraceLimit = 0;
+ try {
+ Error.stackTraceLimit = 0;
+ } catch (e) {
+ return _parse(text);
+ }
  try {
  return _parse(text);
  } finally {
@@ -3537,7 +3591,7 @@ var OpenAICompatibleImageModel = class {
  var openaiCompatibleImageResponseSchema = z.object({
  data: z.array(z.object({ b64_json: z.string() }))
  });
- var VERSION2 = "1.0.22" ;
+ var VERSION2 = "1.0.27" ;
  function createOpenAICompatible(options) {
  const baseURL = withoutTrailingSlash(options.baseURL);
  const providerName = options.name;
@@ -3583,8 +3637,14 @@ function createOpenAICompatible(options) {
 
  // src/llm/model/gateways/base.ts
  var MastraModelGateway = class {
+ /**
+ * Get the gateway ID
+ */
+ getId() {
+ return this.id;
+ }
  };
- var VERSION3 = "2.0.33" ;
+ var VERSION3 = "2.0.45" ;
  var anthropicErrorDataSchema = lazySchema(
  () => zodSchema(
  z.object({
@@ -3786,7 +3846,18 @@ var anthropicMessagesResponseSchema = lazySchema(
  output_tokens: z.number(),
  cache_creation_input_tokens: z.number().nullish(),
  cache_read_input_tokens: z.number().nullish()
- })
+ }),
+ container: z.object({
+ expires_at: z.string(),
+ id: z.string(),
+ skills: z.array(
+ z.object({
+ type: z.union([z.literal("anthropic"), z.literal("custom")]),
+ skill_id: z.string(),
+ version: z.string()
+ })
+ ).nullish()
+ }).nullish()
  })
  )
  );
@@ -4016,7 +4087,21 @@ var anthropicMessagesChunkSchema = lazySchema(
  type: z.literal("message_delta"),
  delta: z.object({
  stop_reason: z.string().nullish(),
- stop_sequence: z.string().nullish()
+ stop_sequence: z.string().nullish(),
+ container: z.object({
+ expires_at: z.string(),
+ id: z.string(),
+ skills: z.array(
+ z.object({
+ type: z.union([
+ z.literal("anthropic"),
+ z.literal("custom")
+ ]),
+ skill_id: z.string(),
+ version: z.string()
+ })
+ ).nullish()
+ }).nullish()
  }),
  usage: z.looseObject({
  output_tokens: z.number(),
@@ -5259,6 +5344,21 @@ var AnthropicMessagesLanguageModel = class {
  setting: "seed"
  });
  }
+ if (temperature != null && temperature > 1) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: `${temperature} exceeds anthropic maximum of 1.0. clamped to 1.0`
+ });
+ temperature = 1;
+ } else if (temperature != null && temperature < 0) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: `${temperature} is below anthropic minimum of 0. clamped to 0`
+ });
+ temperature = 0;
+ }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
  if (responseFormat.schema == null) {
  warnings.push({
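Note: the added block above means the bundled Anthropic model now clamps temperature into Anthropic's supported [0, 1] range instead of forwarding invalid values. A hedged sketch of the observable effect (model resolution and call site are illustrative):

    import { generateText } from "ai";

    const result = await generateText({
      model: anthropicModel,   // an Anthropic model instance resolved by Mastra
      prompt: "Summarize this paragraph.",
      temperature: 1.5         // outside Anthropic's [0, 1] range
    });
    // Expected: the request is sent with temperature 1 and result.warnings
    // contains an "unsupported-setting" entry describing the clamp.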
@@ -5294,7 +5394,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  const isThinking = ((_b = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _b.type) === "enabled";
  const thinkingBudget = (_c = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _c.budgetTokens;
- const maxOutputTokensForModel = getMaxOutputTokensForModel(this.modelId);
+ const { maxOutputTokens: maxOutputTokensForModel, knownModel } = getMaxOutputTokensForModel(this.modelId);
  const maxTokens = maxOutputTokens != null ? maxOutputTokens : maxOutputTokensForModel;
  const baseArgs = {
  // model id:
@@ -5356,7 +5456,7 @@ var AnthropicMessagesLanguageModel = class {
  }
  baseArgs.max_tokens = maxTokens + thinkingBudget;
  }
- if (baseArgs.max_tokens > maxOutputTokensForModel) {
+ if (knownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
  if (maxOutputTokens != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -5451,7 +5551,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  }
  async doGenerate(options) {
- var _a15, _b, _c, _d, _e, _f;
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
  const { args, warnings, betas, usesJsonResponseTool } = await this.getArgs(options);
  const citationDocuments = this.extractCitationDocuments(options.prompt);
  const {
@@ -5702,7 +5802,16 @@ var AnthropicMessagesLanguageModel = class {
  anthropic: {
  usage: response.usage,
  cacheCreationInputTokens: (_e = response.usage.cache_creation_input_tokens) != null ? _e : null,
- stopSequence: (_f = response.stop_sequence) != null ? _f : null
+ stopSequence: (_f = response.stop_sequence) != null ? _f : null,
+ container: response.container ? {
+ expiresAt: response.container.expires_at,
+ id: response.container.id,
+ skills: (_h = (_g = response.container.skills) == null ? void 0 : _g.map((skill) => ({
+ type: skill.type,
+ skillId: skill.skill_id,
+ version: skill.version
+ }))) != null ? _h : null
+ } : null
  }
  }
  };
@@ -5732,6 +5841,7 @@ var AnthropicMessagesLanguageModel = class {
  let rawUsage = void 0;
  let cacheCreationInputTokens = null;
  let stopSequence = null;
+ let container = null;
  let blockType = void 0;
  const generateId3 = this.generateId;
  return {
@@ -5741,7 +5851,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a15, _b, _c, _d, _e, _f, _g, _h;
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -5854,7 +5964,8 @@ var AnthropicMessagesLanguageModel = class {
  data: part.content.content.source.data
  }
  }
- }
+ },
+ providerExecuted: true
  });
  } else if (part.content.type === "web_fetch_tool_result_error") {
  controller.enqueue({
@@ -6122,6 +6233,15 @@ var AnthropicMessagesLanguageModel = class {
  isJsonResponseFromTool: usesJsonResponseTool
  });
  stopSequence = (_h = value.delta.stop_sequence) != null ? _h : null;
+ container = value.delta.container != null ? {
+ expiresAt: value.delta.container.expires_at,
+ id: value.delta.container.id,
+ skills: (_j = (_i = value.delta.container.skills) == null ? void 0 : _i.map((skill) => ({
+ type: skill.type,
+ skillId: skill.skill_id,
+ version: skill.version
+ }))) != null ? _j : null
+ } : null;
  rawUsage = {
  ...rawUsage,
  ...value.usage
@@ -6137,7 +6257,8 @@ var AnthropicMessagesLanguageModel = class {
  anthropic: {
  usage: rawUsage != null ? rawUsage : null,
  cacheCreationInputTokens,
- stopSequence
+ stopSequence,
+ container
  }
  }
  });
@@ -6162,13 +6283,15 @@ var AnthropicMessagesLanguageModel = class {
  };
  function getMaxOutputTokensForModel(modelId) {
  if (modelId.includes("claude-sonnet-4-") || modelId.includes("claude-3-7-sonnet") || modelId.includes("claude-haiku-4-5")) {
- return 64e3;
+ return { maxOutputTokens: 64e3, knownModel: true };
  } else if (modelId.includes("claude-opus-4-")) {
- return 32e3;
+ return { maxOutputTokens: 32e3, knownModel: true };
  } else if (modelId.includes("claude-3-5-haiku")) {
- return 8192;
+ return { maxOutputTokens: 8192, knownModel: true };
+ } else if (modelId.includes("claude-3-haiku")) {
+ return { maxOutputTokens: 4096, knownModel: true };
  } else {
- return 4096;
+ return { maxOutputTokens: 4096, knownModel: false };
  }
  }
  var bash_20241022InputSchema = lazySchema(
@@ -6504,8 +6627,14 @@ var anthropicTools = {
  webSearch_20250305
  };
  function createAnthropic(options = {}) {
- var _a15;
- const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://api.anthropic.com/v1";
+ var _a15, _b;
+ const baseURL = (_a15 = withoutTrailingSlash(
+ loadOptionalSetting({
+ settingValue: options.baseURL,
+ environmentVariableName: "ANTHROPIC_BASE_URL"
+ })
+ )) != null ? _a15 : "https://api.anthropic.com/v1";
+ const providerName = (_b = options.name) != null ? _b : "anthropic.messages";
  const getHeaders = () => withUserAgentSuffix(
  {
  "anthropic-version": "2023-06-01",
@@ -6521,7 +6650,7 @@ function createAnthropic(options = {}) {
  const createChatModel = (modelId) => {
  var _a22;
  return new AnthropicMessagesLanguageModel(modelId, {
- provider: "anthropic.messages",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  fetch: options.fetch,
@@ -6552,7 +6681,7 @@ function createAnthropic(options = {}) {
  return provider;
  }
  createAnthropic();
- var VERSION4 = "2.0.23" ;
+ var VERSION4 = "2.0.39" ;
  var googleErrorDataSchema = lazySchema(
  () => zodSchema(
  z.object({
@@ -6852,19 +6981,20 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
  contents.push({
  role: "model",
  parts: content.map((part) => {
- var _a22, _b, _c, _d, _e, _f;
+ var _a22, _b, _c;
+ const thoughtSignature = ((_b = (_a22 = part.providerOptions) == null ? void 0 : _a22.google) == null ? void 0 : _b.thoughtSignature) != null ? String((_c = part.providerOptions.google) == null ? void 0 : _c.thoughtSignature) : void 0;
  switch (part.type) {
  case "text": {
  return part.text.length === 0 ? void 0 : {
  text: part.text,
- thoughtSignature: (_b = (_a22 = part.providerOptions) == null ? void 0 : _a22.google) == null ? void 0 : _b.thoughtSignature
+ thoughtSignature
  };
  }
  case "reasoning": {
  return part.text.length === 0 ? void 0 : {
  text: part.text,
  thought: true,
- thoughtSignature: (_d = (_c = part.providerOptions) == null ? void 0 : _c.google) == null ? void 0 : _d.thoughtSignature
+ thoughtSignature
  };
  }
  case "file": {
@@ -6891,7 +7021,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
  name: part.toolName,
  args: part.input
  },
- thoughtSignature: (_f = (_e = part.providerOptions) == null ? void 0 : _e.google) == null ? void 0 : _f.thoughtSignature
+ thoughtSignature
  };
  }
  }
@@ -6974,7 +7104,9 @@ var googleGenerativeAIProviderOptions = lazySchema(
  responseModalities: z.array(z.enum(["TEXT", "IMAGE"])).optional(),
  thinkingConfig: z.object({
  thinkingBudget: z.number().optional(),
- includeThoughts: z.boolean().optional()
+ includeThoughts: z.boolean().optional(),
+ // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#thinking_level
+ thinkingLevel: z.enum(["low", "medium", "high"]).optional()
  }).optional(),
  /**
  * Optional.
@@ -7075,8 +7207,14 @@ function prepareTools3({
  var _a15;
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
- const isGemini2 = modelId.includes("gemini-2");
+ const isLatest = [
+ "gemini-flash-latest",
+ "gemini-flash-lite-latest",
+ "gemini-pro-latest"
+ ].some((id) => id === modelId);
+ const isGemini2orNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
+ const supportsFileSearch = modelId.includes("gemini-2.5");
  if (tools == null) {
  return { tools: void 0, toolConfig: void 0, toolWarnings };
  }
@@ -7085,10 +7223,11 @@ function prepareTools3({
  (tool2) => tool2.type === "provider-defined"
  );
  if (hasFunctionTools && hasProviderDefinedTools) {
+ const functionTools = tools.filter((tool2) => tool2.type === "function");
  toolWarnings.push({
  type: "unsupported-tool",
  tool: tools.find((tool2) => tool2.type === "function"),
- details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+ details: `Cannot mix function tools with provider-defined tools in the same request. Falling back to provider-defined tools only. The following function tools will be ignored: ${functionTools.map((t) => t.name).join(", ")}. Please use either function tools or provider-defined tools, but not both.`
  });
  }
  if (hasProviderDefinedTools) {
@@ -7099,7 +7238,7 @@ function prepareTools3({
  providerDefinedTools.forEach((tool2) => {
  switch (tool2.id) {
  case "google.google_search":
- if (isGemini2) {
+ if (isGemini2orNewer) {
  googleTools2.push({ googleSearch: {} });
  } else if (supportsDynamicRetrieval) {
  googleTools2.push({
@@ -7115,7 +7254,7 @@ function prepareTools3({
  }
  break;
  case "google.url_context":
- if (isGemini2) {
+ if (isGemini2orNewer) {
  googleTools2.push({ urlContext: {} });
  } else {
  toolWarnings.push({
@@ -7126,7 +7265,7 @@ function prepareTools3({
  }
  break;
  case "google.code_execution":
- if (isGemini2) {
+ if (isGemini2orNewer) {
  googleTools2.push({ codeExecution: {} });
  } else {
  toolWarnings.push({
@@ -7136,6 +7275,37 @@ function prepareTools3({
  });
  }
  break;
+ case "google.file_search":
+ if (supportsFileSearch) {
+ googleTools2.push({ fileSearch: { ...tool2.args } });
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tool2,
+ details: "The file search tool is only supported with Gemini 2.5 models."
+ });
+ }
+ break;
+ case "google.vertex_rag_store":
+ if (isGemini2orNewer) {
+ googleTools2.push({
+ retrieval: {
+ vertex_rag_store: {
+ rag_resources: {
+ rag_corpus: tool2.args.ragCorpus
+ },
+ similarity_top_k: tool2.args.topK
+ }
+ }
+ });
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tool2,
+ details: "The RAG store tool is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
  default:
  toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
  break;
@@ -7263,17 +7433,19 @@ var GoogleGenerativeAILanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a15, _b;
+ var _a15;
  const warnings = [];
  const googleOptions = await parseProviderOptions({
  provider: "google",
  providerOptions,
  schema: googleGenerativeAIProviderOptions
  });
- if (((_a15 = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a15.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ if ((tools == null ? void 0 : tools.some(
+ (tool2) => tool2.type === "provider-defined" && tool2.id === "google.vertex_rag_store"
+ )) && !this.config.provider.startsWith("google.vertex.")) {
  warnings.push({
  type: "other",
- message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ message: `The 'vertex_rag_store' tool is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
  });
  }
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
@@ -7307,7 +7479,7 @@ var GoogleGenerativeAILanguageModel = class {
  responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
  // so this is needed as an escape hatch:
  // TODO convert into provider option
- ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ((_a15 = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _a15 : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
  ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
  audioTimestamp: googleOptions.audioTimestamp
  },
@@ -7719,16 +7891,64 @@ function extractSources({
  groundingMetadata,
  generateId: generateId3
  }) {
- var _a15;
- return (_a15 = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a15.filter(
- (chunk) => chunk.web != null
- ).map((chunk) => ({
- type: "source",
- sourceType: "url",
- id: generateId3(),
- url: chunk.web.uri,
- title: chunk.web.title
- }));
+ var _a15, _b, _c;
+ if (!(groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks)) {
+ return void 0;
+ }
+ const sources = [];
+ for (const chunk of groundingMetadata.groundingChunks) {
+ if (chunk.web != null) {
+ sources.push({
+ type: "source",
+ sourceType: "url",
+ id: generateId3(),
+ url: chunk.web.uri,
+ title: (_a15 = chunk.web.title) != null ? _a15 : void 0
+ });
+ } else if (chunk.retrievedContext != null) {
+ const uri = chunk.retrievedContext.uri;
+ if (uri.startsWith("http://") || uri.startsWith("https://")) {
+ sources.push({
+ type: "source",
+ sourceType: "url",
+ id: generateId3(),
+ url: uri,
+ title: (_b = chunk.retrievedContext.title) != null ? _b : void 0
+ });
+ } else {
+ const title = (_c = chunk.retrievedContext.title) != null ? _c : "Unknown Document";
+ let mediaType = "application/octet-stream";
+ let filename = void 0;
+ if (uri.endsWith(".pdf")) {
+ mediaType = "application/pdf";
+ filename = uri.split("/").pop();
+ } else if (uri.endsWith(".txt")) {
+ mediaType = "text/plain";
+ filename = uri.split("/").pop();
+ } else if (uri.endsWith(".docx")) {
+ mediaType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document";
+ filename = uri.split("/").pop();
+ } else if (uri.endsWith(".doc")) {
+ mediaType = "application/msword";
+ filename = uri.split("/").pop();
+ } else if (uri.match(/\.(md|markdown)$/)) {
+ mediaType = "text/markdown";
+ filename = uri.split("/").pop();
+ } else {
+ filename = uri.split("/").pop();
+ }
+ sources.push({
+ type: "source",
+ sourceType: "document",
+ id: generateId3(),
+ mediaType,
+ title,
+ filename
+ });
+ }
+ }
+ }
+ return sources.length > 0 ? sources : void 0;
  }
  var getGroundingMetadataSchema = () => z.object({
  webSearchQueries: z.array(z.string()).nullish(),
@@ -7736,8 +7956,12 @@ var getGroundingMetadataSchema = () => z.object({
  searchEntryPoint: z.object({ renderedContent: z.string() }).nullish(),
  groundingChunks: z.array(
  z.object({
- web: z.object({ uri: z.string(), title: z.string() }).nullish(),
- retrievedContext: z.object({ uri: z.string(), title: z.string() }).nullish()
+ web: z.object({ uri: z.string(), title: z.string().nullish() }).nullish(),
+ retrievedContext: z.object({
+ uri: z.string(),
+ title: z.string().nullish(),
+ text: z.string().nullish()
+ }).nullish()
  })
  ).nullish(),
  groundingSupports: z.array(
@@ -7807,7 +8031,9 @@ var usageSchema2 = z.object({
  thoughtsTokenCount: z.number().nullish(),
  promptTokenCount: z.number().nullish(),
  candidatesTokenCount: z.number().nullish(),
- totalTokenCount: z.number().nullish()
+ totalTokenCount: z.number().nullish(),
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
+ trafficType: z.string().nullish()
  });
  var getUrlContextMetadataSchema = () => z.object({
  urlMetadata: z.array(
@@ -7869,6 +8095,30 @@ var codeExecution = createProviderDefinedToolFactoryWithOutputSchema({
  output: z.string().describe("The output from the code execution.")
  })
  });
+ var fileSearchArgsBaseSchema = z.object({
+ /** The names of the file_search_stores to retrieve from.
+ * Example: `fileSearchStores/my-file-search-store-123`
+ */
+ fileSearchStoreNames: z.array(z.string()).describe(
+ "The names of the file_search_stores to retrieve from. Example: `fileSearchStores/my-file-search-store-123`"
+ ),
+ /** The number of file search retrieval chunks to retrieve. */
+ topK: z.number().int().positive().describe("The number of file search retrieval chunks to retrieve.").optional(),
+ /** Metadata filter to apply to the file search retrieval documents.
+ * See https://google.aip.dev/160 for the syntax of the filter expression.
+ */
+ metadataFilter: z.string().describe(
+ "Metadata filter to apply to the file search retrieval documents. See https://google.aip.dev/160 for the syntax of the filter expression."
+ ).optional()
+ }).passthrough();
+ var fileSearchArgsSchema = lazySchema(
+ () => zodSchema(fileSearchArgsBaseSchema)
+ );
+ var fileSearch = createProviderDefinedToolFactory({
+ id: "google.file_search",
+ name: "file_search",
+ inputSchema: fileSearchArgsSchema
+ });
  var googleSearch = createProviderDefinedToolFactory({
  id: "google.google_search",
  name: "google_search",
@@ -7886,6 +8136,14 @@ var urlContext = createProviderDefinedToolFactory({
  name: "url_context",
  inputSchema: lazySchema(() => zodSchema(z.object({})))
  });
+ var vertexRagStore = createProviderDefinedToolFactory({
+ id: "google.vertex_rag_store",
+ name: "vertex_rag_store",
+ inputSchema: z.object({
+ ragCorpus: z.string(),
+ topK: z.number().optional()
+ })
+ });
  var googleTools = {
  /**
  * Creates a Google search tool that gives Google direct access to real-time web content.
@@ -7897,6 +8155,17 @@ var googleTools = {
  * Must have name "url_context".
  */
  urlContext,
+ /**
+ * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
+ * Must have name "file_search".
+ *
+ * @param fileSearchStoreNames - Fully-qualified File Search store resource names.
+ * @param metadataFilter - Optional filter expression to restrict the files that can be retrieved.
+ * @param topK - Optional result limit for the number of chunks returned from File Search.
+ *
+ * @see https://ai.google.dev/gemini-api/docs/file-search
+ */
+ fileSearch,
  /**
  * A tool that enables the model to generate and run Python code.
  * Must have name "code_execution".
@@ -7907,7 +8176,12 @@ var googleTools = {
  * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
  */
- codeExecution
+ codeExecution,
+ /**
+ * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
+ * Must have name "vertex_rag_store".
+ */
+ vertexRagStore
  };
  var GoogleGenerativeAIImageModel = class {
  constructor(modelId, settings, config) {
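Note: the bundled Google provider now exposes file_search (Gemini 2.5 models only) and vertex_rag_store as provider-defined tools. A hedged usage sketch for File Search via the AI SDK (store name and model id are illustrative; wiring through a Mastra Agent may differ):

    import { google } from "@ai-sdk/google";
    import { generateText } from "ai";

    const result = await generateText({
      model: google("gemini-2.5-flash"),
      prompt: "What does the handbook say about PTO?",
      tools: {
        file_search: google.tools.fileSearch({
          fileSearchStoreNames: ["fileSearchStores/my-file-search-store-123"],
          topK: 5 // optional: number of retrieved chunks
        })
      }
    });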
@@ -8016,8 +8290,9 @@ var googleImageProviderOptionsSchema = lazySchema(
  )
  );
  function createGoogleGenerativeAI(options = {}) {
- var _a15;
+ var _a15, _b;
  const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://generativelanguage.googleapis.com/v1beta";
+ const providerName = (_b = options.name) != null ? _b : "google.generative-ai";
  const getHeaders = () => withUserAgentSuffix(
  {
  "x-goog-api-key": loadApiKey({
@@ -8032,7 +8307,7 @@ function createGoogleGenerativeAI(options = {}) {
  const createChatModel = (modelId) => {
  var _a22;
  return new GoogleGenerativeAILanguageModel(modelId, {
- provider: "google.generative-ai",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  generateId: (_a22 = options.generateId) != null ? _a22 : generateId,
@@ -8052,13 +8327,13 @@ function createGoogleGenerativeAI(options = {}) {
  });
  };
  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
- provider: "google.generative-ai",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  fetch: options.fetch
  });
  const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
- provider: "google.generative-ai",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  fetch: options.fetch
@@ -8280,7 +8555,7 @@ function getResponseMetadata3({
  return {
  id: id != null ? id : void 0,
  modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
+ timestamp: created ? new Date(created * 1e3) : void 0
  };
  }
  function mapOpenAIFinishReason(finishReason) {
@@ -8464,7 +8739,7 @@ var openaiChatLanguageModelOptions = lazyValidator(
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: z.enum(["minimal", "low", "medium", "high"]).optional(),
+ reasoningEffort: z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
@@ -8514,6 +8789,15 @@ var openaiChatLanguageModelOptions = lazyValidator(
  * Useful for improving cache hit rates and working around automatic caching issues.
  */
  promptCacheKey: z.string().optional(),
+ /**
+ * The retention policy for the prompt cache.
+ * - 'in_memory': Default. Standard prompt caching behavior.
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+ * Currently only available for 5.1 series models.
+ *
+ * @default 'in_memory'
+ */
+ promptCacheRetention: z.enum(["in_memory", "24h"]).optional(),
  /**
  * A stable identifier used to help detect users of your application
  * that may be violating OpenAI's usage policies. The IDs should be a
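Note: both the OpenAI chat options above and the Responses provider options later in this diff gain promptCacheRetention alongside promptCacheKey. A hedged sketch of passing it through AI SDK provider options (model id is illustrative; '24h' is documented as limited to 5.1 series models):

    import { openai } from "@ai-sdk/openai";
    import { generateText } from "ai";

    const result = await generateText({
      model: openai("gpt-5.1"),
      prompt: "Answer using the cached policy context.",
      providerOptions: {
        openai: {
          promptCacheKey: "support-bot-v1",
          promptCacheRetention: "24h" // "in_memory" is the default
        }
      }
    });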
@@ -8675,6 +8959,7 @@ var OpenAIChatLanguageModel = class {
8675
8959
  reasoning_effort: openaiOptions.reasoningEffort,
8676
8960
  service_tier: openaiOptions.serviceTier,
8677
8961
  prompt_cache_key: openaiOptions.promptCacheKey,
8962
+ prompt_cache_retention: openaiOptions.promptCacheRetention,
8678
8963
  safety_identifier: openaiOptions.safetyIdentifier,
8679
8964
  // messages:
8680
8965
  messages
@@ -8890,7 +9175,7 @@ var OpenAIChatLanguageModel = class {
8890
9175
  outputTokens: void 0,
8891
9176
  totalTokens: void 0
8892
9177
  };
8893
- let isFirstChunk = true;
9178
+ let metadataExtracted = false;
8894
9179
  let isActiveText = false;
8895
9180
  const providerMetadata = { openai: {} };
8896
9181
  return {
@@ -8915,12 +9200,15 @@ var OpenAIChatLanguageModel = class {
8915
9200
  controller.enqueue({ type: "error", error: value.error });
8916
9201
  return;
8917
9202
  }
8918
- if (isFirstChunk) {
8919
- isFirstChunk = false;
8920
- controller.enqueue({
8921
- type: "response-metadata",
8922
- ...getResponseMetadata3(value)
8923
- });
9203
+ if (!metadataExtracted) {
9204
+ const metadata = getResponseMetadata3(value);
9205
+ if (Object.values(metadata).some(Boolean)) {
9206
+ metadataExtracted = true;
9207
+ controller.enqueue({
9208
+ type: "response-metadata",
9209
+ ...getResponseMetadata3(value)
9210
+ });
9211
+ }
8924
9212
  }
8925
9213
  if (value.usage != null) {
8926
9214
  usage.inputTokens = (_a15 = value.usage.prompt_tokens) != null ? _a15 : void 0;
@@ -9092,18 +9380,6 @@ function getSystemMessageMode(modelId) {
9092
9380
  return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
9093
9381
  }
9094
9382
  var reasoningModels = {
9095
- "o1-mini": {
9096
- systemMessageMode: "remove"
9097
- },
9098
- "o1-mini-2024-09-12": {
9099
- systemMessageMode: "remove"
9100
- },
9101
- "o1-preview": {
9102
- systemMessageMode: "remove"
9103
- },
9104
- "o1-preview-2024-09-12": {
9105
- systemMessageMode: "remove"
9106
- },
9107
9383
  o3: {
9108
9384
  systemMessageMode: "developer"
9109
9385
  },
@@ -9639,7 +9915,7 @@ var openaiImageResponseSchema = lazyValidator(
9639
9915
  data: z.array(
9640
9916
  z.object({
9641
9917
  b64_json: z.string(),
9642
- revised_prompt: z.string().optional()
9918
+ revised_prompt: z.string().nullish()
9643
9919
  })
9644
9920
  )
9645
9921
  })
@@ -9784,7 +10060,7 @@ var compoundFilterSchema = z.object({
9784
10060
  z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])
9785
10061
  )
9786
10062
  });
9787
- var fileSearchArgsSchema = lazySchema(
10063
+ var fileSearchArgsSchema2 = lazySchema(
9788
10064
  () => zodSchema(
9789
10065
  z.object({
9790
10066
  vectorStoreIds: z.array(z.string()),
@@ -9813,7 +10089,7 @@ var fileSearchOutputSchema = lazySchema(
9813
10089
  })
9814
10090
  )
9815
10091
  );
9816
- var fileSearch = createProviderDefinedToolFactoryWithOutputSchema({
10092
+ var fileSearch2 = createProviderDefinedToolFactoryWithOutputSchema({
9817
10093
  id: "openai.file_search",
9818
10094
  name: "file_search",
9819
10095
  inputSchema: z.object({}),
@@ -9907,7 +10183,13 @@ var webSearchOutputSchema = lazySchema(
9907
10183
  url: z.string(),
9908
10184
  pattern: z.string()
9909
10185
  })
9910
- ])
10186
+ ]),
10187
+ sources: z.array(
10188
+ z.discriminatedUnion("type", [
10189
+ z.object({ type: z.literal("url"), url: z.string() }),
10190
+ z.object({ type: z.literal("api"), name: z.string() })
10191
+ ])
10192
+ ).optional()
9911
10193
  })
9912
10194
  )
9913
10195
  );
@@ -9985,7 +10267,7 @@ var openaiTools = {
9985
10267
  * @param ranking - The ranking options to use for the file search.
9986
10268
  * @param filters - The filters to use for the file search.
9987
10269
  */
9988
- fileSearch,
10270
+ fileSearch: fileSearch2,
9989
10271
  /**
9990
10272
  * The image generation tool allows you to generate images using a text prompt,
9991
10273
  * and optionally image inputs. It leverages the GPT Image model,
@@ -10465,7 +10747,13 @@ var openaiResponsesChunkSchema = lazyValidator(
10465
10747
  action: z.discriminatedUnion("type", [
10466
10748
  z.object({
10467
10749
  type: z.literal("search"),
10468
- query: z.string().nullish()
10750
+ query: z.string().nullish(),
10751
+ sources: z.array(
10752
+ z.discriminatedUnion("type", [
10753
+ z.object({ type: z.literal("url"), url: z.string() }),
10754
+ z.object({ type: z.literal("api"), name: z.string() })
10755
+ ])
10756
+ ).nullish()
10469
10757
  }),
10470
10758
  z.object({
10471
10759
  type: z.literal("open_page"),
@@ -10573,10 +10861,13 @@ var openaiResponsesChunkSchema = lazyValidator(
10573
10861
  }),
10574
10862
  z.object({
10575
10863
  type: z.literal("error"),
10576
- code: z.string(),
10577
- message: z.string(),
10578
- param: z.string().nullish(),
10579
- sequence_number: z.number()
10864
+ sequence_number: z.number(),
10865
+ error: z.object({
10866
+ type: z.string(),
10867
+ code: z.string(),
10868
+ message: z.string(),
10869
+ param: z.string().nullish()
10870
+ })
10580
10871
  }),
10581
10872
  z.object({ type: z.string() }).loose().transform((value) => ({
10582
10873
  type: "unknown_chunk",
@@ -10589,13 +10880,15 @@ var openaiResponsesChunkSchema = lazyValidator(
10589
10880
  var openaiResponsesResponseSchema = lazyValidator(
10590
10881
  () => zodSchema(
10591
10882
  z.object({
10592
- id: z.string(),
10593
- created_at: z.number(),
10883
+ id: z.string().optional(),
10884
+ created_at: z.number().optional(),
10594
10885
  error: z.object({
10595
- code: z.string(),
10596
- message: z.string()
10886
+ message: z.string(),
10887
+ type: z.string(),
10888
+ param: z.string().nullish(),
10889
+ code: z.string()
10597
10890
  }).nullish(),
10598
- model: z.string(),
10891
+ model: z.string().optional(),
10599
10892
  output: z.array(
10600
10893
  z.discriminatedUnion("type", [
10601
10894
  z.object({
@@ -10637,7 +10930,18 @@ var openaiResponsesResponseSchema = lazyValidator(
10637
10930
  quote: z.string().nullish()
10638
10931
  }),
10639
10932
  z.object({
10640
- type: z.literal("container_file_citation")
10933
+ type: z.literal("container_file_citation"),
10934
+ container_id: z.string(),
10935
+ file_id: z.string(),
10936
+ filename: z.string().nullish(),
10937
+ start_index: z.number().nullish(),
10938
+ end_index: z.number().nullish(),
10939
+ index: z.number().nullish()
10940
+ }),
10941
+ z.object({
10942
+ type: z.literal("file_path"),
10943
+ file_id: z.string(),
10944
+ index: z.number().nullish()
10641
10945
  })
10642
10946
  ])
10643
10947
  )
@@ -10651,7 +10955,13 @@ var openaiResponsesResponseSchema = lazyValidator(
  action: z.discriminatedUnion("type", [
  z.object({
  type: z.literal("search"),
- query: z.string().nullish()
+ query: z.string().nullish(),
+ sources: z.array(
+ z.discriminatedUnion("type", [
+ z.object({ type: z.literal("url"), url: z.string() }),
+ z.object({ type: z.literal("api"), name: z.string() })
+ ])
+ ).nullish()
  }),
  z.object({
  type: z.literal("open_page"),
@@ -10670,7 +10980,10 @@ var openaiResponsesResponseSchema = lazyValidator(
  queries: z.array(z.string()),
  results: z.array(
  z.object({
- attributes: z.record(z.string(), z.unknown()),
+ attributes: z.record(
+ z.string(),
+ z.union([z.string(), z.number(), z.boolean()])
+ ),
  file_id: z.string(),
  filename: z.string(),
  score: z.number(),
@@ -10732,7 +11045,7 @@ var openaiResponsesResponseSchema = lazyValidator(
  )
  })
  ])
- ),
+ ).optional(),
  service_tier: z.string().nullish(),
  incomplete_details: z.object({ reason: z.string() }).nullish(),
  usage: z.object({
@@ -10740,7 +11053,7 @@ var openaiResponsesResponseSchema = lazyValidator(
  input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
  output_tokens: z.number(),
  output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish()
- })
+ }).optional()
  })
  )
  );
@@ -10748,6 +11061,7 @@ var TOP_LOGPROBS_MAX = 20;
  var openaiResponsesProviderOptionsSchema = lazyValidator(
  () => zodSchema(
  z.object({
+ conversation: z.string().nullish(),
  include: z.array(
  z.enum([
  "reasoning.encrypted_content",
@@ -10780,6 +11094,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator(
  parallelToolCalls: z.boolean().nullish(),
  previousResponseId: z.string().nullish(),
  promptCacheKey: z.string().nullish(),
+ /**
+ * The retention policy for the prompt cache.
+ * - 'in_memory': Default. Standard prompt caching behavior.
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+ * Currently only available for 5.1 series models.
+ *
+ * @default 'in_memory'
+ */
+ promptCacheRetention: z.enum(["in_memory", "24h"]).nullish(),
  reasoningEffort: z.string().nullish(),
  reasoningSummary: z.string().nullish(),
  safetyIdentifier: z.string().nullish(),
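
The `promptCacheRetention` option documented in the hunk above is validated by `openaiResponsesProviderOptionsSchema` and forwarded to the Responses API as `prompt_cache_retention` (see the hunk at -11057 further down). A minimal usage sketch, assuming the AI SDK-style `generateText`/`providerOptions` surface that this bundle mirrors; the package imports, model id, and prompt are illustrative, not taken from this diff:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hedged sketch: opt into extended (24h) prompt cache retention via provider options.
// 'in_memory' is the default; '24h' is documented above as limited to 5.1 series models.
const { text } = await generateText({
  model: openai.responses("gpt-5.1"), // placeholder model id
  prompt: "Summarize the latest release notes.",
  providerOptions: {
    openai: {
      promptCacheKey: "release-notes-v1", // existing option shown above
      promptCacheRetention: "24h",
    },
  },
});
```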
@@ -10819,7 +11142,7 @@ async function prepareResponsesTools({
  case "openai.file_search": {
  const args = await validateTypes({
  value: tool2.args,
- schema: fileSearchArgsSchema
+ schema: fileSearchArgsSchema2
  });
  openaiTools2.push({
  type: "file_search",
@@ -10985,6 +11308,13 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "conversation",
+ details: "conversation and previousResponseId cannot be used together"
+ });
+ }
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode,
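
The guard added in the hunk above emits an `unsupported-setting` warning when both `conversation` and `previousResponseId` are supplied, rather than sending both to the API. A standalone sketch of the same check, assuming the warning shape shown in the diff; the type names are illustrative:

```ts
// Sketch of the mutual-exclusion check added above (type names are assumptions for illustration).
type CallWarning = { type: "unsupported-setting"; setting: string; details?: string };

interface OpenAIResponsesOptions {
  conversation?: string | null;
  previousResponseId?: string | null;
}

function warnOnConflictingContinuation(
  openaiOptions: OpenAIResponsesOptions | undefined,
  warnings: CallWarning[],
): void {
  if (openaiOptions?.conversation && openaiOptions?.previousResponseId) {
    warnings.push({
      type: "unsupported-setting",
      setting: "conversation",
      details: "conversation and previousResponseId cannot be used together",
    });
  }
}
```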
@@ -11047,6 +11377,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  // provider options:
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
  max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -11057,6 +11388,7 @@ var OpenAIResponsesLanguageModel = class {
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
  include,
  prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+ prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
  top_logprobs: topLogprobs,
  truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -11266,7 +11598,14 @@ var OpenAIResponsesLanguageModel = class {
  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId(),
  mediaType: "text/plain",
  title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
- filename: (_l = annotation.filename) != null ? _l : annotation.file_id
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
+ ...annotation.file_id ? {
+ providerMetadata: {
+ openai: {
+ fileId: annotation.file_id
+ }
+ }
+ } : {}
  });
  }
  }
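
With this change, file citation annotations that carry a `file_id` now surface it as `providerMetadata.openai.fileId` on the emitted source (the streaming path gets the same treatment in the hunk at -11844 below). A hedged sketch of just the fields visible in the hunk above, for a citation with a `file_id` but no filename; other properties of the source part are omitted, and the ids are placeholders:

```ts
// Hypothetical field values for a file citation annotation with file_id set but no filename/quote.
const citationFields = {
  id: "gen_abc123", // generated id
  mediaType: "text/plain",
  title: "Document", // falls back to "Document" when quote and filename are absent
  filename: "file-XYZ", // filename falls back to file_id
  providerMetadata: {
    openai: { fileId: "file-XYZ" }, // only spread in when annotation.file_id is set
  },
};
```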
@@ -11376,7 +11715,9 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  const providerMetadata = {
- openai: { responseId: response.id }
+ openai: {
+ ...response.id != null ? { responseId: response.id } : {}
+ }
  };
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
@@ -11384,6 +11725,7 @@ var OpenAIResponsesLanguageModel = class {
  if (typeof response.service_tier === "string") {
  providerMetadata.openai.serviceTier = response.service_tier;
  }
+ const usage = response.usage;
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -11391,11 +11733,11 @@
  hasFunctionCall
  }),
  usage: {
- inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
- cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+ inputTokens: usage.input_tokens,
+ outputTokens: usage.output_tokens,
+ totalTokens: usage.input_tokens + usage.output_tokens,
+ reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+ cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
  },
  request: { body },
  response: {
@@ -11844,7 +12186,14 @@ var OpenAIResponsesLanguageModel = class {
  id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId(),
  mediaType: "text/plain",
  title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
- filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id
+ filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
+ ...value.annotation.file_id ? {
+ providerMetadata: {
+ openai: {
+ fileId: value.annotation.file_id
+ }
+ }
+ } : {}
  });
  }
  } else if (isErrorChunk(value)) {
@@ -11922,13 +12271,6 @@ function getResponsesModelConfig(modelId) {
  };
  }
  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
- if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
- return {
- ...defaults,
- isReasoningModel: true,
- systemMessageMode: "remove"
- };
- }
  return {
  ...defaults,
  isReasoningModel: true,
@@ -11944,7 +12286,11 @@ function mapWebSearchOutput(action) {
  var _a15;
  switch (action.type) {
  case "search":
- return { action: { type: "search", query: (_a15 = action.query) != null ? _a15 : void 0 } };
+ return {
+ action: { type: "search", query: (_a15 = action.query) != null ? _a15 : void 0 },
+ // include sources when provided by the Responses API (behind include flag)
+ ...action.sources != null && { sources: action.sources }
+ };
  case "open_page":
  return { action: { type: "openPage", url: action.url } };
  case "find":
@@ -12283,7 +12629,7 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var VERSION5 = "2.0.53" ;
+ var VERSION5 = "2.0.69" ;
  function createOpenAI(options = {}) {
  var _a15, _b;
  const baseURL = (_a15 = withoutTrailingSlash(
@@ -12380,6 +12726,6 @@ function createOpenAI(options = {}) {
  }
  createOpenAI();

- export { APICallError, EmptyResponseBodyError, EventSourceParserStream, InvalidArgumentError, JSONParseError, LoadAPIKeyError, MastraModelGateway, NoSuchModelError, OpenAICompatibleImageModel, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError, combineHeaders, convertToBase64, createAnthropic, createEventSourceResponseHandler, createGoogleGenerativeAI, createJsonErrorResponseHandler, createJsonResponseHandler, createOpenAI, createOpenAICompatible, generateId, loadApiKey, parseProviderOptions, postJsonToApi, withUserAgentSuffix, withoutTrailingSlash };
- //# sourceMappingURL=chunk-JPGVRWWL.js.map
- //# sourceMappingURL=chunk-JPGVRWWL.js.map
+ export { MastraModelGateway, NoSuchModelError, OpenAICompatibleImageModel, TooManyEmbeddingValuesForCallError, UnsupportedFunctionalityError, combineHeaders, convertToBase64, createAnthropic, createEventSourceResponseHandler, createGoogleGenerativeAI, createJsonErrorResponseHandler, createJsonResponseHandler, createOpenAI, createOpenAICompatible, generateId, injectJsonInstructionIntoMessages, loadApiKey, parseProviderOptions, postJsonToApi, withUserAgentSuffix, withoutTrailingSlash };
+ //# sourceMappingURL=chunk-MDKPL2R2.js.map
+ //# sourceMappingURL=chunk-MDKPL2R2.js.map