@mastra/core 1.0.0-beta.3 → 1.0.0-beta.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (314)
  1. package/CHANGELOG.md +369 -0
  2. package/dist/agent/agent.d.ts +2 -2
  3. package/dist/agent/agent.d.ts.map +1 -1
  4. package/dist/agent/agent.types.d.ts +2 -0
  5. package/dist/agent/agent.types.d.ts.map +1 -1
  6. package/dist/agent/index.cjs +9 -9
  7. package/dist/agent/index.js +2 -2
  8. package/dist/agent/message-list/index.cjs +3 -3
  9. package/dist/agent/message-list/index.d.ts +5 -0
  10. package/dist/agent/message-list/index.d.ts.map +1 -1
  11. package/dist/agent/message-list/index.js +1 -1
  12. package/dist/agent/message-list/prompt/attachments-to-parts.d.ts.map +1 -1
  13. package/dist/agent/types.d.ts +2 -3
  14. package/dist/agent/types.d.ts.map +1 -1
  15. package/dist/agent/utils.d.ts.map +1 -1
  16. package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
  17. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
  18. package/dist/agent/workflows/prepare-stream/stream-step.d.ts +8 -1
  19. package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
  20. package/dist/{chunk-4DWZ4Z6H.js → chunk-3B2OPLGG.js} +13 -7
  21. package/dist/chunk-3B2OPLGG.js.map +1 -0
  22. package/dist/{chunk-GRGPQ32U.js → chunk-3ZQ7LX73.js} +13 -13
  23. package/dist/chunk-3ZQ7LX73.js.map +1 -0
  24. package/dist/{chunk-VZGBVYXA.cjs → chunk-556MJ7CL.cjs} +33 -27
  25. package/dist/chunk-556MJ7CL.cjs.map +1 -0
  26. package/dist/{chunk-HGNRQ3OG.js → chunk-5O52O25J.js} +15 -8
  27. package/dist/chunk-5O52O25J.js.map +1 -0
  28. package/dist/{chunk-VU6DVS7J.js → chunk-5SA2EZ33.js} +421 -29
  29. package/dist/chunk-5SA2EZ33.js.map +1 -0
  30. package/dist/chunk-6XCINXZ7.cjs +194 -0
  31. package/dist/chunk-6XCINXZ7.cjs.map +1 -0
  32. package/dist/{chunk-KOSW5PP5.js → chunk-7ZADRRDW.js} +466 -125
  33. package/dist/chunk-7ZADRRDW.js.map +1 -0
  34. package/dist/{chunk-JXESKY4A.js → chunk-B5J5HYDN.js} +7 -5
  35. package/dist/chunk-B5J5HYDN.js.map +1 -0
  36. package/dist/chunk-BWYU7D33.js +192 -0
  37. package/dist/chunk-BWYU7D33.js.map +1 -0
  38. package/dist/{chunk-MCUX2D5Q.js → chunk-D7CJ4HIQ.js} +263 -24
  39. package/dist/chunk-D7CJ4HIQ.js.map +1 -0
  40. package/dist/{chunk-G36A2JRR.cjs → chunk-DQIZ5FFX.cjs} +457 -326
  41. package/dist/chunk-DQIZ5FFX.cjs.map +1 -0
  42. package/dist/{chunk-ZPMFINU2.cjs → chunk-HNHZGFZY.cjs} +466 -131
  43. package/dist/chunk-HNHZGFZY.cjs.map +1 -0
  44. package/dist/{chunk-3VOUB4ZU.cjs → chunk-IITXXVYI.cjs} +17 -17
  45. package/dist/chunk-IITXXVYI.cjs.map +1 -0
  46. package/dist/{chunk-QUKUN6NR.cjs → chunk-ISMGVGUM.cjs} +105 -5
  47. package/dist/chunk-ISMGVGUM.cjs.map +1 -0
  48. package/dist/{chunk-OQF4H5Y2.js → chunk-KJIQGPQR.js} +4 -4
  49. package/dist/{chunk-OQF4H5Y2.js.map → chunk-KJIQGPQR.js.map} +1 -1
  50. package/dist/{chunk-OWX2PUFH.cjs → chunk-KP42JLXE.cjs} +506 -236
  51. package/dist/chunk-KP42JLXE.cjs.map +1 -0
  52. package/dist/{chunk-4RSHBKDJ.cjs → chunk-KWWD3U7G.cjs} +5 -5
  53. package/dist/chunk-KWWD3U7G.cjs.map +1 -0
  54. package/dist/{chunk-N4SJ4YX7.cjs → chunk-NHP6ZIDG.cjs} +271 -31
  55. package/dist/chunk-NHP6ZIDG.cjs.map +1 -0
  56. package/dist/{chunk-UIZSWUKP.js → chunk-NZAXAFI3.js} +104 -6
  57. package/dist/chunk-NZAXAFI3.js.map +1 -0
  58. package/dist/{chunk-O6NA3Z43.cjs → chunk-OUUPUAGA.cjs} +10 -8
  59. package/dist/chunk-OUUPUAGA.cjs.map +1 -0
  60. package/dist/{chunk-D6EDHNGV.js → chunk-PC6EKOWK.js} +64 -11
  61. package/dist/chunk-PC6EKOWK.js.map +1 -0
  62. package/dist/{chunk-YQ7NLZZ3.cjs → chunk-QGWNF2QJ.cjs} +74 -618
  63. package/dist/chunk-QGWNF2QJ.cjs.map +1 -0
  64. package/dist/{chunk-HBJPYQRN.cjs → chunk-RROQ46B6.cjs} +69 -16
  65. package/dist/chunk-RROQ46B6.cjs.map +1 -0
  66. package/dist/{chunk-G3OOCXAI.js → chunk-T2CJRA6E.js} +4 -4
  67. package/dist/chunk-T2CJRA6E.js.map +1 -0
  68. package/dist/{chunk-T3WZCEC4.js → chunk-T2UNO766.js} +47 -591
  69. package/dist/chunk-T2UNO766.js.map +1 -0
  70. package/dist/{chunk-EZVRSZMK.cjs → chunk-U3VE2EVB.cjs} +11 -11
  71. package/dist/{chunk-EZVRSZMK.cjs.map → chunk-U3VE2EVB.cjs.map} +1 -1
  72. package/dist/{chunk-4IKJAKCD.cjs → chunk-V537VSV4.cjs} +74 -16
  73. package/dist/chunk-V537VSV4.cjs.map +1 -0
  74. package/dist/{chunk-XRIVPHXV.cjs → chunk-VYJXTHII.cjs} +422 -30
  75. package/dist/chunk-VYJXTHII.cjs.map +1 -0
  76. package/dist/{chunk-LRSB62Z6.cjs → chunk-X7F4CSGR.cjs} +15 -8
  77. package/dist/chunk-X7F4CSGR.cjs.map +1 -0
  78. package/dist/{chunk-CKGIPST2.js → chunk-XBO6W7LZ.js} +462 -193
  79. package/dist/chunk-XBO6W7LZ.js.map +1 -0
  80. package/dist/{chunk-KEURQGCQ.js → chunk-XIDKHXNR.js} +74 -17
  81. package/dist/chunk-XIDKHXNR.js.map +1 -0
  82. package/dist/{chunk-JTXVR2RA.cjs → chunk-XJQX54QP.cjs} +5 -5
  83. package/dist/{chunk-JTXVR2RA.cjs.map → chunk-XJQX54QP.cjs.map} +1 -1
  84. package/dist/{chunk-BAMR7HKO.js → chunk-YDFX3JR2.js} +457 -326
  85. package/dist/chunk-YDFX3JR2.js.map +1 -0
  86. package/dist/{chunk-5CWWU22H.js → chunk-ZTTMSCLU.js} +3 -3
  87. package/dist/{chunk-5CWWU22H.js.map → chunk-ZTTMSCLU.js.map} +1 -1
  88. package/dist/evals/base.d.ts +1 -1
  89. package/dist/evals/base.d.ts.map +1 -1
  90. package/dist/evals/base.test-utils.d.ts +25 -25
  91. package/dist/evals/index.cjs +4 -4
  92. package/dist/evals/index.js +1 -1
  93. package/dist/evals/scoreTraces/index.cjs +3 -3
  94. package/dist/evals/scoreTraces/index.js +1 -1
  95. package/dist/index.cjs +2 -2
  96. package/dist/index.js +1 -1
  97. package/dist/integration/index.cjs +2 -2
  98. package/dist/integration/index.js +1 -1
  99. package/dist/llm/index.cjs +18 -14
  100. package/dist/llm/index.d.ts +2 -1
  101. package/dist/llm/index.d.ts.map +1 -1
  102. package/dist/llm/index.js +5 -5
  103. package/dist/llm/model/aisdk/v5/model.d.ts.map +1 -1
  104. package/dist/llm/model/gateway-resolver.d.ts.map +1 -1
  105. package/dist/llm/model/gateways/azure.d.ts +36 -0
  106. package/dist/llm/model/gateways/azure.d.ts.map +1 -0
  107. package/dist/llm/model/gateways/base.d.ts +3 -6
  108. package/dist/llm/model/gateways/base.d.ts.map +1 -1
  109. package/dist/llm/model/gateways/index.d.ts +4 -1
  110. package/dist/llm/model/gateways/index.d.ts.map +1 -1
  111. package/dist/llm/model/gateways/models-dev.d.ts +2 -2
  112. package/dist/llm/model/gateways/models-dev.d.ts.map +1 -1
  113. package/dist/llm/model/gateways/netlify.d.ts +2 -2
  114. package/dist/llm/model/gateways/netlify.d.ts.map +1 -1
  115. package/dist/llm/model/model.d.ts.map +1 -1
  116. package/dist/llm/model/model.loop.d.ts +1 -1
  117. package/dist/llm/model/model.loop.d.ts.map +1 -1
  118. package/dist/llm/model/provider-registry.d.ts.map +1 -1
  119. package/dist/llm/model/provider-types.generated.d.ts +192 -11
  120. package/dist/llm/model/registry-generator.d.ts +12 -0
  121. package/dist/llm/model/registry-generator.d.ts.map +1 -1
  122. package/dist/llm/model/router.d.ts.map +1 -1
  123. package/dist/loop/index.cjs +2 -2
  124. package/dist/loop/index.js +1 -1
  125. package/dist/loop/loop.d.ts.map +1 -1
  126. package/dist/loop/network/index.d.ts.map +1 -1
  127. package/dist/loop/test-utils/generateText.d.ts.map +1 -1
  128. package/dist/loop/test-utils/resultObject.d.ts.map +1 -1
  129. package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
  130. package/dist/loop/test-utils/toUIMessageStream.d.ts.map +1 -1
  131. package/dist/loop/test-utils/utils.d.ts.map +1 -1
  132. package/dist/loop/types.d.ts +8 -0
  133. package/dist/loop/types.d.ts.map +1 -1
  134. package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
  135. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  136. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
  137. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts +1 -1
  138. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts.map +1 -1
  139. package/dist/mastra/index.cjs +2 -2
  140. package/dist/mastra/index.d.ts +9 -9
  141. package/dist/mastra/index.d.ts.map +1 -1
  142. package/dist/mastra/index.js +1 -1
  143. package/dist/mcp/index.cjs.map +1 -1
  144. package/dist/mcp/index.d.ts +4 -4
  145. package/dist/mcp/index.d.ts.map +1 -1
  146. package/dist/mcp/index.js.map +1 -1
  147. package/dist/mcp/types.d.ts +2 -2
  148. package/dist/mcp/types.d.ts.map +1 -1
  149. package/dist/memory/index.cjs +89 -34
  150. package/dist/memory/index.cjs.map +1 -1
  151. package/dist/memory/index.js +85 -30
  152. package/dist/memory/index.js.map +1 -1
  153. package/dist/memory/mock.d.ts +9 -13
  154. package/dist/memory/mock.d.ts.map +1 -1
  155. package/dist/models-dev-F6MTIYTO.js +3 -0
  156. package/dist/{models-dev-GCVENVWA.js.map → models-dev-F6MTIYTO.js.map} +1 -1
  157. package/dist/models-dev-XIVR5EJV.cjs +12 -0
  158. package/dist/{models-dev-TIBJR6IG.cjs.map → models-dev-XIVR5EJV.cjs.map} +1 -1
  159. package/dist/netlify-MXBOGAJR.cjs +12 -0
  160. package/dist/{netlify-NTSNNT6F.cjs.map → netlify-MXBOGAJR.cjs.map} +1 -1
  161. package/dist/netlify-RX3JXCFQ.js +3 -0
  162. package/dist/{netlify-O5NJW7CF.js.map → netlify-RX3JXCFQ.js.map} +1 -1
  163. package/dist/processors/index.cjs +11 -11
  164. package/dist/processors/index.d.ts +2 -2
  165. package/dist/processors/index.d.ts.map +1 -1
  166. package/dist/processors/index.js +1 -1
  167. package/dist/processors/processors/batch-parts.d.ts +1 -1
  168. package/dist/processors/processors/batch-parts.d.ts.map +1 -1
  169. package/dist/processors/processors/language-detector.d.ts +1 -1
  170. package/dist/processors/processors/language-detector.d.ts.map +1 -1
  171. package/dist/processors/processors/moderation.d.ts +1 -1
  172. package/dist/processors/processors/moderation.d.ts.map +1 -1
  173. package/dist/processors/processors/pii-detector.d.ts +1 -1
  174. package/dist/processors/processors/pii-detector.d.ts.map +1 -1
  175. package/dist/processors/processors/prompt-injection-detector.d.ts +1 -1
  176. package/dist/processors/processors/prompt-injection-detector.d.ts.map +1 -1
  177. package/dist/processors/processors/structured-output.d.ts +1 -1
  178. package/dist/processors/processors/structured-output.d.ts.map +1 -1
  179. package/dist/processors/processors/system-prompt-scrubber.d.ts +1 -1
  180. package/dist/processors/processors/system-prompt-scrubber.d.ts.map +1 -1
  181. package/dist/processors/processors/token-limiter.d.ts +1 -1
  182. package/dist/processors/processors/token-limiter.d.ts.map +1 -1
  183. package/dist/processors/processors/unicode-normalizer.d.ts +1 -1
  184. package/dist/processors/processors/unicode-normalizer.d.ts.map +1 -1
  185. package/dist/provider-registry-3LUCE7FT.js +3 -0
  186. package/dist/{provider-registry-74GMFZKT.js.map → provider-registry-3LUCE7FT.js.map} +1 -1
  187. package/dist/provider-registry-NBRXBOQT.cjs +40 -0
  188. package/dist/{provider-registry-BZP3DIIV.cjs.map → provider-registry-NBRXBOQT.cjs.map} +1 -1
  189. package/dist/provider-registry.json +400 -22
  190. package/dist/{registry-generator-JPCV47SC.cjs → registry-generator-DEPPRYYJ.cjs} +21 -6
  191. package/dist/registry-generator-DEPPRYYJ.cjs.map +1 -0
  192. package/dist/{registry-generator-XD4FPZTU.js → registry-generator-FLW6NV42.js} +21 -7
  193. package/dist/registry-generator-FLW6NV42.js.map +1 -0
  194. package/dist/relevance/index.cjs +2 -2
  195. package/dist/relevance/index.js +1 -1
  196. package/dist/server/auth.d.ts +11 -0
  197. package/dist/server/auth.d.ts.map +1 -1
  198. package/dist/server/composite-auth.d.ts +9 -0
  199. package/dist/server/composite-auth.d.ts.map +1 -0
  200. package/dist/server/index.cjs +41 -0
  201. package/dist/server/index.cjs.map +1 -1
  202. package/dist/server/index.d.ts +1 -0
  203. package/dist/server/index.d.ts.map +1 -1
  204. package/dist/server/index.js +41 -1
  205. package/dist/server/index.js.map +1 -1
  206. package/dist/storage/index.cjs +29 -29
  207. package/dist/storage/index.js +1 -1
  208. package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts.map +1 -1
  209. package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
  210. package/dist/stream/base/output.d.ts.map +1 -1
  211. package/dist/stream/index.cjs +11 -11
  212. package/dist/stream/index.js +2 -2
  213. package/dist/stream/types.d.ts +7 -2
  214. package/dist/stream/types.d.ts.map +1 -1
  215. package/dist/test-utils/llm-mock.cjs +68 -31
  216. package/dist/test-utils/llm-mock.cjs.map +1 -1
  217. package/dist/test-utils/llm-mock.d.ts +4 -2
  218. package/dist/test-utils/llm-mock.d.ts.map +1 -1
  219. package/dist/test-utils/llm-mock.js +67 -30
  220. package/dist/test-utils/llm-mock.js.map +1 -1
  221. package/dist/tools/index.cjs +6 -6
  222. package/dist/tools/index.js +2 -2
  223. package/dist/tools/is-vercel-tool.cjs +2 -2
  224. package/dist/tools/is-vercel-tool.js +1 -1
  225. package/dist/tools/stream.d.ts +1 -0
  226. package/dist/tools/stream.d.ts.map +1 -1
  227. package/dist/tools/tool-builder/builder.d.ts +2 -0
  228. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  229. package/dist/tools/tool.d.ts +6 -6
  230. package/dist/tools/tool.d.ts.map +1 -1
  231. package/dist/tools/types.d.ts +6 -2
  232. package/dist/tools/types.d.ts.map +1 -1
  233. package/dist/tools/validation.d.ts +13 -1
  234. package/dist/tools/validation.d.ts.map +1 -1
  235. package/dist/utils.cjs +22 -22
  236. package/dist/utils.d.ts +1 -1
  237. package/dist/utils.d.ts.map +1 -1
  238. package/dist/utils.js +1 -1
  239. package/dist/vector/filter/index.cjs +7 -189
  240. package/dist/vector/filter/index.cjs.map +1 -1
  241. package/dist/vector/filter/index.js +1 -190
  242. package/dist/vector/filter/index.js.map +1 -1
  243. package/dist/vector/index.cjs +5 -0
  244. package/dist/vector/index.cjs.map +1 -1
  245. package/dist/vector/index.d.ts +1 -0
  246. package/dist/vector/index.d.ts.map +1 -1
  247. package/dist/vector/index.js +1 -0
  248. package/dist/vector/index.js.map +1 -1
  249. package/dist/vector/types.d.ts +86 -3
  250. package/dist/vector/types.d.ts.map +1 -1
  251. package/dist/vector/vector.d.ts +39 -2
  252. package/dist/vector/vector.d.ts.map +1 -1
  253. package/dist/voice/aisdk/index.d.ts +3 -0
  254. package/dist/voice/aisdk/index.d.ts.map +1 -0
  255. package/dist/voice/aisdk/speech.d.ts +23 -0
  256. package/dist/voice/aisdk/speech.d.ts.map +1 -0
  257. package/dist/voice/aisdk/transcription.d.ts +22 -0
  258. package/dist/voice/aisdk/transcription.d.ts.map +1 -0
  259. package/dist/voice/composite-voice.d.ts +4 -3
  260. package/dist/voice/composite-voice.d.ts.map +1 -1
  261. package/dist/voice/index.cjs +12 -4
  262. package/dist/voice/index.d.ts +1 -0
  263. package/dist/voice/index.d.ts.map +1 -1
  264. package/dist/voice/index.js +1 -1
  265. package/dist/workflows/default.d.ts.map +1 -1
  266. package/dist/workflows/evented/index.cjs +10 -10
  267. package/dist/workflows/evented/index.js +1 -1
  268. package/dist/workflows/evented/step-executor.d.ts.map +1 -1
  269. package/dist/workflows/index.cjs +23 -19
  270. package/dist/workflows/index.js +1 -1
  271. package/dist/workflows/types.d.ts +2 -2
  272. package/dist/workflows/types.d.ts.map +1 -1
  273. package/dist/workflows/utils.d.ts +7 -0
  274. package/dist/workflows/utils.d.ts.map +1 -1
  275. package/dist/workflows/workflow.d.ts +4 -2
  276. package/dist/workflows/workflow.d.ts.map +1 -1
  277. package/package.json +17 -16
  278. package/src/llm/model/provider-types.generated.d.ts +192 -11
  279. package/dist/chunk-3VOUB4ZU.cjs.map +0 -1
  280. package/dist/chunk-4DWZ4Z6H.js.map +0 -1
  281. package/dist/chunk-4IKJAKCD.cjs.map +0 -1
  282. package/dist/chunk-4RSHBKDJ.cjs.map +0 -1
  283. package/dist/chunk-BAMR7HKO.js.map +0 -1
  284. package/dist/chunk-CKGIPST2.js.map +0 -1
  285. package/dist/chunk-D6EDHNGV.js.map +0 -1
  286. package/dist/chunk-G36A2JRR.cjs.map +0 -1
  287. package/dist/chunk-G3OOCXAI.js.map +0 -1
  288. package/dist/chunk-GRGPQ32U.js.map +0 -1
  289. package/dist/chunk-HBJPYQRN.cjs.map +0 -1
  290. package/dist/chunk-HGNRQ3OG.js.map +0 -1
  291. package/dist/chunk-JXESKY4A.js.map +0 -1
  292. package/dist/chunk-KEURQGCQ.js.map +0 -1
  293. package/dist/chunk-KOSW5PP5.js.map +0 -1
  294. package/dist/chunk-LRSB62Z6.cjs.map +0 -1
  295. package/dist/chunk-MCUX2D5Q.js.map +0 -1
  296. package/dist/chunk-N4SJ4YX7.cjs.map +0 -1
  297. package/dist/chunk-O6NA3Z43.cjs.map +0 -1
  298. package/dist/chunk-OWX2PUFH.cjs.map +0 -1
  299. package/dist/chunk-QUKUN6NR.cjs.map +0 -1
  300. package/dist/chunk-T3WZCEC4.js.map +0 -1
  301. package/dist/chunk-UIZSWUKP.js.map +0 -1
  302. package/dist/chunk-VU6DVS7J.js.map +0 -1
  303. package/dist/chunk-VZGBVYXA.cjs.map +0 -1
  304. package/dist/chunk-XRIVPHXV.cjs.map +0 -1
  305. package/dist/chunk-YQ7NLZZ3.cjs.map +0 -1
  306. package/dist/chunk-ZPMFINU2.cjs.map +0 -1
  307. package/dist/models-dev-GCVENVWA.js +0 -3
  308. package/dist/models-dev-TIBJR6IG.cjs +0 -12
  309. package/dist/netlify-NTSNNT6F.cjs +0 -12
  310. package/dist/netlify-O5NJW7CF.js +0 -3
  311. package/dist/provider-registry-74GMFZKT.js +0 -3
  312. package/dist/provider-registry-BZP3DIIV.cjs +0 -40
  313. package/dist/registry-generator-JPCV47SC.cjs.map +0 -1
  314. package/dist/registry-generator-XD4FPZTU.js.map +0 -1
@@ -520,24 +520,74 @@ function getRuntimeEnvironmentUserAgent(globalThisAny = globalThis) {
  }
  return "runtime/unknown";
  }
- function removeUndefinedEntries(record) {
- return Object.fromEntries(
- Object.entries(record).filter(([_key, value]) => value != null)
- );
+ function normalizeHeaders(headers) {
+ if (headers == null) {
+ return {};
+ }
+ const normalized = {};
+ if (headers instanceof Headers) {
+ headers.forEach((value, key) => {
+ normalized[key.toLowerCase()] = value;
+ });
+ } else {
+ if (!Array.isArray(headers)) {
+ headers = Object.entries(headers);
+ }
+ for (const [key, value] of headers) {
+ if (value != null) {
+ normalized[key.toLowerCase()] = value;
+ }
+ }
+ }
+ return normalized;
  }
  function withUserAgentSuffix(headers, ...userAgentSuffixParts) {
- const cleanedHeaders = removeUndefinedEntries(
- headers != null ? headers : {}
- );
- const normalizedHeaders = new Headers(cleanedHeaders);
+ const normalizedHeaders = new Headers(normalizeHeaders(headers));
  const currentUserAgentHeader = normalizedHeaders.get("user-agent") || "";
  normalizedHeaders.set(
  "user-agent",
  [currentUserAgentHeader, ...userAgentSuffixParts].filter(Boolean).join(" ")
  );
- return Object.fromEntries(normalizedHeaders);
+ return Object.fromEntries(normalizedHeaders.entries());
+ }
+ var VERSION = "3.0.17" ;
+ var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
+ var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
+ var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
+ function injectJsonInstruction({
+ prompt,
+ schema,
+ schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
+ schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
+ }) {
+ return [
+ prompt != null && prompt.length > 0 ? prompt : void 0,
+ prompt != null && prompt.length > 0 ? "" : void 0,
+ // add a newline if prompt is not null
+ schemaPrefix,
+ schema != null ? JSON.stringify(schema) : void 0,
+ schemaSuffix
+ ].filter((line) => line != null).join("\n");
+ }
+ function injectJsonInstructionIntoMessages({
+ messages,
+ schema,
+ schemaPrefix,
+ schemaSuffix
+ }) {
+ var _a15, _b;
+ const systemMessage = ((_a15 = messages[0]) == null ? void 0 : _a15.role) === "system" ? { ...messages[0] } : { role: "system", content: "" };
+ systemMessage.content = injectJsonInstruction({
+ prompt: systemMessage.content,
+ schema,
+ schemaPrefix,
+ schemaSuffix
+ });
+ return [
+ systemMessage,
+ ...((_b = messages[0]) == null ? void 0 : _b.role) === "system" ? messages.slice(1) : messages
+ ];
  }
- var VERSION = "3.0.12" ;
  function loadApiKey({
  apiKey,
  environmentVariableName,
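The replacement of `removeUndefinedEntries` with `normalizeHeaders` means `withUserAgentSuffix` now accepts `Headers` instances, entry arrays, and plain records, lower-casing keys and dropping nullish values before the user-agent suffix is appended. A minimal standalone sketch of the same normalization (not an import from the bundle; the names simply mirror the compiled output above):

```ts
type HeaderInput =
  | Headers
  | Array<[string, string | null | undefined]>
  | Record<string, string | null | undefined>
  | null
  | undefined;

// Mirrors the bundled helper: lower-case keys, drop null/undefined values.
function normalizeHeaders(headers: HeaderInput): Record<string, string> {
  if (headers == null) return {};
  const entries =
    headers instanceof Headers
      ? Array.from(headers.entries())
      : Array.isArray(headers)
        ? headers
        : Object.entries(headers);
  const normalized: Record<string, string> = {};
  for (const [key, value] of entries) {
    if (value != null) normalized[key.toLowerCase()] = value;
  }
  return normalized;
}

function withUserAgentSuffix(headers: HeaderInput, ...suffixParts: string[]): Record<string, string> {
  const normalized = new Headers(normalizeHeaders(headers));
  const current = normalized.get("user-agent") || "";
  normalized.set("user-agent", [current, ...suffixParts].filter(Boolean).join(" "));
  return Object.fromEntries(normalized.entries());
}

// An undefined entry is filtered out instead of throwing inside `new Headers(...)`.
console.log(withUserAgentSuffix({ "X-Api-Key": "abc", "X-Trace": undefined }, "ai-sdk/5"));
// -> { "x-api-key": "abc", "user-agent": "ai-sdk/5" }
```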
@@ -633,7 +683,11 @@ function filter(obj) {
  }
  function secureJsonParse(text) {
  const { stackTraceLimit } = Error;
- Error.stackTraceLimit = 0;
+ try {
+ Error.stackTraceLimit = 0;
+ } catch (e) {
+ return _parse(text);
+ }
  try {
  return _parse(text);
  } finally {
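The new try/catch around `Error.stackTraceLimit` guards against runtimes where that V8-specific property is read-only or absent; parsing then simply proceeds without suppressing stack capture. A hedged re-creation of the pattern, with `JSON.parse` standing in for the bundle's `_parse` helper:

```ts
// Sketch of the defensive pattern; JSON.parse stands in for the bundled
// prototype-pollution-aware _parse function.
function secureJsonParse(text: string): unknown {
  const errorCtor = Error as ErrorConstructor & { stackTraceLimit?: number };
  const previousLimit = errorCtor.stackTraceLimit;
  try {
    errorCtor.stackTraceLimit = 0; // may throw if the runtime froze the constructor
  } catch {
    return JSON.parse(text); // parse without the stack-trace optimization
  }
  try {
    return JSON.parse(text);
  } finally {
    errorCtor.stackTraceLimit = previousLimit;
  }
}
```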
@@ -3558,7 +3612,7 @@ var OpenAICompatibleImageModel = class {
  var openaiCompatibleImageResponseSchema = z4.z.object({
  data: z4.z.array(z4.z.object({ b64_json: z4.z.string() }))
  });
- var VERSION2 = "1.0.22" ;
+ var VERSION2 = "1.0.27" ;
  function createOpenAICompatible(options) {
  const baseURL = withoutTrailingSlash(options.baseURL);
  const providerName = options.name;
@@ -3611,7 +3665,7 @@ var MastraModelGateway = class {
  return this.id;
  }
  };
- var VERSION3 = "2.0.33" ;
+ var VERSION3 = "2.0.45" ;
  var anthropicErrorDataSchema = lazySchema(
  () => zodSchema(
  z4.z.object({
@@ -3813,7 +3867,18 @@ var anthropicMessagesResponseSchema = lazySchema(
  output_tokens: z4.z.number(),
  cache_creation_input_tokens: z4.z.number().nullish(),
  cache_read_input_tokens: z4.z.number().nullish()
- })
+ }),
+ container: z4.z.object({
+ expires_at: z4.z.string(),
+ id: z4.z.string(),
+ skills: z4.z.array(
+ z4.z.object({
+ type: z4.z.union([z4.z.literal("anthropic"), z4.z.literal("custom")]),
+ skill_id: z4.z.string(),
+ version: z4.z.string()
+ })
+ ).nullish()
+ }).nullish()
  })
  )
  );
@@ -4043,7 +4108,21 @@ var anthropicMessagesChunkSchema = lazySchema(
  type: z4.z.literal("message_delta"),
  delta: z4.z.object({
  stop_reason: z4.z.string().nullish(),
- stop_sequence: z4.z.string().nullish()
+ stop_sequence: z4.z.string().nullish(),
+ container: z4.z.object({
+ expires_at: z4.z.string(),
+ id: z4.z.string(),
+ skills: z4.z.array(
+ z4.z.object({
+ type: z4.z.union([
+ z4.z.literal("anthropic"),
+ z4.z.literal("custom")
+ ]),
+ skill_id: z4.z.string(),
+ version: z4.z.string()
+ })
+ ).nullish()
+ }).nullish()
  }),
  usage: z4.z.looseObject({
  output_tokens: z4.z.number(),
@@ -5286,6 +5365,21 @@ var AnthropicMessagesLanguageModel = class {
  setting: "seed"
  });
  }
+ if (temperature != null && temperature > 1) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: `${temperature} exceeds anthropic maximum of 1.0. clamped to 1.0`
+ });
+ temperature = 1;
+ } else if (temperature != null && temperature < 0) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: `${temperature} is below anthropic minimum of 0. clamped to 0`
+ });
+ temperature = 0;
+ }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
  if (responseFormat.schema == null) {
  warnings.push({
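Temperature values outside Anthropic's supported 0–1 range are now clamped rather than forwarded as-is, with an `unsupported-setting` warning recorded. The same clamp restated in isolation (warning shape abbreviated to the fields used above):

```ts
interface CallWarning {
  type: "unsupported-setting";
  setting: string;
  details: string;
}

// Clamp to Anthropic's [0, 1] temperature range and record a warning, as in the hunk above.
function clampAnthropicTemperature(
  temperature: number | undefined,
  warnings: CallWarning[],
): number | undefined {
  if (temperature != null && temperature > 1) {
    warnings.push({
      type: "unsupported-setting",
      setting: "temperature",
      details: `${temperature} exceeds anthropic maximum of 1.0. clamped to 1.0`,
    });
    return 1;
  }
  if (temperature != null && temperature < 0) {
    warnings.push({
      type: "unsupported-setting",
      setting: "temperature",
      details: `${temperature} is below anthropic minimum of 0. clamped to 0`,
    });
    return 0;
  }
  return temperature;
}

const warnings: CallWarning[] = [];
console.log(clampAnthropicTemperature(1.4, warnings)); // 1, with one warning pushed
```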
@@ -5321,7 +5415,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  const isThinking = ((_b = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _b.type) === "enabled";
  const thinkingBudget = (_c = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _c.budgetTokens;
- const maxOutputTokensForModel = getMaxOutputTokensForModel(this.modelId);
+ const { maxOutputTokens: maxOutputTokensForModel, knownModel } = getMaxOutputTokensForModel(this.modelId);
  const maxTokens = maxOutputTokens != null ? maxOutputTokens : maxOutputTokensForModel;
  const baseArgs = {
  // model id:
@@ -5383,7 +5477,7 @@ var AnthropicMessagesLanguageModel = class {
  }
  baseArgs.max_tokens = maxTokens + thinkingBudget;
  }
- if (baseArgs.max_tokens > maxOutputTokensForModel) {
+ if (knownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
  if (maxOutputTokens != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -5478,7 +5572,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  }
  async doGenerate(options) {
- var _a15, _b, _c, _d, _e, _f;
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
  const { args, warnings, betas, usesJsonResponseTool } = await this.getArgs(options);
  const citationDocuments = this.extractCitationDocuments(options.prompt);
  const {
@@ -5729,7 +5823,16 @@ var AnthropicMessagesLanguageModel = class {
  anthropic: {
  usage: response.usage,
  cacheCreationInputTokens: (_e = response.usage.cache_creation_input_tokens) != null ? _e : null,
- stopSequence: (_f = response.stop_sequence) != null ? _f : null
+ stopSequence: (_f = response.stop_sequence) != null ? _f : null,
+ container: response.container ? {
+ expiresAt: response.container.expires_at,
+ id: response.container.id,
+ skills: (_h = (_g = response.container.skills) == null ? void 0 : _g.map((skill) => ({
+ type: skill.type,
+ skillId: skill.skill_id,
+ version: skill.version
+ }))) != null ? _h : null
+ } : null
  }
  }
  };
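Anthropic container (skills) metadata from the response is now surfaced under `providerMetadata.anthropic.container`, converted to camelCase. A sketch of the shape a caller can expect; the type names here are illustrative and are not exports of the package:

```ts
// Illustrative types only, inferred from the mapping in the hunk above.
interface AnthropicContainerSkill {
  type: "anthropic" | "custom";
  skillId: string;
  version: string;
}

interface AnthropicContainerMetadata {
  expiresAt: string;
  id: string;
  skills: AnthropicContainerSkill[] | null;
}

interface AnthropicProviderMetadata {
  usage: Record<string, unknown> | null;
  cacheCreationInputTokens: number | null;
  stopSequence: string | null;
  container: AnthropicContainerMetadata | null;
}

// Reading it from a generate/stream result, assuming providerMetadata follows the shape above.
function logContainer(providerMetadata?: { anthropic?: AnthropicProviderMetadata }): void {
  const container = providerMetadata?.anthropic?.container;
  if (!container) return;
  console.log(`container ${container.id} expires at ${container.expiresAt}`);
  for (const skill of container.skills ?? []) {
    console.log(`  skill ${skill.skillId}@${skill.version} (${skill.type})`);
  }
}
```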
@@ -5759,6 +5862,7 @@ var AnthropicMessagesLanguageModel = class {
  let rawUsage = void 0;
  let cacheCreationInputTokens = null;
  let stopSequence = null;
+ let container = null;
  let blockType = void 0;
  const generateId3 = this.generateId;
  return {
@@ -5768,7 +5872,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a15, _b, _c, _d, _e, _f, _g, _h;
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -5881,7 +5985,8 @@ var AnthropicMessagesLanguageModel = class {
  data: part.content.content.source.data
  }
  }
- }
+ },
+ providerExecuted: true
  });
  } else if (part.content.type === "web_fetch_tool_result_error") {
  controller.enqueue({
@@ -6149,6 +6254,15 @@ var AnthropicMessagesLanguageModel = class {
  isJsonResponseFromTool: usesJsonResponseTool
  });
  stopSequence = (_h = value.delta.stop_sequence) != null ? _h : null;
+ container = value.delta.container != null ? {
+ expiresAt: value.delta.container.expires_at,
+ id: value.delta.container.id,
+ skills: (_j = (_i = value.delta.container.skills) == null ? void 0 : _i.map((skill) => ({
+ type: skill.type,
+ skillId: skill.skill_id,
+ version: skill.version
+ }))) != null ? _j : null
+ } : null;
  rawUsage = {
  ...rawUsage,
  ...value.usage
@@ -6164,7 +6278,8 @@ var AnthropicMessagesLanguageModel = class {
  anthropic: {
  usage: rawUsage != null ? rawUsage : null,
  cacheCreationInputTokens,
- stopSequence
+ stopSequence,
+ container
  }
  }
  });
@@ -6189,13 +6304,15 @@ var AnthropicMessagesLanguageModel = class {
  };
  function getMaxOutputTokensForModel(modelId) {
  if (modelId.includes("claude-sonnet-4-") || modelId.includes("claude-3-7-sonnet") || modelId.includes("claude-haiku-4-5")) {
- return 64e3;
+ return { maxOutputTokens: 64e3, knownModel: true };
  } else if (modelId.includes("claude-opus-4-")) {
- return 32e3;
+ return { maxOutputTokens: 32e3, knownModel: true };
  } else if (modelId.includes("claude-3-5-haiku")) {
- return 8192;
+ return { maxOutputTokens: 8192, knownModel: true };
+ } else if (modelId.includes("claude-3-haiku")) {
+ return { maxOutputTokens: 4096, knownModel: true };
  } else {
- return 4096;
+ return { maxOutputTokens: 4096, knownModel: false };
  }
  }
  var bash_20241022InputSchema = lazySchema(
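`getMaxOutputTokensForModel` now also reports whether the model id was recognized, so the 4096-token fallback is no longer enforced as a hard cap for unknown ids (the cap check above is gated on `knownModel`). A standalone restatement of the mapping:

```ts
interface MaxOutputTokensInfo {
  maxOutputTokens: number;
  knownModel: boolean;
}

// Same substring checks as the bundled helper; unknown ids fall back to 4096
// but are flagged so max_tokens is not forcibly capped for them.
function getMaxOutputTokensForModel(modelId: string): MaxOutputTokensInfo {
  if (
    modelId.includes("claude-sonnet-4-") ||
    modelId.includes("claude-3-7-sonnet") ||
    modelId.includes("claude-haiku-4-5")
  ) {
    return { maxOutputTokens: 64_000, knownModel: true };
  }
  if (modelId.includes("claude-opus-4-")) {
    return { maxOutputTokens: 32_000, knownModel: true };
  }
  if (modelId.includes("claude-3-5-haiku")) {
    return { maxOutputTokens: 8192, knownModel: true };
  }
  if (modelId.includes("claude-3-haiku")) {
    return { maxOutputTokens: 4096, knownModel: true };
  }
  return { maxOutputTokens: 4096, knownModel: false };
}

console.log(getMaxOutputTokensForModel("claude-3-haiku-20240307")); // { maxOutputTokens: 4096, knownModel: true }
console.log(getMaxOutputTokensForModel("some-future-model"));       // { maxOutputTokens: 4096, knownModel: false }
```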
@@ -6531,8 +6648,14 @@ var anthropicTools = {
  webSearch_20250305
  };
  function createAnthropic(options = {}) {
- var _a15;
- const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://api.anthropic.com/v1";
+ var _a15, _b;
+ const baseURL = (_a15 = withoutTrailingSlash(
+ loadOptionalSetting({
+ settingValue: options.baseURL,
+ environmentVariableName: "ANTHROPIC_BASE_URL"
+ })
+ )) != null ? _a15 : "https://api.anthropic.com/v1";
+ const providerName = (_b = options.name) != null ? _b : "anthropic.messages";
  const getHeaders = () => withUserAgentSuffix(
  {
  "anthropic-version": "2023-06-01",
@@ -6548,7 +6671,7 @@ function createAnthropic(options = {}) {
  const createChatModel = (modelId) => {
  var _a22;
  return new AnthropicMessagesLanguageModel(modelId, {
- provider: "anthropic.messages",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  fetch: options.fetch,
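`createAnthropic` now resolves its base URL from the explicit option first, then from the `ANTHROPIC_BASE_URL` environment variable, and only then falls back to the public endpoint; a provider name override is also honored. A hedged sketch of that resolution order, with `withoutTrailingSlash` re-implemented locally as a stand-in for the bundled helper:

```ts
// Resolution order: explicit option, then ANTHROPIC_BASE_URL, then the default endpoint.
function withoutTrailingSlash(url: string | undefined): string | undefined {
  return url?.replace(/\/$/, "");
}

function resolveAnthropicBaseURL(optionBaseURL?: string): string {
  const fromOptionOrEnv = optionBaseURL ?? process.env.ANTHROPIC_BASE_URL ?? undefined;
  return withoutTrailingSlash(fromOptionOrEnv) ?? "https://api.anthropic.com/v1";
}

console.log(resolveAnthropicBaseURL());                       // https://api.anthropic.com/v1 (or $ANTHROPIC_BASE_URL)
console.log(resolveAnthropicBaseURL("https://proxy.local/")); // https://proxy.local
```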
@@ -6579,7 +6702,7 @@ function createAnthropic(options = {}) {
  return provider;
  }
  createAnthropic();
- var VERSION4 = "2.0.23" ;
+ var VERSION4 = "2.0.40" ;
  var googleErrorDataSchema = lazySchema(
  () => zodSchema(
  z4.z.object({
@@ -6879,19 +7002,20 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
  contents.push({
  role: "model",
  parts: content.map((part) => {
- var _a22, _b, _c, _d, _e, _f;
+ var _a22, _b, _c;
+ const thoughtSignature = ((_b = (_a22 = part.providerOptions) == null ? void 0 : _a22.google) == null ? void 0 : _b.thoughtSignature) != null ? String((_c = part.providerOptions.google) == null ? void 0 : _c.thoughtSignature) : void 0;
  switch (part.type) {
  case "text": {
  return part.text.length === 0 ? void 0 : {
  text: part.text,
- thoughtSignature: (_b = (_a22 = part.providerOptions) == null ? void 0 : _a22.google) == null ? void 0 : _b.thoughtSignature
+ thoughtSignature
  };
  }
  case "reasoning": {
  return part.text.length === 0 ? void 0 : {
  text: part.text,
  thought: true,
- thoughtSignature: (_d = (_c = part.providerOptions) == null ? void 0 : _c.google) == null ? void 0 : _d.thoughtSignature
+ thoughtSignature
  };
  }
  case "file": {
@@ -6918,7 +7042,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
  name: part.toolName,
  args: part.input
  },
- thoughtSignature: (_f = (_e = part.providerOptions) == null ? void 0 : _e.google) == null ? void 0 : _f.thoughtSignature
+ thoughtSignature
  };
  }
  }
@@ -7001,7 +7125,9 @@ var googleGenerativeAIProviderOptions = lazySchema(
  responseModalities: z4.z.array(z4.z.enum(["TEXT", "IMAGE"])).optional(),
  thinkingConfig: z4.z.object({
  thinkingBudget: z4.z.number().optional(),
- includeThoughts: z4.z.boolean().optional()
+ includeThoughts: z4.z.boolean().optional(),
+ // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#thinking_level
+ thinkingLevel: z4.z.enum(["low", "medium", "high"]).optional()
  }).optional(),
  /**
  * Optional.
@@ -7089,7 +7215,8 @@ var googleGenerativeAIProviderOptions = lazySchema(
  "9:16",
  "16:9",
  "21:9"
- ]).optional()
+ ]).optional(),
+ imageSize: z4.z.enum(["1K", "2K", "4K"]).optional()
  }).optional()
  })
  )
@@ -7102,8 +7229,14 @@ function prepareTools3({
  var _a15;
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
- const isGemini2 = modelId.includes("gemini-2");
+ const isLatest = [
+ "gemini-flash-latest",
+ "gemini-flash-lite-latest",
+ "gemini-pro-latest"
+ ].some((id) => id === modelId);
+ const isGemini2orNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
+ const supportsFileSearch = modelId.includes("gemini-2.5");
  if (tools == null) {
  return { tools: void 0, toolConfig: void 0, toolWarnings };
  }
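Provider-defined tool gating for Google models is now keyed on a broader `isGemini2orNewer` check (Gemini 2.x, Gemini 3, and the `-latest` aliases), with file search additionally restricted to Gemini 2.5. The predicates extracted for readability:

```ts
// Same gating logic as the hunk above, isolated into two predicates.
const LATEST_ALIASES = ["gemini-flash-latest", "gemini-flash-lite-latest", "gemini-pro-latest"];

function isGemini2orNewer(modelId: string): boolean {
  return (
    modelId.includes("gemini-2") ||
    modelId.includes("gemini-3") ||
    LATEST_ALIASES.some((id) => id === modelId)
  );
}

function supportsFileSearch(modelId: string): boolean {
  return modelId.includes("gemini-2.5");
}

console.log(isGemini2orNewer("gemini-3-pro-preview")); // true
console.log(supportsFileSearch("gemini-2.0-flash"));   // false
```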
@@ -7112,10 +7245,11 @@ function prepareTools3({
  (tool2) => tool2.type === "provider-defined"
  );
  if (hasFunctionTools && hasProviderDefinedTools) {
+ const functionTools = tools.filter((tool2) => tool2.type === "function");
  toolWarnings.push({
  type: "unsupported-tool",
  tool: tools.find((tool2) => tool2.type === "function"),
- details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+ details: `Cannot mix function tools with provider-defined tools in the same request. Falling back to provider-defined tools only. The following function tools will be ignored: ${functionTools.map((t) => t.name).join(", ")}. Please use either function tools or provider-defined tools, but not both.`
  });
  }
  if (hasProviderDefinedTools) {
@@ -7126,7 +7260,7 @@ function prepareTools3({
  providerDefinedTools.forEach((tool2) => {
  switch (tool2.id) {
  case "google.google_search":
- if (isGemini2) {
+ if (isGemini2orNewer) {
  googleTools2.push({ googleSearch: {} });
  } else if (supportsDynamicRetrieval) {
  googleTools2.push({
@@ -7142,7 +7276,7 @@ function prepareTools3({
  }
  break;
  case "google.url_context":
- if (isGemini2) {
+ if (isGemini2orNewer) {
  googleTools2.push({ urlContext: {} });
  } else {
  toolWarnings.push({
@@ -7153,7 +7287,7 @@ function prepareTools3({
  }
  break;
  case "google.code_execution":
- if (isGemini2) {
+ if (isGemini2orNewer) {
  googleTools2.push({ codeExecution: {} });
  } else {
  toolWarnings.push({
@@ -7163,6 +7297,37 @@ function prepareTools3({
  });
  }
  break;
+ case "google.file_search":
+ if (supportsFileSearch) {
+ googleTools2.push({ fileSearch: { ...tool2.args } });
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tool2,
+ details: "The file search tool is only supported with Gemini 2.5 models."
+ });
+ }
+ break;
+ case "google.vertex_rag_store":
+ if (isGemini2orNewer) {
+ googleTools2.push({
+ retrieval: {
+ vertex_rag_store: {
+ rag_resources: {
+ rag_corpus: tool2.args.ragCorpus
+ },
+ similarity_top_k: tool2.args.topK
+ }
+ }
+ });
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tool2,
+ details: "The RAG store tool is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
  default:
  toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
  break;
@@ -7290,17 +7455,19 @@ var GoogleGenerativeAILanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a15, _b;
+ var _a15;
  const warnings = [];
  const googleOptions = await parseProviderOptions({
  provider: "google",
  providerOptions,
  schema: googleGenerativeAIProviderOptions
  });
- if (((_a15 = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a15.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ if ((tools == null ? void 0 : tools.some(
+ (tool2) => tool2.type === "provider-defined" && tool2.id === "google.vertex_rag_store"
+ )) && !this.config.provider.startsWith("google.vertex.")) {
  warnings.push({
  type: "other",
- message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ message: `The 'vertex_rag_store' tool is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
  });
  }
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
@@ -7334,7 +7501,7 @@ var GoogleGenerativeAILanguageModel = class {
  responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
  // so this is needed as an escape hatch:
  // TODO convert into provider option
- ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ((_a15 = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _a15 : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
  ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
  audioTimestamp: googleOptions.audioTimestamp
  },
@@ -7746,16 +7913,64 @@ function extractSources({
  groundingMetadata,
  generateId: generateId3
  }) {
- var _a15;
- return (_a15 = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a15.filter(
- (chunk) => chunk.web != null
- ).map((chunk) => ({
- type: "source",
- sourceType: "url",
- id: generateId3(),
- url: chunk.web.uri,
- title: chunk.web.title
- }));
+ var _a15, _b, _c;
+ if (!(groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks)) {
+ return void 0;
+ }
+ const sources = [];
+ for (const chunk of groundingMetadata.groundingChunks) {
+ if (chunk.web != null) {
+ sources.push({
+ type: "source",
+ sourceType: "url",
+ id: generateId3(),
+ url: chunk.web.uri,
+ title: (_a15 = chunk.web.title) != null ? _a15 : void 0
+ });
+ } else if (chunk.retrievedContext != null) {
+ const uri = chunk.retrievedContext.uri;
+ if (uri.startsWith("http://") || uri.startsWith("https://")) {
+ sources.push({
+ type: "source",
+ sourceType: "url",
+ id: generateId3(),
+ url: uri,
+ title: (_b = chunk.retrievedContext.title) != null ? _b : void 0
+ });
+ } else {
+ const title = (_c = chunk.retrievedContext.title) != null ? _c : "Unknown Document";
+ let mediaType = "application/octet-stream";
+ let filename = void 0;
+ if (uri.endsWith(".pdf")) {
+ mediaType = "application/pdf";
+ filename = uri.split("/").pop();
+ } else if (uri.endsWith(".txt")) {
+ mediaType = "text/plain";
+ filename = uri.split("/").pop();
+ } else if (uri.endsWith(".docx")) {
+ mediaType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document";
+ filename = uri.split("/").pop();
+ } else if (uri.endsWith(".doc")) {
+ mediaType = "application/msword";
+ filename = uri.split("/").pop();
+ } else if (uri.match(/\.(md|markdown)$/)) {
+ mediaType = "text/markdown";
+ filename = uri.split("/").pop();
+ } else {
+ filename = uri.split("/").pop();
+ }
+ sources.push({
+ type: "source",
+ sourceType: "document",
+ id: generateId3(),
+ mediaType,
+ title,
+ filename
+ });
+ }
+ }
+ }
+ return sources.length > 0 ? sources : void 0;
  }
  var getGroundingMetadataSchema = () => z4.z.object({
  webSearchQueries: z4.z.array(z4.z.string()).nullish(),
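Grounding chunks that point at non-HTTP resources are now emitted as `document` sources, with the media type inferred from the file extension of `retrievedContext.uri`. The extension-to-media-type mapping restated as a small lookup helper:

```ts
// Extension mapping used for retrievedContext URIs that are not http(s) URLs;
// unknown extensions fall back to application/octet-stream.
const DOCUMENT_MEDIA_TYPES: Array<[RegExp, string]> = [
  [/\.pdf$/, "application/pdf"],
  [/\.txt$/, "text/plain"],
  [/\.docx$/, "application/vnd.openxmlformats-officedocument.wordprocessingml.document"],
  [/\.doc$/, "application/msword"],
  [/\.(md|markdown)$/, "text/markdown"],
];

function inferDocumentMediaType(uri: string): { mediaType: string; filename: string | undefined } {
  const match = DOCUMENT_MEDIA_TYPES.find(([pattern]) => pattern.test(uri));
  return {
    mediaType: match ? match[1] : "application/octet-stream",
    filename: uri.split("/").pop(),
  };
}

console.log(inferDocumentMediaType("corpora/docs/guide.pdf"));
// -> { mediaType: "application/pdf", filename: "guide.pdf" }
```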
@@ -7763,8 +7978,12 @@ var getGroundingMetadataSchema = () => z4.z.object({
  searchEntryPoint: z4.z.object({ renderedContent: z4.z.string() }).nullish(),
  groundingChunks: z4.z.array(
  z4.z.object({
- web: z4.z.object({ uri: z4.z.string(), title: z4.z.string() }).nullish(),
- retrievedContext: z4.z.object({ uri: z4.z.string(), title: z4.z.string() }).nullish()
+ web: z4.z.object({ uri: z4.z.string(), title: z4.z.string().nullish() }).nullish(),
+ retrievedContext: z4.z.object({
+ uri: z4.z.string(),
+ title: z4.z.string().nullish(),
+ text: z4.z.string().nullish()
+ }).nullish()
  })
  ).nullish(),
  groundingSupports: z4.z.array(
@@ -7834,7 +8053,9 @@ var usageSchema2 = z4.z.object({
  thoughtsTokenCount: z4.z.number().nullish(),
  promptTokenCount: z4.z.number().nullish(),
  candidatesTokenCount: z4.z.number().nullish(),
- totalTokenCount: z4.z.number().nullish()
+ totalTokenCount: z4.z.number().nullish(),
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
+ trafficType: z4.z.string().nullish()
  });
  var getUrlContextMetadataSchema = () => z4.z.object({
  urlMetadata: z4.z.array(
@@ -7896,6 +8117,30 @@ var codeExecution = createProviderDefinedToolFactoryWithOutputSchema({
  output: z4.z.string().describe("The output from the code execution.")
  })
  });
+ var fileSearchArgsBaseSchema = z4.z.object({
+ /** The names of the file_search_stores to retrieve from.
+ * Example: `fileSearchStores/my-file-search-store-123`
+ */
+ fileSearchStoreNames: z4.z.array(z4.z.string()).describe(
+ "The names of the file_search_stores to retrieve from. Example: `fileSearchStores/my-file-search-store-123`"
+ ),
+ /** The number of file search retrieval chunks to retrieve. */
+ topK: z4.z.number().int().positive().describe("The number of file search retrieval chunks to retrieve.").optional(),
+ /** Metadata filter to apply to the file search retrieval documents.
+ * See https://google.aip.dev/160 for the syntax of the filter expression.
+ */
+ metadataFilter: z4.z.string().describe(
+ "Metadata filter to apply to the file search retrieval documents. See https://google.aip.dev/160 for the syntax of the filter expression."
+ ).optional()
+ }).passthrough();
+ var fileSearchArgsSchema = lazySchema(
+ () => zodSchema(fileSearchArgsBaseSchema)
+ );
+ var fileSearch = createProviderDefinedToolFactory({
+ id: "google.file_search",
+ name: "file_search",
+ inputSchema: fileSearchArgsSchema
+ });
  var googleSearch = createProviderDefinedToolFactory({
  id: "google.google_search",
  name: "google_search",
@@ -7913,6 +8158,14 @@ var urlContext = createProviderDefinedToolFactory({
  name: "url_context",
  inputSchema: lazySchema(() => zodSchema(z4.z.object({})))
  });
+ var vertexRagStore = createProviderDefinedToolFactory({
+ id: "google.vertex_rag_store",
+ name: "vertex_rag_store",
+ inputSchema: z4.z.object({
+ ragCorpus: z4.z.string(),
+ topK: z4.z.number().optional()
+ })
+ });
  var googleTools = {
  /**
  * Creates a Google search tool that gives Google direct access to real-time web content.
@@ -7924,6 +8177,17 @@ var googleTools = {
  * Must have name "url_context".
  */
  urlContext,
+ /**
+ * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
+ * Must have name "file_search".
+ *
+ * @param fileSearchStoreNames - Fully-qualified File Search store resource names.
+ * @param metadataFilter - Optional filter expression to restrict the files that can be retrieved.
+ * @param topK - Optional result limit for the number of chunks returned from File Search.
+ *
+ * @see https://ai.google.dev/gemini-api/docs/file-search
+ */
+ fileSearch,
  /**
  * A tool that enables the model to generate and run Python code.
  * Must have name "code_execution".
@@ -7934,7 +8198,12 @@ var googleTools = {
  * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
  */
- codeExecution
+ codeExecution,
+ /**
+ * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
+ * Must have name "vertex_rag_store".
+ */
+ vertexRagStore
  };
  var GoogleGenerativeAIImageModel = class {
  constructor(modelId, settings, config) {
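The new `googleTools.fileSearch` and `googleTools.vertexRagStore` factories take the argument shapes defined by the schemas above. The literals below are a hypothetical sketch of those argument objects only; how they are passed through depends on where the Google provider is wired up in your setup, and the `ragCorpus` resource format shown is an assumption:

```ts
// Illustrative argument objects for the provider-defined tool factories above.
const fileSearchArgs = {
  fileSearchStoreNames: ["fileSearchStores/my-file-search-store-123"], // required
  topK: 5,                           // optional: number of retrieval chunks
  metadataFilter: 'author = "Jane"', // optional: https://google.aip.dev/160 filter syntax
};

const vertexRagStoreArgs = {
  ragCorpus: "projects/my-project/locations/us-central1/ragCorpora/123", // assumed resource name format
  topK: 8, // optional: similarity_top_k
};
```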
@@ -8043,8 +8312,9 @@ var googleImageProviderOptionsSchema = lazySchema(
  )
  );
  function createGoogleGenerativeAI(options = {}) {
- var _a15;
+ var _a15, _b;
  const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://generativelanguage.googleapis.com/v1beta";
+ const providerName = (_b = options.name) != null ? _b : "google.generative-ai";
  const getHeaders = () => withUserAgentSuffix(
  {
  "x-goog-api-key": loadApiKey({
@@ -8059,7 +8329,7 @@ function createGoogleGenerativeAI(options = {}) {
  const createChatModel = (modelId) => {
  var _a22;
  return new GoogleGenerativeAILanguageModel(modelId, {
- provider: "google.generative-ai",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  generateId: (_a22 = options.generateId) != null ? _a22 : generateId,
@@ -8079,13 +8349,13 @@ function createGoogleGenerativeAI(options = {}) {
  });
  };
  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
- provider: "google.generative-ai",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  fetch: options.fetch
  });
  const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
- provider: "google.generative-ai",
+ provider: providerName,
  baseURL,
  headers: getHeaders,
  fetch: options.fetch
@@ -8307,7 +8577,7 @@ function getResponseMetadata3({
  return {
  id: id != null ? id : void 0,
  modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
+ timestamp: created ? new Date(created * 1e3) : void 0
  };
  }
  function mapOpenAIFinishReason(finishReason) {
@@ -8491,7 +8761,7 @@ var openaiChatLanguageModelOptions = lazyValidator(
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: z4.z.enum(["minimal", "low", "medium", "high"]).optional(),
+ reasoningEffort: z4.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
@@ -8541,6 +8811,15 @@ var openaiChatLanguageModelOptions = lazyValidator(
  * Useful for improving cache hit rates and working around automatic caching issues.
  */
  promptCacheKey: z4.z.string().optional(),
+ /**
+ * The retention policy for the prompt cache.
+ * - 'in_memory': Default. Standard prompt caching behavior.
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+ * Currently only available for 5.1 series models.
+ *
+ * @default 'in_memory'
+ */
+ promptCacheRetention: z4.z.enum(["in_memory", "24h"]).optional(),
  /**
  * A stable identifier used to help detect users of your application
  * that may be violating OpenAI's usage policies. The IDs should be a
@@ -8702,6 +8981,7 @@ var OpenAIChatLanguageModel = class {
  reasoning_effort: openaiOptions.reasoningEffort,
  service_tier: openaiOptions.serviceTier,
  prompt_cache_key: openaiOptions.promptCacheKey,
+ prompt_cache_retention: openaiOptions.promptCacheRetention,
  safety_identifier: openaiOptions.safetyIdentifier,
  // messages:
  messages
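Both the chat options schema above and the responses provider options schema further down now accept `promptCacheRetention`, forwarded to the API as `prompt_cache_retention`. A hedged example of the provider-options payload a caller would supply (the surrounding `generateText`/`streamText` or Mastra agent call is assumed, not shown):

```ts
// Illustrative providerOptions payload; key names follow the schema above.
const providerOptions = {
  openai: {
    promptCacheKey: "support-agent-system-prompt-v3",
    promptCacheRetention: "24h" as const, // "in_memory" (default) or "24h" on 5.1-series models
  },
};
```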
@@ -8917,7 +9197,7 @@ var OpenAIChatLanguageModel = class {
8917
9197
  outputTokens: void 0,
8918
9198
  totalTokens: void 0
8919
9199
  };
8920
- let isFirstChunk = true;
9200
+ let metadataExtracted = false;
8921
9201
  let isActiveText = false;
8922
9202
  const providerMetadata = { openai: {} };
8923
9203
  return {
@@ -8942,12 +9222,15 @@ var OpenAIChatLanguageModel = class {
8942
9222
  controller.enqueue({ type: "error", error: value.error });
8943
9223
  return;
8944
9224
  }
8945
- if (isFirstChunk) {
8946
- isFirstChunk = false;
8947
- controller.enqueue({
8948
- type: "response-metadata",
8949
- ...getResponseMetadata3(value)
8950
- });
9225
+ if (!metadataExtracted) {
9226
+ const metadata = getResponseMetadata3(value);
9227
+ if (Object.values(metadata).some(Boolean)) {
9228
+ metadataExtracted = true;
9229
+ controller.enqueue({
9230
+ type: "response-metadata",
9231
+ ...getResponseMetadata3(value)
9232
+ });
9233
+ }
8951
9234
  }
8952
9235
  if (value.usage != null) {
8953
9236
  usage.inputTokens = (_a15 = value.usage.prompt_tokens) != null ? _a15 : void 0;
@@ -9119,18 +9402,6 @@ function getSystemMessageMode(modelId) {
9119
9402
  return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
9120
9403
  }
9121
9404
  var reasoningModels = {
9122
- "o1-mini": {
9123
- systemMessageMode: "remove"
9124
- },
9125
- "o1-mini-2024-09-12": {
9126
- systemMessageMode: "remove"
9127
- },
9128
- "o1-preview": {
9129
- systemMessageMode: "remove"
9130
- },
9131
- "o1-preview-2024-09-12": {
9132
- systemMessageMode: "remove"
9133
- },
9134
9405
  o3: {
9135
9406
  systemMessageMode: "developer"
9136
9407
  },
@@ -9666,7 +9937,7 @@ var openaiImageResponseSchema = lazyValidator(
9666
9937
  data: z4.z.array(
9667
9938
  z4.z.object({
9668
9939
  b64_json: z4.z.string(),
9669
- revised_prompt: z4.z.string().optional()
9940
+ revised_prompt: z4.z.string().nullish()
9670
9941
  })
9671
9942
  )
9672
9943
  })
@@ -9811,7 +10082,7 @@ var compoundFilterSchema = z4.z.object({
9811
10082
  z4.z.union([comparisonFilterSchema, z4.z.lazy(() => compoundFilterSchema)])
9812
10083
  )
9813
10084
  });
9814
- var fileSearchArgsSchema = lazySchema(
10085
+ var fileSearchArgsSchema2 = lazySchema(
9815
10086
  () => zodSchema(
9816
10087
  z4.z.object({
9817
10088
  vectorStoreIds: z4.z.array(z4.z.string()),
@@ -9840,7 +10111,7 @@ var fileSearchOutputSchema = lazySchema(
9840
10111
  })
9841
10112
  )
9842
10113
  );
9843
- var fileSearch = createProviderDefinedToolFactoryWithOutputSchema({
10114
+ var fileSearch2 = createProviderDefinedToolFactoryWithOutputSchema({
9844
10115
  id: "openai.file_search",
9845
10116
  name: "file_search",
9846
10117
  inputSchema: z4.z.object({}),
@@ -9934,7 +10205,13 @@ var webSearchOutputSchema = lazySchema(
9934
10205
  url: z4.z.string(),
9935
10206
  pattern: z4.z.string()
9936
10207
  })
9937
- ])
10208
+ ]),
10209
+ sources: z4.z.array(
10210
+ z4.z.discriminatedUnion("type", [
10211
+ z4.z.object({ type: z4.z.literal("url"), url: z4.z.string() }),
10212
+ z4.z.object({ type: z4.z.literal("api"), name: z4.z.string() })
10213
+ ])
10214
+ ).optional()
9938
10215
  })
9939
10216
  )
9940
10217
  );
@@ -10012,7 +10289,7 @@ var openaiTools = {
10012
10289
  * @param ranking - The ranking options to use for the file search.
10013
10290
  * @param filters - The filters to use for the file search.
10014
10291
  */
10015
- fileSearch,
10292
+ fileSearch: fileSearch2,
10016
10293
  /**
10017
10294
  * The image generation tool allows you to generate images using a text prompt,
10018
10295
  * and optionally image inputs. It leverages the GPT Image model,
@@ -10492,7 +10769,13 @@ var openaiResponsesChunkSchema = lazyValidator(
10492
10769
  action: z4.z.discriminatedUnion("type", [
10493
10770
  z4.z.object({
10494
10771
  type: z4.z.literal("search"),
10495
- query: z4.z.string().nullish()
10772
+ query: z4.z.string().nullish(),
10773
+ sources: z4.z.array(
10774
+ z4.z.discriminatedUnion("type", [
10775
+ z4.z.object({ type: z4.z.literal("url"), url: z4.z.string() }),
10776
+ z4.z.object({ type: z4.z.literal("api"), name: z4.z.string() })
10777
+ ])
10778
+ ).nullish()
10496
10779
  }),
10497
10780
  z4.z.object({
10498
10781
  type: z4.z.literal("open_page"),
@@ -10600,10 +10883,13 @@ var openaiResponsesChunkSchema = lazyValidator(
10600
10883
  }),
10601
10884
  z4.z.object({
10602
10885
  type: z4.z.literal("error"),
10603
- code: z4.z.string(),
10604
- message: z4.z.string(),
10605
- param: z4.z.string().nullish(),
10606
- sequence_number: z4.z.number()
10886
+ sequence_number: z4.z.number(),
10887
+ error: z4.z.object({
10888
+ type: z4.z.string(),
10889
+ code: z4.z.string(),
10890
+ message: z4.z.string(),
10891
+ param: z4.z.string().nullish()
10892
+ })
10607
10893
  }),
10608
10894
  z4.z.object({ type: z4.z.string() }).loose().transform((value) => ({
10609
10895
  type: "unknown_chunk",
@@ -10616,13 +10902,15 @@ var openaiResponsesChunkSchema = lazyValidator(
10616
10902
  var openaiResponsesResponseSchema = lazyValidator(
10617
10903
  () => zodSchema(
10618
10904
  z4.z.object({
10619
- id: z4.z.string(),
10620
- created_at: z4.z.number(),
10905
+ id: z4.z.string().optional(),
10906
+ created_at: z4.z.number().optional(),
10621
10907
  error: z4.z.object({
10622
- code: z4.z.string(),
10623
- message: z4.z.string()
10908
+ message: z4.z.string(),
10909
+ type: z4.z.string(),
10910
+ param: z4.z.string().nullish(),
10911
+ code: z4.z.string()
10624
10912
  }).nullish(),
10625
- model: z4.z.string(),
10913
+ model: z4.z.string().optional(),
10626
10914
  output: z4.z.array(
10627
10915
  z4.z.discriminatedUnion("type", [
10628
10916
  z4.z.object({
@@ -10664,7 +10952,18 @@ var openaiResponsesResponseSchema = lazyValidator(
10664
10952
  quote: z4.z.string().nullish()
10665
10953
  }),
10666
10954
  z4.z.object({
10667
- type: z4.z.literal("container_file_citation")
10955
+ type: z4.z.literal("container_file_citation"),
10956
+ container_id: z4.z.string(),
10957
+ file_id: z4.z.string(),
10958
+ filename: z4.z.string().nullish(),
10959
+ start_index: z4.z.number().nullish(),
10960
+ end_index: z4.z.number().nullish(),
10961
+ index: z4.z.number().nullish()
10962
+ }),
10963
+ z4.z.object({
10964
+ type: z4.z.literal("file_path"),
10965
+ file_id: z4.z.string(),
10966
+ index: z4.z.number().nullish()
10668
10967
  })
10669
10968
  ])
10670
10969
  )
@@ -10678,7 +10977,13 @@ var openaiResponsesResponseSchema = lazyValidator(
  action: z4.z.discriminatedUnion("type", [
  z4.z.object({
  type: z4.z.literal("search"),
- query: z4.z.string().nullish()
+ query: z4.z.string().nullish(),
+ sources: z4.z.array(
+ z4.z.discriminatedUnion("type", [
+ z4.z.object({ type: z4.z.literal("url"), url: z4.z.string() }),
+ z4.z.object({ type: z4.z.literal("api"), name: z4.z.string() })
+ ])
+ ).nullish()
  }),
  z4.z.object({
  type: z4.z.literal("open_page"),
@@ -10697,7 +11002,10 @@ var openaiResponsesResponseSchema = lazyValidator(
  queries: z4.z.array(z4.z.string()),
  results: z4.z.array(
  z4.z.object({
- attributes: z4.z.record(z4.z.string(), z4.z.unknown()),
+ attributes: z4.z.record(
+ z4.z.string(),
+ z4.z.union([z4.z.string(), z4.z.number(), z4.z.boolean()])
+ ),
  file_id: z4.z.string(),
  filename: z4.z.string(),
  score: z4.z.number(),
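File-search result `attributes` values are narrowed from `unknown` to `string | number | boolean`. A hypothetical result entry, restricted to the fields visible in this hunk, that still parses under the stricter record type:

// Hypothetical file-search result; attribute values must now be scalar.
const fileSearchResult = {
  attributes: { author: "jane", year: 2024, internal: false },
  file_id: "file-abc123",
  filename: "handbook.md",
  score: 0.87,
};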
@@ -10759,7 +11067,7 @@ var openaiResponsesResponseSchema = lazyValidator(
  )
  })
  ])
- ),
+ ).optional(),
  service_tier: z4.z.string().nullish(),
  incomplete_details: z4.z.object({ reason: z4.z.string() }).nullish(),
  usage: z4.z.object({
@@ -10767,7 +11075,7 @@ var openaiResponsesResponseSchema = lazyValidator(
  input_tokens_details: z4.z.object({ cached_tokens: z4.z.number().nullish() }).nullish(),
  output_tokens: z4.z.number(),
  output_tokens_details: z4.z.object({ reasoning_tokens: z4.z.number().nullish() }).nullish()
- })
+ }).optional()
  })
  )
  );
@@ -10775,6 +11083,7 @@ var TOP_LOGPROBS_MAX = 20;
  var openaiResponsesProviderOptionsSchema = lazyValidator(
  () => zodSchema(
  z4.z.object({
+ conversation: z4.z.string().nullish(),
  include: z4.z.array(
  z4.z.enum([
  "reasoning.encrypted_content",
@@ -10807,6 +11116,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator(
  parallelToolCalls: z4.z.boolean().nullish(),
  previousResponseId: z4.z.string().nullish(),
  promptCacheKey: z4.z.string().nullish(),
+ /**
+ * The retention policy for the prompt cache.
+ * - 'in_memory': Default. Standard prompt caching behavior.
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+ * Currently only available for 5.1 series models.
+ *
+ * @default 'in_memory'
+ */
+ promptCacheRetention: z4.z.enum(["in_memory", "24h"]).nullish(),
  reasoningEffort: z4.z.string().nullish(),
  reasoningSummary: z4.z.string().nullish(),
  safetyIdentifier: z4.z.string().nullish(),
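The provider options schema gains `conversation` and `promptCacheRetention`. A minimal sketch of the `openai` provider options object these additions accept; how it is threaded through (for example via a call's provider options) depends on the caller, and the values are hypothetical:

// Hypothetical provider options exercising the new fields.
const openaiProviderOptions = {
  conversation: "conv_123",          // hypothetical conversation id
  promptCacheKey: "support-bot-v1",  // existing option, invented value
  promptCacheRetention: "24h",       // or "in_memory" (the documented default)
};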
@@ -10846,7 +11164,7 @@ async function prepareResponsesTools({
  case "openai.file_search": {
  const args = await validateTypes({
  value: tool2.args,
- schema: fileSearchArgsSchema
+ schema: fileSearchArgsSchema2
  });
  openaiTools2.push({
  type: "file_search",
@@ -11012,6 +11330,13 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "conversation",
+ details: "conversation and previousResponseId cannot be used together"
+ });
+ }
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode,
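`conversation` and `previousResponseId` are treated as mutually exclusive: supplying both pushes an `unsupported-setting` warning rather than throwing. A hedged illustration of an options object that would trigger the warning above (ids invented):

// Hypothetical: setting both fields produces the "unsupported-setting" warning.
const conflictingOptions = {
  conversation: "conv_123",
  previousResponseId: "resp_456",
};
// Pick one: either continue a stored conversation, or chain from a previous response id.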
@@ -11074,6 +11399,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  // provider options:
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
  max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -11084,6 +11410,7 @@ var OpenAIResponsesLanguageModel = class {
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
  include,
  prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+ prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
  top_logprobs: topLogprobs,
  truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
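Both new options are forwarded to the Responses API request body as snake_case keys. A hedged sketch of the resulting body fragment, limited to the fields shown in this diff and using the hypothetical values from the earlier example:

// Hypothetical request-body fragment built from the provider options above.
const requestBodyFragment = {
  conversation: "conv_123",
  prompt_cache_key: "support-bot-v1",
  prompt_cache_retention: "24h",
};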
@@ -11293,7 +11620,14 @@ var OpenAIResponsesLanguageModel = class {
  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId(),
  mediaType: "text/plain",
  title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
- filename: (_l = annotation.filename) != null ? _l : annotation.file_id
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
+ ...annotation.file_id ? {
+ providerMetadata: {
+ openai: {
+ fileId: annotation.file_id
+ }
+ }
+ } : {}
  });
  }
  }
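File citations emitted as sources now carry the OpenAI file id under `providerMetadata.openai.fileId` whenever `annotation.file_id` is present. A hypothetical source object in the shape this code produces, restricted to the fields visible in this hunk (ids and names invented):

// Hypothetical document source as emitted after this change.
const citedSource = {
  id: "gen_01",
  mediaType: "text/plain",
  title: "handbook.md",
  filename: "handbook.md",
  providerMetadata: { openai: { fileId: "file-abc123" } },
};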
@@ -11403,7 +11737,9 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  const providerMetadata = {
- openai: { responseId: response.id }
+ openai: {
+ ...response.id != null ? { responseId: response.id } : {}
+ }
  };
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
@@ -11411,6 +11747,7 @@ var OpenAIResponsesLanguageModel = class {
  if (typeof response.service_tier === "string") {
  providerMetadata.openai.serviceTier = response.service_tier;
  }
+ const usage = response.usage;
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -11418,11 +11755,11 @@ var OpenAIResponsesLanguageModel = class {
  hasFunctionCall
  }),
  usage: {
- inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
- cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+ inputTokens: usage.input_tokens,
+ outputTokens: usage.output_tokens,
+ totalTokens: usage.input_tokens + usage.output_tokens,
+ reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+ cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
  },
  request: { body },
  response: {
@@ -11871,7 +12208,14 @@ var OpenAIResponsesLanguageModel = class {
  id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId(),
  mediaType: "text/plain",
  title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
- filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id
+ filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
+ ...value.annotation.file_id ? {
+ providerMetadata: {
+ openai: {
+ fileId: value.annotation.file_id
+ }
+ }
+ } : {}
  });
  }
  } else if (isErrorChunk(value)) {
@@ -11949,13 +12293,6 @@ function getResponsesModelConfig(modelId) {
  };
  }
  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
- if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
- return {
- ...defaults,
- isReasoningModel: true,
- systemMessageMode: "remove"
- };
- }
  return {
  ...defaults,
  isReasoningModel: true,
@@ -11971,7 +12308,11 @@ function mapWebSearchOutput(action) {
  var _a15;
  switch (action.type) {
  case "search":
- return { action: { type: "search", query: (_a15 = action.query) != null ? _a15 : void 0 } };
+ return {
+ action: { type: "search", query: (_a15 = action.query) != null ? _a15 : void 0 },
+ // include sources when provided by the Responses API (behind include flag)
+ ...action.sources != null && { sources: action.sources }
+ };
  case "open_page":
  return { action: { type: "openPage", url: action.url } };
  case "find":
@@ -12310,7 +12651,7 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var VERSION5 = "2.0.53" ;
+ var VERSION5 = "2.0.69" ;
  function createOpenAI(options = {}) {
  var _a15, _b;
  const baseURL = (_a15 = withoutTrailingSlash(
@@ -12407,17 +12748,10 @@ function createOpenAI(options = {}) {
  }
  createOpenAI();

- exports.APICallError = APICallError;
- exports.EmptyResponseBodyError = EmptyResponseBodyError;
- exports.EventSourceParserStream = EventSourceParserStream;
- exports.InvalidArgumentError = InvalidArgumentError;
- exports.JSONParseError = JSONParseError;
- exports.LoadAPIKeyError = LoadAPIKeyError;
  exports.MastraModelGateway = MastraModelGateway;
  exports.NoSuchModelError = NoSuchModelError;
  exports.OpenAICompatibleImageModel = OpenAICompatibleImageModel;
  exports.TooManyEmbeddingValuesForCallError = TooManyEmbeddingValuesForCallError;
- exports.TypeValidationError = TypeValidationError;
  exports.UnsupportedFunctionalityError = UnsupportedFunctionalityError;
  exports.combineHeaders = combineHeaders;
  exports.convertToBase64 = convertToBase64;
@@ -12429,10 +12763,11 @@ exports.createJsonResponseHandler = createJsonResponseHandler;
  exports.createOpenAI = createOpenAI;
  exports.createOpenAICompatible = createOpenAICompatible;
  exports.generateId = generateId;
+ exports.injectJsonInstructionIntoMessages = injectJsonInstructionIntoMessages;
  exports.loadApiKey = loadApiKey;
  exports.parseProviderOptions = parseProviderOptions;
  exports.postJsonToApi = postJsonToApi;
  exports.withUserAgentSuffix = withUserAgentSuffix;
  exports.withoutTrailingSlash = withoutTrailingSlash;
- //# sourceMappingURL=chunk-ZPMFINU2.cjs.map
- //# sourceMappingURL=chunk-ZPMFINU2.cjs.map
+ //# sourceMappingURL=chunk-HNHZGFZY.cjs.map
+ //# sourceMappingURL=chunk-HNHZGFZY.cjs.map